repo_name (stringlengths 5 to 100) | path (stringlengths 4 to 299) | copies (stringclasses, 990 values) | size (stringlengths 4 to 7) | content (stringlengths 666 to 1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
funkbit/django-funky-user | funky_user/views.py | 1 | 2479 | from django.conf import settings
from django.contrib.auth import login as auth_login, get_user_model
from django.contrib.auth.tokens import default_token_generator
from django.shortcuts import redirect, render
from django.utils.http import base36_to_int
from django.views.decorators.debug import sensitive_post_parameters
from funky_user import conf
from funky_user.forms import SignupForm
@sensitive_post_parameters('password')
def signup(request, template_name='auth/signup.html', extra_context=None):
"""
Signup form for new users.
"""
UserModel = get_user_model()
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
            # Create the new user
new_user = UserModel.objects.create_user(**form.cleaned_data)
# Send activation email
new_user.send_activation_email()
return redirect('user-signup-done')
else:
form = SignupForm()
# Template context
context = {
'form': form
}
if extra_context is not None:
context.update(extra_context)
return render(request, template_name, context)
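# Note: 'user-signup-done' is assumed to be a named URL in the project's
# URLconf, presumably routed to the signup_done view below; activate() likewise
# redirects to conf.SIGNUP_REDIRECT_URL taken from funky_user.conf.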
def signup_done(request, template_name='auth/signup_done.html', extra_context=None):
"""
Screen shown to user after successfully signing up.
"""
return render(request, template_name, extra_context)
def activate(request, uidb36, token,
template_name='auth/signup_activation_failed.html',
extra_context=None):
"""
Check activation token for newly registered users. If successful,
mark as active and log them in. If not, show an error page.
Code borrowed from Django's auth reset mechanism.
"""
UserModel = get_user_model()
# Look up the user object
try:
uid_int = base36_to_int(uidb36)
user = UserModel.objects.get(id=uid_int)
except (ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None:
# Is the token valid?
if default_token_generator.check_token(user, token):
# Activate the user
user.is_active = True
user.save()
# Log in the user
user.backend = settings.AUTHENTICATION_BACKENDS[0]
auth_login(request, user)
# Redirect to URL specified in settings
return redirect(conf.SIGNUP_REDIRECT_URL)
return render(request, template_name, extra_context)
| bsd-2-clause | -5,861,493,197,801,414,000 | 26.544444 | 84 | 0.653086 | false |
mooninite/cobbler | cobbler/action_check.py | 6 | 19294 | """
Copyright 2006-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import glob
import os
import re
import clogger
import utils
from utils import _
class CobblerCheck:
"""
Validates whether the system is reasonably well configured for
serving up content. This is the code behind 'cobbler check'.
"""
def __init__(self, collection_mgr, logger=None):
"""
Constructor
"""
self.collection_mgr = collection_mgr
self.settings = collection_mgr.settings()
if logger is None:
logger = clogger.Logger()
self.logger = logger
def run(self):
"""
        Returns a list of things to correct prior to running the application
        'for real'; the list is empty when no problems are found.
(The CLI usage is "cobbler check" before "cobbler sync")
"""
status = []
self.checked_family = utils.get_family()
self.check_name(status)
self.check_selinux(status)
if self.settings.manage_dhcp:
mode = self.collection_mgr.api.get_sync().dhcp.what()
if mode == "isc":
self.check_dhcpd_bin(status)
self.check_dhcpd_conf(status)
self.check_service(status, "dhcpd")
elif mode == "dnsmasq":
self.check_dnsmasq_bin(status)
self.check_service(status, "dnsmasq")
if self.settings.manage_dns:
mode = self.collection_mgr.api.get_sync().dns.what()
if mode == "bind":
self.check_bind_bin(status)
self.check_service(status, "named")
elif mode == "dnsmasq" and not self.settings.manage_dhcp:
self.check_dnsmasq_bin(status)
self.check_service(status, "dnsmasq")
mode = self.collection_mgr.api.get_sync().tftpd.what()
if mode == "in_tftpd":
self.check_tftpd_bin(status)
self.check_tftpd_dir(status)
self.check_tftpd_conf(status)
elif mode == "tftpd_py":
self.check_ctftpd_bin(status)
self.check_ctftpd_dir(status)
self.check_ctftpd_conf(status)
self.check_service(status, "cobblerd")
self.check_bootloaders(status)
self.check_for_wget_curl(status)
self.check_rsync_conf(status)
self.check_httpd(status)
self.check_iptables(status)
self.check_yum(status)
self.check_debmirror(status)
self.check_for_ksvalidator(status)
self.check_for_default_password(status)
self.check_for_unreferenced_repos(status)
self.check_for_unsynced_repos(status)
self.check_for_cman(status)
return status
def check_for_ksvalidator(self, status):
if self.checked_family == "debian":
return
if not os.path.exists("/usr/bin/ksvalidator"):
status.append("ksvalidator was not found, install pykickstart")
def check_for_cman(self, status):
# not doing rpm -q here to be cross-distro friendly
if not os.path.exists("/sbin/fence_ilo") and not os.path.exists("/usr/sbin/fence_ilo"):
status.append("fencing tools were not found, and are required to use the (optional) power management features. install cman or fence-agents to use them")
def check_service(self, status, which, notes=""):
if notes != "":
notes = " (NOTE: %s)" % notes
rc = 0
if self.checked_family in ("redhat", "suse"):
if os.path.exists("/etc/rc.d/init.d/%s" % which):
rc = utils.subprocess_call(self.logger, "/sbin/service %s status > /dev/null 2>/dev/null" % which, shell=True)
if rc != 0:
status.append(_("service %s is not running%s") % (which, notes))
return
elif self.checked_family == "debian":
# we still use /etc/init.d
if os.path.exists("/etc/init.d/%s" % which):
                rc = utils.subprocess_call(self.logger, "/etc/init.d/%s status > /dev/null 2>/dev/null" % which, shell=True)
if rc != 0:
status.append(_("service %s is not running%s") % (which, notes))
return
else:
status.append(_("Unknown distribution type, cannot check for running service %s" % which))
return
def check_iptables(self, status):
if os.path.exists("/etc/rc.d/init.d/iptables"):
rc = utils.subprocess_call(self.logger, "/sbin/service iptables status >/dev/null 2>/dev/null", shell=True)
if rc == 0:
status.append(_("since iptables may be running, ensure 69, 80/443, and %(xmlrpc)s are unblocked") % {"xmlrpc": self.settings.xmlrpc_port})
def check_yum(self, status):
if self.checked_family == "debian":
return
if not os.path.exists("/usr/bin/createrepo"):
status.append(_("createrepo package is not installed, needed for cobbler import and cobbler reposync, install createrepo?"))
if not os.path.exists("/usr/bin/reposync"):
            status.append(_("reposync is not installed, needed for cobbler reposync, install/upgrade yum-utils?"))
if not os.path.exists("/usr/bin/yumdownloader"):
status.append(_("yumdownloader is not installed, needed for cobbler repo add with --rpm-list parameter, install/upgrade yum-utils?"))
        if self.settings.reposync_flags.find("-l") != -1:
if self.checked_family in ("redhat", "suse"):
yum_utils_ver = utils.subprocess_get(self.logger, "/usr/bin/rpmquery --queryformat=%{VERSION} yum-utils", shell=True)
if yum_utils_ver < "1.1.17":
status.append(_("yum-utils need to be at least version 1.1.17 for reposync -l, current version is %s") % yum_utils_ver)
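                # Caveat: the comparison above is plain string comparison, so a
                # version such as "1.1.9" sorts after "1.1.17" lexicographically
                # and genuinely old yum-utils releases may go unflagged.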
def check_debmirror(self, status):
if not os.path.exists("/usr/bin/debmirror"):
status.append(_("debmirror package is not installed, it will be required to manage debian deployments and repositories"))
if os.path.exists("/etc/debmirror.conf"):
f = open("/etc/debmirror.conf")
re_dists = re.compile(r'@dists=')
re_arches = re.compile(r'@arches=')
for line in f.readlines():
if re_dists.search(line) and not line.strip().startswith("#"):
status.append(_("comment out 'dists' on /etc/debmirror.conf for proper debian support"))
if re_arches.search(line) and not line.strip().startswith("#"):
status.append(_("comment out 'arches' on /etc/debmirror.conf for proper debian support"))
def check_name(self, status):
"""
If the server name in the config file is still set to localhost
automatic installations run from koan will not have proper kernel line
parameters.
"""
if self.settings.server == "127.0.0.1":
status.append(_("The 'server' field in /etc/cobbler/settings must be set to something other than localhost, or automatic installation features will not work. This should be a resolvable hostname or IP for the boot server as reachable by all machines that will use it."))
if self.settings.next_server == "127.0.0.1":
status.append(_("For PXE to be functional, the 'next_server' field in /etc/cobbler/settings must be set to something other than 127.0.0.1, and should match the IP of the boot server on the PXE network."))
def check_selinux(self, status):
"""
Suggests various SELinux rules changes to run Cobbler happily with
SELinux in enforcing mode. FIXME: this method could use some
refactoring in the future.
"""
if self.checked_family == "debian":
return
enabled = self.collection_mgr.api.is_selinux_enabled()
if enabled:
status.append(_("SELinux is enabled. Please review the following wiki page for details on ensuring cobbler works correctly in your SELinux environment:\n https://github.com/cobbler/cobbler/wiki/Selinux"))
def check_for_default_password(self, status):
default_pass = self.settings.default_password_crypted
if default_pass == "$1$mF86/UHC$WvcIcX2t6crBz2onWxyac.":
status.append(_("The default password used by the sample templates for newly installed machines (default_password_crypted in /etc/cobbler/settings) is still set to 'cobbler' and should be changed, try: \"openssl passwd -1 -salt 'random-phrase-here' 'your-password-here'\" to generate new one"))
def check_for_unreferenced_repos(self, status):
repos = []
referenced = []
not_found = []
for r in self.collection_mgr.api.repos():
repos.append(r.name)
for p in self.collection_mgr.api.profiles():
my_repos = p.repos
if my_repos != "<<inherit>>":
referenced.extend(my_repos)
for r in referenced:
if r not in repos and r != "<<inherit>>":
not_found.append(r)
if len(not_found) > 0:
status.append(_("One or more repos referenced by profile objects is no longer defined in cobbler: %s") % ", ".join(not_found))
def check_for_unsynced_repos(self, status):
need_sync = []
for r in self.collection_mgr.repos():
if r.mirror_locally == 1:
lookfor = os.path.join(self.settings.webdir, "repo_mirror", r.name)
if not os.path.exists(lookfor):
need_sync.append(r.name)
if len(need_sync) > 0:
status.append(_("One or more repos need to be processed by cobbler reposync for the first time before automating installations using them: %s") % ", ".join(need_sync))
def check_httpd(self, status):
"""
Check if Apache is installed.
"""
if self.checked_family == "redhat":
rc = utils.subprocess_get(self.logger, "httpd -v")
elif self.checked_family == "suse":
rc = utils.subprocess_get(self.logger, "httpd2 -v")
else:
rc = utils.subprocess_get(self.logger, "apache2 -v")
if rc.find("Server") == -1:
status.append("Apache (httpd) is not installed and/or in path")
def check_dhcpd_bin(self, status):
"""
Check if dhcpd is installed
"""
if not os.path.exists("/usr/sbin/dhcpd"):
status.append("dhcpd is not installed")
def check_dnsmasq_bin(self, status):
"""
Check if dnsmasq is installed
"""
rc = utils.subprocess_get(self.logger, "dnsmasq --help")
if rc.find("Valid options") == -1:
status.append("dnsmasq is not installed and/or in path")
def check_bind_bin(self, status):
"""
Check if bind is installed.
"""
rc = utils.subprocess_get(self.logger, "named -v")
# it should return something like "BIND 9.6.1-P1-RedHat-9.6.1-6.P1.fc11"
if rc.find("BIND") == -1:
status.append("named is not installed and/or in path")
def check_for_wget_curl(self, status):
"""
Check to make sure wget or curl is installed
"""
rc1 = utils.subprocess_call(self.logger, "which wget")
rc2 = utils.subprocess_call(self.logger, "which curl")
if rc1 != 0 and rc2 != 0:
status.append("Neither wget nor curl are installed and/or available in $PATH. Cobbler requires that one of these utilities be installed.")
def check_bootloaders(self, status):
"""
Check if network bootloaders are installed
"""
# FIXME: move zpxe.rexx to loaders
bootloaders = {
"menu.c32": ["/usr/share/syslinux/menu.c32",
"/usr/lib/syslinux/menu.c32",
"/var/lib/cobbler/loaders/menu.c32"],
"yaboot": ["/var/lib/cobbler/loaders/yaboot*"],
"pxelinux.0": ["/usr/share/syslinux/pxelinux.0",
"/usr/lib/syslinux/pxelinux.0",
"/var/lib/cobbler/loaders/pxelinux.0"],
"efi": ["/var/lib/cobbler/loaders/grub-x86.efi",
"/var/lib/cobbler/loaders/grub-x86_64.efi"],
}
# look for bootloaders at the glob locations above
found_bootloaders = []
items = bootloaders.keys()
for loader_name in items:
patterns = bootloaders[loader_name]
for pattern in patterns:
matches = glob.glob(pattern)
if len(matches) > 0:
found_bootloaders.append(loader_name)
not_found = []
# invert the list of what we've found so we can report on what we haven't found
for loader_name in items:
if loader_name not in found_bootloaders:
not_found.append(loader_name)
if len(not_found) > 0:
            status.append("some network boot-loaders are missing from /var/lib/cobbler/loaders, you may run 'cobbler get-loaders' to download them, or, if you only want to handle x86/x86_64 netbooting, you may ensure that you have installed a *recent* version of the syslinux package and can ignore this message entirely. Files in this directory, should you want to support all architectures, should include pxelinux.0, menu.c32, and yaboot. The 'cobbler get-loaders' command is the easiest way to resolve these requirements.")
def check_tftpd_bin(self, status):
"""
Check if tftpd is installed
"""
if self.checked_family == "debian":
return
if not os.path.exists("/etc/xinetd.d/tftp"):
status.append("missing /etc/xinetd.d/tftp, install tftp-server?")
def check_tftpd_dir(self, status):
"""
Check if cobbler.conf's tftpboot directory exists
"""
if self.checked_family == "debian":
return
bootloc = utils.tftpboot_location()
if not os.path.exists(bootloc):
status.append(_("please create directory: %(dirname)s") % {"dirname": bootloc})
def check_tftpd_conf(self, status):
"""
Check that configured tftpd boot directory matches with actual
Check that tftpd is enabled to autostart
"""
if self.checked_family == "debian":
return
if os.path.exists("/etc/xinetd.d/tftp"):
f = open("/etc/xinetd.d/tftp")
re_disable = re.compile(r'disable.*=.*yes')
for line in f.readlines():
if re_disable.search(line) and not line.strip().startswith("#"):
status.append(_("change 'disable' to 'no' in %(file)s") % {"file": "/etc/xinetd.d/tftp"})
else:
status.append("missing configuration file: /etc/xinetd.d/tftp")
def check_ctftpd_bin(self, status):
"""
Check if the Cobbler tftp server is installed
"""
if self.checked_family == "debian":
return
if not os.path.exists("/etc/xinetd.d/ctftp"):
status.append("missing /etc/xinetd.d/ctftp")
def check_ctftpd_dir(self, status):
"""
Check if cobbler.conf's tftpboot directory exists
"""
if self.checked_family == "debian":
return
bootloc = utils.tftpboot_location()
if not os.path.exists(bootloc):
status.append(_("please create directory: %(dirname)s") % {"dirname": bootloc})
def check_ctftpd_conf(self, status):
"""
Check that configured tftpd boot directory matches with actual
Check that tftpd is enabled to autostart
"""
if self.checked_family == "debian":
return
if os.path.exists("/etc/xinetd.d/tftp"):
f = open("/etc/xinetd.d/tftp")
re_disable = re.compile(r'disable.*=.*no')
for line in f.readlines():
if re_disable.search(line) and not line.strip().startswith("#"):
status.append(_("change 'disable' to 'yes' in %(file)s") % {"file": "/etc/xinetd.d/tftp"})
if os.path.exists("/etc/xinetd.d/ctftp"):
f = open("/etc/xinetd.d/ctftp")
re_disable = re.compile(r'disable.*=.*yes')
for line in f.readlines():
if re_disable.search(line) and not line.strip().startswith("#"):
status.append(_("change 'disable' to 'no' in %(file)s") % {"file": "/etc/xinetd.d/ctftp"})
else:
status.append("missing configuration file: /etc/xinetd.d/ctftp")
def check_rsync_conf(self, status):
"""
Check that rsync is enabled to autostart
"""
if self.checked_family == "debian":
return
if os.path.exists("/etc/xinetd.d/rsync"):
f = open("/etc/xinetd.d/rsync")
re_disable = re.compile(r'disable.*=.*yes')
for line in f.readlines():
if re_disable.search(line) and not line.strip().startswith("#"):
status.append(_("change 'disable' to 'no' in %(file)s") % {"file": "/etc/xinetd.d/rsync"})
else:
status.append(_("file %(file)s does not exist") % {"file": "/etc/xinetd.d/rsync"})
def check_dhcpd_conf(self, status):
"""
NOTE: this code only applies if cobbler is *NOT* set to generate
a dhcp.conf file
Check that dhcpd *appears* to be configured for pxe booting.
We can't assure file correctness. Since a cobbler user might
have dhcp on another server, it's okay if it's not there and/or
not configured correctly according to automated scans.
"""
if not (self.settings.manage_dhcp == 0):
return
if os.path.exists(self.settings.dhcpd_conf):
match_next = False
match_file = False
f = open(self.settings.dhcpd_conf)
for line in f.readlines():
if line.find("next-server") != -1:
match_next = True
if line.find("filename") != -1:
match_file = True
if not match_next:
status.append(_("expecting next-server entry in %(file)s") % {"file": self.settings.dhcpd_conf})
if not match_file:
status.append(_("missing file: %(file)s") % {"file": self.settings.dhcpd_conf})
else:
status.append(_("missing file: %(file)s") % {"file": self.settings.dhcpd_conf})
# EOF
| gpl-2.0 | -3,521,767,215,008,514,600 | 43.354023 | 538 | 0.593915 | false |
ocefpaf/iris | lib/iris/tests/integration/test_pp.py | 2 | 29216 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Integration tests for loading and saving PP files."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import os
from unittest import mock
from cf_units import Unit
from iris.aux_factory import HybridHeightFactory, HybridPressureFactory
from iris.coords import AuxCoord, CellMethod, DimCoord
from iris.cube import Cube
import iris.fileformats.pp
import iris.fileformats.pp_load_rules
from iris.fileformats.pp_save_rules import verify
from iris.exceptions import IgnoreCubeException
from iris.fileformats.pp import load_pairs_from_fields
import iris.util
class TestVertical(tests.IrisTest):
def _test_coord(self, cube, point, bounds=None, **kwargs):
coords = cube.coords(**kwargs)
self.assertEqual(
len(coords),
1,
"failed to find exactly one coord" " using: {}".format(kwargs),
)
self.assertEqual(coords[0].points, point)
if bounds is not None:
self.assertArrayEqual(coords[0].bounds, [bounds])
@staticmethod
def _mock_field(**kwargs):
mock_data = np.zeros(1)
mock_core_data = mock.MagicMock(return_value=mock_data)
field = mock.MagicMock(
lbuser=[0] * 7,
lbrsvd=[0] * 4,
brsvd=[0] * 4,
brlev=0,
t1=mock.MagicMock(year=1990, month=1, day=3),
t2=mock.MagicMock(year=1990, month=1, day=3),
core_data=mock_core_data,
realised_dtype=mock_data.dtype,
)
field.configure_mock(**kwargs)
return field
def test_soil_level_round_trip(self):
# Use pp.load_cubes() to convert a fake PPField into a Cube.
# NB. Use MagicMock so that SplittableInt header items, such as
# LBCODE, support len().
soil_level = 1234
field = self._mock_field(
lbvc=6, lblev=soil_level, stash=iris.fileformats.pp.STASH(1, 0, 9)
)
load = mock.Mock(return_value=iter([field]))
with mock.patch("iris.fileformats.pp.load", new=load) as load:
cube = next(iris.fileformats.pp.load_cubes("DUMMY"))
self.assertIn("soil", cube.standard_name)
self._test_coord(cube, soil_level, long_name="soil_model_level_number")
# Now use the save rules to convert the Cube back into a PPField.
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field.brsvd = [None] * 4
field.brlev = None
field = verify(cube, field)
# Check the vertical coordinate is as originally specified.
self.assertEqual(field.lbvc, 6)
self.assertEqual(field.lblev, soil_level)
self.assertEqual(field.blev, soil_level)
self.assertEqual(field.brsvd[0], 0)
self.assertEqual(field.brlev, 0)
def test_soil_depth_round_trip(self):
# Use pp.load_cubes() to convert a fake PPField into a Cube.
# NB. Use MagicMock so that SplittableInt header items, such as
# LBCODE, support len().
lower, point, upper = 1.2, 3.4, 5.6
brsvd = [lower, 0, 0, 0]
field = self._mock_field(
lbvc=6,
blev=point,
brsvd=brsvd,
brlev=upper,
stash=iris.fileformats.pp.STASH(1, 0, 9),
)
load = mock.Mock(return_value=iter([field]))
with mock.patch("iris.fileformats.pp.load", new=load) as load:
cube = next(iris.fileformats.pp.load_cubes("DUMMY"))
self.assertIn("soil", cube.standard_name)
self._test_coord(
cube, point, bounds=[lower, upper], standard_name="depth"
)
# Now use the save rules to convert the Cube back into a PPField.
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field.brlev = None
field.brsvd = [None] * 4
field = verify(cube, field)
# Check the vertical coordinate is as originally specified.
self.assertEqual(field.lbvc, 6)
self.assertEqual(field.blev, point)
self.assertEqual(field.brsvd[0], lower)
self.assertEqual(field.brlev, upper)
def test_potential_temperature_level_round_trip(self):
# Check save+load for data on 'potential temperature' levels.
# Use pp.load_cubes() to convert a fake PPField into a Cube.
# NB. Use MagicMock so that SplittableInt header items, such as
# LBCODE, support len().
potm_value = 22.5
field = self._mock_field(lbvc=19, blev=potm_value)
load = mock.Mock(return_value=iter([field]))
with mock.patch("iris.fileformats.pp.load", new=load):
cube = next(iris.fileformats.pp.load_cubes("DUMMY"))
self._test_coord(
cube, potm_value, standard_name="air_potential_temperature"
)
# Now use the save rules to convert the Cube back into a PPField.
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field = verify(cube, field)
# Check the vertical coordinate is as originally specified.
self.assertEqual(field.lbvc, 19)
self.assertEqual(field.blev, potm_value)
@staticmethod
def _field_with_data(scale=1, **kwargs):
x, y = 40, 30
mock_data = np.arange(1200).reshape(y, x) * scale
mock_core_data = mock.MagicMock(return_value=mock_data)
field = mock.MagicMock(
core_data=mock_core_data,
realised_dtype=mock_data.dtype,
lbcode=[1],
lbnpt=x,
lbrow=y,
bzx=350,
bdx=1.5,
bzy=40,
bdy=1.5,
lbuser=[0] * 7,
lbrsvd=[0] * 4,
t1=mock.MagicMock(year=1990, month=1, day=3),
t2=mock.MagicMock(year=1990, month=1, day=3),
)
field._x_coord_name = lambda: "longitude"
field._y_coord_name = lambda: "latitude"
field.coord_system = lambda: None
field.configure_mock(**kwargs)
return field
def test_hybrid_pressure_round_trip(self):
# Use pp.load_cubes() to convert fake PPFields into Cubes.
# NB. Use MagicMock so that SplittableInt header items, such as
# LBCODE, support len().
# Make a fake reference surface field.
pressure_field = self._field_with_data(
10,
stash=iris.fileformats.pp.STASH(1, 0, 409),
lbuser=[0, 0, 0, 409, 0, 0, 0],
)
# Make a fake data field which needs the reference surface.
model_level = 5678
sigma_lower, sigma, sigma_upper = 0.85, 0.9, 0.95
delta_lower, delta, delta_upper = 0.05, 0.1, 0.15
data_field = self._field_with_data(
lbvc=9,
lblev=model_level,
bhlev=delta,
bhrlev=delta_lower,
blev=sigma,
brlev=sigma_lower,
brsvd=[sigma_upper, delta_upper],
)
# Convert both fields to cubes.
load = mock.Mock(return_value=iter([pressure_field, data_field]))
with mock.patch("iris.fileformats.pp.load", new=load) as load:
pressure_cube, data_cube = iris.fileformats.pp.load_cubes("DUMMY")
# Check the reference surface cube looks OK.
self.assertEqual(pressure_cube.standard_name, "surface_air_pressure")
self.assertEqual(pressure_cube.units, "Pa")
# Check the data cube is set up to use hybrid-pressure.
self._test_coord(
data_cube, model_level, standard_name="model_level_number"
)
self._test_coord(
data_cube,
delta,
[delta_lower, delta_upper],
long_name="level_pressure",
)
self._test_coord(
data_cube, sigma, [sigma_lower, sigma_upper], long_name="sigma"
)
aux_factories = data_cube.aux_factories
self.assertEqual(len(aux_factories), 1)
surface_coord = aux_factories[0].dependencies["surface_air_pressure"]
self.assertArrayEqual(
surface_coord.points, np.arange(12000, step=10).reshape(30, 40)
)
# Now use the save rules to convert the Cubes back into PPFields.
pressure_field = iris.fileformats.pp.PPField3()
pressure_field.lbfc = 0
pressure_field.lbvc = 0
pressure_field.brsvd = [None, None]
pressure_field.lbuser = [None] * 7
pressure_field = verify(pressure_cube, pressure_field)
data_field = iris.fileformats.pp.PPField3()
data_field.lbfc = 0
data_field.lbvc = 0
data_field.brsvd = [None, None]
data_field.lbuser = [None] * 7
data_field = verify(data_cube, data_field)
# The reference surface field should have STASH=409
self.assertArrayEqual(
pressure_field.lbuser, [None, None, None, 409, None, None, 1]
)
# Check the data field has the vertical coordinate as originally
# specified.
self.assertEqual(data_field.lbvc, 9)
self.assertEqual(data_field.lblev, model_level)
self.assertEqual(data_field.bhlev, delta)
self.assertEqual(data_field.bhrlev, delta_lower)
self.assertEqual(data_field.blev, sigma)
self.assertEqual(data_field.brlev, sigma_lower)
self.assertEqual(data_field.brsvd, [sigma_upper, delta_upper])
def test_hybrid_pressure_with_duplicate_references(self):
# Make a fake reference surface field.
pressure_field = self._field_with_data(
10,
stash=iris.fileformats.pp.STASH(1, 0, 409),
lbuser=[0, 0, 0, 409, 0, 0, 0],
)
# Make a fake data field which needs the reference surface.
model_level = 5678
sigma_lower, sigma, sigma_upper = 0.85, 0.9, 0.95
delta_lower, delta, delta_upper = 0.05, 0.1, 0.15
data_field = self._field_with_data(
lbvc=9,
lblev=model_level,
bhlev=delta,
bhrlev=delta_lower,
blev=sigma,
brlev=sigma_lower,
brsvd=[sigma_upper, delta_upper],
)
# Convert both fields to cubes.
load = mock.Mock(
return_value=iter([data_field, pressure_field, pressure_field])
)
msg = "Multiple reference cubes for surface_air_pressure"
with mock.patch(
"iris.fileformats.pp.load", new=load
) as load, mock.patch("warnings.warn") as warn:
_, _, _ = iris.fileformats.pp.load_cubes("DUMMY")
warn.assert_called_with(msg)
def test_hybrid_height_with_non_standard_coords(self):
# Check the save rules are using the AuxFactory to find the
# hybrid height coordinates and not relying on their names.
ny, nx = 30, 40
sigma_lower, sigma, sigma_upper = 0.75, 0.8, 0.75
delta_lower, delta, delta_upper = 150, 200, 250
cube = Cube(np.zeros((ny, nx)), "air_temperature")
level_coord = AuxCoord(0, "model_level_number")
cube.add_aux_coord(level_coord)
delta_coord = AuxCoord(
delta,
bounds=[[delta_lower, delta_upper]],
long_name="moog",
units="m",
)
sigma_coord = AuxCoord(
sigma, bounds=[[sigma_lower, sigma_upper]], long_name="mavis"
)
surface_altitude_coord = AuxCoord(
np.zeros((ny, nx)), "surface_altitude", units="m"
)
cube.add_aux_coord(delta_coord)
cube.add_aux_coord(sigma_coord)
cube.add_aux_coord(surface_altitude_coord, (0, 1))
cube.add_aux_factory(
HybridHeightFactory(
delta_coord, sigma_coord, surface_altitude_coord
)
)
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field.brsvd = [None, None]
field.lbuser = [None] * 7
field = verify(cube, field)
self.assertEqual(field.blev, delta)
self.assertEqual(field.brlev, delta_lower)
self.assertEqual(field.bhlev, sigma)
self.assertEqual(field.bhrlev, sigma_lower)
self.assertEqual(field.brsvd, [delta_upper, sigma_upper])
def test_hybrid_pressure_with_non_standard_coords(self):
# Check the save rules are using the AuxFactory to find the
# hybrid pressure coordinates and not relying on their names.
ny, nx = 30, 40
sigma_lower, sigma, sigma_upper = 0.75, 0.8, 0.75
delta_lower, delta, delta_upper = 0.15, 0.2, 0.25
cube = Cube(np.zeros((ny, nx)), "air_temperature")
level_coord = AuxCoord(0, "model_level_number")
cube.add_aux_coord(level_coord)
delta_coord = AuxCoord(
delta,
bounds=[[delta_lower, delta_upper]],
long_name="moog",
units="Pa",
)
sigma_coord = AuxCoord(
sigma, bounds=[[sigma_lower, sigma_upper]], long_name="mavis"
)
surface_air_pressure_coord = AuxCoord(
np.zeros((ny, nx)), "surface_air_pressure", units="Pa"
)
cube.add_aux_coord(delta_coord)
cube.add_aux_coord(sigma_coord)
cube.add_aux_coord(surface_air_pressure_coord, (0, 1))
cube.add_aux_factory(
HybridPressureFactory(
delta_coord, sigma_coord, surface_air_pressure_coord
)
)
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field.brsvd = [None, None]
field.lbuser = [None] * 7
field = verify(cube, field)
self.assertEqual(field.bhlev, delta)
self.assertEqual(field.bhrlev, delta_lower)
self.assertEqual(field.blev, sigma)
self.assertEqual(field.brlev, sigma_lower)
self.assertEqual(field.brsvd, [sigma_upper, delta_upper])
def test_hybrid_height_round_trip_no_reference(self):
# Use pp.load_cubes() to convert fake PPFields into Cubes.
# NB. Use MagicMock so that SplittableInt header items, such as
# LBCODE, support len().
# Make a fake data field which needs the reference surface.
model_level = 5678
sigma_lower, sigma, sigma_upper = 0.85, 0.9, 0.95
delta_lower, delta, delta_upper = 0.05, 0.1, 0.15
data_field = self._field_with_data(
lbvc=65,
lblev=model_level,
bhlev=sigma,
bhrlev=sigma_lower,
blev=delta,
brlev=delta_lower,
brsvd=[delta_upper, sigma_upper],
)
# Convert field to a cube.
load = mock.Mock(return_value=iter([data_field]))
with mock.patch(
"iris.fileformats.pp.load", new=load
) as load, mock.patch("warnings.warn") as warn:
(data_cube,) = iris.fileformats.pp.load_cubes("DUMMY")
msg = (
"Unable to create instance of HybridHeightFactory. "
"The source data contains no field(s) for 'orography'."
)
warn.assert_called_once_with(msg)
# Check the data cube is set up to use hybrid height.
self._test_coord(
data_cube, model_level, standard_name="model_level_number"
)
self._test_coord(
data_cube,
delta,
[delta_lower, delta_upper],
long_name="level_height",
)
self._test_coord(
data_cube, sigma, [sigma_lower, sigma_upper], long_name="sigma"
)
# Check that no aux factory is created (due to missing
# reference surface).
aux_factories = data_cube.aux_factories
self.assertEqual(len(aux_factories), 0)
# Now use the save rules to convert the Cube back into a PPField.
data_field = iris.fileformats.pp.PPField3()
data_field.lbfc = 0
data_field.lbvc = 0
data_field.brsvd = [None, None]
data_field.lbuser = [None] * 7
data_field = verify(data_cube, data_field)
# Check the data field has the vertical coordinate as originally
# specified.
self.assertEqual(data_field.lbvc, 65)
self.assertEqual(data_field.lblev, model_level)
self.assertEqual(data_field.bhlev, sigma)
self.assertEqual(data_field.bhrlev, sigma_lower)
self.assertEqual(data_field.blev, delta)
self.assertEqual(data_field.brlev, delta_lower)
self.assertEqual(data_field.brsvd, [delta_upper, sigma_upper])
class TestSaveLBFT(tests.IrisTest):
def setUp(self):
delta_start = 24
delta_mid = 36
self.delta_end = 369 * 24
ref_offset = 10 * 24
self.args = (delta_start, delta_mid, self.delta_end, ref_offset)
def create_cube(self, fp_min, fp_mid, fp_max, ref_offset, season=None):
cube = Cube(np.zeros((3, 4)))
cube.add_aux_coord(
AuxCoord(
standard_name="forecast_period",
units="hours",
points=fp_mid,
bounds=[fp_min, fp_max],
)
)
cube.add_aux_coord(
AuxCoord(
standard_name="time",
units="hours since epoch",
points=ref_offset + fp_mid,
bounds=[ref_offset + fp_min, ref_offset + fp_max],
)
)
if season:
cube.add_aux_coord(
AuxCoord(long_name="clim_season", points=season)
)
cube.add_cell_method(CellMethod("DUMMY", "clim_season"))
return cube
def convert_cube_to_field(self, cube):
# Use the save rules to convert the Cube back into a PPField.
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field.lbtim = 0
field = verify(cube, field)
return field
def test_time_mean_from_forecast_period(self):
cube = self.create_cube(24, 36, 48, 72)
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, 48)
def test_time_mean_from_forecast_reference_time(self):
cube = Cube(np.zeros((3, 4)))
cube.add_aux_coord(
AuxCoord(
standard_name="forecast_reference_time",
units="hours since epoch",
points=72,
)
)
cube.add_aux_coord(
AuxCoord(
standard_name="time",
units="hours since epoch",
points=72 + 36,
bounds=[72 + 24, 72 + 48],
)
)
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, 48)
def test_climatological_mean_single_year(self):
cube = Cube(np.zeros((3, 4)))
cube.add_aux_coord(
AuxCoord(
standard_name="forecast_period",
units="hours",
points=36,
bounds=[24, 4 * 24],
)
)
cube.add_aux_coord(
AuxCoord(
standard_name="time",
units="hours since epoch",
points=240 + 36,
bounds=[240 + 24, 240 + 4 * 24],
)
)
cube.add_aux_coord(AuxCoord(long_name="clim_season", points="DUMMY"))
cube.add_cell_method(CellMethod("DUMMY", "clim_season"))
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, 4 * 24)
def test_climatological_mean_multi_year_djf(self):
cube = self.create_cube(*self.args, season="djf")
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, self.delta_end)
def test_climatological_mean_multi_year_mam(self):
cube = self.create_cube(*self.args, season="mam")
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, self.delta_end)
def test_climatological_mean_multi_year_jja(self):
cube = self.create_cube(*self.args, season="jja")
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, self.delta_end)
def test_climatological_mean_multi_year_son(self):
cube = self.create_cube(*self.args, season="son")
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, self.delta_end)
class TestCoordinateForms(tests.IrisTest):
def _common(self, x_coord):
nx = len(x_coord.points)
ny = 2
data = np.zeros((ny, nx), dtype=np.float32)
test_cube = iris.cube.Cube(data)
y0 = np.float32(20.5)
dy = np.float32(3.72)
y_coord = iris.coords.DimCoord.from_regular(
zeroth=y0,
step=dy,
count=ny,
standard_name="latitude",
units="degrees_north",
)
test_cube.add_dim_coord(x_coord, 1)
test_cube.add_dim_coord(y_coord, 0)
# Write to a temporary PP file and read it back as a PPField
with self.temp_filename(".pp") as pp_filepath:
iris.save(test_cube, pp_filepath)
pp_loader = iris.fileformats.pp.load(pp_filepath)
pp_field = next(pp_loader)
return pp_field
def test_save_awkward_case_is_regular(self):
# Check that specific "awkward" values still save in a regular form.
nx = 3
x0 = np.float32(355.626)
dx = np.float32(0.0135)
x_coord = iris.coords.DimCoord.from_regular(
zeroth=x0,
step=dx,
count=nx,
standard_name="longitude",
units="degrees_east",
)
pp_field = self._common(x_coord)
# Check that the result has the regular coordinates as expected.
self.assertEqual(pp_field.bzx, x0)
self.assertEqual(pp_field.bdx, dx)
self.assertEqual(pp_field.lbnpt, nx)
def test_save_irregular(self):
# Check that a non-regular coordinate saves as expected.
nx = 3
x_values = [0.0, 1.1, 2.0]
x_coord = iris.coords.DimCoord(
x_values, standard_name="longitude", units="degrees_east"
)
pp_field = self._common(x_coord)
# Check that the result has the regular/irregular Y and X as expected.
self.assertEqual(pp_field.bdx, 0.0)
self.assertArrayAllClose(pp_field.x, x_values)
self.assertEqual(pp_field.lbnpt, nx)
@tests.skip_data
class TestLoadLittleendian(tests.IrisTest):
def test_load_sample(self):
file_path = tests.get_data_path(
("PP", "little_endian", "qrparm.orog.pp")
)
# Ensure it just loads.
cube = iris.load_cube(file_path, "surface_altitude")
self.assertEqual(cube.shape, (110, 160))
# Check for sensible floating point numbers.
def check_minmax(array, expect_min, expect_max):
found = np.array([np.min(array), np.max(array)])
expected = np.array([expect_min, expect_max])
self.assertArrayAlmostEqual(found, expected, decimal=2)
lons = cube.coord("grid_longitude").points
lats = cube.coord("grid_latitude").points
data = cube.data
check_minmax(lons, 342.0, 376.98)
check_minmax(lats, -10.48, 13.5)
check_minmax(data, -30.48, 6029.1)
@tests.skip_data
class TestAsCubes(tests.IrisTest):
def setUp(self):
dpath = tests.get_data_path(
["PP", "meanMaxMin", "200806081200__qwpb.T24.pp"]
)
self.ppfs = iris.fileformats.pp.load(dpath)
def test_pseudo_level_filter(self):
chosen_ppfs = []
for ppf in self.ppfs:
if ppf.lbuser[4] == 3:
chosen_ppfs.append(ppf)
cubes_fields = list(load_pairs_from_fields(chosen_ppfs))
self.assertEqual(len(cubes_fields), 8)
def test_pseudo_level_filter_none(self):
chosen_ppfs = []
for ppf in self.ppfs:
if ppf.lbuser[4] == 30:
chosen_ppfs.append(ppf)
cubes = list(load_pairs_from_fields(chosen_ppfs))
self.assertEqual(len(cubes), 0)
def test_as_pairs(self):
cube_ppf_pairs = load_pairs_from_fields(self.ppfs)
cubes = []
for cube, ppf in cube_ppf_pairs:
if ppf.lbuser[4] == 3:
cube.attributes["pseudo level"] = ppf.lbuser[4]
cubes.append(cube)
for cube in cubes:
self.assertEqual(cube.attributes["pseudo level"], 3)
class TestSaveLBPROC(tests.IrisTest):
def create_cube(self, longitude_coord="longitude"):
cube = Cube(np.zeros((2, 3, 4)))
tunit = Unit("days since epoch", calendar="gregorian")
tcoord = DimCoord(np.arange(2), standard_name="time", units=tunit)
xcoord = DimCoord(
np.arange(3), standard_name=longitude_coord, units="degrees"
)
ycoord = DimCoord(points=np.arange(4))
cube.add_dim_coord(tcoord, 0)
cube.add_dim_coord(xcoord, 1)
cube.add_dim_coord(ycoord, 2)
return cube
def convert_cube_to_field(self, cube):
field = iris.fileformats.pp.PPField3()
field.lbvc = 0
return verify(cube, field)
def test_time_mean_only(self):
cube = self.create_cube()
cube.add_cell_method(CellMethod(method="mean", coords="time"))
field = self.convert_cube_to_field(cube)
self.assertEqual(int(field.lbproc), 128)
def test_longitudinal_mean_only(self):
cube = self.create_cube()
cube.add_cell_method(CellMethod(method="mean", coords="longitude"))
field = self.convert_cube_to_field(cube)
self.assertEqual(int(field.lbproc), 64)
def test_grid_longitudinal_mean_only(self):
cube = self.create_cube(longitude_coord="grid_longitude")
cube.add_cell_method(
CellMethod(method="mean", coords="grid_longitude")
)
field = self.convert_cube_to_field(cube)
self.assertEqual(int(field.lbproc), 64)
def test_time_mean_and_zonal_mean(self):
cube = self.create_cube()
cube.add_cell_method(CellMethod(method="mean", coords="time"))
cube.add_cell_method(CellMethod(method="mean", coords="longitude"))
field = self.convert_cube_to_field(cube)
self.assertEqual(int(field.lbproc), 192)
@tests.skip_data
class TestCallbackLoad(tests.IrisTest):
def setUp(self):
self.pass_name = "air_potential_temperature"
def callback_wrapper(self):
# Wrap the `iris.exceptions.IgnoreCubeException`-calling callback.
def callback_ignore_cube_exception(cube, field, filename):
if cube.name() != self.pass_name:
raise IgnoreCubeException
return callback_ignore_cube_exception
def test_ignore_cube_callback(self):
test_dataset = tests.get_data_path(
["PP", "globClim1", "dec_subset.pp"]
)
exception_callback = self.callback_wrapper()
result_cubes = iris.load(test_dataset, callback=exception_callback)
n_result_cubes = len(result_cubes)
# We ignore all but one cube (the `air_potential_temperature` cube).
self.assertEqual(n_result_cubes, 1)
self.assertEqual(result_cubes[0].name(), self.pass_name)
@tests.skip_data
class TestZonalMeanBounds(tests.IrisTest):
    def test_multiple_longitude(self):
# test that bounds are set for a zonal mean file with many longitude
# values
orig_file = tests.get_data_path(("PP", "aPPglob1", "global.pp"))
f = next(iris.fileformats.pp.load(orig_file))
f.lbproc = 192 # time and zonal mean
# Write out pp file
temp_filename = iris.util.create_temp_filename(".pp")
with open(temp_filename, "wb") as temp_fh:
f.save(temp_fh)
# Load pp file
cube = iris.load_cube(temp_filename)
self.assertTrue(cube.coord("longitude").has_bounds())
os.remove(temp_filename)
def test_singular_longitude(self):
# test that bounds are set for a zonal mean file with a single
# longitude value
pp_file = tests.get_data_path(("PP", "zonal_mean", "zonal_mean.pp"))
# Load pp file
cube = iris.load_cube(pp_file)
self.assertTrue(cube.coord("longitude").has_bounds())
@tests.skip_data
class TestLoadPartialMask(tests.IrisTest):
def test_data(self):
# Ensure that fields merge correctly where one has a mask and one
# doesn't.
filename = tests.get_data_path(["PP", "simple_pp", "partial_mask.pp"])
expected_data = np.ma.masked_array(
[[[0, 1], [11, 12]], [[99, 100], [-1, -1]]],
[[[0, 0], [0, 0]], [[0, 0], [1, 1]]],
dtype=np.int32,
)
cube = iris.load_cube(filename)
self.assertEqual(expected_data.dtype, cube.data.dtype)
self.assertMaskedArrayEqual(expected_data, cube.data, strict=False)
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | 1,716,455,523,335,447,000 | 35.474407 | 79 | 0.586904 | false |
devbv/incubator-openwhisk | tools/travis/box-upload.py | 11 | 2013 | #!/usr/bin/env python
"""Executable Python script for compressing folders to Box.
Compresses the contents of a folder and upload the result to Box.
Run this script as:
$ upload-logs.py LOG_DIR DEST_NAME
e.g.: $ upload-logs.py /tmp/wsklogs logs-5512.tar.gz
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
import os
import subprocess
import sys
import tempfile
import urllib
def upload_file(local_file, remote_file):
"""Upload file."""
if remote_file[0] == '/':
remote_file = remote_file[1:]
subprocess.call(["curl", "-X", "POST", "--data-binary", "@%s" % local_file,
"http://wsklogfwd.mybluemix.net/upload?%s" %
urllib.urlencode({"name": remote_file})])
def tar_gz_dir(dir_path):
    """Create a gzipped tar archive of the path and its contents."""
_, dst = tempfile.mkstemp(suffix=".tar.gz")
subprocess.call(["tar", "-cvzf", dst, dir_path])
return dst
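# Illustrative flow (paths are hypothetical): tar_gz_dir("/tmp/wsklogs") returns
# a temporary archive path such as "/tmp/tmpab12cd.tar.gz", which upload_file()
# then POSTs to the log-forwarding endpoint under the requested remote name.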
if __name__ == "__main__":
dir_path = sys.argv[1]
dst_path = sys.argv[2]
if not os.path.isdir(dir_path):
print("Directory doesn't exist: %s." % dir_path)
sys.exit(0)
print("Compressing logs dir...")
tar = tar_gz_dir(dir_path)
print("Uploading to Box...")
upload_file(tar, dst_path)
| apache-2.0 | -247,345,912,731,283,650 | 30.952381 | 79 | 0.667163 | false |
maestrano/openerp | maestrano/app/sso/MnoSsoUser.py | 1 | 5468 | import os
from MnoSsoBaseUser import MnoSsoBaseUser
import openerp
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
from openerp.osv.fields import char
import inspect
import werkzeug
#
# Configure App specific behavior for
# Maestrano SSO
#
class MnoSsoUser(MnoSsoBaseUser):
# Database connection
connection = None
Users = None
env = None
#
# Extend constructor to inialize app specific objects
#
# @param OneLogin_Saml_Response $saml_response
# A SamlResponse object from Maestrano containing details
# about the user being authenticated
#
def __init__(self, saml_response, session=[], opts=[]):
super(MnoSsoUser,self).__init__(saml_response,session)
dbname = 'openerp'
self.connection = RegistryManager.get(dbname)
# Get Users service and extend it
self.Users = self.connection.get('res.users')
self.Users._columns['mno_uid'] = openerp.osv.fields.char()
self.Users._all_columns['mno_uid'] = openerp.osv.fields.column_info('mno_uid', openerp.osv.fields.char())
if opts['env']:
self.env = opts['env']
# Set user in session. Called by signIn method.
def setInSession(self):
password = self.generatePassword()
if self.local_id is not None:
with self.connection.cursor() as cr:
ret = self.Users.write(cr, SUPERUSER_ID, [self.local_id], {
'password': password
})
cr.commit()
self.session.authenticate('openerp', self.uid, password, self.env)
return True
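    # Note: setInSession() above rotates the local password to a value from
    # generatePassword() (inherited from MnoSsoBaseUser, presumably random) on
    # every SSO sign-in, then authenticates the OpenERP session with it.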
# Sign the user in the application. By default,
# set the mno_uid, mno_session and mno_session_recheck
# in session.
# It is expected that this method get extended with
# application specific behavior in the MnoSsoUser class
def signIn(self):
if self.setInSession():
self.session.context['mno_uid'] = self.uid
self.session.context['mno_session'] = self.sso_session
self.session.context['mno_session_recheck'] = self.sso_session_recheck.isoformat()
# Create a local user based on the sso user
# Only if access scope is private
def createLocalUser(self):
if self.accessScope() == "private":
with self.connection.cursor() as cr:
user_hash = self.buildLocalUser()
user_id = self.Users.create(cr, SUPERUSER_ID, user_hash)
if user_id is not None:
# add groups
groups = self.getGroupIdsToAssign()
if groups:
vals = {'groups_id': [(4, g) for g in groups]}
ret = self.Users.write(cr, SUPERUSER_ID, [user_id], vals)
return user_id
return None
# Build a hash used for user creation
def buildLocalUser(self):
user = {
'login': self.uid,
'name': (self.name + ' ' + self.surname),
'password': self.generatePassword(),
'email': self.email
}
return user
# Create the role to give to the user based on context
# If the user is the owner of the app or at least Admin
# for each organization,
def getGroupIdsToAssign(self):
default_user_roles = None;
default_admin_roles = [1,2,6]
role_ids = default_user_roles #basic user
if self.app_owner:
role_ids = default_admin_roles
else:
for organization in self.organizations.itervalues():
if (organization['role'] == 'Admin' or organization['role'] == 'Super Admin'):
role_ids = default_admin_roles
else:
role_ids = default_user_roles
return role_ids
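    # Assumption: the hard-coded default_admin_roles ids ([1, 2, 6]) are taken to
    # reference built-in OpenERP res.groups records granting administrative
    # access; the exact groups depend on the target database and are not checked.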
# Get the ID of a local user via Maestrano UID lookup
def getLocalIdByUid(self):
with self.connection.cursor() as cr:
usr_list = self.Users.search(cr, SUPERUSER_ID, [('mno_uid','=',self.uid)],0,1)
if len(usr_list) > 0:
return usr_list[0]
return None
# Get the ID of a local user via email lookup
def getLocalIdByEmail(self):
with self.connection.cursor() as cr:
usr_list = self.Users.search(cr, SUPERUSER_ID, [('user_email','=',self.email)],0,1)
if len(usr_list) > 0:
                return usr_list[0]
return None
# Set the Maestrano UID on a local user via email lookup
def setLocalUid(self):
if self.local_id is not None:
with self.connection.cursor() as cr:
ret = self.Users.write(cr, SUPERUSER_ID, [self.local_id], {
'mno_uid': self.uid
})
return ret
return None
# Set all 'soft' details on the user (like name, surname, email)
def syncLocalDetails(self):
if self.local_id is not None:
with self.connection.cursor() as cr:
ret = self.Users.write(cr, SUPERUSER_ID, [self.local_id], {
'name': self.name + ' ' + self.surname,
'user_email': self.email,
'login': self.uid
})
return ret
return None
| agpl-3.0 | -1,341,544,641,695,872,500 | 35.211921 | 113 | 0.56218 | false |
jleclanche/dj-stripe | djstripe/settings.py | 1 | 7051 | # -*- coding: utf-8 -*-
"""
.. module:: djstripe.settings.
:synopsis: dj-stripe settings
.. moduleauthor:: @kavdev, @pydanny, @lskillen, and @chrissmejia
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import stripe
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.module_loading import import_string
from .checks import validate_stripe_api_version
DEFAULT_STRIPE_API_VERSION = "2017-06-05"
def get_callback_function(setting_name, default=None):
"""
Resolve a callback function based on a setting name.
If the setting value isn't set, default is returned. If the setting value
is already a callable function, that value is used - If the setting value
is a string, an attempt is made to import it. Anything else will result in
a failed import causing ImportError to be raised.
:param setting_name: The name of the setting to resolve a callback from.
:type setting_name: string (``str``/``unicode``)
:param default: The default to return if setting isn't populated.
:type default: ``bool``
:returns: The resolved callback function (if any).
:type: ``callable``
"""
func = getattr(settings, setting_name, None)
if not func:
return default
if callable(func):
return func
if isinstance(func, six.string_types):
func = import_string(func)
if not callable(func):
raise ImproperlyConfigured("{name} must be callable.".format(name=setting_name))
return func
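# Illustrative usage (setting values below are hypothetical): either form would
# be resolved by get_callback_function("DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK"):
#   DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK = lambda request: request.user.team
#   DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK = "myapp.utils.team_from_request"
# A string value is imported via import_string(); a non-callable result raises
# ImproperlyConfigured.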
subscriber_request_callback = get_callback_function("DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK",
default=(lambda request: request.user))
def _get_idempotency_key(object_type, action, livemode):
from .models import IdempotencyKey
action = "{}:{}".format(object_type, action)
idempotency_key, _created = IdempotencyKey.objects.get_or_create(action=action, livemode=livemode)
return str(idempotency_key.uuid)
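# For example (argument values are illustrative): _get_idempotency_key("customer",
# "create", False) get_or_creates an IdempotencyKey row with action
# "customer:create" and livemode False, returning its UUID as a string.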
get_idempotency_key = get_callback_function("DJSTRIPE_IDEMPOTENCY_KEY_CALLBACK", _get_idempotency_key)
USE_NATIVE_JSONFIELD = getattr(settings, "DJSTRIPE_USE_NATIVE_JSONFIELD", False)
PRORATION_POLICY = getattr(settings, 'DJSTRIPE_PRORATION_POLICY', False)
CANCELLATION_AT_PERIOD_END = not getattr(settings, 'DJSTRIPE_PRORATION_POLICY', False)
DJSTRIPE_WEBHOOK_URL = getattr(settings, "DJSTRIPE_WEBHOOK_URL", r"^webhook/$")
# Webhook event callbacks allow an application to take control of what happens
# when an event from Stripe is received. One suggestion is to put the event
# onto a task queue (such as celery) for asynchronous processing.
WEBHOOK_EVENT_CALLBACK = get_callback_function("DJSTRIPE_WEBHOOK_EVENT_CALLBACK")
TEST_API_KEY = getattr(settings, "STRIPE_TEST_SECRET_KEY", "")
LIVE_API_KEY = getattr(settings, "STRIPE_LIVE_SECRET_KEY", "")
# Determines whether we are in live mode or test mode
STRIPE_LIVE_MODE = getattr(settings, "STRIPE_LIVE_MODE", False)
# Default secret key
if hasattr(settings, "STRIPE_SECRET_KEY"):
STRIPE_SECRET_KEY = settings.STRIPE_SECRET_KEY
else:
STRIPE_SECRET_KEY = LIVE_API_KEY if STRIPE_LIVE_MODE else TEST_API_KEY
# Default public key
if hasattr(settings, "STRIPE_PUBLIC_KEY"):
STRIPE_PUBLIC_KEY = settings.STRIPE_PUBLIC_KEY
elif STRIPE_LIVE_MODE:
STRIPE_PUBLIC_KEY = getattr(settings, "STRIPE_LIVE_PUBLIC_KEY", "")
else:
STRIPE_PUBLIC_KEY = getattr(settings, "STRIPE_TEST_PUBLIC_KEY", "")
# Set STRIPE_API_HOST if you want to use a different Stripe API server
# Example: https://github.com/stripe/stripe-mock
if hasattr(settings, "STRIPE_API_HOST"):
stripe.api_base = settings.STRIPE_API_HOST
def get_default_api_key(livemode):
"""
Returns the default API key for a value of `livemode`.
"""
if livemode is None:
# Livemode is unknown. Use the default secret key.
return STRIPE_SECRET_KEY
elif livemode:
# Livemode is true, use the live secret key
return LIVE_API_KEY or STRIPE_SECRET_KEY
else:
# Livemode is false, use the test secret key
return TEST_API_KEY or STRIPE_SECRET_KEY
SUBSCRIPTION_REDIRECT = getattr(settings, "DJSTRIPE_SUBSCRIPTION_REDIRECT", "djstripe:subscribe")
ZERO_DECIMAL_CURRENCIES = set([
"bif", "clp", "djf", "gnf", "jpy", "kmf", "krw", "mga", "pyg", "rwf",
"vnd", "vuv", "xaf", "xof", "xpf",
])
def get_subscriber_model_string():
"""Get the configured subscriber model as a module path string."""
return getattr(settings, "DJSTRIPE_SUBSCRIBER_MODEL", settings.AUTH_USER_MODEL)
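# For example (model label is hypothetical): with DJSTRIPE_SUBSCRIBER_MODEL set to
# "myapp.Team" this returns "myapp.Team"; when the setting is absent it falls
# back to settings.AUTH_USER_MODEL.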
def get_subscriber_model():
"""
Attempt to pull settings.DJSTRIPE_SUBSCRIBER_MODEL.
Users have the option of specifying a custom subscriber model via the
DJSTRIPE_SUBSCRIBER_MODEL setting.
This methods falls back to AUTH_USER_MODEL if DJSTRIPE_SUBSCRIBER_MODEL is not set.
Returns the subscriber model that is active in this project.
"""
model_name = get_subscriber_model_string()
# Attempt a Django 1.7 app lookup
try:
subscriber_model = django_apps.get_model(model_name)
except ValueError:
raise ImproperlyConfigured("DJSTRIPE_SUBSCRIBER_MODEL must be of the form 'app_label.model_name'.")
except LookupError:
raise ImproperlyConfigured("DJSTRIPE_SUBSCRIBER_MODEL refers to model '{model}' "
"that has not been installed.".format(model=model_name))
if (("email" not in [field_.name for field_ in subscriber_model._meta.get_fields()]) and
not hasattr(subscriber_model, 'email')):
raise ImproperlyConfigured("DJSTRIPE_SUBSCRIBER_MODEL must have an email attribute.")
if model_name != settings.AUTH_USER_MODEL:
# Custom user model detected. Make sure the callback is configured.
func = get_callback_function("DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK")
if not func:
raise ImproperlyConfigured(
"DJSTRIPE_SUBSCRIBER_MODEL_REQUEST_CALLBACK must be implemented "
"if a DJSTRIPE_SUBSCRIBER_MODEL is defined.")
return subscriber_model
def get_stripe_api_version():
"""Get the desired API version to use for Stripe requests."""
version = getattr(settings, 'STRIPE_API_VERSION', stripe.api_version)
return version or DEFAULT_STRIPE_API_VERSION
def set_stripe_api_version(version=None, validate=True):
"""
Set the desired API version to use for Stripe requests.
:param version: The version to set for the Stripe API.
:type version: ``str``
:param validate: If True validate the value for the specified version).
:type validate: ``bool``
"""
version = version or get_stripe_api_version()
if validate:
valid = validate_stripe_api_version(version)
if not valid:
raise ValueError("Bad stripe API version: {}".format(version))
stripe.api_version = version
| mit | -6,082,480,904,387,415,000 | 34.791878 | 107 | 0.698341 | false |
Kriechi/mitmproxy | mitmproxy/io/tnetstring.py | 3 | 8699 | """
tnetstring: data serialization using typed netstrings
======================================================
This is a custom Python 3 implementation of tnetstrings.
Compared to other implementations, the main difference
is that this implementation supports a custom unicode datatype.
An ordinary tnetstring is a blob of data prefixed with its length and postfixed
with its type. Here are some examples:
>>> tnetstring.dumps("hello world")
11:hello world,
>>> tnetstring.dumps(12345)
5:12345#
>>> tnetstring.dumps([12345, True, 0])
19:5:12345#4:true!1:0#]
This module gives you the following functions:
:dump: dump an object as a tnetstring to a file
:dumps: dump an object as a tnetstring to a string
:load: load a tnetstring-encoded object from a file
:loads: load a tnetstring-encoded object from a string
Note that since parsing a tnetstring requires reading all the data into memory
at once, there's no efficiency gain from using the file-based versions of these
functions. They're only here so you can use load() to read precisely one
item from a file or socket without consuming any extra data.
The tnetstrings specification explicitly states that strings are binary blobs
and forbids the use of unicode at the protocol level.
**This implementation decodes dictionary keys as surrogate-escaped ASCII**,
all other strings are returned as plain bytes.
:Copyright: (c) 2012-2013 by Ryan Kelly <[email protected]>.
:Copyright: (c) 2014 by Carlo Pires <[email protected]>.
:Copyright: (c) 2016 by Maximilian Hils <[email protected]>.
:License: MIT
"""
import collections
import typing
TSerializable = typing.Union[None, str, bool, int, float, bytes, list, tuple, dict]
def dumps(value: TSerializable) -> bytes:
"""
This function dumps a python object as a tnetstring.
"""
# This uses a deque to collect output fragments in reverse order,
# then joins them together at the end. It's measurably faster
# than creating all the intermediate strings.
q: collections.deque = collections.deque()
_rdumpq(q, 0, value)
return b''.join(q)
def dump(value: TSerializable, file_handle: typing.BinaryIO) -> None:
"""
This function dumps a python object as a tnetstring and
writes it to the given file.
"""
file_handle.write(dumps(value))
def _rdumpq(q: collections.deque, size: int, value: TSerializable) -> int:
"""
Dump value as a tnetstring, to a deque instance, last chunks first.
This function generates the tnetstring representation of the given value,
pushing chunks of the output onto the given deque instance. It pushes
the last chunk first, then recursively generates more chunks.
When passed in the current size of the string in the queue, it will return
the new size of the string in the queue.
Operating last-chunk-first makes it easy to calculate the size written
for recursive structures without having to build their representation as
a string. This is measurably faster than generating the intermediate
strings, especially on deeply nested structures.
"""
write = q.appendleft
if value is None:
write(b'0:~')
return size + 3
elif value is True:
write(b'4:true!')
return size + 7
elif value is False:
write(b'5:false!')
return size + 8
elif isinstance(value, int):
data = str(value).encode()
ldata = len(data)
span = str(ldata).encode()
write(b'%s:%s#' % (span, data))
return size + 2 + len(span) + ldata
elif isinstance(value, float):
# Use repr() for float rather than str().
# It round-trips more accurately.
# Probably unnecessary in later python versions that
# use David Gay's ftoa routines.
data = repr(value).encode()
ldata = len(data)
span = str(ldata).encode()
write(b'%s:%s^' % (span, data))
return size + 2 + len(span) + ldata
elif isinstance(value, bytes):
data = value
ldata = len(data)
span = str(ldata).encode()
write(b',')
write(data)
write(b':')
write(span)
return size + 2 + len(span) + ldata
elif isinstance(value, str):
data = value.encode("utf8")
ldata = len(data)
span = str(ldata).encode()
write(b';')
write(data)
write(b':')
write(span)
return size + 2 + len(span) + ldata
elif isinstance(value, (list, tuple)):
write(b']')
init_size = size = size + 1
for item in reversed(value):
size = _rdumpq(q, size, item)
span = str(size - init_size).encode()
write(b':')
write(span)
return size + 1 + len(span)
elif isinstance(value, dict):
write(b'}')
init_size = size = size + 1
for (k, v) in value.items():
size = _rdumpq(q, size, v)
size = _rdumpq(q, size, k)
span = str(size - init_size).encode()
write(b':')
write(span)
return size + 1 + len(span)
else:
raise ValueError("unserializable object: {} ({})".format(value, type(value)))
def loads(string: bytes) -> TSerializable:
"""
This function parses a tnetstring into a python object.
"""
return pop(string)[0]
def load(file_handle: typing.BinaryIO) -> TSerializable:
"""load(file) -> object
This function reads a tnetstring from a file and parses it into a
python object. The file must support the read() method, and this
function promises not to read more data than necessary.
"""
# Read the length prefix one char at a time.
# Note that the netstring spec explicitly forbids padding zeros.
c = file_handle.read(1)
if c == b"": # we want to detect this special case.
raise ValueError("not a tnetstring: empty file")
data_length = b""
while c.isdigit():
data_length += c
if len(data_length) > 9:
raise ValueError("not a tnetstring: absurdly large length prefix")
c = file_handle.read(1)
if c != b":":
raise ValueError("not a tnetstring: missing or invalid length prefix")
data = file_handle.read(int(data_length))
data_type = file_handle.read(1)[0]
return parse(data_type, data)
def parse(data_type: int, data: bytes) -> TSerializable:
if data_type == ord(b','):
return data
if data_type == ord(b';'):
return data.decode("utf8")
if data_type == ord(b'#'):
try:
return int(data)
except ValueError:
raise ValueError(f"not a tnetstring: invalid integer literal: {data!r}")
if data_type == ord(b'^'):
try:
return float(data)
except ValueError:
raise ValueError(f"not a tnetstring: invalid float literal: {data!r}")
if data_type == ord(b'!'):
if data == b'true':
return True
elif data == b'false':
return False
else:
raise ValueError(f"not a tnetstring: invalid boolean literal: {data!r}")
if data_type == ord(b'~'):
if data:
raise ValueError(f"not a tnetstring: invalid null literal: {data!r}")
return None
if data_type == ord(b']'):
l = []
while data:
item, data = pop(data)
l.append(item) # type: ignore
return l
if data_type == ord(b'}'):
d = {}
while data:
key, data = pop(data)
val, data = pop(data)
d[key] = val # type: ignore
return d
raise ValueError(f"unknown type tag: {data_type}")
def pop(data: bytes) -> typing.Tuple[TSerializable, bytes]:
"""
This function parses a tnetstring into a python object.
It returns a tuple giving the parsed object and a string
containing any unparsed data from the end of the string.
"""
# Parse out data length, type and remaining string.
try:
blength, data = data.split(b':', 1)
length = int(blength)
except ValueError:
raise ValueError(f"not a tnetstring: missing or invalid length prefix: {data!r}")
try:
data, data_type, remain = data[:length], data[length], data[length + 1:]
except IndexError:
# This fires if len(data) < dlen, meaning we don't need
# to further validate that data is the right length.
raise ValueError(f"not a tnetstring: invalid length prefix: {length}")
# Parse the data based on the type tag.
return parse(data_type, data), remain
__all__ = ["dump", "dumps", "load", "loads", "pop"]
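# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal round-trip through dumps()/loads(); it only runs when this file is
# executed directly.
if __name__ == "__main__":
    example = {b"count": 3, b"tags": [b"a", b"b"], b"enabled": True}
    encoded = dumps(example)
    print(encoded)  # prints the tnetstring-encoded bytes
    assert loads(encoded) == example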
| mit | 2,927,276,284,385,943,000 | 33.796 | 89 | 0.620876 | false |
h3llrais3r/Auto-Subliminal | lib/rebulk/pattern.py | 13 | 18405 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Abstract pattern class definition along with various implementations (regexp, string, functional)
"""
# pylint: disable=super-init-not-called,wrong-import-position
from abc import ABCMeta, abstractmethod, abstractproperty
import six
from . import debug
from .loose import call, ensure_list, ensure_dict
from .match import Match
from .remodule import re, REGEX_AVAILABLE
from .utils import find_all, is_iterable, get_first_defined
@six.add_metaclass(ABCMeta)
class Pattern(object):
"""
Definition of a particular pattern to search for.
"""
def __init__(self, name=None, tags=None, formatter=None, value=None, validator=None, children=False, every=False,
private_parent=False, private_children=False, private=False, private_names=None, ignore_names=None,
marker=False, format_all=False, validate_all=False, disabled=lambda context: False, log_level=None,
properties=None, post_processor=None, **kwargs):
"""
:param name: Name of this pattern
:type name: str
:param tags: List of tags related to this pattern
:type tags: list[str]
:param formatter: dict (name, func) of formatter to use with this pattern. name is the match name to support,
and func a function(input_string) that returns the formatted string. A single formatter function can also be
        passed as a shortcut for {None: formatter}. The returned formatted string will be set in Match.value property.
:type formatter: dict[str, func] || func
:param value: dict (name, value) of value to use with this pattern. name is the match name to support,
and value an object for the match value. A single object value can also be
        passed as a shortcut for {None: value}. The value will be set in Match.value property.
:type value: dict[str, object] || object
:param validator: dict (name, func) of validator to use with this pattern. name is the match name to support,
        and func a function(match) that returns a boolean. A single validator function can also be
passed as a shortcut for {None: validator}. If return value is False, match will be ignored.
:param children: generates children instead of parent
:type children: bool
:param every: generates both parent and children.
:type every: bool
        :param private: flag this pattern as being private.
:type private: bool
:param private_parent: force return of parent and flag parent matches as private.
:type private_parent: bool
:param private_children: force return of children and flag children matches as private.
:type private_children: bool
        :param private_names: force return of named matches as private.
        :type private_names: list[str]
        :param ignore_names: drop some named matches after validation.
        :type ignore_names: list[str]
        :param marker: flag this pattern as being a marker.
        :type marker: bool
        :param format_all: if True, pattern will format every match in the hierarchy (even matches not yielded).
        :type format_all: bool
        :param validate_all: if True, pattern will validate every match in the hierarchy (even matches not yielded).
        :type validate_all: bool
:param disabled: if True, this pattern is disabled. Can also be a function(context).
:type disabled: bool|function
        :param log_level: Log level associated to this pattern
        :type log_level: int
        :param post_processor: Post processing function
        :type post_processor: func
"""
# pylint:disable=too-many-locals,unused-argument
self.name = name
self.tags = ensure_list(tags)
self.formatters, self._default_formatter = ensure_dict(formatter, lambda x: x)
self.values, self._default_value = ensure_dict(value, None)
self.validators, self._default_validator = ensure_dict(validator, lambda match: True)
self.every = every
self.children = children
self.private = private
self.private_names = private_names if private_names else []
self.ignore_names = ignore_names if ignore_names else []
self.private_parent = private_parent
self.private_children = private_children
self.marker = marker
self.format_all = format_all
self.validate_all = validate_all
if not callable(disabled):
self.disabled = lambda context: disabled
else:
self.disabled = disabled
self._log_level = log_level
self._properties = properties
self.defined_at = debug.defined_at()
if not callable(post_processor):
self.post_processor = None
else:
self.post_processor = post_processor
@property
def log_level(self):
"""
Log level for this pattern.
:return:
:rtype:
"""
return self._log_level if self._log_level is not None else debug.LOG_LEVEL
def _yield_children(self, match):
"""
        Does this match have children?
:param match:
:type match:
:return:
:rtype:
"""
return match.children and (self.children or self.every)
def _yield_parent(self):
"""
        Should the parent match be yielded (rather than only its children)?
:return:
:rtype:
"""
return not self.children or self.every
def _match_parent(self, match, yield_parent):
"""
Handle a parent match
:param match:
:type match:
:param yield_parent:
:type yield_parent:
:return:
:rtype:
"""
if not match or match.value == "":
return False
pattern_value = get_first_defined(self.values, [match.name, '__parent__', None],
self._default_value)
if pattern_value:
match.value = pattern_value
if yield_parent or self.format_all:
match.formatter = get_first_defined(self.formatters, [match.name, '__parent__', None],
self._default_formatter)
if yield_parent or self.validate_all:
validator = get_first_defined(self.validators, [match.name, '__parent__', None],
self._default_validator)
if validator and not validator(match):
return False
return True
def _match_child(self, child, yield_children):
"""
Handle a children match
:param child:
:type child:
:param yield_children:
:type yield_children:
:return:
:rtype:
"""
if not child or child.value == "":
return False
pattern_value = get_first_defined(self.values, [child.name, '__children__', None],
self._default_value)
if pattern_value:
child.value = pattern_value
if yield_children or self.format_all:
child.formatter = get_first_defined(self.formatters, [child.name, '__children__', None],
self._default_formatter)
if yield_children or self.validate_all:
validator = get_first_defined(self.validators, [child.name, '__children__', None],
self._default_validator)
if validator and not validator(child):
return False
return True
def matches(self, input_string, context=None, with_raw_matches=False):
"""
Computes all matches for a given input
:param input_string: the string to parse
:type input_string: str
:param context: the context
:type context: dict
        :param with_raw_matches: if True, also return the raw (unfiltered) matches
        :type with_raw_matches: bool
:return: matches based on input_string for this pattern
:rtype: iterator[Match]
"""
# pylint: disable=too-many-branches
matches = []
raw_matches = []
for pattern in self.patterns:
yield_parent = self._yield_parent()
match_index = -1
for match in self._match(pattern, input_string, context):
match_index += 1
match.match_index = match_index
raw_matches.append(match)
yield_children = self._yield_children(match)
if not self._match_parent(match, yield_parent):
continue
validated = True
for child in match.children:
if not self._match_child(child, yield_children):
validated = False
break
if validated:
if self.private_parent:
match.private = True
if self.private_children:
for child in match.children:
child.private = True
if yield_parent or self.private_parent:
matches.append(match)
if yield_children or self.private_children:
for child in match.children:
child.match_index = match_index
matches.append(child)
matches = self._matches_post_process(matches)
self._matches_privatize(matches)
self._matches_ignore(matches)
if with_raw_matches:
return matches, raw_matches
return matches
def _matches_post_process(self, matches):
"""
Post process matches with user defined function
:param matches:
:type matches:
:return:
:rtype:
"""
if self.post_processor:
return self.post_processor(matches, self)
return matches
def _matches_privatize(self, matches):
"""
Mark matches included in private_names with private flag.
:param matches:
:type matches:
:return:
:rtype:
"""
if self.private_names:
for match in matches:
if match.name in self.private_names:
match.private = True
def _matches_ignore(self, matches):
"""
Ignore matches included in ignore_names.
:param matches:
:type matches:
:return:
:rtype:
"""
if self.ignore_names:
for match in list(matches):
if match.name in self.ignore_names:
matches.remove(match)
@abstractproperty
def patterns(self): # pragma: no cover
"""
List of base patterns defined
:return: A list of base patterns
:rtype: list
"""
pass
@property
def properties(self):
"""
Properties names and values that can ben retrieved by this pattern.
:return:
:rtype:
"""
if self._properties:
return self._properties
return {}
@abstractproperty
def match_options(self): # pragma: no cover
"""
dict of default options for generated Match objects
:return: **options to pass to Match constructor
:rtype: dict
"""
pass
@abstractmethod
def _match(self, pattern, input_string, context=None): # pragma: no cover
"""
Computes all matches for a given pattern and input
:param pattern: the pattern to use
:param input_string: the string to parse
:type input_string: str
:param context: the context
:type context: dict
:return: matches based on input_string for this pattern
:rtype: iterator[Match]
"""
pass
def __repr__(self):
defined = ""
if self.defined_at:
defined = "@%s" % (self.defined_at,)
return "<%s%s:%s>" % (self.__class__.__name__, defined, self.__repr__patterns__)
@property
def __repr__patterns__(self):
return self.patterns
class StringPattern(Pattern):
"""
Definition of one or many strings to search for.
"""
def __init__(self, *patterns, **kwargs):
super(StringPattern, self).__init__(**kwargs)
self._patterns = patterns
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
@property
def patterns(self):
return self._patterns
@property
def match_options(self):
return self._match_kwargs
def _match(self, pattern, input_string, context=None):
for index in find_all(input_string, pattern, **self._kwargs):
yield Match(index, index + len(pattern), pattern=self, input_string=input_string, **self._match_kwargs)
class RePattern(Pattern):
"""
Definition of one or many regular expression pattern to search for.
"""
def __init__(self, *patterns, **kwargs):
super(RePattern, self).__init__(**kwargs)
self.repeated_captures = REGEX_AVAILABLE
if 'repeated_captures' in kwargs:
self.repeated_captures = kwargs.get('repeated_captures')
if self.repeated_captures and not REGEX_AVAILABLE: # pragma: no cover
raise NotImplementedError("repeated_capture is available only with regex module.")
self.abbreviations = kwargs.get('abbreviations', [])
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
self._children_match_kwargs = filter_match_kwargs(kwargs, children=True)
self._patterns = []
for pattern in patterns:
if isinstance(pattern, six.string_types):
if self.abbreviations and pattern:
for key, replacement in self.abbreviations:
pattern = pattern.replace(key, replacement)
pattern = call(re.compile, pattern, **self._kwargs)
elif isinstance(pattern, dict):
if self.abbreviations and 'pattern' in pattern:
for key, replacement in self.abbreviations:
pattern['pattern'] = pattern['pattern'].replace(key, replacement)
pattern = re.compile(**pattern)
elif hasattr(pattern, '__iter__'):
pattern = re.compile(*pattern)
self._patterns.append(pattern)
@property
def patterns(self):
return self._patterns
@property
def __repr__patterns__(self):
return [pattern.pattern for pattern in self.patterns]
@property
def match_options(self):
return self._match_kwargs
def _match(self, pattern, input_string, context=None):
names = dict((v, k) for k, v in pattern.groupindex.items())
for match_object in pattern.finditer(input_string):
start = match_object.start()
end = match_object.end()
main_match = Match(start, end, pattern=self, input_string=input_string, **self._match_kwargs)
if pattern.groups:
for i in range(1, pattern.groups + 1):
name = names.get(i, main_match.name)
if self.repeated_captures:
for start, end in match_object.spans(i):
child_match = Match(start, end, name=name, parent=main_match, pattern=self,
input_string=input_string, **self._children_match_kwargs)
main_match.children.append(child_match)
else:
start, end = match_object.span(i)
if start > -1 and end > -1:
child_match = Match(start, end, name=name, parent=main_match, pattern=self,
input_string=input_string, **self._children_match_kwargs)
main_match.children.append(child_match)
yield main_match
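# Illustrative sketch (not part of the original module): typical RePattern
# usage. The pattern, names and input below are purely hypothetical.
#
#   pattern = RePattern(r"(?P<quality>720p|1080p)", name="screen_size")
#   for match in pattern.matches("movie.1080p.mkv"):
#       ...  # parent match; the named group becomes a child match "quality"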
class FunctionalPattern(Pattern):
"""
Definition of one or many functional pattern to search for.
"""
def __init__(self, *patterns, **kwargs):
super(FunctionalPattern, self).__init__(**kwargs)
self._patterns = patterns
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
@property
def patterns(self):
return self._patterns
@property
def match_options(self):
return self._match_kwargs
def _match(self, pattern, input_string, context=None):
ret = call(pattern, input_string, context, **self._kwargs)
if ret:
if not is_iterable(ret) or isinstance(ret, dict) \
or (is_iterable(ret) and hasattr(ret, '__getitem__') and isinstance(ret[0], int)):
args_iterable = [ret]
else:
args_iterable = ret
for args in args_iterable:
if isinstance(args, dict):
options = args
options.pop('input_string', None)
options.pop('pattern', None)
if self._match_kwargs:
options = self._match_kwargs.copy()
options.update(args)
yield Match(pattern=self, input_string=input_string, **options)
else:
kwargs = self._match_kwargs
if isinstance(args[-1], dict):
kwargs = dict(kwargs)
kwargs.update(args[-1])
args = args[:-1]
yield Match(*args, pattern=self, input_string=input_string, **kwargs)
def filter_match_kwargs(kwargs, children=False):
"""
Filters out kwargs for Match construction
:param kwargs:
:type kwargs: dict
    :param children: flag to filter children matches
    :type children: bool
:return: A filtered dict
:rtype: dict
"""
kwargs = kwargs.copy()
for key in ('pattern', 'start', 'end', 'parent', 'formatter', 'value'):
if key in kwargs:
del kwargs[key]
if children:
for key in ('name',):
if key in kwargs:
del kwargs[key]
return kwargs
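# --- Illustrative usage sketch (not part of the original module) ------------
# Because this module uses relative imports, the demo below only runs inside
# the package, e.g. `python -m rebulk.pattern`.
if __name__ == "__main__":  # pragma: no cover
    demo_pattern = StringPattern("world", name="greeting")
    for demo_match in demo_pattern.matches("hello world"):
        print(demo_match.start, demo_match.end, demo_match.name)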
| gpl-3.0 | -2,933,228,572,577,658,000 | 36.638037 | 118 | 0.571801 | false |
MrElusive/Subforce | Subforce.py | 1 | 29459 | # Useful documentation:
# Sublime Plugin Framework: http://docs.sublimetext.info/en/latest/reference/plugins.html
# Sublime Plugin Python API: http://www.sublimetext.com/docs/3/api_reference.html
# Perforce API: https://www.perforce.com/perforce/r16.1/manuals/cmdref
# Perforce Python API: https://www.perforce.com/perforce/doc.current/manuals/p4script/03_python.html
# Example Plugin: https://github.com/SideBarEnhancements-org/SideBarEnhancements/blob/st3/SideBar.py
import sublime
import sublime_plugin
import P4
import os
import sys
import threading
import subprocess
import re
import tempfile
from .utilities import \
getAllViewsForPath, \
coercePathsToActiveViewIfNeeded, \
getRevisionQualifiedDepotPath, \
checkForAndGetSinglePath, \
ellipsizeIfDirectory, \
createRevision
NEW_CHANGELIST_NAME = "new"
NEW_CHANGELIST_DESCRIPTION = "Creates a new changelist."
DEFAULT_CHANGELIST_NAME = "default"
DEFAULT_CHANGELIST_DESCRIPTION = "The default changelist."
HAVE_REVISION_NAME = "have"
HAVE_REVISION_DESCRIPTION = "The currently synced revision."
HEAD_REVISION_NAME = "head"
HEAD_REVISION_DESCRIPTION = "The most recently checked-in revision."
FILE_CHECKED_OUT_SETTING_KEY = "subforce_file_checked_out"
FILE_NOT_IN_DEPOT_SETTING_KEY = "subforce_file_not_in_depot"
CHANGELIST_NUMBER_STATUS_KEY = "subforce_changelist_number"
CURRENT_WORKING_DIRECTORY_SETTING_KEY = 'current_working_directory'
DISPLAY_WARNINGS_SETTING_KEY = 'display_warnings'
USE_CONNECTION_INFO_SETTINGS_KEY = 'use_connection_info'
CONNECTION_INFO_PORT_SETTINGS_KEY = 'connection_info_port'
CONNECTION_INFO_USER_SETTINGS_KEY = 'connection_info_user'
CONNECTION_INFO_CLIENT_SETTINGS_KEY = 'connection_info_client'
DISABLE_AUTO_CHECKOUT_SETTINGS_KEY = 'disable_auto_checkout'
class SettingsWrapper(object):
def __init__(self):
self._settings = sublime.load_settings("Subforce.sublime-settings")
def __getattr__(self, name):
return getattr(self._settings, name)
def getOrThrow(self, name):
setting = self._settings.get(name)
if setting is None:
raise P4.P4Exception("Subforce: You must set the {} setting!".format(name))
return setting
class PerforceWrapper(object):
def __init__(self, squelchErrorAndWarninMessages=False):
self._p4 = P4.P4()
self._settings = SettingsWrapper()
currentWorkingDirectorySetting = self._settings.get(CURRENT_WORKING_DIRECTORY_SETTING_KEY, None)
projectPath = sublime.active_window().extract_variables()['folder']
self._p4.cwd = currentWorkingDirectorySetting if currentWorkingDirectorySetting else projectPath
self._p4.exception_level = 1 # Only errors are raised as exceptions. Warnings are accessed through p4.warnings
self._p4.api_level = 79 # Lock to 2015.2 format
self._contextManagerEnterLevel = 0
self._squelchErrorAndWarninMessages = squelchErrorAndWarninMessages
def __getattr__(self, name):
attribute = getattr(self._p4, name)
return attribute
def __enter__(self):
if self._contextManagerEnterLevel == 0:
try:
if self._settings.get(USE_CONNECTION_INFO_SETTINGS_KEY, False):
self._p4.port = self._settings.getOrThrow(CONNECTION_INFO_PORT_SETTINGS_KEY)
self._p4.user = self._settings.getOrThrow(CONNECTION_INFO_USER_SETTINGS_KEY)
self._p4.client = self._settings.getOrThrow(CONNECTION_INFO_CLIENT_SETTINGS_KEY)
self._p4.connect()
except:
if self.__exit__(*sys.exc_info()):
pass
else:
raise
self._contextManagerEnterLevel += 1
return self
def __exit__(self, type, value, traceback):
noErrors = True
if self._contextManagerEnterLevel == 1:
self.handleWarnings()
try:
self._p4.disconnect()
except P4.P4Exception:
print("Subforce: failed to disconnect!")
noErrors = self.handleErrors(type, value, traceback)
self._contextManagerEnterLevel -= 1
return noErrors
def login(self, password):
self._p4.password = password
with self as p4:
p4.run_login()
print("Subforce: sucessfully logged in!")
def handleWarnings(self):
displayWarningsSetting = self._settings.get(DISPLAY_WARNINGS_SETTING_KEY, True)
if not self._squelchErrorAndWarninMessages and displayWarningsSetting:
for warning in self._p4.warnings:
sublime.message_dialog(str(warning))
def handleErrors(self, type, value, traceback):
noErrors = True
if type is P4.P4Exception:
if not self._squelchErrorAndWarninMessages:
sublime.error_message(str(value))
noErrors = False
elif type is not None:
noErrors = False
else:
noErrors = True
return noErrors
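# Illustrative sketch (not part of the original plugin): PerforceWrapper is a
# re-entrant context manager, so Perforce commands are wrapped like this. The
# depot path below is hypothetical.
#
#   with PerforceWrapper() as p4:
#       p4.run_sync("//depot/project/...")
#
# Connection details come from Subforce.sublime-settings when the
# "use_connection_info" setting is enabled.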
def plugin_loaded():
print("Subforce: plugin loaded!")
def plugin_unloaded():
print("Subforce: plugin unloaded!")
class SubforceDisplayDescriptionCommand(sublime_plugin.TextCommand):
def run(self, edit, description = ""):
# Enable editing momentarily to set description
self.view.set_read_only(False)
self.view.replace(edit, sublime.Region(0, self.view.size()), description)
self.view.sel().clear()
self.view.set_read_only(True)
class DescriptionOutputPanel(object):
_outputPanelName = 'description_output_panel'
_qualifiedOutputPanelName = 'output.description_output_panel'
_outputPanelCreationLock = threading.Lock()
def __init__(self, window):
self._outputPanelCreationLock.acquire(blocking=True, timeout=1)
self._window = window
self._descriptionOutputPanel = self._window.find_output_panel(self._outputPanelName)
if not self._descriptionOutputPanel:
self._descriptionOutputPanel = self._window.create_output_panel(self._outputPanelName, True)
self._descriptionOutputPanel.settings().set("is_description_output_panel", True)
self._outputPanelCreationLock.release()
def show(self, description):
self._window.run_command(
"show_panel",
{
"panel": self._qualifiedOutputPanelName
}
)
self._descriptionOutputPanel.run_command(
"subforce_display_description",
{
"description": description
}
)
def hide(self):
self._window.run_command(
"hide_panel",
{
"panel": self._qualifiedOutputPanelName,
"cancel": True
}
)
class ChangelistManager(object):
def __init__(self, window, perforceWrapper):
self._window = window
self._perforceWrapper = perforceWrapper
self._changelistDescriptionOutputPanel = DescriptionOutputPanel(self._window)
def viewAllChangelists(self, onDoneCallback, includeNew=False, includeDefault=False):
with self._perforceWrapper as p4:
changelists = []
if includeNew:
changelists.append({"change": NEW_CHANGELIST_NAME, "desc": NEW_CHANGELIST_DESCRIPTION})
if includeDefault:
changelists.append({"change": DEFAULT_CHANGELIST_NAME, "desc": DEFAULT_CHANGELIST_DESCRIPTION})
changelists.extend(p4.run_changes("-c", p4.client, "-s", "pending", "-l"))
def onDone(selectedIndex):
self._changelistDescriptionOutputPanel.hide()
selectedChangelistNumber = changelists[selectedIndex]['change'] if selectedIndex >= 0 else None
if selectedChangelistNumber == NEW_CHANGELIST_NAME:
selectedChangelistNumber = self.createChangelist()
if onDoneCallback and selectedChangelistNumber:
onDoneCallback(selectedChangelistNumber)
SubforceStatusUpdatingEventListener.updateStatus(self._window.active_view())
def onHighlighted(selectedIndex):
self._changelistDescriptionOutputPanel.show(changelists[selectedIndex]['desc'])
changelistItems = [[changelist['change'], changelist['desc'][:250]] for changelist in changelists]
self._window.show_quick_panel(
changelistItems,
onDone,
sublime.KEEP_OPEN_ON_FOCUS_LOST,
0,
onHighlighted
)
def createChangelist(self):
return self.editChangelist(None)
def editChangelist(self, changelistNumber):
with self._perforceWrapper as p4:
if changelistNumber:
changeResult = p4.run_change(changelistNumber)[0]
else: # create a new changelist
changeResult = p4.run_change()[0]
changeResultRE = r'Change (\d+) (updated|created).'
changeResultMatch = re.match(changeResultRE, changeResult)
assert changeResultMatch and changeResultMatch.group(1).isdigit()
return changeResultMatch.group(1)
def deleteChangelist(self, changelistNumber):
with self._perforceWrapper as p4:
p4.run_change("-d", changelistNumber)
def moveToChangelist(self, changelistNumber, file):
with self._perforceWrapper as p4:
p4.run_reopen("-c", changelistNumber, file)
def checkoutInChangelist(self, changelistNumber, path):
with self._perforceWrapper as p4:
if changelistNumber == DEFAULT_CHANGELIST_NAME:
p4.run_edit(path)
else:
p4.run_edit("-c", changelistNumber, path)
def revertFilesInChangelist(self, changelistNumber):
with self._perforceWrapper as p4:
p4.run_revert("-c", changelistNumber, "//...")
def addToChangelist(self, changelistNumber, file):
with self._perforceWrapper as p4:
if changelistNumber == DEFAULT_CHANGELIST_NAME:
            p4.run_add(file)  # the default changelist takes no changelist argument
else:
p4.run_add("-c", changelistNumber, file)
class SubforceAutoCheckoutEventListener(sublime_plugin.EventListener):
def on_pre_save(self, view):
if SettingsWrapper().get(DISABLE_AUTO_CHECKOUT_SETTINGS_KEY, False):
return
with PerforceWrapper() as p4:
fileName = view.file_name()
settings = view.settings()
if not fileName or \
settings.get(FILE_NOT_IN_DEPOT_SETTING_KEY, False) or \
settings.get(FILE_CHECKED_OUT_SETTING_KEY, False):
return
try:
stat = p4.run_fstat(fileName) # check if file is in depot
if "action" in stat[0]:
# Cache this setting, so we don't run fstat unnecessarily
settings.set(FILE_CHECKED_OUT_SETTING_KEY, True)
return
except:
raise
# More caching!
settings.set(FILE_NOT_IN_DEPOT_SETTING_KEY, True)
return
checkoutFile = sublime.ok_cancel_dialog(
"You are saving a file in your depot. Do you want to check it out first?",
"Checkout"
)
if checkoutFile:
# Because Sublime's show_quick_panel is non-blocking, we cannot use it to acquire the user's desired
# changelist before checking out the actual file. Instead, we check out the file first and then move it to
# the user's desired changelist.
p4.run_edit(fileName)
view.settings().set(FILE_CHECKED_OUT_SETTING_KEY, True)
else:
return
moveToChangelist = sublime.ok_cancel_dialog(
"You're file has been checked out in the default changelist. Do you want to move it to another changelist?",
"Move"
)
if moveToChangelist:
view.window().run_command(
"subforce_move_to_changelist",
{
"paths": [fileName]
}
)
@classmethod
def eraseAutoCheckoutEventListenerSettings(self, view):
settings = view.settings()
settings.erase(FILE_CHECKED_OUT_SETTING_KEY)
settings.erase(FILE_NOT_IN_DEPOT_SETTING_KEY)
def on_load(self, view):
self.eraseAutoCheckoutEventListenerSettings(view)
class SubforceStatusUpdatingEventListener(sublime_plugin.EventListener):
# Some of these may be redundant. Meh.
def on_activated(self, view):
self.updateStatus(view)
def on_deactivated(self, view):
self.updateStatus(view)
def on_post_window_command(self, window, commandName, args):
if commandName.startswith("subforce"):
self.updateStatus(window.active_view())
@classmethod
def updateStatus(self, view):
settings = view.settings()
try:
with PerforceWrapper(squelchErrorAndWarninMessages=True) as p4:
stat = p4.run_fstat(view.file_name()) # check if file is in depot
if stat:
stat = stat[0]
else:
return
if "change" in stat:
view.set_status(
CHANGELIST_NUMBER_STATUS_KEY,
"Changelist Number: {}".format(stat['change'])
)
else:
view.erase_status(CHANGELIST_NUMBER_STATUS_KEY)
except P4.P4Exception: # Squelch all Perforce exceptions
pass
class SubforceLoginCommand(sublime_plugin.WindowCommand):
savedPasswordCharacters = []
def run(self):
def onDone(password):
PerforceWrapper().login("".join(self.savedPasswordCharacters))
def onChange(password):
nextPasswordCharacter = password[len(self.savedPasswordCharacters):]
if len(password) < len(self.savedPasswordCharacters):
self.savedPasswordCharacters.pop()
elif len(password) > len(self.savedPasswordCharacters):
self.savedPasswordCharacters.append(nextPasswordCharacter)
else:
return
hiddenPassword = '*' * len(password)
self.window.show_input_panel(
"Password",
hiddenPassword,
onDone,
onChange,
None
)
self.window.show_input_panel(
"Password",
"",
onDone,
onChange,
None
)
class SubforceSyncCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
with PerforceWrapper() as p4:
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
dirtyOpenFiles = (view.file_name() for window in sublime.windows() for view in window.views() if view.is_dirty())
dirtyFileInSyncPath = False
for dirtyOpenFile in dirtyOpenFiles:
for path in paths:
if os.path.commonprefix([path, dirtyOpenFile]) == path:
dirtyFileInSyncPath = True
break
performSync = not dirtyFileInSyncPath or \
sublime.ok_cancel_dialog("You are about to sync over one or more files with unsaved modifications. Are you sure you want to proceed?")
paths = [ellipsizeIfDirectory(path) for path in paths]
if performSync:
# @TODO: Add a configurable logging system
print("Subforce: syncing\n\t{}".format("\n\t".join(paths)))
p4.run_sync(paths)
class SubforceAddCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
paths = [ellipsizeIfDirectory(path) for path in paths]
def onDoneCallback(selectedChangelistNumber):
print("Subforce: adding\n\t{}\nto changelist {}: ".format("\n\t".join(paths), selectedChangelistNumber))
changelistManager.addToChangelist(selectedChangelistNumber, paths)
changelistManager.viewAllChangelists(onDoneCallback, includeNew=True, includeDefault=True)
class SubforceGetRevisionCommand(sublime_plugin.WindowCommand):
def run(self, paths):
perforceWrapper = PerforceWrapper()
revisionManager = RevisionManager(self.window, perforceWrapper)
with perforceWrapper as p4:
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
path = checkForAndGetSinglePath(paths)
if not path:
return
path = ellipsizeIfDirectory(path)
def onDoneCallback(selectedRevision):
revisionManager.getRevision(selectedRevision, path)
revisionManager.showHaveHeadAndFileRevisions(path, onDoneCallback)
class SubforceCheckoutCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
paths = [ellipsizeIfDirectory(path) for path in paths]
def onDoneCallback(selectedChangelistNumber):
print("Subforce: checking out\n\t{}\nin changelist {}: ".format("\n\t".join(paths), selectedChangelistNumber))
changelistManager.checkoutInChangelist(selectedChangelistNumber, paths)
changelistManager.viewAllChangelists(onDoneCallback, includeNew=True, includeDefault=True)
class SubforceRevertCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
with PerforceWrapper() as p4:
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
ellipsizedPaths = [ellipsizeIfDirectory(path) for path in paths]
print("Subforce: reverting\n\t{}".format("\n\t".join(ellipsizedPaths)))
p4.run_revert(ellipsizedPaths)
self._resetAutoCheckoutEventListenerSettingsForAllViews(paths)
def _resetAutoCheckoutEventListenerSettingsForAllViews(self, paths):
for path in paths:
for view in getAllViewsForPath(path):
SubforceAutoCheckoutEventListener.eraseAutoCheckoutEventListenerSettings(view)
class SubforceRenameCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
with perforceWrapper as p4:
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
path = checkForAndGetSinglePath(paths)
if not path:
return
path = ellipsizeIfDirectory(path)
stat = p4.run_fstat(path)
if 'action' not in stat[0]:
requiresCheckout = True
else:
requiresCheckout = False
if requiresCheckout and not \
sublime.ok_cancel_dialog(
"File must be checked out before it can be renamed. Do you want to check it out now?",
"Checkout"
):
return
def renameFile(file):
def onDoneRenameCallback(newFileName):
with perforceWrapper as p4: # necessary because the callback runs in a different thread
p4.run_rename(file, newFileName)
self.window.show_input_panel(
"New File Name",
file,
onDoneRenameCallback,
None,
None
)
if requiresCheckout:
def onDoneViewingChangelistsCallback(selectedChangelistNumber):
changelistManager.checkoutInChangelist(selectedChangelistNumber, path)
renameFile(path)
changelistManager.viewAllChangelists(onDoneViewingChangelistsCallback, includeNew=True, includeDefault=True)
else:
renameFile(path)
class SubforceViewChangelistsCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
ChangelistManager(self.window, perforceWrapper).viewAllChangelists(None)
class SubforceCreateChangelistCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
ChangelistManager(self.window, perforceWrapper).createChangelist()
class SubforceEditChangelistCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
def onDoneCallback(selectedChangelistNumber):
print("Subforce: editing {}".format(selectedChangelistNumber))
changelistManager.editChangelist(selectedChangelistNumber)
changelistManager.viewAllChangelists(onDoneCallback)
class SubforceDeleteChangelistCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
def onDoneCallback(selectedChangelistNumber):
print("Subforce: deleting {}".format(selectedChangelistNumber))
changelistManager.deleteChangelist(selectedChangelistNumber)
changelistManager.viewAllChangelists(onDoneCallback)
class SubforceMoveToChangelistCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
paths = [ellipsizeIfDirectory(path) for path in paths]
def onDoneCallback(selectedChangelistNumber):
print("Subforce: moving\n\t{}\nto changelist {}".format("\n\t".join(paths), selectedChangelistNumber))
changelistManager.moveToChangelist(selectedChangelistNumber, paths)
changelistManager.viewAllChangelists(onDoneCallback, includeNew=True, includeDefault=True)
class SubforceRevertFilesInChangelistCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
def onDoneCallback(selectedChangelistNumber):
print("Subforce: reverting files in {}".format(selectedChangelistNumber))
changelistManager.revertFilesInChangelist(selectedChangelistNumber)
changelistManager.viewAllChangelists(onDoneCallback)
def executeP4VCCommand(command, *args):
with PerforceWrapper() as p4:
command = " ".join(["p4vc.exe", command] + list(args))
print("Subforce: executing p4vc command '{}'".format(command))
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=p4.cwd)
stdout, stderr = process.communicate()
if stdout:
print(stdout)
if stderr:
print(stderr)
class SubforceViewTimelapseCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
for path in paths:
executeP4VCCommand("timelapseview", path)
class SubforceSubmitChangelistCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
def onDoneCallback(selectedChangelistNumber):
if selectedChangelistNumber:
executeP4VCCommand("submit", "-c", selectedChangelistNumber)
changelistManager.viewAllChangelists(onDoneCallback)
class SubforceResolveCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
executeP4VCCommand("resolve", " ".join(paths))
class RevisionManager:
def __init__(self, window, perforceWrapper):
self._window = window
self._perforceWrapper = perforceWrapper
self._revisionDescriptionOutputPanel = DescriptionOutputPanel(self._window)
self._callbackDepth = 0
def diffClientFileAgainstDepotRevision(self, revision, file):
with self._perforceWrapper as p4:
depotFilePath = p4.run_fstat(file)[0]['depotFile']
temporaryDepotFilePath = self._createTemporaryDepotFile(depotFilePath, revision)
self._startP4MergeThread(
temporaryDepotFilePath,
file,
getRevisionQualifiedDepotPath(depotFilePath, revision),
"{} (workspace file)".format(file)
)
def diffDepotRevisions(self, revision1, revision2, file):
with self._perforceWrapper as p4:
(revision1, revision2) = sorted([revision1, revision2]) # ensures the most recent revision is on the right
depotFilePath = p4.run_fstat(file)[0]['depotFile']
temporaryDepotFilePath1 = self._createTemporaryDepotFile(depotFilePath, revision1)
temporaryDepotFilePath2 = self._createTemporaryDepotFile(depotFilePath, revision2)
self._startP4MergeThread(
temporaryDepotFilePath1,
temporaryDepotFilePath2,
getRevisionQualifiedDepotPath(depotFilePath, revision1),
getRevisionQualifiedDepotPath(depotFilePath, revision2)
)
def showHaveHeadRevisions(self, onDoneCallback):
revisions = [{'revision': HAVE_REVISION_NAME, 'desc': HAVE_REVISION_DESCRIPTION}, {'revision': HEAD_REVISION_NAME, 'desc': HEAD_REVISION_DESCRIPTION}]
self._showRevisions(revisions, onDoneCallback)
def showHaveHeadAndFileRevisions(self, file, onDoneCallback):
with self._perforceWrapper as p4:
revisions = [createRevision(HAVE_REVISION_NAME, HAVE_REVISION_DESCRIPTION), createRevision(HEAD_REVISION_NAME, HEAD_REVISION_DESCRIPTION)]
revisions.extend(
[
createRevision(str(revision.rev), revision.desc)
for revision in p4.run_filelog("-l", file)[0].revisions
]
)
self._showRevisions(revisions, onDoneCallback)
def getRevision(self, revision, file):
with self._perforceWrapper as p4:
depotFilePath = p4.run_fstat(file)[0]['depotFile']
p4.run_sync(getRevisionQualifiedDepotPath(depotFilePath, revision))
def _showRevisions(self, revisions, onDoneCallback):
self._callbackDepth += 1
def onDone(selectedIndex):
selectedRevision = revisions[selectedIndex]['revision'] if selectedIndex >= 0 else None
if onDoneCallback and selectedRevision:
onDoneCallback(selectedRevision)
if self._callbackDepth == 1: # last one out turns off the lights.
self._revisionDescriptionOutputPanel.hide()
self._callbackDepth -= 1
def onHighlighted(selectedIndex):
self._revisionDescriptionOutputPanel.show(revisions[selectedIndex]['desc'])
revisionItems = [[revision['revision'], revision['desc'][:250]] for revision in revisions]
self._window.show_quick_panel(
revisionItems,
onDone,
sublime.KEEP_OPEN_ON_FOCUS_LOST,
0,
onHighlighted
)
def _startP4MergeThread(self, leftFile, rightFile, leftFileAlias, rightFileAlias):
def target():
command = ["p4merge.exe", '-nl', leftFileAlias, '-nr', rightFileAlias, leftFile, rightFile]
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if stdout:
print(stdout)
if stderr:
print(stderr)
threading.Thread(target=target).start()
def _createTemporaryDepotFile(self, file, revision):
with self._perforceWrapper as p4:
# @TODO: At some point in time, we may want to create temporary files with the same naming convention as p4v.
with tempfile.NamedTemporaryFile(prefix="subforce_", delete=False) as temporaryFile:
depotFilePath = getRevisionQualifiedDepotPath(file, revision)
depotFileText = p4.run_print(depotFilePath)[1]
temporaryFile.write(bytes(depotFileText, 'UTF-8'))
return temporaryFile.name
class SubforceViewGraphicalDiffWorkspaceFileCommand(sublime_plugin.WindowCommand):
'''
Diffs one or more files against a depot revision.
A single file may be diffed against any revision.
Multiple files may only be diffed against the have or head revisions.
'''
def run(self, paths=[]):
perforceWrapper = PerforceWrapper()
revisionManager = RevisionManager(self.window, perforceWrapper)
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
if len(paths) == 1:
path = paths[0]
def onDoneCallback(selectedRevision):
revisionManager.diffClientFileAgainstDepotRevision(selectedRevision, path)
revisionManager.showHaveHeadAndFileRevisions(path, onDoneCallback)
else:
def onDoneCallback(selectedRevision):
for path in paths:
revisionManager.diffClientFileAgainstDepotRevision(selectedRevision, path)
revisionManager.showHaveHeadRevisions(onDoneCallback)
class SubforceViewGraphicalDiffDepotRevisionsCommand(sublime_plugin.WindowCommand):
'''
Diffs two depot revisions of a given file.
Only a single file may be diffed at a time.
'''
def run(self, paths=[]):
perforceWrapper = PerforceWrapper()
revisionManager = RevisionManager(self.window, perforceWrapper)
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
path = checkForAndGetSinglePath(paths)
if not path:
return
def onDoneCallback1(selectedRevision1):
def onDoneCallback2(selectedRevision2):
revisionManager.diffDepotRevisions(selectedRevision1, selectedRevision2, path)
revisionManager.showHaveHeadAndFileRevisions(path, onDoneCallback2)
revisionManager.showHaveHeadAndFileRevisions(path, onDoneCallback1)
| mit | 2,428,327,707,528,457,000 | 36.43202 | 156 | 0.676771 | false |
mentholi/django-user-accounts | account/models.py | 8 | 13425 | from __future__ import unicode_literals
import datetime
import operator
try:
from urllib.parse import urlencode
except ImportError: # python 2
from urllib import urlencode
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone, translation, six
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
import pytz
from account import signals
from account.conf import settings
from account.fields import TimeZoneField
from account.hooks import hookset
from account.managers import EmailAddressManager, EmailConfirmationManager
from account.signals import signup_code_sent, signup_code_used
class Account(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name="account", verbose_name=_("user"))
timezone = TimeZoneField(_("timezone"))
language = models.CharField(
_("language"),
max_length=10,
choices=settings.ACCOUNT_LANGUAGES,
default=settings.LANGUAGE_CODE
)
@classmethod
def for_request(cls, request):
if request.user.is_authenticated():
try:
account = Account._default_manager.get(user=request.user)
except Account.DoesNotExist:
account = AnonymousAccount(request)
else:
account = AnonymousAccount(request)
return account
@classmethod
def create(cls, request=None, **kwargs):
create_email = kwargs.pop("create_email", True)
confirm_email = kwargs.pop("confirm_email", None)
account = cls(**kwargs)
if "language" not in kwargs:
if request is None:
account.language = settings.LANGUAGE_CODE
else:
account.language = translation.get_language_from_request(request, check_path=True)
account.save()
if create_email and account.user.email:
kwargs = {"primary": True}
if confirm_email is not None:
kwargs["confirm"] = confirm_email
EmailAddress.objects.add_email(account.user, account.user.email, **kwargs)
return account
def __str__(self):
return str(self.user)
def now(self):
"""
Returns a timezone aware datetime localized to the account's timezone.
"""
now = datetime.datetime.utcnow().replace(tzinfo=pytz.timezone("UTC"))
timezone = settings.TIME_ZONE if not self.timezone else self.timezone
return now.astimezone(pytz.timezone(timezone))
def localtime(self, value):
"""
Given a datetime object as value convert it to the timezone of
the account.
"""
timezone = settings.TIME_ZONE if not self.timezone else self.timezone
if value.tzinfo is None:
value = pytz.timezone(settings.TIME_ZONE).localize(value)
return value.astimezone(pytz.timezone(timezone))
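# Illustrative sketch (not part of the original module): typical use inside a
# Django view, where `request` is an HttpRequest and `utc_dt` is a datetime
# (both names are hypothetical).
#
#   account = Account.for_request(request)
#   account.now()              # aware datetime in the account's timezone
#   account.localtime(utc_dt)  # converts to the account's timezone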
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def user_post_save(sender, **kwargs):
"""
After User.save is called we check to see if it was a created user. If so,
we check if the User object wants account creation. If all passes we
create an Account object.
We only run on user creation to avoid having to check for existence on
each call to User.save.
"""
user, created = kwargs["instance"], kwargs["created"]
disabled = getattr(user, "_disable_account_creation", not settings.ACCOUNT_CREATE_ON_SAVE)
if created and not disabled:
Account.create(user=user)
class AnonymousAccount(object):
def __init__(self, request=None):
self.user = AnonymousUser()
self.timezone = settings.TIME_ZONE
if request is None:
self.language = settings.LANGUAGE_CODE
else:
self.language = translation.get_language_from_request(request, check_path=True)
def __unicode__(self):
return "AnonymousAccount"
class SignupCode(models.Model):
class AlreadyExists(Exception):
pass
class InvalidCode(Exception):
pass
code = models.CharField(_("code"), max_length=64, unique=True)
max_uses = models.PositiveIntegerField(_("max uses"), default=0)
expiry = models.DateTimeField(_("expiry"), null=True, blank=True)
inviter = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True)
email = models.EmailField(blank=True)
notes = models.TextField(_("notes"), blank=True)
sent = models.DateTimeField(_("sent"), null=True, blank=True)
created = models.DateTimeField(_("created"), default=timezone.now, editable=False)
use_count = models.PositiveIntegerField(_("use count"), editable=False, default=0)
class Meta:
verbose_name = _("signup code")
verbose_name_plural = _("signup codes")
def __unicode__(self):
if self.email:
return "{0} [{1}]".format(self.email, self.code)
else:
return self.code
@classmethod
def exists(cls, code=None, email=None):
checks = []
if code:
checks.append(Q(code=code))
if email:
            checks.append(Q(email=email))
if not checks:
return False
return cls._default_manager.filter(six.moves.reduce(operator.or_, checks)).exists()
@classmethod
def create(cls, **kwargs):
email, code = kwargs.get("email"), kwargs.get("code")
if kwargs.get("check_exists", True) and cls.exists(code=code, email=email):
raise cls.AlreadyExists()
expiry = timezone.now() + datetime.timedelta(hours=kwargs.get("expiry", 24))
if not code:
code = hookset.generate_signup_code_token(email)
params = {
"code": code,
"max_uses": kwargs.get("max_uses", 0),
"expiry": expiry,
"inviter": kwargs.get("inviter"),
"notes": kwargs.get("notes", "")
}
if email:
params["email"] = email
return cls(**params)
@classmethod
def check_code(cls, code):
try:
signup_code = cls._default_manager.get(code=code)
except cls.DoesNotExist:
raise cls.InvalidCode()
else:
if signup_code.max_uses and signup_code.max_uses <= signup_code.use_count:
raise cls.InvalidCode()
else:
if signup_code.expiry and timezone.now() > signup_code.expiry:
raise cls.InvalidCode()
else:
return signup_code
def calculate_use_count(self):
self.use_count = self.signupcoderesult_set.count()
self.save()
def use(self, user):
"""
Add a SignupCode result attached to the given user.
"""
result = SignupCodeResult()
result.signup_code = self
result.user = user
result.save()
signup_code_used.send(sender=result.__class__, signup_code_result=result)
def send(self, **kwargs):
protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
current_site = kwargs["site"] if "site" in kwargs else Site.objects.get_current()
if "signup_url" not in kwargs:
signup_url = "{0}://{1}{2}?{3}".format(
protocol,
current_site.domain,
reverse("account_signup"),
urlencode({"code": self.code})
)
else:
signup_url = kwargs["signup_url"]
ctx = {
"signup_code": self,
"current_site": current_site,
"signup_url": signup_url,
}
ctx.update(kwargs.get("extra_ctx", {}))
hookset.send_invitation_email([self.email], ctx)
self.sent = timezone.now()
self.save()
signup_code_sent.send(sender=SignupCode, signup_code=self)
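# Illustrative sketch (not part of the original module) of the invitation flow;
# the email address is a placeholder.
#
#   code = SignupCode.create(email="[email protected]", max_uses=1, expiry=48)
#   code.save()
#   code.send()                        # emails the signup link
#   SignupCode.check_code(code.code)   # raises SignupCode.InvalidCode if unusable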
class SignupCodeResult(models.Model):
signup_code = models.ForeignKey(SignupCode)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
timestamp = models.DateTimeField(default=timezone.now)
def save(self, **kwargs):
super(SignupCodeResult, self).save(**kwargs)
self.signup_code.calculate_use_count()
class EmailAddress(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
email = models.EmailField(unique=settings.ACCOUNT_EMAIL_UNIQUE)
verified = models.BooleanField(_("verified"), default=False)
primary = models.BooleanField(_("primary"), default=False)
objects = EmailAddressManager()
class Meta:
verbose_name = _("email address")
verbose_name_plural = _("email addresses")
if not settings.ACCOUNT_EMAIL_UNIQUE:
unique_together = [("user", "email")]
def __unicode__(self):
return "{0} ({1})".format(self.email, self.user)
def set_as_primary(self, conditional=False):
old_primary = EmailAddress.objects.get_primary(self.user)
if old_primary:
if conditional:
return False
old_primary.primary = False
old_primary.save()
self.primary = True
self.save()
self.user.email = self.email
self.user.save()
return True
def send_confirmation(self, **kwargs):
confirmation = EmailConfirmation.create(self)
confirmation.send(**kwargs)
return confirmation
def change(self, new_email, confirm=True):
"""
Given a new email address, change self and re-confirm.
"""
with transaction.atomic():
self.user.email = new_email
self.user.save()
self.email = new_email
self.verified = False
self.save()
if confirm:
self.send_confirmation()
class EmailConfirmation(models.Model):
email_address = models.ForeignKey(EmailAddress)
created = models.DateTimeField(default=timezone.now)
sent = models.DateTimeField(null=True)
key = models.CharField(max_length=64, unique=True)
objects = EmailConfirmationManager()
class Meta:
verbose_name = _("email confirmation")
verbose_name_plural = _("email confirmations")
def __unicode__(self):
return "confirmation for {0}".format(self.email_address)
@classmethod
def create(cls, email_address):
key = hookset.generate_email_confirmation_token(email_address.email)
return cls._default_manager.create(email_address=email_address, key=key)
def key_expired(self):
expiration_date = self.sent + datetime.timedelta(days=settings.ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS)
return expiration_date <= timezone.now()
key_expired.boolean = True
def confirm(self):
if not self.key_expired() and not self.email_address.verified:
email_address = self.email_address
email_address.verified = True
email_address.set_as_primary(conditional=True)
email_address.save()
signals.email_confirmed.send(sender=self.__class__, email_address=email_address)
return email_address
def send(self, **kwargs):
current_site = kwargs["site"] if "site" in kwargs else Site.objects.get_current()
protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
activate_url = "{0}://{1}{2}".format(
protocol,
current_site.domain,
reverse(settings.ACCOUNT_EMAIL_CONFIRMATION_URL, args=[self.key])
)
ctx = {
"email_address": self.email_address,
"user": self.email_address.user,
"activate_url": activate_url,
"current_site": current_site,
"key": self.key,
}
hookset.send_confirmation_email([self.email_address.email], ctx)
self.sent = timezone.now()
self.save()
signals.email_confirmation_sent.send(sender=self.__class__, confirmation=self)
class AccountDeletion(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL)
email = models.EmailField()
date_requested = models.DateTimeField(_("date requested"), default=timezone.now)
date_expunged = models.DateTimeField(_("date expunged"), null=True, blank=True)
class Meta:
verbose_name = _("account deletion")
verbose_name_plural = _("account deletions")
@classmethod
def expunge(cls, hours_ago=None):
if hours_ago is None:
hours_ago = settings.ACCOUNT_DELETION_EXPUNGE_HOURS
before = timezone.now() - datetime.timedelta(hours=hours_ago)
count = 0
for account_deletion in cls.objects.filter(date_requested__lt=before, user__isnull=False):
settings.ACCOUNT_DELETION_EXPUNGE_CALLBACK(account_deletion)
account_deletion.date_expunged = timezone.now()
account_deletion.save()
count += 1
return count
@classmethod
def mark(cls, user):
account_deletion, created = cls.objects.get_or_create(user=user)
account_deletion.email = user.email
account_deletion.save()
settings.ACCOUNT_DELETION_MARK_CALLBACK(account_deletion)
return account_deletion
| mit | -2,159,135,778,819,646,700 | 34.143979 | 110 | 0.626443 | false |
crafty78/ansible | lib/ansible/utils/color.py | 9 | 3198 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from ansible import constants as C
ANSIBLE_COLOR=True
if C.ANSIBLE_NOCOLOR:
ANSIBLE_COLOR=False
elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
ANSIBLE_COLOR=False
else:
try:
import curses
curses.setupterm()
if curses.tigetnum('colors') < 0:
ANSIBLE_COLOR=False
except ImportError:
# curses library was not found
pass
except curses.error:
# curses returns an error (e.g. could not find terminal)
ANSIBLE_COLOR=False
if C.ANSIBLE_FORCE_COLOR:
ANSIBLE_COLOR=True
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
codeCodes = {
'black': u'0;30', 'bright gray': u'0;37',
'blue': u'0;34', 'white': u'1;37',
'green': u'0;32', 'bright blue': u'1;34',
'cyan': u'0;36', 'bright green': u'1;32',
'red': u'0;31', 'bright cyan': u'1;36',
'purple': u'0;35', 'bright red': u'1;31',
'yellow': u'0;33', 'bright purple': u'1;35',
'dark gray': u'1;30', 'bright yellow': u'1;33',
'magenta': u'0;35', 'bright magenta': u'1;35',
'normal': u'0' ,
}
def stringc(text, color):
"""String in color."""
if ANSIBLE_COLOR:
return "\n".join([u"\033[%sm%s\033[0m" % (codeCodes[color], t) for t in text.split('\n')])
else:
return text
# --- end "pretty"
def colorize(lead, num, color):
""" Print 'lead' = 'num' in 'color' """
s = u"%s=%-4s" % (lead, str(num))
if num != 0 and ANSIBLE_COLOR and color is not None:
s = stringc(s, color)
return s
def hostcolor(host, stats, color=True):
if ANSIBLE_COLOR and color:
if stats['failures'] != 0 or stats['unreachable'] != 0:
return u"%-37s" % stringc(host, C.COLOR_ERROR)
elif stats['changed'] != 0:
return u"%-37s" % stringc(host, C.COLOR_CHANGED)
else:
return u"%-37s" % stringc(host, C.COLOR_OK)
return u"%-26s" % host
| gpl-3.0 | 6,810,178,754,648,268,000 | 32.663158 | 98 | 0.629143 | false |
ScotticusMaximus/ModTest | ModTest.py | 1 | 2941 | #!/usr/bin/env python3
# Copyright 2015 Scott Haiden.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
"""
A script for creating pseudo-jails. Useful for testing software modules. Kind
of similar to python's virtual env, but more generalized and centered around
the modulecmd utility.
"""
import os
import sys
import subprocess
from string import Template
TEMPLATE = """
#!/bin/bash
source /etc/profile
source $$HOME/.profile
PRISON="$prefix/prisons/$$HOSTNAME"
if [ ! -d "$$PRISON" ]; then
mkdir "$$PRISON"
ln -s "$$HOME/.Xauthority" "$$PRISON"
fi
export HOME="$$PRISON"
export _JAVA_OPTIONS="$$_JAVA_OPTIONS -Duser.home=$$HOME"
shopt -s expand_aliases
cd $$HOME
export PS1="{captive} $$PS1"
trap 'echo END SESSION' EXIT
echo 'BEGIN SESSION'
#begin
"""
TEMPLATE = Template(TEMPLATE.strip())
# Find where ModTest.py is stored...
BASEDIR = os.path.dirname(os.path.abspath(__file__))
# Create a "boilerplate" script with the current node's hostname.
SCRIPT = os.path.join(BASEDIR, "boilerplates", os.environ["HOSTNAME"])
DELIM = ":"
def resolve_module(module):
"""
If the module begins with a ., then it will return the absolute path to the
module file. This lets you make a local module (not in $MODULEPATH) and
load it using this script.
"""
if not module.startswith("."):
return module
return os.path.abspath(module)
def main(script, modules, cmds):
with open(script, "wt") as init:
init.write(TEMPLATE.substitute(prefix=BASEDIR))
init.write("\n")
for module in modules:
init.write("module load {}\n".format(module))
if cmds is None:
if modules:
lastmodule = modules[-1].partition("/")
cmds = [lastmodule[0], "exit"]
else:
cmds = []
for cmd in cmds:
init.write("{}\n".format(cmd))
ec = subprocess.call(["/bin/bash", "--init-file", script])
exit(ec)
def get_help():
print("Usage: Please see README.txt")
if __name__ == '__main__':
args = sys.argv[1:]
if DELIM in args:
idx = args.index(DELIM)
modules = [resolve_module(mod) for mod in args[:idx]]
commands = args[idx+1:]
else:
modules = [resolve_module(mod) for mod in args]
commands = None
main(SCRIPT, modules, commands)
| gpl-3.0 | -327,454,508,247,026,200 | 25.981651 | 79 | 0.654879 | false |
RollMan/LtSRcon | src/onchatcommands.py | 1 | 4142 | # -*- coding: utf-8 -*-
mapdic = {"Locker" :"MP_Prison",
"Zavod" :"MP_Abandoned",
"Dam" :"MP_Damage",
"Flood" :"MP_Flooded",
"Railway" :"MP_Journey",
"Paracel" :"MP_Naval",
"Hainan" :"MP_Resort",
"Shanghai":"MP_Siege",
"Rogue" :"MP_TheDish",
"Dawn" :"MP_Tremors",
"Silk" :"XP1_001",
"Altai" :"XP1_002",
"GPeaks" :"XP1_003",
"DPass" :"XP1_004",
"Caspian" :"XP0_Caspia",
"FStorm" :"XP0_FireStor",
"Metro" :"XP0_Metro",
"Oman" :"XP0_Oman",
"LIsland" :"XP2_001",
"Nansha" :"XP2_002",
"WBreaker":"XP2_003",
"OMortar" :"XP2_004",
"PMarket" :"XP3_MarketPl",
"Propag" :"XP3_Prpganda",
"LGarden" :"XP3_UrbanGdn",
"SDragon" :"XP3_WtrFront",
"Whiteout":"XP4_Arctic",
"Hammer" :"XP4_SubBase",
"Hanger" :"XP4_Titan",
"Giant" :"XP4_WalkerFactory"
}
modedic = {"cqL":"ConquestLarge0",
"cqS":"ConquestSmall0",
"dmn":"Domination0",
"tdm":"TeamDeathMatch0",
"rsh":"RushLarge0",
"obl":"Obliteration",
"dfs":"Elimination0",
"std":"SquadDeathMatch0",
"air":"AirSuperiority0",
"cap":"CaptureTheFlag0",
"caS":"CarrierAssaultSmall0",
"caL":"CarrierAssaultLarge0",
"lnk":"Chainlink0"
}
def sendcommand(event, server):
result = []
option = []
order = event[2].split()[0]
if " " in event[2]:
option = event[2].split()
del option[0]
if order[0] != '!':
return ["Not command"]
elif order == "!rs":
result.append(server.sndcmd("mapList.restartRound"))
elif order == "!help":
result.append(server.sndcmd("admin.say", ["!rs, !chgmap <map> <mode>, !maplist, !modelist", "all"]))
elif order == "!chgmap":
try:
if len(option) != 2:
result.append(server.sndcmd("admin.say", ["Usage: !chgmap <map> <mode>", "all"]))
elif option[0] not in mapdic or option[1] not in modedic:
result.append(server.sndcmd("admin.say", ["No such a map or mode.", "all"]))
else:
result.append(server.sndcmd("mapList.clear"))
result.append(server.sndcmd("mapList.add", [mapdic[option[0]], modedic[option[1]], 1]))
result.append(server.sndcmd("mapList.runNextRound"))
except Exception as e:
server.sndcmd("admin.say", ["Failed because of unknown problem.", "all"])
message = "ERROE: in !chgmap type:" + str(type(e)) + " args:" + str(e.args)
result.append(message)
elif order == "!maplist":
try:
line = ""
for key in mapdic:
if len(line) > 100:
result.append(server.sndcmd("admin.say", [line, "all"]))
line = ""
else:
line += key + ","
result.append(server.sndcmd("admin.say", [line, "all"]))
except Exception as e:
server.sndcmd("admin.say", ["Failed", "all"])
message = "ERROR: type:" + str(type(e)) + " args:" + str(e.args)
result.append(message)
elif order == "!modelist":
try:
line = ""
for key in modedic:
if len(line) > 100:
result.append(server.sndcmd("admin.say", [line, "all"]))
line = ""
else:
line += key + ","
result.append(server.sndcmd("admin.say", [line, "all"]))
except Exception as e:
server.sndcmd("admin.say", ["Failed", "all"])
message = "ERROE: type:" + str(type(e)) + "args:" + str(e.args)
result.append(message)
else:
result.append(server.sndcmd("admin.say", ["No such a command", "all"]))
return ["No such a command"]
return result
| mit | -553,197,620,928,180,540 | 35.017391 | 108 | 0.472236 | false |
tungvx/deploy | Django-0.90/django/core/template/loader.py | 1 | 8648 | # Wrapper for loading templates from storage of some sort (e.g. filesystem, database).
#
# This uses the TEMPLATE_LOADERS setting, which is a list of loaders to use.
# Each loader is expected to have this interface:
#
# callable(name, dirs=[])
#
# name is the template name.
# dirs is an optional list of directories to search instead of TEMPLATE_DIRS.
#
# Each loader should have an "is_usable" attribute set. This is a boolean that
# specifies whether the loader can be used in this Python installation. Each
# loader is responsible for setting this when it's initialized.
#
# For example, the eggs loader (which is capable of loading templates from
# Python eggs) sets is_usable to False if the "pkg_resources" module isn't
# installed, because pkg_resources is necessary to read eggs.
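#
# For illustration only (not part of this file): a minimal loader matching the
# interface described above could look like the following, where
# `read_template_file` stands in for whatever storage access the loader uses:
#
#     def load_template_source(template_name, template_dirs=None):
#         try:
#             return read_template_file(template_name, template_dirs)
#         except IOError:
#             raise TemplateDoesNotExist, template_name
#     load_template_source.is_usable = True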
from django.core.exceptions import ImproperlyConfigured
from django.core.template import Template, Context, Node, TemplateDoesNotExist, TemplateSyntaxError, resolve_variable_with_filters, register_tag
from django.conf.settings import TEMPLATE_LOADERS
template_source_loaders = []
for path in TEMPLATE_LOADERS:
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = __import__(module, globals(), locals(), [attr])
except ImportError, e:
raise ImproperlyConfigured, 'Error importing template source loader %s: "%s"' % (module, e)
try:
func = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured, 'Module "%s" does not define a "%s" callable template source loader' % (module, attr)
if not func.is_usable:
import warnings
warnings.warn("Your TEMPLATE_LOADERS setting includes %r, but your Python installation doesn't support that type of template loading. Consider removing that line from TEMPLATE_LOADERS." % path)
else:
template_source_loaders.append(func)
def load_template_source(name, dirs=None):
for loader in template_source_loaders:
try:
return loader(name, dirs)
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist, name
class ExtendsError(Exception):
pass
def get_template(template_name):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
return get_template_from_string(load_template_source(template_name))
def get_template_from_string(source):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(source)
def render_to_string(template_name, dictionary=None, context_instance=None):
"""
Loads the given template_name and renders it with the given dictionary as
context. The template_name may be a string to load a single template using
get_template, or it may be a tuple to use select_template to find one of
the templates in the list. Returns a string.
"""
dictionary = dictionary or {}
if isinstance(template_name, (list, tuple)):
t = select_template(template_name)
else:
t = get_template(template_name)
if context_instance:
context_instance.update(dictionary)
else:
context_instance = Context(dictionary)
return t.render(context_instance)
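# Illustrative calls (not part of the original file; the template names and the
# `poll` variable are made up):
#
#     html = render_to_string("polls/results.html", {"poll": poll})
#     html = render_to_string(("polls/custom_results.html", "polls/results.html"),
#                             {"poll": poll})
#
# The second form tries each name in order via select_template() and renders
# the first template that loads successfully.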
def select_template(template_name_list):
"Given a list of template names, returns the first that can be loaded."
for template_name in template_name_list:
try:
return get_template(template_name)
except TemplateDoesNotExist:
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist, ', '.join(template_name_list)
class BlockNode(Node):
def __init__(self, name, nodelist, parent=None):
self.name, self.nodelist, self.parent = name, nodelist, parent
def __repr__(self):
return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
def render(self, context):
context.push()
# Save context in case of block.super().
self.context = context
context['block'] = self
result = self.nodelist.render(context)
context.pop()
return result
def super(self):
if self.parent:
return self.parent.render(self.context)
return ''
def add_parent(self, nodelist):
if self.parent:
self.parent.add_parent(nodelist)
else:
self.parent = BlockNode(self.name, nodelist)
class ExtendsNode(Node):
def __init__(self, nodelist, parent_name, parent_name_var, template_dirs=None):
self.nodelist = nodelist
self.parent_name, self.parent_name_var = parent_name, parent_name_var
self.template_dirs = template_dirs
def get_parent(self, context):
if self.parent_name_var:
self.parent_name = resolve_variable_with_filters(self.parent_name_var, context)
parent = self.parent_name
if not parent:
error_msg = "Invalid template name in 'extends' tag: %r." % parent
if self.parent_name_var:
error_msg += " Got this from the %r variable." % self.parent_name_var
raise TemplateSyntaxError, error_msg
try:
return get_template_from_string(load_template_source(parent, self.template_dirs))
except TemplateDoesNotExist:
raise TemplateSyntaxError, "Template %r cannot be extended, because it doesn't exist" % parent
def render(self, context):
compiled_parent = self.get_parent(context)
parent_is_child = isinstance(compiled_parent.nodelist[0], ExtendsNode)
parent_blocks = dict([(n.name, n) for n in compiled_parent.nodelist.get_nodes_by_type(BlockNode)])
for block_node in self.nodelist.get_nodes_by_type(BlockNode):
# Check for a BlockNode with this node's name, and replace it if found.
try:
parent_block = parent_blocks[block_node.name]
except KeyError:
# This BlockNode wasn't found in the parent template, but the
# parent block might be defined in the parent's *parent*, so we
# add this BlockNode to the parent's ExtendsNode nodelist, so
# it'll be checked when the parent node's render() is called.
if parent_is_child:
compiled_parent.nodelist[0].nodelist.append(block_node)
else:
# Keep any existing parents and add a new one. Used by BlockNode.
parent_block.parent = block_node.parent
parent_block.add_parent(parent_block.nodelist)
parent_block.nodelist = block_node.nodelist
return compiled_parent.render(context)
def do_block(parser, token):
"""
Define a block that can be overridden by child templates.
"""
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError, "'%s' tag takes only one argument" % bits[0]
block_name = bits[1]
# Keep track of the names of BlockNodes found in this template, so we can
# check for duplication.
try:
if block_name in parser.__loaded_blocks:
raise TemplateSyntaxError, "'%s' tag with name '%s' appears more than once" % (bits[0], block_name)
parser.__loaded_blocks.append(block_name)
    except AttributeError: # parser.__loaded_blocks isn't a list yet
parser.__loaded_blocks = [block_name]
nodelist = parser.parse(('endblock',))
parser.delete_first_token()
return BlockNode(block_name, nodelist)
def do_extends(parser, token):
"""
Signal that this template extends a parent template.
This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
uses the literal value "base" as the name of the parent template to extend,
or ``{% entends variable %}`` uses the value of ``variable`` as the name
of the parent template to extend.
"""
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError, "'%s' takes one argument" % bits[0]
parent_name, parent_name_var = None, None
if (bits[1].startswith('"') and bits[1].endswith('"')) or (bits[1].startswith("'") and bits[1].endswith("'")):
parent_name = bits[1][1:-1]
else:
parent_name_var = bits[1]
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError, "'%s' cannot appear more than once in the same template" % bits[0]
return ExtendsNode(nodelist, parent_name, parent_name_var)
register_tag('block', do_block)
register_tag('extends', do_extends)
| apache-2.0 | 1,179,641,253,401,067,500 | 40.980583 | 201 | 0.662697 | false |
saulpw/visidata | visidata/graph.py | 1 | 7594 | from visidata import *
option('color_graph_axis', 'bold', 'color for graph axis labels')
def numericCols(cols):
return [c for c in cols if vd.isNumeric(c)]
class InvertedCanvas(Canvas):
def zoomTo(self, bbox):
super().zoomTo(bbox)
self.fixPoint(Point(self.plotviewBox.xmin, self.plotviewBox.ymax), bbox.xymin)
def plotpixel(self, x, y, attr, row=None):
y = self.plotviewBox.ymax-y
self.pixels[y][x][attr].append(row)
def scaleY(self, canvasY):
'returns plotter y coordinate, with y-axis inverted'
plotterY = super().scaleY(canvasY)
return (self.plotviewBox.ymax-plotterY+4)
def canvasH(self, plotterY):
return (self.plotviewBox.ymax-plotterY)/self.yScaler
@property
def canvasMouse(self):
p = super().canvasMouse
p.y = self.visibleBox.ymin + (self.plotviewBox.ymax-self.plotterMouse.y)/self.yScaler
return p
# provides axis labels, legend
class GraphSheet(InvertedCanvas):
def __init__(self, *names, **kwargs):
super().__init__(*names, **kwargs)
self.xcols or vd.fail('at least one key col necessary for x-axis')
self.ycols or vd.fail('%s is non-numeric' % '/'.join(yc.name for yc in kwargs.get('ycols')))
@asyncthread
def reload(self):
nerrors = 0
nplotted = 0
self.reset()
vd.status('loading data points')
catcols = [c for c in self.xcols if not vd.isNumeric(c)]
numcols = numericCols(self.xcols)
for ycol in self.ycols:
for rownum, row in enumerate(Progress(self.sourceRows, 'plotting')): # rows being plotted from source
try:
k = tuple(c.getValue(row) for c in catcols) if catcols else (ycol.name,)
# convert deliberately to float (to e.g. linearize date)
graph_x = float(numcols[0].type(numcols[0].getValue(row))) if numcols else rownum
graph_y = ycol.type(ycol.getValue(row))
attr = self.plotColor(k)
self.point(graph_x, graph_y, attr, row)
nplotted += 1
except Exception:
nerrors += 1
if options.debug:
raise
vd.status('loaded %d points (%d errors)' % (nplotted, nerrors))
self.xzoomlevel=self.yzoomlevel=1.0
self.resetBounds()
self.refresh()
def resetBounds(self):
super().resetBounds()
self.createLabels()
def moveToRow(self, rowstr):
ymin, ymax = map(float, map(self.parseY, rowstr.split()))
self.cursorBox.ymin = ymin
self.cursorBox.h = ymax-ymin
return True
def moveToCol(self, colstr):
xmin, xmax = map(float, map(self.parseX, colstr.split()))
self.cursorBox.xmin = xmin
self.cursorBox.w = xmax-xmin
return True
def formatX(self, amt):
return ','.join(xcol.format(xcol.type(amt)) for xcol in self.xcols if vd.isNumeric(xcol))
def formatY(self, amt):
srccol = self.ycols[0]
return srccol.format(srccol.type(amt))
def parseX(self, txt):
return self.xcols[0].type(txt)
def parseY(self, txt):
return self.ycols[0].type(txt)
def add_y_axis_label(self, frac):
txt = self.formatY(self.visibleBox.ymin + frac*self.visibleBox.h)
# plot y-axis labels on the far left of the canvas, but within the plotview height-wise
attr = colors.color_graph_axis
self.plotlabel(0, self.plotviewBox.ymin + (1.0-frac)*self.plotviewBox.h, txt, attr)
def add_x_axis_label(self, frac):
txt = self.formatX(self.visibleBox.xmin + frac*self.visibleBox.w)
# plot x-axis labels below the plotviewBox.ymax, but within the plotview width-wise
attr = colors.color_graph_axis
xmin = self.plotviewBox.xmin + frac*self.plotviewBox.w
if frac == 1.0:
# shift rightmost label to be readable
xmin -= max(len(txt)*2 - self.rightMarginPixels+1, 0)
self.plotlabel(xmin, self.plotviewBox.ymax+4, txt, attr)
def createLabels(self):
self.gridlabels = []
# y-axis
self.add_y_axis_label(1.00)
self.add_y_axis_label(0.75)
self.add_y_axis_label(0.50)
self.add_y_axis_label(0.25)
self.add_y_axis_label(0.00)
# x-axis
self.add_x_axis_label(1.00)
self.add_x_axis_label(0.75)
self.add_x_axis_label(0.50)
self.add_x_axis_label(0.25)
self.add_x_axis_label(0.00)
# TODO: if 0 line is within visible bounds, explicitly draw the axis
# TODO: grid lines corresponding to axis labels
xname = ','.join(xcol.name for xcol in self.xcols if vd.isNumeric(xcol)) or 'row#'
xname, _ = clipstr(xname, self.leftMarginPixels//2-2)
self.plotlabel(0, self.plotviewBox.ymax+4, xname+'»', colors.color_graph_axis)
Sheet.addCommand('.', 'plot-column', 'vd.push(GraphSheet(sheet.name, "graph", source=sheet, sourceRows=rows, xcols=keyCols, ycols=numericCols([cursorCol])))', 'plot current numeric column vs key columns; numeric key column is used for x-axis, while categorical key columns determine color')
Sheet.addCommand('g.', 'plot-numerics', 'vd.push(GraphSheet(sheet.name, "graph", source=sheet, sourceRows=rows, xcols=keyCols, ycols=numericCols(nonKeyVisibleCols)))', 'plot a graph of all visible numeric columns vs key columns')
# swap directions of up/down
InvertedCanvas.addCommand(None, 'go-up', 'sheet.cursorBox.ymin += cursorBox.h', 'move cursor up by its height')
InvertedCanvas.addCommand(None, 'go-down', 'sheet.cursorBox.ymin -= cursorBox.h', 'move cursor down by its height')
InvertedCanvas.addCommand(None, 'go-top', 'sheet.cursorBox.ymin = visibleBox.ymax', 'move cursor to top edge of visible canvas')
InvertedCanvas.addCommand(None, 'go-bottom', 'sheet.cursorBox.ymin = visibleBox.ymin', 'move cursor to bottom edge of visible canvas')
InvertedCanvas.addCommand(None, 'go-pagedown', 't=(visibleBox.ymax-visibleBox.ymin); sheet.cursorBox.ymin -= t; sheet.visibleBox.ymin -= t; sheet.refresh()', 'move cursor down to next visible page')
InvertedCanvas.addCommand(None, 'go-pageup', 't=(visibleBox.ymax-visibleBox.ymin); sheet.cursorBox.ymin += t; sheet.visibleBox.ymin += t; sheet.refresh()', 'move cursor up to previous visible page')
InvertedCanvas.addCommand(None, 'go-down-small', 'sheet.cursorBox.ymin -= canvasCharHeight', 'move cursor down one character')
InvertedCanvas.addCommand(None, 'go-up-small', 'sheet.cursorBox.ymin += canvasCharHeight', 'move cursor up one character')
InvertedCanvas.addCommand(None, 'resize-cursor-shorter', 'sheet.cursorBox.h -= canvasCharHeight', 'decrease cursor height by one character')
InvertedCanvas.addCommand(None, 'resize-cursor-taller', 'sheet.cursorBox.h += canvasCharHeight', 'increase cursor height by one character')
@GraphSheet.api
def set_y(sheet, s):
ymin, ymax = map(float, map(sheet.parseY, s.split()))
sheet.zoomTo(BoundingBox(sheet.visibleBox.xmin, ymin, sheet.visibleBox.xmax, ymax))
sheet.refresh()
@GraphSheet.api
def set_x(sheet, s):
xmin, xmax = map(float, map(sheet.parseX, s.split()))
sheet.zoomTo(BoundingBox(xmin, sheet.visibleBox.ymin, xmax, sheet.visibleBox.ymax))
sheet.refresh()
Canvas.addCommand('y', 'resize-y-input', 'sheet.set_y(input("set ymin ymax="))', 'set ymin/ymax on graph axes')
Canvas.addCommand('x', 'resize-x-input', 'sheet.set_x(input("set xmin xmax="))', 'set xmin/xmax on graph axes')
| gpl-3.0 | -749,379,561,964,870,000 | 40.950276 | 290 | 0.650863 | false |
tjakway/Gnucash-Regex-Importer | accregex/Args.py | 1 | 2127 | import argparse
import datetime
#see http://stackoverflow.com/questions/25470844/specify-format-for-input-arguments-argparse-python
def _valid_date(s):
try:
return datetime.datetime.strptime(s, "%Y-%m-%d")
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
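# Illustration (not part of the original file):
#
#     _valid_date("2015-03-14")   # -> datetime.datetime(2015, 3, 14, 0, 0)
#     _valid_date("14/03/2015")   # -> raises argparse.ArgumentTypeError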
def get_cli_arg_parser():
parser = argparse.ArgumentParser()
#can't pass -q and -v at the same time
group = parser.add_mutually_exclusive_group()
group.add_argument('-q', '--quiet', dest='quiet', help='Suppress output', action="store_true")
group.add_argument('-v', '--verbose', dest='verbose', help='Verbose output', action="store_true")
parser.add_argument('-f', '--input-file', dest='file', required=True, help='Gnucash input file')
parser.add_argument('-r', '--rule-file', dest='rulefile', required=True, help='JSON rule file')
parser.add_argument('--inplace', dest='inplace', help='Don\'t create a backup of the Gnucash file', action="store_true")
parser.add_argument('--no-relaunch', dest='norelaunch', help="Don't relaunch with gnucash-env (should not be passed except for debugging)", action="store_true")
#date range
#start date is required, end date is optional
#see http://stackoverflow.com/questions/25470844/specify-format-for-input-arguments-argparse-python
parser.add_argument('-s', "--startdate", dest="startdate", help="The Start Date - format YYYY-MM-DD ", required=True, type=_valid_date)
parser.add_argument('-e', "--enddate", dest="enddate", help="The End Date - format YYYY-MM-DD (If no end date is specified, assumed to mean start date -> last transaction)", required=False, type=_valid_date)
return parser
def need_relaunch(argv):
return get_cli_arg_parser().parse_args(argv).norelaunch == False
def verbose_enabled(argv):
#manually search the argv array for "-v" because argparse will call _sys.exit if it doesn't
#recognize some options...
    if argv is None or argv == []:
return False
for i in argv:
if i == "-v":
return True
return False
| gpl-2.0 | 4,687,797,864,417,723,000 | 47.340909 | 212 | 0.679361 | false |
jruiperezv/ANALYSE | common/lib/xmodule/xmodule/template_module.py | 20 | 2423 | from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from lxml import etree
from mako.template import Template
class CustomTagModule(XModule):
"""
This module supports tags of the form
<customtag option="val" option2="val2" impl="tagname"/>
In this case, $tagname should refer to a file in data/custom_tags, which contains
a mako template that uses ${option} and ${option2} for the content.
For instance:
data/mycourse/custom_tags/book::
More information given in <a href="/book/${page}">the text</a>
course.xml::
...
<customtag page="234" impl="book"/>
...
Renders to::
More information given in <a href="/book/234">the text</a>
"""
def get_html(self):
return self.descriptor.rendered_html
class CustomTagDescriptor(RawDescriptor):
""" Descriptor for custom tags. Loads the template when created."""
module_class = CustomTagModule
template_dir_name = 'customtag'
def render_template(self, system, xml_data):
'''Render the template, given the definition xml_data'''
xmltree = etree.fromstring(xml_data)
if 'impl' in xmltree.attrib:
template_name = xmltree.attrib['impl']
else:
# VS[compat] backwards compatibility with old nested customtag structure
child_impl = xmltree.find('impl')
if child_impl is not None:
template_name = child_impl.text
else:
# TODO (vshnayder): better exception type
raise Exception("Could not find impl attribute in customtag {0}"
.format(self.location))
params = dict(xmltree.items())
# cdodge: look up the template as a module
template_loc = self.location.replace(category='custom_tag_template', name=template_name)
template_module = system.load_item(template_loc)
template_module_data = template_module.data
template = Template(template_module_data)
return template.render(**params)
@property
def rendered_html(self):
return self.render_template(self.system, self.data)
def export_to_file(self):
"""
Custom tags are special: since they're already pointers, we don't want
to export them in a file with yet another layer of indirection.
"""
return False
| agpl-3.0 | -6,219,778,616,322,502,000 | 32.652778 | 96 | 0.63475 | false |
waseem18/oh-mainline | mysite/customs/migrations/0004_add_newcomers_field_toroundupbugtracker.py | 17 | 3422 | # This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.customs.models import *
class Migration:
def forwards(self, orm):
# Adding field 'RoundupBugTracker.my_bugs_are_always_good_for_newcomers'
db.add_column('customs_roundupbugtracker', 'my_bugs_are_always_good_for_newcomers', orm['customs.roundupbugtracker:my_bugs_are_always_good_for_newcomers'])
def backwards(self, orm):
# Deleting field 'RoundupBugTracker.my_bugs_are_always_good_for_newcomers'
db.delete_column('customs_roundupbugtracker', 'my_bugs_are_always_good_for_newcomers')
models = {
'customs.roundupbugtracker': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_these_roundup_bug_statuses': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '255'}),
'my_bugs_are_always_good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'roundup_root_url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'customs.webresponse': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response_headers': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'text': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
'search.project': {
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
}
}
complete_apps = ['customs']
| agpl-3.0 | 288,147,291,279,919,000 | 51.646154 | 163 | 0.614553 | false |
FenceAtMHacks/flaskbackend | fence-api/flask/lib/python2.7/site-packages/setuptools/_vendor/packaging/specifiers.py | 13 | 26705 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not, if we do not support prereleases than we can short circuit
# logic if this version is a prereleases.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self._spec[0])(item, self._spec[1])
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease
and not (prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^\s]* # We just match everything, except for whitespace since this
# is a "legacy" specifier and the version string can be just
# about anything.
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
        # its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post")
and not x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec)
and self._get_operator("==")(prospective, prefix))
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
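    # Illustration (not part of the original file): with the prefix matching
    # handled above, Specifier("==2.2.*").contains("2.2.5") is True while
    # Specifier("==2.2.*").contains("2.3.1") is False.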
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Less than are defined as exclusive operators, this implies that
# pre-releases do not match for the same series as the spec. This is
# implemented by making <V imply !=V.*.
spec = Version(spec)
return (prospective < spec
and self._get_operator("!=")(prospective, str(spec) + ".*"))
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Greater than are defined as exclusive operators, this implies that
# pre-releases do not match for the same series as the spec. This is
# implemented by making >V imply !=V.*.
spec = Version(spec)
return (prospective > spec
and self._get_operator("!=")(prospective, str(spec) + ".*"))
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~="]:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
    left_split.append(left[len(left_split[0]):])
    right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
# Split on , to break each indidivual specifier into it's own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parsed each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
# Note: The use of any() here means that an empty set of specifiers
# will always return False, this is an explicit design decision.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
if (not (self.prereleases or prereleases)) and item.is_prerelease:
return False
# Determine if we're forcing a prerelease or not, we bypass
# self.prereleases here and use self._prereleases because we want to
# only take into consideration actual *forced* values. The underlying
# specifiers will handle the other logic.
# The logic here is: If prereleases is anything but None, we'll just
        #                    go ahead and continue to use that. However if
# prereleases is None, then we'll use whatever the
# value of self._prereleases is as long as it is not
# None itself.
if prereleases is None and self._prereleases is not None:
prereleases = self._prereleases
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, we bypass
# self.prereleases here and use self._prereleases because we want to
# only take into consideration actual *forced* values. The underlying
# specifiers will handle the other logic.
# The logic here is: If prereleases is anything but None, we'll just
        #                    go ahead and continue to use that. However if
# prereleases is None, then we'll use whatever the
# value of self._prereleases is as long as it is not
# None itself.
if prereleases is None and self._prereleases is not None:
prereleases = self._prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=prereleases)
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
# Ensure that we some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
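# Illustrative usage of SpecifierSet (not part of the original file):
#
#     spec = SpecifierSet(">=1.0,!=1.3.4,<2.0")
#     spec.contains("1.5")                          # -> True
#     list(spec.filter(["1.2", "1.3.4", "2.1"]))    # -> ["1.2"]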
| mit | -5,997,865,656,622,859,000 | 35.382834 | 79 | 0.555814 | false |
ets-labs/python-dependency-injector | tests/unit/providers/test_container_py2_py3.py | 1 | 9113 | """Dependency injector container provider unit tests."""
import copy
import unittest
from dependency_injector import containers, providers, errors
TEST_VALUE_1 = 'core_section_value1'
TEST_CONFIG_1 = {
'core': {
'section': {
'value': TEST_VALUE_1,
},
},
}
TEST_VALUE_2 = 'core_section_value2'
TEST_CONFIG_2 = {
'core': {
'section': {
'value': TEST_VALUE_2,
},
},
}
def _copied(value):
return copy.deepcopy(value)
class TestCore(containers.DeclarativeContainer):
config = providers.Configuration('core')
value_getter = providers.Callable(lambda _: _, config.section.value)
class TestApplication(containers.DeclarativeContainer):
config = providers.Configuration('config')
core = providers.Container(TestCore, config=config.core)
dict_factory = providers.Factory(dict, value=core.value_getter)
class ContainerTests(unittest.TestCase):
def test(self):
application = TestApplication(config=_copied(TEST_CONFIG_1))
self.assertEqual(application.dict_factory(), {'value': TEST_VALUE_1})
def test_double_override(self):
application = TestApplication()
application.config.override(_copied(TEST_CONFIG_1))
application.config.override(_copied(TEST_CONFIG_2))
self.assertEqual(application.dict_factory(), {'value': TEST_VALUE_2})
def test_override(self):
# See: https://github.com/ets-labs/python-dependency-injector/issues/354
class D(containers.DeclarativeContainer):
foo = providers.Object('foo')
class A(containers.DeclarativeContainer):
d = providers.DependenciesContainer()
bar = providers.Callable(lambda f: f + '++', d.foo.provided)
class B(containers.DeclarativeContainer):
d = providers.Container(D)
a = providers.Container(A, d=d)
b = B(d=D())
result = b.a().bar()
self.assertEqual(result, 'foo++')
def test_override_not_root_provider(self):
# See: https://github.com/ets-labs/python-dependency-injector/issues/379
class NestedContainer(containers.DeclarativeContainer):
settings = providers.Configuration()
print_settings = providers.Callable(
lambda s: s,
settings,
)
class TestContainer(containers.DeclarativeContainer):
settings = providers.Configuration()
root_container = providers.Container(
NestedContainer,
settings=settings,
)
not_root_container = providers.Selector(
settings.container,
using_factory=providers.Factory(
NestedContainer,
settings=settings,
),
using_container=providers.Container(
NestedContainer,
settings=settings,
)
)
container_using_factory = TestContainer(settings=dict(
container='using_factory',
foo='bar'
))
self.assertEqual(
container_using_factory.root_container().print_settings(),
{'container': 'using_factory', 'foo': 'bar'},
)
self.assertEqual(
container_using_factory.not_root_container().print_settings(),
{'container': 'using_factory', 'foo': 'bar'},
)
container_using_container = TestContainer(settings=dict(
container='using_container',
foo='bar'
))
self.assertEqual(
container_using_container.root_container().print_settings(),
{'container': 'using_container', 'foo': 'bar'},
)
self.assertEqual(
container_using_container.not_root_container().print_settings(),
{'container': 'using_container', 'foo': 'bar'},
)
def test_override_by_not_a_container(self):
provider = providers.Container(TestCore)
with self.assertRaises(errors.Error):
provider.override(providers.Object('foo'))
def test_lazy_overriding(self):
# See: https://github.com/ets-labs/python-dependency-injector/issues/354
class D(containers.DeclarativeContainer):
foo = providers.Object("foo")
class A(containers.DeclarativeContainer):
d = providers.DependenciesContainer()
bar = providers.Callable(lambda f: f + "++", d.foo.provided)
class B(containers.DeclarativeContainer):
d = providers.DependenciesContainer()
a = providers.Container(A, d=d)
b = B(d=D())
result = b.a().bar()
self.assertEqual(result, 'foo++')
def test_lazy_overriding_deep(self):
# Extended version of test_lazy_overriding()
class D(containers.DeclarativeContainer):
foo = providers.Object("foo")
class C(containers.DeclarativeContainer):
d = providers.DependenciesContainer()
bar = providers.Callable(lambda f: f + "++", d.foo.provided)
class A(containers.DeclarativeContainer):
d = providers.DependenciesContainer()
c = providers.Container(C, d=d)
class B(containers.DeclarativeContainer):
d = providers.DependenciesContainer()
a = providers.Container(A, d=d)
b = B(d=D())
result = b.a().c().bar()
self.assertEqual(result, 'foo++')
def test_reset_last_overriding(self):
application = TestApplication(config=_copied(TEST_CONFIG_1))
application.core.override(TestCore(config=_copied(TEST_CONFIG_2['core'])))
application.core.reset_last_overriding()
self.assertEqual(application.dict_factory(), {'value': TEST_VALUE_1})
def test_reset_last_overriding_only_overridden(self):
application = TestApplication(config=_copied(TEST_CONFIG_1))
application.core.override(providers.DependenciesContainer(config=_copied(TEST_CONFIG_2['core'])))
application.core.reset_last_overriding()
self.assertEqual(application.dict_factory(), {'value': TEST_VALUE_1})
def test_override_context_manager(self):
application = TestApplication(config=_copied(TEST_CONFIG_1))
overriding_core = TestCore(config=_copied(TEST_CONFIG_2['core']))
with application.core.override(overriding_core) as context_core:
self.assertEqual(application.dict_factory(), {'value': TEST_VALUE_2})
self.assertIs(context_core(), overriding_core)
self.assertEqual(application.dict_factory(), {'value': TEST_VALUE_1})
def test_reset_override(self):
application = TestApplication(config=_copied(TEST_CONFIG_1))
application.core.override(TestCore(config=_copied(TEST_CONFIG_2['core'])))
application.core.reset_override()
self.assertEqual(application.dict_factory(), {'value': None})
def test_reset_override_only_overridden(self):
application = TestApplication(config=_copied(TEST_CONFIG_1))
application.core.override(providers.DependenciesContainer(config=_copied(TEST_CONFIG_2['core'])))
application.core.reset_override()
self.assertEqual(application.dict_factory(), {'value': None})
def test_assign_parent(self):
parent = providers.DependenciesContainer()
provider = providers.Container(TestCore)
provider.assign_parent(parent)
self.assertIs(provider.parent, parent)
def test_parent_name(self):
container = containers.DynamicContainer()
provider = providers.Container(TestCore)
container.name = provider
self.assertEqual(provider.parent_name, 'name')
def test_parent_name_with_deep_parenting(self):
provider = providers.Container(TestCore)
container = providers.DependenciesContainer(name=provider)
_ = providers.DependenciesContainer(container=container)
self.assertEqual(provider.parent_name, 'container.name')
def test_parent_name_is_none(self):
provider = providers.Container(TestCore)
self.assertIsNone(provider.parent_name)
def test_parent_deepcopy(self):
container = containers.DynamicContainer()
provider = providers.Container(TestCore)
container.name = provider
copied = providers.deepcopy(container)
self.assertIs(container.name.parent, container)
self.assertIs(copied.name.parent, copied)
self.assertIsNot(container, copied)
self.assertIsNot(container.name, copied.name)
self.assertIsNot(container.name.parent, copied.name.parent)
def test_resolve_provider_name(self):
container = providers.Container(TestCore)
self.assertEqual(container.resolve_provider_name(container.value_getter), 'value_getter')
def test_resolve_provider_name_no_provider(self):
container = providers.Container(TestCore)
with self.assertRaises(errors.Error):
container.resolve_provider_name(providers.Provider())
| bsd-3-clause | -4,025,203,194,327,189,500 | 33.259398 | 105 | 0.634368 | false |
david30907d/feedback_django | spirit/comment/poll/managers.py | 2 | 1541 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models.loading import get_model
from django.db.models import Prefetch
class CommentPollQuerySet(models.QuerySet):
def unremoved(self):
return self.filter(is_removed=False)
def for_comment(self, comment):
return self.filter(comment=comment)
def with_choices(self):
choice_model = get_model('spirit_comment_poll.CommentPollChoice')
visible_choices = choice_model.objects.unremoved()
prefetch_choices = Prefetch("poll_choices", queryset=visible_choices, to_attr='choices')
return self.prefetch_related(prefetch_choices)
class CommentPollChoiceQuerySet(models.QuerySet):
def unremoved(self):
return self.filter(is_removed=False)
def for_comment(self, comment):
return self.filter(poll__comment=comment)
def for_poll(self, poll):
return self.filter(poll=poll)
def for_voter(self, voter):
return self.filter(
choice_votes__voter=voter,
choice_votes__is_removed=False
)
def for_vote(self, poll, voter):
return self \
.for_poll(poll) \
.for_voter(voter) \
.unremoved()
class CommentPollVoteQuerySet(models.QuerySet):
def unremoved(self):
return self.filter(is_removed=False)
def for_voter(self, user):
return self.filter(voter=user)
def for_choice(self, choice):
return self.filter(choice=choice)
| mit | -6,985,964,599,124,078,000 | 25.118644 | 96 | 0.658663 | false |
debugger87/spark | python/setup.py | 5 | 10182 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
sys.exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm exists under while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/utils.py and
# ./python/run-tests.py. In case of Arrow, you should also check ./pom.xml.
_minimum_pandas_version = "0.19.2"
_minimum_pyarrow_version = "0.8.0"
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='[email protected]',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.7'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 | -3,248,820,117,489,993,000 | 41.07438 | 100 | 0.630426 | false |
kkintaro/termite-data-server | web2py/gluon/contrib/DowCommerce.py | 16 | 9206 | """
DowCommerce class to process credit card payments with DowCommerce.com
Modifications to support Dow Commerce API from code originally written by John Conde
http://www.johnconde.net/blog/integrate-the-authorizenet-aim-api-with-python-3-2/
Unknown license, assuming public domain
Modifed by Dave Stoll [email protected]
- modifed to support Dow Commerce API
"""
__all__ = ['DowCommerce']
from operator import itemgetter
import urllib
class DowCommerce:
class DowCommerceError(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return str(self.parameter)
def __init__(self, username=None, password=None, demomode=False):
if not demomode:
if str(username).strip() == '' or username is None:
raise DowCommerce.DowCommerceError('No username provided')
if str(password).strip() == '' or password is None:
raise DowCommerce.DowCommerceError('No password provided')
else:
username = 'demo'
password = 'password'
self.proxy = None
self.delimiter = '&'
self.results = {}
self.error = True
self.success = False
self.declined = False
self.url = 'https://secure.dowcommerce.net/api/transact.php'
self.parameters = {}
self.setParameter('username', username)
self.setParameter('password', password)
def process(self):
encoded_args = urllib.urlencode(self.parameters)
if self.proxy is None:
results = str(urllib.urlopen(
self.url, encoded_args).read()).split(self.delimiter)
else:
opener = urllib.FancyURLopener(self.proxy)
opened = opener.open(self.url, encoded_args)
try:
results = str(opened.read()).split(self.delimiter)
finally:
opened.close()
for result in results:
(key, val) = result.split('=')
self.results[key] = val
if self.results['response'] == '1':
self.error = False
self.success = True
self.declined = False
elif self.results['response'] == '2':
self.error = False
self.success = False
self.declined = True
elif self.results['response'] == '3':
self.error = True
self.success = False
self.declined = False
else:
self.error = True
self.success = False
self.declined = False
raise DowCommerce.DowCommerceError(self.results)
def setTransaction(
self, creditcard, expiration, total, cvv=None, orderid=None, orderdescription=None,
ipaddress=None, tax=None, shipping=None,
firstname=None, lastname=None, company=None, address1=None, address2=None, city=None, state=None, zipcode=None,
country=None, phone=None, fax=None, emailaddress=None, website=None,
shipping_firstname=None, shipping_lastname=None, shipping_company=None, shipping_address1=None, shipping_address2=None,
shipping_city=None, shipping_state=None, shipping_zipcode=None, shipping_country=None, shipping_emailaddress=None):
if str(creditcard).strip() == '' or creditcard is None:
raise DowCommerce.DowCommerceError('No credit card number passed to setTransaction(): {0}'.format(creditcard))
if str(expiration).strip() == '' or expiration is None:
raise DowCommerce.DowCommerceError('No expiration number passed to setTransaction(): {0}'.format(expiration))
if str(total).strip() == '' or total is None:
raise DowCommerce.DowCommerceError('No total amount passed to setTransaction(): {0}'.format(total))
self.setParameter('ccnumber', creditcard)
self.setParameter('ccexp', expiration)
self.setParameter('amount', total)
if cvv:
self.setParameter('cvv', cvv)
if orderid:
self.setParameter('orderid', orderid)
if orderdescription:
self.setParameter('orderdescription', orderdescription)
if ipaddress:
self.setParameter('ipaddress', ipaddress)
if tax:
self.setParameter('tax', tax)
if shipping:
self.setParameter('shipping', shipping)
## billing info
if firstname:
self.setParameter('firstname', firstname)
if lastname:
self.setParameter('lastname', lastname)
if company:
self.setParameter('company', company)
if address1:
self.setParameter('address1', address1)
if address2:
self.setParameter('address2', address2)
if city:
self.setParameter('city', city)
if state:
self.setParameter('state', state)
if zipcode:
self.setParameter('zip', zipcode)
if country:
self.setParameter('country', country)
if phone:
self.setParameter('phone', phone)
if fax:
self.setParameter('fax', fax)
if emailaddress:
self.setParameter('email', emailaddress)
if website:
self.setParameter('website', website)
## shipping info
if shipping_firstname:
self.setParameter('shipping_firstname', shipping_firstname)
if shipping_lastname:
self.setParameter('shipping_lastname', shipping_lastname)
if shipping_company:
self.setParameter('shipping_company', shipping_company)
if shipping_address1:
self.setParameter('shipping_address1', shipping_address1)
if shipping_address2:
self.setParameter('shipping_address2', shipping_address2)
if shipping_city:
self.setParameter('shipping_city', shipping_city)
if shipping_state:
self.setParameter('shipping_state', shipping_state)
if shipping_zipcode:
self.setParameter('shipping_zip', shipping_zipcode)
if shipping_country:
self.setParameter('shipping_country', shipping_country)
def setTransactionType(self, transtype=None):
types = ['sale', 'auth', 'credit']
if transtype.lower() not in types:
raise DowCommerce.DowCommerceError('Incorrect Transaction Type passed to setTransactionType(): {0}'.format(transtype))
self.setParameter('type', transtype.lower())
def setProxy(self, proxy=None):
if str(proxy).strip() == '' or proxy is None:
raise DowCommerce.DowCommerceError('No proxy passed to setProxy()')
self.proxy = {'http': str(proxy).strip()}
def setParameter(self, key=None, value=None):
if key is not None and value is not None and str(key).strip() != '' and str(value).strip() != '':
self.parameters[key] = str(value).strip()
else:
raise DowCommerce.DowCommerceError('Incorrect parameters passed to setParameter(): {0}:{1}'.format(key, value))
def isApproved(self):
return self.success
def isDeclined(self):
return self.declined
def isError(self):
return self.error
def getResultResponseShort(self):
responses = ['', 'Approved', 'Declined', 'Error']
return responses[int(self.results['response'])]
def getFullResponse(self):
return self.results
def getResponseText(self):
return self.results['responsetext']
def test():
import socket
import sys
from time import time
## TEST VALUES FROM API DOC:
# Visa: 4111111111111111
# MasterCard 5431111111111111
# DiscoverCard: 6011601160116611
# American Express: 341111111111111
# Expiration: 10/10
# Amount: > 1.00 (( passing less than $1.00 will cause it to be declined ))
# CVV: 999
creditcard = '4111111111111111'
expiration = '1010'
total = '1.00'
cvv = '999'
tax = '0.00'
orderid = str(time())[4:10] # get a random invoice number
try:
payment = DowCommerce(demomode=True)
payment.setTransaction(
creditcard, expiration, total, cvv=cvv, tax=tax, orderid=orderid, orderdescription='Test Transaction',
firstname='John', lastname='Doe', company='Acme', address1='123 Min Street', city='Hometown', state='VA',
zipcode='12345', country='US', phone='888-555-1212', emailaddress='[email protected]', ipaddress='192.168.1.1')
payment.process()
if payment.isApproved():
print 'Payment approved!'
print payment.getFullResponse()
elif payment.isDeclined():
print 'Your credit card was declined by your bank'
elif payment.isError():
raise DowCommerce.DowCommerceError('An uncaught error occurred')
except DowCommerce.DowCommerceError, e:
print "Exception thrown:", e
print 'An error occured'
print 'approved', payment.isApproved()
print 'declined', payment.isDeclined()
print 'error', payment.isError()
if __name__ == '__main__':
test()
| bsd-3-clause | 219,258,216,984,566,080 | 36.729508 | 130 | 0.613404 | false |
trishume/MacRanger | ranger/gui/widgets/console.py | 1 | 15189 | # Copyright (C) 2009-2013 Roman Zimbelmann <[email protected]>
# This software is distributed under the terms of the GNU GPL version 3.
"""The Console widget implements a vim-like console"""
import curses
import re
from collections import deque
from . import Widget
from ranger.ext.direction import Direction
from ranger.ext.widestring import uwid, WideString
from ranger.container.history import History, HistoryEmptyException
import ranger
class Console(Widget):
visible = False
last_cursor_mode = None
history_search_pattern = None
prompt = ':'
copy = ''
tab_deque = None
original_line = None
history = None
history_backup = None
override = None
allow_close = False
historypath = None
wait_for_command_input = False
unicode_buffer = ""
def __init__(self, win):
Widget.__init__(self, win)
self.clear()
self.history = History(self.settings.max_console_history_size)
# load history from files
if not ranger.arg.clean:
self.historypath = self.fm.confpath('history')
try:
f = open(self.historypath, 'r')
except:
pass
else:
for line in f:
self.history.add(line[:-1])
f.close()
self.line = ""
self.history_backup = History(self.history)
# NOTE: the console is considered in the "question mode" when the
# question_queue is non-empty. In that case, the console will draw the
# question instead of the regular console, and the input you give is
# used to answer the question instead of typing in commands.
#
# A question is a tuple of (question_string, callback_func,
# tuple_of_choices). callback_func is a function that is called when
# the question is answered which gets the answer as an argument.
# tuple_of_choices looks like ('y', 'n'). Only one-letter-answers are
# currently supported. Pressing enter uses the first choice whereas
# pressing ESC uses the second choice.
self.question_queue = []
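        # Example entry (hypothetical values, for illustration only):
        #   ("Overwrite the file? (y/n)", lambda answer: None, ('y', 'n'))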
def destroy(self):
# save history to files
if ranger.arg.clean or not self.settings.save_console_history:
return
if self.historypath:
try:
f = open(self.historypath, 'w')
except:
pass
else:
for entry in self.history_backup:
try:
f.write(entry + '\n')
except UnicodeEncodeError:
pass
f.close()
def draw(self):
self.win.erase()
if self.question_queue:
assert isinstance(self.question_queue[0], tuple)
assert len(self.question_queue[0]) == 3
self.addstr(0, 0, self.question_queue[0][0])
return
self.addstr(0, 0, self.prompt)
line = WideString(self.line)
overflow = -self.wid + len(self.prompt) + len(line) + 1
if overflow > 0:
self.addstr(0, len(self.prompt), str(line[overflow:]))
else:
self.addstr(0, len(self.prompt), self.line)
def finalize(self):
move = self.fm.ui.win.move
if self.question_queue:
try:
move(self.y, len(self.question_queue[0][0]))
except:
pass
else:
try:
pos = uwid(self.line[0:self.pos]) + len(self.prompt)
move(self.y, self.x + min(self.wid-1, pos))
except:
pass
def open(self, string='', prompt=None, position=None):
if prompt is not None:
assert isinstance(prompt, str)
self.prompt = prompt
elif 'prompt' in self.__dict__:
del self.prompt
if self.last_cursor_mode is None:
try:
self.last_cursor_mode = curses.curs_set(1)
except:
pass
self.allow_close = False
self.tab_deque = None
self.unicode_buffer = ""
self.line = string
self.history_search_pattern = self.line
self.pos = len(string)
if position is not None:
self.pos = min(self.pos, position)
self.history_backup.fast_forward()
self.history = History(self.history_backup)
self.history.add('')
self.wait_for_command_input = True
return True
def close(self, trigger_cancel_function=True):
if self.question_queue:
question = self.question_queue[0]
answers = question[2]
if len(answers) >= 2:
self._answer_question(answers[1])
else:
self._close_command_prompt(trigger_cancel_function)
def _close_command_prompt(self, trigger_cancel_function=True):
if trigger_cancel_function:
cmd = self._get_cmd(quiet=True)
if cmd:
try:
cmd.cancel()
except Exception as error:
self.fm.notify(error)
if self.last_cursor_mode is not None:
try:
curses.curs_set(self.last_cursor_mode)
except:
pass
self.last_cursor_mode = None
self.fm.hide_console_info()
self.add_to_history()
self.tab_deque = None
self.clear()
self.__class__ = Console
self.wait_for_command_input = False
def clear(self):
self.pos = 0
self.line = ''
def press(self, key):
self.fm.ui.keymaps.use_keymap('console')
if not self.fm.ui.press(key):
self.type_key(key)
def _answer_question(self, answer):
if not self.question_queue:
return False
question = self.question_queue[0]
text, callback, answers = question
if answer in answers:
self.question_queue.pop(0)
callback(answer)
return True
return False
def type_key(self, key):
self.tab_deque = None
line = "" if self.question_queue else self.line
result = self._add_character(key, self.unicode_buffer, line, self.pos)
if result[1] == line:
# line didn't change, so we don't need to do anything, just update
            # the unicode buffer.
self.unicode_buffer = result[0]
return
if self.question_queue:
self.unicode_buffer, answer, self.pos = result
self._answer_question(answer)
else:
self.unicode_buffer, self.line, self.pos = result
self.on_line_change()
def _add_character(self, key, unicode_buffer, line, pos):
# Takes the pressed key, a string "unicode_buffer" containing a
# potentially incomplete unicode character, the current line and the
# position of the cursor inside the line.
# This function returns the new unicode buffer, the modified line and
# position.
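        # Example (sketch): under Python 3 a multi-byte UTF-8 character arrives
        # as several single-byte key presses; the first byte only grows
        # unicode_buffer while line stays unchanged, and once the buffer
        # decodes cleanly the decoded character is spliced into line at pos.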
if isinstance(key, int):
try:
key = chr(key)
except ValueError:
return unicode_buffer, line, pos
if self.fm.py3:
unicode_buffer += key
try:
decoded = unicode_buffer.encode("latin-1").decode("utf-8")
except UnicodeDecodeError:
return unicode_buffer, line, pos
except UnicodeEncodeError:
return unicode_buffer, line, pos
else:
unicode_buffer = ""
if pos == len(line):
line += decoded
else:
line = line[:pos] + decoded + line[pos:]
pos += len(decoded)
else:
if pos == len(line):
line += key
else:
line = line[:pos] + key + line[pos:]
pos += len(key)
return unicode_buffer, line, pos
def history_move(self, n):
try:
current = self.history.current()
except HistoryEmptyException:
pass
else:
if self.line != current and self.line != self.history.top():
self.history.modify(self.line)
if self.history_search_pattern:
self.history.search(self.history_search_pattern, n)
else:
self.history.move(n)
current = self.history.current()
if self.line != current:
self.line = self.history.current()
self.pos = len(self.line)
def add_to_history(self):
self.history_backup.fast_forward()
self.history_backup.add(self.line)
self.history = History(self.history_backup)
def move(self, **keywords):
direction = Direction(keywords)
if direction.horizontal():
# Ensure that the pointer is moved utf-char-wise
if self.fm.py3:
self.pos = direction.move(
direction=direction.right(),
minimum=0,
maximum=len(self.line) + 1,
current=self.pos)
else:
if self.fm.py3:
uc = list(self.line)
upos = len(self.line[:self.pos])
else:
uc = list(self.line.decode('utf-8', 'ignore'))
upos = len(self.line[:self.pos].decode('utf-8', 'ignore'))
newupos = direction.move(
direction=direction.right(),
minimum=0,
maximum=len(uc) + 1,
current=upos)
self.pos = len(''.join(uc[:newupos]).encode('utf-8', 'ignore'))
def delete_rest(self, direction):
self.tab_deque = None
if direction > 0:
self.copy = self.line[self.pos:]
self.line = self.line[:self.pos]
else:
self.copy = self.line[:self.pos]
self.line = self.line[self.pos:]
self.pos = 0
self.on_line_change()
def paste(self):
if self.pos == len(self.line):
self.line += self.copy
else:
self.line = self.line[:self.pos] + self.copy + self.line[self.pos:]
self.pos += len(self.copy)
self.on_line_change()
def delete_word(self, backward=True):
if self.line:
self.tab_deque = None
if backward:
right_part = self.line[self.pos:]
i = self.pos - 2
while i >= 0 and re.match(r'[\w\d]', self.line[i], re.U):
i -= 1
self.copy = self.line[i + 1:self.pos]
self.line = self.line[:i + 1] + right_part
self.pos = i + 1
else:
left_part = self.line[:self.pos]
i = self.pos + 1
while i < len(self.line) and re.match(r'[\w\d]', self.line[i], re.U):
i += 1
self.copy = self.line[self.pos:i]
if i >= len(self.line):
self.line = left_part
self.pos = len(self.line)
else:
self.line = left_part + self.line[i:]
self.pos = len(left_part)
self.on_line_change()
def delete(self, mod):
self.tab_deque = None
if mod == -1 and self.pos == 0:
if not self.line:
self.close(trigger_cancel_function=False)
return
# Delete utf-char-wise
if self.fm.py3:
left_part = self.line[:self.pos + mod]
self.pos = len(left_part)
self.line = left_part + self.line[self.pos + 1:]
else:
uc = list(self.line.decode('utf-8', 'ignore'))
upos = len(self.line[:self.pos].decode('utf-8', 'ignore')) + mod
left_part = ''.join(uc[:upos]).encode('utf-8', 'ignore')
self.pos = len(left_part)
self.line = left_part + ''.join(uc[upos+1:]).encode('utf-8', 'ignore')
self.on_line_change()
def execute(self, cmd=None):
if self.question_queue and cmd is None:
question = self.question_queue[0]
answers = question[2]
if len(answers) >= 1:
self._answer_question(answers[0])
else:
self.question_queue.pop(0)
return
self.allow_close = True
self.fm.execute_console(self.line)
if self.allow_close:
self._close_command_prompt(trigger_cancel_function=False)
def _get_cmd(self, quiet=False):
try:
command_class = self._get_cmd_class()
except KeyError:
if not quiet:
error = "Command not found: `%s'" % self.line.split()[0]
self.fm.notify(error, bad=True)
except:
return None
else:
return command_class(self.line)
def _get_cmd_class(self):
return self.fm.commands.get_command(self.line.split()[0])
def _get_tab(self):
if ' ' in self.line:
cmd = self._get_cmd()
if cmd:
return cmd.tab()
else:
return None
return self.fm.commands.command_generator(self.line)
def tab(self, n=1):
if self.tab_deque is None:
tab_result = self._get_tab()
if isinstance(tab_result, str):
self.line = tab_result
self.pos = len(tab_result)
self.on_line_change()
            elif tab_result is None:
pass
elif hasattr(tab_result, '__iter__'):
self.tab_deque = deque(tab_result)
self.tab_deque.appendleft(self.line)
if self.tab_deque is not None:
self.tab_deque.rotate(-n)
self.line = self.tab_deque[0]
self.pos = len(self.line)
self.on_line_change()
def on_line_change(self):
self.history_search_pattern = self.line
try:
cls = self._get_cmd_class()
except (KeyError, ValueError, IndexError):
pass
else:
cmd = cls(self.line)
if cmd and cmd.quick():
self.execute(cmd)
def ask(self, text, callback, choices=['y', 'n']):
"""Open a question prompt with predefined choices
The "text" is displayed as the question text and should include a list
of possible keys that the user can type. The "callback" is a function
that is called when the question is answered. It only gets the answer
as an argument. "choices" is a tuple of one-letter strings that can be
typed in by the user. Every other input gets ignored, except <Enter>
and <ESC>.
The first choice is used when the user presses <Enter>, the second
choice is used when the user presses <ESC>.
"""
self.question_queue.append((text, callback, choices))
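# Usage sketch (hypothetical caller, names are illustrative): ask() is meant to
# be driven from a command or the fm object, for example:
#
#   def confirm_delete(console, path, do_delete):
#       def on_answer(answer):
#           if answer == 'y':
#               do_delete(path)
#       console.ask("Really delete %s? (y/n)" % path, on_answer, ('y', 'n'))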
| gpl-3.0 | -1,572,149,570,765,976,300 | 33.997696 | 85 | 0.524656 | false |
egbertbouman/tribler-g | Tribler/Plugin/Search.py | 1 | 35105 | # Written by Arno Bakker, Diego Rabioli
# see LICENSE.txt for license information
#
# TODO:
# - Switch to SIMPLE+METADATA query
#
# - adjust SIMPLE+METADATA such that it returns P2PURLs if possible.
# - DO NOT SAVE P2PURLs as .torrent, put in 'torrent_file_name' field in DB.
#
# - Implement continuous dump of results to JS. I.e. push sorting and
# rendering to browser.
# * One option is RFC5023: Atom Pub Proto, $10.1 "Collecting Partial
# Lists" I.e. return a partial list and add a
#     <link rel="next" href="/.../next10"> tag pointing
# to the next set. See http://www.iana.org/assignments/link-relations/link-relations.xhtml
# for def of next/first/last, etc. link relations.
#
#   Arno, 2009-10-10: we currently add such a <link rel="next"> link,
# which contains a URL that will give all hits found so far. So
# people should poll this URL.
#
# - Background thread to save torrentfiles to localdb.
# Arno, 2009-12-03: Now offloaded to a new TimedTaskQueue.
#
#
# - garbage collect hits at connection close.
# Not vital, current mechanism will GC.
#
# - Support for multifile torrents
#
# - BuddyCast hits: Create LIVE MPEG7 fields for live (i.e., livetimepoint)
# and VOD MPEG7 fields for VOD.
#
# - Use separate HTTP server, Content-serving one needs to be single-threaded
# at the moment to prevent concurrent RANGE queries on same stream from VLC.
# Alternative is to put a Condition variable on a content stream.
#
# Arno, 2009-12-4: I've added locks per content URL and made
# VideoHTTPServer multithreaded and it now also serves the search traffic.
#
# - Debug hanging searches on Windows. May be due to "incomplete outbound TCP
# connection" limit, see Encrypter.py :-( I get timeouts opening the feeds
# listed in the metafeed, whilst the feed server is responding fast.
# Lowering Encrypter's MAX_INCOMPLETE doesn't help. Alt is to periodically
# parse the feeds and store the results.
#
# Arno, 2009-12-4: Problem still exists. Note that TCP limit has been
# lifted on Windows > Vista SP2.
#
# - Update VLC plugin-1.0.1 such that it doesn't show a video window when
# target is empty.
#
# Arno, 2009-12-4: At the moment, setting the window size to (0,0) and
# not providing a URL of a torrent works.
#
# - query_connected_peers() now returns Unicode names, make sure these are
# properly handled when generating HTML output.
import sys
import time
import random
import urllib
import urlparse
import cgi
import binascii
import copy
from cStringIO import StringIO
from traceback import print_exc,print_stack
from threading import RLock
from Tribler.Core.API import *
from Tribler.Core.BitTornado.bencode import *
from Tribler.Core.Utilities.utilities import get_collected_torrent_filename
from Tribler.Video.VideoServer import AbstractPathMapper
from Tribler.Plugin.defs import *
from Tribler.Plugin.AtomFeedParser import *
DEBUG = False
P2PQUERYTYPE = "SIMPLE"
def streaminfo404():
return {'statuscode':404, 'statusmsg':'404 Not Found'}
class SearchPathMapper(AbstractPathMapper):
def __init__(self,session,id2hits,tqueue):
self.session = session
self.id2hits = id2hits
self.tqueue = tqueue
self.metafp = None
self.metafeedurl = None
def get(self,urlpath):
"""
Possible paths:
/search<application/x-www-form-urlencoded query>
"""
if not urlpath.startswith(URLPATH_SEARCH_PREFIX):
return streaminfo404()
fakeurl = 'http://127.0.0.1'+urlpath
o = urlparse.urlparse(fakeurl)
qdict = cgi.parse_qs(o[4])
if DEBUG:
print >>sys.stderr,"searchmap: qdict",qdict
searchstr = qdict['q'][0]
searchstr = searchstr.strip()
collection = qdict['collection'][0]
metafeedurl = qdict['metafeed'][0]
print >>sys.stderr,"\nbg: search: Got search for",`searchstr`,"in",collection
# Garbage collect:
self.id2hits.garbage_collect_timestamp_smaller(time.time() - HITS_TIMEOUT)
if collection == "metafeed":
if not self.check_reload_metafeed(metafeedurl):
return {'statuscode':504, 'statusmsg':'504 MetaFeed server did not respond'}
return self.process_search_metafeed(searchstr)
else:
return self.process_search_p2p(searchstr)
def process_search_metafeed(self,searchstr):
""" Search for hits in the ATOM feeds we got from the meta feed """
allhits = []
for feedurl in self.metafp.get_feedurls():
feedp = FeedParser(feedurl)
try:
feedp.parse()
except:
# TODO: return 504 gateway error if none of the feeds return anything
print_exc()
hits = feedp.search(searchstr)
allhits.extend(hits)
for hitentry in allhits:
titleelement = hitentry.find('{http://www.w3.org/2005/Atom}title')
print >>sys.stderr,"bg: search: meta: Got hit",titleelement.text
id = str(random.random())[2:]
atomurlpathprefix = URLPATH_HITS_PREFIX+'/'+str(id)
atomxml = feedhits2atomxml(allhits,searchstr,atomurlpathprefix)
atomstream = StringIO(atomxml)
atomstreaminfo = { 'statuscode':200,'mimetype': 'application/atom+xml', 'stream': atomstream, 'length': len(atomxml)}
return atomstreaminfo
def process_search_p2p(self,searchstr):
""" Search for hits in local database and perform remote query.
EXPERIMENTAL: needs peers with SIMPLE+METADATA query support.
"""
# Initially, searchstr = keywords
keywords = searchstr.split()
id = str(random.random())[2:]
self.id2hits.add_query(id,searchstr,time.time())
# Parallel: initiate remote query
q = P2PQUERYTYPE+' '+searchstr
print >>sys.stderr,"bg: search: Send remote query for",q
got_remote_hits_lambda = lambda permid,query,remotehits:self.sesscb_got_remote_hits(id,permid,query,remotehits)
self.st = time.time()
self.session.query_connected_peers(q,got_remote_hits_lambda,max_peers_to_query=20)
# Query local DB while waiting
torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
localdbhits = torrent_db.searchNames(keywords)
print >>sys.stderr,"bg: search: Local hits",len(localdbhits)
self.session.close_dbhandler(torrent_db)
# Convert list to dict keyed by infohash
localhits = localdbhits2hits(localdbhits)
self.id2hits.add_hits(id,localhits)
# TODO ISSUE: incremental display of results to user? How to implement this?
atomurlpathprefix = URLPATH_HITS_PREFIX+'/'+str(id)
nextlinkpath = atomurlpathprefix
if False:
# Return ATOM feed directly
atomhits = hits2atomhits(localhits,atomurlpathprefix)
atomxml = atomhits2atomxml(atomhits,searchstr,atomurlpathprefix,nextlinkpath=nextlinkpath)
atomstream = StringIO(atomxml)
atomstreaminfo = { 'statuscode':200,'mimetype': 'application/atom+xml', 'stream': atomstream, 'length': len(atomxml)}
return atomstreaminfo
else:
# Return redirect to ATOM feed URL, this allows us to do a page
# page reload to show remote queries that have come in (DEMO)
streaminfo = { 'statuscode':301,'statusmsg':nextlinkpath }
return streaminfo
def sesscb_got_remote_hits(self,id,permid,query,remotehits):
# Called by SessionCallback thread
try:
et = time.time()
diff = et - self.st
print >>sys.stderr,"bg: search: Got",len(remotehits),"remote hits" # ,"after",diff
hits = remotehits2hits(remotehits)
self.id2hits.add_hits(id,hits)
if P2PQUERYTYPE=="SIMPLE+METADATA":
bgsearch_save_remotehits_lambda = lambda:self.tqueue_save_remote_hits(remotehits)
self.tqueue.add_task(bgsearch_save_remotehits_lambda,0)
except:
print_exc()
def check_reload_metafeed(self,metafeedurl):
if self.metafeedurl is None or self.metafeedurl != metafeedurl:
self.metafp = MetaFeedParser(metafeedurl)
try:
self.metafp.parse() # TODO: offload to separate thread?
print >>sys.stderr,"bg: search: meta: Found feeds",self.metafp.get_feedurls()
self.metafeedurl = metafeedurl
except:
print_exc()
return False
return True
def tqueue_save_remote_hits(self,remotehits):
""" Save .torrents received from SIMPLE+METADATA query on a separate
thread.
Run by TimedTaskQueueThread
"""
torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
extra_info = {'status':'good'}
n = len(remotehits)
count = 0
commit = False
for infohash,remotehit in remotehits.iteritems():
if count == n-1:
commit = True
try:
torrentpath = self.tqueue_save_collected_torrent(remotehit['metatype'],remotehit['metadata'])
torrent_db.addExternalTorrent(torrentpath, source='BC', extra_info=extra_info, commit=commit)
except:
print_exc()
count += 1
self.session.close_dbhandler(torrent_db)
def tqueue_save_collected_torrent(self,metatype,metadata):
""" Run by TimedTaskQueueThread """
if metatype == URL_MIME_TYPE:
tdef = TorrentDef.load_from_url(metadata)
else:
metainfo = bdecode(metadata)
tdef = TorrentDef.load_from_dict(metainfo)
infohash = tdef.get_infohash()
colldir = self.session.get_torrent_collecting_dir()
filename = get_collected_torrent_filename(infohash)
torrentpath = os.path.join(colldir, filename)
print >>sys.stderr,"bg: search: saving remotehit",torrentpath
tdef.save(torrentpath)
return torrentpath
def localdbhits2hits(localdbhits):
hits = {}
for dbhit in localdbhits:
localhit = {}
localhit['hittype'] = "localdb"
localhit.update(dbhit)
infohash = dbhit['infohash'] # convenient to also have in record
hits[infohash] = localhit
return hits
def remotehits2hits(remotehits):
hits = {}
for infohash,hit in remotehits.iteritems():
#print >>sys.stderr,"remotehit2hits: keys",hit.keys()
remotehit = {}
remotehit['hittype'] = "remote"
#remotehit['query_permid'] = permid # Bit of duplication, ignore
remotehit['infohash'] = infohash # convenient to also have in record
remotehit.update(hit)
# HACK until we use SIMPLE+METADATA: Create fake torrent file
if not 'metadata' in hit:
metatype = TSTREAM_MIME_TYPE
metadata = hack_make_default_merkletorrent(hit['content_name'])
remotehit['metatype'] = metatype
remotehit['metadata'] = metadata
hits[infohash] = remotehit
return hits
class Query2HitsMap:
""" Stores localdb and remotehits in common hits format, i.e., each
hit has a 'hittype' attribute that tells which type it is (localdb or remote).
This Query2HitsMap is passed to the Hits2AnyPathMapper, which is connected
to the internal HTTP server.
The HTTP server will then forward all "/hits" GET requests to this mapper.
The mapper then dynamically generates the required contents from the stored
hits, e.g. an ATOM feed, MPEG7 description, .torrent file and thumbnail
images from the torrent.
"""
def __init__(self):
self.lock = RLock()
self.d = {}
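    # Usage sketch (mirrors SearchPathMapper.process_search_p2p; the id and
    # hit values are illustrative):
    #   id2hits = Query2HitsMap()
    #   id2hits.add_query(id, searchstr, time.time())
    #   id2hits.add_hits(id, localdbhits2hits(localdbhits))
    #   hits = id2hits.get_hits(id)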
def add_query(self,id,searchstr,timestamp):
if DEBUG:
print >>sys.stderr,"q2h: lock1",id
self.lock.acquire()
try:
qrec = self.d.get(id,{})
qrec['searchstr'] = searchstr
qrec['timestamp'] = timestamp
qrec['hitlist'] = {}
self.d[id] = qrec
finally:
if DEBUG:
print >>sys.stderr,"q2h: unlock1"
self.lock.release()
def add_hits(self,id,hits):
if DEBUG:
print >>sys.stderr,"q2h: lock2",id,len(hits)
self.lock.acquire()
try:
qrec = self.d[id]
qrec['hitlist'].update(hits)
finally:
if DEBUG:
print >>sys.stderr,"q2h: unlock2"
self.lock.release()
def get_hits(self,id):
if DEBUG:
print >>sys.stderr,"q2h: lock3",id
self.lock.acquire()
try:
qrec = self.d[id]
return copy.copy(qrec['hitlist']) # return shallow copy
finally:
if DEBUG:
print >>sys.stderr,"q2h: unlock3"
self.lock.release()
def get_searchstr(self,id):
if DEBUG:
print >>sys.stderr,"q2h: lock4"
self.lock.acquire()
try:
qrec = self.d[id]
return qrec['searchstr']
finally:
if DEBUG:
print >>sys.stderr,"q2h: unlock4"
self.lock.release()
def garbage_collect_timestamp_smaller(self,timethres):
self.lock.acquire()
try:
idlist = []
for id,qrec in self.d.iteritems():
if qrec['timestamp'] < timethres:
idlist.append(id)
for id in idlist:
del self.d[id]
finally:
self.lock.release()
class Hits2AnyPathMapper(AbstractPathMapper):
""" See Query2Hits description """
def __init__(self,session,id2hits):
self.session = session
self.id2hits = id2hits
def get(self,urlpath):
"""
Possible paths:
/hits/id -> ATOM feed
/hits/id/infohash.xml -> MPEG 7
/hits/id/infohash.tstream -> Torrent file
/hits/id/infohash.tstream/thumbnail -> Thumbnail
"""
if DEBUG:
print >>sys.stderr,"hitsmap: Got",urlpath
if not urlpath.startswith(URLPATH_HITS_PREFIX):
return streaminfo404()
paths = urlpath.split('/')
if len(paths) < 3:
return streaminfo404()
id = paths[2]
if len(paths) == 3:
# ATOM feed
searchstr = self.id2hits.get_searchstr(id)
hits = self.id2hits.get_hits(id)
if DEBUG:
print >>sys.stderr,"hitsmap: Found",len(hits),"hits"
atomhits = hits2atomhits(hits,urlpath)
if DEBUG:
print >>sys.stderr,"hitsmap: Found",len(atomhits),"atomhits"
atomxml = atomhits2atomxml(atomhits,searchstr,urlpath)
#if DEBUG:
# print >>sys.stderr,"hitsmap: atomstring is",`atomxml`
atomstream = StringIO(atomxml)
atomstreaminfo = { 'statuscode':200,'mimetype': 'application/atom+xml', 'stream': atomstream, 'length': len(atomxml)}
return atomstreaminfo
elif len(paths) >= 4:
# Either NS Metadata, Torrent file, or thumbnail
urlinfohash = paths[3]
print >>sys.stderr,"hitsmap: path3 is",urlinfohash
if urlinfohash.endswith(URLPATH_TORRENT_POSTFIX):
# Torrent file, or thumbnail
coded = urlinfohash[:-len(URLPATH_TORRENT_POSTFIX)]
infohash = urlpath2infohash(coded)
else:
# NS Metadata / MPEG7
coded = urlinfohash[:-len(URLPATH_NSMETA_POSTFIX)]
infohash = urlpath2infohash(coded)
# Check if hit:
hits = self.id2hits.get_hits(id)
print >>sys.stderr,"hitsmap: meta: Found",len(hits),"hits"
hit = hits.get(infohash,None)
if hit is not None:
if len(paths) == 5:
# Thumbnail
return self.get_thumbstreaminfo(infohash,hit)
elif urlinfohash.endswith(URLPATH_TORRENT_POSTFIX):
# Torrent file
return self.get_torrentstreaminfo(infohash,hit)
else:
# NS Metadata / MPEG7
hiturlpathprefix = URLPATH_HITS_PREFIX+'/'+id
return self.get_nsmetastreaminfo(infohash,hit,hiturlpathprefix,urlpath)
return streaminfo404()
def get_torrentstreaminfo(self,infohash,hit):
if DEBUG:
print >>sys.stderr,"hitmap: get_torrentstreaminfo",infohash2urlpath(infohash)
torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
try:
if hit['hittype'] == "localdb":
dbhit = torrent_db.getTorrent(infohash,include_mypref=False)
colltorrdir = self.session.get_torrent_collecting_dir()
filepath = os.path.join(colltorrdir,dbhit['torrent_file_name'])
# Return stream that contains torrent file
stream = open(filepath,"rb")
length = os.path.getsize(filepath)
torrentstreaminfo = {'statuscode':200,'mimetype':TSTREAM_MIME_TYPE,'stream':stream,'length':length}
return torrentstreaminfo
else:
if hit['metatype'] == URL_MIME_TYPE:
# Shouldn't happen, P2PURL should be embedded in atom
return streaminfo404()
else:
stream = StringIO(hit['metadata'])
length = len(hit['metadata'])
torrentstreaminfo = {'statuscode':200,'mimetype':TSTREAM_MIME_TYPE,'stream':stream,'length':length}
return torrentstreaminfo
finally:
self.session.close_dbhandler(torrent_db)
def get_thumbstreaminfo(self,infohash,hit):
if DEBUG:
print >>sys.stderr,"hitmap: get_thumbstreaminfo",infohash2urlpath(infohash)
torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
try:
if hit['hittype'] == "localdb":
dbhit = torrent_db.getTorrent(infohash,include_mypref=False)
colltorrdir = self.session.get_torrent_collecting_dir()
filepath = os.path.join(colltorrdir,dbhit['torrent_file_name'])
tdef = TorrentDef.load(filepath)
(thumbtype,thumbdata) = tdef.get_thumbnail()
return self.create_thumbstreaminfo(thumbtype,thumbdata)
else:
if hit['metatype'] == URL_MIME_TYPE:
# Shouldn't happen, not thumb in P2PURL
return streaminfo404()
else:
if DEBUG:
print >>sys.stderr,"hitmap: get_thumbstreaminfo: looking for thumb in remote hit"
metainfo = bdecode(hit['metadata'])
tdef = TorrentDef.load_from_dict(metainfo)
(thumbtype,thumbdata) = tdef.get_thumbnail()
return self.create_thumbstreaminfo(thumbtype,thumbdata)
finally:
self.session.close_dbhandler(torrent_db)
def create_thumbstreaminfo(self,thumbtype,thumbdata):
if thumbtype is None:
return streaminfo404()
else:
# Return stream that contains thumb
stream = StringIO(thumbdata)
length = len(thumbdata)
thumbstreaminfo = {'statuscode':200,'mimetype':thumbtype,'stream':stream,'length':length}
return thumbstreaminfo
def get_nsmetastreaminfo(self,infohash,hit,hiturlpathprefix,hitpath):
colltorrdir = self.session.get_torrent_collecting_dir()
nsmetahit = hit2nsmetahit(hit,hiturlpathprefix,colltorrdir)
if DEBUG:
print >>sys.stderr,"hitmap: get_nsmetastreaminfo: nsmetahit is",`nsmetahit`
nsmetarepr = nsmetahit2nsmetarepr(nsmetahit,hitpath)
nsmetastream = StringIO(nsmetarepr)
nsmetastreaminfo = { 'statuscode':200,'mimetype': 'text/xml', 'stream': nsmetastream, 'length': len(nsmetarepr)}
return nsmetastreaminfo
def infohash2urlpath(infohash):
if len(infohash) != 20:
raise ValueError("infohash len 20 !=" + str(len(infohash)))
hex = binascii.hexlify(infohash)
if len(hex) != 40:
raise ValueError("hex len 40 !=" + str(len(hex)))
return hex
def urlpath2infohash(hex):
if len(hex) != 40:
raise ValueError("hex len 40 !=" + str(len(hex)) + " " + hex)
infohash = binascii.unhexlify(hex)
if len(infohash) != 20:
raise ValueError("infohash len 20 !=" + str(len(infohash)))
return infohash
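# Round-trip sketch: for a valid 20-byte infohash ih,
# urlpath2infohash(infohash2urlpath(ih)) == ih; any other length raises
# ValueError in either direction.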
def hits2atomhits(hits,urlpathprefix):
atomhits = {}
for infohash,hit in hits.iteritems():
if hit['hittype'] == "localdb":
atomhit = localdbhit2atomhit(hit,urlpathprefix)
atomhits[infohash] = atomhit
else:
atomhit = remotehit2atomhit(hit,urlpathprefix)
atomhits[infohash] = atomhit
return atomhits
def localdbhit2atomhit(dbhit,urlpathprefix):
atomhit = {}
atomhit['title'] = htmlfilter(dbhit['name'].encode("UTF-8"))
atomhit['summary'] = htmlfilter(dbhit['comment'].encode("UTF-8"))
if dbhit['thumbnail']:
urlpath = urlpathprefix+'/'+infohash2urlpath(dbhit['infohash'])+URLPATH_TORRENT_POSTFIX+URLPATH_THUMBNAIL_POSTFIX
atomhit['p2pnext:image'] = urlpath
return atomhit
def remotehit2atomhit(remotehit,urlpathprefix):
# TODO: make RemoteQuery return full DB schema of TorrentDB
#print >>sys.stderr,"remotehit2atomhit: keys",remotehit.keys()
atomhit = {}
atomhit['title'] = htmlfilter(remotehit['content_name'].encode("UTF-8"))
atomhit['summary'] = "Seeders: "+str(remotehit['seeder'])+" Leechers: "+str(remotehit['leecher'])
if remotehit['metatype'] != URL_MIME_TYPE:
# TODO: thumbnail, see if we can detect presence (see DB schema remark).
# Now we assume it's always there if not P2PURL
urlpath = urlpathprefix+'/'+infohash2urlpath(remotehit['infohash'])+URLPATH_TORRENT_POSTFIX+URLPATH_THUMBNAIL_POSTFIX
atomhit['p2pnext:image'] = urlpath
return atomhit
def htmlfilter(s):
""" Escape characters to which HTML parser is sensitive """
if s is None:
return ""
news = s
news = news.replace('&','&')
news = news.replace('<','<')
news = news.replace('>','>')
return news
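# For example, htmlfilter('<AC/DC & Friends>') returns
# '&lt;AC/DC &amp; Friends&gt;', and None is mapped to the empty string.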
def atomhits2atomxml(atomhits,searchstr,urlpathprefix,nextlinkpath=None):
# TODO: use ElementTree parser here too, see AtomFeedParser:feedhits2atomxml
atom = ''
atom += '<?xml version="1.0" encoding="UTF-8"?>\n'
atom += '<feed xmlns="http://www.w3.org/2005/Atom" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:sy="http://purl.org/rss/1.0/modules/syndication/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:p2pnext="urn:p2pnext:contentfeed:2009" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/">\n'
atom += ' <title>Hits for '+searchstr+'</title>\n'
atom += ' <link rel="self" href="'+urlpathprefix+'" />\n'
if nextlinkpath:
atom += ' <link rel="next" href="'+nextlinkpath+'" />\n'
atom += ' <author>\n'
atom += ' <name>NSSA</name>\n'
atom += ' </author>\n'
atom += ' <id>urn:nssa</id>\n'
atom += ' <updated>'+now2formatRFC3339()+'</updated>\n'
#atom += '<p2pnext:image src="http://p2pnextfeed1.rad0.net/images/bbc.png" />\n' # TODO
for infohash,hit in atomhits.iteritems():
urlinfohash = infohash2urlpath(infohash)
hitpath = urlpathprefix+'/'+urlinfohash+URLPATH_NSMETA_POSTFIX
atom += ' <entry>\n'
atom += ' <title>'+hit['title']+'</title>\n'
atom += ' <link type="application/xml" href="'+hitpath+'" />\n'
atom += ' <id>urn:nssa-'+urlinfohash+'</id>\n'
atom += ' <updated>'+now2formatRFC3339()+'</updated>\n'
if hit['summary'] is not None:
atom += ' <summary>'+hit['summary']+'</summary>\n'
if 'p2pnext:image' in hit:
atom += ' <p2pnext:image src="'+hit['p2pnext:image']+'" />\n'
atom += ' </entry>\n'
atom += '</feed>\n'
return atom
def hit2nsmetahit(hit,hiturlprefix,colltorrdir):
""" Convert common hit to the fields required for the MPEG7 NS metadata """
    print >>sys.stderr,"hit2nsmetahit:"
# Read info from torrent files / P2PURLs
if hit['hittype'] == "localdb":
name = hit['name']
if hit['torrent_file_name'].startswith(P2PURL_SCHEME):
# Local DB hit that is P2PURL
torrenturl = hit['torrent_file_name']
titleimgurl = None
tdef = TorrentDef.load_from_url(torrenturl)
else:
# Local DB hit that is torrent file
torrenturlpath = '/'+infohash2urlpath(hit['infohash'])+URLPATH_TORRENT_POSTFIX
torrenturl = hiturlprefix + torrenturlpath
filepath = os.path.join(colltorrdir,hit['torrent_file_name'])
tdef = TorrentDef.load(filepath)
(thumbtype,thumbdata) = tdef.get_thumbnail()
if thumbtype is None:
titleimgurl = None
else:
titleimgurl = torrenturl+URLPATH_THUMBNAIL_POSTFIX
else:
# Remote hit
name = hit['content_name']
if hit['metatype'] == URL_MIME_TYPE:
torrenturl = hit['torrent_file_name']
titleimgurl = None
tdef = TorrentDef.load_from_url(torrenturl)
else:
torrenturlpath = '/'+infohash2urlpath(hit['infohash'])+URLPATH_TORRENT_POSTFIX
torrenturl = hiturlprefix + torrenturlpath
metainfo = bdecode(hit['metadata'])
tdef = TorrentDef.load_from_dict(metainfo)
(thumbtype,thumbdata) = tdef.get_thumbnail()
if thumbtype is None:
titleimgurl = None
else:
titleimgurl = torrenturl+URLPATH_THUMBNAIL_POSTFIX
# Extract info required for NS metadata MPEG7 representation.
nsmetahit = {}
nsmetahit['title'] = unicode2iri(name)
nsmetahit['titleimgurl'] = titleimgurl
comment = tdef.get_comment()
if comment is None:
nsmetahit['abstract'] = None
else:
nsmetahit['abstract'] = unicode2iri(comment)
nsmetahit['producer'] = 'Insert Name Here'
creator = tdef.get_created_by()
if creator is None:
creator = 'Insert Name Here Too'
nsmetahit['disseminator'] = creator
nsmetahit['copyrightstr'] = 'Copyright '+creator
nsmetahit['torrent_url'] = torrenturl
# TODO: multifile torrents, LIVE
nsmetahit['duration'] = bitratelength2nsmeta_duration(tdef.get_bitrate(),tdef.get_length())
return nsmetahit
def unicode2iri(uni):
# Roughly after http://www.ietf.org/rfc/rfc3987.txt Sec 3.1 procedure.
# TODO: do precisely after.
s = uni.encode('UTF-8')
return urllib.quote(s)
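# Sketch: unicode2iri(u'caf\xe9 live') returns 'caf%C3%A9%20live' (UTF-8
# encode, then percent-escape so the value is safe inside the generated XML).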
def bitratelength2nsmeta_duration(bitrate,length):
# Format example: PT0H15M0S
if bitrate is None:
return 'PT01H00M0S' # 1 hour
secs = float(length)/float(bitrate)
hours = float(int(secs / 3600.0))
secs = secs - hours*3600.0
mins = float(int(secs / 60.0))
secs = secs - mins*60.0
return 'PT%02.0fH%02.0fM%02.0fS' % (hours,mins,secs)
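# Worked example (hypothetical numbers): length=90000000 bytes at
# bitrate=125000 bytes/s gives 720 seconds, so this returns 'PT00H12M00S';
# a None bitrate falls back to the one hour default above.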
def nsmetahit2nsmetarepr(hit,hitpath):
title = hit['title']
titleimgurl = hit['titleimgurl']
abstract = hit['abstract']
producer = hit['producer']
disseminator = hit['disseminator']
copyrightstr = hit['copyrightstr']
torrenturl = hit['torrent_url']
duration = hit['duration'] # Format example: PT0H15M0S
livetimepoint = now2formatRFC3339() # Format example: '2009-10-05T00:40:00+01:00' # TODO VOD
s = ''
s += '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<Mpeg7 xmlns="urn:mpeg:mpeg7:schema:2001" xmlns:p2pnext="urn:p2pnext:metadata:2008" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n'
s += ' <Description xsi:type="p2pnext:P2PBasicDescriptionType">\n'
s += ' <CreationInformation>\n'
s += ' <Creation>\n'
s += ' <Title type="main" xml:lang="en">'+title+'</Title>\n'
s += ' <TitleMedia xsi:type="TitleMediaType">\n'
if titleimgurl:
s += ' <TitleImage>\n'
s += ' <MediaUri>'+titleimgurl+'</MediaUri>\n'
s += ' </TitleImage>\n'
s += ' </TitleMedia>\n'
if abstract:
s += ' <Abstract>\n'
s += ' <FreeTextAnnotation>'+abstract+'</FreeTextAnnotation>\n'
s += ' </Abstract>\n'
s += ' <Creator>\n'
s += ' <Role href="urn:mpeg:mpeg7:cs:RoleCS:2001:PRODUCER" />\n'
s += ' <Agent xsi:type="OrganizationType">\n'
s += ' <Name>'+producer+'</Name>\n'
s += ' </Agent>\n'
s += ' </Creator>\n'
s += ' <Creator>\n'
s += ' <Role href="urn:mpeg:mpeg7:cs:RoleCS:2001:DISSEMINATOR" />\n'
s += ' <Agent xsi:type="OrganizationType">\n'
s += ' <Name>'+disseminator+'</Name>\n'
s += ' </Agent>\n'
s += ' </Creator>\n'
s += ' <CopyrightString>'+copyrightstr+'</CopyrightString>\n'
s += ' </Creation>\n'
s += ' </CreationInformation>\n'
s += ' <p2pnext:IsInteractiveContent>false</p2pnext:IsInteractiveContent>\n'
s += ' <p2pnext:IsCommercialContent>false</p2pnext:IsCommercialContent>\n'
s += ' <p2pnext:ContainsCommercialContent>false</p2pnext:ContainsCommercialContent>\n'
s += ' <p2pnext:P2PData>\n'
s += ' <p2pnext:Torrent>\n'
s += ' <MediaUri>'+torrenturl+'</MediaUri>\n'
s += ' </p2pnext:Torrent>\n'
s += ' <p2pnext:P2PFragment>offset(0, 1000)</p2pnext:P2PFragment>\n'
s += ' </p2pnext:P2PData>\n'
s += ' </Description>\n'
s += ' <Description xsi:type="ContentEntityType">\n'
s += ' <MultimediaContent xsi:type="VideoType">\n'
s += ' <Video>\n'
s += ' <MediaTime>\n'
s += ' <MediaTimePoint>T00:00:00</MediaTimePoint>\n'
s += ' <MediaDuration>'+duration+'</MediaDuration>\n'
s += ' </MediaTime>\n'
s += ' </Video>\n'
s += ' </MultimediaContent>\n'
s += ' </Description>\n'
s += ' <Description xsi:type="UsageDescriptionType">\n'
s += ' <UsageInformation>\n'
s += ' <Availability>\n'
s += ' <InstanceRef href="'+hitpath+'" />\n'
s += ' <AvailabilityPeriod type="live">\n'
s += ' <TimePoint>'+livetimepoint+'</TimePoint>\n'
s += ' </AvailabilityPeriod>\n'
s += ' </Availability>\n'
s += ' </UsageInformation>\n'
s += ' </Description>\n'
s += '</Mpeg7>\n'
return s
def hack_make_default_merkletorrent(title):
metainfo = {}
metainfo['announce'] = 'http://localhost:0/announce'
metainfo['creation date'] = int(time.time())
info = {}
info['name'] = title
info['length'] = 2 ** 30
info['piece length'] = 2 ** 16
info['root hash'] = '*' * 20
metainfo['info'] = info
mdict = {}
mdict['Publisher'] = 'Tribler'
mdict['Description'] = ''
mdict['Progressive'] = 1
mdict['Speed Bps'] = str(2 ** 16)
mdict['Title'] = metainfo['info']['name']
mdict['Creation Date'] = long(time.time())
# Azureus client source code doesn't tell what this is, so just put in random value from real torrent
mdict['Content Hash'] = 'PT3GQCPW4NPT6WRKKT25IQD4MU5HM4UY'
mdict['Revision Date'] = long(time.time())
cdict = {}
cdict['Content'] = mdict
metainfo['azureus_properties'] = cdict
return bencode(metainfo)
"""
class Infohash2TorrentPathMapper(AbstractPathMapper):
Mapper to map in the collection of known torrents files (=collected + started
+ own) into the HTTP address space of the local HTTP server. In particular,
it maps a "/infohash/aabbccdd...zz.tstream" path to a streaminfo dict.
Also supported are "/infohash/aabbccdd...zz.tstream/thumbnail" queries, which
try to read the thumbnail from the torrent.
def __init__(self,urlpathprefix,session):
self.urlpathprefix = urlpathprefix
self.session = session
self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
def get(self,urlpath):
if not urlpath.startswith(self.urlpathprefix):
return None
try:
wantthumb = False
if urlpath.endswith(URLPATH_THUMBNAIL_POSTFIX):
wantthumb = True
infohashquote = urlpath[len(self.urlpathprefix):-len(URLPATH_TORRENT_POSTFIX+URLPATH_THUMBNAIL_POSTFIX)]
else:
infohashquote = urlpath[len(self.urlpathprefix):-len(URLPATH_TORRENT_POSTFIX)]
            infohash = urlpath2infohash(infohashquote)
dbhit = self.torrent_db.getTorrent(infohash,include_mypref=False)
colltorrdir = self.session.get_torrent_collecting_dir()
filepath = os.path.join(colltorrdir,dbhit['torrent_file_name'])
if not wantthumb:
# Return stream that contains torrent file
stream = open(filepath,"rb")
length = os.path.getsize(filepath)
streaminfo = {'statuscode':200,'mimetype':TSTREAM_MIME_TYPE,'stream':stream,'length':length}
else:
# Return stream that contains thumbnail
tdef = TorrentDef.load(filepath)
(thumbtype,thumbdata) = tdef.get_thumbnail()
if thumbtype is None:
return None
else:
stream = StringIO(thumbdata)
streaminfo = {'statuscode':200,'mimetype':thumbtype,'stream':stream,'length':len(thumbdata)}
return streaminfo
except:
print_exc()
return None
"""
| lgpl-2.1 | 5,569,890,133,682,828,000 | 36.625938 | 314 | 0.584589 | false |
topic2k/EventGhost | plugins/FileOperations/__init__.py | 1 | 37658 | # -*- coding: utf-8 -*-
version="0.1.7"
# Copyright (C) 2008-2011 Pako ([email protected])
#
# This file is a plugin for EventGhost.
# Copyright © 2005-2019 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
#
# Changelog (in reverse chronological order):
# -------------------------------------------
# 0.1.7 by Pako 2011-04-11 08:33 UTC+1
# - added eg.ParseString for some inputs
# 0.1.6 by Pako 2010-04-15 15:27 GMT+1
#===============================================================================
eg.RegisterPlugin(
name = "File Operations",
author = "Pako",
version = version,
kind = "other",
guid = "{50D933C5-F93B-4A8A-A6CE-95A40F906036}",
createMacrosOnAdd = False,
icon = (
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAA"
"ABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAINSURBVBgZBcG/r55z"
"GAfg6/4+z3va01NHlYgzEfE7MdCIGISFgS4Gk8ViYyM2Mdlsko4GSf8Do0FLRCIkghhY"
"JA3aVBtEz3nP89wf11VJvPDepdd390+8Nso5nESBQoq0pfvXm9fzWf19453LF85vASqJ"
"lz748vInb517dIw6EyYBIIG49u+xi9/c9MdvR//99MPPZ7+4cP4IZhhTPbwzT2d+vGoa"
"VRRp1rRliVvHq+cfvM3TD82+7mun0o/ceO7NT+/4/KOXjwZU1ekk0840bAZzMQ2mooqh"
"0A72d5x/6sB9D5zYnff3PoYBoWBgFKPKqDKqjCpjKr//dcu9p489dra88cydps30KswA"
"CfNEKanSaxhlntjJ8Mv12Paie+vZ+0+oeSwwQ0Iw1xAR1CiFNJkGO4wu3ZMY1AAzBI0q"
"SgmCNJsJUEOtJSMaCTBDLyQ0CknAGOgyTyFFiLI2awMzdEcSQgSAAKVUmAeNkxvWJWCG"
"tVlDmgYQ0GFtgg4pNtOwbBcwQy/Rife/2yrRRVI0qYCEBly8Z+P4qMEMy7JaVw72N568"
"e+iwhrXoECQkfH91kY7jwwXMsBx1L93ZruqrK6uuiAIdSnTIKKPLPFcvay8ww/Hh+ufe"
"znTXu49v95IMoQG3784gYXdTqvRmqn/Wpa/ADFX58MW3L71SVU9ETgEIQQQIOOzub+fh"
"IvwPRDgeVjWDahIAAAAASUVORK5CYII="
),
description = (
"File Operations (reading, periodical reading and writing)."
),
url = "http://www.eventghost.net/forum/viewtopic.php?t=1011"
)
#===============================================================================
import os
import time
import codecs
from threading import Thread, Event
def String2Hex(strng, length = '2'):
tmp = []
s2h = "%0" + length + "X "
for c in strng:
tmp.append( s2h % ord( c ) )
return ''.join( tmp ).strip()
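# Usage note (editor's sketch, not part of the original plugin): String2Hex
# renders each character as fixed-width uppercase hex separated by spaces,
# e.g. String2Hex("AB") -> "41 42", and String2Hex(u"AB", '4') -> "0041 0042"
# (four digits per code point, as used by the Write action for unicode dumps).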
#===============================================================================
class ObservationThread(Thread):
def __init__(
self,
stp,
):
self.abort = False
self.aborted = False
self.lastCheck = 0
self.threadFlag = Event()
#self.firstRun = True
self.inCoding = stp[0]
self.fileName = eg.ParseString(stp[1])
self.mode = stp[2]
self.errDecMode = stp[3]
self.inPage = stp[4]
self.fromLine = stp[5]
self.direction = stp[6]
self.lines = stp[7]
self.period = stp[8]
self.evtName = eg.ParseString(stp[9])
self.trigger = stp[10]
self.oldData = None
Thread.__init__(self, name = self.evtName.encode('unicode_escape')+'_Thread')
def run(self):
while 1:
errorList = ('strict','ignore','replace')
try:
input = codecs.open(self.fileName,'r',self.inPage, errorList[self.errDecMode])
except:
raise
else:
if self.lines > 0:
data = input.readlines()
if self.direction == 0: #from beginning
data = data[self.fromLine-1:self.fromLine+self.lines-1]
else: #from end
if self.fromLine-self.lines < 1:
data = data[-self.fromLine:]
else:
data = data[-self.fromLine:-(self.fromLine-self.lines)]
if self.mode == 2: #one string
data = ''.join(data)
elif self.mode == 0: #without CR/LF
tmp = []
for line in data:
tmp.append(line.rstrip())
data = tmp
if self.lines == 1:
if len(data) > 0: #empty file ?
data = data[0]
else:
data = ''
else: #whole file
data = input.read()
try:
input.close()
except:
raise
flag = True
while True:
if self.trigger == 0: #always
break
elif self.trigger == 1: #always if not empty
if self.mode == 2:
if data != '':
break
else:
if data != []:
break
elif self.trigger == 2: #only at change
if data != self.oldData:
break
else: #only at change and not empty
if data != self.oldData:
if self.mode == 2:
if data != '':
break
else:
if data != []:
break
flag = False
break
if flag:
eg.TriggerEvent(self.evtName, payload = data, prefix = 'File')
self.oldData = data
if self.abort:
break
self.lastCheck = time.time()
self.threadFlag.wait(self.period)
self.threadFlag.clear()
self.aborted = True
def AbortObservation(self, close=False):
self.abort = True
self.threadFlag.set()
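# Behaviour note (editor's sketch): each ObservationThread re-reads the file
# every `period` seconds and fires an EventGhost event with prefix 'File' and
# suffix `evtName`, carrying the selected line(s) as payload, subject to the
# trigger condition (always, non-empty, on change, or on non-empty change).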
#===============================================================================
class FileOperations(eg.PluginClass):
def __init__(self):
self.AddAction(Read)
self.AddAction(ReadPeriodically)
self.AddAction(AbortPeriodicalRead)
self.AddAction(AbortAllPeriodicalRead)
self.AddAction(Write)
self.observThreads = {}
self.observData = []
def StartObservation(
self,
stp,
):
observName = eg.ParseString(stp[9])
if observName in self.observThreads:
ot = self.observThreads[observName]
if ot.isAlive():
ot.AbortObservation()
del self.observThreads[observName]
ot = ObservationThread(
stp,
)
ot.start()
self.observThreads[observName] = ot
def AbortObservation(self, observName):
if observName in self.observThreads:
ot = self.observThreads[observName]
ot.AbortObservation()
def AbortAllObservations(self, close=False):
thrds = list(enumerate(self.observThreads))
thrds.reverse()
for i, item in thrds:
ot = self.observThreads[item]
ot.AbortObservation(close)
def Configure(self, *args):
panel = eg.ConfigPanel(self, resizable=True)
panel.sizer.Add(
wx.StaticText(panel, -1, self.text.header),
flag = wx.ALIGN_CENTER_VERTICAL
)
mySizer = wx.GridBagSizer(5, 5)
observListCtrl = wx.ListCtrl(panel, -1, style=wx.LC_REPORT | wx.VSCROLL | wx.HSCROLL)
for i, colLabel in enumerate(self.text.colLabels):
observListCtrl.InsertColumn(i, colLabel)
#setting cols width
observListCtrl.InsertStringItem(0, 30*"X")
observListCtrl.SetStringItem(0, 1, 16*"X")
observListCtrl.SetStringItem(0, 2, 16*"X")
size = 0
for i in range(3):
observListCtrl.SetColumnWidth(i, wx.LIST_AUTOSIZE_USEHEADER)
size += observListCtrl.GetColumnWidth(i)
observListCtrl.SetMinSize((size, -1))
mySizer.Add(observListCtrl, (0,0), (1, 4), flag = wx.EXPAND)
#buttons
abortButton = wx.Button(panel, -1, "Abort")
mySizer.Add(abortButton, (1,0))
abortAllButton = wx.Button(panel, -1, "Abort all")
mySizer.Add(abortAllButton, (1,1), flag = wx.ALIGN_CENTER_HORIZONTAL)
refreshButton = wx.Button(panel, -1, "Refresh")
mySizer.Add(refreshButton, (1,3), flag = wx.ALIGN_RIGHT)
panel.sizer.Add(mySizer, 1, flag = wx.EXPAND)
mySizer.AddGrowableRow(0)
mySizer.AddGrowableCol(2)
def PopulateList (event=None):
observListCtrl.DeleteAllItems()
row = 0
for i, item in enumerate(self.observThreads):
t = self.observThreads[item]
if t.isAlive():
observListCtrl.InsertStringItem(row, os.path.split(t.fileName)[1])
observListCtrl.SetStringItem(row, 1, t.evtName)
observListCtrl.SetStringItem(row, 2, str(t.period) + " sec")
row += 1
ListSelection(wx.CommandEvent())
def OnAbortButton(event):
item = observListCtrl.GetFirstSelected()
while item != -1:
cell = observListCtrl.GetItem(item,1)
evtName = cell.GetText()
ot = self.observThreads[evtName]
self.AbortObservation(evtName)
while not ot.aborted:
pass
item = observListCtrl.GetNextSelected(item)
PopulateList()
event.Skip()
def OnAbortAllButton(event):
self.AbortAllObservations()
PopulateList()
event.Skip()
def ListSelection(event):
flag = observListCtrl.GetFirstSelected() != -1
abortButton.Enable(flag)
event.Skip()
def OnSize(event):
observListCtrl.SetColumnWidth(6, wx.LIST_AUTOSIZE_USEHEADER)
event.Skip()
PopulateList()
abortButton.Bind(wx.EVT_BUTTON, OnAbortButton)
abortAllButton.Bind(wx.EVT_BUTTON, OnAbortAllButton)
refreshButton.Bind(wx.EVT_BUTTON, PopulateList)
observListCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, ListSelection)
observListCtrl.Bind(wx.EVT_LIST_ITEM_DESELECTED, ListSelection)
panel.Bind(wx.EVT_SIZE, OnSize)
while panel.Affirmed():
panel.SetResult(*args)
#function to fill the action's Comboboxes
def GetObservData(self):
self.observData.sort(lambda a,b: cmp(a[1].lower(), b[1].lower()))
return self.observData
#function to collect data for action's Comboboxes
def AddObservData(self, stp):
item = (os.path.split(stp[1])[1],stp[9])
if not item in self.observData:
self.observData.append(item)
class text:
FilePath = "Read file:"
browseFileDialogTitle = "Choose the file"
txtMode = "Line(s) return like as a:"
listNotIncluding = "List of line strings without CR/LF"
listIncluding = "List of line strings including CR/LF"
oneNotIncluding = "String without CR/LF"
oneIncluding = "String including CR/LF"
oneString = "One string (including CR/LF)"
systemPage = "system code page (%s)"
defaultIn = "unicode (UTF-8)"
inputPage = "Input data coding:"
txtDecErrMode = "Error handling during decoding:"
strict = "Raise an exception"
ignore = "Ignore (skip bad chars)"
replace = "Replace bad chars"
lineNum = "Start read at line number:"
begin = "from the beginning"
end = "from the end"
readAhead = "Read"
readBehind = "lines (0 = whole file)"
intervalLabel = "Refresh interval (s):"
evtNameLabel = "Observation and event name:"
triggerLabel = "Event trigger:"
triggerChoice = (
"always",
"always if not empty",
"only at changes",
"only at changes and if not empty"
)
header = "Currently active file observations:"
colLabels = (
"File",
"Event name",
"Interval")
#===============================================================================
class Read(eg.ActionClass):
name = "Read text from file"
description = "Reads text from selected file."
def __call__(
self,
inCoding = 0,
fileName = '',
mode = 0,
errDecMode = 0,
inPage = "",
fromLine = 1,
direction = 0,
lines = 1,
):
fileName = eg.ParseString(fileName)
errorList = ('strict', 'ignore', 'replace')
try:
input = codecs.open(fileName, 'r', inPage, errorList[errDecMode])
except:
raise
else:
data = input.readlines()
if lines == 0:
direction = 0
lines = len(data)
fromLine = 1
if direction == 0: #from beginning
data = data[fromLine-1:fromLine+lines-1]
else: #from end
if fromLine-lines < 1:
data = data[-fromLine:]
else:
data = data[-fromLine:-(fromLine-lines)]
if mode == 2: #one string
data = ''.join(data)
elif mode == 0: #without CR/LF
tmp = []
for line in data:
tmp.append(line.rstrip())
data = tmp
if lines == 1:
if len(data) > 0: #empty file ?
data = data[0]
else:
data = ''
try:
input.close()
except:
raise
return data
def GetLabel(
self,
inCoding,
fileName,
mode,
errDecMode,
inPage,
fromLine,
direction,
lines = 1,
):
return '%s: %s' % (str(self.name), os.path.split(fileName)[1])
def Configure(
self,
inCoding = 0,
fileName = '',
mode = 0,
errDecMode = 0,
inPage="",
fromLine=1,
direction = 0,
lines = 1,
):
from codecsList import codecsList
panel = eg.ConfigPanel(self)
text = self.plugin.text
self.mode = mode
#Controls
inPageText = wx.StaticText(panel, -1, text.inputPage)
labelMode = wx.StaticText(panel, -1, text.txtMode)
labelDecErrMode = wx.StaticText(panel, -1, text.txtDecErrMode)
fileText = wx.StaticText(panel, -1, text.FilePath)
filepathCtrl = eg.FileBrowseButton(
panel,
-1,
initialValue=fileName,
labelText="",
fileMask="*.*",
buttonText=eg.text.General.browse,
dialogTitle=text.browseFileDialogTitle
)
width = labelDecErrMode.GetTextExtent(text.txtDecErrMode)[0]
choiceDecErrMode = wx.Choice(
panel,
-1,
size = ((width,-1)),
choices=(text.strict, text.ignore, text.replace)
)
choiceDecErrMode.SetSelection(errDecMode)
choices = [text.systemPage % eg.systemEncoding, text.defaultIn]
choices.extend(codecsList)
inPageCtrl = wx.Choice(panel,-1,choices=choices)
inPageCtrl.SetSelection(inCoding)
lineNumLbl=wx.StaticText(panel, -1, text.lineNum)
fromLineNumCtrl = eg.SpinIntCtrl(
panel,
-1,
fromLine,
min = 1,
max = 999,
)
rb0 = panel.RadioButton(not direction, text.begin, style=wx.RB_GROUP)
rb1 = panel.RadioButton(direction, text.end)
lblAhead = wx.StaticText(panel, -1, text.readAhead)
lblBehind = wx.StaticText(panel, -1, text.readBehind)
linesNumCtrl = eg.SpinIntCtrl(
panel,
-1,
lines,
min = 0,
max = 999,
)
w0 = inPageCtrl.GetTextExtent(text.listNotIncluding)[0]
w1 = inPageCtrl.GetTextExtent(text.listIncluding)[0]
w2 = inPageCtrl.GetTextExtent(text.oneNotIncluding)[0]
w3 = inPageCtrl.GetTextExtent(text.oneIncluding)[0]
w4 = inPageCtrl.GetTextExtent(text.oneString)[0]
width = max(w0,w1,w2,w3,w4)+30
choiceMode = wx.Choice(panel,-1,size=(width,-1))
#Sizers
topSizer = wx.FlexGridSizer(2,0,2,15)
topSizer.AddGrowableCol(0,1)
topSizer.AddGrowableCol(1,1)
topSizer.Add(inPageText,0,wx.EXPAND)
topSizer.Add(labelDecErrMode,0,wx.EXPAND)
topSizer.Add(inPageCtrl,0,wx.EXPAND)
topSizer.Add(choiceDecErrMode,0,wx.EXPAND)
fromSizer = wx.BoxSizer(wx.HORIZONTAL)
fromSizer.Add(lineNumLbl,0,wx.TOP,4)
fromSizer.Add(fromLineNumCtrl,0,wx.LEFT,10)
fromSizer.Add(rb0,0,wx.EXPAND|wx.LEFT,20)
fromSizer.Add(rb1,0,wx.EXPAND|wx.LEFT,15)
linesSizer = wx.BoxSizer(wx.HORIZONTAL)
linesSizer.Add(lblAhead,0,wx.TOP,4)
linesSizer.Add(linesNumCtrl,0,wx.LEFT|wx.RIGHT,8)
linesSizer.Add(lblBehind,0,wx.TOP,4)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(fileText,0,wx.EXPAND)
mainSizer.Add(filepathCtrl,0,wx.EXPAND)
mainSizer.Add(topSizer,0,wx.TOP|wx.EXPAND,5)
mainSizer.Add(linesSizer,0,wx.TOP|wx.EXPAND,11)
mainSizer.Add(fromSizer,0,wx.TOP|wx.EXPAND,11)
mainSizer.Add(labelMode,0,wx.TOP|wx.EXPAND,11)
mainSizer.Add(choiceMode,0,wx.TOP,2)
panel.sizer.Add(mainSizer,0,wx.EXPAND)
def onLinesNumCtrl(event=None):
flag = False
if event:
self.mode = choiceMode.GetSelection()
if linesNumCtrl.GetValue() == 0:
fromLineNumCtrl.SetValue(1)
rb0.SetValue(True)
rb1.SetValue(False)
lineNumLbl.Enable(False)
fromLineNumCtrl.Enable(False)
rb0.Enable(False)
rb1.Enable(False)
else:
lineNumLbl.Enable(True)
fromLineNumCtrl.Enable(True)
rb0.Enable(True)
rb1.Enable(True)
if linesNumCtrl.GetValue() == 1:
choiceMode.Clear()
choiceMode.AppendItems(strings=(text.oneNotIncluding,text.oneIncluding))
else:
if len(choiceMode.GetStrings()) != 3:
choiceMode.Clear()
choiceMode.AppendItems(
strings=(text.listNotIncluding,text.listIncluding,text.oneString)
)
if self.mode == 2:
flag = True
if event:
choiceMode.SetSelection(0)
event.Skip()
if flag:
self.mode = 0
choiceMode.SetSelection(self.mode)
linesNumCtrl.Bind(wx.EVT_SPIN, onLinesNumCtrl)
onLinesNumCtrl()
while panel.Affirmed():
inCoding = inPageCtrl.GetSelection()
pgTpl = (eg.systemEncoding, 'utf8')
panel.SetResult(
inCoding,
filepathCtrl.GetValue(),
choiceMode.GetSelection(),
choiceDecErrMode.GetSelection(),
inPageCtrl.GetStringSelection() if inCoding > 1 else pgTpl[inCoding],
fromLineNumCtrl.GetValue(),
rb1.GetValue(),
linesNumCtrl.GetValue(),
)
#===============================================================================
class AbortPeriodicalRead(eg.ActionClass):
name = "Abort periodical reading"
description = "Aborts periodical reading of text from selected file."
def __call__(self, observName='', file = ''):
observName = eg.ParseString(observName)
self.plugin.AbortObservation(observName)
def GetLabel(self, observName, file):
return '%s: %s -> %s' % (str(self.name), file, observName)
def Configure(self, observName='', file = ''):
text=self.text
panel = eg.ConfigPanel(self)
choices = [item[1] for item in self.plugin.GetObservData()]
fileLbl = wx.StaticText(panel, -1, '')
fileLbl.Enable(False)
nameLbl=wx.StaticText(panel, -1, text.nameObs)
nameCtrl=wx.ComboBox(panel,-1,choices = choices)
nameCtrl.SetStringSelection(observName)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(fileLbl,0)
mainSizer.Add((1,20))
mainSizer.Add(nameLbl,0)
mainSizer.Add(nameCtrl,0,wx.EXPAND)
panel.sizer.Add(mainSizer)
panel.sizer.Layout()
def onComboBox(event = None):
choices = [item[1] for item in self.plugin.GetObservData()]
evtName = nameCtrl.GetValue()
if evtName in choices:
indx = choices.index(evtName)
fileName = self.plugin.GetObservData()[indx][0]
lbl = text.fileLabel % fileName
else:
lbl = ''
fileLbl.SetLabel(lbl)
if event:
event.Skip()
onComboBox()
nameCtrl.Bind(wx.EVT_COMBOBOX,onComboBox)
# re-assign the test button
def OnTestButton(event):
self.plugin.AbortObservation(nameCtrl.GetValue())
panel.dialog.buttonRow.testButton.SetLabel(text.abortNow)
panel.dialog.buttonRow.testButton.SetToolTipString(text.tip)
panel.dialog.buttonRow.testButton.Bind(wx.EVT_BUTTON, OnTestButton)
while panel.Affirmed():
lbl = fileLbl.GetLabel()
if lbl != '':
fileName = lbl[2+lbl.rfind(':'):]
else:
fileName = ''
panel.SetResult(
nameCtrl.GetValue(),
fileName
)
class text:
nameObs = 'Observation and event name:'
abortNow = 'Abort now !'
tip = 'Abort observation now'
fileLabel = 'File to read: %s'
#===============================================================================
class AbortAllPeriodicalRead(eg.ActionClass):
name = "Abort all periodical reading"
description = "Aborts all periodical reading of text from file."
def __call__(self):
self.plugin.AbortAllObservations()
#===============================================================================
class ReadPeriodically(eg.ActionClass):
name = "Start periodical reading"
description = ("Starts periodical reading of text from selected file. "
"Learning the line(s) return as payload of event.")
def startObserv(self, stp):
self.plugin.StartObservation(stp)
def __call__(self, stp):
self.startObserv(stp)
def GetLabel(
self,
stp
):
self.plugin.AddObservData(stp)
return '%s: %s -> %s' % (str(self.name), os.path.split(stp[1])[1], stp[9])
def Configure(
self,
stp = []
):
if stp == []:
inCoding = 0
fileName = ''
mode = 0
errDecMode = 0
inPage = ""
fromLine = 1
direction = 0
lines = 1
# period = 0.1
period = 1
evtName = ''
trigger = 1
else:
inCoding = stp[0]
fileName = stp[1]
mode = stp[2]
errDecMode = stp[3]
inPage = stp[4]
fromLine = stp[5]
direction = stp[6]
lines = stp[7]
period = stp[8]
evtName = stp[9]
trigger = stp[10]
from codecsList import codecsList
panel = eg.ConfigPanel(self)
text = self.plugin.text
self.mode = mode
#Controls
inPageText = wx.StaticText(panel, -1, text.inputPage)
labelMode = wx.StaticText(panel, -1, text.txtMode)
labelDecErrMode = wx.StaticText(panel, -1, text.txtDecErrMode)
fileText = wx.StaticText(panel, -1, text.FilePath)
filepathCtrl = eg.FileBrowseButton(
panel,
-1,
initialValue=fileName,
labelText="",
fileMask="*.*",
buttonText=eg.text.General.browse,
dialogTitle=text.browseFileDialogTitle
)
width = labelDecErrMode.GetTextExtent(text.txtDecErrMode)[0]
choiceDecErrMode = wx.Choice(
panel,
-1,
size = ((width,-1)),
choices=(text.strict, text.ignore, text.replace)
)
choiceDecErrMode.SetSelection(errDecMode)
choices = [text.systemPage % eg.systemEncoding, text.defaultIn]
choices.extend(codecsList)
inPageCtrl = wx.Choice(panel,-1,choices=choices)
inPageCtrl.SetSelection(inCoding)
lineNumLbl=wx.StaticText(panel, -1, text.lineNum)
fromLineNumCtrl = eg.SpinIntCtrl(
panel,
-1,
fromLine,
min = 1,
max = 999,
)
rb0 = panel.RadioButton(not direction, text.begin, style=wx.RB_GROUP)
rb1 = panel.RadioButton(direction, text.end)
lblAhead = wx.StaticText(panel, -1, text.readAhead)
lblBehind = wx.StaticText(panel, -1, text.readBehind)
linesNumCtrl = eg.SpinIntCtrl(
panel,
-1,
lines,
min = 0,
max = 999,
)
periodNumCtrl = eg.SpinNumCtrl(
panel,
-1,
period,
integerWidth = 5,
fractionWidth = 1,
allowNegative = False,
min = 0.1,
increment = 0.1,
)
intervalLbl = wx.StaticText(panel, -1, text.intervalLabel)
w0 = inPageCtrl.GetTextExtent(text.listNotIncluding)[0]
w1 = inPageCtrl.GetTextExtent(text.listIncluding)[0]
w2 = inPageCtrl.GetTextExtent(text.oneNotIncluding)[0]
w3 = inPageCtrl.GetTextExtent(text.oneIncluding)[0]
w4 = inPageCtrl.GetTextExtent(text.oneString)[0]
width = max(w0,w1,w2,w3,w4)+30
choiceMode = wx.Choice(panel,-1,size=(width,-1))
evtNameCtrl = wx.TextCtrl(panel,-1,evtName)
evtNameLbl = wx.StaticText(panel, -1, text.evtNameLabel)
triggerCtrl = wx.Choice(panel,-1, choices = text.triggerChoice)
triggerCtrl.SetSelection(trigger)
triggerLbl = wx.StaticText(panel, -1, text.triggerLabel)
#Sizers
topSizer = wx.FlexGridSizer(2,0,2,25)
topSizer.AddGrowableCol(0,1)
topSizer.AddGrowableCol(1,1)
topSizer.Add(inPageText,0,wx.EXPAND)
topSizer.Add(labelDecErrMode,0,wx.EXPAND)
topSizer.Add(inPageCtrl,0,wx.EXPAND)
topSizer.Add(choiceDecErrMode,0,wx.EXPAND)
fromSizer = wx.BoxSizer(wx.HORIZONTAL)
fromSizer.Add(lineNumLbl,0,wx.TOP,4)
fromSizer.Add(fromLineNumCtrl,0,wx.LEFT,10)
fromSizer.Add(rb0,0,wx.EXPAND|wx.LEFT,20)
fromSizer.Add(rb1,0,wx.EXPAND|wx.LEFT,15)
linesSizer = wx.BoxSizer(wx.HORIZONTAL)
linesSizer.Add(lblAhead,0, flag = wx.TOP, border = 4)
linesSizer.Add(linesNumCtrl,0,wx.LEFT|wx.RIGHT,8)
linesSizer.Add(lblBehind,0, flag = wx.TOP, border = 4)
periodSizer = wx.BoxSizer(wx.HORIZONTAL)
periodSizer.Add(intervalLbl,0, wx.TOP|wx.RIGHT, 4)
periodSizer.Add(periodNumCtrl,0, wx.RIGHT)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(fileText,0,wx.EXPAND)
mainSizer.Add(filepathCtrl,0,wx.EXPAND)
mainSizer.Add(topSizer,0,wx.TOP|wx.EXPAND,5)
mainSizer.Add(linesSizer,0,wx.TOP|wx.EXPAND,11)
mainSizer.Add(fromSizer,0,wx.TOP|wx.EXPAND,11)
bottomSizer = wx.FlexGridSizer(4,0,2,25)
bottomSizer.AddGrowableCol(0,1)
bottomSizer.AddGrowableCol(1,1)
bottomSizer.Add(labelMode,0,wx.EXPAND)
bottomSizer.Add((1,1))
bottomSizer.Add(choiceMode,0,wx.EXPAND)
bottomSizer.Add(periodSizer,0,wx.EXPAND|wx.RIGHT,3)
bottomSizer.Add(evtNameLbl,0,wx.TOP,8)
bottomSizer.Add(triggerLbl,0,wx.TOP,8)
bottomSizer.Add(evtNameCtrl,0,wx.EXPAND)
bottomSizer.Add(triggerCtrl,0,wx.EXPAND)
mainSizer.Add(bottomSizer,0,wx.TOP|wx.EXPAND,11)
panel.sizer.Add(mainSizer,0,wx.EXPAND)
def onLinesNumCtrl(event=None):
flag = False
if event:
self.mode = choiceMode.GetSelection()
if linesNumCtrl.GetValue() == 0:
fromLineNumCtrl.SetValue(1)
rb0.SetValue(True)
rb1.SetValue(False)
lineNumLbl.Enable(False)
fromLineNumCtrl.Enable(False)
rb0.Enable(False)
rb1.Enable(False)
else:
lineNumLbl.Enable(True)
fromLineNumCtrl.Enable(True)
rb0.Enable(True)
rb1.Enable(True)
if linesNumCtrl.GetValue() == 1:
choiceMode.Clear()
choiceMode.AppendItems(strings=(text.oneNotIncluding,text.oneIncluding))
else:
if len(choiceMode.GetStrings()) != 3:
choiceMode.Clear()
choiceMode.AppendItems(
strings=(text.listNotIncluding,text.listIncluding,text.oneString)
)
if self.mode == 2:
flag = True
if event:
choiceMode.SetSelection(0)
event.Skip()
if flag:
self.mode = 0
choiceMode.SetSelection(self.mode)
linesNumCtrl.Bind(wx.EVT_SPIN, onLinesNumCtrl)
onLinesNumCtrl()
while panel.Affirmed():
inCoding = inPageCtrl.GetSelection()
pgTpl = (eg.systemEncoding, 'utf8')
setup = [
inCoding,
filepathCtrl.GetValue(),
choiceMode.GetSelection(),
choiceDecErrMode.GetSelection(),
inPageCtrl.GetStringSelection() if inCoding > 1 else pgTpl[inCoding],
fromLineNumCtrl.GetValue(),
rb1.GetValue(),
linesNumCtrl.GetValue(),
periodNumCtrl.GetValue(),
evtNameCtrl.GetValue(),
triggerCtrl.GetSelection()
]
panel.SetResult(
setup
)
#===============================================================================
class Write(eg.ActionClass):
name = "Write text to file"
description = "Writes text to selected file."
class text:
TreeLabel = "Write %s to file: %s"
FilePath = "Output file:"
browseFileDialogTitle = "Choose the file"
txtModeMulti = "Mode of write"
overwrite = "File overwrite"
append = "Append to file"
newLine = "Append to file with new line"
writeToLog = "Write to EventGhost log too"
systemPage = "system code page (%s)"
defaultOut = "unicode (UTF-8)"
hexdump = "String write in the HexDump form"
inString = "Input text:"
logTimes = "Write Timestamp"
outputPage = "Output data coding:"
txtEncErrMode = "Error handling during encoding:"
strict = "Raise an exception"
ignore = "Ignore (skip bad chars)"
replace = "Replace bad chars"
internal = 'unicode internal'
def __call__(
self,
outCoding,
string = "",
fileName = '',
mode = 0,
errEncMode = 0,
log = False,
times = False,
hex = False,
outPage = "",
):
modeStr = 'w' if mode==0 else 'a'
stamp = time.strftime('%y-%m-%d %H:%M:%S')+' ' if times else ''
cr = '\r\n' if mode == 2 else ''
errorList = ('strict','ignore','replace')
string = eg.ParseString(string)
fileName = eg.ParseString(fileName)
if hex:
if outPage != 'unicode_internal':
string = string.encode(outPage,errorList[errEncMode])
string = String2Hex(string)
else:
string = String2Hex(string,'4')
outPage = 'ascii'
try:
file = codecs.open(fileName, modeStr, outPage, errorList[errEncMode])
except:
raise
try:
file.write('%s%s%s' % (stamp, string, cr))
except:
raise
try:
file.close()
except:
raise
if log:
print string
return string
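    # Behaviour note (editor's sketch): with times=True each write is prefixed
    # with a 'yy-mm-dd HH:MM:SS ' timestamp; mode 0 overwrites the file, mode 1
    # appends, and mode 2 appends and terminates the line with '\r\n'. With
    # hex=True the text is written as a hex dump instead of the raw characters.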
def GetLabel(
self,
outCoding,
string,
fileName,
mode,
errEncMode,
log,
times,
hex,
outPage,
):
return self.text.TreeLabel % (string, fileName)
def Configure(
self,
outCoding = 2,
string = "{eg.result}",
fileName = u'EG_WTTF.txt',
mode = 2,
errEncMode = 0,
log = False,
times = False,
hex = False,
outPage="",
):
from codecsList import codecsList
panel = eg.ConfigPanel(self)
text = self.text
#Controls
stringText = wx.StaticText(panel, -1, text.inString)
outPageText = wx.StaticText(panel, -1, text.outputPage)
labelEncErrMode = wx.StaticText(panel, -1, text.txtEncErrMode)
fileText = wx.StaticText(panel, -1, text.FilePath)
filepathCtrl = eg.FileBrowseButton(
panel,
-1,
initialValue=fileName,
labelText="",
fileMask="*.*",
buttonText=eg.text.General.browse,
dialogTitle=text.browseFileDialogTitle
)
width = labelEncErrMode.GetTextExtent(text.txtEncErrMode)[0]
choiceEncErrMode = wx.Choice(
panel,
-1,
size = ((width,-1)),
choices=(text.strict, text.ignore, text.replace)
)
stringCtrl = wx.TextCtrl(panel, -1, string, style=wx.TE_NOHIDESEL)
radioBoxMode = wx.RadioBox(
panel,
-1,
text.txtModeMulti,
choices=[text.overwrite, text.append, text.newLine],
style=wx.RA_SPECIFY_ROWS
)
radioBoxMode.SetSelection(mode)
choiceEncErrMode.SetSelection(errEncMode)
writeToLogCheckBox = wx.CheckBox(panel, -1, text.writeToLog)
writeToLogCheckBox.SetValue(log)
timesCheckBox = wx.CheckBox(panel, -1, text.logTimes)
timesCheckBox.SetValue(times)
hexCheckBox = wx.CheckBox(panel, -1, text.hexdump)
hexCheckBox.SetValue(hex)
choices = [text.internal, text.defaultOut, text.systemPage % eg.systemEncoding]
choices.extend(codecsList)
outPageCtrl = wx.Choice(panel,-1,choices=choices)
outPageCtrl.SetSelection(outCoding)
#Sizers
topSizer = wx.FlexGridSizer(5,0,1,15)
topSizer.AddGrowableCol(0,1)
topSizer.AddGrowableCol(1,1)
topSizer.Add(stringText,0,wx.EXPAND)
topSizer.Add(fileText,0,wx.EXPAND)
topSizer.Add(stringCtrl,0,wx.EXPAND)
topSizer.Add(filepathCtrl,0,wx.EXPAND)
topSizer.Add((1,7))
topSizer.Add((1,7))
topSizer.Add(outPageText,0,wx.EXPAND)
topSizer.Add(labelEncErrMode,0,wx.EXPAND)
topSizer.Add(outPageCtrl,0,wx.EXPAND)
topSizer.Add(choiceEncErrMode,0,wx.EXPAND)
chkBoxSizer = wx.BoxSizer(wx.VERTICAL)
chkBoxSizer.Add(writeToLogCheckBox,0,wx.TOP|wx.LEFT,12)
chkBoxSizer.Add(timesCheckBox,0,wx.TOP|wx.LEFT,12)
chkBoxSizer.Add(hexCheckBox,0,wx.TOP|wx.LEFT,12)
bottomSizer = wx.GridSizer(1,2,1,10)
bottomSizer.Add(radioBoxMode,0,wx.TOP|wx.EXPAND,5)
bottomSizer.Add(chkBoxSizer,1,wx.EXPAND)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(topSizer,0,wx.EXPAND)
mainSizer.Add(bottomSizer,0,wx.TOP|wx.EXPAND,10)
panel.sizer.Add(mainSizer,0,wx.EXPAND)
while panel.Affirmed():
outCoding = outPageCtrl.GetSelection()
pgTpl = ('unicode_internal', 'utf8', eg.systemEncoding)
panel.SetResult(
outCoding,
stringCtrl.GetValue(),
filepathCtrl.GetValue(),
radioBoxMode.GetSelection(),
choiceEncErrMode.GetSelection(),
writeToLogCheckBox.IsChecked(),
timesCheckBox.IsChecked(),
hexCheckBox.IsChecked(),
outPageCtrl.GetStringSelection() if outCoding > 2 else pgTpl[outCoding],
)
#===============================================================================
| gpl-2.0 | -7,533,842,122,160,694,000 | 35.27842 | 94 | 0.540271 | false |
googleapis/python-aiplatform | google/cloud/aiplatform_v1beta1/types/pipeline_state.py | 1 | 1155 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1", manifest={"PipelineState",},
)
class PipelineState(proto.Enum):
r"""Describes the state of a pipeline."""
PIPELINE_STATE_UNSPECIFIED = 0
PIPELINE_STATE_QUEUED = 1
PIPELINE_STATE_PENDING = 2
PIPELINE_STATE_RUNNING = 3
PIPELINE_STATE_SUCCEEDED = 4
PIPELINE_STATE_FAILED = 5
PIPELINE_STATE_CANCELLING = 6
PIPELINE_STATE_CANCELLED = 7
PIPELINE_STATE_PAUSED = 8
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 1,121,764,629,500,372,100 | 30.216216 | 75 | 0.716017 | false |
javierag/samba | lib/subunit/python/subunit/iso8601.py | 14 | 4587 | # Copyright (c) 2007 Michael Twomey
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""ISO 8601 date time string parsing
Basic usage:
>>> import iso8601
>>> iso8601.parse_date("2007-01-25T12:00:00Z")
datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
>>>
"""
from datetime import datetime, timedelta, tzinfo
import re
import sys
__all__ = ["parse_date", "ParseError"]
# Adapted from http://delete.me.uk/2005/03/iso8601.html
ISO8601_REGEX_PATTERN = (r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?"
r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
)
TIMEZONE_REGEX_PATTERN = "(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})"
ISO8601_REGEX = re.compile(ISO8601_REGEX_PATTERN.encode('utf8'))
TIMEZONE_REGEX = re.compile(TIMEZONE_REGEX_PATTERN.encode('utf8'))
zulu = "Z".encode('latin-1')
minus = "-".encode('latin-1')
if sys.version_info < (3, 0):
bytes = str
class ParseError(Exception):
"""Raised when there is a problem parsing a date string"""
# Yoinked from python docs
ZERO = timedelta(0)
class Utc(tzinfo):
"""UTC
"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
UTC = Utc()
class FixedOffset(tzinfo):
"""Fixed offset in hours and minutes from UTC
"""
def __init__(self, offset_hours, offset_minutes, name):
self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
def __repr__(self):
return "<FixedOffset %r>" % self.__name
def parse_timezone(tzstring, default_timezone=UTC):
"""Parses ISO 8601 time zone specs into tzinfo offsets
"""
if tzstring == zulu:
return default_timezone
# This isn't strictly correct, but it's common to encounter dates without
# timezones so I'll assume the default (which defaults to UTC).
# Addresses issue 4.
if tzstring is None:
return default_timezone
m = TIMEZONE_REGEX.match(tzstring)
prefix, hours, minutes = m.groups()
hours, minutes = int(hours), int(minutes)
if prefix == minus:
hours = -hours
minutes = -minutes
return FixedOffset(hours, minutes, tzstring)
def parse_date(datestring, default_timezone=UTC):
"""Parses ISO 8601 dates into datetime objects
The timezone is parsed from the date string. However it is quite common to
have dates without a timezone (not strictly correct). In this case the
default timezone specified in default_timezone is used. This is UTC by
default.
"""
if not isinstance(datestring, bytes):
raise ParseError("Expecting bytes %r" % datestring)
m = ISO8601_REGEX.match(datestring)
if not m:
raise ParseError("Unable to parse date string %r" % datestring)
groups = m.groupdict()
tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
if groups["fraction"] is None:
groups["fraction"] = 0
else:
groups["fraction"] = int(float("0.%s" % groups["fraction"].decode()) * 1e6)
return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
int(groups["fraction"]), tz)
| gpl-3.0 | -7,549,753,754,683,362,000 | 33.488722 | 112 | 0.661871 | false |
houqp/floyd-cli | tests/cli/data/delete_test.py | 1 | 7277 | from click.testing import CliRunner
import unittest
from mock import patch, call
from floyd.cli.data import delete
from floyd.model.experiment_config import ExperimentConfig
from tests.cli.data.mocks import mock_data, mock_project_client, mock_access_token
from tests.cli.mocks import mock_data_config
from tests.cli import assert_exit_code
class TestDataDelete(unittest.TestCase):
"""
Tests CLI's data delete functionality `floyd data delete`
"""
def setUp(self):
self.runner = CliRunner()
@patch('floyd.cli.data.DataClient')
def test_with_no_arguments(self, data_client):
result = self.runner.invoke(delete)
# No calls to api, exit 0
data_client.assert_not_called()
assert_exit_code(result, 0)
@patch('floyd.manager.data_config.DataConfigManager.get_config', side_effect=mock_data_config)
@patch('floyd.manager.experiment_config.ExperimentConfigManager.get_config', return_value=ExperimentConfig('foo', '12345'))
@patch('floyd.client.project.ProjectClient', side_effect=mock_project_client)
@patch('floyd.manager.auth_config.AuthConfigManager.get_access_token', side_effect=mock_access_token)
@patch('floyd.manager.auth_config.AuthConfigManager.get_auth_header', return_value="Bearer " + mock_access_token().token)
@patch('floyd.model.access_token.assert_token_not_expired')
@patch('floyd.cli.data.DataClient.get', side_effect=mock_data)
@patch('floyd.cli.data.DataClient.delete', return_value=True)
def test_with_multiple_ids_and_yes_option(self,
delete_data,
get_data,
assert_token_not_expired,
get_auth_header,
get_access_token,
get_project,
get_expt_config,
get_data_config):
id_1 = 'mckay/datasets/foo/1'
id_2 = 'mckay/datasets/bar/1'
id_3 = 'mckay/datasets/foo/1'
result = self.runner.invoke(delete, ['-y', id_1, id_2, id_3])
assert_exit_code(result, 0)
# Trigger a get and a delete for each id
calls = [call(id_1), call(id_2), call(id_3)]
get_data.assert_has_calls(calls, any_order=True)
delete_data.assert_has_calls(calls, any_order=True)
@patch('floyd.manager.data_config.DataConfigManager.get_config', side_effect=mock_data_config)
@patch('floyd.manager.experiment_config.ExperimentConfigManager.get_config', return_value=ExperimentConfig('foo', '12345'))
@patch('floyd.client.project.ProjectClient', side_effect=mock_project_client)
@patch('floyd.manager.auth_config.AuthConfigManager.get_access_token')
@patch('floyd.manager.auth_config.AuthConfigManager.get_auth_header')
@patch('floyd.model.access_token.assert_token_not_expired')
@patch('floyd.cli.data.DataClient.get', side_effect=mock_data)
@patch('floyd.cli.data.DataClient.delete', return_value=True)
def test_delete_without_yes_option(self,
delete_data,
get_data,
assert_token_not_expired,
get_auth_header,
get_access_token,
project_client,
get_expt_config,
get_data_config):
id_1 = 'mckay/datasets/foo/1'
id_2 = 'mckay/datasets/bar/1'
id_3 = 'mckay/datasets/foo/1'
# Tell prompt to skip id_1 and id_3
result = self.runner.invoke(delete,
[id_1, id_2, id_3],
input='n\nY\nn\n')
# Triggers a get for all ids
calls = [call(id_1), call(id_2), call(id_3)]
get_data.assert_has_calls(calls, any_order=True)
# Calls delete for only id_2
delete_data.assert_called_once_with(id_2)
assert_exit_code(result, 0)
@patch('floyd.manager.data_config.DataConfigManager.get_config', side_effect=mock_data_config)
@patch('floyd.manager.experiment_config.ExperimentConfigManager.get_config', return_value=ExperimentConfig('foo', '12345'))
@patch('floyd.client.project.ProjectClient', side_effect=mock_project_client)
@patch('floyd.manager.auth_config.AuthConfigManager.get_access_token')
@patch('floyd.manager.auth_config.AuthConfigManager.get_auth_header')
@patch('floyd.model.access_token.assert_token_not_expired')
@patch('floyd.cli.data.DataClient.get', side_effect=mock_data)
@patch('floyd.cli.data.DataClient.delete', return_value=False)
def test_failed_delete(self,
delete_data,
get_data,
assert_token_not_expired,
get_auth_header,
get_access_token,
project_client,
get_expt_config,
get_data_config):
id_1 = 'mckay/datasets/foo/1'
id_2 = 'mckay/datasets/bar/1'
id_3 = 'mckay/datasets/foo/1'
result = self.runner.invoke(delete, ['-y', id_1, id_2, id_3])
# Trigger a get and a delete for each id, even though each delete
# fails. All deletes are attempted regardless of previous failures.
calls = [call(id_1), call(id_2), call(id_3)]
get_data.assert_has_calls(calls, any_order=True)
delete_data.assert_has_calls(calls, any_order=True)
# Exit 1 for failed deletes
assert_exit_code(result, 1)
@patch('floyd.manager.data_config.DataConfigManager.get_config', side_effect=mock_data_config)
@patch('floyd.manager.experiment_config.ExperimentConfigManager.get_config', return_value=ExperimentConfig('foo', '12345'))
@patch('floyd.manager.auth_config.AuthConfigManager.get_access_token')
@patch('floyd.manager.auth_config.AuthConfigManager.get_auth_header')
@patch('floyd.model.access_token.assert_token_not_expired')
@patch('floyd.cli.data.DataClient.get', return_value=None)
@patch('floyd.cli.data.DataClient.delete')
def test_failed_get(self,
delete_data,
get_data,
assert_token_not_expired,
get_auth_header,
get_access_token,
get_expt_config,
get_data_config):
id_1 = 'mckay/datasets/foo/1'
id_2 = 'mckay/datasets/bar/1'
id_3 = 'mckay/datasets/foo/1'
result = self.runner.invoke(delete, ['-y', id_1, id_2, id_3])
# Trigger a get for each id, even though each fails. (No early exit)
calls = [call(id_1), call(id_2), call(id_3)]
get_data.assert_has_calls(calls, any_order=True)
# Deletes are not triggered for ids that are not found
delete_data.assert_not_called()
# Exit 1 for failed get requests
assert_exit_code(result, 1)
| apache-2.0 | -7,629,995,316,263,644,000 | 46.253247 | 127 | 0.585681 | false |
laurent-george/weboob | modules/residentadvisor/test.py | 7 | 1605 | # -*- coding: utf-8 -*-
# Copyright(C) 2014 Alexandre Morignot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.capabilities.calendar import Query
from datetime import datetime, timedelta
class ResidentadvisorTest(BackendTest):
MODULE = 'residentadvisor'
def test_searchcity(self):
query = Query()
query.city = u'Melbourne'
self.assertTrue(len(list(self.backend.search_events(query))) > 0)
event = self.backend.search_events(query).next()
self.assertTrue(self.backend.get_event(event.id))
def test_datefrom(self):
query = Query()
later = (datetime.now() + timedelta(days=31))
query.start_date = later
event = self.backend.search_events(query).next()
self.assertTrue(later.date() <= event.start_date.date())
event = self.backend.get_event(event.id)
self.assertTrue(later.date() <= event.start_date.date())
| agpl-3.0 | -1,187,164,371,780,062,200 | 32.4375 | 77 | 0.701558 | false |
ninnux/exscript | tests/Exscript/util/ipTest.py | 6 | 1044 | import sys, unittest, re, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
import Exscript.util.ip
class ipTest(unittest.TestCase):
CORRELATE = Exscript.util.ip
def testIsIp(self):
from Exscript.util.ip import is_ip
self.assert_(is_ip('0.0.0.0'))
self.assert_(is_ip('::'))
self.assert_(not is_ip('1'))
def testNormalizeIp(self):
from Exscript.util.ip import normalize_ip
self.assertEqual(normalize_ip('0.128.255.0'), '000.128.255.000')
self.assertEqual(normalize_ip('1234:0:01:02::'),
'1234:0000:0001:0002:0000:0000:0000:0000')
def testCleanIp(self):
from Exscript.util.ip import clean_ip
self.assertEqual(clean_ip('192.168.010.001'), '192.168.10.1')
self.assertEqual(clean_ip('1234:0:0:0:0:0:0:000A'), '1234::a')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(ipTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
| gpl-2.0 | 6,527,308,790,610,414,000 | 35 | 84 | 0.616858 | false |
kkmonlee/Project-Euler-Solutions | Python/p356v2.py | 1 | 2577 | import sys
class Matrix():
def __init__(self, entries):
self.entries = entries
def __mul__(self, other):
result = [[0 for j in range(len(other.entries[0]))] for i in range(len(self.entries))]
for i in range(len(self.entries)):
for j in range(len(other.entries[0])):
for k in range(len(other.entries)):
result[i][j] += self.entries[i][k] * other.entries[k][j]
return Matrix(result)
def __mod__(self, mod):
if mod:
for i in range(len(self.entries)):
for j in range(len(self.entries[0])):
self.entries[i][j] %= mod
return self
def __pow__(self, n, mod = None):
assert(n > 0)
if n == 1:
return self.__mod__(mod)
half = self.__pow__(n >> 1, mod)
if n & 1 == 1:
return half.__mul__(half).__mul__(self).__mod__(mod)
else:
return half.__mul__(half).__mod__(mod)
def __str__(self):
return str(self.entries)
class HomogenousRecurrence():
def __init__(self, coefficients, initial_values):
assert(len(coefficients) == len(initial_values))
self.dim = len(coefficients)
self.companion_matrix = self.__init__companion_matrix(coefficients)
self.initial_state = self.__init__initial_state(initial_values)
def __init__companion_matrix(self, coefficients):
entries = [[0 for j in range(self.dim)] for i in range(self.dim)]
for i in range(self.dim):
entries[0][i] = coefficients[i]
for i in range(1, self.dim):
entries[i][i - 1] = 1
return Matrix(entries)
def __init__initial_state(self, initial_values):
entries = [[value] for value in initial_values]
return Matrix(entries)
def get(self, n, mod = None):
if n < self.dim:
value = self.initial_state.entries[self.dim - n - 1][0]
return value % mod if mod else value
else:
return ((pow(self.companion_matrix, n - self.dim + 1, mod) * self.initial_state) % mod).entries[0][0]
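# Illustrative example (editor's sketch, not part of the original solution):
# the same companion-matrix machinery handles any linear recurrence, e.g.
# Fibonacci with F(0) = F(1) = 1; initial values are passed newest-term first.
def _example_fibonacci(n, mod=None):
    # a(k) = 1*a(k-1) + 1*a(k-2), initial state [F(1), F(0)] = [1, 1]
    return HomogenousRecurrence([1, 1], [1, 1]).get(n, mod)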
class Problem():
def solve(self):
result = 0
for i in range(1, 30 + 1):
result = (result + self.get(i, 987654321, 10**8)) % 10**8
print(result)
def get(self, n, power, mod):
recurrence = HomogenousRecurrence([2**n, 0, -n], [4**n, 2**n, 3])
return recurrence.get(power, mod) - 1
def main():
problem = Problem()
problem.solve()
if __name__ == '__main__':
sys.exit(main()) | gpl-3.0 | -250,995,158,572,273,570 | 32.921053 | 113 | 0.540163 | false |
ryfeus/lambda-packs | Opencv_pil/source/PIL/PixarImagePlugin.py | 25 | 1671 | #
# The Python Imaging Library.
# $Id$
#
# PIXAR raster support for PIL
#
# history:
# 97-01-29 fl Created
#
# notes:
# This is incomplete; it is based on a few samples created with
# Photoshop 2.5 and 3.0, and a summary description provided by
# Greg Coats <[email protected]>. Hopefully, "L" and
# "RGBA" support will be added in future versions.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFile, _binary
__version__ = "0.1"
#
# helpers
i16 = _binary.i16le
def _accept(prefix):
return prefix[:4] == b"\200\350\000\000"
##
# Image plugin for PIXAR raster images.
class PixarImageFile(ImageFile.ImageFile):
format = "PIXAR"
format_description = "PIXAR raster image"
def _open(self):
# assuming a 4-byte magic label
s = self.fp.read(4)
if s != b"\200\350\000\000":
raise SyntaxError("not a PIXAR file")
# read rest of header
s = s + self.fp.read(508)
self.size = i16(s[418:420]), i16(s[416:418])
# get channel/depth descriptions
mode = i16(s[424:426]), i16(s[426:428])
if mode == (14, 2):
self.mode = "RGB"
# FIXME: to be continued...
# create tile descriptor (assuming "dumped")
self.tile = [("raw", (0, 0)+self.size, 1024, (self.mode, 0, 1))]
#
# --------------------------------------------------------------------
Image.register_open(PixarImageFile.format, PixarImageFile, _accept)
Image.register_extension(PixarImageFile.format, ".pxr")
| mit | 3,788,393,903,357,963,300 | 22.535211 | 72 | 0.591263 | false |
ViDA-NYU/reprozip | reprozip/reprozip/pack.py | 1 | 7952 | # Copyright (C) 2014-2017 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
"""Packing logic for reprozip.
This module contains the :func:`~reprozip.pack.pack` function and associated
utilities that are used to build the .rpz pack file from the trace SQLite file
and config YAML.
"""
from __future__ import division, print_function, unicode_literals
import itertools
import logging
import os
from rpaths import Path
import string
import sys
import tarfile
import uuid
from reprozip import __version__ as reprozip_version
from reprozip.common import File, load_config, save_config, \
record_usage_package
from reprozip.tracer.linux_pkgs import identify_packages
from reprozip.traceutils import combine_files
from reprozip.utils import iteritems
logger = logging.getLogger('reprozip')
def expand_patterns(patterns):
files = set()
dirs = set()
# Finds all matching paths
for pattern in patterns:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Expanding pattern %r into %d paths",
pattern,
len(list(Path('/').recursedir(pattern))))
for path in Path('/').recursedir(pattern):
if path.is_dir():
dirs.add(path)
else:
files.add(path)
# Don't include directories whose files are included
non_empty_dirs = set([Path('/')])
for p in files | dirs:
path = Path('/')
        for c in p.components[1:-1]:
            path = path / c
            non_empty_dirs.add(path)
# Builds the final list
return [File(p) for p in itertools.chain(dirs - non_empty_dirs, files)]
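# Example (editor's sketch, not part of upstream reprozip): with
# additional_patterns such as ['/usr/share/myapp/**', '/etc/myapp.conf'],
# expand_patterns() walks the live filesystem, yields a File entry for every
# match, and keeps a matched directory only when nothing else matched below it.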
def canonicalize_config(packages, other_files, additional_patterns,
sort_packages):
"""Expands ``additional_patterns`` from the configuration file.
"""
if additional_patterns:
add_files = expand_patterns(additional_patterns)
logger.info("Found %d files from expanding additional_patterns...",
len(add_files))
if add_files:
if sort_packages:
add_files, add_packages = identify_packages(add_files)
else:
add_packages = []
other_files, packages = combine_files(add_files, add_packages,
other_files, packages)
return packages, other_files
def data_path(filename, prefix=Path('DATA')):
"""Computes the filename to store in the archive.
Turns an absolute path containing '..' into a filename without '..', and
prefixes with DATA/.
Example:
>>> data_path(PosixPath('/var/lib/../../../../tmp/test'))
PosixPath(b'DATA/tmp/test')
>>> data_path(PosixPath('/var/lib/../www/index.html'))
PosixPath(b'DATA/var/www/index.html')
"""
return prefix / filename.split_root()[1]
class PackBuilder(object):
"""Higher layer on tarfile that adds intermediate directories.
"""
def __init__(self, filename):
self.tar = tarfile.open(str(filename), 'w:gz')
self.seen = set()
def add_data(self, filename):
if filename in self.seen:
return
path = Path('/')
for c in filename.components[1:]:
path = path / c
if path in self.seen:
continue
logger.debug("%s -> %s", path, data_path(path))
self.tar.add(str(path), str(data_path(path)), recursive=False)
self.seen.add(path)
def close(self):
self.tar.close()
self.seen = None
def pack(target, directory, sort_packages):
"""Main function for the pack subcommand.
"""
if target.exists():
# Don't overwrite packs...
logger.critical("Target file exists!")
sys.exit(1)
# Reads configuration
configfile = directory / 'config.yml'
if not configfile.is_file():
logger.critical("Configuration file does not exist!\n"
"Did you forget to run 'reprozip trace'?\n"
"If not, you might want to use --dir to specify an "
"alternate location.")
sys.exit(1)
runs, packages, other_files = config = load_config(
configfile,
canonical=False)
additional_patterns = config.additional_patterns
inputs_outputs = config.inputs_outputs
# Validate run ids
run_chars = ('0123456789_-@() .:%'
'abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
for i, run in enumerate(runs):
if (any(c not in run_chars for c in run['id']) or
all(c in string.digits for c in run['id'])):
logger.critical("Illegal run id: %r (run number %d)",
run['id'], i)
sys.exit(1)
# Canonicalize config (re-sort, expand 'additional_files' patterns)
packages, other_files = canonicalize_config(
packages, other_files, additional_patterns, sort_packages)
logger.info("Creating pack %s...", target)
tar = tarfile.open(str(target), 'w:')
fd, tmp = Path.tempfile()
os.close(fd)
try:
datatar = PackBuilder(tmp)
# Add the files from the packages
for pkg in packages:
if pkg.packfiles:
logger.info("Adding files from package %s...", pkg.name)
files = []
for f in pkg.files:
if not Path(f.path).exists():
logger.warning("Missing file %s from package %s",
f.path, pkg.name)
else:
datatar.add_data(f.path)
files.append(f)
pkg.files = files
else:
logger.info("NOT adding files from package %s", pkg.name)
# Add the rest of the files
logger.info("Adding other files...")
files = set()
for f in other_files:
if not Path(f.path).exists():
logger.warning("Missing file %s", f.path)
else:
datatar.add_data(f.path)
files.add(f)
other_files = files
datatar.close()
tar.add(str(tmp), 'DATA.tar.gz')
finally:
tmp.remove()
logger.info("Adding metadata...")
# Stores pack version
fd, manifest = Path.tempfile(prefix='reprozip_', suffix='.txt')
os.close(fd)
try:
with manifest.open('wb') as fp:
fp.write(b'REPROZIP VERSION 2\n')
tar.add(str(manifest), 'METADATA/version')
finally:
manifest.remove()
# Stores the original trace
trace = directory / 'trace.sqlite3'
if not trace.is_file():
logger.critical("trace.sqlite3 is gone! Aborting")
sys.exit(1)
tar.add(str(trace), 'METADATA/trace.sqlite3')
# Checks that input files are packed
for name, f in iteritems(inputs_outputs):
if f.read_runs and not Path(f.path).exists():
logger.warning("File is designated as input (name %s) but is not "
"to be packed: %s", name, f.path)
# Generates a unique identifier for the pack (for usage reports purposes)
pack_id = str(uuid.uuid4())
# Stores canonical config
fd, can_configfile = Path.tempfile(suffix='.yml', prefix='rpz_config_')
os.close(fd)
try:
save_config(can_configfile, runs, packages, other_files,
reprozip_version,
inputs_outputs, canonical=True,
pack_id=pack_id)
tar.add(str(can_configfile), 'METADATA/config.yml')
finally:
can_configfile.remove()
tar.close()
# Record some info to the usage report
record_usage_package(runs, packages, other_files,
inputs_outputs,
pack_id)
| bsd-3-clause | -1,183,094,330,447,568,000 | 31.995851 | 79 | 0.577968 | false |
belokop/indico_bare | indico/modules/events/payment/util.py | 2 | 3173 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
from indico.core.db import db
from indico.core.plugins import plugin_engine
from indico.modules.events.payment import PaymentPluginMixin
from indico.modules.events.payment.notifications import notify_double_payment
from indico.modules.events.payment.models.transactions import PaymentTransaction, TransactionStatus
from indico.modules.events.registration.notifications import notify_registration_state_update
remove_prefix_re = re.compile('^payment_')
def get_payment_plugins():
"""Returns a dict containing the available payment plugins."""
return {remove_prefix_re.sub('', p.name): p for p in plugin_engine.get_active_plugins().itervalues()
if isinstance(p, PaymentPluginMixin)}
def get_active_payment_plugins(event):
"""Returns a dict containing the active payment plugins of an event."""
return {name: plugin for name, plugin in get_payment_plugins().iteritems()
if plugin.event_settings.get(event, 'enabled')}
def register_transaction(registration, amount, currency, action, provider=None, data=None):
"""Creates a new transaction for a certain transaction action.
:param registration: the `Registration` associated to the transaction
:param amount: the (strictly positive) amount of the transaction
:param currency: the currency used for the transaction
:param action: the `TransactionAction` of the transaction
:param provider: the payment method name of the transaction,
or '_manual' if no payment method has been used
:param data: arbitrary JSON-serializable data specific to the
transaction's provider
"""
new_transaction, double_payment = PaymentTransaction.create_next(registration=registration, action=action,
amount=amount, currency=currency,
provider=provider, data=data)
if new_transaction:
db.session.flush()
if double_payment:
notify_double_payment(registration)
if new_transaction.status == TransactionStatus.successful:
registration.update_state(paid=True)
elif new_transaction.status == TransactionStatus.cancelled:
registration.update_state(paid=False)
notify_registration_state_update(registration)
return new_transaction
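# Usage sketch (editor's note, not part of Indico): a successful card payment
# would typically be recorded along the lines of
#   register_transaction(registration, amount=10, currency='EUR',
#                        action=TransactionAction.complete, provider='paypal',
#                        data={'txn_id': '...'})
# with TransactionAction imported from the transactions model module; on a
# successful transaction the helper marks the registration as paid and sends
# the registration-state notification.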
| gpl-3.0 | -308,189,249,017,586,370 | 46.358209 | 110 | 0.710054 | false |
Joergen/zamboni | mkt/developers/views_payments.py | 1 | 14470 | import json
import urllib
from datetime import datetime
from django import http
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, redirect
import commonware
from curling.lib import HttpClientError
import jingo
from jingo import helpers
import jinja2
import waffle
from tower import ugettext as _
from waffle.decorators import waffle_switch
import amo
from access import acl
from amo import messages
from amo.decorators import json_view, login_required, post_required, write
from amo.urlresolvers import reverse
from constants.payments import (PAYMENT_METHOD_ALL,
PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR)
from lib.crypto import generate_key
from lib.pay_server import client
from market.models import Price
from mkt.constants import DEVICE_LOOKUP
from mkt.developers.decorators import dev_required
from mkt.developers.models import (CantCancel, PaymentAccount, UserInappKey,
uri_to_pk)
from . import forms, forms_payments
log = commonware.log.getLogger('z.devhub')
@dev_required
@post_required
def disable_payments(request, addon_id, addon):
addon.update(wants_contributions=False)
return redirect(addon.get_dev_url('payments'))
@dev_required(owner_for_post=True, webapp=True)
def payments(request, addon_id, addon, webapp=False):
premium_form = forms_payments.PremiumForm(
request.POST or None, request=request, addon=addon,
user=request.amo_user)
region_form = forms.RegionForm(
request.POST or None, product=addon, request=request)
upsell_form = forms_payments.UpsellForm(
request.POST or None, addon=addon, user=request.amo_user)
bango_account_list_form = forms_payments.BangoAccountListForm(
request.POST or None, addon=addon, user=request.amo_user)
if request.method == 'POST':
success = all(form.is_valid() for form in
[premium_form, region_form, upsell_form,
bango_account_list_form])
if success:
region_form.save()
try:
premium_form.save()
except client.Error as err:
success = False
log.error('Error setting payment information (%s)' % err)
messages.error(
request, _(u'We encountered a problem connecting to the '
u'payment server.'))
raise # We want to see these exceptions!
is_free_inapp = addon.premium_type == amo.ADDON_FREE_INAPP
is_now_paid = (addon.premium_type in amo.ADDON_PREMIUMS
or is_free_inapp)
# If we haven't changed to a free app, check the upsell.
if is_now_paid and success:
try:
if not is_free_inapp:
upsell_form.save()
bango_account_list_form.save()
except client.Error as err:
log.error('Error saving payment information (%s)' % err)
messages.error(
request, _(u'We encountered a problem connecting to '
u'the payment server.'))
success = False
raise # We want to see all the solitude errors now.
# If everything happened successfully, give the user a pat on the back.
if success:
messages.success(request, _('Changes successfully saved.'))
return redirect(addon.get_dev_url('payments'))
# TODO: This needs to be updated as more platforms support payments.
cannot_be_paid = (
addon.premium_type == amo.ADDON_FREE and
any(premium_form.device_data['free-%s' % x] == y for x, y in
[('android-mobile', True), ('android-tablet', True),
('desktop', True), ('firefoxos', False)]))
try:
tier_zero = Price.objects.get(price='0.00', active=True)
tier_zero_id = tier_zero.pk
except Price.DoesNotExist:
tier_zero = None
tier_zero_id = ''
# Get the regions based on tier zero. This should be all the
# regions with payments enabled.
paid_region_ids_by_slug = []
if tier_zero:
paid_region_ids_by_slug = tier_zero.region_ids_by_slug()
return jingo.render(
request, 'developers/payments/premium.html',
{'addon': addon, 'webapp': webapp, 'premium': addon.premium,
'form': premium_form, 'upsell_form': upsell_form,
'tier_zero_id': tier_zero_id,
'region_form': region_form,
'DEVICE_LOOKUP': DEVICE_LOOKUP,
'is_paid': (addon.premium_type in amo.ADDON_PREMIUMS
or addon.premium_type == amo.ADDON_FREE_INAPP),
'no_paid': cannot_be_paid,
'is_incomplete': addon.status == amo.STATUS_NULL,
'is_packaged': addon.is_packaged,
# Bango values
'bango_account_form': forms_payments.BangoPaymentAccountForm(),
'bango_account_list_form': bango_account_list_form,
# Waffles
'payments_enabled':
waffle.flag_is_active(request, 'allow-b2g-paid-submission') and
not waffle.switch_is_active('disabled-payments'),
'api_pricelist_url':
reverse('api_dispatch_list', kwargs={'resource_name': 'prices',
'api_name': 'webpay'}),
'payment_methods': {
PAYMENT_METHOD_ALL: _('All'),
PAYMENT_METHOD_CARD: _('Credit card'),
PAYMENT_METHOD_OPERATOR: _('Carrier'),
},
'all_paid_region_ids_by_slug': paid_region_ids_by_slug,
})
@login_required
@json_view
def payment_accounts(request):
app_slug = request.GET.get('app-slug', '')
accounts = PaymentAccount.objects.filter(
user=request.amo_user, inactive=False)
def account(acc):
app_names = (', '.join(unicode(apa.addon.name)
for apa in acc.addonpaymentaccount_set.all()))
data = {
'id': acc.pk,
'name': jinja2.escape(unicode(acc)),
'app-names': jinja2.escape(app_names),
'account-url':
reverse('mkt.developers.bango.payment_account', args=[acc.pk]),
'delete-url':
reverse('mkt.developers.bango.delete_payment_account',
args=[acc.pk]),
'agreement-url': acc.get_agreement_url(),
'agreement': 'accepted' if acc.agreed_tos else 'rejected',
'shared': acc.shared
}
if waffle.switch_is_active('bango-portal') and app_slug:
data['portal-url'] = reverse(
'mkt.developers.apps.payments.bango_portal_from_addon',
args=[app_slug])
return data
return map(account, accounts)
@login_required
def payment_accounts_form(request):
bango_account_form = forms_payments.BangoAccountListForm(
user=request.amo_user, addon=None)
return jingo.render(
request, 'developers/payments/includes/bango_accounts_form.html',
{'bango_account_list_form': bango_account_form})
@write
@post_required
@login_required
@json_view
def payments_accounts_add(request):
form = forms_payments.BangoPaymentAccountForm(request.POST)
if not form.is_valid():
return json_view.error(form.errors)
try:
obj = PaymentAccount.create_bango(request.amo_user, form.cleaned_data)
except HttpClientError as e:
log.error('Client error create Bango account; %s' % e)
return http.HttpResponseBadRequest(json.dumps(e.content))
return {'pk': obj.pk, 'agreement-url': obj.get_agreement_url()}
@write
@login_required
@json_view
def payments_account(request, id):
account = get_object_or_404(PaymentAccount, pk=id, user=request.user)
if request.POST:
form = forms_payments.BangoPaymentAccountForm(
request.POST, account=account)
if form.is_valid():
form.save()
else:
return json_view.error(form.errors)
return account.get_details()
@write
@post_required
@login_required
def payments_accounts_delete(request, id):
account = get_object_or_404(PaymentAccount, pk=id, user=request.user)
try:
account.cancel(disable_refs=True)
except CantCancel:
log.info('Could not cancel account.')
return http.HttpResponse('Cannot cancel account', status=409)
log.info('Account cancelled: %s' % id)
return http.HttpResponse('success')
@login_required
@waffle_switch('in-app-sandbox')
def in_app_keys(request):
keys = (UserInappKey.objects.no_cache()
.filter(solitude_seller__user=request.amo_user))
# TODO(Kumar) support multiple test keys. For now there's only one.
if keys.count():
key = keys.get()
else:
key = None
if request.method == 'POST':
if key:
key.reset()
messages.success(request, _('Secret was reset successfully.'))
else:
UserInappKey.create(request.amo_user)
messages.success(request,
_('Key and secret were created successfully.'))
return redirect(reverse('mkt.developers.apps.in_app_keys'))
return jingo.render(request, 'developers/payments/in-app-keys.html',
{'key': key})
@login_required
@waffle_switch('in-app-sandbox')
def in_app_key_secret(request, pk):
key = (UserInappKey.objects.no_cache()
.filter(solitude_seller__user=request.amo_user, pk=pk))
if not key.count():
# Either the record does not exist or it's not owned by the
# logged in user.
return http.HttpResponseForbidden()
return http.HttpResponse(key.get().secret())
@login_required
@waffle_switch('in-app-payments')
@dev_required(owner_for_post=True, webapp=True)
def in_app_config(request, addon_id, addon, webapp=True):
inapp = addon.premium_type in amo.ADDON_INAPPS
if not inapp:
messages.error(request,
_('Your app is not configured for in-app payments.'))
return redirect(reverse('mkt.developers.apps.payments',
args=[addon.app_slug]))
try:
account = addon.app_payment_account
except ObjectDoesNotExist:
messages.error(request, _('No payment account for this app.'))
return redirect(reverse('mkt.developers.apps.payments',
args=[addon.app_slug]))
seller_config = get_seller_product(account)
owner = acl.check_addon_ownership(request, addon)
if request.method == 'POST':
# Reset the in-app secret for the app.
(client.api.generic
.product(seller_config['resource_pk'])
.patch(data={'secret': generate_key(48)}))
messages.success(request, _('Changes successfully saved.'))
return redirect(reverse('mkt.developers.apps.in_app_config',
args=[addon.app_slug]))
return jingo.render(request, 'developers/payments/in-app-config.html',
{'addon': addon, 'owner': owner,
'seller_config': seller_config})
@login_required
@waffle_switch('in-app-payments')
@dev_required(webapp=True)
def in_app_secret(request, addon_id, addon, webapp=True):
seller_config = get_seller_product(addon.app_payment_account)
return http.HttpResponse(seller_config['secret'])
@waffle_switch('bango-portal')
@dev_required(webapp=True)
def bango_portal_from_addon(request, addon_id, addon, webapp=True):
if not ((addon.authors.filter(user=request.user,
addonuser__role=amo.AUTHOR_ROLE_OWNER).exists()) and
(addon.app_payment_account.payment_account.solitude_seller.user.id
== request.user.id)):
log.error(('User not allowed to reach the Bango portal; '
'pk=%s') % request.user.pk)
return http.HttpResponseForbidden()
package_id = addon.app_payment_account.payment_account.bango_package_id
return _redirect_to_bango_portal(package_id, 'addon_id: %s' % addon_id)
def _redirect_to_bango_portal(package_id, source):
try:
bango_token = client.api.bango.login.post({'packageId': package_id})
except HttpClientError as e:
log.error('Failed to authenticate against Bango portal; %s' % source,
exc_info=True)
return http.HttpResponseBadRequest(json.dumps(e.content))
bango_url = '{base_url}{parameters}'.format(**{
'base_url': settings.BANGO_BASE_PORTAL_URL,
'parameters': urllib.urlencode({
'authenticationToken': bango_token['authentication_token'],
'emailAddress': bango_token['email_address'],
'packageId': package_id,
'personId': bango_token['person_id'],
})
})
response = http.HttpResponse(status=204)
response['Location'] = bango_url
return response
def get_seller_product(account):
"""
Get the solitude seller_product for a payment account object.
"""
bango_product = (client.api.bango
.product(uri_to_pk(account.product_uri))
.get_object_or_404())
# TODO(Kumar): we can optimize this by storing the seller_product
# when we create it in developers/models.py or allowing solitude
# to filter on both fields.
return (client.api.generic
.product(uri_to_pk(bango_product['seller_product']))
.get_object_or_404())
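# Illustrative usage sketch (not part of the original views): this is how the
# in-app views above use the helper for an app with a configured payment account.
#
#   seller_config = get_seller_product(addon.app_payment_account)
#   seller_config['secret']       # the in-app secret returned by in_app_secret()
#   seller_config['resource_pk']  # solitude pk patched by in_app_config()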
# TODO(andym): move these into a tastypie API.
@login_required
@json_view
def agreement(request, id):
account = get_object_or_404(PaymentAccount, pk=id, user=request.user)
# It's a shame we have to do another get to find this out.
package = client.api.bango.package(account.uri).get_object_or_404()
if request.method == 'POST':
# Set the agreement.
account.update(agreed_tos=True)
return (client.api.bango.sbi.post(
data={'seller_bango': package['resource_uri']}))
res = (client.api.bango.sbi.agreement
.get_object(data={'seller_bango': package['resource_uri']}))
res['valid'] = helpers.datetime(
datetime.strptime(res['valid'], '%Y-%m-%dT%H:%M:%S'))
return res
| bsd-3-clause | 4,215,820,748,799,579,600 | 35.819338 | 79 | 0.615204 | false |
MrReN/django-oscar | oscar/apps/order/migrations/0008_auto__add_field_orderdiscount_category.py | 16 | 33182 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OrderDiscount.category'
db.add_column('order_orderdiscount', 'category',
self.gf('django.db.models.fields.CharField')(default='Basket', max_length=64),
keep_default=False)
def backwards(self, orm):
# Deleting field 'OrderDiscount.category'
db.delete_column('order_orderdiscount', 'category')
models = {
'address.country': {
'Meta': {'ordering': "('-is_highlighted', 'name')", 'object_name': 'Country'},
'is_highlighted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customer.communicationeventtype': {
'Meta': {'object_name': 'CommunicationEventType'},
'category': ('django.db.models.fields.CharField', [], {'default': "u'Order related'", 'max_length': '255'}),
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'blank': 'True'})
},
'order.billingaddress': {
'Meta': {'object_name': 'BillingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.communicationevent': {
'Meta': {'object_name': 'CommunicationEvent'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customer.CommunicationEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'communication_events'", 'to': "orm['order.Order']"})
},
'order.line': {
'Meta': {'object_name': 'Line'},
'est_dispatch_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_price_before_discounts_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_before_discounts_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': "orm['order.Order']"}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['partner.Partner']"}),
'partner_line_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'partner_line_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'partner_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit_cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_retail_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'order.lineattribute': {
'Meta': {'object_name': 'LineAttribute'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': "orm['order.Line']"}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_attributes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['catalogue.Option']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'order.lineprice': {
'Meta': {'ordering': "('id',)", 'object_name': 'LinePrice'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': "orm['order.Line']"}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_prices'", 'to': "orm['order.Order']"}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'})
},
'order.order': {
'Meta': {'ordering': "['-date_placed']", 'object_name': 'Order'},
'basket_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.BillingAddress']", 'null': 'True', 'blank': 'True'}),
'date_placed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'guest_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingAddress']", 'null': 'True', 'blank': 'True'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'total_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'total_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
},
'order.orderdiscount': {
'Meta': {'object_name': 'OrderDiscount'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'category': ('django.db.models.fields.CharField', [], {'default': "'Basket'", 'max_length': '64'}),
'frequency': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'offer_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discounts'", 'to': "orm['order.Order']"}),
'voucher_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'voucher_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'order.ordernote': {
'Meta': {'object_name': 'OrderNote'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'note_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['order.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(AUTH_USER_MODEL), 'null': 'True'})
},
'order.paymentevent': {
'Meta': {'object_name': 'PaymentEvent'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.PaymentEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['order.Line']", 'through': "orm['order.PaymentEventQuantity']", 'symmetrical': 'False'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'to': "orm['order.Order']"})
},
'order.paymenteventquantity': {
'Meta': {'object_name': 'PaymentEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.PaymentEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'order.paymenteventtype': {
'Meta': {'ordering': "('sequence_number',)", 'object_name': 'PaymentEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'order.shippingaddress': {
'Meta': {'object_name': 'ShippingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.shippingevent': {
'Meta': {'ordering': "['-date']", 'object_name': 'ShippingEvent'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['order.Line']", 'through': "orm['order.ShippingEventQuantity']", 'symmetrical': 'False'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_events'", 'to': "orm['order.Order']"})
},
'order.shippingeventquantity': {
'Meta': {'object_name': 'ShippingEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.ShippingEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'order.shippingeventtype': {
'Meta': {'ordering': "('sequence_number',)", 'object_name': 'ShippingEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'partner.partner': {
'Meta': {'object_name': 'Partner'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['order']
| bsd-3-clause | 2,084,114,572,480,455,400 | 86.783069 | 222 | 0.554276 | false |
Foxfanmedium/python_training | OnlineCoursera/mail_ru/Python_1/env/Lib/site-packages/prompt_toolkit/validation.py | 23 | 1755 | """
Input validation for a `Buffer`.
(Validators will be called before accepting input.)
"""
from __future__ import unicode_literals
from .filters import to_simple_filter
from abc import ABCMeta, abstractmethod
from six import with_metaclass
__all__ = (
'ConditionalValidator',
'ValidationError',
'Validator',
)
class ValidationError(Exception):
"""
Error raised by :meth:`.Validator.validate`.
    :param cursor_position: The cursor position where the error occurred.
:param message: Text.
"""
def __init__(self, cursor_position=0, message=''):
super(ValidationError, self).__init__(message)
self.cursor_position = cursor_position
self.message = message
def __repr__(self):
return '%s(cursor_position=%r, message=%r)' % (
self.__class__.__name__, self.cursor_position, self.message)
class Validator(with_metaclass(ABCMeta, object)):
"""
Abstract base class for an input validator.
"""
@abstractmethod
def validate(self, document):
"""
Validate the input.
If invalid, this should raise a :class:`.ValidationError`.
:param document: :class:`~prompt_toolkit.document.Document` instance.
"""
pass
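# Illustrative sketch (not part of the original module): a minimal concrete
# Validator for a hypothetical "digits only" input. It shows the expected
# pattern: inspect `document.text` and raise ValidationError with a helpful
# cursor_position when the input is invalid.
class _ExampleDigitsValidator(Validator):
    """Example validator that only accepts digits (illustration only)."""
    def validate(self, document):
        text = document.text
        for i, c in enumerate(text):
            if not c.isdigit():
                raise ValidationError(cursor_position=i,
                                      message='Input contains a non-digit character')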
class ConditionalValidator(Validator):
"""
Validator that can be switched on/off according to
a filter. (This wraps around another validator.)
"""
def __init__(self, validator, filter):
assert isinstance(validator, Validator)
self.validator = validator
self.filter = to_simple_filter(filter)
def validate(self, document):
# Call the validator only if the filter is active.
if self.filter():
self.validator.validate(document)
| apache-2.0 | 6,711,840,564,356,401,000 | 26.421875 | 77 | 0.646154 | false |
pschmitt/home-assistant | homeassistant/components/powerwall/config_flow.py | 6 | 2793 | """Config flow for Tesla Powerwall integration."""
import logging
from tesla_powerwall import APIChangedError, Powerwall, PowerwallUnreachableError
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_IP_ADDRESS
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({vol.Required(CONF_IP_ADDRESS): str})
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
power_wall = Powerwall(data[CONF_IP_ADDRESS])
try:
await hass.async_add_executor_job(power_wall.detect_and_pin_version)
site_info = await hass.async_add_executor_job(power_wall.get_site_info)
except PowerwallUnreachableError:
raise CannotConnect
except APIChangedError as err:
# Only log the exception without the traceback
_LOGGER.error(str(err))
raise WrongVersion
# Return info that you want to store in the config entry.
return {"title": site_info.site_name}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Tesla Powerwall."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except WrongVersion:
errors["base"] = "wrong_version"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if "base" not in errors:
await self.async_set_unique_id(user_input[CONF_IP_ADDRESS])
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, user_input):
"""Handle import."""
await self.async_set_unique_id(user_input[CONF_IP_ADDRESS])
self._abort_if_unique_id_configured()
return await self.async_step_user(user_input)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class WrongVersion(exceptions.HomeAssistantError):
"""Error to indicate the powerwall uses a software version we cannot interact with."""
| apache-2.0 | 1,197,514,514,682,018,600 | 33.481481 | 90 | 0.664518 | false |
bestrauc/seqan | util/bin/demo_checker.py | 10 | 4665 | #!/usr/bin/env python2
"""Demo checker script.
Given a demo .cpp file PATH.cpp we can make it a small test if there is a file
PATH.cpp.stdout and/or PATH.cpp.stderr. The test is implemented using this
script.
The script is called with the options --binary-path and one or both of
--stdout-path and --stderr-path. The demo is executed and the test succeeds
if the exit code is 0 and the standard/error output is the same as in the
.stdout/.stderr file. If there is output and the file is missing then this is
a failure as well.
"""
__author__ = """Manuel Holtgrewe <[email protected]>
Temesgen H. Dadi <[email protected]>
"""
import argparse
import difflib
import subprocess
import sys
import re
def t(s):
"""Force Windows line endings to Unix line endings."""
return s.replace("\r\n", "\n")
def fuzzyEqual(pattern, text):
"""checks if the expected output is eqal to the actualoutput using a reqex
use the literal [VAR] if the part of the output is not expected to be the same all the time.
"""
if len(pattern) != len(text):
print >> sys.stderr, 'Number of lines differ. Expected output has %s lines whereas actual has %s lines.' % (len(pattern), len(text))
return False
for i in range(len(pattern)):
T = text[i]
P = pattern[i]
if T == P :
continue
else :
if '[VAR]' not in P:
print >> sys.stderr, 'Line %s is different between expected and actual outputs.' % (i)
return False
else:
P = (re.escape(P)).replace('\\[VAR\\]', "[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?")
r = re.compile(P)
if re.match(r, T) == None:
print >> sys.stderr, 'Line %s is different (REGEX) between expected and actual outputs.' % (i)
return False
return True
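# Illustrative examples of the [VAR] convention (not part of the original
# script): [VAR] in an expected line matches any number at that position.
#
#   fuzzyEqual(['elapsed time: [VAR] s'], ['elapsed time: 0.42 s'])  # -> True
#   fuzzyEqual(['elapsed time: [VAR] s'], ['elapsed time: fast s'])  # -> False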
def loadExpected(args):
"""Load the expected file contents."""
out, err = '', ''
if args.stdout_path:
with open(args.stdout_path, 'rb') as f:
out = f.read()
if args.stderr_path:
with open(args.stderr_path, 'rb') as f:
err = f.read()
return t(out.strip()).split('\n'), t(err.strip()).split('\n')
def runDemo(args):
cmd = [args.binary_path]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutbuff, stderrbuff = p.communicate()
return t(stdoutbuff.strip()).split('\n'), t(stderrbuff.strip()).split('\n'), p.returncode
def main():
"""Program entry point."""
parser = argparse.ArgumentParser(description='Run SeqAn demos as apps.')
    parser.add_argument('--binary-path', dest='binary_path', required=True,
help='Path to the demo binary to execute.')
parser.add_argument('--stdout-path', dest='stdout_path',
help='Path to standard out file to compare to.',
default=None)
parser.add_argument('--stderr-path', dest='stderr_path',
help='Path to standard error file to compare to.',
default=None)
args = parser.parse_args()
print >>sys.stderr, 'Running %s.' % args.binary_path
actual_out, actual_err, ret = runDemo(args)
if ret != 0:
print >>sys.stderr, 'ERROR: Return code of %s was %s.' % (args.binary_path, ret)
return 1
else:
print >>sys.stderr, 'Return code was %s.' % ret
print >>sys.stderr, 'Loading files "%s", "%s".' % (args.stdout_path, args.stderr_path)
expected_out, expected_err = loadExpected(args)
is_stdout_as_expected = fuzzyEqual(expected_out, actual_out)
is_stderr_as_expected = fuzzyEqual(expected_err, actual_err)
if not is_stdout_as_expected:
print >>sys.stderr, 'The standard output was not as expected!'
l = difflib.context_diff(expected_out, actual_out,
fromfile='expected', tofile='actual')
print >>sys.stderr, '\n'.join(l)
else:
print >>sys.stderr, 'Standard output was as expected.'
if not is_stderr_as_expected:
print >>sys.stderr, 'The standard error was not as expected!'
l = difflib.context_diff(expected_err, actual_err,
fromfile='expected', tofile='actual')
print >>sys.stderr, '\n'.join(l)
else:
print >>sys.stderr, 'Standard error was as expected.'
    # We negate here because we need return code 0 (False) if the test is successful.
return not (is_stdout_as_expected and is_stderr_as_expected)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -694,974,993,020,110,200 | 37.553719 | 140 | 0.598071 | false |
hahnicity/meg-server | meg/db.py | 1 | 3263 | from collections import namedtuple
import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.dialects.postgresql import ARRAY
def create_db(app):
db = SQLAlchemy(app)
return db
def generate_models(db):
class RevocationKey(db.Model):
"""
The model for a PGP Revocation Key.
"""
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime, nullable=False)
expires = db.Column(db.DateTime, nullable=True)
length = db.Column(db.Integer, nullable=False)
armored = db.Column(db.Text, nullable=False)
pgp_keyid_for = db.Column(db.String(16), nullable=False)
def __init__(self, created, expires, length, armored, key_id_for):
self.created = created
self.expires = expires
self.length = length
self.armored = armored
self.pgp_keyid_for = key_id_for
class GcmInstanceId(db.Model):
"""
Store phone instance id
"""
id = db.Column(db.Integer, primary_key=True)
instance_id = db.Column(db.Text, nullable=False)
phone_number = db.Column(db.Text, nullable=False)
email = db.Column(db.Text, nullable=False)
created_at = db.Column(db.DateTime, nullable=False)
def __init__(self, instance_id, phone_number, email):
self.instance_id = instance_id
self.phone_number = phone_number
self.email = email
self.created_at = datetime.datetime.now()
class MessageStore(db.Model):
"""
Stores messages for eventual transmission to client or app
"""
id = db.Column(db.Integer, primary_key=True)
msg_id = db.Column(db.Text, nullable=True)
client_id = db.Column(db.Text, nullable=True)
action = db.Column(db.VARCHAR(8), nullable=False)
email_to = db.Column(db.Text, nullable=False)
email_from = db.Column(db.Text, nullable=False)
message = db.Column(db.Text, nullable=False)
created_at = db.Column(db.DateTime, nullable=False)
def __init__(self, client_id, msg_id, email_to, email_from, message, action):
self.action = action
self.client_id = client_id
self.msg_id = msg_id
self.email_to = email_to
self.email_from = email_from
self.message = message
self.created_at = datetime.datetime.now()
class RevocationToken(db.Model):
"""
Stores revocation tokens.
"""
id = db.Column(db.Integer, primary_key=True)
pgp_keyid_for = db.Column(db.VARCHAR(8), nullable=False)
hex = db.Column(db.VARCHAR(32), nullable=False)
created_at = db.Column(db.DateTime, nullable=False)
user_email = db.Column(db.Text, nullable=False)
def __init__(self, keyid, hex, user_email):
self.pgp_keyid_for = keyid
self.hex = hex
self.user_email = user_email
self.created_at = datetime.datetime.now()
Models = namedtuple('Models', [
'RevocationKey', 'GcmInstanceId', 'MessageStore', 'RevocationToken'
])
return Models(RevocationKey, GcmInstanceId, MessageStore, RevocationToken)
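# Minimal usage sketch (illustrative only; the Flask app and database URI here
# are hypothetical):
#
#   app = Flask(__name__)
#   app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/meg'
#   db = create_db(app)
#   models = generate_models(db)
#   db.create_all()
#   db.session.add(models.GcmInstanceId('instance-id', '5551234567', 'user@example.com'))
#   db.session.commit()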
| gpl-2.0 | -9,116,469,193,310,956,000 | 35.662921 | 85 | 0.608336 | false |
VHAINNOVATIONS/DmD | scrubber/MIST_2_0_4/src/MAT/lib/mat/python/MAT/Document.py | 1 | 62461 | # Copyright (C) 2007 - 2009 The MITRE Corporation. See the toplevel
# file LICENSE for license terms.
import sys
import Command
class OverlapError(Exception):
pass
from MAT.Annotation import Annotation, DocumentAnnotationTypeRepository, SpanlessAnnotation, \
AnnotationCore, AttributeValueSequence, AnnotationAttributeType
# This really ought to be in DocumentIO.py after the
# refactor, but there are too many odd dependencies.
class LoadError(Exception):
pass
class DumpError(Exception):
pass
class DocumentError(Exception):
pass
# The metadata is only used when reading the document in
# from a file. Otherwise, Python will never see the metadata.
# Everything that refers to AMS should be here, just in case we
# need to factor it out.
class AnnotatedDoc:
def __init__(self, signal = None, globalTypeRepository = None):
# There are too many levels here, but I've made the simplest
# modification to try to get the old global annotation type
# records to work here - it can't be global because we have
# potential threading issues with Web services.
self.atypeRepository = DocumentAnnotationTypeRepository(self, globalTypeRepository = globalTypeRepository)
self.atypeDict = {}
self.anameDict = self.atypeRepository
self.signal = ""
self.metadata = {}
if signal is not None:
if type(signal) is not type(u''):
raise LoadError, "signal must be Unicode"
self.signal = signal
def truncate(self):
self.atypeDict = {}
self.anameDict.clear()
# We have to unlock the repository, AND the atypes
# which have already been used.
def unlockAtypeRepository(self):
self.atypeRepository.forceUnlock()
for k, v in self.anameDict.items():
if v.repository is not self.atypeRepository:
# Copy (which will create an unlocked local copy) and update.
newV = v.copy(repository = self.atypeRepository)
if self.atypeDict.has_key(v):
self.atypeDict[newV] = self.atypeDict[v]
del self.atypeDict[v]
self.anameDict[k] = newV
# There's really no reason to have another level of indirection
# here. But we have it.
def findAnnotationType(self, tname, hasSpan = True, create = True):
if self.anameDict.has_key(tname):
return self.anameDict[tname]
else:
atype = self.atypeRepository.findAnnotationType(tname, hasSpan = hasSpan, create = create)
if atype is not None:
self.atypeDict[atype] = []
return atype
# blockAdd is backward compatibility. There are some cases where
# I suspect that I might not want to add when I create. All the cases
# in the code where they happen together have been converted.
def createAnnotation(self, start, end, type, attrs = None, blockAdd = False):
a = Annotation(self, start, end, type, attrs)
if not blockAdd:
self._addAnnotation(a)
return a
# blockAdd is backward compatibility.
def createSpanlessAnnotation(self, type, attrs = None, blockAdd = False):
a = SpanlessAnnotation(self, type, attrs)
if not blockAdd:
self._addAnnotation(a)
return a
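    # Illustrative sketch (not part of the original comments): the typical
    # create-and-add pattern, assuming the label string is resolved to an
    # annotation type by the Annotation constructor (see MAT.Annotation):
    #
    #   d = AnnotatedDoc(signal=u"John Smith visited Boston.")
    #   person = d.createAnnotation(0, 10, "PERSON")
    #   d.getAnnotations(["PERSON"])  # -> [person]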
# now only to be used in special cases.
def _addAnnotation(self, a):
if self.atypeDict.has_key(a.atype):
self.atypeDict[a.atype].append(a)
else:
self.atypeDict[a.atype] = [a]
# If someone points to it, raise an error.
def removeAnnotation(self, a):
if self.atypeDict.has_key(a.atype):
# Try this first. This can fail if someone points to it.
self.atypeRepository.removeAnnotationIDs([a])
try:
self.atypeDict[a.atype].remove(a)
except ValueError:
pass
# All the IDs must not be pointed to by anyone outside
# the group.
def removeAnnotationGroup(self, aGroup, forceDetach = False):
self.atypeRepository.removeAnnotationIDs(aGroup, forceDetach = forceDetach)
for a in aGroup:
if self.atypeDict.has_key(a.atype):
try:
self.atypeDict[a.atype].remove(a)
except ValueError:
pass
# At one point, I had a pure copy, but it doesn't make
# any sense in the context of the system. So we now have
# an import.
def importAnnotation(self, a):
# Copying from another document. We have to use strings
# and a dictionary instead of the actual atype and a list of attrs.
# Slower, but correct.
return self._importAnnotations({a.atype: [a]})
# This is used to get around the annotation pointer issue.
# Namely, you can't import them individually if they point to each other -
# because you can't import a group of annots which points to an annot
# that isn't mentioned.
def importAnnotationGroup(self, aGroup):
# First, sort by atype. Then, collapse with copyAnnotations.
atypeMap = {}
for a in aGroup:
try:
atypeMap[a.atype].append(a)
except KeyError:
atypeMap[a.atype] = [a]
return self._importAnnotations(atypeMap)
def importAnnotations(self, sourceDocument, atypes = None, offset = 0):
if atypes is None:
atypeMap = sourceDocument.atypeDict
else:
atypeMap = {}
for a in atypes:
try:
atypeMap[sourceDocument.anameDict[a]] = sourceDocument.atypeDict[sourceDocument.anameDict[a]]
except KeyError:
# There may not be any of them.
pass
return self._importAnnotations(atypeMap, offset = offset)
# This list is UNORDERED.
def recordStep(self, phaseName):
if not self.metadata.has_key("phasesDone"):
self.metadata["phasesDone"] = [phaseName]
elif phaseName not in self.metadata["phasesDone"]:
self.metadata["phasesDone"].append(phaseName)
# If ordered is True, can't do spanless.
# I've been bitten once too often by the bug where I'm
# looping through the result of getAnnotations and removing
# them, but the list I'm looping through is the list I'm removing
# things from. So I wasn't copying by default, but I'm
# sick of this bug.
def getAnnotations(self, atypes = None, strict = False, ordered = False,
spannedOnly = False, spanlessOnly = False):
if spannedOnly and spanlessOnly:
raise DocumentError, "Can restrict to either spanned or spanless, not both"
if ordered or strict:
if spanlessOnly:
raise DocumentError, "Can't restrict to spanless if ordered or strict"
spannedOnly = True
# Order them by start. If the end of one is
# after the start of the next and we've asked for strict,
# raise OverlapError.
# Let atypes is None fall all the way through, for efficiency.
if atypes is not None:
# Remember, the atype may not exist.
atypes = [self.anameDict[a] for a in atypes if self.anameDict.has_key(a)]
if spannedOnly or spanlessOnly:
if atypes is None:
if spannedOnly:
atypes = [atype for atype in self.anameDict.values() if atype.hasSpan]
else:
atypes = [atype for atype in self.anameDict.values() if not atype.hasSpan]
elif spannedOnly:
atypes = [atype for atype in atypes if atype.hasSpan]
else:
atypes = [atype for atype in atypes if not atype.hasSpan]
if atypes is None:
annotList = self.atypeDict.values()
else:
annotList = []
for a in atypes:
try:
annotList.append(self.atypeDict[a])
except KeyError:
# There may not be any of them.
pass
if len(annotList) == 1:
allAnnots = annotList[0][:]
elif annotList:
allAnnots = reduce(lambda x, y: x + y, annotList)
else:
allAnnots = []
# We will have already checked for spanless.
if ordered:
            allAnnots.sort(key = lambda x: x.start)
if strict:
lastEnd = None
for a in allAnnots:
if (lastEnd is not None) and \
(a.start < lastEnd):
raise OverlapError
lastEnd = a.end
return allAnnots
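    # Illustrative usage sketch (added for clarity; "doc" stands for an
    # AnnotatedDoc instance and the label "TOKEN" is hypothetical):
    #   doc.getAnnotations(atypes = ["TOKEN"], ordered = True)
    # returns the spanned TOKEN annotations sorted by start index, while
    #   doc.getAnnotations(spanlessOnly = True)
    # returns only annotations whose types have no span.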
def getAnnotationByID(self, aID):
return self.atypeRepository.getAnnotationByID(aID)
# This returns a list of labels.
def getAnnotationTypes(self):
return self.atypeRepository.keys()
# This will only return spanned annotations.
def orderAnnotations(self, atypes = None, strict = False):
return self.getAnnotations(atypes = atypes, strict = strict, ordered = True)
def hasAnnotations(self, atypes):
for a in atypes:
if self.anameDict.has_key(a):
if self.atypeDict.get(self.anameDict[a]):
return True
return False
# Copying the document should import the global type repository.
def copy(self, removeAnnotationTypes = None, signalInterval = None):
# First, copy the signal.
if signalInterval:
newStart, newEnd = signalInterval
newD = AnnotatedDoc(self.signal[newStart:newEnd])
else:
newStart = 0
newD = AnnotatedDoc(self.signal)
# Next, copy the metadata. This has to be a RECURSIVE copy.
newD.metadata = self._recursiveCopy(self.metadata)
# Now, import the annotation types.
newAtypes = newD.atypeRepository.importAnnotationTypes(self, removeAnnotationTypes = removeAnnotationTypes)
# Now, the annotations. If there's a signal interval,
# we need to exclude all annotations which are outside the
# interval. Ditto any annotations in removeAnnotationTypes.
# And since we know we've copied the atypes, we can use the actual
# lists of attributes.
# If we're filtering annotations, or grabbing a signal interval, we have to
# ensure that the annotations which are going to be copied
# don't refer to annotations outside the set. Otherwise, we don't
# need to check. So, collect all the old ones first.
# Also, if we're taking an interval, none of the annotations
# to be copied can be spanless.
annotMap = {}
justCreated = set(newAtypes)
for atype in newAtypes:
if signalInterval and (not atype.hasSpan):
raise DocumentError, "Can't copy with a filtered signal and spanless annotations"
# These are the already filtered atypes.
if self.anameDict.has_key(atype.lab) and \
self.atypeDict.has_key(self.anameDict[atype.lab]):
oldAtype = self.anameDict[atype.lab]
# If there are any annotations to copy:
if signalInterval is None:
annotMap[oldAtype] = self.atypeDict[oldAtype]
else:
annotMap[oldAtype] = [a for a in self.atypeDict[self.anameDict[atype.lab]]
if (a.start >= newStart) and (a.end <= newEnd)]
newD._importAnnotations(annotMap, justCreated = justCreated,
failOnReferenceCheck = removeAnnotationTypes or signalInterval,
copyIDs = True,
offset = -newStart)
return newD
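    # Illustrative usage sketch (added for clarity; the interval and the
    # label "SEGMENT" are only examples):
    #   newDoc = doc.copy(signalInterval = (100, 200))
    # copies the signal slice [100:200] plus the annotations falling entirely
    # inside it, offset so they index into the new signal, while
    #   doc.copy(removeAnnotationTypes = ["SEGMENT"])
    # copies the whole signal but leaves out the SEGMENT annotations.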
# I'm going to have this return a mapping from the old annotations to
# the new. I'm going to need this when I create the comparison documents.
def _importAnnotations(self, annotMap, justCreated = None,
failOnReferenceCheck = True, offset = 0,
copyIDs = False):
# See what annotations are being pointed to.
referencedAnnots = set()
allAnnots = []
for aGroup in annotMap.values():
allAnnots += aGroup
for a in aGroup:
for attr in a.attrs:
if isinstance(attr, AnnotationCore):
referencedAnnots.add(attr)
elif isinstance(attr, AttributeValueSequence) and attr.ofDocAndAttribute and \
isinstance(attr.ofDocAndAttribute[1], AnnotationAttributeType):
for subval in attr:
referencedAnnots.add(subval)
# If there are referenced annotations which aren't being copied, barf.
if failOnReferenceCheck and referencedAnnots and (not set(allAnnots).issuperset(referencedAnnots)):
raise DocumentError, "Can't copy annotations if they point to annotations which aren't included"
resultMap = {}
# Now, for each atype, find it in the new doc, and if it's either been just created
# or you can't find it, you can use the sequence method. Otherwise, you need the
        # dictionary method. If there are referenced annotations, use the dMap; otherwise, no.
# If you use the dMap, you'll have to record which method to copy the attrs with.
if referencedAnnots:
dMap = {}
for sourceAtype, sourceAnnots in annotMap.items():
useSequenceMethod = True
atype = self.atypeRepository.findAnnotationType(sourceAtype.lab, hasSpan = sourceAtype.hasSpan, create = False)
if atype is None:
atype = self.atypeRepository.importAnnotationType(sourceAtype)
elif justCreated and (atype in justCreated):
pass
else:
for t in sourceAtype.attr_list:
atype.importAttribute(t)
useSequenceMethod = False
# So now, we have the new atype.
# copyID is True when we're copying documents, not when we're
# importing annotations elsewhere. I'm PRETTY sure that the default
# should be False.
# I actually can't afford to fail to copy any attributes - later, when I
# copy in the annotation-valued attributes, I'll need to already have
# the elements which allow the label restrictions to be satisfied. So
# I only want to postpone the annotation-valued attributes. And in that
# case, we can't use the sequence method.
if referencedAnnots:
targetAnnots = []
for a in sourceAnnots:
annotAttrs = {}
foundAnnotAttrs = False
allAttrs = []
for attr, av in zip(sourceAtype.attr_list, a.attrs):
if attr._typename_ == "annotation":
# Postpone.
if av is not None:
annotAttrs[attr.name] = av
foundAnnotAttrs = True
# Placeholder.
allAttrs.append(None)
elif isinstance(av, AttributeValueSequence):
allAttrs.append(av.copy())
else:
allAttrs.append(av)
targetAnnot = a.copy(doc = self, offset = offset, copyID = copyIDs,
atype = atype,
attrs = allAttrs)
# We only need to postpone those things which point to
# other annotations.
if foundAnnotAttrs:
dMap[a] = (targetAnnot, annotAttrs)
targetAnnots.append(targetAnnot)
elif useSequenceMethod:
targetAnnots = [a.copy(doc = self, offset = offset,
atype = atype, copyID = copyIDs)
for a in sourceAnnots]
else:
# Special case: the annotation values, if they're sequences, CANNOT BE REUSED.
# There are no referenced annotations in the set of annotations we're copying.
targetAnnots = [a.copy(doc = self, offset = offset, copyID = copyIDs,
atype = atype,
attrs = dict(zip([attr.name for attr in sourceAtype.attr_list],
[((isinstance(av, AttributeValueSequence) and av.copy()) or av)
for av in a.attrs])))
for a in sourceAnnots]
# Now, add the target annotations to the new document.
try:
self.atypeDict[atype] += targetAnnots
except KeyError:
self.atypeDict[atype] = targetAnnots
resultMap.update(zip(sourceAnnots, targetAnnots))
# We've postponed copying the annotation attributes, because we have referenced
# annotations and we need to use the newly created correspondents.
# Don't forget to copy the attribute value sequences, and if
# it happens to be an annotation attribute value, look up the
# correspondents.
if referencedAnnots:
for sourceA, (targetA, annotAttrDict) in dMap.items():
for attr, a in annotAttrDict.items():
# This checks the label restrictions, even though it's
# not necessary, but I can't bypass that.
if isinstance(a, AnnotationCore):
targetA[attr] = resultMap[a]
else:
targetA[attr] = a.__class__([resultMap[subA] for subA in a])
return resultMap
def _recursiveCopy(self, d):
if type(d) is dict:
return dict([(k, self._recursiveCopy(v)) for k, v in d.items()])
elif type(d) is list:
return [self._recursiveCopy(v) for v in d]
else:
return d
# This list is UNORDERED.
def setStepsDone(self, steps):
self.metadata["phasesDone"] = steps
def stepUndone(self, step):
try:
self.metadata["phasesDone"].remove(step)
except ValueError:
pass
def getStepsDone(self):
try:
return self.metadata["phasesDone"]
except KeyError:
return []
def removeAnnotations(self, atypes = None):
if atypes is None:
self.atypeDict = {}
self.anameDict.clear()
else:
aGroup = []
for atype in atypes:
try:
atypeObj = self.anameDict[atype]
annots = self.atypeDict[atypeObj]
aGroup += annots
except KeyError:
pass
# Remove the annotation IDs as a bundle, to make
# sure they're not externally referenced.
self.atypeRepository.removeAnnotationIDs(aGroup)
for atype in atypes:
try:
del self.atypeDict[self.anameDict[atype]]
except KeyError:
pass
# Cleaning up the document.
def adjustTagsToTokens(self, task, doPrompt = False, doReport = False):
# Sometimes, Lord help us, the tokens and tags get out of alignment,
# and this can be a very bad thing. Perhaps we're importing tags from
# another tagger, and using this tokenizer, or perhaps something went
# wrong with hand tagging, or (in the de-identification case) the tokenizer
# does unexpected things on resynthesized text.
# This code was taken directly from the de-identification task.
# I have to make sure that (believe it
# or not) no tags mismatch the annotation boundaries. If they do,
# I need to expand the annotation boundaries to match the nearest
# token. This is a messy computation.
# Copy it, because in some rare cases I'm going to have to
# delete annotations.
# I really want there to be a task object, because it's the task that's the
# authority about the annotation types. If a document, for instance, was processed
# by a task which didn't have the token annotation listed, and then you add the
# token annotation, bad things will happen.
contentAnnots = self.orderAnnotations(task.getAnnotationTypesByCategory('content'))[:]
lexAnnots = self.orderAnnotations(task.getAnnotationTypesByCategory('token'))
lexAnnotIndex = 0
maxLex = len(lexAnnots)
# And to complicate matters, it's possible that the adjustment
# might lead to overlapping annotations, if entities abut each
# other. That can't happen.
# And a final complexity. Not all the text is covered by tokens, and
# sometimes, if a replacer replaces a newline, a newline is added
# at the end of the replacement. So we have to be aware that
# there may be untokenized whitespace that we can TRIM, rather
        # than always assuming a boundary has to be moved to SPREAD.
# The old algorithm was overly complex and missed some edge conditions.
# So:
# (1) Digest all the tokens which are completely before the annotation.
# (2) Check left edge. Adjust if necessary.
# (3) Digest tokens entirely within the annotation.
# Remember, it can be the same lex as the left boundary.
# (4) Check right edge. Adjust if necessary.
# This algorithm only works if we have no overlapping annotations.
# Actually, the way to adjust for overlapping annots is to
# keep track of the right edges we expand, and if a left
# edge needs to be moved, only expand if it doesn't cross a
# newly-created right edge.
# Don't forget about boundary conditions: what if the current annotation
# starts or ends after the last token?
annotationsToDelete = []
# Let's do this in a couple stages, since I want to use this code to
# diagnose as well as to repair. So first, we take all the lexes
# and we generate start and end tables.
tStartMap = {}
tEndMap = {}
j = 0
for t in lexAnnots:
tStartMap[t.start] = j
tEndMap[t.end] = j
j += 1
badAnnots = []
# So we should check to repair, if we're prompting, and we should
# report, if we're reporting.
def presentPrompt(s):
while True:
w = raw_input(s)
if w in ['y', 'Y']:
return True
elif w in ['n', 'N']:
return False
else:
print "Please answer 'y' or 'n'."
for cIndex in range(len(contentAnnots)):
cAnnot = contentAnnots[cIndex]
if not (tStartMap.has_key(cAnnot.start) and tEndMap.has_key(cAnnot.end)):
if (not doPrompt) or \
presentPrompt("Annotation %s from %d to %d does not align with token boundaries. Repair? (y/n) " % (cAnnot.atype.lab, cAnnot.start, cAnnot.end)):
badAnnots.append(cAnnot)
if doReport:
print "Annotation is %s from %d to %d." % (cAnnot.atype.lab, cAnnot.start, cAnnot.end)
annString = self.signal[cAnnot.start:cAnnot.end]
import string
chars = []
for c in annString:
if c in string.uppercase:
chars.append('A')
elif c in string.lowercase:
chars.append('a')
elif c in string.digits:
chars.append('0')
else:
chars.append(c)
print "Text pattern is '%s'." % "".join(chars)
if cIndex > 0:
prevString = self.signal[contentAnnots[cIndex-1].end:cAnnot.start]
else:
prevString = self.signal[:cAnnot.start]
print "Non-annotated text on left side is: '%s' (%d characters, %d - %d)" % (prevString, len(prevString), cAnnot.start - len(prevString), cAnnot.start)
if cIndex < (len(contentAnnots) - 1):
nextString = self.signal[cAnnot.end:contentAnnots[cIndex+1].start]
else:
nextString = self.signal[cAnnot.end:]
print "Non-annotated text on right side is: '%s' (%d characters, %d - %d)" % (nextString, len(nextString), cAnnot.end, cAnnot.end + len(nextString))
print "Tokens in neighborhood are:"
iStart = cAnnot.start - 30
if iStart < 0:
iStart = 0
iEnd = cAnnot.end + 30
if iEnd > len(self.signal):
iEnd = len(self.signal)
while iStart < iEnd:
if tStartMap.has_key(iStart):
lex = lexAnnots[tStartMap[iStart]]
print ("%d - %d" % (lex.start, lex.end)),
import sys
sys.stdout.flush()
iStart += 1
print
# Now, we have all the ones we should repair.
# Note that we want to avoid creating overlaps where there were
# none previously, but we shouldn't avoid overlaps entirely.
# What this means is that if I expand a right edge, I should
# make sure that any left edge that I expand doesn't cross
# any new right edge. If it does, I want to shrink rather than grow.
usedRightEdgeToks = set([])
for cAnnot in badAnnots:
# (1) digest all tokens which are completely before the annotation.
# The annotations are in start index order, so that should work.
while True:
if lexAnnotIndex >= maxLex:
# Oops, we ran out of lexes before we reached
# the annotation. Remove it.
if doReport:
print "Ran out of lexes before %s from %d to %d" % (cAnnot, cAnnot.start, cAnnot.end)
annotationsToDelete.append(cAnnot)
break
curLex = lexAnnots[lexAnnotIndex]
if curLex.end > cAnnot.start:
# Encroaching.
break
lexAnnotIndex += 1
# OK, now we've advanced lexAnnotIndex up to where we
# need it.
localIndex = lexAnnotIndex
# (2) Check left edge. Adjust if necessary.
# If the annotation precedes all tokens, we have to be careful
# not to just shift it onto the existing token.
if curLex.start >= cAnnot.end:
# Delete the annotation.
if doReport:
print "First available lex (%d - %d) >= end of %s from %d to %d; deleting" % \
(curLex.start, curLex.end, cAnnot.atype.lab, cAnnot.start, cAnnot.end)
annotationsToDelete.append(cAnnot)
continue
elif curLex.start < cAnnot.start:
# Lex spans annotation start. Adjust left if it's not less than a newly created right
# edge, otherwise adjust right.
foundNewlyCreated = False
if curLex in usedRightEdgeToks:
if localIndex + 1 >= maxLex:
if doReport:
print "Ran out of lexes before %s from %d to %d" % (cAnnot, cAnnot.start, cAnnot.end)
annotationsToDelete.append(cAnnot)
else:
nextLex = lexAnnots[lexAnnotIndex + 1]
if doReport:
print "First available lex (%d - %d) < start of %s from %d to %d; shrinking annot start to avoid previous use of left token" % \
(nextLex.start, nextLex.end, cAnnot.atype.lab, cAnnot.start, cAnnot.end)
cAnnot.start = nextLex.start
else:
if doReport:
print "First available lex (%d - %d) < start of %s from %d to %d; expanding annot start" % \
(curLex.start, curLex.end, cAnnot.atype.lab, cAnnot.start, cAnnot.end)
cAnnot.start = curLex.start
elif curLex.start > cAnnot.start:
# Gap between tokens, or first token starts
# after first annotation. Adjust right.
if doReport:
print "First available lex (%d - %d) > start of %s from %d to %d; shrinking annot start" % \
(curLex.start, curLex.end, cAnnot.atype.lab, cAnnot.start, cAnnot.end)
cAnnot.start = curLex.start
# (3) Digest tokens entirely within the annotation.
# Remember, it can be the same lex as the left boundary.
# We transition to the local index now.
while True:
if localIndex >= maxLex:
# Oops, we ran out of lexes before we
# reached the end of the annotation.
# Use the last lex.
cAnnot.end = curLex.end
break
curLex = lexAnnots[localIndex]
if curLex.end >= cAnnot.end:
# Encroaching.
break
localIndex += 1
# (4) Check right edge. Adjust if necessary.
# Worry about the case where the next annotation
# starts immediately afterward. Probably, the way
# to do that is to advance the lexAnnotIndex because
# we've "consumed" the token.
if curLex.start >= cAnnot.end:
# It's possible that the next tokens
# starts entirely after the current annotation.
# Then we need to shrink the current annotation
# to the end of the previous token.
if localIndex > 0:
if doReport:
print "Last available lex start (%d - %d) > end of %s from %d to %d; shrinking end" % \
(curLex.start, curLex.end, cAnnot.atype.lab, cAnnot.start, cAnnot.end)
cAnnot.end = lexAnnots[localIndex - 1].end
else:
# This is the first token. How we got an annotation
# which ends after the first token is a mystery,
# but out it goes.
if doReport:
print "Last available lex start (%d - %d) > end of %s from %d to %d, but no preceding lex; deleting" % \
(curLex.start, curLex.end, cAnnot.atype.lab, cAnnot.start, cAnnot.end)
annotationsToDelete.append(cAnnot)
elif curLex.end > cAnnot.end:
# If there had been a token which ended
# exactly on the annotation boundary, we would
# have seen it. So we expand the annotation.
if doReport:
print "Last available lex end (%d - %d) > end of %s from %d to %d; expanding end" % \
(curLex.start, curLex.end, cAnnot.atype.lab, cAnnot.start, cAnnot.end)
cAnnot.end = curLex.end
usedRightEdgeToks.add(curLex)
        # Finally, we delete the collected annotations. Actually, we'd better make sure that the
# annotations are detached first.
self.removeAnnotationGroup(annotationsToDelete, forceDetach = True)
return len(badAnnots)
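    # Illustrative usage sketch (added for clarity; "task" is assumed to be a
    # task object with 'content' and 'token' annotation categories defined):
    #   numRepaired = doc.adjustTagsToTokens(task, doPrompt = False, doReport = True)
    # realigns each content annotation to token boundaries, deletes the ones
    # that can't be salvaged, and returns how many annotations were misaligned.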
def avoidWhitespaceInTags(self, task):
# Even in the case where we have no tokens, because it's a
# pattern-redacted document, we might have situations where the
# tags cover leading or trailing whitespace. Here, we shrink the
# whitespace without reference to the lexes. So you need to be careful
# when you call this.
import string
contentAnnots = self.orderAnnotations(task.getAnnotationTypesByCategory('content'))
for c in contentAnnots:
signal = self.signal[c.start:c.end]
iStart = 0
while signal[iStart] in string.whitespace:
iStart += 1
iEnd = -1
while signal[iEnd] in string.whitespace:
iEnd -= 1
iEnd += 1
c.start += iStart
c.end += iEnd
def removeOverlaps(self, collisionList):
# Emergency stopgap. Occasionally, we get overlapping
# content tags, which we might not want. collisionList
# is a list of annotation names which can't have
# any overlaps among them.
indexDict = {}
for aName in collisionList:
try:
aType = self.anameDict[aName]
annots = self.atypeDict[aType]
except KeyError:
continue
# Ignore the overlaps for types which aren't spanned,
# in the unlikely event that someone chooses them.
if not aType.hasSpan:
continue
# Zero it out.
self.atypeDict[aType] = []
for a in annots:
if indexDict.has_key(a.start):
# Take the shortest.
if a.end < indexDict[a.start].end:
print >> sys.stderr, "Warning: discarding %s from %d to %d in favor of shorter overlapping annotation" % (indexDict[a.start].atype.lab, a.start, indexDict[a.start].end)
indexDict[a.start] = a
else:
print >> sys.stderr, "Warning: discarding %s from %d to %d in favor of shorter overlapping annotation" % (a.atype.lab, a.start, a.end)
else:
indexDict[a.start] = a
indexes = indexDict.keys()
indexes.sort()
end = 0
for i in indexes:
# There will only be one.
annot = indexDict[i]
if i < end:
# Now, we keep the annotation that's already started.
print >> sys.stderr, "Warning: discarding %s from %s to %d in favor of annotation which starts earlier" % (annot.atype.lab, annot.start, annot.end)
del indexDict[i]
else:
end = annot.end
# Now, we've removed all the overlapping ones.
# Reconstruct the annot sets.
indexes = indexDict.keys()
indexes.sort()
for i in indexes:
a = indexDict[i]
self.atypeDict[a.atype].append(a)
# I wanted this to be on the document, rather than the task,
# because it's a document operation. But I want to call it
# on bunches of documents.
# Note that for the purposes of Carafe, at the moment this doesn't
# return the zone information, so Carafe can't exploit that zone
# region type as a feature. But we return it anyway.
@classmethod
def processableRegions(cls, annotSets, task = None, segmentFilterFn = None):
zType, rAttr, regions = None, None, None
if task is not None:
zType, rAttr, regions = task.getTrueZoneInfo()
regionLists = []
for d in annotSets:
segs = d.orderAnnotations(["SEGMENT"])
if zType is not None:
zones = d.orderAnnotations([zType])
else:
zones = None
# If there's no segment filter function, there's no point
# in looking at the segments - just use the zones. Not going to
# bother filtering on zones, because the segments wouldn't be
# there otherwise.
if segs and segmentFilterFn:
segs = [seg for seg in segs if segmentFilterFn(seg)]
regionList = []
# Loop through the segments. Each time we find one which is an
# extension of a previous machine-annotatable segment, ignore the
# new segment and extend the old.
currentDigestibleRegion = None
currentSeg = None
currentZoneIndex = None
if zones:
currentZoneIndex = 0
for seg in segs:
if currentDigestibleRegion and (currentDigestibleRegion[1] == seg.start) and \
((currentZoneIndex is None) or (zones[currentZoneIndex].end <= seg.end)):
currentDigestibleRegion[1] = seg.end
else:
# Try to move forward.
if currentZoneIndex is not None:
while seg.start >= zones[currentZoneIndex].end:
currentZoneIndex += 1
if currentZoneIndex == len(zones):
currentZoneIndex = None
break
currentDigestibleRegion = [seg.start, seg.end,
((currentZoneIndex is not None) and (rAttr is not None) and \
[rAttr, zones[currentZoneIndex].get(rAttr)])
or None]
regionList.append(currentDigestibleRegion)
regionLists.append(regionList)
elif zones:
# Don't filter zones for segments above, but DO filter it here.
regionLists.append([[z.start, z.end, ((rAttr is not None) and [rAttr, z.get(rAttr)]) or None]
for z in zones if (rAttr is None) or (z.get(rAttr) in regions)])
else:
# No zoning at all has happened. Just use the whole document.
regionLists.append([[0, len(d.signal), None]])
return regionLists
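    # Illustrative usage sketch (added for clarity; "docs" and "someTask" are
    # hypothetical):
    #   regionLists = AnnotatedDoc.processableRegions(docs, task = someTask)
    # returns one list per document of [start, end, regionInfo] triples, where
    # regionInfo is either None or [regionAttr, value] taken from the true zone.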
#
# This is a structure which provides a view into a document, by
# smallest region. It looks into all the segments, and records
# for each smallest region, what annotations are stacked over
# that region. It's set up to manage multiple documents simultaneously
# (as long as they have the same signal), and so that
# you can augment the regions with features based on
# what's stacked there. It also records whether an annotation
# that's stacked there is ending there or not.
# The documents are also assumed to share a task, so
# they're subdivided by effective label and category, which
# may or may not be the right thing to do.
# If there's no task, there's a bunch of things we can't do.
#
# When we collect the docs, we can only collect the indices.
# It doesn't turn into the slices until we ask.
class DocSliceError(Exception):
pass
class SignalCacheEntry:
def __init__(self, cache, eType, doc, a, label, category):
self.cache = cache
self.eType = eType
self.doc = doc
self.annot = a
self.label = label
self.category = category
class SignalIndexCache:
def __init__(self, nth):
self.nth = nth
self.labelMap = {}
def addEntry(self, eType, doc, a, label, category):
e = SignalCacheEntry(self, eType, doc, a, label, category)
try:
self.labelMap[(label, category)].append(e)
except KeyError:
self.labelMap[(label, category)] = [e]
def get(self, label = None, category = None):
r = []
for (l, c), entries in self.labelMap.items():
if ((label is None) or (l == label)) and \
((category is None) or (c == category)):
r += entries
return r
def removeEntry(self, e):
# This is the actual entry that comes back from get().
# So it will be the proper entry in the label map.
try:
self.labelMap[(e.label, e.category)].remove(e)
except ValueError:
pass
except KeyError:
pass
class SignalRegion(SignalIndexCache):
STARTS, ENDS, MATCHES, WITHIN = 0, 1, 2, 3
def __init__(self, nth, start, end):
self.start = start
self.end = end
SignalIndexCache.__init__(self, nth)
class SignalIndex(SignalIndexCache):
STARTS, ENDS, CROSSES = 0, 1, 2
def __init__(self, nth, i):
self.index = i
SignalIndexCache.__init__(self, nth)
class DocSliceManager:
# The skip table is a hash of doc => [(label, category), ...]
def __init__(self, docs, task = None, categoryMap = None,
skipTable = None, keepTable = None):
self.task = task
# The categoryMap is a map from the label (NOT the effective label) to
# one of the categories.
self.categoryMap = categoryMap or {}
# We want to know the effective label and the category.
# What we should do is populate the category map
# with the effective label as we go along, IF there's
# either a task, or a task table in the document (I'm really not comfortable
# with the latter, and I'd like to phase it out).
# Actually, I'm going to liberate myself - I'm going to
# ignore the task table in the document here completely.
# Gotta start sometime...
self.docs = []
# Intermediate cache.
self._indexCache = {}
# Final cache. Regions are in order.
self.regions = []
if docs is not None:
for doc in docs:
self.addDocument(doc, (skipTable and skipTable.get(doc)), (keepTable and keepTable.get(doc)))
# skipList and keepList are decisive. If both are provided (why would
# you do that?) and something's not in the skip list but not in the
# keep list, it's skipped.
def addDocument(self, doc, skipList = None, keepList = None):
if self.docs:
if doc.signal != self.docs[0].signal:
raise DocSliceError, "document signals must be identical"
if doc in self.docs:
raise DocSliceError, "document is already in slicer"
self.docs.append(doc)
if skipList:
skipList = set(skipList)
if keepList:
keepList = set(keepList)
# Let's gather info about the annotation.
if self.task:
mData = self.task
labeler = self.task.getEffectiveAnnotationLabel
else:
mData = None
labeler = lambda ann: ann.atype.lab
for a in doc.getAnnotations(spannedOnly = True):
# Find the label and category. Note that the category will be
# the category of the annotation descriptor that the label
# is defined in; if it's an effective label, that's the category
# you'll get.
label = labeler(a)
try:
category = self.categoryMap[label]
except KeyError:
category = None
if mData is not None:
try:
category = mData.getCategoryForLabel(label)
except KeyError:
pass
self.categoryMap[label] = category
if skipList and (((label, category) in skipList) or \
((label, None) in skipList) or \
((None, category) in skipList)):
continue
if keepList and (((label, category) not in keepList) and \
((label, None) not in keepList) and \
((None, category) not in keepList)):
continue
entry = (doc, a, label, category)
try:
self._indexCache[a.start][0].append(entry)
except KeyError:
h = [[entry], []]
self._indexCache[a.start] = h
try:
self._indexCache[a.end][1].append(entry)
except KeyError:
h = [[], [entry]]
self._indexCache[a.end] = h
def getRegions(self):
allIndices = self._indexCache.keys()
allIndices.sort()
if not allIndices:
return []
curEntries = set()
# For each index, if it's not the final index,
# start a region. The region ends all the annotations
# that are ending, inherits all the annotations which
# are underway, and starts all the annotations which
# are starting.
lastIndex = allIndices[-1]
firstIndex = allIndices[0]
justStarted = []
previousIndex = -1
regions = []
j = 0
for i in allIndices:
[startEntries, endEntries] = self._indexCache[i]
if i == lastIndex:
if startEntries:
raise DocSliceError, "Can't start any annotations on the last index"
if i == firstIndex:
if endEntries:
raise DocSliceError, "Can't end any annotations on the first index"
else:
# At this point, I'm going to close the previous index.
r = SignalRegion(j, previousIndex, i)
j += 1
regions.append(r)
for endEntry in endEntries:
if endEntry in justStarted:
r.addEntry(SignalRegion.MATCHES, *endEntry)
else:
r.addEntry(SignalRegion.ENDS, *endEntry)
for startEntry in justStarted:
if startEntry not in endEntries:
r.addEntry(SignalRegion.STARTS, *startEntry)
for coveringEntry in curEntries:
if (coveringEntry not in justStarted) and (coveringEntry not in endEntries):
r.addEntry(SignalRegion.WITHIN, *coveringEntry)
# The final trick is the ones which this region is within.
# Those are the ones which are still going,
# but weren't just started.
# Cache these for the next interval.
justStarted = startEntries
previousIndex = i
curEntries -= set(endEntries)
curEntries |= set(startEntries)
if curEntries:
raise DocSliceError, "entries remain after all indices are processed"
return regions
def getIndexes(self):
allIndices = self._indexCache.keys()
allIndices.sort()
if not allIndices:
return []
curEntries = set()
# For each index, if it's not the final index,
# start a region. The region ends all the annotations
# that are ending, inherits all the annotations which
# are underway, and starts all the annotations which
# are starting.
lastIndex = allIndices[-1]
firstIndex = allIndices[0]
previousIndex = -1
indexes = []
j = 0
for i in allIndices:
[startEntries, endEntries] = self._indexCache[i]
if i == lastIndex:
if startEntries:
raise DocSliceError, "Can't start any annotations on the last index"
if i == firstIndex:
if endEntries:
raise DocSliceError, "Can't end any annotations on the first index"
else:
# At this point, I'm going to close the previous index.
r = SignalIndex(j, i)
j += 1
indexes.append(r)
for endEntry in endEntries:
r.addEntry(SignalIndex.ENDS, *endEntry)
for startEntry in startEntries:
r.addEntry(SignalIndex.STARTS, *startEntry)
for coveringEntry in curEntries:
if (coveringEntry not in endEntries):
r.addEntry(SignalIndex.CROSSES, *coveringEntry)
curEntries -= set(endEntries)
curEntries |= set(startEntries)
if curEntries:
raise DocSliceError, "entries remain after all indices are processed"
return indexes
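# Illustrative usage sketch (added for clarity; the documents, task and
# category names are hypothetical):
#   mgr = DocSliceManager([doc1, doc2], task = myTask)
#   for region in mgr.getRegions():
#       entries = region.get(category = "content")
# The documents must share the same signal. Each SignalRegion covers the
# smallest interval over which the set of covering annotations doesn't change,
# and each entry records whether its annotation STARTS, ENDS, MATCHES or lies
# WITHIN that region.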
#
# AnnotationReporter
#
# I hope this will be more successful than the DocSliceManager...
# This code was excised from the guts of MATReport. I believe I'm going to
# need it elsewhere; the first application is for the conversion reporter in MATTransducer.
# I think the idea is that for each row, there may be multiple
# annotations (e.g., perhaps they're paired) and there may be a different
# configuration of headers for each.
class AnnotationReporter:
CONCORDANCE_WINDOW = 32
def __init__(self, partitionByLabel = False):
self.partitionByLabel = partitionByLabel
self.positions = []
self.rows = []
self.rowsByLabel = {}
self.convertedPartitionedHeadersAndRows = None
def addPosition(self, headerPrefix = None, concordanceContext = False, concordanceWindow = None,
showText = True):
posDesc = {"doConcordance": concordanceContext, "showText": showText, "headerPrefix": headerPrefix}
if showText and concordanceContext:
if concordanceWindow is None:
posDesc["concordanceWindow"] = self.CONCORDANCE_WINDOW
elif concordanceWindow < 1:
raise DocumentError, "concordance window must be 1 or greater"
else:
posDesc["concordanceWindow"] = concordanceWindow
if not showText:
posDesc["headers"] = ["start", "end", "label", "description"]
elif concordanceContext:
posDesc["headers"] = ["start", "end", "left context", "text", "label", "description", "right context"]
else:
posDesc["headers"] = ["start", "end", "text", "label", "description"]
if len(self.positions) == 0:
# first position. Do something special for the partition headers.
if not showText:
posDesc["bylabel_headers"] = ["start", "end", "id", "attrs"]
elif concordanceContext:
posDesc["bylabel_headers"] = ["start", "end", "id", "left context", "text", "attrs", "right context"]
else:
posDesc["bylabel_headers"] = ["start", "end", "id", "text", "attrs"]
else:
posDesc["bylabel_headers"] = posDesc["headers"][:]
self.positions.append(posDesc)
def addDocument(self, doc, basename, aNames, includeSpanless = False):
annotLabCounts = {}
if len(self.positions) != 1:
raise DocumentError, "positions != 1 when adding document is not permitted"
if self.positions[0]["headers"][0] != "basename":
self.positions[0]["headers"][0:0] = ["basename"]
if self.positions[0]["bylabel_headers"][0] != "basename":
self.positions[0]["bylabel_headers"][0:0] = ["basename"]
if aNames:
# We'll have something to add.
# orderAnnotations will retrieve JUST the spanned annotations.
for a in doc.orderAnnotations(aNames):
self.addRow([a])
self.rows[-1][0:0] = [basename]
if self.partitionByLabel:
self.rowsByLabel[a.atype.lab]["rows"][-1][0:0] = [basename]
try:
annotLabCounts[a.atype.lab] += 1
except KeyError:
annotLabCounts[a.atype.lab] = 1
if includeSpanless:
for a in doc.getAnnotations(atypes = aNames, spanlessOnly = True):
self.addRow([a])
self.rows[-1][0:0] = [basename]
if self.partitionByLabel:
self.rowsByLabel[a.atype.lab]["rows"][-1][0:0] = [basename]
try:
annotLabCounts[a.atype.lab] += 1
except KeyError:
annotLabCounts[a.atype.lab] = 1
return annotLabCounts
# Partition by label partitions by the FIRST ELEMENT.
# The first element position will use the keys as part of the columns.
# The other positions will be appropriate for the position.
def addRow(self, row):
if len(row) != len(self.positions):
raise DocumentError, "row is different length than positions"
rowRes = []
partitionRes = []
self.rows.append(rowRes)
i = 0
# The annotation may be null, if it's not paired.
while i < len(row):
a = row[i]
posDesc = self.positions[i]
if a is None:
# Pad it.
rowRes += [None] * len(posDesc["headers"])
i += 1
continue
doConcordance = posDesc["doConcordance"]
showText = posDesc["showText"]
# Create a composite label.
labName = a.atype.lab
leftWindow = rightWindow = start = end = None
if a.atype.hasSpan:
txt = a.doc.signal[a.start:a.end]
if doConcordance:
leftEdge = max(0, a.start - posDesc["concordanceWindow"])
leftWindow = a.doc.signal[leftEdge:a.start]
rightWindow = a.doc.signal[a.end:a.end+posDesc["concordanceWindow"]]
start = a.start
end = a.end
else:
txt = self._computeSpanlessText(a)
if not showText:
localSubportion = [start, end, labName, a.describe()]
elif doConcordance:
localSubportion = [start, end, leftWindow, txt, labName, a.describe(), rightWindow]
else:
localSubportion = [start, end, txt, labName, a.describe()]
rowRes += localSubportion
if self.partitionByLabel:
if i == 0:
try:
entry = self.rowsByLabel[labName]
entry["rows"].append(partitionRes)
except KeyError:
entry = {"keys": set(), "rows": [partitionRes]}
self.rowsByLabel[labName] = entry
aDict = dict([(attr.name, attr.toStringNonNull(val)) for (attr, val) in \
zip(a.atype.attr_list, a.attrs) if val is not None])
entry["keys"].update(aDict.keys())
if not showText:
partitionRes += [start, end, a.id, aDict]
elif doConcordance:
partitionRes += [start, end, a.id, leftWindow, txt, aDict, rightWindow]
else:
partitionRes += [start, end, a.id, txt, aDict]
else:
partitionRes += localSubportion
i += 1
def getHeadersAndRows(self):
h = []
for p in self.positions:
headers = p["headers"]
if p["headerPrefix"]:
h += [p["headerPrefix"] + " " + s for s in headers]
else:
h += headers
return h, self.rows
def getPartitionedHeadersAndRows(self):
self._ensurePartitionConversion()
return self.convertedPartitionedHeadersAndRows
def _ensurePartitionConversion(self):
if (self.convertedPartitionedHeadersAndRows is None) and (self.partitionByLabel):
# The first position will have attrs.
self.convertedPartitionedHeadersAndRows = {}
for lab, entry in self.rowsByLabel.items():
h = []
for p in self.positions:
headers = p["bylabel_headers"]
if p["headerPrefix"]:
h += [p["headerPrefix"] + " " + s for s in headers]
else:
h += headers
# Those are all the headers.
attrIndex = h.index("attrs")
# Don't do the prefix substitutions with the keys.
keys = list(entry["keys"])
keys.sort()
h = h[0:attrIndex] + keys + h[attrIndex+1:]
rows = [row[0:attrIndex] + [row[attrIndex].get(key) for key in keys] + row[attrIndex+1:]
for row in entry["rows"]]
self.convertedPartitionedHeadersAndRows[lab] = (h, rows)
# The way we compute the text for a spanless
# annotation is to find the spanned annotations which are
# referenced, and define a window around it. Then,
# we collapse the windows if they overlap.
def _computeSpanlessText(self, a):
spannedAnnots = {}
for attrObj, val in zip(a.atype.attr_list, a.attrs):
if (attrObj._typename_ == "annotation") and (val is not None):
if not attrObj.aggregation:
if val.atype.hasSpan:
try:
spannedAnnots[val].add(attrObj.name)
except KeyError:
spannedAnnots[val] = set([attrObj.name])
else:
for subval in val:
if subval.atype.hasSpan:
try:
spannedAnnots[subval].add(attrObj.name)
except KeyError:
spannedAnnots[subval] = set([attrObj.name])
if spannedAnnots:
# OK, now we have a mapping from spanned annotations
# to the attrs they cover. Don't forget they can
# overlap each other. Grrr.
# First thing we do: let's have a window of 20 characters on
# each side.
annotKeys = spannedAnnots.keys()
annotKeys.sort(key = lambda a: a.start)
intervals = []
signalLen = len(a.doc.signal)
# There's no guarantee that the annot ENDS are in order.
# So the only safe thing to do is gather all the starts and
# ends first.
toInsert = {}
for annot, attrList in spannedAnnots.items():
startStr = " [" + ",".join(attrList) + " "
try:
toInsert[annot.start][0].append(startStr)
except KeyError:
# Not less than 0.
toInsert[annot.start] = [[startStr], []]
try:
toInsert[annot.end][1].append(" ] ")
except KeyError:
# Not more than the length of the signal.
toInsert[annot.end] = [[], [" ] "]]
# I want to see all the text; so multiple successive
# starts just keep extending the interval. It's only
# when the covered tags go down to 0 that we start
# skipping stuff.
allIndices = list(toInsert.keys())
allIndices.sort()
covering = 0
for index in allIndices:
[starts, ends] = toInsert[index]
if ends:
covering -= len(ends)
# We're ending some stuff.
# Not more than the length of the signal.
right = min(index + 20, signalLen)
if covering == 0:
# Set the right index of the current interval
intervals[-1][1] = right
if starts:
if covering == 0:
# Not less than 0.
left = max(index - 20, 0)
if intervals and ((intervals[-1][1] + 10) >= left):
# If it's within 10 of the last interval, just
# add it.
intervals[-1][1] = right
else:
intervals.append([left, index])
covering += len(starts)
# Now, we have all the indices we need.
bracketKeys = toInsert.keys()
bracketKeys.sort()
# There can be multiple annotations inside
# a given interval, don't forget.
docSignal = a.doc.signal
strs = ["..."]
for [left, right] in intervals:
if len(strs) > 1: strs.append("...")
start = left
while bracketKeys and \
(bracketKeys[0] > left) and \
(bracketKeys[0] < right):
strs.append(docSignal[start:bracketKeys[0]])
[bStart, bEnd] = toInsert[bracketKeys[0]]
strs += bEnd
strs += bStart
start = bracketKeys[0]
bracketKeys[0:1] = []
strs.append(docSignal[start:right])
strs.append("...")
# Get rid of all the newlines by splitting at
# whitespace and reconstructing.
return " ".join("".join(strs).split())
else:
return None
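    # Illustrative usage sketch (added for clarity; the file and label names
    # are hypothetical):
    #   reporter = AnnotationReporter(partitionByLabel = True)
    #   reporter.addPosition(concordanceContext = True)
    #   reporter.addDocument(doc, "doc1.json", ["PERSON", "LOCATION"])
    #   headers, rows = reporter.getHeadersAndRows()
    # With partitionByLabel set, getPartitionedHeadersAndRows() instead returns
    # a dictionary mapping each label to its own (headers, rows) pair.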
| apache-2.0 | 3,540,611,689,557,830,700 | 43.710809 | 192 | 0.543715 | false |
dpmatthews/rose | metomi/rose/env_cat.py | 4 | 2206 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (C) 2012-2019 British Crown (Met Office) & Contributors.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Implements "rose env-cat"."""
from metomi.rose.env import env_var_process, UnboundEnvironmentVariableError
from metomi.rose.opt_parse import RoseOptionParser
import sys
def main():
"""Implement "rose env-cat"."""
opt_parser = RoseOptionParser()
opt_parser.add_my_options("match_mode", "output_file", "unbound")
opts, args = opt_parser.parse_args()
if not args:
args = ["-"]
if not opts.output_file or opts.output_file == "-":
out_handle = sys.stdout
else:
        out_handle = open(opts.output_file, "w")
for arg in args:
if arg == "-":
in_handle = sys.stdin
else:
in_handle = open(arg)
line_num = 0
while True:
line_num += 1
line = in_handle.readline()
if not line:
break
try:
out_handle.write(
env_var_process(line, opts.unbound, opts.match_mode))
except UnboundEnvironmentVariableError as exc:
name = arg
if arg == "-":
name = "<STDIN>"
sys.exit("%s:%s: %s" % (name, line_num, str(exc)))
in_handle.close()
out_handle.close()
if __name__ == "__main__":
main()
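# Illustrative usage sketch (added for clarity; the file name is hypothetical):
#   rose env-cat suite.rc
# reads "suite.rc", substitutes environment variables in each line via
# env_var_process, and writes the result to stdout; with no file argument
# (or "-") it filters stdin instead.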
| gpl-3.0 | -6,525,386,018,596,820,000 | 34.015873 | 79 | 0.56301 | false |
BarraQDA/nvivotools | querySource.py | 1 | 5135 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Jonathan Schultz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import argparse
from NVivoNorm import NVivoNorm
from sqlalchemy import *
import re
import csv
import shutil
def add_arguments(parser):
parser.description = "Query sources in a normalised file."
generalgroup = parser.add_argument_group('General')
generalgroup.add_argument( 'infile', type=str,
help='Input normalised NVivo (.nvpn) file')
generalgroup.add_argument('-o', '--outfile', type=str,
help='Output file')
generalgroup.add_argument('-s', '--source', type=str)
generalgroup.add_argument('-c', '--category', type=str)
advancedgroup = parser.add_argument_group('Advanced')
advancedgroup.add_argument('-v', '--verbosity', type=int, default=1)
advancedgroup.add_argument('--no-comments', action='store_true', help='Do not produce a comments logfile')
parser.set_defaults(func=querySource)
parser.set_defaults(build_comments=build_comments)
parser.set_defaults(hiddenargs=['hiddenargs', 'verbosity', 'no_comments'])
def parse_arguments():
parser = argparse.ArgumentParser()
add_arguments(parser)
return vars(parser.parse_args())
def build_comments(kwargs):
comments = ((' ' + kwargs['outfile'] + ' ') if kwargs['outfile'] else '').center(80, '#') + '\n'
comments += '# ' + os.path.basename(__file__) + '\n'
hiddenargs = kwargs['hiddenargs'] + ['hiddenargs', 'func', 'build_comments']
for argname, argval in kwargs.items():
if argname not in hiddenargs:
if type(argval) == str:
comments += '# --' + argname + '="' + argval + '"\n'
elif type(argval) == bool:
if argval:
comments += '# --' + argname + '\n'
elif type(argval) == list:
for valitem in argval:
if type(valitem) == str:
comments += '# --' + argname + '="' + valitem + '"\n'
else:
comments += '# --' + argname + '=' + str(valitem) + '\n'
elif argval is not None:
comments += '# --' + argname + '=' + str(argval) + '\n'
return comments
def querySource(infile, outfile,
source, category,
verbosity, no_comments,
comments, **dummy):
try:
norm = NVivoNorm(infile)
norm.begin()
sourcesel = select([
norm.Source.c.Name,
norm.Source.c.Description,
norm.SourceCategory.c.Name.label('Category'),
norm.Source.c.Color,
norm.Source.c.Content
]).select_from(
norm.Source.outerjoin(norm.SourceCategory,
norm.SourceCategory.c.Id == norm.Source.c.Category)
)
params = {}
if source:
sourcesel = sourcesel.where(
norm.Source.c.Name == bindparam('Source')
)
params.update({'Source': source})
if category:
sourcesel = sourcesel.where(and_(
norm.Source.c.Category == norm.SourceCategory.c.Id,
norm.SourceCategory.c.Name == bindparam('SourceCategory')
))
params.update({'SourceCategory': category})
if outfile:
if os.path.exists(outfile):
shutil.move(outfile, outfile + '.bak')
csvfile = open(outfile, 'w')
else:
csvfile = sys.stdout
if not no_comments:
csvfile.write(comments)
csvfile.write('#' * 80 + '\n')
csvwriter = csv.DictWriter(csvfile,
fieldnames=['Name', 'Description', 'Content', 'Category', 'Color'],
extrasaction='ignore',
lineterminator=os.linesep,
quoting=csv.QUOTE_NONNUMERIC)
csvwriter.writeheader()
for source in norm.con.execute(sourcesel, params):
csvwriter.writerow(dict(source))
csvfile.close()
except:
raise
finally:
del norm
def main():
kwargs = parse_arguments()
kwargs['comments'] = build_comments(kwargs)
kwargs['func'](**kwargs)
if __name__ == '__main__':
main()
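# Illustrative usage sketch (added for clarity; the project and category names
# are hypothetical):
#   python querySource.py -c "Interviews" -o sources.csv project.nvpn
# writes a CSV of Name/Description/Content/Category/Color for each source in
# the "Interviews" category of project.nvpn, preceded by a commented record of
# the arguments unless --no-comments is given.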
| gpl-3.0 | 7,609,732,775,818,592,000 | 34.413793 | 110 | 0.55482 | false |
googleapis/python-aiplatform | samples/model-builder/image_dataset_import_data_sample_test.py | 1 | 1361 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import image_dataset_import_data_sample
import test_constants as constants
def test_image_dataset_import_data_sample(
mock_sdk_init, mock_import_image_dataset, mock_get_image_dataset
):
image_dataset_import_data_sample.image_dataset_import_data_sample(
project=constants.PROJECT,
location=constants.LOCATION,
src_uris=constants.GCS_SOURCES,
import_schema_uri=None,
dataset_id=constants.DATASET_NAME,
)
mock_get_image_dataset.assert_called_once_with(constants.DATASET_NAME)
mock_sdk_init.assert_called_once_with(
project=constants.PROJECT, location=constants.LOCATION
)
mock_import_image_dataset.assert_called_once_with(
gcs_source=constants.GCS_SOURCES, import_schema_uri=None, sync=True
)
| apache-2.0 | -2,818,455,065,200,854,500 | 33.025 | 75 | 0.736958 | false |
jumpstarter-io/glance | glance/api/v2/images.py | 3 | 38110 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import glance_store
from oslo.serialization import jsonutils as json
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import six
import six.moves.urllib.parse as urlparse
import webob.exc
from glance.api import policy
from glance.common import exception
from glance.common import location_strategy
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.gateway
from glance import i18n
import glance.notifier
import glance.schema
LOG = logging.getLogger(__name__)
_ = i18n._
_LW = i18n._LW
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'glance.common.config', group='image_format')
CONF.import_opt('container_formats', 'glance.common.config',
group='image_format')
class ImagesController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
store_api=None):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or glance.notifier.Notifier()
self.store_api = store_api or glance_store
self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy)
@utils.mutating
def create(self, req, image, extra_properties, tags):
image_factory = self.gateway.get_image_factory(req.context)
image_repo = self.gateway.get_repo(req.context)
try:
image = image_factory.new_image(extra_properties=extra_properties,
tags=tags, **image)
image_repo.add(image)
except exception.DuplicateLocation as dup:
raise webob.exc.HTTPBadRequest(explanation=dup.msg)
except exception.Invalid as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.InvalidParameterValue as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.LimitExceeded as e:
LOG.warn(utils.exception_to_str(e))
raise webob.exc.HTTPRequestEntityTooLarge(
explanation=e.msg, request=req, content_type='text/plain')
except exception.Duplicate as dupex:
raise webob.exc.HTTPConflict(explanation=dupex.msg)
except exception.ReservedProperty as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.ReadonlyProperty as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except TypeError as e:
LOG.debug(utils.exception_to_str(e))
raise webob.exc.HTTPBadRequest(
explanation=utils.exception_to_str(e))
return image
def index(self, req, marker=None, limit=None, sort_key=None,
sort_dir=None, filters=None, member_status='accepted'):
sort_key = ['created_at'] if not sort_key else sort_key
sort_dir = ['desc'] if not sort_dir else sort_dir
result = {}
if filters is None:
filters = {}
filters['deleted'] = False
if limit is None:
limit = CONF.limit_param_default
limit = min(CONF.api_limit_max, limit)
image_repo = self.gateway.get_repo(req.context)
try:
images = image_repo.list(marker=marker, limit=limit,
sort_key=sort_key,
sort_dir=sort_dir,
filters=filters,
member_status=member_status)
if len(images) != 0 and len(images) == limit:
result['next_marker'] = images[-1].image_id
except (exception.NotFound, exception.InvalidSortKey,
exception.InvalidFilterRangeValue) as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
result['images'] = images
return result
def show(self, req, image_id):
image_repo = self.gateway.get_repo(req.context)
try:
return image_repo.get(image_id)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
@utils.mutating
def update(self, req, image_id, changes):
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
for change in changes:
change_method_name = '_do_%s' % change['op']
assert hasattr(self, change_method_name)
change_method = getattr(self, change_method_name)
change_method(req, image, change)
if changes:
image_repo.save(image)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Invalid as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.InvalidParameterValue as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.StorageQuotaFull as e:
msg = (_("Denying attempt to upload image because it exceeds the"
" quota: %s") % utils.exception_to_str(e))
LOG.warn(msg)
raise webob.exc.HTTPRequestEntityTooLarge(
explanation=msg, request=req, content_type='text/plain')
except exception.LimitExceeded as e:
LOG.exception(utils.exception_to_str(e))
raise webob.exc.HTTPRequestEntityTooLarge(
explanation=e.msg, request=req, content_type='text/plain')
return image
def _do_replace(self, req, image, change):
path = change['path']
path_root = path[0]
value = change['value']
if path_root == 'locations':
self._do_replace_locations(image, value)
else:
if hasattr(image, path_root):
setattr(image, path_root, value)
elif path_root in image.extra_properties:
image.extra_properties[path_root] = value
else:
msg = _("Property %s does not exist.")
raise webob.exc.HTTPConflict(msg % path_root)
def _do_add(self, req, image, change):
path = change['path']
path_root = path[0]
value = change['value']
json_schema_version = change.get('json_schema_version', 10)
if path_root == 'locations':
self._do_add_locations(image, path[1], value)
else:
if ((hasattr(image, path_root) or
path_root in image.extra_properties)
and json_schema_version == 4):
msg = _("Property %s already present.")
raise webob.exc.HTTPConflict(msg % path_root)
if hasattr(image, path_root):
setattr(image, path_root, value)
else:
image.extra_properties[path_root] = value
def _do_remove(self, req, image, change):
path = change['path']
path_root = path[0]
if path_root == 'locations':
self._do_remove_locations(image, path[1])
else:
if hasattr(image, path_root):
msg = _("Property %s may not be removed.")
raise webob.exc.HTTPForbidden(msg % path_root)
elif path_root in image.extra_properties:
del image.extra_properties[path_root]
else:
msg = _("Property %s does not exist.")
raise webob.exc.HTTPConflict(msg % path_root)
@utils.mutating
def delete(self, req, image_id):
image_repo = self.gateway.get_repo(req.context)
try:
image = image_repo.get(image_id)
image.delete()
image_repo.remove(image)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
msg = (_("Failed to find image %(image_id)s to delete") %
{'image_id': image_id})
LOG.warn(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.InUseByStore as e:
msg = (_("Image %(id)s could not be deleted "
"because it is in use: %(exc)s") %
{"id": image_id,
"exc": e.msg})
LOG.warn(msg)
raise webob.exc.HTTPConflict(explanation=msg)
def _get_locations_op_pos(self, path_pos, max_pos, allow_max):
if path_pos is None or max_pos is None:
return None
pos = max_pos if allow_max else max_pos - 1
if path_pos.isdigit():
pos = int(path_pos)
elif path_pos != '-':
return None
if not (allow_max or 0 <= pos < max_pos):
return None
return pos
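    # Illustrative examples (added for clarity):
    #   _get_locations_op_pos('-', 3, True)  -> 3  (append after the last entry)
    #   _get_locations_op_pos('1', 3, False) -> 1
    #   _get_locations_op_pos('5', 3, False) -> None  (out of range)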
def _do_replace_locations(self, image, value):
if len(image.locations) > 0 and len(value) > 0:
msg = _("Cannot replace locations from a non-empty "
"list to a non-empty list.")
raise webob.exc.HTTPBadRequest(explanation=msg)
if len(value) == 0:
# NOTE(zhiyan): this actually deletes the location
# from the backend store.
del image.locations[:]
if image.status == 'active':
image.status = 'queued'
else: # NOTE(zhiyan): len(image.locations) == 0
try:
image.locations = value
if image.status == 'queued':
image.status = 'active'
except (exception.BadStoreUri, exception.DuplicateLocation) as bse:
raise webob.exc.HTTPBadRequest(explanation=bse.msg)
except ValueError as ve: # update image status failed.
raise webob.exc.HTTPBadRequest(
explanation=utils.exception_to_str(ve))
def _do_add_locations(self, image, path_pos, value):
pos = self._get_locations_op_pos(path_pos,
len(image.locations), True)
if pos is None:
msg = _("Invalid position for adding a location.")
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
image.locations.insert(pos, value)
if image.status == 'queued':
image.status = 'active'
except (exception.BadStoreUri, exception.DuplicateLocation) as bse:
raise webob.exc.HTTPBadRequest(explanation=bse.msg)
except ValueError as ve: # update image status failed.
raise webob.exc.HTTPBadRequest(
explanation=utils.exception_to_str(ve))
def _do_remove_locations(self, image, path_pos):
pos = self._get_locations_op_pos(path_pos,
len(image.locations), False)
if pos is None:
msg = _("Invalid position for removing a location.")
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
# NOTE(zhiyan): this actually deletes the location
# from the backend store.
image.locations.pop(pos)
except Exception as e:
raise webob.exc.HTTPInternalServerError(
explanation=utils.exception_to_str(e))
if len(image.locations) == 0 and image.status == 'active':
image.status = 'queued'
class RequestDeserializer(wsgi.JSONRequestDeserializer):
_disallowed_properties = ('direct_url', 'self', 'file', 'schema')
_readonly_properties = ('created_at', 'updated_at', 'status', 'checksum',
'size', 'virtual_size', 'direct_url', 'self',
'file', 'schema')
_reserved_properties = ('owner', 'location', 'deleted', 'deleted_at')
_base_properties = ('checksum', 'created_at', 'container_format',
'disk_format', 'id', 'min_disk', 'min_ram', 'name',
'size', 'virtual_size', 'status', 'tags',
'updated_at', 'visibility', 'protected')
_available_sort_keys = ('name', 'status', 'container_format',
'disk_format', 'size', 'id', 'created_at',
'updated_at')
_default_sort_key = 'created_at'
_default_sort_dir = 'desc'
_path_depth_limits = {'locations': {'add': 2, 'remove': 2, 'replace': 1}}
def __init__(self, schema=None):
super(RequestDeserializer, self).__init__()
self.schema = schema or get_schema()
def _get_request_body(self, request):
output = super(RequestDeserializer, self).default(request)
if 'body' not in output:
msg = _('Body expected in request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return output['body']
@classmethod
def _check_allowed(cls, image):
for key in cls._disallowed_properties:
if key in image:
msg = _("Attribute '%s' is read-only.") % key
raise webob.exc.HTTPForbidden(
explanation=utils.exception_to_str(msg))
def create(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
try:
self.schema.validate(body)
except exception.InvalidObject as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
image = {}
properties = body
tags = properties.pop('tags', [])
for key in self._base_properties:
try:
                # NOTE(flwang): instead of changing _check_unexpected in
                # ImageFactory, it is better to do the mapping here.
if key == 'id':
image['image_id'] = properties.pop(key)
else:
image[key] = properties.pop(key)
except KeyError:
pass
return dict(image=image, extra_properties=properties, tags=tags)
def _get_change_operation_d10(self, raw_change):
try:
return raw_change['op']
except KeyError:
msg = _("Unable to find '%s' in JSON Schema change") % 'op'
raise webob.exc.HTTPBadRequest(explanation=msg)
def _get_change_operation_d4(self, raw_change):
op = None
for key in ['replace', 'add', 'remove']:
if key in raw_change:
if op is not None:
msg = _('Operation objects must contain only one member'
' named "add", "remove", or "replace".')
raise webob.exc.HTTPBadRequest(explanation=msg)
op = key
if op is None:
msg = _('Operation objects must contain exactly one member'
' named "add", "remove", or "replace".')
raise webob.exc.HTTPBadRequest(explanation=msg)
return op
def _get_change_path_d10(self, raw_change):
try:
return raw_change['path']
except KeyError:
msg = _("Unable to find '%s' in JSON Schema change") % 'path'
raise webob.exc.HTTPBadRequest(explanation=msg)
def _get_change_path_d4(self, raw_change, op):
return raw_change[op]
def _decode_json_pointer(self, pointer):
"""Parse a json pointer.
Json Pointers are defined in
http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer .
The pointers use '/' for separation between object attributes, such
that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character
in an attribute name is encoded as "~1" and a '~' character is encoded
as "~0".
"""
self._validate_json_pointer(pointer)
ret = []
for part in pointer.lstrip('/').split('/'):
ret.append(part.replace('~1', '/').replace('~0', '~').strip())
return ret
def _validate_json_pointer(self, pointer):
"""Validate a json pointer.
We only accept a limited form of json pointers.
"""
if not pointer.startswith('/'):
msg = _('Pointer `%s` does not start with "/".') % pointer
raise webob.exc.HTTPBadRequest(explanation=msg)
        if re.search(r'/\s*?/', pointer[1:]):
msg = _('Pointer `%s` contains adjacent "/".') % pointer
raise webob.exc.HTTPBadRequest(explanation=msg)
if len(pointer) > 1 and pointer.endswith('/'):
            msg = _('Pointer `%s` ends with "/".') % pointer
raise webob.exc.HTTPBadRequest(explanation=msg)
if pointer[1:].strip() == '/':
            msg = _('Pointer `%s` does not contain a valid token.') % pointer
raise webob.exc.HTTPBadRequest(explanation=msg)
if re.search('~[^01]', pointer) or pointer.endswith('~'):
msg = _('Pointer `%s` contains "~" not part of'
' a recognized escape sequence.') % pointer
raise webob.exc.HTTPBadRequest(explanation=msg)
def _get_change_value(self, raw_change, op):
if 'value' not in raw_change:
msg = _('Operation "%s" requires a member named "value".')
raise webob.exc.HTTPBadRequest(explanation=msg % op)
return raw_change['value']
def _validate_change(self, change):
path_root = change['path'][0]
if path_root in self._readonly_properties:
msg = _("Attribute '%s' is read-only.") % path_root
raise webob.exc.HTTPForbidden(explanation=six.text_type(msg))
if path_root in self._reserved_properties:
msg = _("Attribute '%s' is reserved.") % path_root
raise webob.exc.HTTPForbidden(explanation=six.text_type(msg))
if change['op'] == 'delete':
return
partial_image = None
if len(change['path']) == 1:
partial_image = {path_root: change['value']}
elif ((path_root in get_base_properties().keys()) and
(get_base_properties()[path_root].get('type', '') == 'array')):
            # NOTE(zhiyan): clients can use the PATCH API to add an element to
            # the image's existing set property directly.
            # For example: 1. using a '/locations/N' path to add a location
            #                 to the image's 'locations' list at position N.
            #                 (implemented)
            #              2. using a '/tags/-' path to append a tag to the
            #                 image's 'tags' list. (Not implemented)
partial_image = {path_root: [change['value']]}
if partial_image:
try:
self.schema.validate(partial_image)
except exception.InvalidObject as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
def _validate_path(self, op, path):
path_root = path[0]
limits = self._path_depth_limits.get(path_root, {})
if len(path) != limits.get(op, 1):
msg = _("Invalid JSON pointer for this resource: "
"'/%s'") % '/'.join(path)
raise webob.exc.HTTPBadRequest(explanation=six.text_type(msg))
def _parse_json_schema_change(self, raw_change, draft_version):
if draft_version == 10:
op = self._get_change_operation_d10(raw_change)
path = self._get_change_path_d10(raw_change)
elif draft_version == 4:
op = self._get_change_operation_d4(raw_change)
path = self._get_change_path_d4(raw_change, op)
else:
msg = _('Unrecognized JSON Schema draft version')
raise webob.exc.HTTPBadRequest(explanation=msg)
path_list = self._decode_json_pointer(path)
return op, path_list
def update(self, request):
changes = []
content_types = {
'application/openstack-images-v2.0-json-patch': 4,
'application/openstack-images-v2.1-json-patch': 10,
}
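        # Illustrative example (editorial addition, not in the original
        # source): a v2.1 JSON-patch body such as
        #   [{"op": "replace", "path": "/name", "value": "new-name"}]
        # is parsed below into a change dict carrying op, path and value.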
if request.content_type not in content_types:
headers = {'Accept-Patch':
', '.join(sorted(content_types.keys()))}
raise webob.exc.HTTPUnsupportedMediaType(headers=headers)
json_schema_version = content_types[request.content_type]
body = self._get_request_body(request)
if not isinstance(body, list):
msg = _('Request body must be a JSON array of operation objects.')
raise webob.exc.HTTPBadRequest(explanation=msg)
for raw_change in body:
if not isinstance(raw_change, dict):
msg = _('Operations must be JSON objects.')
raise webob.exc.HTTPBadRequest(explanation=msg)
(op, path) = self._parse_json_schema_change(raw_change,
json_schema_version)
# NOTE(zhiyan): the 'path' is a list.
self._validate_path(op, path)
change = {'op': op, 'path': path,
'json_schema_version': json_schema_version}
if not op == 'remove':
change['value'] = self._get_change_value(raw_change, op)
self._validate_change(change)
changes.append(change)
return {'changes': changes}
def _validate_limit(self, limit):
try:
limit = int(limit)
except ValueError:
msg = _("limit param must be an integer")
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _("limit param must be positive")
raise webob.exc.HTTPBadRequest(explanation=msg)
return limit
def _validate_sort_key(self, sort_key):
if sort_key not in self._available_sort_keys:
msg = _('Invalid sort key: %(sort_key)s. '
'It must be one of the following: %(available)s.') % \
{'sort_key': sort_key,
'available': ', '.join(self._available_sort_keys)}
raise webob.exc.HTTPBadRequest(explanation=msg)
return sort_key
def _validate_sort_dir(self, sort_dir):
if sort_dir not in ['asc', 'desc']:
msg = _('Invalid sort direction: %s') % sort_dir
raise webob.exc.HTTPBadRequest(explanation=msg)
return sort_dir
def _validate_member_status(self, member_status):
if member_status not in ['pending', 'accepted', 'rejected', 'all']:
msg = _('Invalid status: %s') % member_status
raise webob.exc.HTTPBadRequest(explanation=msg)
return member_status
def _get_filters(self, filters):
visibility = filters.get('visibility')
if visibility:
if visibility not in ['public', 'private', 'shared']:
msg = _('Invalid visibility value: %s') % visibility
raise webob.exc.HTTPBadRequest(explanation=msg)
changes_since = filters.get('changes-since', None)
if changes_since:
msg = _('The "changes-since" filter is no longer available on v2.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return filters
def _get_sorting_params(self, params):
"""
Process sorting params.
        Currently glance supports two sorting syntaxes: the classic one and a
        new one that is uniform across all OpenStack projects.
Classic syntax: sort_key=name&sort_dir=asc&sort_key=size&sort_dir=desc
New syntax: sort=name:asc,size:desc
"""
sort_keys = []
sort_dirs = []
if 'sort' in params:
# use new sorting syntax here
if 'sort_key' in params or 'sort_dir' in params:
msg = _('Old and new sorting syntax cannot be combined')
raise webob.exc.HTTPBadRequest(explanation=msg)
for sort_param in params.pop('sort').strip().split(','):
key, _sep, dir = sort_param.partition(':')
if not dir:
dir = self._default_sort_dir
sort_keys.append(self._validate_sort_key(key.strip()))
sort_dirs.append(self._validate_sort_dir(dir.strip()))
else:
# continue with classic syntax
# NOTE(mfedosin): we have 3 options here:
# 1. sort_dir wasn't passed: we use default one - 'desc'.
# 2. Only one sort_dir was passed: use it for every sort_key
# in the list.
# 3. Multiple sort_dirs were passed: consistently apply each one to
# the corresponding sort_key.
# If number of sort_dirs and sort_keys doesn't match then raise an
# exception.
while 'sort_key' in params:
sort_keys.append(self._validate_sort_key(
params.pop('sort_key').strip()))
while 'sort_dir' in params:
sort_dirs.append(self._validate_sort_dir(
params.pop('sort_dir').strip()))
if sort_dirs:
dir_len = len(sort_dirs)
key_len = len(sort_keys)
if dir_len > 1 and dir_len != key_len:
msg = _('Number of sort dirs does not match the number '
'of sort keys')
raise webob.exc.HTTPBadRequest(explanation=msg)
if not sort_keys:
sort_keys = [self._default_sort_key]
if not sort_dirs:
sort_dirs = [self._default_sort_dir]
return sort_keys, sort_dirs
def index(self, request):
params = request.params.copy()
limit = params.pop('limit', None)
marker = params.pop('marker', None)
member_status = params.pop('member_status', 'accepted')
        # NOTE (flwang): to avoid relying on a comma or other predefined
        # characters to split multiple tags, we allow the user to specify
        # multiple 'tag' parameters in the URL, e.g. v2/images?tag=x86&tag=64bit.
tags = []
while 'tag' in params:
tags.append(params.pop('tag').strip())
query_params = {
'filters': self._get_filters(params),
'member_status': self._validate_member_status(member_status),
}
if marker is not None:
query_params['marker'] = marker
if limit is not None:
query_params['limit'] = self._validate_limit(limit)
if tags:
query_params['filters']['tags'] = tags
        # NOTE(mfedosin): the params are still called sort_key and sort_dir
        # (instead of sort_keys and sort_dirs) because in v1 each of them is
        # still a single value.
query_params['sort_key'], query_params['sort_dir'] = \
self._get_sorting_params(params)
return query_params
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
super(ResponseSerializer, self).__init__()
self.schema = schema or get_schema()
def _get_image_href(self, image, subcollection=''):
base_href = '/v2/images/%s' % image.image_id
if subcollection:
base_href = '%s/%s' % (base_href, subcollection)
return base_href
def _format_image(self, image):
image_view = dict()
try:
image_view = dict(image.extra_properties)
attributes = ['name', 'disk_format', 'container_format',
'visibility', 'size', 'virtual_size', 'status',
'checksum', 'protected', 'min_ram', 'min_disk',
'owner']
for key in attributes:
image_view[key] = getattr(image, key)
image_view['id'] = image.image_id
image_view['created_at'] = timeutils.isotime(image.created_at)
image_view['updated_at'] = timeutils.isotime(image.updated_at)
if CONF.show_multiple_locations:
locations = list(image.locations)
if locations:
image_view['locations'] = []
for loc in locations:
tmp = dict(loc)
tmp.pop('id', None)
tmp.pop('status', None)
image_view['locations'].append(tmp)
else:
                    # NOTE (flwang): we still show "locations": [] when
                    # image.locations is empty, to indicate that showing
                    # locations is allowed but none currently exist.
image_view['locations'] = []
LOG.debug("There is not available location "
"for image %s" % image.image_id)
if CONF.show_image_direct_url:
if image.locations:
# Choose best location configured strategy
l = location_strategy.choose_best_location(image.locations)
image_view['direct_url'] = l['url']
else:
LOG.debug("There is not available location "
"for image %s" % image.image_id)
image_view['tags'] = list(image.tags)
image_view['self'] = self._get_image_href(image)
image_view['file'] = self._get_image_href(image, 'file')
image_view['schema'] = '/v2/schemas/image'
image_view = self.schema.filter(image_view) # domain
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
return image_view
def create(self, response, image):
response.status_int = 201
self.show(response, image)
response.location = self._get_image_href(image)
def show(self, response, image):
image_view = self._format_image(image)
body = json.dumps(image_view, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def update(self, response, image):
image_view = self._format_image(image)
body = json.dumps(image_view, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def index(self, response, result):
params = dict(response.request.params)
params.pop('marker', None)
query = urlparse.urlencode(params)
body = {
'images': [self._format_image(i) for i in result['images']],
'first': '/v2/images',
'schema': '/v2/schemas/images',
}
if query:
body['first'] = '%s?%s' % (body['first'], query)
if 'next_marker' in result:
params['marker'] = result['next_marker']
next_query = urlparse.urlencode(params)
body['next'] = '/v2/images?%s' % next_query
response.unicode_body = six.text_type(json.dumps(body,
ensure_ascii=False))
response.content_type = 'application/json'
def delete(self, response, result):
response.status_int = 204
def get_base_properties():
return {
'id': {
'type': 'string',
'description': _('An identifier for the image'),
'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
},
'name': {
'type': ['null', 'string'],
'description': _('Descriptive name for the image'),
'maxLength': 255,
},
'status': {
'type': 'string',
'description': _('Status of the image (READ-ONLY)'),
'enum': ['queued', 'saving', 'active', 'killed',
'deleted', 'pending_delete'],
},
'visibility': {
'type': 'string',
'description': _('Scope of image accessibility'),
'enum': ['public', 'private'],
},
'protected': {
'type': 'boolean',
'description': _('If true, image will not be deletable.'),
},
'checksum': {
'type': ['null', 'string'],
'description': _('md5 hash of image contents. (READ-ONLY)'),
'maxLength': 32,
},
'owner': {
'type': ['null', 'string'],
'description': _('Owner of the image'),
'maxLength': 255,
},
'size': {
'type': ['null', 'integer'],
'description': _('Size of image file in bytes (READ-ONLY)'),
},
'virtual_size': {
'type': ['null', 'integer'],
'description': _('Virtual size of image in bytes (READ-ONLY)'),
},
'container_format': {
'type': ['null', 'string'],
'description': _('Format of the container'),
'enum': [None] + CONF.image_format.container_formats,
},
'disk_format': {
'type': ['null', 'string'],
'description': _('Format of the disk'),
'enum': [None] + CONF.image_format.disk_formats,
},
'created_at': {
'type': 'string',
'description': _('Date and time of image registration'
' (READ-ONLY)'),
# TODO(bcwaldon): our jsonschema library doesn't seem to like the
# format attribute, figure out why!
# 'format': 'date-time',
},
'updated_at': {
'type': 'string',
'description': _('Date and time of the last image modification'
' (READ-ONLY)'),
# 'format': 'date-time',
},
'tags': {
'type': 'array',
'description': _('List of strings related to the image'),
'items': {
'type': 'string',
'maxLength': 255,
},
},
'direct_url': {
'type': 'string',
'description': _('URL to access the image file kept in external '
'store (READ-ONLY)'),
},
'min_ram': {
'type': 'integer',
'description': _('Amount of ram (in MB) required to boot image.'),
},
'min_disk': {
'type': 'integer',
'description': _('Amount of disk space (in GB) required to boot '
'image.'),
},
'self': {
'type': 'string',
'description': '(READ-ONLY)'
},
'file': {
'type': 'string',
'description': '(READ-ONLY)'
},
'schema': {
'type': 'string',
'description': '(READ-ONLY)'
},
'locations': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'url': {
'type': 'string',
'maxLength': 255,
},
'metadata': {
'type': 'object',
},
},
'required': ['url', 'metadata'],
},
'description': _('A set of URLs to access the image file kept in '
'external store'),
},
}
def _get_base_links():
return [
{'rel': 'self', 'href': '{self}'},
{'rel': 'enclosure', 'href': '{file}'},
{'rel': 'describedby', 'href': '{schema}'},
]
def get_schema(custom_properties=None):
properties = get_base_properties()
links = _get_base_links()
if CONF.allow_additional_image_properties:
schema = glance.schema.PermissiveSchema('image', properties, links)
else:
schema = glance.schema.Schema('image', properties)
if custom_properties:
for property_value in custom_properties.values():
property_value['is_base'] = False
schema.merge_properties(custom_properties)
return schema
def get_collection_schema(custom_properties=None):
image_schema = get_schema(custom_properties)
return glance.schema.CollectionSchema('images', image_schema)
def load_custom_properties():
"""Find the schema properties files and load them into a dict."""
filename = 'schema-image.json'
match = CONF.find_file(filename)
if match:
with open(match, 'r') as schema_file:
schema_data = schema_file.read()
return json.loads(schema_data)
else:
msg = (_LW('Could not find schema properties file %s. Continuing '
'without custom properties') % filename)
LOG.warn(msg)
return {}
def create_resource(custom_properties=None):
"""Images resource factory method"""
schema = get_schema(custom_properties)
deserializer = RequestDeserializer(schema)
serializer = ResponseSerializer(schema)
controller = ImagesController()
return wsgi.Resource(controller, deserializer, serializer)
| apache-2.0 | 7,292,079,490,618,843,000 | 38.822362 | 79 | 0.548491 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/application_gateway.py | 1 | 8366 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ApplicationGateway(Resource):
"""Application gateway resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param sku: SKU of the application gateway resource.
:type sku: ~azure.mgmt.network.v2017_03_01.models.ApplicationGatewaySku
:param ssl_policy: SSL policy of the application gateway resource.
:type ssl_policy:
~azure.mgmt.network.v2017_03_01.models.ApplicationGatewaySslPolicy
:ivar operational_state: Operational state of the application gateway
resource. Possible values include: 'Stopped', 'Starting', 'Running',
'Stopping'
:vartype operational_state: str or
~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayOperationalState
    :param gateway_ip_configurations: Subnets of the application gateway
resource.
:type gateway_ip_configurations:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayIPConfiguration]
:param authentication_certificates: Authentication certificates of the
application gateway resource.
:type authentication_certificates:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayAuthenticationCertificate]
:param ssl_certificates: SSL certificates of the application gateway
resource.
:type ssl_certificates:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewaySslCertificate]
:param frontend_ip_configurations: Frontend IP addresses of the
application gateway resource.
:type frontend_ip_configurations:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayFrontendIPConfiguration]
:param frontend_ports: Frontend ports of the application gateway resource.
:type frontend_ports:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayFrontendPort]
:param probes: Probes of the application gateway resource.
:type probes:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayProbe]
:param backend_address_pools: Backend address pool of the application
gateway resource.
:type backend_address_pools:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayBackendAddressPool]
:param backend_http_settings_collection: Backend http settings of the
application gateway resource.
:type backend_http_settings_collection:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayBackendHttpSettings]
:param http_listeners: Http listeners of the application gateway resource.
:type http_listeners:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayHttpListener]
:param url_path_maps: URL path map of the application gateway resource.
:type url_path_maps:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayUrlPathMap]
:param request_routing_rules: Request routing rules of the application
gateway resource.
:type request_routing_rules:
list[~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayRequestRoutingRule]
:param web_application_firewall_configuration: Web application firewall
configuration.
:type web_application_firewall_configuration:
~azure.mgmt.network.v2017_03_01.models.ApplicationGatewayWebApplicationFirewallConfiguration
:param resource_guid: Resource GUID property of the application gateway
resource.
:type resource_guid: str
:param provisioning_state: Provisioning state of the application gateway
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'operational_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'properties.sku', 'type': 'ApplicationGatewaySku'},
'ssl_policy': {'key': 'properties.sslPolicy', 'type': 'ApplicationGatewaySslPolicy'},
'operational_state': {'key': 'properties.operationalState', 'type': 'str'},
'gateway_ip_configurations': {'key': 'properties.gatewayIPConfigurations', 'type': '[ApplicationGatewayIPConfiguration]'},
'authentication_certificates': {'key': 'properties.authenticationCertificates', 'type': '[ApplicationGatewayAuthenticationCertificate]'},
'ssl_certificates': {'key': 'properties.sslCertificates', 'type': '[ApplicationGatewaySslCertificate]'},
'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[ApplicationGatewayFrontendIPConfiguration]'},
'frontend_ports': {'key': 'properties.frontendPorts', 'type': '[ApplicationGatewayFrontendPort]'},
'probes': {'key': 'properties.probes', 'type': '[ApplicationGatewayProbe]'},
'backend_address_pools': {'key': 'properties.backendAddressPools', 'type': '[ApplicationGatewayBackendAddressPool]'},
'backend_http_settings_collection': {'key': 'properties.backendHttpSettingsCollection', 'type': '[ApplicationGatewayBackendHttpSettings]'},
'http_listeners': {'key': 'properties.httpListeners', 'type': '[ApplicationGatewayHttpListener]'},
'url_path_maps': {'key': 'properties.urlPathMaps', 'type': '[ApplicationGatewayUrlPathMap]'},
'request_routing_rules': {'key': 'properties.requestRoutingRules', 'type': '[ApplicationGatewayRequestRoutingRule]'},
'web_application_firewall_configuration': {'key': 'properties.webApplicationFirewallConfiguration', 'type': 'ApplicationGatewayWebApplicationFirewallConfiguration'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, sku=None, ssl_policy=None, gateway_ip_configurations=None, authentication_certificates=None, ssl_certificates=None, frontend_ip_configurations=None, frontend_ports=None, probes=None, backend_address_pools=None, backend_http_settings_collection=None, http_listeners=None, url_path_maps=None, request_routing_rules=None, web_application_firewall_configuration=None, resource_guid=None, provisioning_state=None, etag=None):
super(ApplicationGateway, self).__init__(id=id, location=location, tags=tags)
self.sku = sku
self.ssl_policy = ssl_policy
self.operational_state = None
self.gateway_ip_configurations = gateway_ip_configurations
self.authentication_certificates = authentication_certificates
self.ssl_certificates = ssl_certificates
self.frontend_ip_configurations = frontend_ip_configurations
self.frontend_ports = frontend_ports
self.probes = probes
self.backend_address_pools = backend_address_pools
self.backend_http_settings_collection = backend_http_settings_collection
self.http_listeners = http_listeners
self.url_path_maps = url_path_maps
self.request_routing_rules = request_routing_rules
self.web_application_firewall_configuration = web_application_firewall_configuration
self.resource_guid = resource_guid
self.provisioning_state = provisioning_state
self.etag = etag
| mit | -275,036,238,992,185,660 | 55.911565 | 478 | 0.705475 | false |
tweezy23/nogotofail | nogotofail/mitm/connection/server.py | 2 | 6988 | r'''
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from nogotofail.mitm.connection import RedirectConnection
from nogotofail.mitm.connection.handlers.selector import default_connection_selector, default_ssl_connection_selector, default_data_selector
from nogotofail.mitm import util
import threading
import socket
import select
import os
import sys
import logging
import time
class Server:
"""Server for handling creation and management of nogotofail.mitm.connection.Connections
"""
def __init__(self, port, app_blame, connection_class=RedirectConnection,
handler_selector=default_connection_selector,
ssl_handler_selector=default_ssl_connection_selector,
data_handler_selector=default_data_selector,
block_non_clients=False, ipv6=False):
self.kill = False
self.port = port
self.kill_fd, self.control_fd = self.setup_control_pipe()
self.connections = {self.kill_fd: None}
self.connection_class = connection_class
self.handler_selector = handler_selector
self.ssl_handler_selector = ssl_handler_selector
self.data_handler_selector = data_handler_selector
self.serving_thread = threading.Thread(target=self.run)
self.serving_thread.daemon = True
self.app_blame = app_blame
self.logger = logging.getLogger("nogotofail.mitm")
self.block_non_clients = block_non_clients
self.ipv6 = ipv6
def start(self):
self.serving_thread.start()
def run(self):
try:
self.serve()
except Exception as e:
self.logger.exception("Uncaught exception in serving thread!")
self.logger.critical("EXITING")
sys.exit()
def _create_server_sockets(self):
sockets = []
for family in [socket.AF_INET, socket.AF_INET6]:
if family == socket.AF_INET6 and not self.ipv6:
break
local_server_socket = socket.socket(family=family)
if family == socket.AF_INET6:
# Force into ipv6 only mode. We will bind a v4 and v6 socket.
# This makes compat a little easier
local_server_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
local_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.connection_class.setup_server_socket(local_server_socket)
local_server_socket.bind(("", self.port))
local_server_socket.listen(5)
sockets.append(local_server_socket)
return sockets
def serve(self):
last_reap = time.time()
# set up the listening sockets
local_server_sockets = self._create_server_sockets()
for sock in local_server_sockets:
self.connections[sock] = None
while not self.kill:
r, _, _ = select.select(self.connections.keys(), [], [], 10)
for fd in r:
if fd == self.kill_fd:
return
if fd in local_server_sockets:
client_socket, client_address = None, (None, None)
try:
(client_socket, client_address) = (
fd.accept())
self.setup_connection(client_socket)
except socket.error as e:
self.logger.error(
"Socket error in connection startup from %s" % client_address[0])
self.logger.exception(e)
util.close_quietly(client_socket)
continue
try:
conn = self.connections[fd]
except KeyError:
# fd could have already been removed if the other end of the socket closed
# and was handled before fd. fd has already been handled so
# move along
continue
try:
cont = conn.bridge(fd)
if not cont:
self.remove(conn)
except Exception as e:
self.logger.exception(e)
self.remove(conn)
# If nothing is happening and we haven't reaped in a while reap
now = time.time()
if (len(r) == 0 and now - last_reap > 600) or now - last_reap > 3600:
for conn in set(self.connections.values()):
if not isinstance(conn, self.connection_class):
continue
if now - conn.last_used > 3600:
self.remove(conn)
def remove(self, conn):
conn.close(handler_initiated=False)
self.connections.pop(conn.server_socket, None)
self.connections.pop(conn.client_socket, None)
self.connections.pop(conn.raw_server_socket, None)
self.connections.pop(conn.raw_client_socket, None)
def setup_connection(self, client_socket):
if self.block_non_clients:
if not self.app_blame.client_available(
client_socket.getpeername()[0]):
self.logger.debug("Connection from non-client %s blocked",
client_socket.getpeername()[0])
client_socket.close()
return
connection = (
self.connection_class(
self, client_socket,
self.handler_selector,
self.ssl_handler_selector,
self.data_handler_selector,
self.app_blame))
if connection.start():
self.connections[connection.client_socket] = connection
self.connections[connection.server_socket] = connection
def setup_control_pipe(self):
killer, controller = os.pipe()
return killer, controller
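    # Editorial note (assumption, not in the original source): writing to
    # control_fd in shutdown() makes kill_fd readable, which wakes the
    # select() call in serve() so the serving thread can exit promptly
    # instead of waiting out the 10 second select timeout.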
def shutdown(self):
self.kill = True
os.write(self.control_fd, "Die!")
self.serving_thread.join(5)
for sock in self.connections:
if sock != self.kill_fd:
sock.close()
def update_sockets(self, connection):
self.connections.pop(connection.raw_client_socket)
self.connections.pop(connection.raw_server_socket)
self.connections[connection.client_socket] = connection
self.connections[connection.server_socket] = connection
| apache-2.0 | -2,954,718,907,837,573,600 | 39.865497 | 140 | 0.590298 | false |
miminar/openshift-ansible | roles/openshift_facts/library/openshift_facts.py | 11 | 51780 | #!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.
"""Ansible module for retrieving and setting openshift related facts"""
# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
import json
import re
import os
import yaml
import struct
import socket
import ipaddress
from distutils.util import strtobool
from ansible.module_utils.six import text_type
from ansible.module_utils.six import string_types
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import * # noqa: F403
from ansible.module_utils.facts import * # noqa: F403
from ansible.module_utils.urls import * # noqa: F403
from ansible.module_utils.six import iteritems, itervalues
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
from ansible.module_utils._text import to_native
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
def migrate_admission_plugin_facts(facts):
""" Apply migrations for admission plugin facts """
if 'master' in facts:
if 'kube_admission_plugin_config' in facts['master']:
if 'admission_plugin_config' not in facts['master']:
facts['master']['admission_plugin_config'] = dict()
# Merge existing kube_admission_plugin_config with admission_plugin_config.
facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
facts['master']['kube_admission_plugin_config'],
additive_facts_to_overwrite=[])
# Remove kube_admission_plugin_config fact
facts['master'].pop('kube_admission_plugin_config', None)
return facts
def migrate_local_facts(facts):
""" Apply migrations of local facts """
migrated_facts = copy.deepcopy(facts)
migrated_facts = migrate_admission_plugin_facts(migrated_facts)
return migrated_facts
def first_ip(network):
""" Return the first IPv4 address in network
Args:
network (str): network in CIDR format
Returns:
str: first IPv4 address
"""
atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0] # noqa: E731
itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr)) # noqa: E731
(address, netmask) = network.split('/')
netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
return itoa((atoi(address) & netmask_i) + 1)
def hostname_valid(hostname):
""" Test if specified hostname should be considered valid
Args:
hostname (str): hostname to test
Returns:
bool: True if valid, otherwise False
"""
if (not hostname or
hostname.startswith('localhost') or
hostname.endswith('localdomain') or
# OpenShift will not allow a node with more than 63 chars in name.
len(hostname) > 63):
return False
return True
def choose_hostname(hostnames=None, fallback=''):
""" Choose a hostname from the provided hostnames
Given a list of hostnames and a fallback value, choose a hostname to
use. This function will prefer fqdns if they exist (excluding any that
begin with localhost or end with localdomain) over ip addresses.
Args:
hostnames (list): list of hostnames
fallback (str): default value to set if hostnames does not contain
a valid hostname
Returns:
str: chosen hostname
"""
hostname = fallback
if hostnames is None:
return hostname
ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
for host_list in (hosts, ips):
for host in host_list:
if hostname_valid(host):
return host
return hostname
def query_metadata(metadata_url, headers=None, expect_json=False):
""" Return metadata from the provided metadata_url
Args:
metadata_url (str): metadata url
headers (dict): headers to set for metadata request
expect_json (bool): does the metadata_url return json
Returns:
dict or list: metadata request result
"""
result, info = fetch_url(module, metadata_url, headers=headers) # noqa: F405
if info['status'] != 200:
raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
if expect_json:
return module.from_json(to_native(result.read())) # noqa: F405
else:
return [to_native(line.strip()) for line in result.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
""" Walk the metadata tree and return a dictionary of the entire tree
Args:
metadata_url (str): metadata url
headers (dict): headers to set for metadata request
expect_json (bool): does the metadata_url return json
Returns:
dict: the result of walking the metadata tree
"""
metadata = dict()
for line in query_metadata(metadata_url, headers, expect_json):
if line.endswith('/') and not line == 'public-keys/':
key = line[:-1]
metadata[key] = walk_metadata(metadata_url + line,
headers, expect_json)
else:
results = query_metadata(metadata_url + line, headers,
expect_json)
if len(results) == 1:
# disable pylint maybe-no-member because overloaded use of
# the module name causes pylint to not detect that results
# is an array or hash
# pylint: disable=maybe-no-member
metadata[line] = results.pop()
else:
metadata[line] = results
return metadata
def get_provider_metadata(metadata_url, supports_recursive=False,
headers=None, expect_json=False):
""" Retrieve the provider metadata
Args:
metadata_url (str): metadata url
supports_recursive (bool): does the provider metadata api support
recursion
headers (dict): headers to set for metadata request
expect_json (bool): does the metadata_url return json
Returns:
dict: the provider metadata
"""
try:
if supports_recursive:
metadata = query_metadata(metadata_url, headers,
expect_json)
else:
metadata = walk_metadata(metadata_url, headers,
expect_json)
except OpenShiftFactsMetadataUnavailableError:
metadata = None
return metadata
def normalize_gce_facts(metadata, facts):
""" Normalize gce facts
Args:
metadata (dict): provider metadata
facts (dict): facts to update
Returns:
dict: the result of adding the normalized metadata to the provided
facts dict
"""
for interface in metadata['instance']['networkInterfaces']:
int_info = dict(ips=[interface['ip']], network_type='gce')
int_info['public_ips'] = [ac['externalIp'] for ac
in interface['accessConfigs']]
int_info['public_ips'].extend(interface['forwardedIps'])
_, _, network_id = interface['network'].rpartition('/')
int_info['network_id'] = network_id
facts['network']['interfaces'].append(int_info)
_, _, zone = metadata['instance']['zone'].rpartition('/')
facts['zone'] = zone
# GCE currently only supports a single interface
facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
facts['network']['public_ip'] = pub_ip
# Split instance hostname from GCE metadata to use the short instance name
facts['network']['hostname'] = metadata['instance']['hostname'].split('.')[0]
# TODO: attempt to resolve public_hostname
facts['network']['public_hostname'] = facts['network']['public_ip']
return facts
def normalize_aws_facts(metadata, facts):
""" Normalize aws facts
Args:
metadata (dict): provider metadata
facts (dict): facts to update
Returns:
dict: the result of adding the normalized metadata to the provided
facts dict
"""
for interface in sorted(
metadata['network']['interfaces']['macs'].values(),
key=lambda x: x['device-number']
):
int_info = dict()
var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
for ips_var, int_var in iteritems(var_map):
ips = interface.get(int_var)
if isinstance(ips, string_types):
int_info[ips_var] = [ips]
else:
int_info[ips_var] = ips
if 'vpc-id' in interface:
int_info['network_type'] = 'vpc'
else:
int_info['network_type'] = 'classic'
if int_info['network_type'] == 'vpc':
int_info['network_id'] = interface['subnet-id']
else:
int_info['network_id'] = None
facts['network']['interfaces'].append(int_info)
facts['zone'] = metadata['placement']['availability-zone']
# TODO: actually attempt to determine default local and public ips
# by using the ansible default ip fact and the ipv4-associations
# from the ec2 metadata
facts['network']['ip'] = metadata.get('local-ipv4')
facts['network']['public_ip'] = metadata.get('public-ipv4')
# TODO: verify that local hostname makes sense and is resolvable
facts['network']['hostname'] = metadata.get('local-hostname')
# TODO: verify that public hostname makes sense and is resolvable
facts['network']['public_hostname'] = metadata.get('public-hostname')
return facts
def normalize_openstack_facts(metadata, facts):
""" Normalize openstack facts
Args:
metadata (dict): provider metadata
facts (dict): facts to update
Returns:
dict: the result of adding the normalized metadata to the provided
facts dict
"""
# openstack ec2 compat api does not support network interfaces and
# the version tested on did not include the info in the openstack
# metadata api, should be updated if neutron exposes this.
facts['zone'] = metadata['availability_zone']
local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
facts['network']['ip'] = local_ipv4
facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
('public_hostname', 'public-hostname', 'public-ipv4')]:
try:
if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
facts['network'][f_var] = metadata['ec2_compat'][h_var]
else:
facts['network'][f_var] = metadata['ec2_compat'][ip_var]
except socket.gaierror:
facts['network'][f_var] = metadata['ec2_compat'][ip_var]
return facts
def normalize_provider_facts(provider, metadata):
""" Normalize provider facts
Args:
provider (str): host provider
metadata (dict): provider metadata
Returns:
dict: the normalized provider facts
"""
if provider is None or metadata is None:
return {}
# TODO: test for ipv6_enabled where possible (gce, aws do not support)
# and configure ipv6 facts if available
# TODO: add support for setting user_data if available
facts = dict(name=provider, metadata=metadata,
network=dict(interfaces=[], ipv6_enabled=False))
if provider == 'gce':
facts = normalize_gce_facts(metadata, facts)
elif provider == 'aws':
facts = normalize_aws_facts(metadata, facts)
elif provider == 'openstack':
facts = normalize_openstack_facts(metadata, facts)
return facts
def set_url_facts_if_unset(facts):
""" Set url facts if not already present in facts dict
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with the generated url facts if they
were not already present
"""
if 'master' in facts:
hostname = facts['common']['hostname']
cluster_hostname = facts['master'].get('cluster_hostname')
cluster_public_hostname = facts['master'].get('cluster_public_hostname')
public_hostname = facts['common']['public_hostname']
api_hostname = cluster_hostname if cluster_hostname else hostname
api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
console_path = facts['master']['console_path']
use_ssl = dict(
api=facts['master']['api_use_ssl'],
public_api=facts['master']['api_use_ssl'],
loopback_api=facts['master']['api_use_ssl'],
console=facts['master']['console_use_ssl'],
public_console=facts['master']['console_use_ssl'],
)
ports = dict(
api=facts['master']['api_port'],
public_api=facts['master']['api_port'],
loopback_api=facts['master']['api_port'],
console=facts['master']['console_port'],
public_console=facts['master']['console_port'],
)
prefix_hosts = [('api', api_hostname),
('public_api', api_public_hostname),
('loopback_api', hostname)]
for prefix, host in prefix_hosts:
facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
host,
ports[prefix]))
r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
facts['master'].setdefault('loopback_cluster_name', r_lhn)
facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
facts['master'].setdefault('loopback_user', r_lhu)
prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
for prefix, host in prefix_hosts:
facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
host,
ports[prefix],
console_path))
return facts
def set_aggregate_facts(facts):
""" Set aggregate facts
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with aggregated facts
"""
all_hostnames = set()
internal_hostnames = set()
kube_svc_ip = first_ip(facts['common']['portal_net'])
if 'common' in facts:
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
all_hostnames.add(facts['common']['ip'])
all_hostnames.add(facts['common']['public_ip'])
facts['common']['kube_svc_ip'] = kube_svc_ip
internal_hostnames.add(facts['common']['hostname'])
internal_hostnames.add(facts['common']['ip'])
cluster_domain = facts['common']['dns_domain']
if 'master' in facts:
if 'cluster_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_hostname'])
if 'cluster_public_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_public_hostname'])
svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
all_hostnames.update(svc_names)
internal_hostnames.update(svc_names)
all_hostnames.add(kube_svc_ip)
internal_hostnames.add(kube_svc_ip)
facts['common']['all_hostnames'] = list(all_hostnames)
facts['common']['internal_hostnames'] = list(internal_hostnames)
return facts
def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
Args:
facts (dict): existing facts
system_facts (dict): ansible_facts
Returns:
dict: the facts dict updated with the generated sdn facts if they
were not already present
"""
if 'node' in facts and 'sdn_mtu' not in facts['node']:
node_ip = facts['common']['ip']
# default MTU if interface MTU cannot be detected
facts['node']['sdn_mtu'] = '1450'
for val in itervalues(system_facts):
if isinstance(val, dict) and 'mtu' in val:
mtu = val['mtu']
if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
facts['node']['sdn_mtu'] = str(mtu - 50)
return facts
def set_nodename(facts):
""" set nodename """
if 'node' in facts and 'common' in facts:
if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]
        # TODO: The openstack cloudprovider nodename setting was too opinionated.
# It needs to be generalized before it can be enabled again.
# elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
# facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
else:
facts['node']['nodename'] = facts['common']['raw_hostname'].lower()
return facts
def make_allowed_registries(registry_list):
""" turns a list of wildcard registries to allowedRegistriesForImport json setting """
return {
"allowedRegistriesForImport": [
{'domainName': reg} if isinstance(reg, str) else reg for reg in registry_list
]
}
def set_allowed_registries(facts):
""" override allowedRegistriesForImport in imagePolicyConfig """
if 'master' in facts:
image_policy = {}
overriden = False
if facts['master'].get('image_policy_config', None):
image_policy = facts['master']['image_policy_config']
overriden = True
overrides = facts['master'].get('image_policy_allowed_registries_for_import', None)
if overrides:
image_policy = merge_facts(image_policy, make_allowed_registries(overrides), None)
overriden = True
if overriden:
facts['master']['image_policy_config'] = image_policy
return facts
def format_url(use_ssl, hostname, port, path=''):
""" Format url based on ssl flag, hostname, port and path
Args:
use_ssl (bool): is ssl enabled
hostname (str): hostname
port (str): port
path (str): url path
Returns:
str: The generated url string
"""
scheme = 'https' if use_ssl else 'http'
netloc = hostname
if (use_ssl and port != '443') or (not use_ssl and port != '80'):
netloc += ":%s" % port
try:
url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
except AttributeError:
# pylint: disable=undefined-variable
url = urlunparse((scheme, netloc, path, '', '', ''))
return url
def get_current_config(facts):
""" Get current openshift config
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with the current openshift config
"""
current_config = dict()
roles = [role for role in facts if role not in ['common', 'provider']]
for role in roles:
if 'roles' in current_config:
current_config['roles'].append(role)
else:
current_config['roles'] = [role]
# TODO: parse the /etc/sysconfig/openshift-{master,node} config to
# determine the location of files.
# TODO: I suspect this isn't working right now, but it doesn't prevent
# anything from working properly as far as I can tell, perhaps because
# we override the kubeconfig path everywhere we use it?
# Query kubeconfig settings
kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
if role == 'node':
kubeconfig_dir = os.path.join(
kubeconfig_dir, "node-%s" % facts['common']['hostname']
)
kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
try:
_, output, _ = module.run_command( # noqa: F405
["/usr/bin/openshift", "ex", "config", "view", "-o",
"json", "--kubeconfig=%s" % kubeconfig_path],
check_rc=False
)
config = json.loads(output)
cad = 'certificate-authority-data'
try:
for cluster in config['clusters']:
config['clusters'][cluster][cad] = 'masked'
except KeyError:
pass
try:
for user in config['users']:
config['users'][user][cad] = 'masked'
config['users'][user]['client-key-data'] = 'masked'
except KeyError:
pass
current_config['kubeconfig'] = config
# override pylint broad-except warning, since we do not want
# to bubble up any exceptions if oc config view
# fails
# pylint: disable=broad-except
except Exception:
pass
return current_config
def build_controller_args(facts):
""" Build master controller_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
'cloudprovider')
if 'master' in facts:
controller_args = {}
if 'cloudprovider' in facts:
if 'kind' in facts['cloudprovider']:
if facts['cloudprovider']['kind'] == 'aws':
controller_args['cloud-provider'] = ['aws']
controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
controller_args['disable-attach-detach-reconcile-sync'] = ['true']
if facts['cloudprovider']['kind'] == 'azure':
controller_args['cloud-provider'] = ['azure']
controller_args['cloud-config'] = [cloud_cfg_path + '/azure.conf']
if facts['cloudprovider']['kind'] == 'openstack':
controller_args['cloud-provider'] = ['openstack']
controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
if facts['cloudprovider']['kind'] == 'gce':
controller_args['cloud-provider'] = ['gce']
controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if controller_args != {}:
facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [])
return facts
def build_api_server_args(facts):
""" Build master api_server_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
'cloudprovider')
if 'master' in facts:
api_server_args = {}
if 'cloudprovider' in facts:
if 'kind' in facts['cloudprovider']:
if facts['cloudprovider']['kind'] == 'aws':
api_server_args['cloud-provider'] = ['aws']
api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
if facts['cloudprovider']['kind'] == 'azure':
api_server_args['cloud-provider'] = ['azure']
api_server_args['cloud-config'] = [cloud_cfg_path + '/azure.conf']
if facts['cloudprovider']['kind'] == 'openstack':
api_server_args['cloud-provider'] = ['openstack']
api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
if facts['cloudprovider']['kind'] == 'gce':
api_server_args['cloud-provider'] = ['gce']
api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if api_server_args != {}:
facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [])
return facts
def apply_provider_facts(facts, provider_facts):
""" Apply provider facts to supplied facts dict
Args:
facts (dict): facts dict to update
provider_facts (dict): provider facts to apply
Returns:
dict: the merged facts
"""
if not provider_facts:
return facts
common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
for h_var, ip_var in common_vars:
ip_value = provider_facts['network'].get(ip_var)
if ip_value:
facts['common'][ip_var] = ip_value
facts['common'][h_var] = choose_hostname(
[provider_facts['network'].get(h_var)],
facts['common'][h_var]
)
facts['provider'] = provider_facts
return facts
# Disabling pylint too many branches. This function needs to be refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite):
""" Recursively merge facts dicts
Args:
orig (dict): existing facts
new (dict): facts to update
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
Returns:
dict: the merged facts
"""
additive_facts = ['named_certificates']
# Facts we do not ever want to merge. These originate in inventory variables
# and contain JSON dicts. We don't ever want to trigger a merge
# here, just completely overwrite with the new if they are present there.
inventory_json_facts = ['admission_plugin_config',
'kube_admission_plugin_config',
'image_policy_config',
"builddefaults",
"buildoverrides"]
facts = dict()
for key, value in iteritems(orig):
# Key exists in both old and new facts.
if key in new:
if key in inventory_json_facts:
                # Watch out for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
if isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
# Continue to recurse if old and new fact is a dictionary.
elif isinstance(value, dict) and isinstance(new[key], dict):
# Collect the subset of additive facts to overwrite if
# key matches. These will be passed to the subsequent
# merge_facts call.
relevant_additive_facts = []
for item in additive_facts_to_overwrite:
if '.' in item and item.startswith(key + '.'):
relevant_additive_facts.append(item)
facts[key] = merge_facts(value, new[key], relevant_additive_facts)
# Key matches an additive fact and we are not overwriting
# it so we will append the new value to the existing value.
elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
if isinstance(value, list) and isinstance(new[key], list):
new_fact = []
for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
if item not in new_fact:
new_fact.append(item)
facts[key] = new_fact
# No other condition has been met. Overwrite the old fact
# with the new value.
else:
facts[key] = copy.deepcopy(new[key])
# Key isn't in new so add it to facts to keep it.
else:
facts[key] = copy.deepcopy(value)
new_keys = set(new.keys()) - set(orig.keys())
for key in new_keys:
        # Watch out for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
if key in inventory_json_facts and isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
return facts
def save_local_facts(filename, facts):
""" Save local facts
Args:
filename (str): local facts file
facts (dict): facts to set
"""
try:
fact_dir = os.path.dirname(filename)
try:
os.makedirs(fact_dir) # try to make the directory
except OSError as exception:
if exception.errno != errno.EEXIST: # but it is okay if it is already there
raise # pass any other exceptions up the chain
with open(filename, 'w') as fact_file:
fact_file.write(module.jsonify(facts)) # noqa: F405
os.chmod(filename, 0o600)
except (IOError, OSError) as ex:
raise OpenShiftFactsFileWriteError(
"Could not create fact file: %s, error: %s" % (filename, ex)
)
def get_local_facts_from_file(filename):
""" Retrieve local facts from fact file
Args:
filename (str): local facts file
Returns:
dict: the retrieved facts
"""
try:
with open(filename, 'r') as facts_file:
local_facts = json.load(facts_file)
except (ValueError, IOError):
local_facts = {}
return local_facts
def sort_unique(alist):
""" Sorts and de-dupes a list
Args:
            alist (list): a list
Returns:
list: a sorted de-duped list
"""
return sorted(list(set(alist)))
def safe_get_bool(fact):
""" Get a boolean fact safely.
Args:
            fact: the fact to convert
Returns:
bool: given fact as a bool
"""
return bool(strtobool(str(fact)))
def set_proxy_facts(facts):
""" Set global proxy facts
Args:
facts(dict): existing facts
Returns:
facts(dict): Updated facts with missing values
"""
if 'common' in facts:
common = facts['common']
if 'http_proxy' in common or 'https_proxy' in common or 'no_proxy' in common:
if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
common['no_proxy'] = common['no_proxy'].split(",")
elif 'no_proxy' not in common:
common['no_proxy'] = []
# See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
# masters behind a proxy need to connect to etcd via IP
if 'no_proxy_etcd_host_ips' in common:
if isinstance(common['no_proxy_etcd_host_ips'], string_types):
common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))
            # Master IP addresses should be added to the no_proxy list so that liveness probes pass
if 'no_proxy_master_ips' in common:
if isinstance(common['no_proxy_master_ips'], string_types):
common['no_proxy'].extend(common['no_proxy_master_ips'].split(','))
if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
if 'no_proxy_internal_hostnames' in common:
common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
# TODO: This is Azure specific and should be scoped out to only Azure installs
common['no_proxy'].append('169.254.169.254')
# We always add local dns domain and ourselves no matter what
kube_svc_ip = str(ipaddress.ip_network(text_type(common['portal_net']))[1])
common['no_proxy'].append(kube_svc_ip)
common['no_proxy'].append('.' + common['dns_domain'])
common['no_proxy'].append('.svc')
common['no_proxy'].append(common['hostname'])
if 'master' in facts:
if 'cluster_hostname' in facts['master']:
common['no_proxy'].append(facts['master']['cluster_hostname'])
common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))
facts['common'] = common
return facts
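# --- Illustrative sketch (added for documentation, not part of the original
# module): the effect of set_proxy_facts() on a minimal 'common' dict. The
# proxy URL, hostname and networks below are invented.
def _set_proxy_facts_example():
    facts = {'common': {'http_proxy': 'http://proxy.example.com:3128',
                        'portal_net': '172.30.0.0/16',
                        'dns_domain': 'cluster.local',
                        'hostname': 'master.example.com',
                        'generate_no_proxy_hosts': True}}
    facts = set_proxy_facts(facts)
    # no_proxy is normalised into a sorted, comma-joined string that always
    # contains the kube service IP, the cluster DNS domain and the host itself.
    assert 'master.example.com' in facts['common']['no_proxy']
    assert '172.30.0.1' in facts['common']['no_proxy']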
def set_builddefaults_facts(facts):
""" Set build defaults including setting proxy values from http_proxy, https_proxy,
no_proxy to the more specific builddefaults and builddefaults_git vars.
1. http_proxy, https_proxy, no_proxy
2. builddefaults_*
3. builddefaults_git_*
Args:
facts(dict): existing facts
Returns:
facts(dict): Updated facts with missing values
"""
if 'builddefaults' in facts:
builddefaults = facts['builddefaults']
common = facts['common']
# Copy values from common to builddefaults
if 'http_proxy' not in builddefaults and 'http_proxy' in common:
builddefaults['http_proxy'] = common['http_proxy']
if 'https_proxy' not in builddefaults and 'https_proxy' in common:
builddefaults['https_proxy'] = common['https_proxy']
if 'no_proxy' not in builddefaults and 'no_proxy' in common:
builddefaults['no_proxy'] = common['no_proxy']
# Create git specific facts from generic values, if git specific values are
# not defined.
if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
builddefaults['git_http_proxy'] = builddefaults['http_proxy']
if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
builddefaults['git_https_proxy'] = builddefaults['https_proxy']
if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
builddefaults['git_no_proxy'] = builddefaults['no_proxy']
# If we're actually defining a builddefaults config then create admission_plugin_config
# then merge builddefaults[config] structure into admission_plugin_config
# 'config' is the 'openshift_builddefaults_json' inventory variable
if 'config' in builddefaults:
if 'admission_plugin_config' not in facts['master']:
# Scaffold out the full expected datastructure
facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
facts['master']['admission_plugin_config'].update(builddefaults['config'])
if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
return facts
def delete_empty_keys(keylist):
""" Delete dictionary elements from keylist where "value" is empty.
Args:
keylist(list): A list of builddefault configuration envs.
Returns:
none
Example:
keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
{'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
{'name': 'NO_PROXY', 'value': ''}]
After calling delete_empty_keys the provided list is modified to become:
[{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
{'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
"""
count = 0
for i in range(0, len(keylist)):
if len(keylist[i - count]['value']) == 0:
del keylist[i - count]
count += 1
def set_buildoverrides_facts(facts):
""" Set build overrides
Args:
facts(dict): existing facts
Returns:
facts(dict): Updated facts with missing values
"""
if 'buildoverrides' in facts:
buildoverrides = facts['buildoverrides']
# If we're actually defining a buildoverrides config then create admission_plugin_config
# then merge buildoverrides[config] structure into admission_plugin_config
if 'config' in buildoverrides:
if 'admission_plugin_config' not in facts['master']:
facts['master']['admission_plugin_config'] = dict()
facts['master']['admission_plugin_config'].update(buildoverrides['config'])
return facts
def pop_obsolete_local_facts(local_facts):
"""Remove unused keys from local_facts"""
keys_to_remove = {
'master': ('etcd_port', 'etcd_use_ssl', 'etcd_hosts')
}
for role in keys_to_remove:
if role in local_facts:
for key in keys_to_remove[role]:
local_facts[role].pop(key, None)
class OpenShiftFactsUnsupportedRoleError(Exception):
"""Origin Facts Unsupported Role Error"""
pass
class OpenShiftFactsFileWriteError(Exception):
"""Origin Facts File Write Error"""
pass
class OpenShiftFactsMetadataUnavailableError(Exception):
"""Origin Facts Metadata Unavailable Error"""
pass
class OpenShiftFacts(object):
""" Origin Facts
Attributes:
facts (dict): facts for the host
Args:
module (AnsibleModule): an AnsibleModule object
role (str): role for setting local facts
filename (str): local facts file to use
local_facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
known_roles = ['builddefaults',
'buildoverrides',
'cloudprovider',
'common',
'etcd',
'master',
'node']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
def __init__(self, role, filename, local_facts,
additive_facts_to_overwrite=None):
self.changed = False
self.filename = filename
if role not in self.known_roles:
raise OpenShiftFactsUnsupportedRoleError(
"Role %s is not supported by this module" % role
)
self.role = role
# Collect system facts and preface each fact with 'ansible_'.
try:
# pylint: disable=too-many-function-args,invalid-name
self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter']) # noqa: F405
additional_facts = {}
for (k, v) in self.system_facts.items():
additional_facts["ansible_%s" % k.replace('-', '_')] = v
self.system_facts.update(additional_facts)
except UnboundLocalError:
# ansible-2.2,2.3
self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
additive_facts_to_overwrite)
def generate_facts(self,
local_facts,
additive_facts_to_overwrite):
""" Generate facts
Args:
local_facts (dict): local_facts for overriding generated defaults
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
Returns:
dict: The generated facts
"""
local_facts = self.init_local_facts(local_facts,
additive_facts_to_overwrite)
roles = local_facts.keys()
defaults = self.get_defaults(roles)
provider_facts = {}
if 'common' in local_facts and 'cloudprovider' in local_facts['common']:
provider_facts = self.init_provider_facts()
facts = apply_provider_facts(defaults, provider_facts)
facts = merge_facts(facts,
local_facts,
additive_facts_to_overwrite)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_aggregate_facts(facts)
facts = set_proxy_facts(facts)
facts = set_builddefaults_facts(facts)
facts = set_buildoverrides_facts(facts)
facts = set_nodename(facts)
facts = set_allowed_registries(facts)
return dict(openshift=facts)
def get_defaults(self, roles):
""" Get default fact values
Args:
roles (list): list of roles for this host
Returns:
dict: The generated default facts
"""
defaults = {}
ip_addr = self.system_facts['ansible_default_ipv4']['address']
exit_code, output, _ = module.run_command(['hostname', '-f']) # noqa: F405
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
self.system_facts['ansible_fqdn']]
hostname = choose_hostname(hostname_values, ip_addr).lower()
exit_code, output, _ = module.run_command(['hostname']) # noqa: F405
raw_hostname = output.strip() if exit_code == 0 else hostname
defaults['common'] = dict(ip=ip_addr,
public_ip=ip_addr,
raw_hostname=raw_hostname,
hostname=hostname,
public_hostname=hostname,
portal_net='172.30.0.0/16',
dns_domain='cluster.local',
config_base='/etc/origin')
if 'master' in roles:
defaults['master'] = dict(api_use_ssl=True, api_port='8443',
controllers_port='8444',
console_use_ssl=True,
console_path='/console',
console_port='8443',
portal_net='172.30.0.0/16',
bind_addr='0.0.0.0',
session_max_seconds=3600,
session_name='ssn')
if 'cloudprovider' in roles:
defaults['cloudprovider'] = dict(kind=None)
return defaults
def guess_host_provider(self):
""" Guess the host provider
Returns:
dict: The generated default facts for the detected provider
"""
# TODO: cloud provider facts should probably be submitted upstream
product_name = self.system_facts['ansible_product_name']
product_version = self.system_facts['ansible_product_version']
virt_type = self.system_facts['ansible_virtualization_type']
virt_role = self.system_facts['ansible_virtualization_role']
bios_vendor = self.system_facts['ansible_system_vendor']
provider = None
metadata = None
if bios_vendor == 'Google':
provider = 'gce'
metadata_url = ('http://metadata.google.internal/'
'computeMetadata/v1/?recursive=true')
headers = {'Metadata-Flavor': 'Google'}
metadata = get_provider_metadata(metadata_url, True, headers,
True)
# Filter sshKeys and serviceAccounts from gce metadata
if metadata:
metadata['project']['attributes'].pop('sshKeys', None)
metadata['instance'].pop('serviceAccounts', None)
elif bios_vendor == 'Amazon EC2':
# Adds support for Amazon EC2 C5 instance types
provider = 'aws'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
metadata = get_provider_metadata(metadata_url)
elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'aws'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
metadata = get_provider_metadata(metadata_url)
elif re.search(r'OpenStack', product_name):
provider = 'openstack'
metadata_url = ('http://169.254.169.254/openstack/latest/'
'meta_data.json')
metadata = get_provider_metadata(metadata_url, True, None,
True)
if metadata:
ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
metadata['ec2_compat'] = get_provider_metadata(
ec2_compat_url
)
# disable pylint maybe-no-member because overloaded use of
# the module name causes pylint to not detect that results
# is an array or hash
# pylint: disable=maybe-no-member
# Filter public_keys and random_seed from openstack metadata
metadata.pop('public_keys', None)
metadata.pop('random_seed', None)
if not metadata['ec2_compat']:
metadata = None
return dict(name=provider, metadata=metadata)
def init_provider_facts(self):
""" Initialize the provider facts
Returns:
dict: The normalized provider facts
"""
provider_info = self.guess_host_provider()
provider_facts = normalize_provider_facts(
provider_info.get('name'),
provider_info.get('metadata')
)
return provider_facts
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
# pylint: disable=too-many-branches, too-many-locals
def init_local_facts(self, facts=None,
additive_facts_to_overwrite=None):
""" Initialize the local facts
Args:
facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
Returns:
dict: The result of merging the provided facts with existing
local facts
"""
changed = False
facts_to_set = dict()
if facts is not None:
facts_to_set[self.role] = facts
local_facts = get_local_facts_from_file(self.filename)
migrated_facts = migrate_local_facts(local_facts)
new_local_facts = merge_facts(migrated_facts,
facts_to_set,
additive_facts_to_overwrite)
new_local_facts = self.remove_empty_facts(new_local_facts)
pop_obsolete_local_facts(new_local_facts)
if new_local_facts != local_facts:
changed = True
if not module.check_mode: # noqa: F405
save_local_facts(self.filename, new_local_facts)
self.changed = changed
return new_local_facts
def remove_empty_facts(self, facts=None):
""" Remove empty facts
Args:
facts (dict): facts to clean
"""
facts_to_remove = []
for fact, value in iteritems(facts):
if isinstance(facts[fact], dict):
facts[fact] = self.remove_empty_facts(facts[fact])
else:
if value == "" or value == [""] or value is None:
facts_to_remove.append(fact)
for fact in facts_to_remove:
del facts[fact]
return facts
def main():
""" main """
# disabling pylint errors for global-variable-undefined and invalid-name
# for 'global module' usage, since it is required to use ansible_facts
# pylint: disable=global-variable-undefined, invalid-name
global module
module = AnsibleModule( # noqa: F405
argument_spec=dict(
role=dict(default='common', required=False,
choices=OpenShiftFacts.known_roles),
local_facts=dict(default=None, type='dict', required=False),
additive_facts_to_overwrite=dict(default=[], type='list', required=False),
),
supports_check_mode=True,
add_file_common_args=True,
)
module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter'] # noqa: F405
module.params['gather_timeout'] = 10 # noqa: F405
module.params['filter'] = '*' # noqa: F405
role = module.params['role'] # noqa: F405
local_facts = module.params['local_facts'] # noqa: F405
additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
fact_file = '/etc/ansible/facts.d/openshift.fact'
openshift_facts = OpenShiftFacts(role,
fact_file,
local_facts,
additive_facts_to_overwrite)
file_params = module.params.copy() # noqa: F405
file_params['path'] = fact_file
file_args = module.load_file_common_arguments(file_params) # noqa: F405
changed = module.set_fs_attributes_if_different(file_args, # noqa: F405
openshift_facts.changed)
return module.exit_json(changed=changed, # noqa: F405
ansible_facts=openshift_facts.facts)
if __name__ == '__main__':
main()
| apache-2.0 | -8,617,930,880,546,161,000 | 38.830769 | 118 | 0.569139 | false |
orbitfp7/horizon | openstack_dashboard/dashboards/project/data_processing/clusters/urls.py | 34 | 1613 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns
from django.conf.urls import url
import openstack_dashboard.dashboards.project.data_processing. \
clusters.views as views
urlpatterns = patterns('',
url(r'^$', views.ClustersView.as_view(),
name='index'),
url(r'^$', views.ClustersView.as_view(),
name='clusters'),
url(r'^create-cluster$',
views.CreateClusterView.as_view(),
name='create-cluster'),
url(r'^configure-cluster$',
views.ConfigureClusterView.as_view(),
name='configure-cluster'),
url(r'^(?P<cluster_id>[^/]+)$',
views.ClusterDetailsView.as_view(),
name='details'),
url(r'^(?P<cluster_id>[^/]+)/scale$',
views.ScaleClusterView.as_view(),
name='scale'))
| apache-2.0 | -1,235,539,989,835,135,500 | 41.447368 | 69 | 0.549287 | false |
QEF/q-e_schrodinger | test-suite/testcode/lib/testcode2/validation.py | 1 | 11350 | '''
testcode2.validation
--------------------
Classes and functions for comparing data.
:copyright: (c) 2012 James Spencer.
:license: modified BSD; see LICENSE for more details.
'''
import re
import sys
import warnings
import testcode2.ansi as ansi
import testcode2.compatibility as compat
import testcode2.exceptions as exceptions
class Status:
'''Enum-esque object for storing whether an object passed a comparison.
bools: iterable of boolean objects. If all booleans are True (False) then the
status is set to pass (fail) and if only some booleans are True, the
status is set to warning (partial pass).
status: existing status to use. bools is ignored if status is supplied.
name: name of status (unknown, skipped, passed, partial, failed) to use.
Setting name overrides bools and status.
'''
def __init__(self, bools=None, status=None, name=None):
(self._unknown, self._skipped) = (-2, -1)
(self._passed, self._partial, self._failed) = (0, 1, 2)
if name is not None:
setattr(self, 'status', getattr(self, '_'+name))
elif status is not None:
self.status = status
elif bools:
if compat.compat_all(bools):
self.status = self._passed
elif compat.compat_any(bools):
self.status = self._partial
else:
self.status = self._failed
else:
self.status = self._unknown
def unknown(self):
'''Return true if stored status is unknown.'''
return self.status == self._unknown
def skipped(self):
'''Return true if stored status is skipped.'''
return self.status == self._skipped
def passed(self):
'''Return true if stored status is passed.'''
return self.status == self._passed
def warning(self):
'''Return true if stored status is a partial pass.'''
return self.status == self._partial
def failed(self):
'''Return true if stored status is failed.'''
return self.status == self._failed
def print_status(self, msg=None, verbose=1, vspace=True):
'''Print status.
msg: optional message to print out after status.
verbose: 0: suppress all output except for . (for pass), U (for unknown),
W (for warning/partial pass) and F (for fail) without a newline.
1: print 'Passed', 'Unknown', 'WARNING' or '**FAILED**'.
2: as for 1 plus print msg (if supplied).
3: as for 2 plus print a blank line.
vspace: print out extra new line afterwards if verbose > 1.
'''
if verbose > 0:
if self.status == self._unknown:
print('Unknown.')
elif self.status == self._passed:
print('Passed.')
elif self.status == self._skipped:
print(('%s.' % ansi.ansi_format('SKIPPED', 'blue')))
elif self.status == self._partial:
print(('%s.' % ansi.ansi_format('WARNING', 'blue')))
else:
print(('%s.' % ansi.ansi_format('**FAILED**', 'red', 'normal', 'bold')))
if msg and verbose > 1:
print(msg)
if vspace and verbose > 1:
print('')
else:
if self.status == self._unknown:
sys.stdout.write('U')
elif self.status == self._skipped:
sys.stdout.write('S')
elif self.status == self._passed:
sys.stdout.write('.')
elif self.status == self._partial:
sys.stdout.write('W')
else:
sys.stdout.write('F')
sys.stdout.flush()
def __add__(self, other):
'''Add two status objects.
Return the maximum level (ie most "failed") status.'''
return Status(status=max(self.status, other.status))
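# --- Illustrative usage sketch (added for documentation, not part of the
# original library): combining comparison outcomes. The booleans are invented.
def _status_example():
    passed = Status([True, True])     # every check passed
    partial = Status([True, False])   # only some checks passed -> warning
    failed = Status([False, False])   # every check failed
    assert passed.passed() and partial.warning() and failed.failed()
    # Addition keeps the most "failed" status, so pass + fail == fail.
    assert (passed + failed).failed()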
class Tolerance:
'''Store absolute and relative tolerances
    Values are regarded as equal if they are within these tolerances.
name: name of tolerance object.
absolute: threshold for absolute difference between two numbers.
relative: threshold for relative difference between two numbers.
strict: if true, then require numbers to be within both thresholds.
'''
def __init__(self, name='', absolute=None, relative=None, strict=True):
self.name = name
self.absolute = absolute
self.relative = relative
if not self.absolute and not self.relative:
err = 'Neither absolute nor relative tolerance given.'
raise exceptions.TestCodeError(err)
self.strict = strict
def __repr__(self):
return (self.absolute, self.relative, self.strict).__repr__()
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.__dict__ == other.__dict__)
def validate(self, test_val, benchmark_val, key=''):
'''Compare test and benchmark values to within the tolerances.'''
status = Status([True])
msg = ['values are within tolerance.']
compare = '(Test: %s. Benchmark: %s.)' % (test_val, benchmark_val)
try:
# Check float is not NaN (which we can't compare).
if compat.isnan(test_val) or compat.isnan(benchmark_val):
status = Status([False])
msg = ['cannot compare NaNs.']
else:
# Check if values are within tolerances.
(status_absolute, msg_absolute) = \
self.validate_absolute(benchmark_val, test_val)
(status_relative, msg_relative) = \
self.validate_relative(benchmark_val, test_val)
if self.absolute and self.relative and not self.strict:
# Require only one of thresholds to be met.
status = Status([status_relative.passed(),
status_absolute.passed()])
else:
# Only have one or other of thresholds (require active one
# to be met) or have both and strict mode is on (require
# both to be met).
status = status_relative + status_absolute
err_stat = ''
if status.warning():
err_stat = 'Warning: '
elif status.failed():
err_stat = 'ERROR: '
msg = []
if self.absolute and msg_absolute:
msg.append('%s%s %s' % (err_stat, msg_absolute, compare))
if self.relative and msg_relative:
msg.append('%s%s %s' % (err_stat, msg_relative, compare))
except TypeError:
if test_val != benchmark_val:
# require test and benchmark values to be equal (within python's
# definition of equality).
status = Status([False])
msg = ['values are different. ' + compare]
if key and msg:
msg.insert(0, key)
msg = '\n '.join(msg)
else:
msg = '\n'.join(msg)
return (status, msg)
def validate_absolute(self, benchmark_val, test_val):
'''Compare test and benchmark values to the absolute tolerance.'''
if self.absolute:
diff = test_val - benchmark_val
err = abs(diff)
passed = err < self.absolute
msg = ''
if not passed:
msg = ('absolute error %.2e greater than %.2e.' %
(err, self.absolute))
else:
passed = True
msg = 'No absolute tolerance set. Passing without checking.'
return (Status([passed]), msg)
def validate_relative(self, benchmark_val, test_val):
'''Compare test and benchmark values to the relative tolerance.'''
if self.relative:
diff = test_val - benchmark_val
if benchmark_val == 0 and diff == 0:
err = 0
elif benchmark_val == 0:
err = float("Inf")
else:
err = abs(diff/benchmark_val)
passed = err < self.relative
msg = ''
if not passed:
msg = ('relative error %.2e greater than %.2e.' %
(err, self.relative))
else:
passed = True
msg = 'No relative tolerance set. Passing without checking.'
return (Status([passed]), msg)
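# --- Illustrative usage sketch (added for documentation, not part of the
# original library): validating a test value against a benchmark with an
# absolute threshold. The tolerance name and numbers are invented.
def _tolerance_example():
    tol = Tolerance(name='energy', absolute=1.e-6)
    status, _msg = tol.validate(1.0000005, 1.0)   # |diff| = 5e-7 < 1e-6
    assert status.passed()
    status, _msg = tol.validate(1.1, 1.0)         # |diff| = 0.1 > 1e-6
    assert status.failed()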
def compare_data(benchmark, test, default_tolerance, tolerances,
ignore_fields=None):
'''Compare two data dictionaries.'''
ignored_params = compat.compat_set(ignore_fields or tuple())
bench_params = compat.compat_set(benchmark) - ignored_params
test_params = compat.compat_set(test) - ignored_params
# Check both the key names and the number of keys in case there are
# different numbers of duplicate keys.
comparable = (bench_params == test_params)
key_counts = dict((key,0) for key in bench_params | test_params)
for (key, val) in list(benchmark.items()):
if key not in ignored_params:
key_counts[key] += len(val)
for (key, val) in list(test.items()):
if key not in ignored_params:
key_counts[key] -= len(val)
comparable = comparable and compat.compat_all(kc == 0 for kc in list(key_counts.values()))
status = Status()
msg = []
if not comparable:
status = Status([False])
bench_only = bench_params - test_params
test_only = test_params - bench_params
msg.append('Different sets of data extracted from benchmark and test.')
if bench_only:
msg.append(" Data only in benchmark: %s." % ", ".join(bench_only))
if test_only:
msg.append(" Data only in test: %s." % ", ".join(test_only))
bench_more = [key for key in key_counts
if key_counts[key] > 0 and key not in bench_only]
test_more = [key for key in key_counts
if key_counts[key] < 0 and key not in test_only]
if bench_more:
msg.append(" More data in benchmark than in test: %s." %
", ".join(bench_more))
if test_more:
msg.append(" More data in test than in benchmark: %s." %
", ".join(test_more))
for param in (bench_params & test_params):
param_tol = tolerances.get(param, default_tolerance)
if param_tol == default_tolerance:
# See if there's a regex that matches.
tol_matches = [tol for tol in list(tolerances.values())
if tol.name and re.match(tol.name, param)]
if tol_matches:
param_tol = tol_matches[0]
if len(tol_matches) > 1:
warnings.warn('Multiple tolerance regexes match. '
'Using %s.' % (param_tol.name))
for bench_value, test_value in zip(benchmark[param], test[param]):
key_status, err = param_tol.validate(test_value, bench_value, param)
status += key_status
if not key_status.passed() and err:
msg.append(err)
return (comparable, status, "\n".join(msg))
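# --- Illustrative usage sketch (added for documentation, not part of the
# original library): comparing extracted data dictionaries. Values invented.
def _compare_data_example():
    default_tol = Tolerance(name='default', absolute=1.e-6)
    benchmark = {'energy': [-1.2345]}
    test = {'energy': [-1.2345001]}
    comparable, status, _msg = compare_data(benchmark, test, default_tol, {})
    assert comparable and status.passed()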
| gpl-2.0 | -6,717,007,280,243,656,000 | 41.037037 | 94 | 0.554361 | false |
arielalmendral/ert | python/python/ert/enkf/state_map.py | 2 | 4920 | # Copyright (C) 2012 Statoil ASA, Norway.
#
# The file 'enkf_fs.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCClass
from ert.enkf import EnkfPrototype
from ert.enkf.enums import RealizationStateEnum
from ert.util import BoolVector
class StateMap(BaseCClass):
TYPE_NAME = "state_map"
_alloc = EnkfPrototype("void* state_map_alloc()", bind = False)
_fread = EnkfPrototype("bool state_map_fread(state_map , char*)")
_fwrite = EnkfPrototype("void state_map_fwrite(state_map , char*)")
_equal = EnkfPrototype("bool state_map_equal(state_map , state_map)")
_free = EnkfPrototype("void state_map_free(state_map)")
_size = EnkfPrototype("int state_map_get_size(state_map)")
_iget = EnkfPrototype("realisation_state_enum state_map_iget(state_map, int)")
_iset = EnkfPrototype("void state_map_iset(state_map, int, realisation_state_enum)")
_select_matching = EnkfPrototype("void state_map_select_matching(state_map, bool_vector, realisation_state_enum)")
_is_read_only = EnkfPrototype("bool state_map_is_readonly(state_map)")
_is_legal_transition = EnkfPrototype("bool state_map_legal_transition(realisation_state_enum, realisation_state_enum)", bind = False)
def __init__(self , filename = None):
c_ptr = self._alloc()
super(StateMap, self).__init__(c_ptr)
if filename:
self.load(filename)
def __len__(self):
""" @rtype: int """
return self._size()
def __iter__(self):
index = 0
size = len(self)
while index < size:
yield self[index]
index += 1
def __eq__(self , other):
return self._equal(other)
def __getitem__(self, index):
""" @rtype: RealizationStateEnum """
if not isinstance(index, int):
raise TypeError("Expected an integer")
size = len(self)
if index < 0:
index += size
if 0 <= index < size:
return self._iget(index)
raise IndexError("Invalid index. Valid range: [0, %d)" % size)
def __setitem__(self, index, value):
if self.isReadOnly():
raise UserWarning("This State Map is read only!")
if not isinstance(index, int):
raise TypeError("Expected an integer")
if not isinstance(value, RealizationStateEnum):
raise TypeError("Expected a RealizationStateEnum")
if index < 0:
index += len(self)
if index < 0:
raise IndexError("Index out of range: %d < 0" % index)
self._iset(index, value)
@classmethod
def isLegalTransition(cls, realization_state1, realization_state2):
""" @rtype: bool """
if not isinstance(realization_state1, RealizationStateEnum) or not isinstance(realization_state2, RealizationStateEnum):
raise TypeError("Expected a RealizationStateEnum")
return cls._is_legal_transition(realization_state1, realization_state2)
def isReadOnly(self):
""" @rtype: bool """
return self._is_read_only()
def selectMatching(self, select_target, select_mask):
"""
@type select_target: BoolVector
@type select_mask: RealizationStateEnum
"""
assert isinstance(select_target, BoolVector)
assert isinstance(select_mask, RealizationStateEnum)
self._select_matching(select_target, select_mask)
def realizationList(self , state_value):
"""
Will create a list of all realisations with state equal to state_value.
@type state_value: RealizationStateEnum
@rtype: ert.util.IntVector
"""
mask = BoolVector(False, len(self))
self.selectMatching(mask, state_value)
return BoolVector.createActiveList(mask)
def free(self):
self._free()
def __repr__(self):
ro = 'read only' if self.isReadOnly() else 'read/write'
return 'StateMap(size = %d, %s) %s' % (len(self), ro, self._ad_str())
def load(self,filename):
if not self._fread(filename):
raise IOError("Failed to load state map from:%s" % filename)
def save(self, filename):
self._fwrite(filename)
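# --- Illustrative usage sketch (added for documentation, not part of the
# original module). It assumes the compiled ERT library behind EnkfPrototype
# can be loaded; the realisation indices below are invented.
def _state_map_example():
    state_map = StateMap()
    state_map[0] = RealizationStateEnum.STATE_INITIALIZED
    state_map[1] = RealizationStateEnum.STATE_INITIALIZED
    # realizationList() returns the indices whose state matches the given value.
    initialized = state_map.realizationList(RealizationStateEnum.STATE_INITIALIZED)
    assert list(initialized) == [0, 1]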
| gpl-3.0 | 664,415,046,870,531,500 | 33.405594 | 138 | 0.616057 | false |
mick-d/nipype | nipype/interfaces/utility/wrappers.py | 1 | 5987 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Various utilities
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname(os.path.realpath(__file__))
>>> datadir = os.path.realpath(os.path.join(filepath,
... '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str, bytes
from ... import logging
from ..base import (traits, DynamicTraitedSpec, Undefined, isdefined,
BaseInterfaceInputSpec)
from ..io import IOBase, add_traits
from ...utils.filemanip import filename_to_list
from ...utils.functions import getsource, create_function_from_source
logger = logging.getLogger('interface')
class FunctionInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
function_str = traits.Str(mandatory=True, desc='code for function')
class Function(IOBase):
"""Runs arbitrary function as an interface
Examples
--------
>>> func = 'def func(arg1, arg2=5): return arg1 + arg2'
>>> fi = Function(input_names=['arg1', 'arg2'], output_names=['out'])
>>> fi.inputs.function_str = func
>>> res = fi.run(arg1=1)
>>> res.outputs.out
6
"""
input_spec = FunctionInputSpec
output_spec = DynamicTraitedSpec
def __init__(self, input_names=None, output_names='out', function=None,
imports=None, **inputs):
"""
Parameters
----------
input_names: single str or list or None
names corresponding to function inputs
if ``None``, derive input names from function argument names
output_names: single str or list
names corresponding to function outputs (default: 'out').
if list of length > 1, has to match the number of outputs
function : callable
callable python object. must be able to execute in an
isolated namespace (possibly in concert with the ``imports``
parameter)
imports : list of strings
list of import statements that allow the function to execute
in an otherwise empty namespace
"""
super(Function, self).__init__(**inputs)
if function:
if hasattr(function, '__call__'):
try:
self.inputs.function_str = getsource(function)
except IOError:
raise Exception('Interface Function does not accept '
'function objects defined interactively '
'in a python session')
else:
if input_names is None:
fninfo = function.__code__
elif isinstance(function, (str, bytes)):
self.inputs.function_str = function
if input_names is None:
fninfo = create_function_from_source(
function, imports).__code__
else:
raise Exception('Unknown type of function')
if input_names is None:
input_names = fninfo.co_varnames[:fninfo.co_argcount]
self.inputs.on_trait_change(self._set_function_string,
'function_str')
self._input_names = filename_to_list(input_names)
self._output_names = filename_to_list(output_names)
add_traits(self.inputs, [name for name in self._input_names])
self.imports = imports
self._out = {}
for name in self._output_names:
self._out[name] = None
def _set_function_string(self, obj, name, old, new):
if name == 'function_str':
if hasattr(new, '__call__'):
function_source = getsource(new)
fninfo = new.__code__
elif isinstance(new, (str, bytes)):
function_source = new
fninfo = create_function_from_source(
new, self.imports).__code__
self.inputs.trait_set(trait_change_notify=False,
**{'%s' % name: function_source})
# Update input traits
input_names = fninfo.co_varnames[:fninfo.co_argcount]
new_names = set(input_names) - set(self._input_names)
add_traits(self.inputs, list(new_names))
self._input_names.extend(new_names)
def _add_output_traits(self, base):
undefined_traits = {}
for key in self._output_names:
base.add_trait(key, traits.Any)
undefined_traits[key] = Undefined
base.trait_set(trait_change_notify=False, **undefined_traits)
return base
def _run_interface(self, runtime):
# Create function handle
function_handle = create_function_from_source(self.inputs.function_str,
self.imports)
# Get function args
args = {}
for name in self._input_names:
value = getattr(self.inputs, name)
if isdefined(value):
args[name] = value
out = function_handle(**args)
if len(self._output_names) == 1:
self._out[self._output_names[0]] = out
else:
if isinstance(out, tuple) and (len(out) != len(self._output_names)):
raise RuntimeError('Mismatch in number of expected outputs')
else:
for idx, name in enumerate(self._output_names):
self._out[name] = out[idx]
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
for key in self._output_names:
outputs[key] = self._out[key]
return outputs
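# --- Illustrative usage sketch (added for documentation, not part of the
# original module): wrapping a plain callable instead of a source string.
# The helper function and values are invented; getsource() requires that the
# callable live in a real file rather than an interactive session.
def _double(x):
    return x * 2
def _function_wrapper_example():
    node = Function(input_names=['x'], output_names=['doubled'], function=_double)
    node.inputs.x = 4
    result = node.run()
    assert result.outputs.doubled == 8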
| bsd-3-clause | -1,269,931,418,162,323,200 | 36.654088 | 82 | 0.564557 | false |
alexkasko/krakatau-java | krakatau-lib/src/main/resources/Lib/Krakatau/ssa/exceptionset.py | 1 | 8266 | import collections, itertools
from . import objtypes
from .mixin import ValueType
class CatchSetManager(object):
def __init__(self, env, chpairs, attributes=None):
if attributes is not None:
self.env, self.sets, self.mask = attributes
else:
self.env = env
self.sets = collections.OrderedDict() #make this ordered since OnException relies on it
sofar = empty = ExceptionSet.EMPTY
for catchtype, handler in chpairs:
old = self.sets.get(handler, empty)
new = ExceptionSet.fromTops(env, catchtype)
self.sets[handler] = old | (new - sofar)
sofar = sofar | new
self.mask = sofar
self.pruneKeys()
assert(not self._conscheck())
def newMask(self, mask):
for k in self.sets:
self.sets[k] &= mask
self.mask &= mask
assert(not self._conscheck())
def pruneKeys(self):
for handler, catchset in list(self.sets.items()):
if not catchset:
del self.sets[handler]
def copy(self):
return CatchSetManager(0,0,(self.env, self.sets.copy(), self.mask))
def replaceKey(self, old, new):
assert(old in self.sets and new not in self.sets)
self.sets[new] = self.sets[old]
del self.sets[old]
def replaceKeys(self, replace):
self.sets = collections.OrderedDict((replace.get(key,key), val) for key, val in self.sets.items())
def _conscheck(self):
temp = ExceptionSet.EMPTY
for v in self.sets.values():
assert(not v & temp)
temp |= v
assert(temp == self.mask)
class ExceptionSet(ValueType):
__slots__ = "env pairs".split()
def __init__(self, env, pairs): #assumes arguments are in reduced form
self.env = env
self.pairs = frozenset([(x,frozenset(y)) for x,y in pairs])
#We allow env to be None for the empty set so we can construct empty sets easily
#Any operation resulting in a nonempty set will get its env from the nonempty argument
assert(self.empty() or self.env is not None)
#make sure set is fully reduced
parts = []
for t, holes in pairs:
parts.append(t)
parts.extend(holes)
assert(len(set(parts)) == len(parts))
@staticmethod #factory
def fromTops(env, *tops):
return ExceptionSet(env, [(x, frozenset()) for x in tops])
def _key(self): return self.pairs
def empty(self): return not self.pairs
def __nonzero__(self): return bool(self.pairs)
def getSingleTType(self): #todo - update SSA printer
#comSuper doesn't care about order so we can freely pass in nondeterministic order
return objtypes.commonSupertype(self.env, [objtypes.TypeTT(top,0) for (top,holes) in self.pairs])
def getTopTTs(self): return sorted([objtypes.TypeTT(top,0) for (top,holes) in self.pairs])
def __sub__(self, other):
assert(type(self) == type(other))
if self.empty() or other.empty():
return self
if self == other:
return ExceptionSet.EMPTY
subtest = self.env.isSubclass
pairs = self.pairs
for pair2 in other.pairs:
            #Warning, due to a bug in Python, TypeErrors raised inside the gen expr will give an incorrect error message
#TypeError: type object argument after * must be a sequence, not generator
#This can be worked around by using a list comprehension instead of a genexpr after the *
pairs = itertools.chain(*[ExceptionSet.diffPair(subtest, pair1, pair2) for pair1 in pairs])
return ExceptionSet.reduce(self.env, pairs)
def __or__(self, other):
assert(type(self) == type(other))
if other.empty() or self == other:
return self
if self.empty():
return other
return ExceptionSet.reduce(self.env, self.pairs | other.pairs)
def __and__(self, other):
assert(type(self) == type(other))
new = self - (self - other)
return new
def isdisjoint(self, other):
return (self-other) == self
def __str__(self):
parts = [('{} - [{}]'.format(top, ', '.join(sorted(holes))) if holes else top) for top, holes in self.pairs]
return 'ES[{}]'.format(', '.join(parts))
__repr__ = __str__
@staticmethod
def diffPair(subtest, pair1, pair2): #subtract pair2 from pair1. Returns a list of new pairs
#todo - find way to make this less ugly
t1, holes1 = pair1
t2, holes2 = pair2
if subtest(t1,t2): #t2 >= t1
if any(subtest(t1, h) for h in holes2):
return pair1,
else:
newpairs = []
holes2 = [h for h in holes2 if subtest(h, t1) and not any(subtest(h,h2) for h2 in holes1)]
for h in holes2:
newholes = [h2 for h2 in holes1 if subtest(h2, h)]
newpairs.append((h, newholes))
return newpairs
elif subtest(t2,t1): #t2 < t1
if any(subtest(t2, h) for h in holes1):
return pair1,
else:
newpairs = [(t1,ExceptionSet.reduceHoles(subtest, list(holes1)+[t2]))]
holes2 = [h for h in holes2 if not any(subtest(h,h2) for h2 in holes1)]
for h in holes2:
newholes = [h2 for h2 in holes1 if subtest(h2, h)]
newpairs.append((h, newholes))
return newpairs
else:
return pair1,
@staticmethod
def mergePair(subtest, pair1, pair2): #merge pair2 into pair1 and return the union
t1, holes1 = pair1
t2, holes2 = pair2
assert(subtest(t2,t1))
if t2 in holes1:
holes1 = list(holes1)
holes1.remove(t2)
return t1, holes1 + list(holes2)
#TODO - this can probably be made more efficient
holes1a = set(h for h in holes1 if not subtest(h, t2))
holes1b = [h for h in holes1 if h not in holes1a]
merged_holes = set()
for h1, h2 in itertools.product(holes1b, holes2):
if subtest(h2, h1):
merged_holes.add(h1)
elif subtest(h1, h2):
merged_holes.add(h2)
merged_holes = ExceptionSet.reduceHoles(subtest, merged_holes)
assert(len(merged_holes) <= len(holes1b) + len(holes2))
return t1, (list(holes1a) + merged_holes)
@staticmethod
def reduceHoles(subtest, holes):
newholes = []
for hole in holes:
for ehole in newholes:
if subtest(hole, ehole):
break
else:
newholes = [hole] + [h for h in newholes if not subtest(h, hole)]
return newholes
@staticmethod
def reduce(env, pairs):
subtest = env.isSubclass
pairs = [pair for pair in pairs if pair[0] not in pair[1]] #remove all degenerate pairs
newpairs = []
while pairs:
top, holes = pair = pairs.pop()
#look for an existing top to merge into
for epair in newpairs[:]:
etop, eholes = epair
#new pair can be merged into existing pair
if subtest(top, etop) and (top in eholes or not any(subtest(top, ehole) for ehole in eholes)):
new = ExceptionSet.mergePair(subtest, epair, pair)
newpairs, pairs = [new], [p for p in newpairs if p is not epair] + pairs
break
#existing pair can be merged into new pair
elif subtest(etop, top) and (etop in holes or not any(subtest(etop, hole) for hole in holes)):
new = ExceptionSet.mergePair(subtest, pair, epair)
newpairs, pairs = [new], [p for p in newpairs if p is not epair] + pairs
break
#pair is incomparable to all existing pairs
else:
holes = ExceptionSet.reduceHoles(subtest, holes)
newpairs.append((top,holes))
return ExceptionSet(env, newpairs)
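# --- Illustrative sketch (added for documentation, not part of the original
# module): basic set algebra on exception types. 'env' stands in for a loaded
# Krakatau environment whose isSubclass() knows the class hierarchy; the
# class names are only examples.
def _exception_set_example(env):
    throwable = ExceptionSet.fromTops(env, 'java/lang/Throwable')
    runtime = ExceptionSet.fromTops(env, 'java/lang/RuntimeException')
    # Subtracting records RuntimeException as a "hole" under Throwable, so the
    # remainder no longer overlaps the subtracted set.
    remainder = throwable - runtime
    assert remainder.isdisjoint(runtime)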
ExceptionSet.EMPTY = ExceptionSet(None, []) | gpl-3.0 | 6,571,110,257,106,938,000 | 37.451163 | 119 | 0.572586 | false |
akira-baruah/bazel | third_party/py/gflags/gflags/argument_parser.py | 11 | 15710 | #!/usr/bin/env python
# Copyright 2002 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains base classes used to parse and convert arguments.
Instead of importing this module directly, it's preferable to import the
flags package and use the aliases defined at the package level.
"""
import csv
import io
import string
import six
from gflags import _helpers
class _ArgumentParserCache(type):
"""Metaclass used to cache and share argument parsers among flags."""
_instances = {}
def __new__(mcs, name, bases, dct):
_helpers.define_both_methods(name, dct, 'Parse', 'parse')
_helpers.define_both_methods(name, dct, 'Type', 'flag_type')
_helpers.define_both_methods(name, dct, 'Convert', 'convert')
return type.__new__(mcs, name, bases, dct)
def __call__(cls, *args, **kwargs):
"""Returns an instance of the argument parser cls.
This method overrides behavior of the __new__ methods in
all subclasses of ArgumentParser (inclusive). If an instance
for cls with the same set of arguments exists, this instance is
returned, otherwise a new instance is created.
If any keyword arguments are defined, or the values in args
are not hashable, this method always returns a new instance of
cls.
Args:
*args: Positional initializer arguments.
**kwargs: Initializer keyword arguments.
Returns:
An instance of cls, shared or new.
"""
if kwargs:
return type.__call__(cls, *args, **kwargs)
else:
instances = cls._instances
key = (cls,) + tuple(args)
try:
return instances[key]
except KeyError:
# No cache entry for key exists, create a new one.
return instances.setdefault(key, type.__call__(cls, *args))
except TypeError:
# An object in args cannot be hashed, always return
# a new instance.
return type.__call__(cls, *args)
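# --- Illustrative sketch (added for documentation, not part of the original
# module): the cache means parsers constructed with the same positional
# arguments are shared. IntegerParser is defined further down in this module;
# the calls are deferred inside a function so nothing runs at import time.
def _parser_cache_example():
  assert IntegerParser(0, 10) is IntegerParser(0, 10)       # same args -> cached
  assert IntegerParser(0, 10) is not IntegerParser(0, 100)  # different args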
class ArgumentParser(six.with_metaclass(_ArgumentParserCache, object)):
"""Base class used to parse and convert arguments.
The parse() method checks to make sure that the string argument is a
legal value and convert it to a native type. If the value cannot be
converted, it should throw a 'ValueError' exception with a human
readable explanation of why the value is illegal.
Subclasses should also define a syntactic_help string which may be
presented to the user to describe the form of the legal values.
Argument parser classes must be stateless, since instances are cached
and shared between flags. Initializer arguments are allowed, but all
member variables must be derived from initializer arguments only.
"""
syntactic_help = ''
def parse(self, argument):
"""Parses the string argument and returns the native value.
By default it returns its argument unmodified.
Args:
argument: string argument passed in the commandline.
Raises:
ValueError: Raised when it fails to parse the argument.
Returns:
The parsed value in native type.
"""
return argument
def flag_type(self):
"""Returns a string representing the type of the flag."""
return 'string'
def _custom_xml_dom_elements(self, doc): # pylint: disable=unused-argument
"""Returns a list of XML DOM elements to add additional flag information.
Args:
doc: A minidom.Document, the DOM document it should create nodes from.
Returns:
A list of minidom.Element.
"""
return []
class _ArgumentSerializerMeta(type):
def __new__(mcs, name, bases, dct):
_helpers.define_both_methods(name, dct, 'Serialize', 'serialize')
return type.__new__(mcs, name, bases, dct)
class ArgumentSerializer(six.with_metaclass(_ArgumentSerializerMeta, object)):
"""Base class for generating string representations of a flag value."""
def serialize(self, value):
return _helpers.StrOrUnicode(value)
class NumericParser(ArgumentParser):
"""Parser of numeric values.
Parsed value may be bounded to a given upper and lower bound.
"""
def is_outside_bounds(self, val):
return ((self.lower_bound is not None and val < self.lower_bound) or
(self.upper_bound is not None and val > self.upper_bound))
def parse(self, argument):
val = self.convert(argument)
if self.is_outside_bounds(val):
raise ValueError('%s is not %s' % (val, self.syntactic_help))
return val
def _custom_xml_dom_elements(self, doc):
elements = []
if self.lower_bound is not None:
elements.append(_helpers.CreateXMLDOMElement(
doc, 'lower_bound', self.lower_bound))
if self.upper_bound is not None:
elements.append(_helpers.CreateXMLDOMElement(
doc, 'upper_bound', self.upper_bound))
return elements
def convert(self, argument):
"""Default implementation: always returns its argument unmodified."""
return argument
class FloatParser(NumericParser):
"""Parser of floating point values.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = 'a'
number_name = 'number'
syntactic_help = ' '.join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(FloatParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound))
elif lower_bound == 0:
sh = 'a non-negative %s' % self.number_name
elif upper_bound == 0:
sh = 'a non-positive %s' % self.number_name
elif upper_bound is not None:
sh = '%s <= %s' % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = '%s >= %s' % (self.number_name, lower_bound)
self.syntactic_help = sh
def convert(self, argument):
"""Converts argument to a float; raises ValueError on errors."""
return float(argument)
def flag_type(self):
return 'float'
class IntegerParser(NumericParser):
"""Parser of an integer value.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = 'an'
number_name = 'integer'
syntactic_help = ' '.join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(IntegerParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound))
elif lower_bound == 1:
sh = 'a positive %s' % self.number_name
elif upper_bound == -1:
sh = 'a negative %s' % self.number_name
elif lower_bound == 0:
sh = 'a non-negative %s' % self.number_name
elif upper_bound == 0:
sh = 'a non-positive %s' % self.number_name
elif upper_bound is not None:
sh = '%s <= %s' % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = '%s >= %s' % (self.number_name, lower_bound)
self.syntactic_help = sh
def convert(self, argument):
if isinstance(argument, str):
base = 10
if len(argument) > 2 and argument[0] == '0':
if argument[1] == 'o':
base = 8
elif argument[1] == 'x':
base = 16
return int(argument, base)
else:
return int(argument)
def flag_type(self):
return 'int'
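# --- Illustrative usage sketch (added for documentation, not part of the
# original module): bounded integer parsing, including the octal/hex prefixes
# handled by convert(). The bounds and values are invented.
def _integer_parser_example():
  parser = IntegerParser(lower_bound=0, upper_bound=65535)
  assert parser.parse('8080') == 8080
  assert parser.parse('0x10') == 16   # '0x' prefix switches to base 16
  try:
    parser.parse('-1')                # outside [0, 65535]
  except ValueError:
    pass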
class BooleanParser(ArgumentParser):
"""Parser of boolean values."""
def convert(self, argument):
"""Converts the argument to a boolean; raise ValueError on errors."""
if isinstance(argument, str):
if argument.lower() in ['true', 't', '1']:
return True
elif argument.lower() in ['false', 'f', '0']:
return False
bool_argument = bool(argument)
if argument == bool_argument:
# The argument is a valid boolean (True, False, 0, or 1), and not just
# something that always converts to bool (list, string, int, etc.).
return bool_argument
raise ValueError('Non-boolean argument to boolean flag', argument)
def parse(self, argument):
val = self.convert(argument)
return val
def flag_type(self):
return 'bool'
class EnumParser(ArgumentParser):
"""Parser of a string enum value (a string value from a given set).
If enum_values (see below) is not specified, any string is allowed.
"""
def __init__(self, enum_values=None, case_sensitive=True):
"""Initialize EnumParser.
Args:
enum_values: Array of values in the enum.
case_sensitive: Whether or not the enum is to be case-sensitive.
"""
super(EnumParser, self).__init__()
self.enum_values = enum_values
self.case_sensitive = case_sensitive
def parse(self, argument):
"""Determine validity of argument and return the correct element of enum.
If self.enum_values is empty, then all arguments are valid and argument
will be returned.
Otherwise, if argument matches an element in enum, then the first
matching element will be returned.
Args:
argument: The supplied flag value.
Returns:
The matching element from enum_values, or argument if enum_values is
empty.
Raises:
ValueError: enum_values was non-empty, but argument didn't match
anything in enum.
"""
if not self.enum_values:
return argument
elif self.case_sensitive:
if argument not in self.enum_values:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return argument
else:
if argument.upper() not in [value.upper() for value in self.enum_values]:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return [value for value in self.enum_values
if value.upper() == argument.upper()][0]
def flag_type(self):
return 'string enum'
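# --- Illustrative usage sketch (added for documentation, not part of the
# original module): case-insensitive enum parsing returns the canonical
# spelling from enum_values. The enum values are invented.
def _enum_parser_example():
  parser = EnumParser(enum_values=['apple', 'banana'], case_sensitive=False)
  assert parser.parse('APPLE') == 'apple'
  try:
    parser.parse('cherry')            # not in the enum
  except ValueError:
    pass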
class ListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def serialize(self, value):
return self.list_sep.join([_helpers.StrOrUnicode(x) for x in value])
class CsvListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def serialize(self, value):
"""Serialize a list as a string, if possible, or as a unicode string."""
if six.PY2:
# In Python2 csv.writer doesn't accept unicode, so we convert to UTF-8.
output = io.BytesIO()
csv.writer(output).writerow([unicode(x).encode('utf-8') for x in value])
serialized_value = output.getvalue().decode('utf-8').strip()
else:
# In Python3 csv.writer expects a text stream.
output = io.StringIO()
csv.writer(output).writerow([str(x) for x in value])
serialized_value = output.getvalue().strip()
    # We need the returned value to be pure ASCII or unicode strings so that
    # when the XML help is generated they are usefully encodable.
return _helpers.StrOrUnicode(serialized_value)
class BaseListParser(ArgumentParser):
"""Base class for a parser of lists of strings.
To extend, inherit from this class; from the subclass __init__, call
BaseListParser.__init__(self, token, name)
where token is a character used to tokenize, and name is a description
of the separator.
"""
def __init__(self, token=None, name=None):
assert name
super(BaseListParser, self).__init__()
self._token = token
self._name = name
self.syntactic_help = 'a %s separated list' % self._name
def parse(self, argument):
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
return [s.strip() for s in argument.split(self._token)]
def flag_type(self):
return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
"""Parser for a comma-separated list of strings."""
def __init__(self):
BaseListParser.__init__(self, ',', 'comma')
def parse(self, argument):
"""Override to support full CSV syntax."""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
try:
return [s.strip() for s in list(csv.reader([argument], strict=True))[0]]
except csv.Error as e:
# Provide a helpful report for case like
# --listflag="$(printf 'hello,\nworld')"
# IOW, list flag values containing naked newlines. This error
# was previously "reported" by allowing csv.Error to
# propagate.
raise ValueError('Unable to parse the value %r as a %s: %s'
% (argument, self.flag_type(), e))
def _custom_xml_dom_elements(self, doc):
elements = super(ListParser, self)._custom_xml_dom_elements(doc)
elements.append(_helpers.CreateXMLDOMElement(
doc, 'list_separator', repr(',')))
return elements
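# --- Illustrative usage sketch (added for documentation, not part of the
# original module): ListParser supports full CSV syntax, so quoted items may
# themselves contain commas. The values are invented.
def _list_parser_example():
  parser = ListParser()
  assert parser.parse('a,b,c') == ['a', 'b', 'c']
  assert parser.parse('"a,b",c') == ['a,b', 'c']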
class WhitespaceSeparatedListParser(BaseListParser):
"""Parser for a whitespace-separated list of strings."""
def __init__(self, comma_compat=False):
"""Initializer.
Args:
comma_compat: bool - Whether to support comma as an additional separator.
If false then only whitespace is supported. This is intended only for
backwards compatibility with flags that used to be comma-separated.
"""
self._comma_compat = comma_compat
name = 'whitespace or comma' if self._comma_compat else 'whitespace'
BaseListParser.__init__(self, None, name)
def parse(self, argument):
"""Override to support comma compatibility."""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
if self._comma_compat:
argument = argument.replace(',', ' ')
return argument.split()
def _custom_xml_dom_elements(self, doc):
elements = super(WhitespaceSeparatedListParser, self
)._custom_xml_dom_elements(doc)
separators = list(string.whitespace)
if self._comma_compat:
separators.append(',')
separators.sort()
for sep_char in separators:
elements.append(_helpers.CreateXMLDOMElement(
doc, 'list_separator', repr(sep_char)))
return elements
| apache-2.0 | -905,356,330,432,378,500 | 31.729167 | 80 | 0.668682 | false |
chrisndodge/edx-platform | openedx/core/djangoapps/catalog/tests/test_utils.py | 3 | 9344 | """Tests covering utilities for integrating with the catalog service."""
import uuid
import ddt
from django.test import TestCase
import mock
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.catalog import utils
from openedx.core.djangoapps.catalog.models import CatalogIntegration
from openedx.core.djangoapps.catalog.tests import factories, mixins
from student.tests.factories import UserFactory
UTILS_MODULE = 'openedx.core.djangoapps.catalog.utils'
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
# ConfigurationModels use the cache. Make every cache get a miss.
@mock.patch('config_models.models.cache.get', return_value=None)
class TestGetPrograms(mixins.CatalogIntegrationMixin, TestCase):
"""Tests covering retrieval of programs from the catalog service."""
def setUp(self):
super(TestGetPrograms, self).setUp()
self.user = UserFactory()
self.uuid = str(uuid.uuid4())
self.type = 'FooBar'
self.catalog_integration = self.create_catalog_integration(cache_ttl=1)
def assert_contract(self, call_args, program_uuid=None, type=None): # pylint: disable=redefined-builtin
"""Verify that API data retrieval utility is used correctly."""
args, kwargs = call_args
for arg in (self.catalog_integration, self.user, 'programs'):
self.assertIn(arg, args)
self.assertEqual(kwargs['resource_id'], program_uuid)
cache_key = '{base}.programs{type}'.format(
base=self.catalog_integration.CACHE_KEY,
type='.' + type if type else ''
)
self.assertEqual(
kwargs['cache_key'],
cache_key if self.catalog_integration.is_cache_enabled else None
)
self.assertEqual(kwargs['api']._store['base_url'], self.catalog_integration.internal_api_url) # pylint: disable=protected-access
querystring = {
'marketable': 1,
'exclude_utm': 1,
}
if type:
querystring['type'] = type
self.assertEqual(kwargs['querystring'], querystring)
return args, kwargs
def test_get_programs(self, _mock_cache, mock_get_catalog_data):
programs = [factories.Program() for __ in range(3)]
mock_get_catalog_data.return_value = programs
data = utils.get_programs(self.user)
self.assert_contract(mock_get_catalog_data.call_args)
self.assertEqual(data, programs)
def test_get_one_program(self, _mock_cache, mock_get_catalog_data):
program = factories.Program()
mock_get_catalog_data.return_value = program
data = utils.get_programs(self.user, uuid=self.uuid)
self.assert_contract(mock_get_catalog_data.call_args, program_uuid=self.uuid)
self.assertEqual(data, program)
def test_get_programs_by_type(self, _mock_cache, mock_get_catalog_data):
programs = [factories.Program() for __ in range(2)]
mock_get_catalog_data.return_value = programs
data = utils.get_programs(self.user, type=self.type)
self.assert_contract(mock_get_catalog_data.call_args, type=self.type)
self.assertEqual(data, programs)
def test_programs_unavailable(self, _mock_cache, mock_get_catalog_data):
mock_get_catalog_data.return_value = []
data = utils.get_programs(self.user)
self.assert_contract(mock_get_catalog_data.call_args)
self.assertEqual(data, [])
def test_cache_disabled(self, _mock_cache, mock_get_catalog_data):
self.catalog_integration = self.create_catalog_integration(cache_ttl=0)
utils.get_programs(self.user)
self.assert_contract(mock_get_catalog_data.call_args)
def test_config_missing(self, _mock_cache, _mock_get_catalog_data):
"""Verify that no errors occur if this method is called when catalog config is missing."""
CatalogIntegration.objects.all().delete()
data = utils.get_programs(self.user)
self.assertEqual(data, [])
class TestMungeCatalogProgram(TestCase):
"""Tests covering querystring stripping."""
catalog_program = factories.Program()
def test_munge_catalog_program(self):
munged = utils.munge_catalog_program(self.catalog_program)
expected = {
'id': self.catalog_program['uuid'],
'name': self.catalog_program['title'],
'subtitle': self.catalog_program['subtitle'],
'category': self.catalog_program['type'],
'marketing_slug': self.catalog_program['marketing_slug'],
'organizations': [
{
'display_name': organization['name'],
'key': organization['key']
} for organization in self.catalog_program['authoring_organizations']
],
'course_codes': [
{
'display_name': course['title'],
'key': course['key'],
'organization': {
'display_name': course['owners'][0]['name'],
'key': course['owners'][0]['key']
},
'run_modes': [
{
'course_key': run['key'],
'run_key': CourseKey.from_string(run['key']).run,
'mode_slug': 'verified'
} for run in course['course_runs']
],
} for course in self.catalog_program['courses']
],
'banner_image_urls': {
'w1440h480': self.catalog_program['banner_image']['large']['url'],
'w726h242': self.catalog_program['banner_image']['medium']['url'],
'w435h145': self.catalog_program['banner_image']['small']['url'],
'w348h116': self.catalog_program['banner_image']['x-small']['url'],
},
}
self.assertEqual(munged, expected)
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
@mock.patch('config_models.models.cache.get', return_value=None)
class TestGetCourseRun(mixins.CatalogIntegrationMixin, TestCase):
"""Tests covering retrieval of course runs from the catalog service."""
def setUp(self):
super(TestGetCourseRun, self).setUp()
self.user = UserFactory()
self.course_key = CourseKey.from_string('foo/bar/baz')
self.catalog_integration = self.create_catalog_integration()
def assert_contract(self, call_args):
"""Verify that API data retrieval utility is used correctly."""
args, kwargs = call_args
for arg in (self.catalog_integration, self.user, 'course_runs'):
self.assertIn(arg, args)
self.assertEqual(kwargs['resource_id'], unicode(self.course_key))
self.assertEqual(kwargs['api']._store['base_url'], self.catalog_integration.internal_api_url) # pylint: disable=protected-access
return args, kwargs
def test_get_course_run(self, _mock_cache, mock_get_catalog_data):
course_run = factories.CourseRun()
mock_get_catalog_data.return_value = course_run
data = utils.get_course_run(self.course_key, self.user)
self.assert_contract(mock_get_catalog_data.call_args)
self.assertEqual(data, course_run)
def test_course_run_unavailable(self, _mock_cache, mock_get_catalog_data):
mock_get_catalog_data.return_value = []
data = utils.get_course_run(self.course_key, self.user)
self.assert_contract(mock_get_catalog_data.call_args)
self.assertEqual(data, {})
def test_cache_disabled(self, _mock_cache, mock_get_catalog_data):
utils.get_course_run(self.course_key, self.user)
_, kwargs = self.assert_contract(mock_get_catalog_data.call_args)
self.assertIsNone(kwargs['cache_key'])
def test_cache_enabled(self, _mock_cache, mock_get_catalog_data):
catalog_integration = self.create_catalog_integration(cache_ttl=1)
utils.get_course_run(self.course_key, self.user)
_, kwargs = mock_get_catalog_data.call_args
self.assertEqual(kwargs['cache_key'], catalog_integration.CACHE_KEY)
def test_config_missing(self, _mock_cache, _mock_get_catalog_data):
"""Verify that no errors occur if this method is called when catalog config is missing."""
CatalogIntegration.objects.all().delete()
data = utils.get_course_run(self.course_key, self.user)
self.assertEqual(data, {})
@mock.patch(UTILS_MODULE + '.get_course_run')
class TestGetRunMarketingUrl(TestCase):
"""Tests covering retrieval of course run marketing URLs."""
def setUp(self):
super(TestGetRunMarketingUrl, self).setUp()
self.course_key = CourseKey.from_string('foo/bar/baz')
self.user = UserFactory()
def test_get_run_marketing_url(self, mock_get_course_run):
course_run = factories.CourseRun()
mock_get_course_run.return_value = course_run
url = utils.get_run_marketing_url(self.course_key, self.user)
self.assertEqual(url, course_run['marketing_url'])
def test_marketing_url_missing(self, mock_get_course_run):
mock_get_course_run.return_value = {}
url = utils.get_run_marketing_url(self.course_key, self.user)
self.assertEqual(url, None)
| agpl-3.0 | -628,021,784,483,039,200 | 37.771784 | 137 | 0.630351 | false |
popldo/fabric | bddtests/steps/bdd_test_util.py | 14 | 4632 |
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import subprocess
def cli_call(context, arg_list, expect_success=True):
"""Executes a CLI command in a subprocess and return the results.
@param context: the behave context
    @param arg_list: a list of command arguments
@param expect_success: use False to return even if an error occurred when executing the command
@return: (string, string, int) output message, error message, return code
"""
#arg_list[0] = "update-" + arg_list[0]
# We need to run the cli command by actually calling the python command
# the update-cli.py script has a #!/bin/python as the first line
# which calls the system python, not the virtual env python we
# setup for running the update-cli
p = subprocess.Popen(arg_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
if output is not None:
print("Output:\n" + output)
if error is not None:
print("Error Message:\n" + error)
if expect_success:
raise subprocess.CalledProcessError(p.returncode, arg_list, output)
return output, error, p.returncode
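# Illustrative usage sketch (assumption, not part of the original helpers):
#
#   output, error, code = cli_call(context, ["echo", "hello"])
#   # output == "hello\n", error == "", code == 0; a non-zero exit code raises
#   # CalledProcessError unless expect_success=False is passed.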
class UserRegistration:
def __init__(self, secretMsg, composeService):
self.secretMsg = secretMsg
self.composeService = composeService
self.tags = {}
self.lastResult = None
def getUserName(self):
return self.secretMsg['enrollId']
# Registers a user on a specific composeService
def registerUser(context, secretMsg, composeService):
userName = secretMsg['enrollId']
if 'users' in context:
pass
else:
context.users = {}
if userName in context.users:
raise Exception("User already registered: {0}".format(userName))
context.users[userName] = UserRegistration(secretMsg, composeService)
# Returns the UserRegistration previously registered for the given enrollId
def getUserRegistration(context, enrollId):
userRegistration = None
if 'users' in context:
pass
else:
context.users = {}
if enrollId in context.users:
userRegistration = context.users[enrollId]
else:
raise Exception("User has not been registered: {0}".format(enrollId))
return userRegistration
def ipFromContainerNamePart(namePart, containerDataList):
"""Returns the IPAddress based upon a name part of the full container name"""
containerData = containerDataFromNamePart(namePart, containerDataList)
if containerData == None:
raise Exception("Could not find container with namePart = {0}".format(namePart))
return containerData.ipAddress
def fullNameFromContainerNamePart(namePart, containerDataList):
containerData = containerDataFromNamePart(namePart, containerDataList)
if containerData == None:
raise Exception("Could not find container with namePart = {0}".format(namePart))
return containerData.containerName
def containerDataFromNamePart(namePart, containerDataList):
containerNamePrefix = os.path.basename(os.getcwd()) + "_"
fullContainerName = containerNamePrefix + namePart
for containerData in containerDataList:
if containerData.containerName.startswith(fullContainerName):
return containerData
return None
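# Illustrative example (assumption): when the compose project directory is
# "bddtests", docker-compose names containers like "bddtests_vp0_1", so the
# name part "vp0" resolves via
#
#   ip = ipFromContainerNamePart("vp0", context.compose_containers)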
def getContainerDataValuesFromContext(context, aliases, callback):
"""Returns the IPAddress based upon a name part of the full container name"""
assert 'compose_containers' in context, "compose_containers not found in context"
values = []
containerNamePrefix = os.path.basename(os.getcwd()) + "_"
for namePart in aliases:
for containerData in context.compose_containers:
if containerData.containerName.startswith(containerNamePrefix + namePart):
values.append(callback(containerData))
break
return values
def start_background_process(context, program_name, arg_list):
p = subprocess.Popen(arg_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
setattr(context, program_name, p)
| apache-2.0 | 3,013,707,842,821,020,000 | 36.658537 | 99 | 0.712219 | false |
illicitonion/givabit | lib/sdks/google_appengine_1.7.1/google_appengine/google/appengine/api/xmpp/xmpp_service_stub.py | 5 | 6883 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the XMPP API, writes messages to logs."""
import logging
import os
from google.appengine.api import apiproxy_stub
from google.appengine.api import app_identity
from google.appengine.api import xmpp
from google.appengine.api.xmpp import xmpp_service_pb
class XmppServiceStub(apiproxy_stub.APIProxyStub):
"""Python only xmpp service stub.
This stub does not use an XMPP network. It prints messages to the console
instead of sending any stanzas.
"""
def __init__(self, log=logging.info, service_name='xmpp'):
"""Initializer.
Args:
log: A logger, used for dependency injection.
service_name: Service name expected for all calls.
"""
super(XmppServiceStub, self).__init__(service_name)
self.log = log
def _Dynamic_GetPresence(self, request, response):
"""Implementation of XmppService::GetPresence.
Returns online if the first character of the JID comes before 'm' in the
alphabet, otherwise returns offline.
Args:
request: A PresenceRequest.
response: A PresenceResponse.
"""
jid = request.jid()
self._GetFrom(request.from_jid())
if jid[0] < 'm':
response.set_is_available(True)
else:
response.set_is_available(False)
def _Dynamic_SendMessage(self, request, response):
"""Implementation of XmppService::SendMessage.
Args:
request: An XmppMessageRequest.
response: An XmppMessageResponse .
"""
from_jid = self._GetFrom(request.from_jid())
log_message = []
log_message.append('Sending an XMPP Message:')
log_message.append(' From:')
log_message.append(' ' + from_jid)
log_message.append(' Body:')
log_message.append(' ' + request.body())
log_message.append(' Type:')
log_message.append(' ' + request.type())
log_message.append(' Raw Xml:')
log_message.append(' ' + str(request.raw_xml()))
log_message.append(' To JIDs:')
for jid in request.jid_list():
log_message.append(' ' + jid)
self.log('\n'.join(log_message))
for jid in request.jid_list():
response.add_status(xmpp_service_pb.XmppMessageResponse.NO_ERROR)
def _Dynamic_SendInvite(self, request, response):
"""Implementation of XmppService::SendInvite.
Args:
request: An XmppInviteRequest.
response: An XmppInviteResponse .
"""
from_jid = self._GetFrom(request.from_jid())
log_message = []
log_message.append('Sending an XMPP Invite:')
log_message.append(' From:')
log_message.append(' ' + from_jid)
log_message.append(' To: ' + request.jid())
self.log('\n'.join(log_message))
def _Dynamic_SendPresence(self, request, response):
"""Implementation of XmppService::SendPresence.
Args:
request: An XmppSendPresenceRequest.
response: An XmppSendPresenceResponse .
"""
from_jid = self._GetFrom(request.from_jid())
log_message = []
log_message.append('Sending an XMPP Presence:')
log_message.append(' From:')
log_message.append(' ' + from_jid)
log_message.append(' To: ' + request.jid())
if request.type():
log_message.append(' Type: ' + request.type())
if request.show():
log_message.append(' Show: ' + request.show())
if request.status():
log_message.append(' Status: ' + request.status())
self.log('\n'.join(log_message))
def _GetFrom(self, requested):
"""Validates that the from JID is valid.
The JID uses the display-app-id for all apps to simulate a common case
in production (alias === display-app-id).
Args:
requested: The requested from JID.
Returns:
string, The from JID.
Raises:
xmpp.InvalidJidError if the requested JID is invalid.
"""
full_appid = os.environ.get('APPLICATION_ID')
partition, domain_name, display_app_id = (
app_identity.app_identity._ParseFullAppId(full_appid))
if requested == None or requested == '':
return display_app_id + '@appspot.com/bot'
node, domain, resource = ('', '', '')
at = requested.find('@')
if at == -1:
self.log('Invalid From JID: No \'@\' character found. JID: %s', requested)
raise xmpp.InvalidJidError()
node = requested[:at]
rest = requested[at+1:]
if rest.find('@') > -1:
self.log('Invalid From JID: Second \'@\' character found. JID: %s',
requested)
raise xmpp.InvalidJidError()
slash = rest.find('/')
if slash == -1:
domain = rest
resource = 'bot'
else:
domain = rest[:slash]
resource = rest[slash+1:]
if resource.find('/') > -1:
self.log('Invalid From JID: Second \'/\' character found. JID: %s',
requested)
raise xmpp.InvalidJidError()
if domain == 'appspot.com' and node == display_app_id:
return node + '@' + domain + '/' + resource
elif domain == display_app_id + '.appspotchat.com':
return node + '@' + domain + '/' + resource
self.log('Invalid From JID: Must be [email protected][/resource] or '
'[email protected][/resource]. JID: %s', requested)
raise xmpp.InvalidJidError()
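  # Illustrative behaviour sketch (assumption): for an app whose display id is
  # "myapp", _GetFrom() resolves from-JIDs roughly as follows:
  #
  #   ''                               -> '[email protected]/bot'
  #   '[email protected]/assistant'      -> '[email protected]/assistant'
  #   '[email protected]'       -> '[email protected]/bot'
  #   '[email protected]'            -> raises xmpp.InvalidJidError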
def _Dynamic_CreateChannel(self, request, response):
"""Implementation of XmppService::CreateChannel.
Args:
request: A CreateChannelRequest.
response: A CreateChannelResponse.
"""
log_message = []
log_message.append('Sending a Create Channel:')
log_message.append(' Client ID:')
log_message.append(' ' + request.application_key())
if request.duration_minutes():
      log_message.append('    Duration minutes: ' + str(request.duration_minutes()))
self.log('\n'.join(log_message))
def _Dynamic_SendChannelMessage(self, request, response):
"""Implementation of XmppService::SendChannelMessage.
Args:
request: A SendMessageRequest.
response: A SendMessageRequest.
"""
log_message = []
log_message.append('Sending a Channel Message:')
log_message.append(' Client ID:')
log_message.append(' ' + request.application_key())
log_message.append(' Message:')
    log_message.append('       ' + request.message())
self.log('\n'.join(log_message))
| apache-2.0 | 838,252,406,215,235,800 | 29.865471 | 80 | 0.640709 | false |
salv-orlando/MyRepo | nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py | 4 | 3926 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy import MetaData, Table, Column, String
from nova.compute import task_states
from nova.compute import vm_states
meta = MetaData()
c_task_state = Column('task_state',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=True)
_upgrade_translations = {
"stopping": {
"state_description": vm_states.ACTIVE,
"task_state": task_states.STOPPING,
},
"stopped": {
"state_description": vm_states.STOPPED,
"task_state": None,
},
"terminated": {
"state_description": vm_states.DELETED,
"task_state": None,
},
"terminating": {
"state_description": vm_states.ACTIVE,
"task_state": task_states.DELETING,
},
"running": {
"state_description": vm_states.ACTIVE,
"task_state": None,
},
"scheduling": {
"state_description": vm_states.BUILDING,
"task_state": task_states.SCHEDULING,
},
"migrating": {
"state_description": vm_states.MIGRATING,
"task_state": None,
},
"pending": {
"state_description": vm_states.BUILDING,
"task_state": task_states.SCHEDULING,
},
}
_downgrade_translations = {
vm_states.ACTIVE: {
None: "running",
task_states.DELETING: "terminating",
task_states.STOPPING: "stopping",
},
vm_states.BUILDING: {
None: "pending",
task_states.SCHEDULING: "scheduling",
},
vm_states.STOPPED: {
None: "stopped",
},
vm_states.REBUILDING: {
None: "pending",
},
vm_states.DELETED: {
None: "terminated",
},
vm_states.MIGRATING: {
None: "migrating",
},
}
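# Illustrative round trip (not part of the migration itself): a row stored with
# state_description='terminating' before upgrade() ends up with
# vm_state=vm_states.ACTIVE and task_state=task_states.DELETING; downgrade()
# maps that pair back to 'terminating' via
# _downgrade_translations[vm_states.ACTIVE][task_states.DELETING].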
def upgrade(migrate_engine):
meta.bind = migrate_engine
instance_table = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
c_state = instance_table.c.state
c_state.alter(name='power_state')
c_vm_state = instance_table.c.state_description
c_vm_state.alter(name='vm_state')
instance_table.create_column(c_task_state)
for old_state, values in _upgrade_translations.iteritems():
instance_table.update().\
values(**values).\
where(c_vm_state == old_state).\
execute()
def downgrade(migrate_engine):
meta.bind = migrate_engine
instance_table = Table('instances', meta, autoload=True,
autoload_with=migrate_engine)
c_task_state = instance_table.c.task_state
c_state = instance_table.c.power_state
c_state.alter(name='state')
c_vm_state = instance_table.c.vm_state
c_vm_state.alter(name='state_description')
for old_vm_state, old_task_states in _downgrade_translations.iteritems():
for old_task_state, new_state_desc in old_task_states.iteritems():
instance_table.update().\
where(c_task_state == old_task_state).\
where(c_vm_state == old_vm_state).\
values(vm_state=new_state_desc).\
execute()
instance_table.drop_column('task_state')
| apache-2.0 | 1,963,922,868,546,017,000 | 27.449275 | 78 | 0.604177 | false |
0x46616c6b/ansible | lib/ansible/modules/cloud/google/gcpubsub.py | 16 | 11841 | #!/usr/bin/python
# Copyright 2016 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: gcpubsub
version_added: "2.3"
short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
description:
- Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
See U(https://cloud.google.com/pubsub/docs) for an overview.
requirements:
- "python >= 2.6"
- "google-auth >= 0.5.0"
- "google-cloud-pubsub >= 0.22.0"
notes:
- Subscription pull happens before publish. You cannot publish and pull in the same task.
author:
- "Tom Melendez (@supertom) <[email protected]>"
options:
topic:
description:
- GCP pubsub topic name. Only the name, not the full path, is required.
required: True
subscription:
description:
      - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull. For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediately are available as subfields. See subfields name, push_endpoint and ack_deadline for more information.
required: False
name:
description: Subfield of subscription. Required if subscription is specified. See examples.
required: False
ack_deadline:
description: Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
required: False
pull:
description: Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the provided subscription name. max_messages (int; default None; max number of messages to pull), message_ack (bool; default False; acknowledge the message) and return_immediately (bool; default True, don't wait for messages to appear). If the messages are acknowledged, changed is set to True, otherwise, changed is False.
push_endpoint:
description: Subfield of subscription. Not required. If specified, message will be sent to an endpoint. See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
required: False
publish:
description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format. Only message is required.
required: False
state:
    description: State of the topic or queue (absent, present). Applies to the most granular resource. If subscription is specified we remove it. If only topic is specified, that is what is removed. Note that a topic can be removed without first removing the subscription.
required: False
default: "present"
'''
EXAMPLES = '''
# Create a topic and publish a message to it
# (Message will be pushed; there is no check to see if the message was pushed before
# Topics:
## Create Topic
gcpubsub:
topic: ansible-topic-example
state: present
## Delete Topic
### Subscriptions associated with topic are not deleted.
gcpubsub:
topic: ansible-topic-example
state: absent
## Messages: publish multiple messages, with attributes (key:value available with the message)
### setting absent will keep the messages from being sent
gcpubsub:
topic: "{{ topic_name }}"
state: present
publish:
- message: "this is message 1"
attributes:
mykey1: myvalue
mykey2: myvalu2
mykey3: myvalue3
- message: "this is message 2"
attributes:
server: prod
sla: "99.9999"
owner: fred
# Subscriptions
## Create Subscription (pull)
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
state: present
## Create Subscription with ack_deadline and push endpoint
### pull is default, ack_deadline is not required
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
ack_deadline: "60"
push_endpoint: http://pushendpoint.example.com
state: present
## Subscription change from push to pull
### setting push_endpoint to "None" converts subscription to pull.
gcpubsub:
topic: ansible-topic-example
subscription:
name: mysub
push_endpoint: "None"
## Delete subscription
### Topic will not be deleted
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
state: absent
## Pull messages from subscription
### only pull keyword is required.
gcpubsub:
topic: ansible-topic-example
subscription:
name: ansible-topic-example-sub
pull:
message_ack: yes
max_messages: "100"
'''
RETURN = '''
publish:
description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format. Only message is required.
returned: Only when specified
type: list of dictionary
sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
pulled_messages:
description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
returned: Only when subscription.pull is specified
type: list of dictionary
sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
state:
description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
returned: Always
type: str
sample: "present"
subscription:
description: Name of subscription.
returned: When subscription fields are specified
type: str
sample: "mysubscription"
topic:
description: Name of topic.
returned: Always
type: str
sample: "mytopic"
'''
CLOUD_CLIENT = 'google-cloud-pubsub'
CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
from google.cloud import pubsub
HAS_GOOGLE_CLOUD_PUBSUB = True
except ImportError as e:
HAS_GOOGLE_CLOUD_PUBSUB = False
def publish_messages(message_list, topic):
with topic.batch() as batch:
for message in message_list:
msg = message['message']
attrs = {}
if 'attributes' in message:
attrs = message['attributes']
batch.publish(bytes(msg), **attrs)
return True
def pull_messages(pull_params, sub):
"""
:rtype: tuple (output, changed)
"""
changed = False
max_messages=pull_params.get('max_messages', None)
message_ack = pull_params.get('message_ack', 'no')
return_immediately = pull_params.get('return_immediately', False)
output= []
pulled = sub.pull(return_immediately=return_immediately,
max_messages=max_messages)
for ack_id, msg in pulled:
msg_dict = {'message_id': msg.message_id,
'attributes': msg.attributes,
'data': msg.data,
'ack_id': ack_id }
output.append(msg_dict)
if message_ack:
ack_ids = [m['ack_id'] for m in output]
if ack_ids:
sub.acknowledge(ack_ids)
changed = True
return (output, changed)
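# Illustrative result shape (assumption): with one pending message,
#   pull_messages({'message_ack': True, 'max_messages': 1}, sub)
# would return something like
#   ([{'message_id': '123', 'attributes': {}, 'data': 'hello', 'ack_id': 'QV5...'}], True)
# where the second element is True because the pulled message was acknowledged.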
def main():
module = AnsibleModule(argument_spec=dict(
topic=dict(required=True),
state=dict(choices=['absent', 'present'], default='present'),
publish=dict(type='list', default=None),
subscription=dict(type='dict', default=None),
service_account_email=dict(),
credentials_file=dict(),
project_id=dict(), ),)
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_GOOGLE_CLOUD_PUBSUB:
module.fail_json(msg="Please install google-cloud-pubsub library.")
if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
mod_params = {}
mod_params['publish'] = module.params.get('publish')
mod_params['state'] = module.params.get('state')
mod_params['topic'] = module.params.get('topic')
mod_params['subscription'] = module.params.get('subscription')
creds, params = get_google_cloud_credentials(module)
pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
changed = False
json_output = {}
t = None
if mod_params['topic']:
t = pubsub_client.topic(mod_params['topic'])
s = None
if mod_params['subscription']:
# Note: default ack deadline cannot be changed without deleting/recreating subscription
s = t.subscription(mod_params['subscription']['name'],
ack_deadline=mod_params['subscription'].get('ack_deadline', None),
push_endpoint=mod_params['subscription'].get('push_endpoint', None))
if mod_params['state'] == 'absent':
        # Remove the most granular resource. If subscription is specified
# we remove it. If only topic is specified, that is what is removed.
# Note that a topic can be removed without first removing the subscription.
# TODO(supertom): Enhancement: Provide an option to only delete a topic
# if there are no subscriptions associated with it (which the API does not support).
if s is not None:
if s.exists():
s.delete()
changed = True
else:
if t.exists():
t.delete()
changed = True
elif mod_params['state'] == 'present':
if not t.exists():
t.create()
changed = True
if s:
if not s.exists():
s.create()
s.reload()
changed = True
else:
# Subscription operations
# TODO(supertom): if more 'update' operations arise, turn this into a function.
s.reload()
push_endpoint=mod_params['subscription'].get('push_endpoint', None)
if push_endpoint is not None:
if push_endpoint != s.push_endpoint:
if push_endpoint == 'None':
push_endpoint = None
s.modify_push_configuration(push_endpoint=push_endpoint)
s.reload()
changed = push_endpoint == s.push_endpoint
if 'pull' in mod_params['subscription']:
if s.push_endpoint is not None:
module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
(json_output['pulled_messages'], changed) = pull_messages(
mod_params['subscription']['pull'], s)
# publish messages to the topic
if mod_params['publish'] and len(mod_params['publish']) > 0:
changed = publish_messages(mod_params['publish'], t)
json_output['changed'] = changed
json_output.update(mod_params)
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gcp import *
if __name__ == '__main__':
main()
| gpl-3.0 | -4,408,834,727,503,473,000 | 34.990881 | 436 | 0.671312 | false |
eclectic-boy/rhodonea_mapper | tests/rhodonea_mapper/tests_models.py | 1 | 2164 | from unittest.mock import patch, call
from django.contrib.gis.geos import MultiPolygon, Polygon
from django.test import TestCase
from pyproj import Geod
from rhodonea_mapper.models import Rhodonea, Layer
from tests.rhodonea_mapper.factories import (
LayerFactory,
RhodoneaFactory,
get_centered_envelope,
)
class LayerTests(TestCase):
def test_add_overlay(self):
count = 10
layer = LayerFactory(overlays_count=count)
layer.add_overlay()
layer.refresh_from_db()
self.assertEqual(count + 1, layer.overlays_count)
@patch.object(Rhodonea, 'build_envelope', autospec=True)
def test_set_envelope(self, build_envelope):
def build_envelope_mock(self):
return get_centered_envelope(self.point, 10)
build_envelope.side_effect = build_envelope_mock
layer = LayerFactory()
rh1 = RhodoneaFactory(layer=layer)
rh2 = RhodoneaFactory(layer=layer)
rh3 = RhodoneaFactory(layer=layer)
layer.envelope = None
layer.save()
self.assertIsNone(layer.envelope)
layer.set_envelope()
layer.refresh_from_db()
m_p = MultiPolygon(
rh1.build_envelope(),
rh2.build_envelope(),
rh3.build_envelope(),
)
self.assertEqual(m_p.envelope.wkt, layer.envelope.wkt)
class RhodoneaTests(TestCase):
@patch.object(Layer, 'set_envelope')
@patch.object(Geod, 'fwd')
@patch.object(Polygon, 'from_bbox')
def test_build_envelope(self, from_bbox, fwd, set_envelope):
fwd.return_value = (10, 20)
geom = get_centered_envelope()
from_bbox.return_value = geom
rh = RhodoneaFactory()
envelope = rh.build_envelope()
self.assertEqual([
call(*rh.point, -90, rh.r),
call(*rh.point, 180, rh.r),
call(*rh.point, 90, rh.r),
call(*rh.point, 0, rh.r),
], fwd.call_args_list)
from_bbox.assert_called_with((
fwd()[0],
fwd()[1],
fwd()[0],
fwd()[1],
))
self.assertEqual(geom.envelope.wkt, envelope.wkt)
| bsd-3-clause | 8,421,178,477,606,176,000 | 27.473684 | 64 | 0.60536 | false |
cosailer/caeproject | simulation_result/8/project3_harmonic_coat.py | 1 | 1991 | # -*- coding: utf-8 -*-
"""
-------------------------------------
N A C S P Y T H O N S C R I P T
-------------------------------------
NACS version: 2.0.2745 - pre3
NACS architecture: CENTOS 5.11 (X86_64)
File generated at Tue Jan 20 16:55:05 2015
On host 'lse86' by 'cae42'
"""
from __future__ import division
try:
from nacs.scripting import *
except:
raise Exception("File is only executable in the NACS python interpreter!")
# =================
# NACS SIMULATION
# =================
simulation = NacsSimulation()
simulation.setGrid(u'project3.nmf', 'plane')
simulation.addOutput(Output.Nacs())
text = Output.Text()
simulation.addOutput(text)
simulation.addOutput(Output.GiD())
# =====================
# MATERIAL DEFINITION
# =====================
copper = Material('Copper')
copper.density(8940.0)
copper.lossTangensDelta([1000],[0.002])
copper.stiffness.isotropic.byENu(1.15e+11, 0.35)
steel = Material('Steel')
steel.density(8242.5)
steel.lossTangensDelta([1000],[0.0003])
steel.stiffness.isotropic.byENu(1.95e+11, 0.28)
silicon = Material('Silicon')
silicon.density(2300.0)
silicon.stiffness.isotropic.byENu(67500000000.0, 0.1)
simulation.setMat('exc_f_r', copper)
simulation.setMat('rec_f_r', copper)
simulation.setMat('sen_coat_r', steel)
simulation.setMat('silicon_r', silicon)
# ===============
# ANALYSIS STEP
# ===============
harm1 = Analysis.Harmonic()
harm1.set(1, 1373000000.0, 1373000000.0, 'log')
mech1 = Physic.Mechanic('planeStrain')
mech1.addRegions(['exc_f_r', 'sen_coat_r', 'silicon_r', 'rec_f_r'])
mech1.addBc(mech1.BC.Force.expr('exc_f_r', 'y', "-1000"))
mech1.addBc(mech1.BC.Fix('outerbounds_bot', ['x', 'y']))
mech1.addResult(mech1.Result.Displacement(['exc_f_r', 'rec_f_r', 'sen_coat_r', 'silicon_r']))
mech1.addResult(mech1.Result.Displacement(['observer_point_1', 'observer_point_2', 'observer_point_3', 'observer_point_4', 'observer_point_e4'], 'amplPhase', 'mesh', [text]))
harm1.addPhysic(mech1)
simulation.addAnalysis(harm1)
| gpl-2.0 | 3,311,929,993,577,821,000 | 27.855072 | 174 | 0.641888 | false |
zincsoda/qemu-heca | scripts/tracetool/backend/dtrace.py | 11 | 2147 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DTrace/SystemTAP backend.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
PROBEPREFIX = None
def _probeprefix():
if PROBEPREFIX is None:
raise ValueError("you must set PROBEPREFIX")
return PROBEPREFIX
BINARY = None
def _binary():
if BINARY is None:
raise ValueError("you must set BINARY")
return BINARY
def c(events):
pass
def h(events):
out('#include "trace-dtrace.h"',
'')
for e in events:
out('static inline void trace_%(name)s(%(args)s) {',
' QEMU_%(uppername)s(%(argnames)s);',
'}',
name = e.name,
args = e.args,
uppername = e.name.upper(),
argnames = ", ".join(e.args.names()),
)
def d(events):
out('provider qemu {')
for e in events:
args = str(e.args)
# DTrace provider syntax expects foo() for empty
# params, not foo(void)
if args == 'void':
args = ''
# Define prototype for probe arguments
out('',
'probe %(name)s(%(args)s);',
name = e.name,
args = args,
)
out('',
'};')
def stap(events):
for e in events:
# Define prototype for probe arguments
out('probe %(probeprefix)s.%(name)s = process("%(binary)s").mark("%(name)s")',
'{',
probeprefix = _probeprefix(),
name = e.name,
binary = _binary(),
)
i = 1
if len(e.args) > 0:
for name in e.args.names():
# Append underscore to reserved keywords
if name in ('limit', 'in', 'next', 'self', 'function'):
name += '_'
out(' %s = $arg%d;' % (name, i))
i += 1
out('}')
out()
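# Illustrative output sketch (assumption): with PROBEPREFIX = 'qemu.system' and
# BINARY = 'qemu-system-x86_64', an event "my_event(int x)" would make stap()
# emit roughly:
#
#   probe qemu.system.my_event = process("qemu-system-x86_64").mark("my_event")
#   {
#     x = $arg1;
#   }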
| gpl-2.0 | -1,816,221,435,878,794,000 | 21.113402 | 86 | 0.495105 | false |
Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-personalizer/azure/cognitiveservices/personalizer/models/error_response_py3.py | 1 | 1522 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ErrorResponse(Model):
"""Used to return an error to the client.
All required parameters must be populated in order to send to Azure.
:param error: Required. The error object.
:type error:
~azure.cognitiveservices.personalizer.models.PersonalizerError
"""
_validation = {
'error': {'required': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'PersonalizerError'},
}
def __init__(self, *, error, **kwargs) -> None:
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class ErrorResponseException(HttpOperationError):
"""Server responded with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
| mit | -8,343,446,994,285,037,000 | 30.708333 | 99 | 0.620894 | false |
hawwach/Hawwach-Work | plugin.video.bokra/default.py | 1 | 8440 | # -*- coding: utf8 -*-
import urllib,urllib2,re,xbmcplugin,xbmcgui
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
from httplib import HTTP
from urlparse import urlparse
import StringIO
import urllib2,urllib
import re
import httplib
import time
import xbmcgui
from urllib2 import Request, build_opener, HTTPCookieProcessor, HTTPHandler
import cookielib
import datetime
__settings__ = xbmcaddon.Addon(id='plugin.video.bokra')
__icon__ = __settings__.getAddonInfo('icon')
__fanart__ = __settings__.getAddonInfo('fanart')
__language__ = __settings__.getLocalizedString
_thisPlugin = int(sys.argv[1])
_pluginName = (sys.argv[0])
def patch_http_response_read(func):
def inner(*args):
try:
return func(*args)
except httplib.IncompleteRead, e:
return e.partial
return inner
httplib.HTTPResponse.read = patch_http_response_read(httplib.HTTPResponse.read)
def get_categories(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
target= re.findall(r' <ul class="hidden-xs">(.*?)\s(.*?) </ul>', link, re.DOTALL)
mylist= [items for i in target for items in i if items !='']
final_catergories = [it for itr in mylist for it in itr.split('/a></li>') if '="' in str(it) ]
for itr in final_catergories:
my_data =itr.split('="')[1]
path = 'http://shahidlive.com'+my_data.split('">')[0]
title = my_data.split('">')[1]
title = title.replace('<','')
if 'مسلسلات' in str(title):
addDir(title,path,1,'')
elif 'افلام' in str(title):
addDir(title,path,2,'')
def list_cat_content(url):
max_nr = int(get_max_page(url))
for iter in range(1,max_nr):
try:
url = url.split('-')[0] +'-'+ url.split('-')[1]+'-'+str(iter)
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req,timeout=1)
link=response.read()
target= re.findall(r'<div class="col-(.*?)\s(.*?)</h4></div>', link, re.DOTALL)
counter = 0
for itr in target:
counter =counter +1
if counter > 1:
for item in itr:
item= item.split('">')
try:
path = 'http://shahidlive.com'+item[1].replace('<a href="','').strip()
img = item[3].split('="')[1].split('"')[0].strip()
title = item[6].replace('<h4>','').strip()
addDir(title,path,3,img)
except:
pass
except:
            print 'Nothing to view'
def get_max_page(url):
my_nr_list = []
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
target= re.findall(r'<ul class="pagination">(.*?)\s(.*?)</div>', link, re.DOTALL)
for item in target:
for itr in item:
for i in itr.split('class="page"'):
try:
my_list_item= i.split('</a></li><li class=')[0].split('">')[1]
if my_list_item.isdigit():
my_nr_list.append(my_list_item)
except:
pass
return max (my_nr_list)
def get_episodes(url):
max_nr = int(get_max_page(url))
try:
for iter in range(0,max_nr):
url = url.split('-')[0] +'-'+ url.split('-')[1]+'-'+str(iter)
print url
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
target= re.findall(r' <a href="(.*?)\s(.*?)<img src="(.*?)\s(.*?)class(.*?)\s(.*?)<div class="title"><h4>(.*?)\s(.*?)</h4></div>', link, re.DOTALL)
counter = 0
for itr in target:
counter =counter +1
if counter > 1:
video = 'http://shahidlive.com'+ itr[0].replace('">','').strip()
img = itr[2].replace('"','').strip()
name = itr[6]+' '+ itr[7]
name= name.strip()
addLink(name,video,4,img)
except:
pass
def get_video_file(url):
url = 'http://shahidlive.com/Play/'+url.split('Video-')[1]+'-681-382'
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
target= re.findall(r'<iframe src="(.*?)\s(.*?)"', link, re.DOTALL)
target= target[0]
target = target[0].replace('"','').strip()
req_target = urllib2.Request(target)
req_target.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response_target = urllib2.urlopen(req_target)
link_target=response_target.read()
link_file= re.findall(r'"file":(.*?)\s(.*?)",', link_target, re.DOTALL)
link_streamer= re.findall(r'"streamer":(.*?)\s(.*?)",', link_target, re.DOTALL)
link_flash= re.findall(r'"flashplayer":(.*?)\s(.*?)",', link_target, re.DOTALL)
link_flash= link_flash[0]
link_flash= 'http://nadstream.shahidlive.com'+link_flash[1].replace('"','').strip()
link_file= link_file[0]
link_file= link_file[1]
link_streamer= link_streamer[0]
link_streamer= link_streamer[1]
final_video_url = link_streamer.replace('"','').strip()+' playpath='+link_file.replace('"','').strip()+' swfUrl='+link_flash+ ' timeout=20'
listItem = xbmcgui.ListItem(path=str(final_video_url))
xbmcplugin.setResolvedUrl(_thisPlugin, True, listItem)
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
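# Illustrative example (assumption): with sys.argv[2] == '?url=http%3A%2F%2Fexample&mode=1',
# get_params() returns {'url': 'http%3A%2F%2Fexample', 'mode': '1'}; the url value is
# decoded further down with urllib.unquote_plus.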
def addLink(name,url,mode,iconimage):
u=_pluginName+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty("IsPlayable","true");
ok=xbmcplugin.addDirectoryItem(handle=_thisPlugin,url=u,listitem=liz,isFolder=False)
return ok
def addDir(name,url,mode,iconimage):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
params=get_params()
url=None
name=None
mode=None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
if mode==None or url==None or len(url)<1:
print ""
get_categories('http://shahidlive.com/')
elif mode==1:
print ""+url
list_cat_content(url)
elif mode==2:
print ""+url
get_episodes(url)
elif mode==3:
print ""+url
get_episodes(url)
elif mode==4:
print ""+url
get_video_file(url)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 | -6,845,934,217,304,861,000 | 33.4 | 159 | 0.562055 | false |
BehnamEmamian/tdesktop | Telegram/SourceFiles/mtproto/generate.py | 3 | 45546 | '''
This file is part of Telegram Desktop,
the official desktop version of Telegram messaging app, see https://telegram.org
Telegram Desktop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
It is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.
Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE
Copyright (c) 2014 John Preston, https://desktop.telegram.org
'''
import glob
import re
import binascii
# define some checked flag conversions
# the key flag type should be a subset of the value flag type
# with exactly the same names, so the key flag can be implicitly
# cast to the value flag type
parentFlags = {};
parentFlagsList = [];
def addChildParentFlags(child, parent):
parentFlagsList.append(child);
parentFlags[child] = parent;
addChildParentFlags('MTPDmessageService', 'MTPDmessage');
addChildParentFlags('MTPDupdateShortMessage', 'MTPDmessage');
addChildParentFlags('MTPDupdateShortChatMessage', 'MTPDmessage');
addChildParentFlags('MTPDupdateShortSentMessage', 'MTPDmessage');
addChildParentFlags('MTPDreplyKeyboardHide', 'MTPDreplyKeyboardMarkup');
addChildParentFlags('MTPDreplyKeyboardForceReply', 'MTPDreplyKeyboardMarkup');
addChildParentFlags('MTPDinputPeerNotifySettings', 'MTPDpeerNotifySettings');
addChildParentFlags('MTPDpeerNotifySettings', 'MTPDinputPeerNotifySettings');
addChildParentFlags('MTPDchannelForbidden', 'MTPDchannel');
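# e.g. parentFlags['MTPDmessageService'] == 'MTPDmessage' declares that every
# flag bit of MTPDmessageService must also exist, under the same name, in
# MTPDmessage, so the child flags can be cast to the parent flags type; the
# actual bits are collected into parentFlagsCheck while parsing the scheme.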
# this is a map (key flags -> map (flag name -> flag bit))
# each key flag of parentFlags should be a subset of the value flag here
parentFlagsCheck = {};
layer = '';
funcs = 0
types = 0;
consts = 0
funcsNow = 0
enums = [];
funcsDict = {};
funcsList = [];
typesDict = {};
TypesDict = {};
typesList = [];
boxed = {};
funcsText = '';
typesText = '';
dataTexts = '';
creatorProxyText = '';
inlineMethods = '';
textSerializeInit = '';
textSerializeMethods = '';
forwards = '';
forwTypedefs = '';
out = open('scheme_auto.h', 'w')
out.write('/*\n');
out.write('Created from \'/SourceFiles/mtproto/scheme.tl\' by \'/SourceFiles/mtproto/generate.py\' script\n\n');
out.write('WARNING! All changes made in this file will be lost!\n\n');
out.write('This file is part of Telegram Desktop,\n');
out.write('the official desktop version of Telegram messaging app, see https://telegram.org\n');
out.write('\n');
out.write('Telegram Desktop is free software: you can redistribute it and/or modify\n');
out.write('it under the terms of the GNU General Public License as published by\n');
out.write('the Free Software Foundation, either version 3 of the License, or\n');
out.write('(at your option) any later version.\n');
out.write('\n');
out.write('It is distributed in the hope that it will be useful,\n');
out.write('but WITHOUT ANY WARRANTY; without even the implied warranty of\n');
out.write('MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n');
out.write('GNU General Public License for more details.\n');
out.write('\n');
out.write('In addition, as a special exception, the copyright holders give permission\n');
out.write('to link the code of portions of this program with the OpenSSL library.\n');
out.write('\n');
out.write('Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE\n');
out.write('Copyright (c) 2014 John Preston, https://desktop.telegram.org\n');
out.write('*/\n');
out.write('#pragma once\n\n#include "mtproto/core_types.h"\n');
with open('scheme.tl') as f:
for line in f:
layerline = re.match(r'// LAYER (\d+)', line)
if (layerline):
layer = 'static constexpr mtpPrime CurrentLayer = ' + layerline.group(1) + ';';
nocomment = re.match(r'^(.*?)//', line)
if (nocomment):
line = nocomment.group(1);
if (re.match(r'\-\-\-functions\-\-\-', line)):
funcsNow = 1;
continue;
if (re.match(r'\-\-\-types\-\-\-', line)):
funcsNow = 0;
continue;
if (re.match(r'^\s*$', line)):
continue;
nametype = re.match(r'([a-zA-Z\.0-9_]+)#([0-9a-f]+)([^=]*)=\s*([a-zA-Z\.<>0-9_]+);', line);
if (not nametype):
if (not re.match(r'vector#1cb5c415 \{t:Type\} # \[ t \] = Vector t;', line)):
print('Bad line found: ' + line);
continue;
name = nametype.group(1);
nameInd = name.find('.');
if (nameInd >= 0):
Name = name[0:nameInd] + '_' + name[nameInd + 1:nameInd + 2].upper() + name[nameInd + 2:];
name = name.replace('.', '_');
else:
Name = name[0:1].upper() + name[1:];
typeid = nametype.group(2);
while (len(typeid) > 0 and typeid[0] == '0'):
typeid = typeid[1:];
if (len(typeid) == 0):
typeid = '0';
typeid = '0x' + typeid;
cleanline = nametype.group(1) + nametype.group(3) + '= ' + nametype.group(4);
cleanline = re.sub(r' [a-zA-Z0-9_]+\:flags\.[0-9]+\?true', '', cleanline);
cleanline = cleanline.replace('<', ' ').replace('>', ' ').replace(' ', ' ');
cleanline = re.sub(r'^ ', '', cleanline);
cleanline = re.sub(r' $', '', cleanline);
cleanline = cleanline.replace(':bytes ', ':string ');
cleanline = cleanline.replace('?bytes ', '?string ');
cleanline = cleanline.replace('{', '');
cleanline = cleanline.replace('}', '');
countTypeId = binascii.crc32(binascii.a2b_qp(cleanline));
if (countTypeId < 0):
countTypeId += 2 ** 32;
countTypeId = '0x' + re.sub(r'^0x|L$', '', hex(countTypeId));
if (typeid != countTypeId):
print('Warning: counted ' + countTypeId + ' mismatch with provided ' + typeid + ' (' + cleanline + ')');
continue;
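		# e.g. for the scheme line "ping#7abe77ec ping_id:long = Pong;" the
		# normalized text is "ping ping_id:long = Pong", and the line is only
		# accepted if the CRC32 of that text equals the declared id 0x7abe77ec.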
params = nametype.group(3);
restype = nametype.group(4);
if (restype.find('<') >= 0):
templ = re.match(r'^([vV]ector<)([A-Za-z0-9\._]+)>$', restype);
if (templ):
vectemplate = templ.group(2);
if (re.match(r'^[A-Z]', vectemplate) or re.match(r'^[a-zA-Z0-9]+_[A-Z]', vectemplate)):
restype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
elif (vectemplate == 'int' or vectemplate == 'long' or vectemplate == 'string'):
restype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
else:
foundmeta = '';
for metatype in typesDict:
for typedata in typesDict[metatype]:
if (typedata[0] == vectemplate):
foundmeta = metatype;
break;
if (len(foundmeta) > 0):
break;
if (len(foundmeta) > 0):
ptype = templ.group(1) + 'MTP' + foundmeta.replace('.', '_') + '>';
else:
print('Bad vector param: ' + vectemplate);
continue;
else:
print('Bad template type: ' + restype);
continue;
resType = restype.replace('.', '_');
if (restype.find('.') >= 0):
parts = re.match(r'([a-z]+)\.([A-Z][A-Za-z0-9<>\._]+)', restype)
if (parts):
restype = parts.group(1) + '_' + parts.group(2)[0:1].lower() + parts.group(2)[1:];
else:
print('Bad result type name with dot: ' + restype);
continue;
else:
if (re.match(r'^[A-Z]', restype)):
restype = restype[:1].lower() + restype[1:];
else:
print('Bad result type name: ' + restype);
continue;
boxed[resType] = restype;
boxed[Name] = name;
enums.append('\tmtpc_' + name + ' = ' + typeid);
paramsList = params.strip().split(' ');
prms = {};
conditions = {};
trivialConditions = {}; # true type
prmsList = [];
conditionsList = [];
isTemplate = hasFlags = hasTemplate = '';
for param in paramsList:
if (re.match(r'^\s*$', param)):
continue;
templ = re.match(r'^{([A-Za-z]+):Type}$', param);
if (templ):
hasTemplate = templ.group(1);
continue;
pnametype = re.match(r'([a-z_][a-z0-9_]*):([A-Za-z0-9<>\._]+|![a-zA-Z]+|\#|[a-z_][a-z0-9_]*\.[0-9]+\?[A-Za-z0-9<>\._]+)$', param);
if (not pnametype):
print('Bad param found: "' + param + '" in line: ' + line);
continue;
pname = pnametype.group(1);
ptypewide = pnametype.group(2);
if (re.match(r'^!([a-zA-Z]+)$', ptypewide)):
if ('!' + hasTemplate == ptypewide):
isTemplate = pname;
ptype = 'TQueryType';
else:
print('Bad template param name: "' + param + '" in line: ' + line);
continue;
elif (ptypewide == '#'):
hasFlags = pname;
if funcsNow:
ptype = 'flags<MTP' + name + '::Flags>';
else:
ptype = 'flags<MTPD' + name + '::Flags>';
else:
ptype = ptypewide;
if (ptype.find('?') >= 0):
pmasktype = re.match(r'([a-z_][a-z0-9_]*)\.([0-9]+)\?([A-Za-z0-9<>\._]+)', ptype);
if (not pmasktype or pmasktype.group(1) != hasFlags):
print('Bad param found: "' + param + '" in line: ' + line);
continue;
ptype = pmasktype.group(3);
if (ptype.find('<') >= 0):
templ = re.match(r'^([vV]ector<)([A-Za-z0-9\._]+)>$', ptype);
if (templ):
vectemplate = templ.group(2);
if (re.match(r'^[A-Z]', vectemplate) or re.match(r'^[a-zA-Z0-9]+_[A-Z]', vectemplate)):
ptype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
elif (vectemplate == 'int' or vectemplate == 'long' or vectemplate == 'string'):
ptype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
else:
foundmeta = '';
for metatype in typesDict:
for typedata in typesDict[metatype]:
if (typedata[0] == vectemplate):
foundmeta = metatype;
break;
if (len(foundmeta) > 0):
break;
if (len(foundmeta) > 0):
ptype = templ.group(1) + 'MTP' + foundmeta.replace('.', '_') + '>';
else:
print('Bad vector param: ' + vectemplate);
continue;
else:
print('Bad template type: ' + ptype);
continue;
if (not pname in conditions):
conditionsList.append(pname);
conditions[pname] = pmasktype.group(2);
if (ptype == 'true'):
trivialConditions[pname] = 1;
elif (ptype.find('<') >= 0):
templ = re.match(r'^([vV]ector<)([A-Za-z0-9\._]+)>$', ptype);
if (templ):
vectemplate = templ.group(2);
if (re.match(r'^[A-Z]', vectemplate) or re.match(r'^[a-zA-Z0-9]+_[A-Z]', vectemplate)):
ptype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
elif (vectemplate == 'int' or vectemplate == 'long' or vectemplate == 'string'):
ptype = templ.group(1) + 'MTP' + vectemplate.replace('.', '_') + '>';
else:
foundmeta = '';
for metatype in typesDict:
for typedata in typesDict[metatype]:
if (typedata[0] == vectemplate):
foundmeta = metatype;
break;
if (len(foundmeta) > 0):
break;
if (len(foundmeta) > 0):
ptype = templ.group(1) + 'MTP' + foundmeta.replace('.', '_') + '>';
else:
print('Bad vector param: ' + vectemplate);
continue;
else:
print('Bad template type: ' + ptype);
continue;
prmsList.append(pname);
prms[pname] = ptype.replace('.', '_');
if (isTemplate == '' and resType == 'X'):
print('Bad response type "X" in "' + name +'" in line: ' + line);
continue;
if funcsNow:
if (isTemplate != ''):
funcsText += '\ntemplate <typename TQueryType>';
funcsText += '\nclass MTP' + name + ' { // RPC method \'' + nametype.group(1) + '\'\n'; # class
funcsText += 'public:\n';
prmsStr = [];
prmsInit = [];
prmsNames = [];
if (hasFlags != ''):
funcsText += '\tenum class Flag : int32 {\n';
maxbit = 0;
parentFlagsCheck['MTP' + name] = {};
for paramName in conditionsList:
funcsText += '\t\tf_' + paramName + ' = (1 << ' + conditions[paramName] + '),\n';
parentFlagsCheck['MTP' + name][paramName] = conditions[paramName];
maxbit = max(maxbit, int(conditions[paramName]));
if (maxbit > 0):
funcsText += '\n';
funcsText += '\t\tMAX_FIELD = (1 << ' + str(maxbit) + '),\n';
funcsText += '\t};\n';
funcsText += '\tQ_DECLARE_FLAGS(Flags, Flag);\n';
funcsText += '\tfriend inline Flags operator~(Flag v) { return QFlag(~static_cast<int32>(v)); }\n';
funcsText += '\n';
if (len(conditions)):
for paramName in conditionsList:
if (paramName in trivialConditions):
funcsText += '\tbool is_' + paramName + '() const { return v' + hasFlags + '.v & Flag::f_' + paramName + '; }\n';
else:
funcsText += '\tbool has_' + paramName + '() const { return v' + hasFlags + '.v & Flag::f_' + paramName + '; }\n';
funcsText += '\n';
if (len(prms) > len(trivialConditions)):
for paramName in prmsList:
if (paramName in trivialConditions):
continue;
paramType = prms[paramName];
prmsInit.append('v' + paramName + '(_' + paramName + ')');
prmsNames.append('_' + paramName);
if (paramName == isTemplate):
ptypeFull = paramType;
else:
ptypeFull = 'MTP' + paramType;
funcsText += '\t' + ptypeFull + ' v' + paramName + ';\n';
if (paramType in ['int', 'Int', 'bool', 'Bool', 'flags<Flags>']):
prmsStr.append(ptypeFull + ' _' + paramName);
else:
prmsStr.append('const ' + ptypeFull + ' &_' + paramName);
funcsText += '\n';
funcsText += '\tMTP' + name + '() {\n\t}\n'; # constructor
funcsText += '\tMTP' + name + '(const mtpPrime *&from, const mtpPrime *end, mtpTypeId cons = mtpc_' + name + ') {\n\t\tread(from, end, cons);\n\t}\n'; # stream constructor
if (len(prms) > len(trivialConditions)):
funcsText += '\tMTP' + name + '(' + ', '.join(prmsStr) + ') : ' + ', '.join(prmsInit) + ' {\n\t}\n';
funcsText += '\n';
funcsText += '\tuint32 innerLength() const {\n'; # count size
size = [];
for k in prmsList:
v = prms[k];
if (k in conditionsList):
if (not k in trivialConditions):
size.append('(has_' + k + '() ? v' + k + '.innerLength() : 0)');
else:
size.append('v' + k + '.innerLength()');
if (not len(size)):
size.append('0');
funcsText += '\t\treturn ' + ' + '.join(size) + ';\n';
funcsText += '\t}\n';
funcsText += '\tmtpTypeId type() const {\n\t\treturn mtpc_' + name + ';\n\t}\n'; # type id
funcsText += '\tvoid read(const mtpPrime *&from, const mtpPrime *end, mtpTypeId cons = mtpc_' + name + ') {\n'; # read method
for k in prmsList:
v = prms[k];
if (k in conditionsList):
if (not k in trivialConditions):
funcsText += '\t\tif (has_' + k + '()) { v' + k + '.read(from, end); } else { v' + k + ' = MTP' + v + '(); }\n';
else:
funcsText += '\t\tv' + k + '.read(from, end);\n';
funcsText += '\t}\n';
funcsText += '\tvoid write(mtpBuffer &to) const {\n'; # write method
for k in prmsList:
v = prms[k];
if (k in conditionsList):
if (not k in trivialConditions):
funcsText += '\t\tif (has_' + k + '()) v' + k + '.write(to);\n';
else:
funcsText += '\t\tv' + k + '.write(to);\n';
funcsText += '\t}\n';
if (isTemplate != ''):
funcsText += '\n\ttypedef typename TQueryType::ResponseType ResponseType;\n';
else:
funcsText += '\n\ttypedef MTP' + resType + ' ResponseType;\n'; # method return type
funcsText += '};\n'; # class ending
if (len(conditionsList)):
funcsText += 'Q_DECLARE_OPERATORS_FOR_FLAGS(MTP' + name + '::Flags)\n\n';
if (isTemplate != ''):
funcsText += 'template <typename TQueryType>\n';
funcsText += 'class MTP' + Name + ' : public MTPBoxed<MTP' + name + '<TQueryType> > {\n';
funcsText += 'public:\n';
funcsText += '\tMTP' + Name + '() {\n\t}\n';
funcsText += '\tMTP' + Name + '(const MTP' + name + '<TQueryType> &v) : MTPBoxed<MTP' + name + '<TQueryType> >(v) {\n\t}\n';
if (len(prms) > len(trivialConditions)):
funcsText += '\tMTP' + Name + '(' + ', '.join(prmsStr) + ') : MTPBoxed<MTP' + name + '<TQueryType> >(MTP' + name + '<TQueryType>(' + ', '.join(prmsNames) + ')) {\n\t}\n';
funcsText += '};\n';
else:
funcsText += 'class MTP' + Name + ' : public MTPBoxed<MTP' + name + '> {\n';
funcsText += 'public:\n';
funcsText += '\tMTP' + Name + '() {\n\t}\n';
funcsText += '\tMTP' + Name + '(const MTP' + name + ' &v) : MTPBoxed<MTP' + name + '>(v) {\n\t}\n';
funcsText += '\tMTP' + Name + '(const mtpPrime *&from, const mtpPrime *end, mtpTypeId cons = 0) : MTPBoxed<MTP' + name + '>(from, end, cons) {\n\t}\n';
if (len(prms) > len(trivialConditions)):
funcsText += '\tMTP' + Name + '(' + ', '.join(prmsStr) + ') : MTPBoxed<MTP' + name + '>(MTP' + name + '(' + ', '.join(prmsNames) + ')) {\n\t}\n';
funcsText += '};\n';
funcs = funcs + 1;
if (not restype in funcsDict):
funcsList.append(restype);
funcsDict[restype] = [];
# TypesDict[restype] = resType;
funcsDict[restype].append([name, typeid, prmsList, prms, hasFlags, conditionsList, conditions, trivialConditions]);
else:
if (isTemplate != ''):
print('Template types not allowed: "' + resType + '" in line: ' + line);
continue;
if (not restype in typesDict):
typesList.append(restype);
typesDict[restype] = [];
TypesDict[restype] = resType;
typesDict[restype].append([name, typeid, prmsList, prms, hasFlags, conditionsList, conditions, trivialConditions]);
consts = consts + 1;
# text serialization: types and funcs
def addTextSerialize(lst, dct, dataLetter):
result = '';
for restype in lst:
v = dct[restype];
for data in v:
name = data[0];
prmsList = data[2];
prms = data[3];
hasFlags = data[4];
conditionsList = data[5];
conditions = data[6];
trivialConditions = data[7];
result += 'void _serialize_' + name + '(MTPStringLogger &to, int32 stage, int32 lev, Types &types, Types &vtypes, StagesFlags &stages, StagesFlags &flags, const mtpPrime *start, const mtpPrime *end, int32 iflag) {\n';
if (len(conditions)):
result += '\tMTP' + dataLetter + name + '::Flags flag(iflag);\n\n';
if (len(prms)):
result += '\tif (stage) {\n';
result += '\t\tto.add(",\\n").addSpaces(lev);\n';
result += '\t} else {\n';
result += '\t\tto.add("{ ' + name + '");\n';
result += '\t\tto.add("\\n").addSpaces(lev);\n';
result += '\t}\n';
result += '\tswitch (stage) {\n';
stage = 0;
for k in prmsList:
v = prms[k];
result += '\tcase ' + str(stage) + ': to.add(" ' + k + ': "); ++stages.back(); ';
if (k == hasFlags):
result += 'if (start >= end) throw Exception("start >= end in flags"); else flags.back() = *start; ';
if (k in trivialConditions):
result += 'if (flag & MTP' + dataLetter + name + '::Flag::f_' + k + ') { ';
result += 'to.add("YES [ BY BIT ' + conditions[k] + ' IN FIELD ' + hasFlags + ' ]"); ';
result += '} else { to.add("[ SKIPPED BY BIT ' + conditions[k] + ' IN FIELD ' + hasFlags + ' ]"); } ';
else:
if (k in conditions):
result += 'if (flag & MTP' + dataLetter + name + '::Flag::f_' + k + ') { ';
result += 'types.push_back(';
vtypeget = re.match(r'^[Vv]ector<MTP([A-Za-z0-9\._]+)>', v);
if (vtypeget):
if (not re.match(r'^[A-Z]', v)):
result += 'mtpc_vector';
else:
result += '0';
restype = vtypeget.group(1);
try:
if boxed[restype]:
restype = 0;
except KeyError:
if re.match(r'^[A-Z]', restype):
restype = 0;
else:
restype = v;
try:
if boxed[restype]:
restype = 0;
except KeyError:
if re.match(r'^[A-Z]', restype):
restype = 0;
if (restype):
try:
conses = typesDict[restype];
if (len(conses) > 1):
print('Complex bare type found: "' + restype + '" trying to serialize "' + k + '" of type "' + v + '"');
continue;
if (vtypeget):
result += '); vtypes.push_back(';
result += 'mtpc_' + conses[0][0];
if (not vtypeget):
result += '); vtypes.push_back(0';
except KeyError:
if (vtypeget):
result += '); vtypes.push_back(';
if (re.match(r'^flags<', restype)):
result += 'mtpc_flags';
else:
result += 'mtpc_' + restype + '+0';
if (not vtypeget):
result += '); vtypes.push_back(0';
else:
result += '0); vtypes.push_back(0';
result += '); stages.push_back(0); flags.push_back(0); ';
if (k in conditions):
result += '} else { to.add("[ SKIPPED BY BIT ' + conditions[k] + ' IN FIELD ' + hasFlags + ' ]"); } ';
result += 'break;\n';
stage = stage + 1;
result += '\tdefault: to.add("}"); types.pop_back(); vtypes.pop_back(); stages.pop_back(); flags.pop_back(); break;\n';
result += '\t}\n';
else:
result += '\tto.add("{ ' + name + ' }"); types.pop_back(); vtypes.pop_back(); stages.pop_back(); flags.pop_back();\n';
result += '}\n\n';
return result;
# text serialization: types and funcs
def addTextSerializeInit(lst, dct):
result = '';
for restype in lst:
v = dct[restype];
for data in v:
name = data[0];
result += '\t\t_serializers.insert(mtpc_' + name + ', _serialize_' + name + ');\n';
return result;
textSerializeMethods += addTextSerialize(typesList, typesDict, 'D');
textSerializeInit += addTextSerializeInit(typesList, typesDict) + '\n';
textSerializeMethods += addTextSerialize(funcsList, funcsDict, '');
textSerializeInit += addTextSerializeInit(funcsList, funcsDict) + '\n';
for restype in typesList:
v = typesDict[restype];
resType = TypesDict[restype];
withData = 0;
creatorsText = '';
constructsText = '';
constructsInline = '';
forwards += 'class MTP' + restype + ';\n';
forwTypedefs += 'typedef MTPBoxed<MTP' + restype + '> MTP' + resType + ';\n';
withType = (len(v) > 1);
switchLines = '';
friendDecl = '';
getters = '';
reader = '';
writer = '';
sizeList = [];
sizeFast = '';
newFast = '';
sizeCases = '';
for data in v:
name = data[0];
typeid = data[1];
prmsList = data[2];
prms = data[3];
hasFlags = data[4];
conditionsList = data[5];
conditions = data[6];
trivialConditions = data[7];
dataText = '';
dataText += '\nclass MTPD' + name + ' : public mtpDataImpl<MTPD' + name + '> {\n'; # data class
dataText += 'public:\n';
sizeList = [];
creatorParams = [];
creatorParamsList = [];
readText = '';
writeText = '';
if (hasFlags != ''):
dataText += '\tenum class Flag : int32 {\n';
maxbit = 0;
parentFlagsCheck['MTPD' + name] = {};
for paramName in conditionsList:
dataText += '\t\tf_' + paramName + ' = (1 << ' + conditions[paramName] + '),\n';
parentFlagsCheck['MTPD' + name][paramName] = conditions[paramName];
maxbit = max(maxbit, int(conditions[paramName]));
if (maxbit > 0):
dataText += '\n';
dataText += '\t\tMAX_FIELD = (1 << ' + str(maxbit) + '),\n';
dataText += '\t};\n';
dataText += '\tQ_DECLARE_FLAGS(Flags, Flag);\n';
dataText += '\tfriend inline Flags operator~(Flag v) { return QFlag(~static_cast<int32>(v)); }\n';
dataText += '\n';
if (len(conditions)):
for paramName in conditionsList:
if (paramName in trivialConditions):
dataText += '\tbool is_' + paramName + '() const { return v' + hasFlags + '.v & Flag::f_' + paramName + '; }\n';
else:
dataText += '\tbool has_' + paramName + '() const { return v' + hasFlags + '.v & Flag::f_' + paramName + '; }\n';
dataText += '\n';
dataText += '\tMTPD' + name + '() {\n\t}\n'; # default constructor
switchLines += '\t\tcase mtpc_' + name + ': '; # for by-type-id type constructor
if (len(prms) > len(trivialConditions)):
switchLines += 'setData(new MTPD' + name + '()); ';
withData = 1;
getters += '\n\tMTPD' + name + ' &_' + name + '() {\n'; # splitting getter
if (withType):
getters += '\t\tt_assert(data != nullptr && _type == mtpc_' + name + ');\n';
else:
getters += '\t\tt_assert(data != nullptr);\n';
getters += '\t\tsplit();\n';
getters += '\t\treturn *(MTPD' + name + '*)data;\n';
getters += '\t}\n';
getters += '\tconst MTPD' + name + ' &c_' + name + '() const {\n'; # const getter
if (withType):
getters += '\t\tt_assert(data != nullptr && _type == mtpc_' + name + ');\n';
else:
getters += '\t\tt_assert(data != nullptr);\n';
getters += '\t\treturn *(const MTPD' + name + '*)data;\n';
getters += '\t}\n';
constructsText += '\texplicit MTP' + restype + '(MTPD' + name + ' *_data);\n'; # by-data type constructor
constructsInline += 'inline MTP' + restype + '::MTP' + restype + '(MTPD' + name + ' *_data) : mtpDataOwner(_data)';
if (withType):
constructsInline += ', _type(mtpc_' + name + ')';
constructsInline += ' {\n}\n';
dataText += '\tMTPD' + name + '('; # params constructor
prmsStr = [];
prmsInit = [];
for paramName in prmsList:
if (paramName in trivialConditions):
continue;
paramType = prms[paramName];
if (paramType in ['int', 'Int', 'bool', 'Bool']):
prmsStr.append('MTP' + paramType + ' _' + paramName);
creatorParams.append('MTP' + paramType + ' _' + paramName);
else:
prmsStr.append('const MTP' + paramType + ' &_' + paramName);
creatorParams.append('const MTP' + paramType + ' &_' + paramName);
creatorParamsList.append('_' + paramName);
prmsInit.append('v' + paramName + '(_' + paramName + ')');
if (withType):
readText += '\t\t';
writeText += '\t\t';
if (paramName in conditions):
readText += '\tif (v.has_' + paramName + '()) { v.v' + paramName + '.read(from, end); } else { v.v' + paramName + ' = MTP' + paramType + '(); }\n';
writeText += '\tif (v.has_' + paramName + '()) v.v' + paramName + '.write(to);\n';
sizeList.append('(v.has_' + paramName + '() ? v.v' + paramName + '.innerLength() : 0)');
else:
readText += '\tv.v' + paramName + '.read(from, end);\n';
writeText += '\tv.v' + paramName + '.write(to);\n';
sizeList.append('v.v' + paramName + '.innerLength()');
forwards += 'class MTPD' + name + ';\n'; # data class forward declaration
dataText += ', '.join(prmsStr) + ') : ' + ', '.join(prmsInit) + ' {\n\t}\n';
dataText += '\n';
for paramName in prmsList: # fields declaration
if (paramName in trivialConditions):
continue;
paramType = prms[paramName];
dataText += '\tMTP' + paramType + ' v' + paramName + ';\n';
sizeCases += '\t\tcase mtpc_' + name + ': {\n';
sizeCases += '\t\t\tconst MTPD' + name + ' &v(c_' + name + '());\n';
sizeCases += '\t\t\treturn ' + ' + '.join(sizeList) + ';\n';
sizeCases += '\t\t}\n';
sizeFast = '\tconst MTPD' + name + ' &v(c_' + name + '());\n\treturn ' + ' + '.join(sizeList) + ';\n';
newFast = 'new MTPD' + name + '()';
else:
sizeFast = '\treturn 0;\n';
switchLines += 'break;\n';
dataText += '};\n'; # class ending
if (len(prms) > len(trivialConditions)):
dataTexts += dataText; # add data class
if (not friendDecl):
friendDecl += '\tfriend class MTP::internal::TypeCreator;\n';
creatorProxyText += '\tinline static MTP' + restype + ' new_' + name + '(' + ', '.join(creatorParams) + ') {\n';
if (len(prms) > len(trivialConditions)): # creator with params
creatorProxyText += '\t\treturn MTP' + restype + '(new MTPD' + name + '(' + ', '.join(creatorParamsList) + '));\n';
else:
if (withType): # creator by type
creatorProxyText += '\t\treturn MTP' + restype + '(mtpc_' + name + ');\n';
else: # single creator
creatorProxyText += '\t\treturn MTP' + restype + '();\n';
creatorProxyText += '\t}\n';
if (len(conditionsList)):
creatorsText += 'Q_DECLARE_OPERATORS_FOR_FLAGS(MTPD' + name + '::Flags)\n';
creatorsText += 'inline MTP' + restype + ' MTP_' + name + '(' + ', '.join(creatorParams) + ') {\n';
creatorsText += '\treturn MTP::internal::TypeCreator::new_' + name + '(' + ', '.join(creatorParamsList) + ');\n';
creatorsText += '}\n';
if (withType):
reader += '\t\tcase mtpc_' + name + ': _type = cons; '; # read switch line
if (len(prms) > len(trivialConditions)):
reader += '{\n';
reader += '\t\t\tif (!data) setData(new MTPD' + name + '());\n';
reader += '\t\t\tMTPD' + name + ' &v(_' + name + '());\n';
reader += readText;
reader += '\t\t} break;\n';
writer += '\t\tcase mtpc_' + name + ': {\n'; # write switch line
writer += '\t\t\tconst MTPD' + name + ' &v(c_' + name + '());\n';
writer += writeText;
writer += '\t\t} break;\n';
else:
reader += 'break;\n';
else:
if (len(prms) > len(trivialConditions)):
reader += '\n\tif (!data) setData(new MTPD' + name + '());\n';
reader += '\tMTPD' + name + ' &v(_' + name + '());\n';
reader += readText;
writer += '\tconst MTPD' + name + ' &v(c_' + name + '());\n';
writer += writeText;
forwards += '\n';
typesText += '\nclass MTP' + restype; # type class declaration
if (withData):
typesText += ' : private mtpDataOwner'; # if has data fields
typesText += ' {\n';
typesText += 'public:\n';
typesText += '\tMTP' + restype + '()'; # default constructor
inits = [];
if (withType):
if (withData):
inits.append('mtpDataOwner(0)');
inits.append('_type(0)');
else:
if (withData):
inits.append('mtpDataOwner(' + newFast + ')');
if (withData and not withType):
typesText += ';\n';
inlineMethods += '\ninline MTP' + restype + '::MTP' + restype + '()';
if (inits):
inlineMethods += ' : ' + ', '.join(inits);
inlineMethods += ' {\n}\n';
else:
if (inits):
typesText += ' : ' + ', '.join(inits);
typesText += ' {\n\t}\n';
inits = [];
if (withData):
inits.append('mtpDataOwner(0)');
if (withType):
inits.append('_type(0)');
typesText += '\tMTP' + restype + '(const mtpPrime *&from, const mtpPrime *end, mtpTypeId cons';
if (not withType):
typesText += ' = mtpc_' + name;
typesText += ')'; # read constructor
if (inits):
typesText += ' : ' + ', '.join(inits);
typesText += ' {\n\t\tread(from, end, cons);\n\t}\n';
if (withData):
typesText += getters;
typesText += '\n\tuint32 innerLength() const;\n'; # size method
inlineMethods += '\ninline uint32 MTP' + restype + '::innerLength() const {\n';
if (withType and sizeCases):
inlineMethods += '\tswitch (_type) {\n';
inlineMethods += sizeCases;
inlineMethods += '\t}\n';
inlineMethods += '\treturn 0;\n';
else:
inlineMethods += sizeFast;
inlineMethods += '}\n';
typesText += '\tmtpTypeId type() const;\n'; # type id method
inlineMethods += 'inline mtpTypeId MTP' + restype + '::type() const {\n';
if (withType):
inlineMethods += '\tt_assert(_type != 0);\n';
inlineMethods += '\treturn _type;\n';
else:
inlineMethods += '\treturn mtpc_' + v[0][0] + ';\n';
inlineMethods += '}\n';
typesText += '\tvoid read(const mtpPrime *&from, const mtpPrime *end, mtpTypeId cons'; # read method
if (not withType):
typesText += ' = mtpc_' + name;
typesText += ');\n';
inlineMethods += 'inline void MTP' + restype + '::read(const mtpPrime *&from, const mtpPrime *end, mtpTypeId cons) {\n';
if (withData):
if (withType):
inlineMethods += '\tif (cons != _type) setData(0);\n';
else:
inlineMethods += '\tif (cons != mtpc_' + v[0][0] + ') throw mtpErrorUnexpected(cons, "MTP' + restype + '");\n';
if (withType):
inlineMethods += '\tswitch (cons) {\n'
inlineMethods += reader;
inlineMethods += '\t\tdefault: throw mtpErrorUnexpected(cons, "MTP' + restype + '");\n';
inlineMethods += '\t}\n';
else:
inlineMethods += reader;
inlineMethods += '}\n';
typesText += '\tvoid write(mtpBuffer &to) const;\n'; # write method
inlineMethods += 'inline void MTP' + restype + '::write(mtpBuffer &to) const {\n';
if (withType and writer != ''):
inlineMethods += '\tswitch (_type) {\n';
inlineMethods += writer;
inlineMethods += '\t}\n';
else:
inlineMethods += writer;
inlineMethods += '}\n';
typesText += '\n\ttypedef void ResponseType;\n'; # no response types declared
typesText += '\nprivate:\n'; # private constructors
if (withType): # by-type-id constructor
typesText += '\texplicit MTP' + restype + '(mtpTypeId type);\n';
inlineMethods += 'inline MTP' + restype + '::MTP' + restype + '(mtpTypeId type) : ';
if (withData):
inlineMethods += 'mtpDataOwner(0), ';
inlineMethods += '_type(type)';
inlineMethods += ' {\n';
inlineMethods += '\tswitch (type) {\n'; # type id check
inlineMethods += switchLines;
inlineMethods += '\t\tdefault: throw mtpErrorBadTypeId(type, "MTP' + restype + '");\n\t}\n';
inlineMethods += '}\n'; # by-type-id constructor end
if (withData):
typesText += constructsText;
inlineMethods += constructsInline;
if (friendDecl):
typesText += '\n' + friendDecl;
if (withType):
typesText += '\n\tmtpTypeId _type;\n'; # type field var
typesText += '};\n'; # type class ended
inlineMethods += creatorsText;
typesText += 'typedef MTPBoxed<MTP' + restype + '> MTP' + resType + ';\n'; # boxed type definition
for childName in parentFlagsList:
parentName = parentFlags[childName];
for flag in parentFlagsCheck[childName]:
if (not flag in parentFlagsCheck[parentName]):
print('Flag ' + flag + ' not found in ' + parentName + ' which should be a flags-parent of ' + childName);
error
elif (parentFlagsCheck[childName][flag] != parentFlagsCheck[parentName][flag]):
print('Flag ' + flag + ' has different value in ' + parentName + ' which should be a flags-parent of ' + childName);
error
inlineMethods += 'inline ' + parentName + '::Flags mtpCastFlags(' + childName + '::Flags flags) { return ' + parentName + '::Flags(QFlag(flags)); }\n';
inlineMethods += 'inline ' + parentName + '::Flags mtpCastFlags(MTPflags<' + childName + '::Flags> flags) { return mtpCastFlags(flags.v); }\n';
# manual types added here
textSerializeMethods += 'void _serialize_rpc_result(MTPStringLogger &to, int32 stage, int32 lev, Types &types, Types &vtypes, StagesFlags &stages, StagesFlags &flags, const mtpPrime *start, const mtpPrime *end, int32 iflag) {\n';
textSerializeMethods += '\tif (stage) {\n';
textSerializeMethods += '\t\tto.add(",\\n").addSpaces(lev);\n';
textSerializeMethods += '\t} else {\n';
textSerializeMethods += '\t\tto.add("{ rpc_result");\n';
textSerializeMethods += '\t\tto.add("\\n").addSpaces(lev);\n';
textSerializeMethods += '\t}\n';
textSerializeMethods += '\tswitch (stage) {\n';
textSerializeMethods += '\tcase 0: to.add(" req_msg_id: "); ++stages.back(); types.push_back(mtpc_long); vtypes.push_back(0); stages.push_back(0); flags.push_back(0); break;\n';
textSerializeMethods += '\tcase 1: to.add(" result: "); ++stages.back(); types.push_back(0); vtypes.push_back(0); stages.push_back(0); flags.push_back(0); break;\n';
textSerializeMethods += '\tdefault: to.add("}"); types.pop_back(); vtypes.pop_back(); stages.pop_back(); flags.pop_back(); break;\n';
textSerializeMethods += '\t}\n';
textSerializeMethods += '}\n\n';
textSerializeInit += '\t\t_serializers.insert(mtpc_rpc_result, _serialize_rpc_result);\n';
textSerializeMethods += 'void _serialize_msg_container(MTPStringLogger &to, int32 stage, int32 lev, Types &types, Types &vtypes, StagesFlags &stages, StagesFlags &flags, const mtpPrime *start, const mtpPrime *end, int32 iflag) {\n';
textSerializeMethods += '\tif (stage) {\n';
textSerializeMethods += '\t\tto.add(",\\n").addSpaces(lev);\n';
textSerializeMethods += '\t} else {\n';
textSerializeMethods += '\t\tto.add("{ msg_container");\n';
textSerializeMethods += '\t\tto.add("\\n").addSpaces(lev);\n';
textSerializeMethods += '\t}\n';
textSerializeMethods += '\tswitch (stage) {\n';
textSerializeMethods += '\tcase 0: to.add(" messages: "); ++stages.back(); types.push_back(mtpc_vector); vtypes.push_back(mtpc_core_message); stages.push_back(0); flags.push_back(0); break;\n';
textSerializeMethods += '\tdefault: to.add("}"); types.pop_back(); vtypes.pop_back(); stages.pop_back(); flags.pop_back(); break;\n';
textSerializeMethods += '\t}\n';
textSerializeMethods += '}\n\n';
textSerializeInit += '\t\t_serializers.insert(mtpc_msg_container, _serialize_msg_container);\n';
textSerializeMethods += 'void _serialize_core_message(MTPStringLogger &to, int32 stage, int32 lev, Types &types, Types &vtypes, StagesFlags &stages, StagesFlags &flags, const mtpPrime *start, const mtpPrime *end, int32 iflag) {\n';
textSerializeMethods += '\tif (stage) {\n';
textSerializeMethods += '\t\tto.add(",\\n").addSpaces(lev);\n';
textSerializeMethods += '\t} else {\n';
textSerializeMethods += '\t\tto.add("{ core_message");\n';
textSerializeMethods += '\t\tto.add("\\n").addSpaces(lev);\n';
textSerializeMethods += '\t}\n';
textSerializeMethods += '\tswitch (stage) {\n';
textSerializeMethods += '\tcase 0: to.add(" msg_id: "); ++stages.back(); types.push_back(mtpc_long); vtypes.push_back(0); stages.push_back(0); flags.push_back(0); break;\n';
textSerializeMethods += '\tcase 1: to.add(" seq_no: "); ++stages.back(); types.push_back(mtpc_int); vtypes.push_back(0); stages.push_back(0); flags.push_back(0); break;\n';
textSerializeMethods += '\tcase 2: to.add(" bytes: "); ++stages.back(); types.push_back(mtpc_int); vtypes.push_back(0); stages.push_back(0); flags.push_back(0); break;\n';
textSerializeMethods += '\tcase 3: to.add(" body: "); ++stages.back(); types.push_back(0); vtypes.push_back(0); stages.push_back(0); flags.push_back(0); break;\n';
textSerializeMethods += '\tdefault: to.add("}"); types.pop_back(); vtypes.pop_back(); stages.pop_back(); flags.pop_back(); break;\n';
textSerializeMethods += '\t}\n';
textSerializeMethods += '}\n\n';
textSerializeInit += '\t\t_serializers.insert(mtpc_core_message, _serialize_core_message);\n';
textSerializeFull = '\nvoid mtpTextSerializeType(MTPStringLogger &to, const mtpPrime *&from, const mtpPrime *end, mtpPrime cons, uint32 level, mtpPrime vcons) {\n';
textSerializeFull += '\tif (_serializers.isEmpty()) initTextSerializers();\n\n';
textSerializeFull += '\tQVector<mtpTypeId> types, vtypes;\n';
textSerializeFull += '\tQVector<int32> stages, flags;\n';
textSerializeFull += '\ttypes.reserve(20); vtypes.reserve(20); stages.reserve(20); flags.reserve(20);\n';
textSerializeFull += '\ttypes.push_back(mtpTypeId(cons)); vtypes.push_back(mtpTypeId(vcons)); stages.push_back(0); flags.push_back(0);\n\n';
textSerializeFull += '\tconst mtpPrime *start = from;\n';
textSerializeFull += '\tmtpTypeId type = cons, vtype = vcons;\n';
textSerializeFull += '\tint32 stage = 0, flag = 0;\n\n';
textSerializeFull += '\twhile (!types.isEmpty()) {\n';
textSerializeFull += '\t\ttype = types.back();\n';
textSerializeFull += '\t\tvtype = vtypes.back();\n';
textSerializeFull += '\t\tstage = stages.back();\n';
textSerializeFull += '\t\tflag = flags.back();\n';
textSerializeFull += '\t\tif (!type) {\n';
textSerializeFull += '\t\t\tif (from >= end) {\n';
textSerializeFull += '\t\t\t\tthrow Exception("from >= end");\n';
textSerializeFull += '\t\t\t} else if (stage) {\n';
textSerializeFull += '\t\t\t\tthrow Exception("unknown type on stage > 0");\n';
textSerializeFull += '\t\t\t}\n';
textSerializeFull += '\t\t\ttypes.back() = type = *from;\n';
textSerializeFull += '\t\t\tstart = ++from;\n';
textSerializeFull += '\t\t}\n\n';
textSerializeFull += '\t\tint32 lev = level + types.size() - 1;\n';
textSerializeFull += '\t\tTextSerializers::const_iterator it = _serializers.constFind(type);\n';
textSerializeFull += '\t\tif (it != _serializers.cend()) {\n';
textSerializeFull += '\t\t\t(*it.value())(to, stage, lev, types, vtypes, stages, flags, start, end, flag);\n';
textSerializeFull += '\t\t} else {\n';
textSerializeFull += '\t\t\tmtpTextSerializeCore(to, from, end, type, lev, vtype);\n';
textSerializeFull += '\t\t\ttypes.pop_back(); vtypes.pop_back(); stages.pop_back(); flags.pop_back();\n';
textSerializeFull += '\t\t}\n';
textSerializeFull += '\t}\n';
textSerializeFull += '}\n';
out.write('\n// Creator current layer and proxy class declaration\n');
out.write('namespace MTP {\nnamespace internal {\n\n' + layer + '\n\n');
out.write('class TypeCreator;\n\n} // namespace internal\n} // namespace MTP\n');
out.write('\n// Type id constants\nenum {\n' + ',\n'.join(enums) + '\n};\n');
out.write('\n// Type forward declarations\n' + forwards);
out.write('\n// Boxed types definitions\n' + forwTypedefs);
out.write('\n// Type classes definitions\n' + typesText);
out.write('\n// Type constructors with data\n' + dataTexts);
out.write('\n// RPC methods\n' + funcsText);
out.write('\n// Creator proxy class definition\nnamespace MTP {\nnamespace internal {\n\nclass TypeCreator {\npublic:\n' + creatorProxyText + '\t};\n\n} // namespace internal\n} // namespace MTP\n');
out.write('\n// Inline methods definition\n' + inlineMethods);
out.write('\n// Human-readable text serialization\nvoid mtpTextSerializeType(MTPStringLogger &to, const mtpPrime *&from, const mtpPrime *end, mtpPrime cons, uint32 level, mtpPrime vcons);\n');
outCpp = open('scheme_auto.cpp', 'w');
outCpp.write('/*\n');
outCpp.write('Created from \'/SourceFiles/mtproto/scheme.tl\' by \'/SourceFiles/mtproto/generate.py\' script\n\n');
outCpp.write('WARNING! All changes made in this file will be lost!\n\n');
outCpp.write('This file is part of Telegram Desktop,\n');
outCpp.write('the official desktop version of Telegram messaging app, see https://telegram.org\n');
outCpp.write('\n');
outCpp.write('Telegram Desktop is free software: you can redistribute it and/or modify\n');
outCpp.write('it under the terms of the GNU General Public License as published by\n');
outCpp.write('the Free Software Foundation, either version 3 of the License, or\n');
outCpp.write('(at your option) any later version.\n');
outCpp.write('\n');
outCpp.write('It is distributed in the hope that it will be useful,\n');
outCpp.write('but WITHOUT ANY WARRANTY; without even the implied warranty of\n');
outCpp.write('MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n');
outCpp.write('GNU General Public License for more details.\n');
outCpp.write('\n');
outCpp.write('Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE\n');
outCpp.write('Copyright (c) 2014 John Preston, https://desktop.telegram.org\n');
outCpp.write('*/\n');
outCpp.write('#include "stdafx.h"\n\n#include "mtproto/scheme_auto.h"\n\n');
outCpp.write('typedef QVector<mtpTypeId> Types;\ntypedef QVector<int32> StagesFlags;\n\n');
outCpp.write(textSerializeMethods);
outCpp.write('namespace {\n');
outCpp.write('\ttypedef void(*mtpTextSerializer)(MTPStringLogger &to, int32 stage, int32 lev, Types &types, Types &vtypes, StagesFlags &stages, StagesFlags &flags, const mtpPrime *start, const mtpPrime *end, int32 iflag);\n');
outCpp.write('\ttypedef QMap<mtpTypeId, mtpTextSerializer> TextSerializers;\n\tTextSerializers _serializers;\n\n');
outCpp.write('\tvoid initTextSerializers() {\n');
outCpp.write(textSerializeInit);
outCpp.write('\t}\n}\n');
outCpp.write(textSerializeFull + '\n');
print('Done, written {0} constructors, {1} functions.'.format(consts, funcs));
| gpl-3.0 | -52,681,385,546,284,330 | 45.006061 | 232 | 0.573113 | false |
cades/qualitybots | src/appengine/common/useragent_parser.py | 26 | 6495 | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Browser User Agent (UA) Parser/detector."""
import re
WEBKIT = 'applewebkit'
GECKO = 'gecko'
CHROME = 'chrome'
CHROME_OS = 'cros'
FIREFOX = 'firefox'
LINUX = 'linux'
MAC = 'macintosh'
OS_X = 'os x'
UNKNOWN = 'unknown'
WIN = 'windows'
WIN_2000 = 'win_2000'
WIN_XP = 'win_xp'
WIN_VISTA = 'win_vista'
WIN_7 = 'win_7'
WIN_NT_VERSIONS = {'5.0': WIN_2000, '5.1': WIN_XP, '5.2': WIN_XP,
'6.0': WIN_VISTA, '6.1': WIN_7}
X11 = 'x11'
# Regular expressions.
BROWSER_INFO_REGEX = re.compile(r'(firefox|chrome)/([bpre0-9.]*)')
OS_INFO_REGEX = re.compile(r'\([^\)]*\)')
CROS_VERSION_REGEX = re.compile(r'cros\W+.*(\d+\.\d+\.\d+)')
WIN_VERSION_REGEX = re.compile(r'nt\W*(\d\.?\d?)')
MAC_VERSION_REGEX = re.compile(r'os\W+x\W+(\d+[._]\d+[._]?\d*)')
WEBKIT_ENGINE_REGEX = re.compile(r'applewebkit/([0-9.]*)')
GECKO_ENGINE_REGEX = re.compile(r'(rv:[bpre0-9.]*)\)\W+gecko')
class UAParserException(Exception):
pass
class MissingUAException(Exception):
pass
class UAParser(object):
"""Class for Parsing Browser's User Agent(UA) String.
Only supports parsing UA for chrome and firefox at this time.
Usage:
ua_parser = UAParser(user_agent)
# To get browser family.
ua_parser.GetBrowserFamily()
# To get browser version.
ua_parser.GetBrowserVersion()
Similarly OS and Layout family and version can be parsed.
Attributes:
user_agent_lowercase: User Agent String in Lowercase.
__browser_family: Browser family (e.g. Chrome, Firefox).
__browser_version: Browser version.
__os_family: Operating system family (e.g. Linux, Windows, cros).
__os_version: Operating system version.
__layout_engine_family: Browser layout engine family (e.g. applewebkit,
gecko)
__layout_engine_version: Browser layout engine version.
"""
def __init__(self, user_agent):
"""Init method for User Agent Parser.
Args:
user_agent: User Agent String.
Raises:
MissingUAException: Missing user agent string parameter.
"""
if not user_agent:
raise MissingUAException('Missing User agent parameter.')
self.user_agent_lowercase = user_agent.lower()
self.__browser_family = None
self.__browser_version = None
self.__os_family = None
self.__os_version = None
self.__layout_engine_family = None
self.__layout_engine_version = None
def _ParseBrowserInfo(self):
"""Parses browser family and version information from UA."""
    match = BROWSER_INFO_REGEX.search(self.user_agent_lowercase)
    if not match:
      raise UAParserException('Could not parse browser family from user agent.')
    browser_info = match.groups()
self.__browser_family = browser_info[0]
self.__browser_version = browser_info[1]
def GetBrowserFamily(self):
"""Parses browser family from UA.
Returns:
Browser family.
"""
if not self.__browser_family:
self._ParseBrowserInfo()
return self.__browser_family
def GetBrowserVersion(self):
"""Parses browser version from UA.
Returns:
Browser version.
"""
if not self.__browser_version:
self._ParseBrowserInfo()
return self.__browser_version
def _ParseOSInfo(self):
"""Parses OS family and version information from UA."""
# Let's look for anything within braces.
ua_parts = OS_INFO_REGEX.findall(self.user_agent_lowercase)
if not ua_parts:
return
# Let's get rid of opening and closing braces and split.
ua_os_part = ua_parts[0][1:-1].split(';')[0].strip()
# Check for linux family of OS.
if ua_os_part.find(X11) != -1:
# Let's check for chromeos.
if ua_parts[0].find(CHROME_OS) != -1:
self.__os_family = CHROME_OS
self.__os_version = CROS_VERSION_REGEX.findall(ua_parts[0])[0]
else:
self.__os_family = LINUX
self.__os_version = UNKNOWN
elif ua_os_part.find(WIN) != -1:
self.__os_family = WIN
win_version = WIN_VERSION_REGEX.findall(ua_parts[0])
if win_version:
self.__os_version = WIN_NT_VERSIONS[win_version[0]]
else:
self.__os_version = UNKNOWN
elif ua_os_part.find(MAC) != -1:
self.__os_family = MAC
mac_version = MAC_VERSION_REGEX.findall(ua_parts[0])
if mac_version:
self.__os_version = mac_version[0]
def GetOSFamily(self):
"""Parses OS family from UA.
Returns:
Operating System (OS) family.
"""
if not self.__os_family:
self._ParseOSInfo()
return self.__os_family
def GetOSVersion(self):
"""Parses OS version from UA.
Returns:
Operating system (OS) version.
"""
if not self.__os_version:
self._ParseOSInfo()
return self.__os_version
def _ParseLayoutEngineInfo(self):
"""Parses layout engine family and version information from UA."""
if not self.__browser_family:
self._ParseBrowserInfo()
if self.__browser_family == CHROME:
self.__layout_engine_family = WEBKIT
webkit_engine_info = WEBKIT_ENGINE_REGEX.findall(
self.user_agent_lowercase)
if webkit_engine_info:
self.__layout_engine_version = webkit_engine_info[0]
elif self.__browser_family == FIREFOX:
self.__layout_engine_family = GECKO
gecko_version = GECKO_ENGINE_REGEX.findall(
self.user_agent_lowercase)
if gecko_version:
self.__layout_engine_version = gecko_version[0]
def GetLayoutEngineFamily(self):
"""Parses layout engine family from UA.
Returns:
Layout Engine family.
"""
if not self.__layout_engine_family:
self._ParseLayoutEngineInfo()
return self.__layout_engine_family
def GetLayoutEngineVersion(self):
"""Parses layout engine version from UA.
Returns:
Layout Engine version.
"""
if not self.__layout_engine_version:
self._ParseLayoutEngineInfo()
return self.__layout_engine_version
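# Illustrative usage sketch: the user agent below is a hypothetical
# Chrome-on-Windows string, used only to exercise the getters above.
if __name__ == '__main__':
  _SAMPLE_UA = ('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 '
                '(KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36')
  _parser = UAParser(_SAMPLE_UA)
  print '%s %s on %s (%s), engine %s %s' % (
      _parser.GetBrowserFamily(), _parser.GetBrowserVersion(),
      _parser.GetOSFamily(), _parser.GetOSVersion(),
      _parser.GetLayoutEngineFamily(), _parser.GetLayoutEngineVersion())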
| apache-2.0 | -311,655,636,466,601,340 | 28.793578 | 80 | 0.653118 | false |
scitran/api | tests/unit_tests/python/test_gear_util.py | 2 | 1538 |
import copy
from api.jobs import gears
# DISCUSS: this basically asserts that the log helper doesn't throw, which is of non-zero but questionable value.
# Could instead be marked for pytest et. al to ignore coverage? Desirability? Compatibility?
def test_fill_defaults():
gear_config = {
'key_one': {'default': 1},
'key_two': {'default': 2},
'key_three': {'default': 3},
'key_no_de': {}
}
gear = {
'gear': {
'config': gear_config
}
}
# test sending in complete config does not change
config = {
'key_one': 4,
'key_two': 5,
'key_three': 6
}
result = gears.fill_gear_default_values(gear, config)
assert result['key_one'] == 4
assert result['key_two'] == 5
assert result['key_three'] == 6
# test sending in empty config
result = gears.fill_gear_default_values(gear, {})
assert result['key_one'] == 1
assert result['key_two'] == 2
assert result['key_three'] == 3
# test sending in None config
result = gears.fill_gear_default_values(gear, None)
assert result['key_one'] == 1
assert result['key_two'] == 2
assert result['key_three'] == 3
# test sending in semi-complete config
config = {
'key_one': None,
'key_two': []
#'key_three': 6 # missing
}
result = gears.fill_gear_default_values(gear, config)
assert result['key_one'] == None
assert result['key_two'] == []
assert result['key_three'] == 3
| mit | -6,420,825,812,181,520,000 | 26.464286 | 113 | 0.574122 | false |
hackultura/django-badger | badger/south_migrations/0006_auto__add_field_nomination_rejecter__add_field_nomination_rejection_re.py | 9 | 11582 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Nomination.rejected_by'
db.add_column('badger_nomination', 'rejected_by',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='nomination_rejected_by', null=True, to=orm['auth.User']),
keep_default=False)
# Adding field 'Nomination.rejected_reason'
db.add_column('badger_nomination', 'rejected_reason',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Nomination.rejected_by'
db.delete_column('badger_nomination', 'rejected_by_id')
# Deleting field 'Nomination.rejected_reason'
db.delete_column('badger_nomination', 'rejected_reason')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'badger.award': {
'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Award'},
'badge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['badger.Badge']"}),
'claim_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'db_index': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'award_creator'", 'null': 'True', 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'badger.badge': {
'Meta': {'ordering': "['-modified', '-created']", 'unique_together': "(('title', 'slug'),)", 'object_name': 'Badge'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'nominations_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'prerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['badger.Badge']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'unique': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'badger.deferredaward': {
'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'DeferredAward'},
'badge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['badger.Badge']"}),
'claim_code': ('django.db.models.fields.CharField', [], {'default': "'xamuuk'", 'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'claim_group': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'reusable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'badger.nomination': {
'Meta': {'object_name': 'Nomination'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'approver': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'nomination_approver'", 'null': 'True', 'to': "orm['auth.User']"}),
'award': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['badger.Award']", 'null': 'True', 'blank': 'True'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['badger.Badge']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'nomination_creator'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'nominee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nomination_nominee'", 'to': "orm['auth.User']"}),
'rejected_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'nomination_rejected_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'rejected_reason': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'badger.progress': {
'Meta': {'unique_together': "(('badge', 'user'),)", 'object_name': 'Progress'},
'badge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['badger.Badge']"}),
'counter': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('badger.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
'percent': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'progress_user'", 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['badger']
| bsd-3-clause | 6,520,274,571,262,319,000 | 78.875862 | 183 | 0.551718 | false |
psi29a/django-authopenid | django_authopenid/forms.py | 1 | 5436 | # -*- coding: utf-8 -*-
# Copyright 2007, 2008,2009 by Benoît Chesneau <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.utils.translation import ugettext as _
from django.conf import settings
# needed for some linux distributions like debian
try:
from openid.yadis import xri
except ImportError:
from yadis import xri
from django_authopenid.models import UserAssociation
class OpenidSigninForm(forms.Form):
""" signin form """
openid_url = forms.CharField(max_length=255,
widget=forms.widgets.TextInput(attrs={'class': 'required openid'}),
label=_("OpenID URL"))
def clean_openid_url(self):
""" test if openid is accepted """
if 'openid_url' in self.cleaned_data:
openid_url = self.cleaned_data['openid_url']
if xri.identifierScheme(openid_url) == 'XRI' and getattr(
settings, 'OPENID_DISALLOW_INAMES', False
):
raise forms.ValidationError(_('i-names are not supported'))
return self.cleaned_data['openid_url']
attrs_dict = { 'class': 'required login' }
username_re = re.compile(r'^\w+$')
class OpenidRegisterForm(forms.Form):
""" openid signin form """
username = forms.CharField(max_length=30,
widget=forms.widgets.TextInput(attrs=attrs_dict))
email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
maxlength=200)), label=_('Email address'))
def __init__(self, *args, **kwargs):
super(OpenidRegisterForm, self).__init__(*args, **kwargs)
self.user = None
def clean_username(self):
""" test if username is valid and exist in database """
if 'username' in self.cleaned_data:
if not username_re.search(self.cleaned_data['username']):
raise forms.ValidationError(_("Usernames can only contain \
letters, numbers and underscores"))
try:
user = User.objects.get(
username__exact = self.cleaned_data['username']
)
except User.DoesNotExist:
return self.cleaned_data['username']
except User.MultipleObjectsReturned:
raise forms.ValidationError(u'There is already more than one \
account registered with that username. Please try \
another.')
self.user = user
raise forms.ValidationError(_("This username is already \
taken. Please choose another."))
def clean_email(self):
"""For security reason one unique email in database"""
if 'email' in self.cleaned_data:
try:
user = User.objects.get(email = self.cleaned_data['email'])
except User.DoesNotExist:
return self.cleaned_data['email']
except User.MultipleObjectsReturned:
raise forms.ValidationError(u'There is already more than one \
account registered with that e-mail address. Please try \
another.')
raise forms.ValidationError(_("This email is already \
registered in our database. Please choose another."))
class AssociateOpenID(forms.Form):
""" new openid association form """
openid_url = forms.CharField(max_length=255,
widget=forms.widgets.TextInput(attrs={'class': 'required openid'}),
label=_("OpenID URL"))
def __init__(self, user, *args, **kwargs):
super(AssociateOpenID, self).__init__(*args, **kwargs)
self.user = user
def clean_openid_url(self):
""" test if openid is accepted """
if 'openid_url' in self.cleaned_data:
openid_url = self.cleaned_data['openid_url']
if xri.identifierScheme(openid_url) == 'XRI' and getattr(
settings, 'OPENID_DISALLOW_INAMES', False
):
raise forms.ValidationError(_('i-names are not supported'))
try:
rel = UserAssociation.objects.get(openid_url__exact=openid_url)
except UserAssociation.DoesNotExist:
return self.cleaned_data['openid_url']
if rel.user != self.user:
raise forms.ValidationError(_("This openid is already \
registered in our database by another account. Please choose another."))
raise forms.ValidationError(_("You already associated this openid to your account."))
class OpenidDissociateForm(OpenidSigninForm):
""" form used to dissociate an openid. """
openid_url = forms.CharField(max_length=255, widget=forms.widgets.HiddenInput())
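# Illustrative usage sketch (hypothetical values): roughly how a view would
# validate the signin form; the OpenID URL below is made up.
def _example_signin_validation():
    form = OpenidSigninForm({'openid_url': 'https://example.com/openid'})
    if form.is_valid():
        return form.cleaned_data['openid_url']
    return form.errors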
| apache-2.0 | -6,427,070,213,331,639,000 | 41.460938 | 97 | 0.616375 | false |
xparedesfortuny/Phot | analysis/multi_night_std_test.py | 1 | 2214 | # Author: Xavier Paredes-Fortuny ([email protected])
# License: MIT, see LICENSE.md
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
def compute_nightly_average(cat_mag):
    # cat_mag holds one 2-D array per night (rows: individual measurements,
    # columns: stars); average over rows to get one mean magnitude per star
    # per night, returning a nights-by-stars array.
    return np.asarray([[np.average(mag[:, i]) for i in xrange(len(mag[0, :]))] for mag in cat_mag])
def compute_statistics(cat_mag):
avg_mag = [np.average(cat_mag[:, k]) for k in xrange(len(cat_mag[0, :]))]
std_mag = [np.std(cat_mag[:, k]) for k in xrange(len(cat_mag[0, :]))]
return np.array(avg_mag), np.array(std_mag)
def plot(x, y, o):
plt.rcdefaults()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, '.')
ax.set_yscale('log')
ax.set_xlabel(r'$\overline{m}$ (mag)')
ax.set_ylabel(r'$\sigma_{m}$ (mag)')
ax.set_xlim((min(x)*(1-0.05), max(x)*(1+0.05)))
ax.set_ylim((min(y)*(1-0.05), max(y)*(1+0.05)))
ax.xaxis.set_minor_locator(MultipleLocator(0.5))
plt.table(cellText=[['N', r'$\overline{{\sigma}}$'],
[1, '{:.3f}'.format(y[0])],
[5, '{:.3f}'.format(np.average(y[0:5]))],
[10, '{:.3f}'.format(np.average(y[0:10]))],
[25, '{:.3f}'.format(np.average(y[0:25]))],
[50, '{:.3f}'.format(np.average(y[0:50]))],
[100, '{:.3f}'.format(np.average(y[0:100]))]],
colWidths=[0.1, 0.1],
loc='center left')
fig.savefig(o, bbox_inches='tight', pad_inches=0.05)
plt.close(fig)
def perform_test(cat_mag, o, ind=-1, ind_comp=-1, ind_ref=-1, ind_comp1=-1, ind_comp2=-1):
cat_mag_avg = compute_nightly_average(cat_mag)
avg_mag, std_mag = compute_statistics(cat_mag_avg)
# x = avg_mag[std_mag.argsort()]
# y = sorted(std_mag)
plot(avg_mag, std_mag, o)
if ind != -1:
flag = np.zeros(len(avg_mag))
flag[ind] = 1
flag[ind_comp] = 2
flag[ind_ref] = 3
flag[ind_comp1] = 4
flag[ind_comp2] = 5
np.savetxt(o[:-3]+'dat', np.transpose((avg_mag, std_mag, flag)), delimiter=' ')
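# Illustrative sketch (hypothetical data): three nights of 10 measurements of
# 5 stars, fed through perform_test to write a plot named 'std_test.png'.
def _example_run():
    fake_nights = [np.random.normal(15.0, 0.01, (10, 5)) for _ in range(3)]
    perform_test(fake_nights, 'std_test.png')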
if __name__ == '__main__':
# Testing
print('STOP: Testing should be done from analysis.py')
| mit | 1,029,043,583,157,024,300 | 34.709677 | 99 | 0.542909 | false |
wslihgt/pyfasst | pyfasst/tftransforms/nsgt/nsgtf.py | 1 | 3768 | '''
Created on 05.11.2011
@author: thomas
'''
import numpy as N
from math import ceil
from itertools import izip
from util import chkM,fftp,ifftp
try:
import theano as T
except ImportError:
T = None
#@profile
def nsgtf_sl(f_slices,g,wins,nn,M=None,real=False,reducedform=0,measurefft=False):
M = chkM(M,g)
fft = fftp(measure=measurefft)
ifft = ifftp(measure=measurefft)
if real:
assert 0 <= reducedform <= 2
sl = slice(reducedform,len(g)//2+1-reducedform)
else:
sl = slice(0,None)
maxLg = max(int(ceil(float(len(gii))/mii))*mii for mii,gii in izip(M[sl],g[sl]))
temp0 = None
loopparams = []
for mii,gii,win_range in izip(M[sl],g[sl],wins[sl]):
Lg = len(gii)
col = int(ceil(float(Lg)/mii))
assert col*mii >= Lg
gi1 = gii[:(Lg+1)//2]
gi2 = gii[-(Lg//2):]
p = (mii,gii,gi1,gi2,win_range,Lg,col)
loopparams.append(p)
if True or T is None:
def loop(temp0):
c = [] # Initialization of the result
# The actual transform
# TODO: stuff loop into theano
for mii,gii,gi1,gi2,win_range,Lg,col in loopparams:
# Lg = len(gii)
# if the number of time channels is too small (mii < Lg), aliasing is introduced
# wrap around and sum up in the end (below)
# col = int(ceil(float(Lg)/mii)) # normally col == 1
# assert col*mii >= Lg
temp = temp0[:col*mii]
# original version
# t = ft[win_range]*N.fft.fftshift(N.conj(gii))
# temp[:(Lg+1)//2] = t[Lg//2:] # if mii is odd, this is of length mii-mii//2
# temp[-(Lg//2):] = t[:Lg//2] # if mii is odd, this is of length mii//2
# modified version to avoid superfluous memory allocation
t1 = temp[:(Lg+1)//2]
t1[:] = gi1 # if mii is odd, this is of length mii-mii//2
t2 = temp[-(Lg//2):]
t2[:] = gi2 # if mii is odd, this is of length mii//2
ftw = ft[win_range]
t2 *= ftw[:Lg//2]
t1 *= ftw[Lg//2:]
# (wh1a,wh1b),(wh2a,wh2b) = win_range
# t2[:wh1a.stop-wh1a.start] *= ft[wh1a]
# t2[wh1a.stop-wh1a.start:] *= ft[wh1b]
# t1[:wh2a.stop-wh2a.start] *= ft[wh2a]
# t1[wh2a.stop-wh2a.start:] *= ft[wh2b]
temp[(Lg+1)//2:-(Lg//2)] = 0 # clear gap (if any)
if col > 1:
temp = N.sum(temp.reshape((mii,-1)),axis=1)
else:
temp = temp.copy()
c.append(temp)
return c
else:
raise RuntimeError("Theano support not implemented yet")
for f in f_slices:
Ls = len(f)
# some preparation
ft = fft(f)
if temp0 is None:
# pre-allocate buffer (delayed because of dtype)
temp0 = N.empty(maxLg,dtype=ft.dtype)
# A small amount of zero-padding might be needed (e.g. for scale frames)
if nn > Ls:
ft = N.concatenate((ft,N.zeros(nn-Ls,dtype=ft.dtype)))
# The actual transform
c = loop(temp0)
yield map(ifft,c) # TODO: if matrixform, perform "2D" FFT along one axis
# non-sliced version
def nsgtf(f,g,wins,nn,M=None,real=False,reducedform=0,measurefft=False):
return nsgtf_sl((f,),g,wins,nn,M=M,real=real,reducedform=reducedform,measurefft=measurefft).next()
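# Illustrative sketch of the wrap-around summation performed in loop() when
# col > 1: col*mii samples are reshaped to (mii, col) and summed along axis 1,
# leaving mii coefficients per channel. The numbers below are made up.
if __name__ == '__main__':
    _mii, _col = 3, 2
    _temp = N.arange(_mii*_col)                    # [0 1 2 3 4 5]
    print(N.sum(_temp.reshape((_mii,-1)),axis=1))  # [1 5 9]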
| gpl-2.0 | -7,260,402,539,768,473,000 | 32.052632 | 102 | 0.49655 | false |
frewsxcv/python-geojson | geojson/feature.py | 6 | 1884 | """
Feature and FeatureCollection represent WGS84 GIS features (and collections of
them) whose classes satisfy the Python geo interface.
"""
from geojson.base import GeoJSON
class Feature(GeoJSON):
"""
Represents a WGS84 GIS feature.
"""
def __init__(self, id=None, geometry=None, properties=None, **extra):
"""
Initialises a Feature object with the given parameters.
:param id: Feature identifier, such as a sequential number.
:type id: str, int
:param geometry: Geometry corresponding to the feature.
:param properties: Dict containing properties of the feature.
:type properties: dict
:return: Feature object
:rtype: Feature
"""
super(Feature, self).__init__(**extra)
if id is not None:
self["id"] = id
self["geometry"] = (self.to_instance(geometry, strict=True)
if geometry else None)
self["properties"] = properties or {}
def errors(self):
geo = self.get('geometry')
return geo.errors() if geo else None
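# Illustrative usage (not part of the original module): a Feature is normally
# built from a geometry mapping plus a properties dict, e.g.
#   Feature(id=1, geometry={"type": "Point", "coordinates": [1.0, 2.0]},
#           properties={"name": "example"})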
class FeatureCollection(GeoJSON):
"""
Represents a FeatureCollection, a set of multiple Feature objects.
"""
def __init__(self, features, **extra):
"""
        Initialises a FeatureCollection object from the given features.
:param features: List of features to constitute the FeatureCollection.
:type features: list
:return: FeatureCollection object
:rtype: FeatureCollection
"""
super(FeatureCollection, self).__init__(**extra)
self["features"] = features
def errors(self):
return self.check_list_errors(lambda x: x.errors(), self.features)
def __getitem__(self, key):
try:
return self.get("features", ())[key]
except (KeyError, TypeError, IndexError):
return super(GeoJSON, self).__getitem__(key)
| bsd-3-clause | 5,742,698,560,417,820,000 | 29.885246 | 78 | 0.60828 | false |
Hypernode/xtraceback | xtraceback/shim.py | 2 | 1261 | import inspect
import os
class Shim(object):
_instances = {}
def __init__(self, target, xtb):
self.target = target
self.xtb = xtb
def __repr__(self):
raise NotImplementedError()
@classmethod
def get_instance(cls, target, xtb):
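        # Memoize one shim per wrapped object, keyed by id(target), so repeated
        # occurrences of the same module/object reuse a single shim instance.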
oid = id(target)
if oid not in cls._instances:
cls._instances[oid] = cls(target, xtb)
return cls._instances[oid]
class ModuleShim(Shim):
def __init__(self, target, xtb):
super(ModuleShim, self).__init__(target, xtb)
self.package = False
try:
self.filename = inspect.getsourcefile(target)
except TypeError:
self.filename = None
if self.filename is not None:
if os.path.basename(self.filename) == "__init__.py":
self.package = True
self.filename = os.path.dirname(self.filename)
self.filename = self.xtb._format_filename(self.filename)
def __repr__(self):
if self.filename is None:
return repr(self.target)
return "<%s '%s' from=%r>" % (self.package and "package" or "module",
self.target.__name__,
self.filename)
| mit | -6,364,031,631,502,445,000 | 27.659091 | 77 | 0.536082 | false |
googleapis/java-workflow-executions | synth.py | 1 | 1142 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.java as java
service = 'workflows-executions'
versions = ['v1beta','v1']
for version in versions:
java.bazel_library(
service=service,
version=version,
proto_path=f'google/cloud/workflows/executions/{version}',
bazel_target=f'//google/cloud/workflows/executions/{version}:google-cloud-{service}-{version}-java',
destination_name='workflow-executions',
)
java.common_templates()
| apache-2.0 | -4,461,214,997,939,001,000 | 32.588235 | 106 | 0.745184 | false |
Afnarel/django-threadedcomments | setup.py | 3 | 1797 | #!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import re
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-threadedcomments',
version=find_version('threadedcomments', '__init__.py'),
license='BSD',
description='A simple yet flexible threaded commenting system.',
long_description=read('README.rst'),
keywords='django,comments,threading',
author='Eric Florenzano',
author_email='[email protected]',
maintainer='Diederik van der Boor',
maintainer_email='[email protected]',
url='https://github.com/HonzaKral/django-threadedcomments',
download_url='https://github.com/HonzaKral/django-threadedcomments/zipball/master',
packages=find_packages(exclude=('example*',)),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| bsd-3-clause | 4,456,813,197,631,138,000 | 30.526316 | 88 | 0.642738 | false |
DickJC123/mxnet | python/mxnet/gluon/metric.py | 4 | 64655 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=no-member, too-many-lines
"""Online evaluation metric module."""
import math
from collections import OrderedDict
from .. import numpy
from ..util import use_np
from ..base import numeric_types, string_types
from .. import ndarray
from .. import registry
def check_label_shapes(labels, preds, wrap=False, shape=False):
"""Helper function for checking shape of label and prediction
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
wrap : boolean
If True, wrap labels/preds in a list if they are single NDArray
shape : boolean
If True, check the shape of labels and preds;
Otherwise only check their length.
"""
if not shape:
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape, preds.shape
if label_shape != pred_shape:
raise ValueError("Shape of labels {} does not match shape of "
"predictions {}".format(label_shape, pred_shape))
if wrap:
if isinstance(labels, ndarray.ndarray.NDArray):
labels = [labels]
if isinstance(preds, ndarray.ndarray.NDArray):
preds = [preds]
return labels, preds
class EvalMetric(object):
"""Base class for all evaluation metrics.
.. note::
This is a base class that provides common metric interfaces.
One should not use this class directly, but instead create new metric
classes that extend it.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self, name, output_names=None,
label_names=None, **kwargs):
self.name = str(name)
self.output_names = output_names
self.label_names = label_names
self._kwargs = kwargs
self.reset()
def __str__(self):
return "EvalMetric: {}".format(dict(self.get_name_value()))
def get_config(self):
"""Save configurations of metric. Can be recreated
from configs with metric.create(``**config``)
"""
config = self._kwargs.copy()
config.update({
'metric': self.__class__.__name__,
'name': self.name,
'output_names': self.output_names,
'label_names': self.label_names})
return config
def update_dict(self, label, pred):
"""Update the internal evaluation with named label and pred
Parameters
----------
labels : OrderedDict of str -> NDArray
name to array mapping for labels.
preds : OrderedDict of str -> NDArray
name to array mapping of predicted outputs.
"""
if self.output_names is not None:
pred = [pred[name] for name in self.output_names]
else:
pred = list(pred.values())
if self.label_names is not None:
label = [label[name] for name in self.label_names]
else:
label = list(label.values())
self.update(label, pred)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
raise NotImplementedError()
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.num_inst = 0
self.sum_metric = 0.0
def get(self):
"""Gets the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num_inst == 0:
return (self.name, float('nan'))
else:
res = self.sum_metric / self.num_inst
if isinstance(res, numpy.ndarray) and len(res.shape) == 0:
# currently calling ' c = mxnet.numpy.array([1,2,3]).sum() ' would get
# ' array(6.) ', a ndarray with shape ()
# In this case, returning a 'float' in .get() is more explicit.
res = res.item()
return (self.name, res)
def get_name_value(self):
"""Returns zipped name and value pairs.
Returns
-------
list of tuples
A (name, value) tuple list.
"""
name, value = self.get()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
# pylint: disable=invalid-name
register = registry.get_register_func(EvalMetric, 'metric')
alias = registry.get_alias_func(EvalMetric, 'metric')
_create = registry.get_create_func(EvalMetric, 'metric')
# pylint: enable=invalid-name
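# `register` and `alias` are applied as decorators to the metric classes below
# so that instances can later be constructed by name, e.g. create('acc') or
# create('f1').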
def create(metric, *args, **kwargs):
"""Creates evaluation metric from metric names or instances of EvalMetric
or a custom metric function.
Parameters
----------
metric : str or callable
Specifies the metric to create.
This argument must be one of the below:
- Name of a metric.
- An instance of `EvalMetric`.
- A list, each element of which is a metric or a metric name.
- An evaluation function that computes custom metric for a given batch of
labels and predictions.
*args : list
Additional arguments to metric constructor.
Only used when metric is str.
**kwargs : dict
Additional arguments to metric constructor.
Only used when metric is str
Examples
--------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label - pred))
...
>>> metric1 = mx.gluon.metric.create('acc')
>>> metric2 = mx.gluon.metric.create(custom_metric)
>>> metric3 = mx.gluon.metric.create([metric1, metric2, 'rmse'])
"""
if callable(metric):
return CustomMetric(metric, *args, **kwargs)
elif isinstance(metric, list):
composite_metric = CompositeEvalMetric()
for child_metric in metric:
composite_metric.add(create(child_metric, *args, **kwargs))
return composite_metric
return _create(metric, *args, **kwargs)
@register
@alias('composite')
class CompositeEvalMetric(EvalMetric):
"""Manages multiple evaluation metrics.
Parameters
----------
metrics : list of EvalMetric
List of child metrics.
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> eval_metrics_1 = mx.gluon.metric.Accuracy()
>>> eval_metrics_2 = mx.gluon.metric.F1()
>>> eval_metrics = mx.gluon.metric.CompositeEvalMetric()
>>> for child_metric in [eval_metrics_1, eval_metrics_2]:
>>> eval_metrics.add(child_metric)
>>> eval_metrics.update(labels = labels, preds = predicts)
>>> print eval_metrics.get()
(['accuracy', 'f1'], [0.6666666666666666, 0.8])
"""
def __init__(self, metrics=None, name='composite',
output_names=None, label_names=None):
super(CompositeEvalMetric, self).__init__(
name, output_names=output_names, label_names=label_names)
if metrics is None:
metrics = []
self.metrics = [create(i) for i in metrics]
def add(self, metric):
"""Adds a child metric.
Parameters
----------
metric
A metric instance.
"""
self.metrics.append(create(metric))
def get_metric(self, index):
"""Returns a child metric.
Parameters
----------
index : int
Index of child metric in the list of metrics.
"""
try:
return self.metrics[index]
except IndexError:
            raise ValueError("Metric index {} is out of range 0 to {}".format(
                index, len(self.metrics)))
def update_dict(self, labels, preds): # pylint: disable=arguments-differ
if self.label_names is not None:
labels = OrderedDict([i for i in labels.items()
if i[0] in self.label_names])
if self.output_names is not None:
preds = OrderedDict([i for i in preds.items()
if i[0] in self.output_names])
for metric in self.metrics:
metric.update_dict(labels, preds)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
for metric in self.metrics:
metric.update(labels, preds)
def reset(self):
"""Resets the internal evaluation result to initial state."""
try:
for metric in self.metrics:
metric.reset()
except AttributeError:
pass
def get(self):
"""Returns the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
names = []
values = []
for metric in self.metrics:
name, value = metric.get()
if isinstance(name, string_types):
name = [name]
if isinstance(value, numeric_types):
value = [value]
names.extend(name)
values.extend(value)
return (names, values)
def get_config(self):
config = super(CompositeEvalMetric, self).get_config()
config.update({'metrics': [i.get_config() for i in self.metrics]})
return config
########################
# CLASSIFICATION METRICS
########################
@register
@alias('acc')
@use_np
class Accuracy(EvalMetric):
"""Computes accuracy classification score.
The accuracy score is defined as
.. math::
\\text{accuracy}(y, \\hat{y}) = \\frac{1}{n} \\sum_{i=0}^{n-1}
\\text{1}(\\hat{y_i} == y_i)
Parameters
----------
axis : int, default=1
The axis that represents classes
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> acc = mx.gluon.metric.Accuracy()
>>> acc.update(preds = predicts, labels = labels)
>>> print acc.get()
('accuracy', 0.6666666666666666)
"""
def __init__(self, axis=1, name='accuracy',
output_names=None, label_names=None):
super(Accuracy, self).__init__(
name, axis=axis,
output_names=output_names, label_names=label_names)
self.axis = axis
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data with class indices as values, one per sample.
preds : list of `NDArray`
Prediction values for samples. Each prediction value can either be the class index,
or a vector of likelihoods for all classes.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred_label in zip(labels, preds):
pred_label = pred_label.as_np_ndarray().as_in_ctx(label.ctx)
label = label.as_np_ndarray()
if pred_label.shape != label.shape:
pred_label = pred_label.argmax(axis=self.axis)
pred_label = pred_label.astype('int32')
label = label.astype('int32')
# flatten before checking shapes to avoid shape miss match
label = label.reshape(-1)
pred_label = pred_label.reshape(-1)
check_label_shapes(label, pred_label)
num_correct = (pred_label == label).sum().astype('float64')
self.sum_metric += num_correct
self.num_inst += len(pred_label)
@register
@alias('top_k_accuracy', 'top_k_acc')
@use_np
class TopKAccuracy(EvalMetric):
"""Computes top k predictions accuracy.
`TopKAccuracy` differs from Accuracy in that it considers the prediction
to be ``True`` as long as the ground truth label is in the top K
    predicted labels.
If `top_k` = ``1``, then `TopKAccuracy` is identical to `Accuracy`.
Parameters
----------
top_k : int
Whether targets are in top k predictions.
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> np.random.seed(999)
>>> top_k = 3
>>> labels = [mx.nd.array([2, 6, 9, 2, 3, 4, 7, 8, 9, 6])]
>>> predicts = [mx.nd.array(np.random.rand(10, 10))]
>>> acc = mx.gluon.metric.TopKAccuracy(top_k=top_k)
>>> acc.update(labels, predicts)
>>> print acc.get()
('top_k_accuracy', 0.3)
"""
def __init__(self, top_k=1, name='top_k_accuracy',
output_names=None, label_names=None):
super(TopKAccuracy, self).__init__(
name, top_k=top_k,
output_names=output_names, label_names=label_names)
self.top_k = top_k
assert(self.top_k > 1), 'Please use Accuracy if top_k is no more than 1'
self.name += '_%d' % self.top_k
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred_label in zip(labels, preds):
assert(len(pred_label.shape) <= 2), 'Predictions should be no more than 2 dims'
# Using argpartition here instead of argsort is safe because
# we do not care about the order of top k elements. It is
# much faster, which is important since that computation is
# single-threaded due to Python GIL.
pred_label = pred_label.as_np_ndarray().as_in_ctx(label.ctx).astype('float32')
pred_label = numpy.argpartition(pred_label, -self.top_k)
label = label.as_np_ndarray().astype('int32')
check_label_shapes(label, pred_label)
num_samples = pred_label.shape[0]
num_dims = len(pred_label.shape)
if num_dims == 1:
num_correct = (pred_label.reshape(-1) == label.reshape(-1)).sum()
self.sum_metric += num_correct.astype('float64')
elif num_dims == 2:
num_classes = pred_label.shape[1]
top_k = min(num_classes, self.top_k)
for j in range(top_k):
num_correct = (pred_label[:, num_classes - 1 - j].reshape(-1) == label.reshape(-1)).sum()
self.sum_metric += num_correct.astype('float64')
self.num_inst += num_samples
def predict_with_threshold(pred, threshold=0.5):
"""Do thresholding of predictions in binary and multilabel cases.
Parameters
----------
    pred : ndarray
        predictions in shape of (batch_size, ...) or (batch_size, ..., num_categories)
    threshold : float or ndarray
        threshold(s) in shape of float or (num_categories)
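    Examples
    --------
    >>> # illustrative only
    >>> scores = numpy.array([[0.3, 0.7], [0.8, 0.2]])
    >>> mask = predict_with_threshold(scores, 0.5)  # True where score > 0.5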
"""
if isinstance(threshold, float):
return pred > threshold
elif isinstance(threshold, (numpy.ndarray, ndarray.ndarray.NDArray)):
num_classes = pred.shape[-1]
assert threshold.shape[-1] == num_classes, \
"shape mismatch: %s vs. %s"%(pred.shape[-1], threshold.shape[-1])
return pred > threshold
else:
raise ValueError("{} is a wrong type for threshold!".format(type(threshold)))
def one_hot(idx, num):
return (numpy.arange(num).astype(idx) == idx[:, None]).astype('int32')
@use_np
class _ClassificationMetrics(object):
"""Private container class for classification metric statistics.
True/false positive and true/false negative counts are sufficient statistics for various classification metrics.
This class provides the machinery to track those statistics across mini-batches of
(label, prediction) pairs.
Parameters
----------
class_type : str, default "binary"
"binary": f1 for binary classification.
"multiclass": f1 for multiclassification problem.
"multilabel": f1 for multilabel classification.
beta : float, default 1
weight of precision in harmonic mean.
threshold : float, default 0.5
threshold for deciding whether the predictions are positive or negative.
"""
def __init__(self, class_type="binary", threshold=0.5, beta=1):
self.class_type = class_type
self.threshold = threshold
self.beta = beta
self.reset_stats()
def _set(self, num, ctx):
if self.num_classes is None:
self.num_classes = num
self.true_positives = numpy.zeros(num, dtype='float64').as_in_ctx(ctx)
self.false_negatives = numpy.zeros(num, dtype='float64').as_in_ctx(ctx)
self.false_positives = numpy.zeros(num, dtype='float64').as_in_ctx(ctx)
self.true_negatives = numpy.zeros(num, dtype='float64').as_in_ctx(ctx)
else:
assert self.num_classes == num, \
"Input number of classes has changed from {} to {}".format(self.num_classes, num)
def update_stats(self, label, pred):
"""Update various binary classification counts for a single (label, pred) pair.
Parameters
----------
label : `NDArray`
The labels of the data.
pred : `NDArray`
Predicted values.
"""
pred = pred.as_np_ndarray().as_in_ctx(label.ctx)
label = label.as_np_ndarray().astype('int32')
if self.class_type == "binary":
self._set(1, label.ctx)
if label.max() > 1:
raise ValueError("Wrong label for binary classification.")
if pred.shape == label.shape:
pass
elif pred.shape[-1] > 2:
raise ValueError("The shape of prediction {} is wrong for binary classification.".format(pred.shape))
elif pred.shape[-1] == 2:
pred = pred.reshape(-1, 2)[:, 1]
pred_label = predict_with_threshold(pred, self.threshold).reshape(-1)
label = label.reshape(-1)
elif self.class_type == "multiclass":
num = pred.shape[-1]
self._set(num, label.ctx)
assert label.max() < num, "pred contains fewer classes than label!"
pred_label = one_hot(pred.argmax(axis=-1).reshape(-1), num)
label = one_hot(label.reshape(-1), num)
elif self.class_type == "multilabel":
num = pred.shape[-1]
self._set(num, label.ctx)
assert pred.shape == label.shape, \
"The shape of label should be same as that of prediction for multilabel classification."
pred_label = predict_with_threshold(pred, self.threshold).reshape(-1, num)
label = label.reshape(-1, num)
else:
raise ValueError(
"Wrong class_type {}! Only supports ['binary', 'multiclass', 'multilabel']".format(self.class_type))
check_label_shapes(label, pred_label)
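        # Per-class sufficient statistics: compare the thresholded/one-hot
        # predictions against the labels and accumulate TP, FP, FN and TN counts.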
pred_true = (pred_label == 1)
pred_false = (pred_label == 0)
label_true = (label == 1)
label_false = (label == 0)
true_pos = (pred_true * label_true).sum(0)
false_pos = (pred_true * label_false).sum(0)
false_neg = (pred_false * label_true).sum(0)
true_neg = (pred_false * label_false).sum(0)
self.true_positives += true_pos
self.false_positives += false_pos
self.false_negatives += false_neg
self.true_negatives += true_neg
@property
def precision(self):
if self.num_classes is not None:
return self.true_positives / numpy.maximum(self.true_positives + self.false_positives, 1e-12)
else:
return 0.
@property
def micro_precision(self):
if self.num_classes is not None:
return self.true_positives.sum() / \
numpy.maximum(self.true_positives.sum() + self.false_positives.sum(), 1e-12)
else:
return 0.
@property
def recall(self):
if self.num_classes is not None:
return self.true_positives / numpy.maximum(self.true_positives + self.false_negatives, 1e-12)
else:
return 0.
@property
def micro_recall(self):
if self.num_classes is not None:
return self.true_positives.sum() / \
numpy.maximum(self.true_positives.sum() + self.false_negatives.sum(), 1e-12)
else:
return 0.
@property
def fscore(self):
return (1 + self.beta ** 2) * self.precision * self.recall / \
numpy.maximum(self.beta ** 2 * self.precision + self.recall, 1e-12)
@property
def micro_fscore(self):
if self.micro_precision + self.micro_recall > 0:
return (1 + self.beta ** 2) * self.micro_precision * self.micro_recall / \
(self.beta ** 2 * self.micro_precision + self.micro_recall)
else:
return 0.
def binary_matthewscc(self):
"""Calculate the Matthew's Correlation Coefficent"""
if not self.total_examples:
return 0.
true_pos = float(self.true_positives)
false_pos = float(self.false_positives)
false_neg = float(self.false_negatives)
true_neg = float(self.true_negatives)
terms = [(true_pos + false_pos),
(true_pos + false_neg),
(true_neg + false_pos),
(true_neg + false_neg)]
denom = 1.
for t in filter(lambda t: t != 0., terms):
denom *= t
return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)
@property
def total_examples(self):
if self.num_classes is None:
return 0
return int(self.false_negatives[0] + self.false_positives[0] + \
self.true_negatives[0] + self.true_positives[0])
def reset_stats(self):
self.num_classes = None
self.true_positives = None
self.false_negatives = None
self.false_positives = None
self.true_negatives = None
@register
@use_np
class F1(EvalMetric):
"""Computes the F1 score of a binary classification problem.
The F1 score is equivalent to harmonic mean of the precision and recall,
where the best value is 1.0 and the worst value is 0.0. The formula for F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
The formula for precision and recall is::
precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)
.. note::
       By default this computes the binary F1; set ``class_type`` to "multiclass" or "multilabel" for other problems.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
class_type : str, default "binary"
"binary": f1 for binary classification.
"multiclass": f1 for multiclassification problem.
"multilabel": f1 for multilabel classification.
threshold : float, default 0.5
threshold for postive confidence value.
average : str, default 'micro'
Strategy to be used for aggregating across mini-batches.
"macro": Calculate metrics for each label and return unweighted mean of f1.
"micro": Calculate metrics globally by counting the total TP, FN and FP.
None: Return f1 scores for each class (numpy.ndarray) .
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0., 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0., 1., 1.])]
>>> f1 = mx.gluon.metric.F1()
>>> f1.update(preds = predicts, labels = labels)
>>> print f1.get()
('f1', 0.8)
"""
def __init__(self, name='f1',
output_names=None, label_names=None, class_type="binary", threshold=0.5, average="micro"):
self.average = average
self.metrics = _ClassificationMetrics(class_type=class_type, threshold=threshold)
EvalMetric.__init__(self, name=name,
output_names=output_names, label_names=label_names)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
self.metrics.update_stats(label, pred)
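        # Store fscore * N in sum_metric and N in num_inst so that the base-class
        # get() (sum_metric / num_inst) reports the f-score itself.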
if self.average == "micro":
self.sum_metric = self.metrics.micro_fscore * self.metrics.total_examples
elif self.average == "macro":
self.sum_metric = self.metrics.fscore.mean() * self.metrics.total_examples
else:
self.sum_metric = self.metrics.fscore * self.metrics.total_examples
self.num_inst = self.metrics.total_examples
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.sum_metric = 0.
self.num_inst = 0
self.metrics.reset_stats()
@register
@use_np
class Fbeta(F1):
"""Computes the Fbeta score of a binary classification problem.
The Fbeta score is equivalent to harmonic mean of the precision and recall,
where the best value is 1.0 and the worst value is 0.0. The formula for Fbeta score is::
Fbeta = (1 + beta ** 2) * (precision * recall) / (beta ** 2 * precision + recall)
The formula for precision and recall is::
precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)
.. note::
       By default this computes the binary Fbeta; set ``class_type`` to "multiclass" or "multilabel" for other problems.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
class_type : str, default "binary"
"binary": f1 for binary classification.
"multiclass": f1 for multiclassification problem.
"multilabel": f1 for multilabel classification.
beta : float, default 1
weight of precision in harmonic mean.
threshold : float, default 0.5
threshold for postive confidence value.
average : str, default 'micro'
Strategy to be used for aggregating across mini-batches.
"macro": Calculate metrics for each label and return unweighted mean of f1.
"micro": Calculate metrics globally by counting the total TP, FN and FP.
None: Return f1 scores for each class.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0., 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0., 1., 1.])]
>>> fbeta = mx.gluon.metric.Fbeta(beta=2)
>>> fbeta.update(preds = predicts, labels = labels)
>>> print fbeta.get()
('fbeta', 0.9090909090909091)
"""
def __init__(self, name='fbeta',
output_names=None, label_names=None, class_type="binary", beta=1, threshold=0.5, average="micro"):
super(Fbeta, self).__init__(
name=name, output_names=output_names, label_names=label_names,
class_type=class_type, threshold=threshold, average=average)
self.metrics = _ClassificationMetrics(class_type=class_type, threshold=threshold, beta=beta)
@register
@use_np
class BinaryAccuracy(EvalMetric):
"""Computes the accuracy of a binary or multilabel classification problem.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
threshold : float or ndarray, default 0.5
threshold for deciding whether the predictions are positive or negative.
Examples
--------
>>> predicts = [mx.nd.array([0.7, 1, 0.55])]
>>> labels = [mx.nd.array([0., 1., 0.])]
>>> bacc = mx.gluon.metric.BinaryAccuracy(threshold=0.6)
>>> bacc.update(preds = predicts, labels = labels)
>>> print bacc.get()
('binary_accuracy', 0.6666666666666666)
"""
def __init__(self, name='binary_accuracy',
output_names=None, label_names=None, threshold=0.5):
self.threshold = threshold
EvalMetric.__init__(self, name=name,
output_names=output_names, label_names=label_names)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
Each label denotes positive/negative for each class.
preds : list of `NDArray`
Each prediction value is a confidence value of being positive for each class.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred_label in zip(labels, preds):
pred_label = predict_with_threshold(pred_label, self.threshold)
pred_label = pred_label.as_np_ndarray().astype('int32').as_in_ctx(label.ctx)
label = label.as_np_ndarray().astype('int32')
# flatten before checking shapes to avoid shape miss match
label = label.reshape(-1)
pred_label = pred_label.reshape(-1)
check_label_shapes(label, pred_label)
num_correct = (pred_label == label).sum().astype('float64')
self.sum_metric += num_correct
self.num_inst += len(pred_label)
@register
@use_np
class MCC(EvalMetric):
"""Computes the Matthews Correlation Coefficient of a binary classification problem.
    While slower to compute than F1, the MCC can give insight that F1 or Accuracy cannot.
    For instance, if the network always predicts the same result
    then the MCC will immediately show this. The MCC is also symmetric with respect
    to positive and negative categorization; however, there need to be both
    positive and negative examples in the labels or it will always return 0.
MCC of 0 is uncorrelated, 1 is completely correlated, and -1 is negatively correlated.
.. math::
\\text{MCC} = \\frac{ TP \\times TN - FP \\times FN }
{\\sqrt{ (TP + FP) ( TP + FN ) ( TN + FP ) ( TN + FN ) } }
where 0 terms in the denominator are replaced by 1.
.. note::
This version of MCC only supports binary classification. See PCC.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> # In this example the network almost always predicts positive
>>> false_positives = 1000
>>> false_negatives = 1
>>> true_positives = 10000
>>> true_negatives = 1
>>> predicts = [mx.nd.array(
[[.3, .7]]*false_positives +
[[.7, .3]]*true_negatives +
[[.7, .3]]*false_negatives +
[[.3, .7]]*true_positives
)]
>>> labels = [mx.nd.array(
[0.]*(false_positives + true_negatives) +
[1.]*(false_negatives + true_positives)
)]
>>> f1 = mx.gluon.metric.F1()
>>> f1.update(preds = predicts, labels = labels)
>>> mcc = mx.gluon.metric.MCC()
>>> mcc.update(preds = predicts, labels = labels)
>>> print f1.get()
('f1', 0.95233560306652054)
>>> print mcc.get()
('mcc', 0.01917751877733392)
"""
def __init__(self, name='mcc',
output_names=None, label_names=None):
self._metrics = _ClassificationMetrics()
EvalMetric.__init__(self, name=name,
output_names=output_names, label_names=label_names)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
self._metrics.update_stats(label, pred)
self.sum_metric = self._metrics.binary_matthewscc() * self._metrics.total_examples
self.num_inst = self._metrics.total_examples
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.sum_metric = 0.
self.num_inst = 0.
self._metrics.reset_stats()
####################
# REGRESSION METRICS
####################
@register
@use_np
class MAE(EvalMetric):
"""Computes Mean Absolute Error (MAE) loss.
The mean absolute error is given by
.. math::
\\frac{\\sum_i^n |y_i - \\hat{y}_i|}{n}
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([3, -0.5, 2, 7])]
>>> labels = [mx.nd.array([2.5, 0.0, 2, 8])]
>>> mean_absolute_error = mx.gluon.metric.MAE()
>>> mean_absolute_error.update(labels = labels, preds = predicts)
>>> print mean_absolute_error.get()
('mae', 0.5)
"""
def __init__(self, name='mae',
output_names=None, label_names=None):
super(MAE, self).__init__(
name, output_names=output_names, label_names=label_names)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
label = label.as_np_ndarray()
pred = pred.as_np_ndarray().as_in_ctx(label.ctx)
num_inst = label.shape[0]
mae = numpy.abs(label - pred).reshape(num_inst, -1).mean(axis=-1).sum()
self.sum_metric += mae
self.num_inst += num_inst
@register
@use_np
class MSE(EvalMetric):
"""Computes Mean Squared Error (MSE) loss.
The mean squared error is given by
.. math::
\\frac{\\sum_i^n (y_i - \\hat{y}_i)^2}{n}
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([3, -0.5, 2, 7])]
>>> labels = [mx.nd.array([2.5, 0.0, 2, 8])]
>>> mean_squared_error = mx.gluon.metric.MSE()
>>> mean_squared_error.update(labels = labels, preds = predicts)
>>> print mean_squared_error.get()
('mse', 0.375)
"""
def __init__(self, name='mse',
output_names=None, label_names=None):
super(MSE, self).__init__(
name, output_names=output_names, label_names=label_names)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
label = label.as_np_ndarray()
pred = pred.as_np_ndarray().as_in_ctx(label.ctx)
num_inst = label.shape[0]
mse = ((label - pred)**2.0).reshape(num_inst, -1).mean(axis=-1).sum()
self.sum_metric += mse
self.num_inst += num_inst
@register
@use_np
class RMSE(MSE):
"""Computes Root Mean Squred Error (RMSE) loss.
The root mean squared error is given by
.. math::
\\sqrt{\\frac{\\sum_i^n (y_i - \\hat{y}_i)^2}{n}}
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([3, -0.5, 2, 7])]
>>> labels = [mx.nd.array([2.5, 0.0, 2, 8])]
>>> root_mean_squared_error = mx.gluon.metric.RMSE()
>>> root_mean_squared_error.update(labels = labels, preds = predicts)
>>> print root_mean_squared_error.get()
('rmse', 0.612372457981)
"""
def __init__(self, name='rmse',
output_names=None, label_names=None):
super(RMSE, self).__init__(
name, output_names=output_names, label_names=label_names)
def get(self):
if self.num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, math.sqrt(self.sum_metric / self.num_inst))
@register
@use_np
class MeanPairwiseDistance(EvalMetric):
"""Computes Mean Pairwise Distance.
The mean pairwise distance is given by
.. math::
\\sqrt{\\frac{(\\sum_i^n (y_i - \\hat{y}_i)^p)^\\frac{1}{p}}{n}}
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
p : float, default 2
calculating distance using the p-norm
Examples
--------
>>> predicts = [mx.nd.array([[1., 2.], [3., 4.]])]
>>> labels = [mx.nd.array([[1., 0.], [4., 2.]])]
>>> mpd = mx.gluon.metric.MeanPairwiseDistance()
>>> mpd.update(labels = labels, preds = predicts)
>>> print mpd.get()
('mpd', 2.1180338859558105)
"""
def __init__(self, name='mpd',
output_names=None, label_names=None, p=2):
super(MeanPairwiseDistance, self).__init__(
name, output_names=output_names, label_names=label_names)
self.p = p
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
label = label.as_np_ndarray()
pred = pred.as_np_ndarray().as_in_ctx(label.ctx)
label = label.reshape(label.shape[0], -1)
pred = pred.reshape(pred.shape[0], -1)
dis = (((label - pred) ** self.p).sum(axis=-1)) ** (1./self.p)
dis = dis.sum()
num_inst = label.shape[0]
self.sum_metric += dis
self.num_inst += num_inst
@register
@use_np
class MeanCosineSimilarity(EvalMetric):
r"""Computes Mean Cosine Similarity.
The mean cosine similarity is given by
.. math::
        \mathrm{cos\_sim}(label, pred) = \frac{label \cdot pred}{\max(\lVert label \rVert \, \lVert pred \rVert, eps)}
Calculation happens on the last dimension of label and pred.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
eps : float, default 1e-8
        small value to avoid division by zero.
Examples
--------
>>> predicts = [mx.nd.array([[1., 0.], [1., 1.]])]
>>> labels = [mx.nd.array([[3., 4.], [2., 2.]])]
>>> mcs = mx.gluon.metric.MeanCosineSimilarity()
>>> mcs.update(labels = labels, preds = predicts)
>>> print mcs.get()
('cos_sim', 0.8)
"""
def __init__(self, name='cos_sim',
output_names=None, label_names=None, eps=1e-8):
super(MeanCosineSimilarity, self).__init__(
name, output_names=output_names, label_names=label_names)
self.eps = eps
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
label = label.as_np_ndarray()
pred = pred.as_np_ndarray().as_in_ctx(label.ctx)
if len(label.shape) == 1:
label = label.reshape(1, label.shape[0])
if len(pred.shape) == 1:
pred = pred.reshape(1, pred.shape[0])
sim = (label * pred).sum(axis=-1)
n_p = numpy.linalg.norm(pred, axis=-1)
n_l = numpy.linalg.norm(label, axis=-1)
sim = sim / numpy.maximum(n_l * n_p, self.eps)
sim = sim.sum()
num_inst = len(label.reshape(-1, label.shape[-1])) # numpy.prod(label.shape[:-1]) is not supported
self.sum_metric += sim
self.num_inst += num_inst
@register
@alias('ce')
@use_np
class CrossEntropy(EvalMetric):
"""Computes Cross Entropy loss.
The cross entropy over a batch of sample size :math:`N` is given by
.. math::
-\\sum_{n=1}^{N}\\sum_{k=1}^{K}t_{nk}\\log (y_{nk}),
where :math:`t_{nk}=1` if and only if sample :math:`n` belongs to class :math:`k`.
:math:`y_{nk}` denotes the probability of sample :math:`n` belonging to
class :math:`k`.
Parameters
----------
eps : float, default 1e-12
Use small constant for the case that predicted value is 0.
ignore_label : int or None, default None
        Index of invalid label to ignore when
        counting. If set to `None` (the default), all entries are included.
axis : int, default -1
The axis from prediction that was used to
compute softmax. By default use the last axis.
from_logits : boolean, default False
Whether `pred` is expected to be a logits tensor.
By default, we assume that `pred` encodes a probability distribution.
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> ce = mx.gluon.metric.CrossEntropy()
>>> ce.update(labels, predicts)
>>> print ce.get()
('cross-entropy', 0.57159948348999023)
"""
def __init__(self, eps=1e-12, ignore_label=None, axis=-1, from_logits=False,
name='cross-entropy', output_names=None, label_names=None):
super(CrossEntropy, self).__init__(
name, output_names=output_names, label_names=label_names)
self.ignore_label = ignore_label
self.axis = axis
self.from_logits = from_logits
self.eps = eps
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
loss = 0.
num = 0
for label, pred in zip(labels, preds):
assert label.size == pred.size/pred.shape[-1], \
"shape mismatch: %s vs. %s"%(label.shape, pred.shape)
label = label.reshape((label.size,))
if self.from_logits:
pred = ndarray.softmax(pred, axis=self.axis)
pred = ndarray.pick(pred.as_in_context(label.ctx), label.astype(dtype='int32'), axis=self.axis)
label = label.as_np_ndarray()
pred = pred.as_np_ndarray()
if self.ignore_label is not None:
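                # Entries equal to ignore_label are removed from the count and
                # their predicted probability is replaced by 1 so that the log
                # term contributes nothing to the loss.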
ignore = (label == self.ignore_label).astype(pred.dtype)
num -= ignore.sum()
pred = pred * (1 - ignore) + ignore
loss -= numpy.log(numpy.maximum(self.eps, pred)).sum()
num += pred.size
self.sum_metric += loss
self.num_inst += num
@register
@use_np
class Perplexity(CrossEntropy):
"""Computes perplexity.
Perplexity is a measurement of how well a probability distribution
or model predicts a sample. A low perplexity indicates the model
is good at predicting the sample.
The perplexity of a model q is defined as
.. math::
b^{\\big(-\\frac{1}{N} \\sum_{i=1}^N \\log_b q(x_i) \\big)}
= \\exp \\big(-\\frac{1}{N} \\sum_{i=1}^N \\log q(x_i)\\big)
where we let `b = e`.
:math:`q(x_i)` is the predicted value of its ground truth
label on sample :math:`x_i`.
For example, we have three samples :math:`x_1, x_2, x_3` and their labels
are :math:`[0, 1, 1]`.
Suppose our model predicts :math:`q(x_1) = p(y_1 = 0 | x_1) = 0.3`
and :math:`q(x_2) = 1.0`,
:math:`q(x_3) = 0.6`. The perplexity of model q is
:math:`exp\\big(-(\\log 0.3 + \\log 1.0 + \\log 0.6) / 3\\big) = 1.77109762852`.
Parameters
----------
eps : float, default 1e-12
Use small constant for the case that predicted value is 0.
ignore_label : int or None, default None
        Index of invalid label to ignore when
        counting. If set to `None` (the default), all entries are included.
axis : int (default -1)
The axis from prediction that was used to
compute softmax. By default use the last axis.
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> perp = mx.gluon.metric.Perplexity(ignore_label=None)
>>> perp.update(labels, predicts)
>>> print perp.get()
('Perplexity', 1.7710976285155853)
"""
def __init__(self, eps=1e-12, ignore_label=None, axis=-1, from_logits=False,
name='perplexity', output_names=None, label_names=None):
super(Perplexity, self).__init__(
eps=eps, ignore_label=ignore_label, axis=axis, from_logits=from_logits,
name=name, output_names=output_names, label_names=label_names)
def get(self):
if self.num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, math.exp(self.sum_metric/self.num_inst))
@register
@alias('pearsonr')
@use_np
class PearsonCorrelation(EvalMetric):
"""Computes Pearson correlation.
The pearson correlation is given by
.. math::
\\frac{cov(y, \\hat{y})}{\\sigma{y}\\sigma{\\hat{y}}}
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([[1, 0], [0, 1], [0, 1]])]
>>> pr = mx.gluon.metric.PearsonCorrelation()
>>> pr.update(labels, predicts)
>>> print pr.get()
('pearsonr', 0.42163704544016178)
"""
def __init__(self, name='pearsonr',
output_names=None, label_names=None):
super(PearsonCorrelation, self).__init__(
name, output_names=output_names, label_names=label_names)
self.reset()
def reset(self):
self._sse_p = 0
self._mean_p = 0
self._sse_l = 0
self._mean_l = 0
self._pred_nums = 0
self._label_nums = 0
self._conv = 0
self.num_inst = 0
self.sum_metric = 0.0
def update_variance(self, new_values, *aggregate):
#Welford's online algorithm for variance update
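        # `aggregate` is the running (count, mean, M2) triple, where M2 is the sum
        # of squared deviations from the mean; the updated triple is returned so
        # labels and predictions can each keep their own streaming statistics.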
count, mean, m_2 = aggregate
count += len(new_values)
delta = new_values - mean
mean += numpy.sum(delta / count)
delta_2 = new_values - mean
m_2 += numpy.sum(delta * delta_2)
return count, mean, m_2
def update_cov(self, label, pred):
self._conv = self._conv + numpy.sum((label - self._mean_l) * (pred - self._mean_p))
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
check_label_shapes(label, pred, False, True)
label = label.as_np_ndarray().reshape(-1).astype(numpy.float64)
pred = pred.as_np_ndarray().as_in_ctx(label.ctx).reshape(-1).astype(numpy.float64)
self.num_inst += 1
self._label_nums, self._mean_l, self._sse_l = \
self.update_variance(label, self._label_nums, self._mean_l, self._sse_l)
self.update_cov(label, pred)
self._pred_nums, self._mean_p, self._sse_p = \
self.update_variance(pred, self._pred_nums, self._mean_p, self._sse_p)
def get(self):
if self.num_inst == 0:
return (self.name, float('nan'))
n = self._label_nums
pearsonr = self._conv / ((n-1) * numpy.sqrt(self._sse_p / (n - 1)) * numpy.sqrt(self._sse_l / (n - 1)))
return (self.name, float(pearsonr))
@register
@use_np
class PCC(EvalMetric):
"""PCC is a multiclass equivalent for the Matthews correlation coefficient derived
from a discrete solution to the Pearson correlation coefficient.
.. math::
\\text{PCC} = \\frac {\\sum _{k}\\sum _{l}\\sum _{m}C_{kk}C_{lm}-C_{kl}C_{mk}}
{{\\sqrt {\\sum _{k}(\\sum _{l}C_{kl})(\\sum _{k'|k'\\neq k}\\sum _{l'}C_{k'l'})}}
{\\sqrt {\\sum _{k}(\\sum _{l}C_{lk})(\\sum _{k'|k'\\neq k}\\sum _{l'}C_{l'k'})}}}
defined in terms of a K x K confusion matrix C.
When there are more than two labels the PCC will no longer range between -1 and +1.
Instead the minimum value will be between -1 and 0 depending on the true distribution.
The maximum value is always +1.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> # In this example the network almost always predicts positive
>>> false_positives = 1000
>>> false_negatives = 1
>>> true_positives = 10000
>>> true_negatives = 1
>>> predicts = [mx.nd.array(
[[.3, .7]]*false_positives +
[[.7, .3]]*true_negatives +
[[.7, .3]]*false_negatives +
[[.3, .7]]*true_positives
)]
>>> labels = [mx.nd.array(
[0]*(false_positives + true_negatives) +
[1]*(false_negatives + true_positives)
)]
>>> f1 = mx.gluon.metric.F1()
>>> f1.update(preds = predicts, labels = labels)
>>> pcc = mx.gluon.metric.PCC()
>>> pcc.update(preds = predicts, labels = labels)
>>> print f1.get()
('f1', 0.95233560306652054)
>>> print pcc.get()
('pcc', 0.01917751877733392)
"""
def __init__(self, name='pcc',
output_names=None, label_names=None):
self.k = 2
super(PCC, self).__init__(
name=name, output_names=output_names, label_names=label_names)
def _grow(self, inc):
self.lcm = numpy.pad(
self.lcm, ((0, inc), (0, inc)), 'constant', constant_values=(0))
self.k += inc
def _calc_mcc(self, cmat):
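        # `cmat` is the K x K confusion matrix accumulated in update(), with rows
        # indexed by predicted class and columns by true label; x and y below are
        # therefore the per-class prediction and label totals.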
n = cmat.sum()
x = cmat.sum(axis=1)
y = cmat.sum(axis=0)
cov_xx = numpy.sum(x * (n - x))
cov_yy = numpy.sum(y * (n - y))
if cov_xx == 0 or cov_yy == 0:
return float('nan')
# i = cmat.diagonal() # mxnet.numpy.ndarray.diagonal() is currently not available.
i = cmat[numpy.arange(self.k), numpy.arange(self.k)]
cov_xy = numpy.sum(i * n - x * y)
return cov_xy / (cov_xx * cov_yy) ** 0.5
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
# update the confusion matrix
for label, pred in zip(labels, preds):
label = label.astype('int32', copy=False).as_np_ndarray()
pred = pred.as_np_ndarray().as_in_ctx(label.ctx)
if pred.shape != label.shape:
pred = pred.argmax(axis=1).astype(label, copy=False)
else:
pred = pred.astype('int32', copy=False)
n = int(max(pred.max(), label.max()))
if n >= self.k:
self._grow(n + 1 - self.k)
bcm = numpy.zeros((self.k, self.k), dtype='float64')
for i, j in zip(pred, label):
bcm[i, j] += 1
self.lcm += bcm
self.num_inst += 1
@property
def sum_metric(self):
return self._calc_mcc(self.lcm) * self.num_inst
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.num_inst = 0.
self.lcm = numpy.zeros((self.k, self.k), dtype='float64')
@register
class Loss(EvalMetric):
"""Dummy metric for directly printing loss.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self, name='loss',
output_names=None, label_names=None):
super(Loss, self).__init__(
name, output_names=output_names, label_names=label_names)
def update(self, _, preds):
if isinstance(preds, ndarray.ndarray.NDArray):
preds = [preds]
for pred in preds:
loss = ndarray.sum(pred).asscalar()
self.sum_metric += loss
self.num_inst += pred.size
@register
class Torch(Loss):
"""Dummy metric for torch criterions."""
def __init__(self, name='torch',
output_names=None, label_names=None):
super(Torch, self).__init__(
name, output_names=output_names, label_names=label_names)
@register
@use_np
class CustomMetric(EvalMetric):
"""Computes a customized evaluation metric.
The `feval` function can return a `tuple` of (sum_metric, num_inst) or return
an `int` sum_metric.
Parameters
----------
feval : callable(label, pred)
Customized evaluation function.
name : str, optional
The name of the metric. (the default is None).
allow_extra_outputs : bool, optional
If true, the prediction outputs can have extra outputs.
This is useful in RNN, where the states are also produced
in outputs for forwarding. (the default is False).
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))]
>>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))]
>>> feval = lambda x, y : (x + y).mean()
>>> eval_metrics = mx.gluon.metric.CustomMetric(feval=feval)
>>> eval_metrics.update(labels, predicts)
>>> print eval_metrics.get()
('custom(<lambda>)', 6.0)
"""
def __init__(self, feval, name=None, allow_extra_outputs=False,
output_names=None, label_names=None):
if name is None:
name = feval.__name__
if name.find('<') != -1:
name = 'custom(%s)' % name
super(CustomMetric, self).__init__(
name, feval=feval,
allow_extra_outputs=allow_extra_outputs,
output_names=output_names, label_names=label_names)
self._feval = feval
self._allow_extra_outputs = allow_extra_outputs
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
if not self._allow_extra_outputs:
labels, preds = check_label_shapes(labels, preds, True)
for pred, label in zip(preds, labels):
label = label.as_np_ndarray()
pred = pred.as_np_ndarray().as_in_ctx(label.ctx)
reval = self._feval(label, pred)
if isinstance(reval, tuple):
(sum_metric, num_inst) = reval
self.sum_metric += sum_metric
self.num_inst += num_inst
else:
self.sum_metric += reval
self.num_inst += 1
def get_config(self):
raise NotImplementedError("CustomMetric cannot be serialized")
# pylint: disable=invalid-name
def np(numpy_feval, name=None, allow_extra_outputs=False):
"""Creates a custom evaluation metric that receives its inputs as numpy arrays.
Parameters
----------
numpy_feval : callable(label, pred)
Custom evaluation function that receives labels and predictions for a minibatch
as numpy arrays and returns the corresponding custom metric as a floating point number.
name : str, optional
Name of the custom metric.
allow_extra_outputs : bool, optional
Whether prediction output is allowed to have extra outputs. This is useful in cases
like RNN where states are also part of output which can then be fed back to the RNN
in the next step. By default, extra outputs are not allowed.
Returns
-------
float
Custom metric corresponding to the provided labels and predictions.
Example
-------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label-pred))
...
>>> metric = mx.gluon.metric.np(custom_metric)
"""
def feval(label, pred):
"""Internal eval function."""
return numpy_feval(label, pred)
feval.__name__ = numpy_feval.__name__
return CustomMetric(feval, name, allow_extra_outputs)
# pylint: enable=invalid-name
| apache-2.0 | 5,248,670,525,985,007,000 | 33.610814 | 117 | 0.584412 | false |
Zac-HD/home-assistant | homeassistant/components/sensor/steam_online.py | 17 | 2490 | """
Sensor for Steam account status.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.steam_online/
"""
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['steamodd==4.21']
CONF_ACCOUNTS = 'accounts'
ICON = 'mdi:steam'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_ACCOUNTS, default=[]):
vol.All(cv.ensure_list, [cv.string]),
})
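# Example configuration.yaml entry accepted by this schema (placeholder values,
# not taken from this file):
#
# sensor:
#   - platform: steam_online
#     api_key: YOUR_STEAM_API_KEY
#     accounts:
#       - 76561198000000000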
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Steam platform."""
import steam as steamod
steamod.api.key.set(config.get(CONF_API_KEY))
add_devices(
[SteamSensor(account,
steamod) for account in config.get(CONF_ACCOUNTS)])
class SteamSensor(Entity):
"""A class for the Steam account."""
def __init__(self, account, steamod):
"""Initialize the sensor."""
self._steamod = steamod
self._account = account
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._profile.persona
@property
def entity_id(self):
"""Return the entity ID."""
return 'sensor.steam_{}'.format(self._account)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
# pylint: disable=no-member
def update(self):
"""Update device state."""
self._profile = self._steamod.user.profile(self._account)
if self._profile.current_game[2] is None:
self._game = 'None'
else:
self._game = self._profile.current_game[2]
self._state = {
1: 'Online',
2: 'Busy',
3: 'Away',
4: 'Snooze',
5: 'Trade',
6: 'Play',
}.get(self._profile.status, 'Offline')
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {'Game': self._game}
@property
def entity_picture(self):
"""Avatar of the account."""
return self._profile.avatar_medium
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
| apache-2.0 | 3,849,808,604,584,554,500 | 26.362637 | 74 | 0.613655 | false |
ekini/micolog_ru | app/trackback.py | 10 | 3389 | """tblib.py: A Trackback (client) implementation in Python
"""
__author__ = "Matt Croydon <[email protected]>"
__copyright__ = "Copyright 2003, Matt Croydon"
__license__ = "GPL"
__version__ = "0.1.0"
__history__ = """
0.1.0: 1/29/03 - Code cleanup, release. It can send pings, and autodiscover a URL to ping.
0.0.9: 1/29/03 - Basic error handling and autodiscovery works!
0.0.5: 1/29/03 - Internal development version. Working on autodiscovery and error handling.
0.0.4: 1/22/03 - First public release, code cleanup.
0.0.3: 1/22/03 - Removed hard coding that was used for testing.
0.0.2: 1/21/03 - First working version.
0.0.1: 1/21/03 - Initial version. Thanks to Mark Pilgrim for helping me figure some module basics out.
"""
import httplib, urllib, urlparse, re
from google.appengine.api import urlfetch
import logging
"""Everything I needed to know about trackback I learned from the trackback tech specs page
http://www.movabletype.org/docs/mttrackback.html. All arguments are optional. This allows us to create an empty TrackBack object,
then use autodiscovery to populate its attributes.
"""
class TrackBack:
def __init__(self, tbUrl=None, title=None, excerpt=None, url=None, blog_name=None):
self.tbUrl = tbUrl
self.title = title
self.excerpt = excerpt
self.url = url
self.blog_name = blog_name
self.tbErrorCode = None
self.tbErrorMessage = None
def ping(self):
# Only execute if a trackback url has been defined.
if self.tbUrl:
# Create paramaters and make them play nice with HTTP
# Python's httplib example helps a lot:
# http://python.org/doc/current/lib/httplib-examples.html
params = urllib.urlencode({'title': self.title, 'url': self.url, 'excerpt': self.excerpt, 'blog_name': self.blog_name})
headers = ({"Content-type": "application/x-www-form-urlencoded",
"User-Agent": "micolog"})
# urlparse is my hero
# http://www.python.org/doc/current/lib/module-urlparse.html
logging.info("ping...%s",params)
response=urlfetch.fetch(self.tbUrl,method=urlfetch.POST,payload=params,headers=headers)
self.httpResponse = response.status_code
data = response.content
self.tbResponse = data
logging.info("ping...%s"%data)
# Thanks to Steve Holden's book: _Python Web Programming_ (http://pydish.holdenweb.com/pwp/)
# Why parse really simple XML when you can just use regular expressions? Rawk.
errorpattern = r'<error>(.*?)</error>'
reg = re.search(errorpattern, self.tbResponse)
if reg:
self.tbErrorCode = reg.group(1)
if int(self.tbErrorCode) == 1:
errorpattern2 = r'<message>(.*?)</message>'
reg2 = re.search(errorpattern2, self.tbResponse)
if reg2:
self.tbErrorMessage = reg2.group(1)
else:
return 1
def autodiscover(self, urlToCheck):
response=urlfetch.fetch(urlToCheck)
data = response.content
tbpattern = r'trackback:ping="(.*?)"'
reg = re.search(tbpattern, data)
if reg:
self.tbUrl = reg.group(1) | mit | 170,196,212,935,383,140 | 43.213333 | 131 | 0.612275 | false |
cydcowley/Imperial-Visualizations | visuals_maths/VC-Polar/script/numeric/tools/selenium_links.py | 28 | 1550 | from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
import time
import traceback
import sys
import urllib
import re
url = ""
def test(links,driver):
p=0
f=0
t=0
for k in range(len(links)):
x = links[k]
foo = ""
t=t+1
try:
link = driver.find_element_by_id(x[0])
link.click()
time.sleep(3)
foo = driver.page_source
driver.back()
assert(x[1] in foo)
print k,"PASS:",x[0],"==>",x[1],"in page"
p=p+1
except:
print k,"FAIL:",x[0],"==>",x[1],"not in page"
traceback.print_exc()
f=f+1
print 'Link testing complete. PASS:',p,'FAIL:',f,'Total:',t
url = ""
if len(sys.argv) > 1:
client = sys.argv[1]
if(len(sys.argv)>2):
url = sys.argv[2]
else:
client = "Firefox"
if url == "":
url = "http://127.0.0.1/staging/"
mainlinks = [("linkhome","Numerical analysis in Javascript"),
("linkworkshop","IN"),
("linkdoc","vectors and matrices"),
("linklib","var numeric"),
("linklibmin","var numeric="),]
driver=0
print "Link testing."
try:
driver = eval('webdriver.'+client+'()')
print "Using",client
driver.implicitly_wait(10)
driver.get(url)
time.sleep(1);
test(mainlinks,driver)
driver.quit()
except:
print "Could not do browser",client
if driver:
driver.quit()
| mit | -9,113,981,134,049,957,000 | 23.21875 | 63 | 0.549677 | false |
rabipanda/tensorflow | tensorflow/contrib/kfac/python/ops/fisher_blocks_lib.py | 4 | 1509 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FisherBlock definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.kfac.python.ops.fisher_blocks import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
_allowed_symbols = [
'FisherBlock',
'FullFB',
'NaiveDiagonalFB',
'FullyConnectedDiagonalFB',
'KroneckerProductFB',
'FullyConnectedKFACBasicFB',
'ConvKFCBasicFB',
'ConvDiagonalFB',
'set_global_constants',
'compute_pi_tracenorm',
'compute_pi_adjusted_damping',
'num_conv_locations',
'normalize_damping'
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| apache-2.0 | 4,860,322,328,813,758,000 | 34.928571 | 80 | 0.706428 | false |
srznew/pacemaker | cts/CM_lha.py | 10 | 20829 | '''CTS: Cluster Testing System: LinuxHA v2 dependent modules...
'''
__copyright__ = '''
Author: Huang Zhen <[email protected]>
Copyright (C) 2004 International Business Machines
Additional Audits, Revised Start action, Default Configuration:
Copyright (C) 2004 Andrew Beekhof <[email protected]>
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
from cts.CTSvars import *
from cts.CTS import *
from cts.CIB import *
from cts.CTStests import AuditResource
from cts.watcher import LogWatcher
try:
from xml.dom.minidom import *
except ImportError:
sys.__stdout__.write("Python module xml.dom.minidom not found\n")
sys.__stdout__.write("Please install python-xml or similar before continuing\n")
sys.__stdout__.flush()
sys.exit(1)
#######################################################################
#
# LinuxHA v2 dependent modules
#
#######################################################################
class crm_lha(ClusterManager):
'''
The linux-ha version 2 cluster manager class.
It implements the things we need to talk to and manipulate
linux-ha version 2 clusters
'''
def __init__(self, Environment, randseed=None, name=None):
ClusterManager.__init__(self, Environment, randseed=randseed)
#HeartbeatCM.__init__(self, Environment, randseed=randseed)
#if not name: name="crm-lha"
#self["Name"] = name
#self.name = name
self.fastfail = 0
self.clear_cache = 0
self.cib_installed = 0
self.config = None
self.cluster_monitor = 0
self.use_short_names = 1
if self.Env["DoBSC"]:
del self.templates["Pat:They_stopped"]
del self.templates["Pat:Logd_stopped"]
self.Env["use_logd"] = 0
self._finalConditions()
self.check_transitions = 0
self.check_elections = 0
self.CIBsync = {}
self.CibFactory = ConfigFactory(self)
self.cib = self.CibFactory.createConfig(self.Env["Schema"])
def errorstoignore(self):
# At some point implement a more elegant solution that
# also produces a report at the end
        '''Return list of errors which are known and very noisy and should be ignored'''
return PatternSelector().get_patterns(self.name, "BadNewsIgnore")
def install_config(self, node):
if not self.ns.WaitForNodeToComeUp(node):
self.log("Node %s is not up." % node)
return None
if not node in self.CIBsync and self.Env["ClobberCIB"] == 1:
self.CIBsync[node] = 1
self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*")
# Only install the CIB on the first node, all the other ones will pick it up from there
if self.cib_installed == 1:
return None
self.cib_installed = 1
if self.Env["CIBfilename"] == None:
self.log("Installing Generated CIB on node %s" % (node))
self.cib.install(node)
else:
self.log("Installing CIB (%s) on node %s" % (self.Env["CIBfilename"], node))
if 0 != self.rsh.cp(self.Env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node)):
raise ValueError("Can not scp file to %s %d"%(node))
self.rsh(node, "chown "+CTSvars.CRM_DAEMON_USER+" "+CTSvars.CRM_CONFIG_DIR+"/cib.xml")
def prepare(self):
'''Finish the Initialization process. Prepare to test...'''
self.partitions_expected = 1
for node in self.Env["nodes"]:
self.ShouldBeStatus[node] = ""
self.unisolate_node(node)
self.StataCM(node)
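    # test_node_CM() return values used below: 0 = down, 1 = up but not yet
    # stable, 2 = up and stable (S_IDLE or S_NOT_DC observed).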
def test_node_CM(self, node):
'''Report the status of the cluster manager on a given node'''
watchpats = [ ]
watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)")
watchpats.append(self.templates["Pat:Slave_started"]%node)
watchpats.append(self.templates["Pat:Master_started"]%node)
idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterIdle", hosts=[node], kind=self.Env["LogWatcher"])
idle_watch.setwatch()
out = self.rsh(node, self.templates["StatusCmd"]%node, 1)
self.debug("Node %s status: '%s'" %(node, out))
if not out or string.find(out, 'ok') < 0:
if self.ShouldBeStatus[node] == "up":
self.log(
"Node status for %s is %s but we think it should be %s"
% (node, "down", self.ShouldBeStatus[node]))
self.ShouldBeStatus[node] = "down"
return 0
if self.ShouldBeStatus[node] == "down":
self.log(
"Node status for %s is %s but we think it should be %s: %s"
% (node, "up", self.ShouldBeStatus[node], out))
self.ShouldBeStatus[node] = "up"
        # check the output first - because syslog-ng loses messages
if string.find(out, 'S_NOT_DC') != -1:
# Up and stable
return 2
if string.find(out, 'S_IDLE') != -1:
# Up and stable
return 2
# fall back to syslog-ng and wait
if not idle_watch.look():
# just up
self.debug("Warn: Node %s is unstable: %s" % (node, out))
return 1
# Up and stable
return 2
# Is the node up or is the node down
def StataCM(self, node):
'''Report the status of the cluster manager on a given node'''
if self.test_node_CM(node) > 0:
return 1
return None
# Being up and being stable is not the same question...
def node_stable(self, node):
'''Report the status of the cluster manager on a given node'''
if self.test_node_CM(node) == 2:
return 1
self.log("Warn: Node %s not stable" % (node))
return None
def partition_stable(self, nodes, timeout=None):
watchpats = [ ]
watchpats.append("Current ping state: S_IDLE")
watchpats.append(self.templates["Pat:DC_IDLE"])
self.debug("Waiting for cluster stability...")
if timeout == None:
timeout = self.Env["DeadTime"]
if len(nodes) < 3:
self.debug("Cluster is inactive")
return 1
idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterStable", timeout, hosts=nodes.split(), kind=self.Env["LogWatcher"])
idle_watch.setwatch()
for node in nodes.split():
# have each node dump its current state
self.rsh(node, self.templates["StatusCmd"] % node, 1)
ret = idle_watch.look()
while ret:
self.debug(ret)
for node in nodes.split():
if re.search(node, ret):
return 1
ret = idle_watch.look()
self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout))
return None
def cluster_stable(self, timeout=None, double_check=False):
partitions = self.find_partitions()
for partition in partitions:
if not self.partition_stable(partition, timeout):
return None
if double_check:
# Make sure we are really stable and that all resources,
# including those that depend on transient node attributes,
# are started if they were going to be
time.sleep(5)
for partition in partitions:
if not self.partition_stable(partition, timeout):
return None
return 1
def is_node_dc(self, node, status_line=None):
rc = 0
if not status_line:
status_line = self.rsh(node, self.templates["StatusCmd"]%node, 1)
if not status_line:
rc = 0
elif string.find(status_line, 'S_IDLE') != -1:
rc = 1
elif string.find(status_line, 'S_INTEGRATION') != -1:
rc = 1
elif string.find(status_line, 'S_FINALIZE_JOIN') != -1:
rc = 1
elif string.find(status_line, 'S_POLICY_ENGINE') != -1:
rc = 1
elif string.find(status_line, 'S_TRANSITION_ENGINE') != -1:
rc = 1
return rc
def active_resources(self, node):
# [SM].* {node} matches Started, Slave, Master
        # Stopped won't be matched as it won't include {node}
(rc, output) = self.rsh(node, """crm_resource -c""", None)
resources = []
for line in output:
if re.search("^Resource", line):
tmp = AuditResource(self, line)
if tmp.type == "primitive" and tmp.host == node:
resources.append(tmp.id)
return resources
def ResourceLocation(self, rid):
ResourceNodes = []
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
cmd = self.templates["RscRunning"] % (rid)
(rc, lines) = self.rsh(node, cmd, None)
if rc == 127:
self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd)
for line in lines:
self.log("Output: "+line)
elif rc == 0:
ResourceNodes.append(node)
return ResourceNodes
def find_partitions(self):
ccm_partitions = []
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
partition = self.rsh(node, self.templates["PartitionCmd"], 1)
if not partition:
self.log("no partition details for %s" % node)
elif len(partition) > 2:
nodes = partition.split()
nodes.sort()
partition = string.join(nodes, ' ')
found = 0
for a_partition in ccm_partitions:
if partition == a_partition:
found = 1
if found == 0:
self.debug("Adding partition from %s: %s" % (node, partition))
ccm_partitions.append(partition)
else:
self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node))
else:
self.log("bad partition details for %s" % node)
else:
self.debug("Node %s is down... skipping" % node)
self.debug("Found partitions: %s" % repr(ccm_partitions) )
return ccm_partitions
def HasQuorum(self, node_list):
# If we are auditing a partition, then one side will
# have quorum and the other not.
# So the caller needs to tell us which we are checking
# If no value for node_list is specified... assume all nodes
if not node_list:
node_list = self.Env["nodes"]
for node in node_list:
if self.ShouldBeStatus[node] == "up":
quorum = self.rsh(node, self.templates["QuorumCmd"], 1)
if string.find(quorum, "1") != -1:
return 1
elif string.find(quorum, "0") != -1:
return 0
else:
self.debug("WARN: Unexpected quorum test result from " + node + ":" + quorum)
return 0
def Components(self):
complist = []
common_ignore = [
"Pending action:",
"(ERROR|error): crm_log_message_adv:",
"(ERROR|error): MSG: No message to dump",
"pending LRM operations at shutdown",
"Lost connection to the CIB service",
"Connection to the CIB terminated...",
"Sending message to CIB service FAILED",
"Action A_RECOVER .* not supported",
"(ERROR|error): stonithd_op_result_ready: not signed on",
"pingd.*(ERROR|error): send_update: Could not send update",
"send_ipc_message: IPC Channel to .* is not connected",
"unconfirmed_actions: Waiting on .* unconfirmed actions",
"cib_native_msgready: Message pending on command channel",
r": Performing A_EXIT_1 - forcefully exiting the CRMd",
r"Resource .* was active at shutdown. You may ignore this error if it is unmanaged.",
]
stonith_ignore = [
r"Updating failcount for child_DoFencing",
r"(ERROR|error).*: Sign-in failed: triggered a retry",
"lrmd.*(ERROR|error): stonithd_receive_ops_result failed.",
]
stonith_ignore.extend(common_ignore)
ccm_ignore = [
"(ERROR|error): get_channel_token: No reply message - disconnected"
]
ccm_ignore.extend(common_ignore)
ccm = Process(self, "ccm", triggersreboot=self.fastfail, pats = [
"State transition .* S_RECOVERY",
"CCM connection appears to have failed",
"crmd.*Action A_RECOVER .* not supported",
"crmd.*Input I_TERMINATE from do_recover",
"Exiting to recover from CCM connection failure",
r"crmd.*: Could not recover from internal error",
"crmd.*I_ERROR.*(ccm_dispatch|crmd_cib_connection_destroy)",
"crmd.*exited with return code 2.",
"attrd.*exited with return code 1.",
"cib.*exited with return code 2.",
# Not if it was fenced
# "A new node joined the cluster",
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling Node .* for STONITH",
# "Executing .* fencing operation",
# "tengine_stonith_callback: .*result=0",
# "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE",
# "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN",
"State transition S_STARTING -> S_PENDING",
], badnews_ignore = ccm_ignore)
cib = Process(self, "cib", triggersreboot=self.fastfail, pats = [
"State transition .* S_RECOVERY",
"Lost connection to the CIB service",
"Connection to the CIB terminated...",
"crmd.*Input I_TERMINATE from do_recover",
"crmd.*I_ERROR.*crmd_cib_connection_destroy",
r"crmd.*: Could not recover from internal error",
"crmd.*exited with return code 2.",
"attrd.*exited with return code 1.",
], badnews_ignore = common_ignore)
lrmd = Process(self, "lrmd", triggersreboot=self.fastfail, pats = [
"State transition .* S_RECOVERY",
"LRM Connection failed",
"crmd.*I_ERROR.*lrm_connection_destroy",
"State transition S_STARTING -> S_PENDING",
"crmd.*Input I_TERMINATE from do_recover",
r"crmd.*: Could not recover from internal error",
"crmd.*exited with return code 2.",
], badnews_ignore = common_ignore)
crmd = Process(self, "crmd", triggersreboot=self.fastfail, pats = [
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling Node .* for STONITH",
# "Executing .* fencing operation",
# "tengine_stonith_callback: .*result=0",
"State transition .* S_IDLE",
"State transition S_STARTING -> S_PENDING",
], badnews_ignore = common_ignore)
pengine = Process(self, "pengine", triggersreboot=self.fastfail, pats = [
"State transition .* S_RECOVERY",
"crmd.*exited with return code 2.",
"crmd.*Input I_TERMINATE from do_recover",
r"crmd.*: Could not recover from internal error",
r"crmd.*CRIT.*: Connection to the Policy Engine failed",
"crmd.*I_ERROR.*save_cib_contents",
"crmd.*exited with return code 2.",
], badnews_ignore = common_ignore, dc_only=1)
if self.Env["DoFencing"] == 1 :
complist.append(Process(self, "stoniths", triggersreboot=self.fastfail, dc_pats = [
r"crmd.*CRIT.*: Fencing daemon connection failed",
"Attempting connection to fencing daemon",
], badnews_ignore = stonith_ignore))
if self.fastfail == 0:
ccm.pats.extend([
"attrd .* exited with return code 1",
"(ERROR|error): Respawning client .*attrd",
"cib.* exited with return code 2",
"(ERROR|error): Respawning client .*cib",
"crmd.* exited with return code 2",
"(ERROR|error): Respawning client .*crmd"
])
cib.pats.extend([
"attrd.* exited with return code 1",
"(ERROR|error): Respawning client .*attrd",
"crmd.* exited with return code 2",
"(ERROR|error): Respawning client .*crmd"
])
lrmd.pats.extend([
"crmd.* exited with return code 2",
"(ERROR|error): Respawning client .*crmd"
])
pengine.pats.extend([
"(ERROR|error): Respawning client .*crmd"
])
complist.append(ccm)
complist.append(cib)
complist.append(lrmd)
complist.append(crmd)
complist.append(pengine)
return complist
def NodeUUID(self, node):
lines = self.rsh(node, self.templates["UUIDQueryCmd"], 1)
for line in lines:
self.debug("UUIDLine:" + line)
m = re.search(r'%s.+\((.+)\)' % node, line)
if m:
return m.group(1)
return ""
def StandbyStatus(self, node):
out=self.rsh(node, self.templates["StandbyQueryCmd"] % node, 1)
if not out:
return "off"
out = out[:-1]
self.debug("Standby result: "+out)
return out
# status == "on" : Enter Standby mode
# status == "off": Enter Active mode
def SetStandbyMode(self, node, status):
current_status = self.StandbyStatus(node)
cmd = self.templates["StandbyCmd"] % (node, status)
ret = self.rsh(node, cmd)
return True
def AddDummyRsc(self, node, rid):
rsc_xml = """ '<resources>
<primitive class=\"ocf\" id=\"%s\" provider=\"pacemaker\" type=\"Dummy\">
<operations>
                        <op id=\"%s-interval-10s\" interval=\"10s\" name=\"monitor\"/>
</operations>
</primitive>
</resources>'""" % (rid, rid)
constraint_xml = """ '<constraints>
<rsc_location id=\"location-%s-%s\" node=\"%s\" rsc=\"%s\" score=\"INFINITY\"/>
</constraints>'
""" % (rid, node, node, rid)
self.rsh(node, self.templates['CibAddXml'] % (rsc_xml))
self.rsh(node, self.templates['CibAddXml'] % (constraint_xml))
def RemoveDummyRsc(self, node, rid):
constraint = "\"//rsc_location[@rsc='%s']\"" % (rid)
rsc = "\"//primitive[@id='%s']\"" % (rid)
self.rsh(node, self.templates['CibDelXpath'] % constraint)
self.rsh(node, self.templates['CibDelXpath'] % rsc)
#######################################################################
#
# A little test code...
#
# Which you are advised to completely ignore...
#
#######################################################################
if __name__ == '__main__':
pass
| gpl-2.0 | -6,502,040,889,149,689,000 | 38.374291 | 143 | 0.532047 | false |
ettrig/NIPAP | nipap-www/setup.py | 4 | 1117 | from setuptools import setup, find_packages
import nipapwww
setup(
name='nipap-www',
version=nipapwww.__version__,
description='web frontend for NIPAP',
author=nipapwww.__author__,
author_email=nipapwww.__author_email__,
url=nipapwww.__url__,
install_requires=[
"Pylons>=1.0",
"Jinja2",
"pynipap",
"nipap"
],
license=nipapwww.__license__,
# setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'nipapwww': ['i18n/*/LC_MESSAGES/*.mo']},
data_files = [
( '/etc/nipap/', [ 'nipap-www.ini', 'nipap-www.wsgi' ] ),
( '/var/cache/nipap-www/', [] )
],
#message_extractors={'nipapwww': [
# ('**.py', 'python', None),
# ('public/**', 'ignore', None)]},
zip_safe=False,
# paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = nipapwww.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
| mit | 2,465,306,052,816,235,500 | 27.641026 | 65 | 0.575649 | false |
kenorb-contrib/BitTorrent | twisted/internet/iocpreactor/udp.py | 2 | 8229 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import socket
import struct
import operator
from twisted.internet import interfaces, defer, error, protocol, address
from twisted.internet.udp import MulticastMixin
from twisted.internet.abstract import isIPAddress
from twisted.persisted import styles
from twisted.python import log, failure, reflect
from ops import ReadFileOp, WriteFileOp, WSARecvFromOp, WSASendToOp
from util import StateEventMachineType
from zope.interface import implements
ERROR_PORT_UNREACHABLE = 1234
class Port(log.Logger, styles.Ephemeral, object):
__metaclass__ = StateEventMachineType
implements(interfaces.IUDPTransport, interfaces.ISystemHandle)
events = ["startListening", "stopListening", "write", "readDone", "readErr", "writeDone", "writeErr", "connect"]
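    # The StateEventMachineType metaclass appears to generate a dispatcher for
    # each name in ``events``: a call such as self.write(...) is routed to
    # handle_<self.state>_write (handle_listening_write, handle_connected_write, ...).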
sockinfo = (socket.AF_INET, socket.SOCK_DGRAM, 0)
read_op_class = WSARecvFromOp
write_op_class = WSASendToOp
reading = False
# Actual port number being listened on, only set to a non-None
# value when we are actually listening.
_realPortNumber = None
disconnected = property(lambda self: self.state == "disconnected")
def __init__(self, bindAddress, proto, maxPacketSize=8192):
assert isinstance(proto, protocol.DatagramProtocol)
self.state = "disconnected"
from twisted.internet import reactor
self.bindAddress = bindAddress
self._connectedAddr = None
self.protocol = proto
self.maxPacketSize = maxPacketSize
self.logstr = reflect.qual(self.protocol.__class__) + " (UDP)"
self.read_op = self.read_op_class(self)
self.readbuf = reactor.AllocateReadBuffer(maxPacketSize)
self.reactor = reactor
def __repr__(self):
if self._realPortNumber is not None:
return "<%s on %s>" % (self.protocol.__class__, self._realPortNumber)
else:
return "<%s not connected>" % (self.protocol.__class__,)
def handle_listening_connect(self, host, port):
if not isIPAddress(host):
raise ValueError, "please pass only IP addresses, not domain names"
self.state = "connecting"
return defer.maybeDeferred(self._connectDone, host, port)
def handle_connecting_connect(self, host, port):
raise RuntimeError, "already connected, reconnecting is not currently supported (talk to itamar if you want this)"
handle_connected_connect = handle_connecting_connect
def _connectDone(self, host, port):
self._connectedAddr = (host, port)
self.state = "connected"
self.socket.connect((host, port))
return self._connectedAddr
def handle_disconnected_startListening(self):
self._bindSocket()
host, port = self.bindAddress
if isIPAddress(host):
return defer.maybeDeferred(self._connectSocket, host)
else:
d = self.reactor.resolve(host)
d.addCallback(self._connectSocket)
return d
def _bindSocket(self):
try:
skt = socket.socket(*self.sockinfo)
skt.bind(self.bindAddress)
# print "bound %s to %s" % (skt.fileno(), self.bindAddress)
except socket.error, le:
raise error.CannotListenError, (None, None, le)
# Make sure that if we listened on port 0, we update that to
# reflect what the OS actually assigned us.
self._realPortNumber = skt.getsockname()[1]
log.msg("%s starting on %s"%(self.protocol.__class__, self._realPortNumber))
self.socket = skt
def _connectSocket(self, host):
self.bindAddress = (host, self.bindAddress[1])
self.protocol.makeConnection(self)
self.startReading()
self.state = "listening"
def startReading(self):
self.reading = True
try:
self.read_op.initiateOp(self.socket.fileno(), self.readbuf)
except WindowsError, we:
log.msg("initiating read failed with args %s" % (we,))
def stopReading(self):
self.reading = False
def handle_listening_readDone(self, bytes, addr = None):
if addr:
try:
self.protocol.datagramReceived(self.readbuf[:bytes], addr)
except:
log.err()
else:
self.protocol.datagramReceived(self.readbuf[:bytes])
if self.reading:
self.startReading()
handle_connecting_readDone = handle_listening_readDone
handle_connected_readDone = handle_listening_readDone
def handle_listening_readErr(self, ret, bytes):
log.msg("read failed with err %s" % (ret,))
# TODO: use Failures or something
if ret == ERROR_PORT_UNREACHABLE:
self.protocol.connectionRefused()
if self.reading:
self.startReading()
handle_connecting_readErr = handle_listening_readErr
handle_connected_readErr = handle_listening_readErr
def handle_disconnected_readErr(self, ret, bytes):
pass # no kicking the dead horse
def handle_disconnected_readDone(self, bytes, addr = None):
pass # no kicking the dead horse
def handle_listening_write(self, data, addr):
self.performWrite(data, addr)
def handle_connected_write(self, data, addr = None):
assert addr in (None, self._connectedAddr)
self.performWrite(data, addr)
def performWrite(self, data, addr = None):
# print "performing write on", data, addr
self.writing = True
try:
write_op = self.write_op_class(self)
if not addr:
addr = self._connectedAddr
write_op.initiateOp(self.socket.fileno(), data, addr)
# print "initiating write_op to", addr
except WindowsError, we:
log.msg("initiating write failed with args %s" % (we,))
def handle_listening_writeDone(self, bytes):
log.msg("write success with bytes %s" % (bytes,))
# self.callBufferHandlers(event = "buffer empty")
handle_connecting_writeDone = handle_listening_writeDone
handle_connected_writeDone = handle_listening_writeDone
def handle_listening_writeErr(self, ret, bytes):
log.msg("write failed with err %s" % (ret,))
if ret == ERROR_PORT_UNREACHABLE:
self.protocol.connectionRefused()
handle_connecting_writeErr = handle_listening_writeErr
handle_connected_writeErr = handle_listening_writeErr
def handle_disconnected_writeErr(self, ret, bytes):
pass # no kicking the dead horse
def handle_disconnected_writeDone(self, bytes):
pass # no kicking the dead horse
def writeSequence(self, seq, addr):
self.write("".join(seq), addr)
def handle_listening_stopListening(self):
self.stopReading()
self.connectionLost()
handle_connecting_stopListening = handle_listening_stopListening
handle_connected_stopListening = handle_listening_stopListening
def connectionLost(self, reason=None):
log.msg('(Port %s Closed)' % self._realPortNumber)
self._realPortNumber = None
self.protocol.doStop()
self.socket.close()
del self.socket
self.state = "disconnected"
def logPrefix(self):
return self.logstr
def getHost(self):
return address.IPv4Address('UDP', *(self.socket.getsockname() + ('INET_UDP',)))
def getHandle(self):
"""Return the socket for this connection."""
return self.socket
class MulticastPort(MulticastMixin, Port):
"""UDP Port that supports multicasting."""
implements(interfaces.IMulticastTransport)
def __init__(self, bindAddress, proto, maxPacketSize=8192, listenMultiple=False):
Port.__init__(self, bindAddress, proto, maxPacketSize)
self.listenMultiple = listenMultiple
def createInternetSocket(self):
skt = Port.createInternetSocket(self)
if self.listenMultiple:
skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
return skt
| gpl-3.0 | -2,383,504,930,815,237,000 | 36.235294 | 122 | 0.651598 | false |
dchaplinsky/garnahata.in.ua | garnahata_site/catalog/management/commands/load_wp.py | 1 | 3144 | import os.path
import csv
import re
from django.core.management.base import BaseCommand, CommandError
from catalog.models import Address
# CSV was retrieved with help of this SQL
# SELECT
# post_title,
# post_content,
# garnahata.wp_leafletmapsmarker_markers.*,
# SUBSTRING(
# post_content,
# LOCATE('[wp_excel_cms name="', post_content) +
# length('[wp_excel_cms name="'),
# locate('"', post_content,
# LOCATE('[wp_excel_cms name="', post_content) +
# length('[wp_excel_cms name="')) -
# LOCATE('[wp_excel_cms name="', post_content) -
# length('[wp_excel_cms name="')
# ) as filename,
# SUBSTRING(
# post_content,
# LOCATE('[mapsmarker marker="', post_content) +
# length('[mapsmarker marker="'),
# locate('"', post_content,
# LOCATE('[mapsmarker marker="', post_content) +
# length('[mapsmarker marker="')) -
# LOCATE('[mapsmarker marker="', post_content) -
# length('[mapsmarker marker="')
# ) as map_id
# FROM garnahata.wp_posts
# join garnahata.wp_leafletmapsmarker_markers on
# wp_leafletmapsmarker_markers.id = SUBSTRING(
# post_content,
# LOCATE('[mapsmarker marker="', post_content) +
# length('[mapsmarker marker="'),
# locate('"', post_content,
# LOCATE('[mapsmarker marker="', post_content) +
# length('[mapsmarker marker="')) -
# LOCATE('[mapsmarker marker="', post_content) -
# length('[mapsmarker marker="')
# )
# where post_status="publish" and post_type="post" and
# post_content like "%wp_excel_cms%" and post_content like "%mapsmarker%";
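# The reader below expects at least these columns from that query:
# post_title, post_content, address, markername, lat, lon and filename.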
class Command(BaseCommand):
args = '<file_path>'
help = ('Loads the CSV export from wordpress')
def handle(self, *args, **options):
try:
file_path = args[0]
basedir = os.path.dirname(os.path.abspath(file_path))
except IndexError:
raise CommandError('First argument must be a source file')
with open(file_path, 'r', newline='\n', encoding='utf-8') as source:
reader = csv.DictReader(source, delimiter=",")
for row in reader:
excel_file = os.path.join(basedir, row["filename"] + ".xlsx")
m = re.search('href="([^"]+)"', row["post_content"])
link = ""
if m:
link = m.group(1)
row["markername"] = row["markername"].replace(
"\\'", "'").replace('\\"', '"')
addr, _ = Address.objects.get_or_create(
title=row["post_title"],
defaults={
"address": row["address"],
"city": "Київ",
"commercial_name": row["markername"],
"link": link,
"coords": [row["lat"], row["lon"]]
})
addr.import_owners(excel_file)
| mit | -9,012,706,543,481,014,000 | 37.292683 | 77 | 0.506688 | false |
coreos/autotest | frontend/urls.py | 3 | 1392 | import os
from django.conf.urls import defaults
from django.conf import settings
# The next two lines enable the admin and load each admin.py file:
from django.contrib import admin
admin.autodiscover()
RE_PREFIX = '^' + settings.URL_PREFIX
TKO_RE_PREFIX = '^' + settings.TKO_URL_PREFIX
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'autotest.frontend.afe.views.handler500'
urlpatterns = defaults.patterns(
'',
(RE_PREFIX + r'admin/', defaults.include(admin.site.urls)),
(RE_PREFIX, defaults.include('autotest.frontend.afe.urls')),
(TKO_RE_PREFIX, defaults.include('autotest.frontend.tko.urls')),
(RE_PREFIX + r'static/(?P<path>.*)', 'django.views.static.serve',
{'document_root': os.path.join(os.path.dirname(__file__), 'static')}),
)
if os.path.exists(os.path.join(os.path.dirname(__file__),
'tko', 'site_urls.py')):
urlpatterns += defaults.patterns(
'', (TKO_RE_PREFIX, defaults.include('autotest.frontend.tko.site_urls')))
debug_patterns = defaults.patterns(
'',
# redirect /tko and /results to local apache server
(r'^(?P<path>(tko|results)/.*)$',
'autotest.frontend.afe.views.redirect_with_extra_data',
{'url': 'http://%(server_name)s/%(path)s?%(getdata)s'}),
)
if settings.DEBUG:
urlpatterns += debug_patterns
| gpl-2.0 | 8,001,938,237,103,695,000 | 35.631579 | 85 | 0.639368 | false |
fsouza/tdc_django | tdc/tdc/enquetes/views.py | 1 | 1119 | # coding: utf-8
from django import http, shortcuts
from django.template import response
from django.views.generic import base
from tdc.enquetes import models
class EnqueteView(base.View):
def get(self, request, *args, **kwargs):
enquete = shortcuts.get_object_or_404(models.Enquete,
id=kwargs["id"])
ctx = kwargs.get("contexto", {})
ctx["enquete"] = enquete
return response.TemplateResponse(request,
"enquete.html",
ctx)
def post(self, request, *args, **kwargs):
opcao_id = request.POST.get("opcao")
if not opcao_id:
kwargs["contexto"] = {"erro": u"Por favor, escolha uma opção."}
return self.get(request, *args, **kwargs)
opcao = shortcuts.get_object_or_404(models.Opcao,
id=opcao_id,
enquete=kwargs["id"])
models.Voto.objects.create(opcao=opcao)
return http.HttpResponse("voto computado")
| bsd-2-clause | -693,909,208,123,491,500 | 36.233333 | 75 | 0.529096 | false |
dpoulson/r2_control | Hardware/Lights/FlthyHPControl.py | 1 | 7222 | from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
import smbus
import os
import datetime
import time
from r2utils import mainconfig
from flask import Blueprint, request
import configparser
standard_library.install_aliases()
from builtins import hex
from builtins import object
_configfile = mainconfig.mainconfig['config_dir'] + 'flthy.cfg'
_config = configparser.SafeConfigParser({'address': '0x19',
'logfile': 'flthy.log',
'reeltwo': 'false'})
if not os.path.isfile(_configfile):
print("Config file does not exist")
with open(_configfile, 'wt') as configfile:
_config.write(configfile)
_config.read(_configfile)
_defaults = _config.defaults()
_hp_list = ['top', 'front', 'rear', 'back', 'all']
_type_list = ['light', 'servo']
_sequence_list = ['leia', 'projector', 'dimpulse', 'cycle', 'shortcircuit', 'colour', 'rainbow', 'disable', 'enable']
_colour_list = ['red', 'yellow', 'green', 'cyan', 'blue', 'magenta', 'orange', 'purple', 'white', 'random']
_position_list = ['top', 'bottom', 'center', 'left', 'right']
_logdir = mainconfig.mainconfig['logdir']
_logfile = _defaults['logfile']
api = Blueprint('flthy', __name__, url_prefix='/flthy')
@api.route('/raw/<cmd>', methods=['GET'])
def _flthy_raw(cmd):
""" GET to send a raw command to the flthy HP system"""
message = ""
if request.method == 'GET':
message += _flthy.sendRaw(cmd)
return message
@api.route('/sequence/<seq>', methods=['GET'])
def _flthy_seq(seq):
""" GET to send a sequence command to the flthy HP system"""
message = ""
if request.method == 'GET':
message += _flthy.sendSequence(seq)
return message
@api.route('/<hp>/<type>/<seq>/<value>', methods=['GET'])
def _flthy_cmd(hp, type, seq, value):
""" GET to send a command to the flthy HP system"""
message = ""
if request.method == 'GET':
message += _flthy.sendCommand(hp, type, seq, value)
return message
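# Illustrative requests handled by this blueprint (paths assume it is
# registered under /flthy; values are examples only):
#   GET /flthy/sequence/leia          - run the Leia sequence
#   GET /flthy/top/light/projector/5  - projector effect on the top HP, value code 5
#   GET /flthy/raw/S1                 - pass the raw command "S1" straight through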
class _FlthyHPControl(object):
def __init__(self, address, logdir, reeltwo):
self.address = address
self.reeltwo = reeltwo
self.bus = smbus.SMBus(int(mainconfig.mainconfig['busid']))
self.logdir = logdir
if __debug__:
print("Initialising FlthyHP Control")
print("Address: %s | Bus: %s | logdir: %s | reeltwo: %s" % (self.address, self.bus, self.logdir, self.reeltwo))
def sendSequence(self, seq):
if seq.isdigit():
if __debug__:
print("Integer sent, sending command")
cmd = 'S' + seq
self.sendRaw(cmd)
else:
if __debug__:
print("Not an integer, decode and send command")
if seq == "leia":
if __debug__:
print("Leia mode")
self.sendRaw('S1')
elif seq == "disable":
if __debug__:
print("Clear and Disable")
self.sendRaw('S8')
elif seq == "enable":
if __debug__:
print("Clear and Enable")
self.sendRaw('S9')
return "Ok"
def sendCommand(self, hp, type, seq, value):
# Decoding HP command
if __debug__:
print("HP: %s" % hp)
if (hp.lower() in _hp_list) or (hp in ['T', 'F', 'R', 'A']):
if __debug__:
print("HP selection OK")
if hp.lower() in _hp_list:
hp = hp.lower()
if __debug__:
print("HP word used")
if hp == "front":
hpCmd = "F"
elif hp == "top":
hpCmd = "T"
elif (hp == "rear") or (hp == "back"):
hpCmd = "R"
elif hp == "all":
hpCmd = "A"
else:
if __debug__:
print("HP code used")
hpCmd = hp
else:
print("Illegal HP code")
if (type.lower() in _type_list) or (type in ['0', '1']):
if __debug__:
print("Type selection OK")
if type.lower() in _type_list:
type = type.lower()
if __debug__:
print("Type word used")
if type == "servo":
typeCmd = "1"
elif type == "light":
typeCmd = "0"
else:
if __debug__:
print("Type code used")
typeCmd = type
else:
print("Illegal type code")
if (seq.lower() in _sequence_list) or (seq in ['01', '02', '03', '04', '05', '06', '07', '98', '99']):
if __debug__:
print("Sequence selection OK")
if seq.lower() in _sequence_list:
seq = seq.lower()
if __debug__:
print("Sequence word used")
if seq == "leia":
seqCmd = "01"
elif seq == "projector":
seqCmd = "02"
elif seq == "shortcircuit":
seqCmd = "05"
else:
if __debug__:
print("Sequence code used")
seqCmd = seq
else:
print("Illegal type code")
if typeCmd == "1":
if (value.lower() in _position_list) or (value in ['1', '2', '3', '4', '5', '6', '7', '8']):
if __debug__:
print("Servo command: %s " % value)
if value.lower() in _position_list:
value = value.lower()
else:
if __debug__:
print("Value code used")
valueCmd = value
else:
if (value.lower() in _colour_list) or (value in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']):
if __debug__:
print("Light command: %s " % value)
if value.lower() in _colour_list:
value = value.lower()
else:
if __debug__:
print("Value code used")
valueCmd = value
cmd = hpCmd + typeCmd + seqCmd + valueCmd
self.sendRaw(cmd)
return "OK"
def sendRaw(self, cmd):
command = list(cmd)
hexCommand = list()
if self.reeltwo == True:
if __debug__:
print("ReelTwo Mode");
hexCommand.append(int(hex(ord('H')), 16))
hexCommand.append(int(hex(ord('P')), 16))
for i in command:
h = int(hex(ord(i)), 16)
hexCommand.append(h)
if __debug__:
print(hexCommand)
try:
self.bus.write_i2c_block_data(int(self.address, 16), hexCommand[0], hexCommand[1:])
except:
print("Failed to send bytes")
return "Ok"
_flthy = _FlthyHPControl(_defaults['address'], _defaults['logfile'], _config.getboolean('DEFAULT', 'reeltwo'))
| gpl-2.0 | 6,547,194,968,440,524,000 | 32.906103 | 123 | 0.472861 | false |
PhilSk/zulip | zerver/worker/queue_processors.py | 9 | 17179 | # Documented in http://zulip.readthedocs.io/en/latest/queuing.html
from __future__ import absolute_import
from typing import Any, Callable, Dict, List, Mapping, Optional
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.base import BaseHandler
from zerver.models import get_user_profile_by_email, \
get_user_profile_by_id, get_prereg_user_by_email, get_client, \
UserMessage, Message, Realm
from zerver.lib.context_managers import lockfile
from zerver.lib.error_notify import do_report_error
from zerver.lib.feedback import handle_feedback
from zerver.lib.queue import SimpleQueueClient, queue_json_publish
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.notifications import handle_missedmessage_emails, enqueue_welcome_emails, \
clear_followup_emails_queue, send_local_email_template_with_delay, \
send_missedmessage_email
from zerver.lib.push_notifications import handle_push_notification
from zerver.lib.actions import do_send_confirmation_email, \
do_update_user_activity, do_update_user_activity_interval, do_update_user_presence, \
internal_send_message, check_send_message, extract_recipients, \
render_incoming_message, do_update_embedded_data
from zerver.lib.url_preview import preview as url_preview
from zerver.lib.digest import handle_digest_email
from zerver.lib.email_mirror import process_message as mirror_email
from zerver.decorator import JsonableError
from zerver.tornado.socket import req_redis_key
from confirmation.models import Confirmation
from zerver.lib.db import reset_queries
from zerver.lib.redis_utils import get_redis_client
from zerver.context_processors import common_context
import os
import sys
import ujson
from collections import defaultdict
import email
import time
import datetime
import logging
import requests
import simplejson
from six.moves import cStringIO as StringIO
class WorkerDeclarationException(Exception):
pass
def assign_queue(queue_name, enabled=True, queue_type="consumer"):
# type: (str, bool, Optional[str]) -> Callable[[QueueProcessingWorker], QueueProcessingWorker]
def decorate(clazz):
# type: (QueueProcessingWorker) -> QueueProcessingWorker
clazz.queue_name = queue_name
if enabled:
register_worker(queue_name, clazz, queue_type)
return clazz
return decorate
worker_classes = {} # type: Dict[str, Any] # Any here should be QueueProcessingWorker type
queues = {} # type: Dict[str, Dict[str, QueueProcessingWorker]]
def register_worker(queue_name, clazz, queue_type):
# type: (str, QueueProcessingWorker, str) -> None
if queue_type not in queues:
queues[queue_type] = {}
queues[queue_type][queue_name] = clazz
worker_classes[queue_name] = clazz
def get_worker(queue_name):
# type: (str) -> QueueProcessingWorker
return worker_classes[queue_name]()
def get_active_worker_queues(queue_type=None):
# type: (Optional[str]) -> List[str]
"""Returns all the non-test worker queues."""
if queue_type is None:
return list(worker_classes.keys())
return list(queues[queue_type].keys())
class QueueProcessingWorker(object):
queue_name = None # type: str
def __init__(self):
# type: () -> None
self.q = None # type: SimpleQueueClient
if self.queue_name is None:
raise WorkerDeclarationException("Queue worker declared without queue_name")
def consume(self, data):
# type: (Mapping[str, Any]) -> None
raise WorkerDeclarationException("No consumer defined!")
def consume_wrapper(self, data):
# type: (Mapping[str, Any]) -> None
try:
self.consume(data)
except Exception:
self._log_problem()
if not os.path.exists(settings.QUEUE_ERROR_DIR):
os.mkdir(settings.QUEUE_ERROR_DIR)
fname = '%s.errors' % (self.queue_name,)
fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
line = u'%s\t%s\n' % (time.asctime(), ujson.dumps(data))
lock_fn = fn + '.lock'
with lockfile(lock_fn):
with open(fn, 'ab') as f:
f.write(line.encode('utf-8'))
reset_queries()
def _log_problem(self):
# type: () -> None
logging.exception("Problem handling data on queue %s" % (self.queue_name,))
def setup(self):
# type: () -> None
self.q = SimpleQueueClient()
def start(self):
# type: () -> None
self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
self.q.start_consuming()
def stop(self):
# type: () -> None
self.q.stop_consuming()
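# A minimal sketch of how a new worker plugs into this registry (the queue name
# and behaviour here are illustrative, not an existing queue):
#
# @assign_queue('example_noop')
# class ExampleNoopWorker(QueueProcessingWorker):
#     def consume(self, event):
#         # type: (Mapping[str, Any]) -> None
#         logging.info("consumed: %s" % (event,))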
@assign_queue('signups')
class SignupWorker(QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
# This should clear out any invitation reminder emails
clear_followup_emails_queue(data['email_address'])
if settings.MAILCHIMP_API_KEY and settings.PRODUCTION:
endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \
(settings.MAILCHIMP_API_KEY.split('-')[1], settings.ZULIP_FRIENDS_LIST_ID)
params = dict(data)
params['list_id'] = settings.ZULIP_FRIENDS_LIST_ID
params['status'] = 'subscribed'
r = requests.post(endpoint, auth=('apikey', settings.MAILCHIMP_API_KEY), json=params, timeout=10)
if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists':
logging.warning("Attempted to sign up already existing email to list: %s" %
(data['email_address'],))
else:
r.raise_for_status()
enqueue_welcome_emails(data['email_address'], data['merge_fields']['NAME'])
@assign_queue('invites')
class ConfirmationEmailWorker(QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
invitee = get_prereg_user_by_email(data["email"])
referrer = get_user_profile_by_email(data["referrer_email"])
body = data["email_body"]
do_send_confirmation_email(invitee, referrer, body)
# queue invitation reminder for two days from now.
link = Confirmation.objects.get_link_for_object(invitee, host=referrer.realm.host)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer': referrer,
'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
'support_email': settings.ZULIP_ADMINISTRATOR
})
send_local_email_template_with_delay(
[{'email': data["email"], 'name': ""}],
"zerver/emails/invitation/invitation_reminder_email",
context,
datetime.timedelta(days=2),
tags=["invitation-reminders"],
sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
@assign_queue('user_activity')
class UserActivityWorker(QueueProcessingWorker):
def consume(self, event):
# type: (Mapping[str, Any]) -> None
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
query = event["query"]
do_update_user_activity(user_profile, client, query, log_time)
@assign_queue('user_activity_interval')
class UserActivityIntervalWorker(QueueProcessingWorker):
def consume(self, event):
# type: (Mapping[str, Any]) -> None
user_profile = get_user_profile_by_id(event["user_profile_id"])
log_time = timestamp_to_datetime(event["time"])
do_update_user_activity_interval(user_profile, log_time)
@assign_queue('user_presence')
class UserPresenceWorker(QueueProcessingWorker):
def consume(self, event):
# type: (Mapping[str, Any]) -> None
logging.info("Received event: %s" % (event),)
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
status = event["status"]
do_update_user_presence(user_profile, client, log_time, status)
@assign_queue('missedmessage_emails', queue_type="loop")
class MissedMessageWorker(QueueProcessingWorker):
def start(self):
# type: () -> None
while True:
missed_events = self.q.drain_queue("missedmessage_emails", json=True)
by_recipient = defaultdict(list) # type: Dict[int, List[Dict[str, Any]]]
for event in missed_events:
logging.info("Received event: %s" % (event,))
by_recipient[event['user_profile_id']].append(event)
for user_profile_id, events in by_recipient.items():
handle_missedmessage_emails(user_profile_id, events)
reset_queries()
# Aggregate all messages received every 2 minutes to let someone finish sending a batch
# of messages
time.sleep(2 * 60)
@assign_queue('missedmessage_email_senders')
class MissedMessageSendingWorker(QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
send_missedmessage_email(data)
@assign_queue('missedmessage_mobile_notifications')
class PushNotificationsWorker(QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
handle_push_notification(data['user_profile_id'], data)
def make_feedback_client():
# type: () -> Any # Should be zulip.Client, but not necessarily importable
sys.path.append(os.path.join(os.path.dirname(__file__), '../../api'))
import zulip
return zulip.Client(
client="ZulipFeedback/0.1",
email=settings.DEPLOYMENT_ROLE_NAME,
api_key=settings.DEPLOYMENT_ROLE_KEY,
verbose=True,
site=settings.FEEDBACK_TARGET)
# We probably could stop running this queue worker at all if ENABLE_FEEDBACK is False
@assign_queue('feedback_messages')
class FeedbackBot(QueueProcessingWorker):
def consume(self, event):
# type: (Mapping[str, Any]) -> None
logging.info("Received feedback from %s" % (event["sender_email"],))
handle_feedback(event)
@assign_queue('error_reports')
class ErrorReporter(QueueProcessingWorker):
def start(self):
# type: () -> None
if settings.DEPLOYMENT_ROLE_KEY:
self.staging_client = make_feedback_client()
self.staging_client._register(
'forward_error',
method='POST',
url='deployments/report_error',
make_request=(lambda type, report: {'type': type, 'report': simplejson.dumps(report)}),
)
QueueProcessingWorker.start(self)
def consume(self, event):
# type: (Mapping[str, Any]) -> None
logging.info("Processing traceback with type %s for %s" % (event['type'], event.get('user_email')))
if settings.DEPLOYMENT_ROLE_KEY:
self.staging_client.forward_error(event['type'], event['report'])
elif settings.ERROR_REPORTING:
do_report_error(event['report']['host'], event['type'], event['report'])
@assign_queue('slow_queries', queue_type="loop")
class SlowQueryWorker(QueueProcessingWorker):
def start(self):
# type: () -> None
while True:
self.process_one_batch()
# Aggregate all slow query messages in 1-minute chunks to avoid message spam
time.sleep(1 * 60)
def process_one_batch(self):
# type: () -> None
slow_queries = self.q.drain_queue("slow_queries", json=True)
if settings.ERROR_BOT is None:
return
if len(slow_queries) > 0:
topic = "%s: slow queries" % (settings.EXTERNAL_HOST,)
content = ""
for query in slow_queries:
content += " %s\n" % (query,)
error_bot_realm = get_user_profile_by_email(settings.ERROR_BOT).realm
internal_send_message(error_bot_realm, settings.ERROR_BOT,
"stream", "logs", topic, content)
reset_queries()
@assign_queue("message_sender")
class MessageSenderWorker(QueueProcessingWorker):
def __init__(self):
# type: () -> None
super(MessageSenderWorker, self).__init__()
self.redis_client = get_redis_client()
self.handler = BaseHandler()
self.handler.load_middleware()
def consume(self, event):
# type: (Mapping[str, Any]) -> None
server_meta = event['server_meta']
environ = {
'REQUEST_METHOD': 'SOCKET',
'SCRIPT_NAME': '',
'PATH_INFO': '/json/messages',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': 9993,
'SERVER_PROTOCOL': 'ZULIP_SOCKET/1.0',
'wsgi.version': (1, 0),
'wsgi.input': StringIO(),
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.multiprocess': True,
'wsgi.run_once': False,
'zulip.emulated_method': 'POST'
}
if 'socket_user_agent' in event['request']:
environ['HTTP_USER_AGENT'] = event['request']['socket_user_agent']
del event['request']['socket_user_agent']
# We're mostly using a WSGIRequest for convenience
environ.update(server_meta['request_environ'])
request = WSGIRequest(environ)
# Note: If we ever support non-POST methods, we'll need to change this.
request._post = event['request']
request.csrf_processing_done = True
user_profile = get_user_profile_by_id(server_meta['user_id'])
request._cached_user = user_profile
resp = self.handler.get_response(request)
server_meta['time_request_finished'] = time.time()
server_meta['worker_log_data'] = request._log_data
resp_content = resp.content.decode('utf-8')
result = {'response': ujson.loads(resp_content), 'req_id': event['req_id'],
'server_meta': server_meta}
redis_key = req_redis_key(event['req_id'])
self.redis_client.hmset(redis_key, {'status': 'complete',
'response': resp_content})
queue_json_publish(server_meta['return_queue'], result, lambda e: None)
@assign_queue('digest_emails')
class DigestWorker(QueueProcessingWorker):
# Who gets a digest is entirely determined by the enqueue_digest_emails
# management command, not here.
def consume(self, event):
# type: (Mapping[str, Any]) -> None
logging.info("Received digest event: %s" % (event,))
handle_digest_email(event["user_profile_id"], event["cutoff"])
@assign_queue('email_mirror')
class MirrorWorker(QueueProcessingWorker):
# who gets a digest is entirely determined by the enqueue_digest_emails
# management command, not here.
def consume(self, event):
# type: (Mapping[str, Any]) -> None
mirror_email(email.message_from_string(event["message"]),
rcpt_to=event["rcpt_to"], pre_checked=True)
@assign_queue('test', queue_type="test")
class TestWorker(QueueProcessingWorker):
# This worker allows you to test the queue worker infrastructure without
# creating significant side effects. It can be useful in development or
# for troubleshooting prod/staging. It pulls a message off the test queue
# and appends it to a file in /tmp.
def consume(self, event):
# type: (Mapping[str, Any]) -> None
fn = settings.ZULIP_WORKER_TEST_FILE
message = ujson.dumps(event)
logging.info("TestWorker should append this message to %s: %s" % (fn, message))
with open(fn, 'a') as f:
f.write(message + '\n')
@assign_queue('embed_links')
class FetchLinksEmbedData(QueueProcessingWorker):
def consume(self, event):
# type: (Mapping[str, Any]) -> None
for url in event['urls']:
url_preview.get_link_embed_data(url)
message = Message.objects.get(id=event['message_id'])
# If the message changed, we will run this task after updating the message
# in zerver.views.messages.update_message_backend
if message.content != event['message_content']:
return
if message.content is not None:
ums = UserMessage.objects.filter(
message=message.id).select_related("user_profile")
message_users = {um.user_profile for um in ums}
# Fetch the realm whose settings we're using for rendering
realm = Realm.objects.get(id=event['message_realm_id'])
# If rendering fails, the called code will raise a JsonableError.
rendered_content = render_incoming_message(
message,
message.content,
message_users,
realm)
do_update_embedded_data(
message.sender, message, message.content, rendered_content)
| apache-2.0 | 2,599,879,186,572,168,000 | 39.902381 | 109 | 0.633797 | false |
CSD-Public/stonix | src/tests/rules/unit_tests/zzzTestRuleSetFSMountOptions.py | 1 | 4525 | #!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
This is a Unit Test for Rule ConfigureAppleSoftwareUpdate
@author: ekkehard j. koch
@change: 03/18/2013 Original Implementation
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
'''
import unittest
import sys
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.stonix_resources.CommandHelper import CommandHelper
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.SetFSMountOptions import SetFSMountOptions
class zzzTestRuleSetFSMountOptions(RuleTest):
def setUp(self):
RuleTest.setUp(self)
self.rule = SetFSMountOptions(self.config,
self.environ,
self.logdispatch,
self.statechglogger)
self.rulename = self.rule.rulename
self.rulenumber = self.rule.rulenumber
self.ch = CommandHelper(self.logdispatch)
def tearDown(self):
pass
def runTest(self):
self.simpleRuleTest()
def setConditionsForRule(self):
'''Configure system for the unit test
:param self: essential if you override this definition
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
success = True
return success
def checkReportForRule(self, pCompliance, pRuleSuccess):
'''check on whether report was correct
:param self: essential if you override this definition
:param pCompliance: the self.iscompliant value of rule
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " + \
str(pCompliance) + ".")
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
def checkFixForRule(self, pRuleSuccess):
'''check on whether fix was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
def checkUndoForRule(self, pRuleSuccess):
'''check on whether undo was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-2.0 | 8,118,355,819,487,207,000 | 38.347826 | 79 | 0.577459 | false |
turbokongen/home-assistant | homeassistant/components/ipp/__init__.py | 3 | 4975 | """The Internet Printing Protocol (IPP) integration."""
import asyncio
from datetime import timedelta
import logging
from typing import Any, Dict
from pyipp import IPP, IPPError, Printer as IPPPrinter
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_NAME,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SOFTWARE_VERSION,
CONF_BASE_PATH,
DOMAIN,
)
PLATFORMS = [SENSOR_DOMAIN]
SCAN_INTERVAL = timedelta(seconds=60)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: Dict) -> bool:
"""Set up the IPP component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up IPP from a config entry."""
coordinator = hass.data[DOMAIN].get(entry.entry_id)
if not coordinator:
# Create IPP instance for this entry
coordinator = IPPDataUpdateCoordinator(
hass,
host=entry.data[CONF_HOST],
port=entry.data[CONF_PORT],
base_path=entry.data[CONF_BASE_PATH],
tls=entry.data[CONF_SSL],
verify_ssl=entry.data[CONF_VERIFY_SSL],
)
hass.data[DOMAIN][entry.entry_id] = coordinator
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class IPPDataUpdateCoordinator(DataUpdateCoordinator[IPPPrinter]):
"""Class to manage fetching IPP data from single endpoint."""
def __init__(
self,
hass: HomeAssistant,
*,
host: str,
port: int,
base_path: str,
tls: bool,
verify_ssl: bool,
):
"""Initialize global IPP data updater."""
self.ipp = IPP(
host=host,
port=port,
base_path=base_path,
tls=tls,
verify_ssl=verify_ssl,
session=async_get_clientsession(hass, verify_ssl),
)
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=SCAN_INTERVAL,
)
async def _async_update_data(self) -> IPPPrinter:
"""Fetch data from IPP."""
try:
return await self.ipp.printer()
except IPPError as error:
raise UpdateFailed(f"Invalid response from API: {error}") from error
class IPPEntity(CoordinatorEntity):
"""Defines a base IPP entity."""
def __init__(
self,
*,
entry_id: str,
device_id: str,
coordinator: IPPDataUpdateCoordinator,
name: str,
icon: str,
enabled_default: bool = True,
) -> None:
"""Initialize the IPP entity."""
super().__init__(coordinator)
self._device_id = device_id
self._enabled_default = enabled_default
self._entry_id = entry_id
self._icon = icon
self._name = name
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self) -> str:
"""Return the mdi icon of the entity."""
return self._icon
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_default
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this IPP device."""
if self._device_id is None:
return None
return {
ATTR_IDENTIFIERS: {(DOMAIN, self._device_id)},
ATTR_NAME: self.coordinator.data.info.name,
ATTR_MANUFACTURER: self.coordinator.data.info.manufacturer,
ATTR_MODEL: self.coordinator.data.info.model,
ATTR_SOFTWARE_VERSION: self.coordinator.data.info.version,
}
| apache-2.0 | 6,653,180,964,060,747,000 | 26.638889 | 93 | 0.614472 | false |
irudayarajisawa/django-cms | cms/models/managers.py | 46 | 18640 | # -*- coding: utf-8 -*-
from django.contrib.sites.models import Site
from django.db import models
from django.db.models import Q
from django.utils import six
from cms.cache.permissions import get_permission_cache, set_permission_cache
from cms.exceptions import NoPermissionsException
from cms.models.query import PageQuerySet
from cms.publisher import PublisherManager
from cms.utils import get_cms_setting
from cms.utils.i18n import get_fallback_languages
class PageManager(PublisherManager):
"""Use draft() and public() methods for accessing the corresponding
instances.
"""
def get_queryset(self):
"""Change standard model queryset to our own.
"""
return PageQuerySet(self.model)
def drafts(self):
return super(PageManager, self).drafts()
def public(self):
return super(PageManager, self).public()
# !IMPORTANT: following methods always return access to draft instances,
# take care on what you do one them. use Page.objects.public() for accessing
# the published page versions
# Just some of the queryset methods are implemented here, access queryset
# for more getting more supporting methods.
# TODO: check which from following methods are really required to be on
# manager, maybe some of them can be just accessible over queryset...?
def on_site(self, site=None):
return self.get_queryset().on_site(site)
def published(self, site=None):
return self.get_queryset().published(site=site)
def get_home(self, site=None):
return self.get_queryset().get_home(site)
def search(self, q, language=None, current_site_only=True):
"""Simple search function
Plugins can define a 'search_fields' tuple similar to ModelAdmin classes
"""
from cms.plugin_pool import plugin_pool
qs = self.get_queryset()
qs = qs.public()
if current_site_only:
site = Site.objects.get_current()
qs = qs.filter(site=site)
qt = Q(title_set__title__icontains=q)
# find 'searchable' plugins and build query
qp = Q()
plugins = plugin_pool.get_all_plugins()
for plugin in plugins:
cmsplugin = plugin.model
if hasattr(cmsplugin, 'search_fields'):
for field in cmsplugin.search_fields:
qp |= Q(**{'placeholders__cmsplugin__%s__%s__icontains' % \
(cmsplugin.__name__.lower(), field): q})
if language:
qt &= Q(title_set__language=language)
qp &= Q(cmsplugin__language=language)
qs = qs.filter(qt | qp)
return qs.distinct()
class TitleManager(PublisherManager):
def get_title(self, page, language, language_fallback=False):
"""
Gets the latest content for a particular page and language. Falls back
to another language if wanted.
"""
try:
title = self.get(language=language, page=page)
return title
except self.model.DoesNotExist:
if language_fallback:
try:
titles = self.filter(page=page)
fallbacks = get_fallback_languages(language)
for lang in fallbacks:
for title in titles:
if lang == title.language:
return title
return None
except self.model.DoesNotExist:
pass
else:
raise
return None
# created new public method to meet test case requirement and to get a list of titles for published pages
def public(self):
return self.get_queryset().filter(publisher_is_draft=False, published=True)
def drafts(self):
return self.get_queryset().filter(publisher_is_draft=True)
def set_or_create(self, request, page, form, language):
"""
set or create a title for a particular page and language
"""
base_fields = [
'slug',
'title',
'meta_description',
'page_title',
'menu_title'
]
advanced_fields = [
'redirect',
]
cleaned_data = form.cleaned_data
try:
obj = self.get(page=page, language=language)
except self.model.DoesNotExist:
data = {}
for name in base_fields:
if name in cleaned_data:
data[name] = cleaned_data[name]
data['page'] = page
data['language'] = language
if page.has_advanced_settings_permission(request):
overwrite_url = cleaned_data.get('overwrite_url', None)
if overwrite_url:
data['has_url_overwrite'] = True
data['path'] = overwrite_url
else:
data['has_url_overwrite'] = False
for field in advanced_fields:
value = cleaned_data.get(field, None)
data[field] = value
return self.create(**data)
for name in base_fields:
if name in form.base_fields:
value = cleaned_data.get(name, None)
setattr(obj, name, value)
if page.has_advanced_settings_permission(request):
if 'overwrite_url' in cleaned_data:
overwrite_url = cleaned_data.get('overwrite_url', None)
obj.has_url_overwrite = bool(overwrite_url)
obj.path = overwrite_url
for field in advanced_fields:
if field in form.base_fields:
value = cleaned_data.get(field, None)
setattr(obj, field, value)
obj.save()
return obj
################################################################################
# Permissions
################################################################################
class BasicPagePermissionManager(models.Manager):
"""Global page permission manager accessible under objects.
!IMPORTANT: take care, PagePermissionManager and GlobalPagePermissionManager
both inherit from this manager
"""
def with_user(self, user):
"""Get all objects for given user, also takes look if user is in some
group.
"""
return self.filter(Q(user=user) | Q(group__user=user))
def with_can_change_permissions(self, user):
"""Set of objects on which user haves can_change_permissions. !But only
the ones on which is this assigned directly. For getting reall
permissions use page.permissions manager.
"""
return self.with_user(user).filter(can_change_permissions=True)
class GlobalPagePermissionManager(BasicPagePermissionManager):
def user_has_permission(self, user, site_id, perm):
"""
Provide a single point of entry for deciding whether any given global
permission exists.
"""
# if the user has add rights to this site explicitly
this_site = Q(**{perm: True, 'sites__in': [site_id]})
# if the user can add to all sites
all_sites = Q(**{perm: True, 'sites__isnull': True})
return self.with_user(user).filter(this_site | all_sites)
def user_has_add_permission(self, user, site_id):
return self.user_has_permission(user, site_id, 'can_add')
def user_has_change_permission(self, user, site_id):
return self.user_has_permission(user, site_id, 'can_change')
def user_has_view_permission(self, user, site_id):
return self.user_has_permission(user, site_id, 'can_view')
class PagePermissionManager(BasicPagePermissionManager):
"""Page permission manager accessible under objects.
"""
def subordinate_to_user(self, user):
"""Get all page permission objects on which user/group is lover in
hierarchy then given user and given user can change permissions on them.
!IMPORTANT, but exclude objects with given user, or any group containing
this user - he can't be able to change his own permissions, because if
he does, and removes some permissions from himself, he will not be able
to add them anymore.
Example:
A
/ \
user B,E
/ \
C,X D,Y
Gives permission nodes C,X,D,Y under user, so he can edit
        permissions if he has can_change_permission.
Example:
A,Y
/ \
user B,E,X
/ \
C,X D,Y
        Gives permission nodes C,D under user, so he can edit them, but no
        longer X,Y, because those users are on the same level or higher
        in the page hierarchy (and only if the user has can_change_permission).
Example:
A
/ \
user B,E
/ \ \
C,X D,Y user
/ \
I J,A
        User permissions can be assigned to multiple page nodes, so a merge of
        all of them is required. In this case the user can see permissions for
        users C,X,D,Y,I,J but not A, because user A is higher in the hierarchy.
        If a permission object holds a group, this permission object can be
        visible to the user only if all of the group members are lower in the
        hierarchy. If any of the members is higher than the given user, this
        entry must stay invisible.
        If the user is a superuser, or has global can_change_permission
        permissions, show him everything.
Result of this is used in admin for page permissions inline.
"""
from cms.models import GlobalPagePermission, Page
if user.is_superuser or \
GlobalPagePermission.objects.with_can_change_permissions(user):
# everything for those guys
return self.all()
# get user level
from cms.utils.permissions import get_user_permission_level
try:
user_level = get_user_permission_level(user)
except NoPermissionsException:
return self.none()
# get current site
site = Site.objects.get_current()
# get all permissions
page_id_allow_list = Page.permissions.get_change_permissions_id_list(user, site)
# get permission set, but without objects targeting user, or any group
# in which he can be
qs = self.filter(
page__id__in=page_id_allow_list,
page__level__gte=user_level,
)
qs = qs.exclude(user=user).exclude(group__user=user)
return qs
def for_page(self, page):
"""Returns queryset containing all instances somehow connected to given
page. This includes permissions to page itself and permissions inherited
from higher pages.
NOTE: this returns just PagePermission instances, to get complete access
list merge return of this function with Global permissions.
"""
# permissions should be managed on the draft page only
page = page.get_draft_object()
from cms.models import (ACCESS_DESCENDANTS, ACCESS_CHILDREN,
ACCESS_PAGE_AND_CHILDREN, ACCESS_PAGE_AND_DESCENDANTS, ACCESS_PAGE)
if page.depth is None or page.path is None or page.numchild is None:
raise ValueError("Cannot use unsaved page for permission lookup, missing MPTT attributes.")
paths = [
page.path[0:pos]
for pos in range(0, len(page.path), page.steplen)[1:]
]
parents = Q(page__path__in=paths) & (Q(grant_on=ACCESS_DESCENDANTS) | Q(grant_on=ACCESS_PAGE_AND_DESCENDANTS))
direct_parents = Q(page__pk=page.parent_id) & (Q(grant_on=ACCESS_CHILDREN) | Q(grant_on=ACCESS_PAGE_AND_CHILDREN))
page_qs = Q(page=page) & (Q(grant_on=ACCESS_PAGE_AND_DESCENDANTS) | Q(grant_on=ACCESS_PAGE_AND_CHILDREN) |
Q(grant_on=ACCESS_PAGE))
query = (parents | direct_parents | page_qs)
return self.filter(query).order_by('page__depth')
class PagePermissionsPermissionManager(models.Manager):
"""Page permissions permission manager.
!IMPORTANT: this actually points to Page model, not to PagePermission.
Seems this will be better approach. Accessible under permissions.
Maybe this even shouldn't be a manager - it mixes different models together.
"""
# we will return this in case we have a superuser, or permissions are not
# enabled/configured in settings
GRANT_ALL = 'All'
def get_publish_id_list(self, user, site):
"""
        Give a list of pages where the user has publish rights or the string "All" if
the user has all rights.
"""
return self.__get_id_list(user, site, "can_publish")
def get_change_id_list(self, user, site):
"""
        Give a list of pages where the user has edit rights or the string "All" if
the user has all rights.
"""
return self.__get_id_list(user, site, "can_change")
def get_add_id_list(self, user, site):
"""
        Give a list of pages where the user has add page rights or the string
"All" if the user has all rights.
"""
return self.__get_id_list(user, site, "can_add")
def get_delete_id_list(self, user, site):
"""
        Give a list of pages where the user has delete rights or the string "All" if
the user has all rights.
"""
return self.__get_id_list(user, site, "can_delete")
def get_advanced_settings_id_list(self, user, site):
"""
        Give a list of pages where the user can change advanced settings or the
string "All" if the user has all rights.
"""
return self.__get_id_list(user, site, "can_change_advanced_settings")
def get_change_permissions_id_list(self, user, site):
"""Give a list of page where the user can change permissions.
"""
return self.__get_id_list(user, site, "can_change_permissions")
def get_move_page_id_list(self, user, site):
"""Give a list of pages which user can move.
"""
return self.__get_id_list(user, site, "can_move_page")
def get_view_id_list(self, user, site):
"""Give a list of pages which user can view.
"""
return self.__get_id_list(user, site, "can_view")
def get_restricted_id_list(self, site):
from cms.models import (GlobalPagePermission, PagePermission,
MASK_CHILDREN, MASK_DESCENDANTS, MASK_PAGE)
global_permissions = GlobalPagePermission.objects.all()
if global_permissions.filter(Q(sites__in=[site]) | Q(sites__isnull=True)
).filter(can_view=True).exists():
# user or his group are allowed to do `attr` action
# !IMPORTANT: page permissions must not override global permissions
from cms.models import Page
return Page.objects.filter(site=site).values_list('id', flat=True)
# for standard users without global permissions, get all pages for him or
# his group/s
qs = PagePermission.objects.filter(page__site=site, can_view=True).select_related('page')
qs.order_by('page__path')
        # default is deny...
page_id_allow_list = []
for permission in qs:
if permission.grant_on & MASK_PAGE:
page_id_allow_list.append(permission.page_id)
if permission.grant_on & MASK_CHILDREN:
page_id_allow_list.extend(permission.page.get_children().values_list('id', flat=True))
elif permission.grant_on & MASK_DESCENDANTS:
page_id_allow_list.extend(permission.page.get_descendants().values_list('id', flat=True))
# store value in cache
return page_id_allow_list
def __get_id_list(self, user, site, attr):
if site and not isinstance(site, six.integer_types):
site = site.pk
from cms.models import (GlobalPagePermission, PagePermission,
MASK_PAGE, MASK_CHILDREN, MASK_DESCENDANTS)
if attr != "can_view":
if not user.is_authenticated() or not user.is_staff:
return []
if user.is_superuser or not get_cms_setting('PERMISSION'):
# got superuser, or permissions aren't enabled? just return grant
# all mark
return PagePermissionsPermissionManager.GRANT_ALL
# read from cache if possible
cached = get_permission_cache(user, attr)
if cached is not None:
return cached
# check global permissions
global_perm = GlobalPagePermission.objects.user_has_permission(user, site, attr).exists()
if global_perm:
# user or his group are allowed to do `attr` action
# !IMPORTANT: page permissions must not override global permissions
return PagePermissionsPermissionManager.GRANT_ALL
# for standard users without global permissions, get all pages for him or
# his group/s
qs = PagePermission.objects.with_user(user)
qs.filter(**{'page__site_id': site}).order_by('page__path').select_related('page')
        # default is deny...
page_id_allow_list = []
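        # grant_on is a bit mask: MASK_PAGE grants on the page itself,
        # MASK_CHILDREN on its direct children and MASK_DESCENDANTS on the
        # whole subtree below it.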
for permission in qs:
if getattr(permission, attr):
# can add is special - we are actually adding page under current page
if permission.grant_on & MASK_PAGE or attr is "can_add":
page_id_allow_list.append(permission.page_id)
if permission.grant_on & MASK_CHILDREN and not attr is "can_add":
page_id_allow_list.extend(permission.page.get_children().values_list('id', flat=True))
elif permission.grant_on & MASK_DESCENDANTS:
page_id_allow_list.extend(permission.page.get_descendants().values_list('id', flat=True))
# store value in cache
set_permission_cache(user, attr, page_id_allow_list)
return page_id_allow_list
| bsd-3-clause | -2,844,040,086,095,711,000 | 39.69869 | 122 | 0.585783 | false |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/pdfium/testing/tools/pngdiffer.py | 4 | 3264 | #!/usr/bin/env python
# Copyright 2015 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import common
class PNGDiffer():
ACTUAL_TEMPLATE = '.pdf.%d.png'
EXPECTED_TEMPLATE = '_expected' + ACTUAL_TEMPLATE
PLATFORM_EXPECTED_TEMPLATE = '_expected_%s' + ACTUAL_TEMPLATE
def __init__(self, finder):
self.pdfium_diff_path = finder.ExecutablePath('pdfium_diff')
self.os_name = finder.os_name
def GetActualFiles(self, input_filename, source_dir, working_dir):
actual_paths = []
template_paths = self._GetTemplatePaths(
input_filename, source_dir, working_dir)
actual_path_template = template_paths[0];
expected_path_template = template_paths[1]
platform_expected_path_template = template_paths[2]
i = 0
while True:
actual_path = actual_path_template % i
expected_path = expected_path_template % i
platform_expected_path = (
platform_expected_path_template % (self.os_name, i))
if os.path.exists(platform_expected_path):
expected_path = platform_expected_path
elif not os.path.exists(expected_path):
break
actual_paths.append(actual_path)
i += 1
return actual_paths
def HasDifferences(self, input_filename, source_dir, working_dir):
template_paths = self._GetTemplatePaths(
input_filename, source_dir, working_dir)
actual_path_template = template_paths[0];
expected_path_template = template_paths[1]
platform_expected_path_template = template_paths[2]
i = 0
while True:
actual_path = actual_path_template % i
expected_path = expected_path_template % i
# PDFium tests should be platform independent. Platform based results are
# used to capture platform dependent implementations.
platform_expected_path = (
platform_expected_path_template % (self.os_name, i))
if (not os.path.exists(expected_path) and
not os.path.exists(platform_expected_path)):
if i == 0:
print "WARNING: no expected results files for " + input_filename
break
print "Checking " + actual_path
sys.stdout.flush()
if os.path.exists(expected_path):
error = common.RunCommand(
[self.pdfium_diff_path, expected_path, actual_path])
else:
error = 1;
if error:
# When failed, we check against platform based results.
if os.path.exists(platform_expected_path):
error = common.RunCommand(
[self.pdfium_diff_path, platform_expected_path, actual_path])
if error:
print "FAILURE: " + input_filename + "; " + str(error)
return True
i += 1
return False
def _GetTemplatePaths(self, input_filename, source_dir, working_dir):
input_root, _ = os.path.splitext(input_filename)
actual_path = os.path.join(working_dir, input_root + self.ACTUAL_TEMPLATE)
expected_path = os.path.join(
source_dir, input_root + self.EXPECTED_TEMPLATE)
platform_expected_path = os.path.join(
source_dir, input_root + self.PLATFORM_EXPECTED_TEMPLATE)
return (actual_path, expected_path, platform_expected_path)
| gpl-3.0 | 6,642,088,539,112,358,000 | 37.4 | 79 | 0.66299 | false |
spearheadsys/check_mk-rds_licenses | web/plugins/wato/rds_licenses.py | 1 | 1037 | #!/usr/bin/python
# 2015 [email protected]
# SpearHead Systems
register_check_parameters(
subgroup_applications,
"rds_licenses",
_("RDS Licenses Usage Status"),
Dictionary(
elements = [
("levels", # Name of your parameters
Tuple(
title = "Levels for RDS Licenses Usage check", # Specify a title for this parameters
elements = [
Integer(
title = _("Warning if above"),
unit = _("Percent"),
default_value = 80
),
Integer(
title = _("Critical if above"),
unit = _("Percent"),
default_value = 90
),
]
)
),
],
optional_keys = None, # Always show this subgroup
),
TextAscii( title = "Service name"),
"dict"
)
| gpl-2.0 | 3,685,275,934,627,379,700 | 30.424242 | 104 | 0.403086 | false |
jaredly/django-feedback | test/test_settings.py | 5 | 1957 | """
Django settings for t project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_+y6^$(4t0(xqqgp1n96+4!2f2sscxmcr3ljuf_%!p48%8i%^+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 't.urls'
WSGI_APPLICATION = 't.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| mit | 8,863,966,426,106,192,000 | 22.865854 | 71 | 0.718958 | false |
highco-groupe/odoo | addons/analytic/wizard/__init__.py | 23 | 1300 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_journal_report
import account_analytic_balance_report
import account_analytic_inverted_balance_report
import account_analytic_cost_ledger_report
import account_analytic_cost_ledger_for_journal_report
import account_analytic_chart
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,674,212,764,106,266,000 | 43.827586 | 78 | 0.660769 | false |
qlai/stochasticLDA | stochastic_lda.py | 1 | 7754 | import sys, re, time, string, random, csv, argparse
import numpy as n
from scipy.special import psi
from nltk.tokenize import wordpunct_tokenize
from utils import *
# import matplotlib.pyplot as plt
n.random.seed(10000001)
meanchangethresh = 1e-3
MAXITER = 10000
class SVILDA():
def __init__(self, vocab, K, D, alpha, eta, tau, kappa, docs, iterations, parsed = False):
self._vocab = vocab
self._V = len(vocab)
self._K = K
self._D = D
self._alpha = alpha
self._eta = eta
self._tau = tau
self._kappa = kappa
self._lambda = 1* n.random.gamma(100., 1./100., (self._K, self._V))
self._Elogbeta = dirichlet_expectation(self._lambda)
self._expElogbeta = n.exp(self._Elogbeta)
self._docs = docs
self.ct = 0
self._iterations = iterations
self._parsed = parsed
print self._lambda.shape
self._trace_lambda = {}
for i in range(self._K):
self._trace_lambda[i] = [self.computeProbabilities()[i]]
self._x = [0]
def updateLocal(self, doc): #word_dn is an indicator variable with dimension V
(words, counts) = doc
newdoc = []
N_d = sum(counts)
phi_d = n.zeros((self._K, N_d))
gamma_d = n.random.gamma(100., 1./100., (self._K))
Elogtheta_d = dirichlet_expectation(gamma_d)
expElogtheta_d = n.exp(Elogtheta_d)
for i, item in enumerate(counts):
for j in range(item):
newdoc.append(words[i])
assert len(newdoc) == N_d, "error"
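		# Local E-step: alternate between the word-topic responsibilities
		# phi (phi[k, m] ~ exp(E[log theta_k] + E[log beta_k,w_m])) and the
		# variational Dirichlet parameter gamma_d until gamma_d converges.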
for i in range(self._iterations):
for m, word in enumerate(newdoc):
phi_d[:, m] = n.multiply(expElogtheta_d, self._expElogbeta[:, word]) + 1e-100
phi_d[:, m] = phi_d[:, m]/n.sum(phi_d[:, m])
gamma_new = self._alpha + n.sum(phi_d, axis = 1)
meanchange = n.mean(abs(gamma_d - gamma_new))
if (meanchange < meanchangethresh):
break
gamma_d = gamma_new
Elogtheta_d = dirichlet_expectation(gamma_d)
expElogtheta_d = n.exp(Elogtheta_d)
newdoc = n.asarray(newdoc)
return phi_d, newdoc, gamma_d
def updateGlobal(self, phi_d, doc):
# print 'updating global parameters'
lambda_d = n.zeros((self._K, self._V))
for k in range(self._K):
phi_dk = n.zeros(self._V)
for m, word in enumerate(doc):
# print word
phi_dk[word] += phi_d[k][m]
lambda_d[k] = self._eta + self._D * phi_dk
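		# Robbins-Monro step size rho_t = (t + tau)^(-kappa): tau delays the
		# influence of early iterations and kappa in (0.5, 1] satisfies the
		# Robbins-Monro conditions (step sizes diverge, squared steps converge).
		# The update below blends the per-document estimate lambda_d into the
		# global topics - a stochastic natural gradient step.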
rho = (self.ct + self._tau) **(-self._kappa)
self._lambda = (1-rho) * self._lambda + rho * lambda_d
self._Elogbeta = dirichlet_expectation(self._lambda)
self._expElogbeta = n.exp(self._Elogbeta)
if self.ct % 10 == 9:
for i in range(self._K):
self._trace_lambda[i].append(self.computeProbabilities()[i])
self._x.append(self.ct)
def runSVI(self):
for i in range(self._iterations):
randint = random.randint(0, self._D-1)
print "ITERATION", i, " running document number ", randint
if self._parsed == False:
doc = parseDocument(self._docs[randint],self._vocab)
phi_doc, newdoc, gamma_d = self.updateLocal(doc)
self.updateGlobal(phi_doc, newdoc)
self.ct += 1
def computeProbabilities(self):
prob_topics = n.sum(self._lambda, axis = 1)
prob_topics = prob_topics/n.sum(prob_topics)
return prob_topics
def getTopics(self, docs = None):
prob_topics = self.computeProbabilities()
prob_words = n.sum(self._lambda, axis = 0)
if docs == None:
docs = self._docs
results = n.zeros((len(docs), self._K))
for i, doc in enumerate(docs):
parseddoc = parseDocument(doc, self._vocab)
for j in range(self._K):
aux = [self._lambda[j][word]/prob_words[word] for word in parseddoc[0]]
doc_probability = [n.log(aux[k]) * parseddoc[1][k] for k in range(len(aux))]
results[i][j] = sum(doc_probability) + n.log(prob_topics[j])
finalresults = n.zeros(len(docs))
for k in range(len(docs)):
finalresults[k] = n.argmax(results[k])
return finalresults, prob_topics
def calcPerplexity(self, docs = None):
perplexity = 0.
doclen = 0.
if docs == None:
docs = self._docs
for doc in docs:
parseddoc = parseDocument(doc, self._vocab)
_, newdoc, gamma_d = self.updateLocal(parseddoc)
approx_mixture = n.dot(gamma_d, self._lambda)
# print n.shape(approx_mixture)
approx_mixture = approx_mixture / n.sum(approx_mixture)
log_doc_prob = 0.
for word in newdoc:
log_doc_prob += n.log(approx_mixture[word])
perplexity += log_doc_prob
doclen += len(newdoc)
# print perplexity, doclen
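		# Per-word perplexity: exp(-(total held-out log-likelihood) / (total
		# token count)); lower values mean the model predicts the held-out
		# words better.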
perplexity = n.exp( - perplexity / doclen)
print perplexity
return perplexity
def plotTopics(self, perp):
plottrace(self._x, self._trace_lambda, self._K, self._iterations, perp)
def test(k, iterations):
allmydocs = getalldocs("alldocs2.txt")
vocab = getVocab("dictionary2.csv")
testset = SVILDA(vocab = vocab, K = k, D = 847, alpha = 0.2, eta = 0.2, tau = 0.7, kappa = 1024, docs = allmydocs, iterations= iterations)
testset.runSVI()
finallambda = testset._lambda
heldoutdocs = getalldocs("testdocs.txt")
perplexity = testset.calcPerplexity(docs = heldoutdocs)
with open("temp/%i_%i_%f_results.csv" %(k, iterations, perplexity), "w+") as f:
writer = csv.writer(f)
for i in range(k):
bestwords = sorted(range(len(finallambda[i])), key=lambda j:finallambda[i, j])
# print bestwords
bestwords.reverse()
writer.writerow([i])
for j, word in enumerate(bestwords):
writer.writerow([word, vocab.keys()[vocab.values().index(word)]])
if j >= 15:
break
topics, topic_probs = testset.getTopics()
testset.plotTopics(perplexity)
for kk in range(0, len(finallambda)):
lambdak = list(finallambda[kk, :])
lambdak = lambdak / sum(lambdak)
temp = zip(lambdak, range(0, len(lambdak)))
temp = sorted(temp, key = lambda x: x[0], reverse=True)
# print temp
print 'topic %d:' % (kk)
# feel free to change the "53" here to whatever fits your screen nicely.
for i in range(0, 10):
print '%20s \t---\t %.4f' % (vocab.keys()[vocab.values().index(temp[i][1])], temp[i][0])
print
with open("temp/%i_%i_%f_raw.txt" %(k, iterations, perplexity), "w+") as f:
# f.write(finallambda)
for result in topics:
f.write(str(result) + " \n")
f.write(str(topic_probs) + " \n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-K','--topics', help='number of topics, defaults to 10',required=True)
parser.add_argument('-m','--mode', help='mode, test | normal',required=True)
parser.add_argument('-v','--vocab', help='Vocab file name, .csv', default = "dictionary.csv", required=False)
parser.add_argument('-d','--docs', help='file with list of docs, .txt', default = "alldocs.txt", required=False)
parser.add_argument('-a','--alpha', help='alpha parameter, defaults to 0.2',default = 0.2, required=False)
parser.add_argument('-e','--eta', help='eta parameter, defaults to 0.2',default= 0.2, required=False)
parser.add_argument('-t','--tau', help='tau parameter (delay), defaults to 1024',default= 1024, required=False)
parser.add_argument('-k','--kappa', help='kappa (forgetting rate), defaults to 0.7',default = 0.7, required=False)
parser.add_argument('-n','--iterations', help='number of iterations, defaults to 10000',default = 10000, required=False)
args = parser.parse_args()
mode = str(args.mode)
vocab = str(args.vocab)
K = int(args.topics)
alpha = float(args.alpha)
eta = float(args.eta)
tau = float(args.tau)
kappa = float(args.kappa)
iterations = int(args.iterations)
docs = str(args.docs)
vocab = str(args.vocab)
if mode == "test":
test(K, iterations)
if mode == "normal":
assert vocab is not None, "no vocab"
assert docs is not None, "no docs"
D = len(docs)
docs = getalldocs(docs)
vocab = getVocab(vocab)
lda = SVILDA(vocab = vocab, K = K, D = D, alpha = alpha, eta = eta, tau = tau, kappa = kappa, docs = docs, iterations = iterations)
lda.runSVI()
return lda
if __name__ == '__main__':
main()
| gpl-3.0 | 2,381,726,608,113,466,000 | 30.909465 | 139 | 0.656306 | false |
idan/oauthlib | tests/openid/connect/core/grant_types/test_implicit.py | 1 | 5919 | # -*- coding: utf-8 -*-
from unittest import mock
from oauthlib.common import Request
from oauthlib.oauth2.rfc6749 import errors
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from oauthlib.openid.connect.core.grant_types.exceptions import OIDCNoPrompt
from oauthlib.openid.connect.core.grant_types.implicit import ImplicitGrant
from tests.oauth2.rfc6749.grant_types.test_implicit import ImplicitGrantTest
from tests.unittest import TestCase
from .test_authorization_code import get_id_token_mock
class OpenIDImplicitInterferenceTest(ImplicitGrantTest):
"""Test that OpenID don't interfere with normal OAuth 2 flows."""
def setUp(self):
super().setUp()
self.auth = ImplicitGrant(request_validator=self.mock_validator)
class OpenIDImplicitTest(TestCase):
def setUp(self):
self.request = Request('http://a.b/path')
self.request.scopes = ('hello', 'openid')
self.request.expires_in = 1800
self.request.client_id = 'abcdef'
self.request.response_type = 'id_token token'
self.request.redirect_uri = 'https://a.b/cb'
self.request.state = 'abc'
self.request.nonce = 'xyz'
self.mock_validator = mock.MagicMock()
self.mock_validator.get_id_token.side_effect = get_id_token_mock
self.auth = ImplicitGrant(request_validator=self.mock_validator)
token = 'MOCKED_TOKEN'
self.url_query = 'https://a.b/cb?state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=%s' % token
self.url_fragment = 'https://a.b/cb#state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=%s' % token
@mock.patch('oauthlib.common.generate_token')
def test_authorization(self, generate_token):
scope, info = self.auth.validate_authorization_request(self.request)
generate_token.return_value = 'abc'
bearer = BearerToken(self.mock_validator)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
self.assertIsNone(b)
self.assertEqual(s, 302)
self.request.response_type = 'id_token'
token = 'MOCKED_TOKEN'
url = 'https://a.b/cb#state=abc&id_token=%s' % token
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertURLEqual(h['Location'], url, parse_fragment=True)
self.assertIsNone(b)
self.assertEqual(s, 302)
@mock.patch('oauthlib.common.generate_token')
def test_no_prompt_authorization(self, generate_token):
generate_token.return_value = 'abc'
scope, info = self.auth.validate_authorization_request(self.request)
self.request.prompt = 'none'
self.assertRaises(OIDCNoPrompt,
self.auth.validate_authorization_request,
self.request)
bearer = BearerToken(self.mock_validator)
self.request.id_token_hint = '[email protected]'
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
self.assertIsNone(b)
self.assertEqual(s, 302)
        # Test alternative response modes
self.request.response_mode = 'query'
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertURLEqual(h['Location'], self.url_query)
# Ensure silent authentication and authorization is done
self.mock_validator.validate_silent_login.return_value = False
self.mock_validator.validate_silent_authorization.return_value = True
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=login_required', h['Location'])
self.mock_validator.validate_silent_login.return_value = True
self.mock_validator.validate_silent_authorization.return_value = False
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=consent_required', h['Location'])
# ID token hint must match logged in user
self.mock_validator.validate_silent_authorization.return_value = True
self.mock_validator.validate_user_match.return_value = False
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=login_required', h['Location'])
@mock.patch('oauthlib.common.generate_token')
def test_required_nonce(self, generate_token):
generate_token.return_value = 'abc'
self.request.nonce = None
self.assertRaises(errors.InvalidRequestError, self.auth.validate_authorization_request, self.request)
bearer = BearerToken(self.mock_validator)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=invalid_request', h['Location'])
self.assertIsNone(b)
self.assertEqual(s, 302)
class OpenIDImplicitNoAccessTokenTest(OpenIDImplicitTest):
def setUp(self):
super().setUp()
self.request.response_type = 'id_token'
token = 'MOCKED_TOKEN'
self.url_query = 'https://a.b/cb?state=abc&id_token=%s' % token
self.url_fragment = 'https://a.b/cb#state=abc&id_token=%s' % token
@mock.patch('oauthlib.common.generate_token')
def test_required_nonce(self, generate_token):
generate_token.return_value = 'abc'
self.request.nonce = None
self.assertRaises(errors.InvalidRequestError, self.auth.validate_authorization_request, self.request)
bearer = BearerToken(self.mock_validator)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=invalid_request', h['Location'])
self.assertIsNone(b)
self.assertEqual(s, 302)
| bsd-3-clause | 4,533,979,543,435,547,600 | 43.503759 | 144 | 0.68221 | false |
sveetch/Optimus | optimus/pages/views/base.py | 1 | 8912 | # -*- coding: utf-8 -*-
import logging
import os
import six
from jinja2 import meta as Jinja2Meta
from optimus.exceptions import ViewImproperlyConfigured
from optimus.utils import UnicodeMixin
from optimus.i18n.lang import LangBase
class PageViewBase(UnicodeMixin):
"""
Base view object for a page
You can set class attributes at the init if needed
    The render method is responsible for rendering the HTML from the template
    and its context. Currently this is the only method used directly.
    Only ``lang`` and ``context`` attributes are optional, so take care to set
    all the required ones because their default value is ``None``. You should
    not use ``PageViewBase`` directly, inherit from it in a common object with
    all attributes set by default.
Template context will have the following variables :
page_title
Page title
page_destination
Page destination
page_lang
        Given language if any
page_template_name
Template name used to compile the page HTML
    But you can add new variables if needed. The default context variables
    cannot be overridden from the ``context`` class attribute, only from the
    ``get_context`` method.
    The view needs settings to be defined either as an argument at instance
    init or later through the attribute setter.
Attributes:
title (string): Page title.
        template_name (string): Page template file path relative to templates
            directory. Used as a Python template string with the optional non
            positional argument ``{{ language_code }}`` available for
            internationalized pages.
        destination (string): Page destination path relative to build
            directory.
lang (string): Language identifier or an instance of
``optimus.i18n.LangBase``.
context (dict): Initial page template context.
logger (logging.Logger): Optimus logger.
_used_templates (list): List of every used templates. Only filled when
``introspect()`` method is executed. Default to ``None``.
__settings (conf.model.SettingsModel): Settings registry instance when
given in kwargs. Default to ``None``.
Arguments:
**kwargs: Arbitrary keyword arguments. Will be added as object
attribute.
"""
title = None
template_name = None
destination = None
lang = None
context = {}
def __init__(self, **kwargs):
self._used_templates = None
self.logger = logging.getLogger('optimus')
self.__settings = kwargs.pop('settings', None)
# Store every passed keyword argument as object attribute
for key, value in kwargs.items():
setattr(self, key, value)
self.validate()
def __unicode__(self):
return self.get_destination()
def __repr__(self):
"""
Object representation
Returns:
string: Representation with name and code
"""
return "<{name} {dest}>".format(
name=self.__class__.__name__,
dest=self.get_destination()
)
def validate(self):
"""
Validate every required attribute is set.
Returns:
boolean: ``True`` if requirements are set.
"""
err = []
for item in ['title', 'template_name', 'destination']:
if not getattr(self, item):
err.append(item)
if len(err) > 0:
msg = ("These attributes are required: {}".format(", ".join(err)))
raise ViewImproperlyConfigured(msg)
return True
@property
def settings(self):
"""
``settings`` attribute getter, check settings have been correctly
defined.
Returns:
conf.model.SettingsModel: Settings registry instance when given
in kwargs. Default to ``None``.
"""
if not self.__settings:
msg = ("""View required settings defined either from init """
"""arguments or through settings attribute""")
raise ViewImproperlyConfigured(msg)
return self.__settings
@settings.setter
def settings(self, settings):
"""
``settings`` attribute setter
Arguments:
settings (conf.model.SettingsModel): Settings registry instance.
"""
self.__settings = settings
def get_title(self):
"""
Get page title.
        Default behavior is to use the page attribute ``title``.
Returns:
string: Page title.
"""
return self.title
def get_lang(self):
"""
Get page language object.
Returns:
optimus.i18n.LangBase: Language object. If ``lang`` page attribute
is ``None`` it will create a language object using default
language identifier from setting ``LANGUAGE_CODE``.
"""
# Defaut language identifier if not given
if getattr(self, "lang", None) is None:
self.lang = LangBase(code=self.settings.LANGUAGE_CODE)
# If the lang attribute contains a string, assume this is the language
# identifier
elif isinstance(getattr(self, "lang"), six.string_types):
self.lang = LangBase(code=getattr(self, "lang"))
return self.lang
def get_destination(self):
"""
Get page destination path.
Returns:
string: Page destination path relative to build directory.
"""
return os.path.normpath(
self.destination.format(
language_code=self.get_lang().code
)
)
def get_relative_position(self):
"""
Get relative path position from the destination file to the root.
Returns:
            string: Either something like "../../" if the destination is in
                subdirectories or "./" if at the root. Will never return an
                empty string.
"""
return ((len(self.get_destination().split("/"))-1)*"../" or "./")
def get_template_name(self):
"""
Get template file path.
Returns:
string: Template file path relative to templates directory.
"""
return self.template_name.format(
language_code=self.get_lang().code
)
def get_context(self):
"""
Get template context.
Returns:
dict: Template context of variables.
"""
self.context.update({
'page_title': self.get_title(),
'page_destination': self.get_destination(),
'page_relative_position': self.get_relative_position(),
'page_lang': self.get_lang(),
'page_template_name': self.get_template_name(),
})
self.logger.debug(" - Initial context: {}".format(self.context))
return self.context
def render(self, env):
"""
Take the Jinja2 environment as required argument.
Arguments:
env (jinja2.Jinja2Environment): Jinja environment.
Returns:
string: HTML builded from page template with its context.
"""
self.env = env
context = self.get_context()
template = self.env.get_template(self.get_template_name())
return template.render(lang=self.get_lang(), **context)
def _recurse_template_search(self, env, template_name):
"""
Load involved template sources from given template file path then find
their template references.
Arguments:
env (jinja2.Jinja2Environment): Jinja environment.
template_name (string): Template file path.
Returns:
list: List of involved templates sources files.
"""
template_source = env.loader.get_source(env, template_name)[0]
parsed_content = env.parse(template_source)
deps = []
for item in Jinja2Meta.find_referenced_templates(parsed_content):
deps.append(item)
deps += self._recurse_template_search(env, item)
return deps
def introspect(self, env):
"""
Take the Jinja2 environment as required argument to find every
templates dependancies from page.
Arguments:
env (jinja2.Jinja2Environment): Jinja environment.
Returns:
list: List of involved templates sources files.
"""
if self._used_templates is None:
self.env = env
found = self._recurse_template_search(
env,
self.get_template_name()
)
self._used_templates = [self.get_template_name()] + found
self.logger.debug(" - Used templates: {}".format(
self._used_templates
))
return self._used_templates
| mit | 3,893,322,322,326,397,000 | 29.520548 | 78 | 0.593133 | false |
commaai/openpilot | selfdrive/car/toyota/radar_interface.py | 2 | 3743 | #!/usr/bin/env python3
from opendbc.can.parser import CANParser
from cereal import car
from selfdrive.car.toyota.values import NO_DSU_CAR, DBC, TSS2_CAR
from selfdrive.car.interfaces import RadarInterfaceBase
def _create_radar_can_parser(car_fingerprint):
if car_fingerprint in TSS2_CAR:
RADAR_A_MSGS = list(range(0x180, 0x190))
RADAR_B_MSGS = list(range(0x190, 0x1a0))
else:
RADAR_A_MSGS = list(range(0x210, 0x220))
RADAR_B_MSGS = list(range(0x220, 0x230))
msg_a_n = len(RADAR_A_MSGS)
msg_b_n = len(RADAR_B_MSGS)
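  # Each radar "A" message carries one track's LONG_DIST/NEW_TRACK/LAT_DIST/
  # REL_SPEED/VALID signals and the matching "B" message (address offset +16)
  # carries its SCORE; the trailing lists are the default values each signal
  # holds until a frame is received.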
signals = list(zip(['LONG_DIST'] * msg_a_n + ['NEW_TRACK'] * msg_a_n + ['LAT_DIST'] * msg_a_n +
['REL_SPEED'] * msg_a_n + ['VALID'] * msg_a_n + ['SCORE'] * msg_b_n,
RADAR_A_MSGS * 5 + RADAR_B_MSGS,
[255] * msg_a_n + [1] * msg_a_n + [0] * msg_a_n + [0] * msg_a_n + [0] * msg_a_n + [0] * msg_b_n))
checks = list(zip(RADAR_A_MSGS + RADAR_B_MSGS, [20]*(msg_a_n + msg_b_n)))
return CANParser(DBC[car_fingerprint]['radar'], signals, checks, 1)
class RadarInterface(RadarInterfaceBase):
def __init__(self, CP):
super().__init__(CP)
self.track_id = 0
self.radar_ts = CP.radarTimeStep
if CP.carFingerprint in TSS2_CAR:
self.RADAR_A_MSGS = list(range(0x180, 0x190))
self.RADAR_B_MSGS = list(range(0x190, 0x1a0))
else:
self.RADAR_A_MSGS = list(range(0x210, 0x220))
self.RADAR_B_MSGS = list(range(0x220, 0x230))
self.valid_cnt = {key: 0 for key in self.RADAR_A_MSGS}
self.rcp = _create_radar_can_parser(CP.carFingerprint)
self.trigger_msg = self.RADAR_B_MSGS[-1]
self.updated_messages = set()
# No radar dbc for cars without DSU which are not TSS 2.0
    # TODO: make an adas dbc file for dsu-less models
self.no_radar = CP.carFingerprint in NO_DSU_CAR and CP.carFingerprint not in TSS2_CAR
def update(self, can_strings):
if self.no_radar:
return super().update(None)
vls = self.rcp.update_strings(can_strings)
self.updated_messages.update(vls)
if self.trigger_msg not in self.updated_messages:
return None
rr = self._update(self.updated_messages)
self.updated_messages.clear()
return rr
def _update(self, updated_messages):
ret = car.RadarData.new_message()
errors = []
if not self.rcp.can_valid:
errors.append("canError")
ret.errors = errors
for ii in sorted(updated_messages):
if ii in self.RADAR_A_MSGS:
cpt = self.rcp.vl[ii]
if cpt['LONG_DIST'] >= 255 or cpt['NEW_TRACK']:
self.valid_cnt[ii] = 0 # reset counter
if cpt['VALID'] and cpt['LONG_DIST'] < 255:
self.valid_cnt[ii] += 1
else:
self.valid_cnt[ii] = max(self.valid_cnt[ii] - 1, 0)
score = self.rcp.vl[ii+16]['SCORE']
# print ii, self.valid_cnt[ii], score, cpt['VALID'], cpt['LONG_DIST'], cpt['LAT_DIST']
# radar point only valid if it's a valid measurement and score is above 50
if cpt['VALID'] or (score > 50 and cpt['LONG_DIST'] < 255 and self.valid_cnt[ii] > 0):
if ii not in self.pts or cpt['NEW_TRACK']:
self.pts[ii] = car.RadarData.RadarPoint.new_message()
self.pts[ii].trackId = self.track_id
self.track_id += 1
self.pts[ii].dRel = cpt['LONG_DIST'] # from front of car
self.pts[ii].yRel = -cpt['LAT_DIST'] # in car frame's y axis, left is positive
self.pts[ii].vRel = cpt['REL_SPEED']
self.pts[ii].aRel = float('nan')
self.pts[ii].yvRel = float('nan')
self.pts[ii].measured = bool(cpt['VALID'])
else:
if ii in self.pts:
del self.pts[ii]
ret.points = list(self.pts.values())
return ret
| mit | 1,808,293,244,215,928,300 | 35.339806 | 113 | 0.604595 | false |