repo_name stringlengths 5–100 | path stringlengths 4–375 | copies stringclasses 991 values | size stringlengths 4–7 | content stringlengths 666–1M | license stringclasses 15 values
---|---|---|---|---|---
Maccimo/intellij-community | python/testData/inspections/PyDataclassInspection/fieldsOrder.py | 15 | 1775 |
import dataclasses
from typing import ClassVar
@dataclasses.dataclass
class A1:
bar1: int
<error descr="Fields with a default value must come after any fields without a default.">baz1</error>: int = 1
foo1: int
<error descr="Fields with a default value must come after any fields without a default.">bar2</error>: int = 2
baz2: int
foo2: int = 3
@dataclasses.dataclass()
class A2:
bar: int
baz: str = ""
foo: int = 5
@dataclasses.dataclass
class A3:
bar1: int
baz1: ClassVar[int] = 1
foo1: int
bar2: ClassVar[int] = 2
baz2: int
foo2: int = 3
@dataclasses.dataclass
class A4:
bar1: int
baz1: ClassVar = 1
foo1: int
bar2: ClassVar = 2
baz2: int
foo2: int = 3
@dataclasses.dataclass
class B1:
a: int = dataclasses.field()
b: int
@dataclasses.dataclass
class B2:
<error descr="Fields with a default value must come after any fields without a default.">a</error>: int = dataclasses.field(default=1)
b: int = dataclasses.field()
@dataclasses.dataclass
class B3:
<error descr="Fields with a default value must come after any fields without a default.">a</error>: int = dataclasses.field(default_factory=int)
b: int = dataclasses.field()
@dataclasses.dataclass
class C1:
x: int = dataclasses.MISSING
y: int
@dataclasses.dataclass
class C2:
x: int = dataclasses.field(default=dataclasses.MISSING)
y: int
C2(1, 2)
@dataclasses.dataclass
class C3:
x: int = dataclasses.field(default_factory=dataclasses.MISSING)
y: int
C3(1, 2)
@dataclasses.dataclass
class D1:
x: int = 0
y: int = dataclasses.field(init=False)
@dataclasses.dataclass
class E1:
foo = "bar" # <- has no type annotation, so doesn't count.
baz: str
| apache-2.0 |
endorphinl/horizon | openstack_dashboard/dashboards/identity/projects/views.py | 47 | 9056 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import keystone
from openstack_dashboard import policy
from openstack_dashboard import usage
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.identity.projects \
import tables as project_tables
from openstack_dashboard.dashboards.identity.projects \
import workflows as project_workflows
from openstack_dashboard.dashboards.project.overview \
import views as project_views
PROJECT_INFO_FIELDS = ("domain_id",
"domain_name",
"name",
"description",
"enabled")
INDEX_URL = "horizon:identity:projects:index"
class TenantContextMixin(object):
@memoized.memoized_method
def get_object(self):
tenant_id = self.kwargs['tenant_id']
try:
return api.keystone.tenant_get(self.request, tenant_id, admin=True)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve project information.'),
redirect=reverse(INDEX_URL))
def get_context_data(self, **kwargs):
context = super(TenantContextMixin, self).get_context_data(**kwargs)
context['tenant'] = self.get_object()
return context
class IndexView(tables.DataTableView):
table_class = project_tables.TenantsTable
template_name = 'identity/projects/index.html'
page_title = _("Projects")
def has_more_data(self, table):
return self._more
def get_data(self):
tenants = []
marker = self.request.GET.get(
project_tables.TenantsTable._meta.pagination_param, None)
domain_context = self.request.session.get('domain_context', None)
self._more = False
if policy.check((("identity", "identity:list_projects"),),
self.request):
try:
tenants, self._more = api.keystone.tenant_list(
self.request,
domain=domain_context,
paginate=True,
marker=marker)
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve project list."))
elif policy.check((("identity", "identity:list_user_projects"),),
self.request):
try:
tenants, self._more = api.keystone.tenant_list(
self.request,
user=self.request.user.id,
paginate=True,
marker=marker,
admin=False)
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve project information."))
else:
msg = \
_("Insufficient privilege level to view project information.")
messages.info(self.request, msg)
return tenants
class ProjectUsageView(usage.UsageView):
table_class = usage.ProjectUsageTable
usage_class = usage.ProjectUsage
template_name = 'identity/projects/usage.html'
csv_response_class = project_views.ProjectUsageCsvRenderer
csv_template_name = 'project/overview/usage.csv'
page_title = _("Project Usage")
def get_data(self):
super(ProjectUsageView, self).get_data()
return self.usage.get_instances()
class CreateProjectView(workflows.WorkflowView):
workflow_class = project_workflows.CreateProject
def get_initial(self):
initial = super(CreateProjectView, self).get_initial()
# Set the domain of the project
domain = api.keystone.get_default_domain(self.request)
initial["domain_id"] = domain.id
initial["domain_name"] = domain.name
# get initial quota defaults
try:
quota_defaults = quotas.get_default_quota_data(self.request)
try:
if api.base.is_service_enabled(self.request, 'network') and \
api.neutron.is_quotas_extension_supported(
self.request):
# TODO(jpichon): There is no API to access the Neutron
# default quotas (LP#1204956). For now, use the values
# from the current project.
project_id = self.request.user.project_id
quota_defaults += api.neutron.tenant_quota_get(
self.request,
tenant_id=project_id)
except Exception:
error_msg = _('Unable to retrieve default Neutron quota '
'values.')
self.add_error_to_step(error_msg, 'create_quotas')
for field in quotas.QUOTA_FIELDS:
initial[field] = quota_defaults.get(field).limit
except Exception:
error_msg = _('Unable to retrieve default quota values.')
self.add_error_to_step(error_msg, 'create_quotas')
return initial
class UpdateProjectView(workflows.WorkflowView):
workflow_class = project_workflows.UpdateProject
def get_initial(self):
initial = super(UpdateProjectView, self).get_initial()
project_id = self.kwargs['tenant_id']
initial['project_id'] = project_id
try:
# get initial project info
project_info = api.keystone.tenant_get(self.request, project_id,
admin=True)
for field in PROJECT_INFO_FIELDS:
initial[field] = getattr(project_info, field, None)
# Retrieve the domain name where the project belongs
if keystone.VERSIONS.active >= 3:
try:
domain = api.keystone.domain_get(self.request,
initial["domain_id"])
initial["domain_name"] = domain.name
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve project domain.'),
redirect=reverse(INDEX_URL))
# get initial project quota
quota_data = quotas.get_tenant_quota_data(self.request,
tenant_id=project_id)
if api.base.is_service_enabled(self.request, 'network') and \
api.neutron.is_quotas_extension_supported(self.request):
quota_data += api.neutron.tenant_quota_get(
self.request, tenant_id=project_id)
for field in quotas.QUOTA_FIELDS:
initial[field] = quota_data.get(field).limit
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve project details.'),
redirect=reverse(INDEX_URL))
return initial
class DetailProjectView(generic.TemplateView):
template_name = 'identity/projects/detail.html'
def get_context_data(self, **kwargs):
context = super(DetailProjectView, self).get_context_data(**kwargs)
project = self.get_data()
table = project_tables.TenantsTable(self.request)
context["project"] = project
context["page_title"] = _("Project Details: %s") % project.name
context["url"] = reverse(INDEX_URL)
context["actions"] = table.render_row_actions(project)
return context
@memoized.memoized_method
def get_data(self):
try:
project_id = self.kwargs['project_id']
project = api.keystone.tenant_get(self.request, project_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve project details.'),
redirect=reverse(INDEX_URL))
return project
| apache-2.0 |
Scapogo/zipline | zipline/testing/slippage.py | 6 | 1028 |
from zipline.assets import Equity
from zipline.finance.slippage import SlippageModel
from zipline.utils.sentinel import sentinel
class TestingSlippage(SlippageModel):
"""
Slippage model that fills a constant number of shares per tick, for
testing purposes.
Parameters
----------
filled_per_tick : int or TestingSlippage.ALL
The number of shares to fill on each call to process_order. If
TestingSlippage.ALL is passed, the entire order is filled.
See also
--------
zipline.finance.slippage.SlippageModel
"""
ALL = sentinel('ALL')
allowed_asset_types = (Equity,)
def __init__(self, filled_per_tick):
super(TestingSlippage, self).__init__()
self.filled_per_tick = filled_per_tick
def process_order(self, data, order):
price = data.current(order.asset, "close")
if self.filled_per_tick is self.ALL:
volume = order.amount
else:
volume = self.filled_per_tick
return (price, volume)
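# Hedged usage sketch (added for illustration, not part of the original file).
# It assumes the zipline.api entry point set_slippage(); inside a Zipline algorithm
# the model would normally be installed in initialize().
def _example_initialize(context):
    from zipline.api import set_slippage  # assumed import path
    # Fill exactly one share on each call to process_order, regardless of order size.
    set_slippage(TestingSlippage(filled_per_tick=1))
    # Or fill every order completely on the first tick:
    # set_slippage(TestingSlippage(TestingSlippage.ALL))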
| apache-2.0 |
chengui/mic | mic/bootstrap.py | 2 | 8915 |
#!/usr/bin/python -tt
#
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
import os
import sys
import tempfile
import shutil
import subprocess
import rpm
from mic import msger
from mic.utils import errors, proxy, misc
from mic.utils.rpmmisc import readRpmHeader, RPMInstallCallback
from mic.chroot import cleanup_mounts, setup_chrootenv, cleanup_chrootenv
PATH_BOOTSTRAP = "/usr/sbin:/usr/bin:/sbin:/bin"
RPMTRANS_FLAGS = [
rpm.RPMTRANS_FLAG_ALLFILES,
rpm.RPMTRANS_FLAG_NOSCRIPTS,
rpm.RPMTRANS_FLAG_NOTRIGGERS,
]
RPMVSF_FLAGS = [
rpm._RPMVSF_NOSIGNATURES,
rpm._RPMVSF_NODIGESTS
]
RPMPROB_FLAGS = [
rpm.RPMPROB_FILTER_OLDPACKAGE,
rpm.RPMPROB_FILTER_REPLACEPKG,
rpm.RPMPROB_FILTER_IGNOREARCH
]
class MiniBackend(object):
def __init__(self, rootdir, arch=None, repomd=None):
self._ts = None
self.rootdir = os.path.abspath(rootdir)
self.arch = arch
self.repomd = repomd
self.dlpkgs = []
self.localpkgs = {}
self.optionals = []
self.preins = {}
self.postins = {}
self.scriptlets = False
def __del__(self):
try:
del self.ts
except:
pass
def get_ts(self):
if not self._ts:
self._ts = rpm.TransactionSet(self.rootdir)
self._ts.setFlags(reduce(lambda x, y: x|y, RPMTRANS_FLAGS))
self._ts.setVSFlags(reduce(lambda x, y: x|y, RPMVSF_FLAGS))
self._ts.setProbFilter(reduce(lambda x, y: x|y, RPMPROB_FLAGS))
return self._ts
def del_ts(self):
if self._ts:
self._ts.closeDB()
self._ts = None
ts = property(fget = lambda self: self.get_ts(),
fdel = lambda self: self.del_ts(),
doc="TransactionSet object")
def selectPackage(self, pkg):
if not pkg in self.dlpkgs:
self.dlpkgs.append(pkg)
def runInstall(self):
# FIXME: check space
self.downloadPkgs()
self.installPkgs()
if not self.scriptlets:
return
for pkg in self.preins.keys():
prog, script = self.preins[pkg]
self.run_pkg_script(pkg, prog, script, '0')
for pkg in self.postins.keys():
prog, script = self.postins[pkg]
self.run_pkg_script(pkg, prog, script, '1')
def downloadPkgs(self):
nonexist = []
for pkg in self.dlpkgs:
try:
localpth = misc.get_package(pkg, self.repomd, self.arch)
if localpth:
self.localpkgs[pkg] = localpth
elif pkg in self.optionals:
# skip optional rpm
continue
else:
# mark nonexist rpm
nonexist.append(pkg)
except:
raise
if nonexist:
raise errors.BootstrapError("Can't get rpm binary: %s" %
','.join(nonexist))
def installPkgs(self):
for pkg in self.localpkgs.keys():
rpmpath = self.localpkgs[pkg]
hdr = readRpmHeader(self.ts, rpmpath)
# save prein and postin scripts
self.preins[pkg] = (hdr['PREINPROG'], hdr['PREIN'])
self.postins[pkg] = (hdr['POSTINPROG'], hdr['POSTIN'])
# mark pkg as install
self.ts.addInstall(hdr, rpmpath, 'u')
# run transaction
self.ts.order()
cb = RPMInstallCallback(self.ts)
self.ts.run(cb.callback, '')
def run_pkg_script(self, pkg, prog, script, arg):
mychroot = lambda: os.chroot(self.rootdir)
if not script:
return
if prog == "<lua>":
prog = "/usr/bin/lua"
tmpdir = os.path.join(self.rootdir, "tmp")
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
tmpfd, tmpfp = tempfile.mkstemp(dir=tmpdir, prefix="%s.pre-" % pkg)
script = script.replace('\r', '')
os.write(tmpfd, script)
os.close(tmpfd)
os.chmod(tmpfp, 0700)
try:
script_fp = os.path.join('/tmp', os.path.basename(tmpfp))
subprocess.call([prog, script_fp, arg], preexec_fn=mychroot)
except (OSError, IOError), err:
msger.warning(str(err))
finally:
os.unlink(tmpfp)
class Bootstrap(object):
def __init__(self, rootdir, distro, arch=None):
self.rootdir = misc.mkdtemp(dir=rootdir, prefix=distro)
self.distro = distro
self.arch = arch
self.logfile = None
self.pkgslist = []
self.repomd = None
def __del__(self):
self.cleanup()
def get_rootdir(self):
if os.path.exists(self.rootdir):
shutil.rmtree(self.rootdir, ignore_errors=True)
os.makedirs(self.rootdir)
return self.rootdir
def dirsetup(self, rootdir=None):
_path = lambda pth: os.path.join(rootdir, pth.lstrip('/'))
if not rootdir:
rootdir = self.rootdir
try:
# make /tmp and /etc path
tmpdir = _path('/tmp')
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
etcdir = _path('/etc')
if not os.path.exists(etcdir):
os.makedirs(etcdir)
# touch distro file
tzdist = _path('/etc/%s-release' % self.distro)
if not os.path.exists(tzdist):
with open(tzdist, 'w') as wf:
wf.write("bootstrap")
except:
pass
def create(self, repomd, pkglist, optlist=[]):
try:
pkgmgr = MiniBackend(self.get_rootdir())
pkgmgr.arch = self.arch
pkgmgr.repomd = repomd
pkgmgr.optionals = optlist
map(pkgmgr.selectPackage, pkglist + optlist)
pkgmgr.runInstall()
except (OSError, IOError, errors.CreatorError), err:
raise errors.BootstrapError("%s" % err)
except:
raise
def run(self, cmd, chdir, rootdir=None, bindmounts=None):
def mychroot():
os.chroot(rootdir)
os.chdir(chdir)
def sync_timesetting(rootdir):
try:
# sync time and zone info to bootstrap
if os.path.exists(rootdir + "/etc/localtime"):
os.unlink(rootdir + "/etc/localtime")
shutil.copyfile("/etc/localtime", rootdir + "/etc/localtime")
except:
pass
def sync_passwdfile(rootdir):
try:
# sync passwd file to bootstrap, saving the user info
if os.path.exists(rootdir + "/etc/passwd"):
os.unlink(rootdir + "/etc/passwd")
shutil.copyfile("/etc/passwd", rootdir + "/etc/passwd")
except:
pass
if not rootdir:
rootdir = self.rootdir
if isinstance(cmd, list):
shell = False
else:
shell = True
env = os.environ
env['PATH'] = "%s:%s" % (PATH_BOOTSTRAP, env['PATH'])
retcode = 0
globalmounts = None
try:
proxy.set_proxy_environ()
globalmounts = setup_chrootenv(rootdir, bindmounts, False)
sync_timesetting(rootdir)
sync_passwdfile(rootdir)
retcode = subprocess.call(cmd, preexec_fn=mychroot, env=env, shell=shell)
except (OSError, IOError), err:
raise RuntimeError(err)
finally:
if self.logfile and os.path.isfile(self.logfile):
msger.log(file(self.logfile).read())
cleanup_chrootenv(rootdir, bindmounts, globalmounts)
proxy.unset_proxy_environ()
return retcode
def cleanup(self):
try:
# clean mounts
cleanup_mounts(self.rootdir)
# remove rootdir
shutil.rmtree(self.rootdir, ignore_errors=True)
except:
pass
| gpl-2.0 |
kylerbrown/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 |
"""
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
tovid-suite/tovid | libtovid/metagui/manpage.py | 1 | 2685 |
"""This module is for parsing manual pages and extracting necessary information
for creating a metaGUI.
Needed capabilities:
* Retrieve text of a given manpage
* Read manpage for a given program and create a list of command-line options,
with expected parameters and accompanying paragraph(s) of documentation.
"""
__all__ = [
'get',
'parse',
]
import re
import os
import textwrap
from libtovid.metagui.control import *
class Matcher:
def search(self, pattern, text):
self.value = re.search(pattern, text)
return self.value
def __getitem__(self, index):
return self.value.group(index)
def get(program):
"""Return the text of the 'man' page for the given command-line program.
"""
text = os.popen('man %s | col -b' % program).readlines()
return ''.join(text)
class Option:
def __init__(self, header='', doc=''):
self.header = header
self.doc = ''
self.append(doc)
# Get option name from header
self.option = header.split()[0].lstrip('-')
def getopt(self):
"""Get option name(s) and argument type from option header."""
text = self.header
option = ''
arg = ''
match = Matcher()
# Typical option header styles
# -f, --foobar
if match.search('(-\w), --[-\w]+', text):
option = match[1]
# -f ARG, --foobar=ARG
elif match.search('(-\w) (\w+), --[-\w]+=\w+', text):
option = match[1]
arg = match[2]
# -foo
elif match.search('(-[-\w]+)', text):
option = match[1]
# -foo ARG
elif match.search('(-[-\w]+) \[?(\w+)]?', text):
option = match[1]
arg = match[2]
return (option, arg)
def append(self, text):
"""Append text to the documentation, with extra whitespace removed.
"""
text = text.replace('\t', ' ')
text = text.replace(' ', ' ')
text = text.strip()
# Join hyphenated words at end of lines
if self.doc.endswith('-'):
self.doc = self.doc.rstrip('-') + text
else:
self.doc += ' ' + text
def __str__(self):
text = self.header + '\n'
text += textwrap.fill(self.doc.strip())
return text
def parse(text):
"""Generate meta-GUI from manpage documentation"""
options = []
# Match lines defining options
option = re.compile("^ *-(\w+) ?(.*)")
for line in text.splitlines():
if re.match(option, line):
options.append(Option(line.strip()))
elif len(options) > 0:
options[-1].append(line)
return options
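# Hedged usage sketch (added for illustration, not part of the original module).
# It only uses get() and parse() as defined above; 'tar' is an arbitrary program
# name and assumes a man page for it is installed on the system.
def _example_dump_options(program='tar'):
    # Fetch the manpage text, split it into Option objects, and print each
    # option name together with its expected argument (if any).
    for opt in parse(get(program)):
        name, arg = opt.getopt()
        print('%s %s' % (name, arg))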
| gpl-2.0 |
freakynit/vertx-web | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/setuptools/sandbox.py | 221 | 9994 |
import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import pkg_resources
if os.name == "java":
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set
from setuptools.compat import builtins, execfile
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
old_dir = os.getcwd()
save_argv = sys.argv[:]
save_path = sys.path[:]
setup_dir = os.path.abspath(os.path.dirname(setup_script))
temp_dir = os.path.join(setup_dir,'temp')
if not os.path.isdir(temp_dir): os.makedirs(temp_dir)
save_tmp = tempfile.tempdir
save_modules = sys.modules.copy()
pr_state = pkg_resources.__getstate__()
try:
tempfile.tempdir = temp_dir
os.chdir(setup_dir)
try:
sys.argv[:] = [setup_script]+list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist:dist.activate())
DirectorySandbox(setup_dir).run(
lambda: execfile(
"setup.py",
{'__file__':setup_script, '__name__':'__main__'}
)
)
except SystemExit:
v = sys.exc_info()[1]
if v.args and v.args[0]:
raise
# Normal exit, just return
finally:
pkg_resources.__setstate__(pr_state)
sys.modules.update(save_modules)
# remove any modules imported within the sandbox
del_modules = [
mod_name for mod_name in sys.modules
if mod_name not in save_modules
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
]
list(map(sys.modules.__delitem__, del_modules))
os.chdir(old_dir)
sys.path[:] = save_path
sys.argv[:] = save_argv
tempfile.tempdir = save_tmp
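# Hedged usage sketch (added for illustration, not part of the original module).
# The setup.py path is a placeholder; run_setup() executes the script with the
# given argv while a DirectorySandbox confines filesystem writes to its directory.
def _example_sandboxed_build(setup_script='/path/to/pkg/setup.py'):
    # Roughly equivalent to "python setup.py bdist_egg" run from that directory,
    # but writes outside the package tree raise SandboxViolation.
    run_setup(setup_script, ['bdist_egg'])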
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self,name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source,name))
def run(self, func):
"""Run 'func' under os sandboxing"""
try:
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
return func()
finally:
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def _mk_dual_path_wrapper(name):
original = getattr(_os,name)
def wrap(self,src,dst,*args,**kw):
if self._active:
src,dst = self._remap_pair(name,src,dst,*args,**kw)
return original(src,dst,*args,**kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return original(path,*args,**kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return self._remap_output(name, original(path,*args,**kw))
return original(path,*args,**kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os,name): locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os,name)
def wrap(self,*args,**kw):
retval = original(*args,**kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os,name): locals()[name] = _mk_query(name)
def _validate_path(self,path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self,operation,path,*args,**kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self,operation,path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self,operation,src,dst,*args,**kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation+'-from',src,*args,**kw),
self._remap_input(operation+'-to',dst,*args,**kw)
)
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull,]
else:
_EXCEPTIONS = []
try:
from win32com.client.gencache import GetGeneratePath
_EXCEPTIONS.append(GetGeneratePath())
del GetGeneratePath
except ImportError:
# it appears pywin32 is not installed, so no need to exclude.
pass
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
_exception_patterns = [
# Allow lib2to3 to attempt to save a pickled grammar object (#121)
'.*lib2to3.*\.pickle$',
]
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox,'')
self._exceptions = [
os.path.normcase(os.path.realpath(path))
for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path,mode,*args,**kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path,mode,*args,**kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception)
for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath)
for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src,dst)
def open(self, file, flags, mode=0x1FF, *args, **kw): # 0777
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file,flags,mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_, [getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
def __str__(self):
return """SandboxViolation: %s%r %s
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.""" % self.args
#
| apache-2.0 |
NL66278/OCB | addons/l10n_in_hr_payroll/report/report_payslip_details.py | 374 | 1791 |
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report import report_sxw
from openerp.osv import osv
from openerp.addons.hr_payroll import report
class payslip_details_report_in(report.report_payslip_details.payslip_details_report):
def __init__(self, cr, uid, name, context):
super(payslip_details_report_in, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_details_by_rule_category': self.get_details_by_rule_category,
})
class wrapped_report_payslipdetailsin(osv.AbstractModel):
_name = 'report.l10n_in_hr_payroll.report_payslipdetails'
_inherit = 'report.abstract_report'
_template = 'l10n_in_hr_payroll.report_payslipdetails'
_wrapped_report_class = payslip_details_report_in
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/db/models/fields/__init__.py | 24 | 48391 |
from __future__ import unicode_literals
import copy
import datetime
import decimal
import math
import warnings
from itertools import tee
from django.db import connection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry, total_ordering
from django.utils.itercompat import is_iterator
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text, force_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
@total_ordering
class Field(object):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
}
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = (unique_for_date,
unique_for_month)
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value not in validators.EMPTY_VALUES:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
msg = self.error_messages['invalid_choice'] % value
raise exceptions.ValidationError(msg)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'])
if not self.blank and value in validators.EMPTY_VALUES:
raise exceptions.ValidationError(self.error_messages['blank'])
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return (connection.creation.data_types[self.get_internal_type()]
% data)
except KeyError:
return None
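# Hedged illustration (added; not part of the original Django source): the two
# approaches described in the comments above, shown as hypothetical custom fields
# (assuming "from django.db import models" in user code).
#
#     class HandField(models.Field):
#         def db_type(self, connection):
#             return 'char(104)'          # supply the column type directly
#
#     class TagField(models.Field):
#         def get_internal_type(self):
#             return 'TextField'          # reuse the backend mapping for TextField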
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name):
self.set_attributes_from_name(name)
self.model = cls
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save`` and
``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in (
'regex', 'iregex', 'month', 'day', 'week_day', 'search',
'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
'endswith', 'iendswith', 'isnull'
):
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
raise TypeError("Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day',
'search'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if self.get_internal_type() == 'DateField':
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return connection.ops.year_lookup_bounds(value)
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_text(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = include_blank and blank_choice or []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if is_iterator(self._choices):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice,value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=forms.CharField, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, \
"%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
if 'default' not in kwargs and not kwargs.get('null'):
kwargs['default'] = False
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
# if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = (self.null or
not (self.has_default() or 'initial' in kwargs))
defaults = {'choices': self.get_choices(
include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_datetime'] % value
raise exceptions.ValidationError(msg)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
warnings.warn("DateTimeField received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def _format(self, value):
if isinstance(value, six.string_types) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
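# Illustrative sketch (not part of the original module): DecimalField.to_python defers
# to decimal.Decimal and converts conversion failures into ValidationError. Sample
# values are assumptions.
#
#   >>> f = DecimalField(max_digits=5, decimal_places=2)
#   >>> f.to_python('3.14')
#   Decimal('3.14')
#   >>> f.to_python('abc')    # decimal.InvalidOperation -> ValidationError
#   Traceback (most recent call last):
#       ...
#   ValidationError: ...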
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length should be overridden to 254 characters to be fully
# compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = True
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
Field.__init__(self, verbose_name, name, *args, **kwargs)
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
if value is None:
return value
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {'form_class': forms.GenericIPAddressField}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
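# Illustrative sketch (not part of the original module): with unpack_ipv4=True,
# to_python routes any value containing ':' through clean_ipv6_address, which maps
# IPv4-mapped IPv6 addresses back to dotted-quad form. The addresses below are
# assumptions (RFC 5737 documentation range).
#
#   >>> f = GenericIPAddressField(unpack_ipv4=True)
#   >>> f.to_python('::ffff:192.0.2.1')
#   '192.0.2.1'
#   >>> f.to_python('192.0.2.1')    # values without ':' are returned unchanged
#   '192.0.2.1'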
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
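# Illustrative sketch (not part of the original module): the values accepted by
# NullBooleanField.to_python and their normalized forms.
#
#   >>> f = NullBooleanField()
#   >>> [f.to_python(v) for v in (True, 'True', 't', '1')]
#   [True, True, True, True]
#   >>> [f.to_python(v) for v in (False, 'False', 'f', '0')]
#   [False, False, False, False]
#   >>> f.to_python(None) is None and f.to_python('None') is None
#   True
#   >>> f.to_python('maybe')    # anything else raises ValidationError
#   Traceback (most recent call last):
#       ...
#   ValidationError: ...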
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_time'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
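# Illustrative sketch (not part of the original module): TimeField.to_python accepts
# time and datetime objects directly and parses strings with parse_time. Sample
# values are assumptions.
#
#   >>> f = TimeField()
#   >>> f.to_python('14:30:59')
#   datetime.time(14, 30, 59)
#   >>> f.to_python(datetime.datetime(2012, 1, 1, 14, 30))   # datetimes lose their date part
#   datetime.time(14, 30)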
class URLField(CharField):
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
self.validators.append(validators.URLValidator())
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
|
bsd-3-clause
|
freakboy3742/django
|
docs/conf.py
|
5
|
13852
|
# Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't picklable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from os.path import abspath, dirname, join
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000 but this isn't enough for
# building docs/ref/settings.txt sometimes.
# https://groups.google.com/g/sphinx-dev/c/MtRf64eGtv4/discussion
sys.setrecursionlimit(2000)
# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.6.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"djangodocs",
'sphinx.ext.extlinks',
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
]
# AutosectionLabel settings.
# Uses a <page>:<label> schema which doesn't work for duplicate sub-section
# labels, so set max depth.
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
# Linkcheck settings.
linkcheck_ignore = [
# Special-use addresses and domain names. (RFC 6761/6890)
r'^https?://(?:127\.0\.0\.1|\[::1\])(?::\d+)?/',
r'^https?://(?:[^/\.]+\.)*example\.(?:com|net|org)(?::\d+)?/',
r'^https?://(?:[^/\.]+\.)*(?:example|invalid|localhost|test)(?::\d+)?/',
# Pages that are inaccessible because they require authentication.
r'^https://github\.com/[^/]+/[^/]+/fork',
r'^https://code\.djangoproject\.com/github/login',
r'^https://code\.djangoproject\.com/newticket',
r'^https://(?:code|www)\.djangoproject\.com/admin/',
r'^https://www\.djangoproject\.com/community/add/blogs/',
r'^https://www\.google\.com/webmasters/tools/ping',
r'^https://search\.google\.com/search-console/welcome',
# Fragments used to dynamically switch content or populate fields.
r'^https://web\.libera\.chat/#',
r'^https://github\.com/[^#]+#L\d+-L\d+$',
r'^https://help\.apple\.com/itc/podcasts_connect/#/itc',
# Anchors on certain pages with missing a[name] attributes.
r'^https://tools\.ietf\.org/html/rfc1123\.html#section-',
]
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append("sphinxcontrib.spelling")
# Spelling language.
spelling_lang = 'en_US'
# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'
spelling_warning = True
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0'
# The full version, including alpha/beta/rc tags.
try:
from django import VERSION, get_version
except ImportError:
release = version
else:
def django_release():
pep440ver = get_version()
if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep440ver:
return pep440ver + '.dev'
return pep440ver
release = django_release()
# The "development version" of Django
django_next_version = '4.0'
extlinks = {
'bpo': ('https://bugs.python.org/issue%s', 'bpo-'),
'commit': ('https://github.com/django/django/commit/%s', ''),
'cve': ('https://nvd.nist.gov/vuln/detail/CVE-%s', 'CVE-'),
# A file or directory. GitHub redirects from blob to tree if needed.
'source': ('https://github.com/django/django/blob/main/%s', ''),
'ticket': ('https://code.djangoproject.com/ticket/%s', '#'),
}
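# Illustrative note (not part of the original configuration): with the extlinks
# mapping above, a role such as :ticket:`12345` in the docs expands to a link to
# https://code.djangoproject.com/ticket/12345 rendered as "#12345" (the second tuple
# element is used as the link-text prefix), and :source:`README.rst` points at the
# matching path on GitHub.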
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_theme', 'requirements.txt']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "default-role-error"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'sphinx': ('https://www.sphinx-doc.org/en/master/', None),
'psycopg2': ('https://www.psycopg.org/docs/', None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
# The 'versionadded' and 'versionchanged' directives are overridden.
suppress_warnings = ['app.add_directive']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Content template for the index page.
# html_index = ''
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'
modindex_common_prefix = ["django."]
# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'preamble': (
'\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
'\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}'
'\\DeclareUnicodeCharacter{2665}{[unicode-heart]}'
'\\DeclareUnicodeCharacter{2713}{[unicode-checkmark]}'
),
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
('contents', 'django.tex', 'Django Documentation',
'Django Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
'ref/django-admin',
'django-admin',
'Utility script for the Django Web framework',
['Django Software Foundation'],
1
)]
# -- Options for Texinfo output ------------------------------------------------
# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
master_doc, "django", "", "", "Django",
"Documentation of the Django framework", "Web development", False
)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
|
bsd-3-clause
|
ParkJinSang/Logle
|
sample/common/fileio.py
|
2
|
1415
|
__author__ = 'PARKJINSANG'
#
def read_file(path):
"""
Read text from file path
:param path: string of path of file to read
:return: contents inside the file
"""
file = open(path, "r", encoding='ISO-8859-1')
contents = ""
while True:
line = file.readline()
if not line:
break
contents += line
return contents
def read_file_lines(path):
"""
Read text line by line from file path
:param path: string of path of file to read
:return: list that includes each string line
"""
file = open(path, "r", encoding='ISO-8859-1')
text_list = []
while True:
line = file.readline()
if not line:
break
text_list.append(line)
return text_list
def write_file(path, contents):
    file = open(path, 'w', encoding='ISO-8859-1')
file.write(contents)
file.close()
def write_file_dict(path, dict):
"""
Write dictionary data into the file(csv)
:param path: string of path of file to read
:param dict: dictionary to write as a form of csv format
:return: none
"""
file = open(path, 'w', encoding='ISO-8859-1')
for k, v in dict.items():
text = str(k) + "," + str(v) + '\n'
file.write(text)
file.close()
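# Illustrative sketch (not part of the original module): write_file_dict emits one
# "key,value" line per entry. The file name and data below are assumptions.
#
#   >>> write_file_dict('counts.csv', {'apple': 3, 'pear': 5})
#   # counts.csv then contains, in dict iteration order:
#   #   apple,3
#   #   pear,5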
def append_file(path, contents):
file = open(path, 'a', encoding='ISO-8859-1')
file.write(contents)
file.close()
|
mit
|
zhuyue1314/archinfo
|
archinfo/arch_mips32.py
|
1
|
7869
|
import capstone as _capstone
from .arch import Arch
# FIXME: Tell fish to fix whatever he was storing in info['current_function']
# TODO: Only persist t9 in PIC programs
class ArchMIPS32(Arch):
def __init__(self, endness="Iend_LE"):
super(ArchMIPS32, self).__init__(endness)
if endness == 'Iend_BE':
self.function_prologs = {
r"\x27\xbd\xff[\x00-\xff]" # addiu $sp, xxx
r"\x3c\x1c[\x00-\xff][\x00-\xff]\x9c\x27[\x00-\xff][\x00-\xff]" # lui $gp, xxx; addiu $gp, $gp, xxxx
}
self.function_epilogs = {
r"\x8f\xbf[\x00-\xff]{2}([\x00-\xff]{4}){0,4}\x03\xe0\x00\x08" # lw ra, off(sp); ... ; jr ra
}
self.triplet = 'mips-linux-gnu'
self.linux_name = 'mips'
bits = 32
vex_arch = "VexArchMIPS32"
name = "MIPS32"
qemu_name = 'mips'
ida_processor = 'mipsb'
linux_name = 'mipsel' # ???
triplet = 'mipsel-linux-gnu'
max_inst_bytes = 4
ip_offset = 128
sp_offset = 116
bp_offset = 120
ret_offset = 8
call_pushes_ret = False
stack_change = -4
cs_arch = _capstone.CS_ARCH_MIPS
cs_mode = _capstone.CS_MODE_32 + _capstone.CS_MODE_LITTLE_ENDIAN
function_prologs = {
r"[\x00-\xff]\xff\xbd\x27", # addiu $sp, xxx
r"[\x00-\xff][\x00-\xff]\x1c\x3c[\x00-\xff][\x00-\xff]\x9c\x27" # lui $gp, xxx; addiu $gp, $gp, xxxx
}
function_epilogs = {
r"[\x00-\xff]{2}\xbf\x8f([\x00-\xff]{4}){0,4}\x08\x00\xe0\x03" # lw ra, off(sp); ... ; jr ra
}
ret_instruction = "\x08\x00\xE0\x03" + "\x25\x08\x20\x00"
nop_instruction = "\x00\x00\x00\x00"
instruction_alignment = 4
persistent_regs = ['gp', 'ra', 't9']
default_register_values = [
( 'sp', Arch.initial_sp, True, 'global' ), # the stack
]
entry_register_values = {
'v0': 'ld_destructor',
'ra': 0
}
default_symbolic_registers = [ 'r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15', 'r16', 'r17', 'r18', 'r19', 'r20', 'r21', 'r22', 'r23', 'r24', 'r25', 'r26', 'r27', 'r28', 'sp', 'bp', 'lr', 'pc', 'hi', 'lo' ]
register_names = {
0: 'zero',
4: 'at',
8: 'v0',
12: 'v1',
16: 'a0',
20: 'a1',
24: 'a2',
28: 'a3',
32: 't0',
36: 't1',
40: 't2',
44: 't3',
48: 't4',
52: 't5',
56: 't6',
60: 't7',
64: 's0',
68: 's1',
72: 's2',
76: 's3',
80: 's4',
84: 's5',
88: 's6',
92: 's7',
96: 't8',
100: 't9',
104: 'k0',
108: 'k1',
112: 'gp',
116: 'sp',
120: 's8',
124: 'ra',
128: 'pc',
132: 'hi',
136: 'lo',
144: 'f0',
152: 'f1',
160: 'f2',
168: 'f3',
176: 'f4',
184: 'f5',
192: 'f6',
200: 'f7',
208: 'f8',
216: 'f9',
224: 'f10',
232: 'f11',
240: 'f12',
248: 'f13',
256: 'f14',
264: 'f15',
272: 'f16',
280: 'f17',
288: 'f18',
296: 'f19',
304: 'f20',
312: 'f21',
320: 'f22',
328: 'f23',
336: 'f24',
344: 'f25',
352: 'f26',
360: 'f27',
368: 'f28',
376: 'f29',
384: 'f30',
392: 'f31',
400: 'fir',
404: 'fccr',
408: 'fexr',
412: 'fenr',
416: 'fcsr',
420: 'ulr',
424: 'emnote',
428: 'cmstart',
432: 'cmlen',
436: 'nraddr',
440: 'evc_failaddr',
444: 'evc_counter',
448: 'cond',
452: 'dspcontrol',
456: 'ac0',
464: 'ac1',
472: 'ac2',
480: 'ac3'
}
registers = {
'r0': (0, 4), 'zero': (0, 4),
'r1': (4, 4), 'at': (4, 4),
'r2': (8, 4), 'v0': (8, 4),
'r3': (12, 4), 'v1': (12, 4),
'r4': (16, 4), 'a0': (16, 4),
'r5': (20, 4), 'a1': (20, 4),
'r6': (24, 4), 'a2': (24, 4),
'r7': (28, 4), 'a3': (28, 4),
'r8': (32, 4), 't0': (32, 4),
'r9': (36, 4), 't1': (36, 4),
'r10': (40, 4), 't2': (40, 4),
'r11': (44, 4), 't3': (44, 4),
'r12': (48, 4), 't4': (48, 4),
'r13': (52, 4), 't5': (52, 4),
'r14': (56, 4), 't6': (56, 4),
'r15': (60, 4), 't7': (60, 4),
'r16': (64, 4), 's0': (64, 4),
'r17': (68, 4), 's1': (68, 4),
'r18': (72, 4), 's2': (72, 4),
'r19': (76, 4), 's3': (76, 4),
'r20': (80, 4), 's4': (80, 4),
'r21': (84, 4), 's5': (84, 4),
'r22': (88, 4), 's6': (88, 4),
'r23': (92, 4), 's7': (92, 4),
'r24': (96, 4), 't8': (96, 4),
'r25': (100, 4), 't9': (100, 4),
'r26': (104, 4), 'k0': (104, 4),
'r27': (108, 4), 'k1': (108, 4),
'r28': (112, 4), 'gp': (112, 4),
'r29': (116, 4), 'sp': (116, 4),
'r30': (120, 4), 's8': (120, 4), 'bp': (120, 4), 'fp': (120, 4),
'r31': (124, 4), 'ra': (124, 4), 'lr': (124, 4),
'pc': (128, 4),
'ip': (128, 4),
'hi': (132, 4),
'lo': (136, 4),
'f0': (144, 8),
'f1': (152, 8),
'f2': (160, 8),
'f3': (168, 8),
'f4': (176, 8),
'f5': (184, 8),
'f6': (192, 8),
'f7': (200, 8),
'f8': (208, 8),
'f9': (216, 8),
'f10': (224, 8),
'f11': (232, 8),
'f12': (240, 8),
'f13': (248, 8),
'f14': (256, 8),
'f15': (264, 8),
'f16': (272, 8),
'f17': (280, 8),
'f18': (288, 8),
'f19': (296, 8),
'f20': (304, 8),
'f21': (312, 8),
'f22': (320, 8),
'f23': (328, 8),
'f24': (336, 8),
'f25': (344, 8),
'f26': (352, 8),
'f27': (360, 8),
'f28': (368, 8),
'f29': (376, 8),
'f30': (384, 8),
'f31': (392, 8),
'fir': (400, 4),
'fccr': (404, 4),
'fexr': (408, 4),
'fenr': (412, 4),
'fcsr': (416, 4),
'ulr': (420, 4),
'emnote': (424, 4),
'cmstart': (428, 4),
'cmlen': (432, 4),
'nraddr': (436, 4),
'evc_failaddr': (440, 4),
'evc_counter': (444, 4),
'cond': (448, 4),
'dspcontrol': (452, 4),
'ac0': (456, 8),
'ac1': (464, 8),
'ac2': (472, 8),
'ac3': (480, 8)
}
argument_registers = {
registers['v0'][0],
registers['v1'][0],
registers['a0'][0],
registers['a2'][0],
registers['a3'][0],
registers['t0'][0],
registers['t1'][0],
registers['t2'][0],
registers['t3'][0],
registers['t4'][0],
registers['t5'][0],
registers['t6'][0],
registers['t7'][0],
registers['s0'][0],
registers['s1'][0],
registers['s2'][0],
registers['s3'][0],
registers['s4'][0],
registers['s5'][0],
registers['s6'][0],
registers['t8'][0],
registers['t9'][0]
}
reloc_s_a = [2]
reloc_b_a = [3] # ..?
reloc_tls_mod_id = [38]
reloc_tls_offset = [47]
dynamic_tag_translation = {
0x70000001: 'DT_MIPS_RLD_VERSION',
0x70000005: 'DT_MIPS_FLAGS',
0x70000006: 'DT_MIPS_BASE_ADDRESS',
0x7000000a: 'DT_MIPS_LOCAL_GOTNO',
0x70000011: 'DT_MIPS_SYMTABNO',
0x70000012: 'DT_MIPS_UNREFEXTNO',
0x70000013: 'DT_MIPS_GOTSYM',
0x70000016: 'DT_MIPS_RLD_MAP'
}
got_section_name = '.got'
ld_linux_name = 'ld.so.1'
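# Illustrative sketch (not part of the original module): looking up registers on an
# instance. Constructing ArchMIPS32 assumes capstone is importable; the offsets come
# from the tables above.
#
#   >>> arch = ArchMIPS32(endness='Iend_LE')
#   >>> arch.registers['sp'], arch.registers['ra']
#   ((116, 4), (124, 4))
#   >>> arch.register_names[arch.ip_offset]
#   'pc'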
|
bsd-2-clause
|
Fl0rianFischer/sme_odoo
|
addons/crm/report/crm_activity_report.py
|
20
|
2126
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import fields, models
from openerp import tools
class crm_activity_report(models.Model):
""" CRM Lead Analysis """
_name = "crm.activity.report"
_auto = False
_description = "CRM Activity Analysis"
_rec_name = 'id'
date = fields.Datetime('Date', readonly=True)
author_id = fields.Many2one('res.partner', 'Author', readonly=True)
user_id = fields.Many2one('res.users', 'Responsible', readonly=True)
team_id = fields.Many2one('crm.team', 'Sales Team', readonly=True)
subtype_id = fields.Many2one('mail.message.subtype', 'Activity', readonly=True)
country_id = fields.Many2one('res.country', 'Country', readonly=True)
company_id = fields.Many2one('res.company', 'Company', readonly=True)
stage_id = fields.Many2one('crm.stage', 'Stage', readonly=True)
partner_id = fields.Many2one('res.partner', 'Partner/Customer', readonly=True)
    lead_type = fields.Selection(
string='Type',
selection=[('lead', 'Lead'), ('opportunity', 'Opportunity')],
help="Type is used to separate Leads and Opportunities")
def init(self, cr):
tools.drop_view_if_exists(cr, 'crm_activity_report')
cr.execute("""
CREATE OR REPLACE VIEW crm_activity_report AS (
select
m.id,
m.subtype_id,
m.author_id,
m.date,
l.user_id,
l.team_id,
l.country_id,
l.company_id,
l.stage_id,
l.partner_id,
l.type as lead_type
from
"mail_message" m
left join
"crm_lead" l
on
(m.res_id = l.id)
inner join
"crm_activity" a
on
(m.subtype_id = a.subtype_id)
WHERE
(m.model = 'crm.lead')
)""")
|
gpl-3.0
|
samdowd/drumm-farm
|
drumm_env/lib/python2.7/site-packages/boto/ec2/launchspecification.py
|
170
|
3829
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a launch specification for Spot instances.
"""
from boto.ec2.ec2object import EC2Object
from boto.resultset import ResultSet
from boto.ec2.blockdevicemapping import BlockDeviceMapping
from boto.ec2.group import Group
from boto.ec2.instance import SubParse
class GroupList(list):
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'groupId':
self.append(value)
class LaunchSpecification(EC2Object):
def __init__(self, connection=None):
super(LaunchSpecification, self).__init__(connection)
self.key_name = None
self.instance_type = None
self.image_id = None
self.groups = []
self.placement = None
self.kernel = None
self.ramdisk = None
self.monitored = False
self.subnet_id = None
self._in_monitoring_element = False
self.block_device_mapping = None
self.instance_profile = None
self.ebs_optimized = False
def __repr__(self):
return 'LaunchSpecification(%s)' % self.image_id
def startElement(self, name, attrs, connection):
if name == 'groupSet':
self.groups = ResultSet([('item', Group)])
return self.groups
elif name == 'monitoring':
self._in_monitoring_element = True
elif name == 'blockDeviceMapping':
self.block_device_mapping = BlockDeviceMapping()
return self.block_device_mapping
elif name == 'iamInstanceProfile':
self.instance_profile = SubParse('iamInstanceProfile')
return self.instance_profile
else:
return None
def endElement(self, name, value, connection):
if name == 'imageId':
self.image_id = value
elif name == 'keyName':
self.key_name = value
elif name == 'instanceType':
self.instance_type = value
elif name == 'availabilityZone':
self.placement = value
elif name == 'placement':
pass
elif name == 'kernelId':
self.kernel = value
elif name == 'ramdiskId':
self.ramdisk = value
elif name == 'subnetId':
self.subnet_id = value
elif name == 'state':
if self._in_monitoring_element:
if value == 'enabled':
self.monitored = True
self._in_monitoring_element = False
elif name == 'ebsOptimized':
self.ebs_optimized = (value == 'true')
else:
setattr(self, name, value)
|
mit
|
lucafavatella/intellij-community
|
python/testData/inspections/PyNumpyType/Slogdet.py
|
79
|
1052
|
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
    If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
"""
pass
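# Illustrative note (not part of the original stub): the docstring's identity
# determinant == sign * exp(logdet) on a concrete matrix: for [[1, 2], [3, 4]] the
# determinant is -2, so numpy.linalg.slogdet would return sign == -1.0 and
# logdet == log(2.0).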
(sign, logdet) = slogdet(a)
|
apache-2.0
|
ScottSteiner/uguubot
|
plugins/wu.py
|
1
|
4455
|
# weather.py
# Rewritten by ScottSteiner to use Weather Underground instead of Yahoo
from util import hook, database, http, web
base_url = "http://api.wunderground.com/api/{}/{}/q/{}.json"
def format_coordinates(latitude, longitude):
"Formats coordinates into one string"
latitude = round(float(latitude), 2)
longitude = round(float(longitude), 2)
if latitude > 0: latitude_direction = "N"
else: latitude_direction = "S"
if longitude > 0: longitude_direction = "E"
else: longitude_direction = "W"
return "{}{} {}{}".format(roundnum(abs(latitude)), latitude_direction, roundnum(abs(longitude)), longitude_direction)
def get_weather(location, api_key):
"Gets weather information from weather underground"
weather_url = base_url.format(api_key, "geolookup/conditions", location)
weather_data = http.get_json(weather_url)
if 'results' in weather_data['response']:
location = "zmw:{}".format(weather_data['response']['results'][0]['zmw'])
weather_url = base_url.format(api_key, "geolookup/conditions", location)
weather_data = http.get_json(weather_url)
alerts_url = base_url.format(api_key, "alerts", location)
forecast_url = base_url.format(api_key, "forecast", location)
alerts = http.get_json(alerts_url)['alerts']
forecast = http.get_json(forecast_url)['forecast']
current_observation = weather_data['current_observation']
return (current_observation, alerts, forecast)
@hook.command('w', autohelp=False)
@hook.command('we', autohelp=False)
@hook.command('wu', autohelp=False)
@hook.command('wz', autohelp=False)
@hook.command('weather', autohelp=False)
def weatherunderground(inp, nick=None, reply=None, db=None, notice=None, bot=None):
"weather | <location> [save] | <@ user> -- Gets weather data for <location>."
save = False
api_key = bot.config.get("api_keys", {}).get("weatherunderground", None)
if not api_key: return "error: missing api key"
if '@' in inp:
nick = inp.split('@')[1].strip()
loc = database.get(db,'users','location','nick',nick)
if not loc: return "No location stored for {}.".format(nick.encode('ascii', 'ignore'))
else:
if not inp:
loc = database.get(db,'users','location','nick',nick)
if not loc:
notice(weatherunderground.__doc__)
return
else:
if " save" in inp:
inp = inp.replace(' save','')
database.set(db,'users','location',inp,'nick',nick)
loc = inp
location = http.quote_plus(loc.replace(' ', '_'))
# now, to get the actual weather
try:
(data, alerts, forecast) = get_weather(location, api_key)
except KeyError:
return "Could not get weather for that location ({}).".format(location)
# put all the stuff we want to use in a dictionary for easy formatting of the output
tomorrow = forecast['simpleforecast']['forecastday'][1]
weather_data = {
"city": data['display_location']['full'],
"zip": data['display_location']['zip'],
"coordinates": format_coordinates(data['display_location']['latitude'],data['display_location']['longitude']),
"conditions": data['weather'],
"temp_f": roundnum(float(data['temp_f']),0),
"temp_c": roundnum(float(data['temp_c']),0),
"humidity": data['relative_humidity'],
"wind_kph": roundnum(data['wind_kph']),
"wind_mph": roundnum(data['wind_mph']),
"wind_direction": data['wind_dir'],
"wind_text": data['wind_string'],
"tomorrow_conditions": tomorrow['conditions'],
"tomorrow_high_f": roundnum(float(tomorrow['high']['fahrenheit']),0),
"tomorrow_high_c": roundnum(float(tomorrow['high']['celsius']),0),
"tomorrow_low_f": tomorrow['low']['fahrenheit'],
"tomorrow_low_c": tomorrow['low']['celsius'],
"alerts": ""
}
if weather_data['zip'] == "00000": weather_data['zip'] = ""
else:
if alerts:
desc = [x['description'] for x in alerts]
url = "http://www.accuweather.com/us/nothing/finer/{}/watches-warnings.asp".format(weather_data['zip'])
weather_data['alerts'] = " \x034,8\x02{}\x02 \x0312\037{}\037\x03".format(", ".join(desc), web.isgd(url))
weather_data['zip'] = " (\x02{}\x02)".format(weather_data['zip'])
reply(u"\x02{city}{zip} (\x02{coordinates}\x02) " \
"Current\x02: {conditions}, " \
"{temp_f}F/{temp_c}C, " \
"{wind_mph}mph/{wind_kph}kph ({wind_direction}) Wind, " \
"{humidity} Humidity "\
"\x02Tomorrow\x02: {tomorrow_conditions}, "\
"High {tomorrow_high_f}F/{tomorrow_high_c}C, "\
"Low {tomorrow_low_f}F/{tomorrow_low_c}C"\
"{alerts}".format(**weather_data))
def roundnum(num, digits=2):
return "{:g}".format(round(num,digits))
|
gpl-3.0
|
mustafat/odoo-1
|
addons/gamification/models/res_users.py
|
386
|
4010
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv
from challenge import MAX_VISIBILITY_RANKING
class res_users_gamification_group(osv.Model):
""" Update of res.users class
- if adding groups to an user, check gamification.challenge linked to
this group, and the user. This is done by overriding the write method.
"""
_name = 'res.users'
_inherit = ['res.users']
def get_serialised_gamification_summary(self, cr, uid, excluded_categories=None, context=None):
return self._serialised_goals_summary(cr, uid, user_id=uid, excluded_categories=excluded_categories, context=context)
def _serialised_goals_summary(self, cr, uid, user_id, excluded_categories=None, context=None):
"""Return a serialised list of goals assigned to the user, grouped by challenge
:excluded_categories: list of challenge categories to exclude in search
[
{
'id': <gamification.challenge id>,
'name': <gamification.challenge name>,
'visibility_mode': <visibility {ranking,personal}>,
'currency': <res.currency id>,
'lines': [(see gamification_challenge._get_serialized_challenge_lines() format)]
},
]
"""
all_goals_info = []
challenge_obj = self.pool.get('gamification.challenge')
domain = [('user_ids', 'in', uid), ('state', '=', 'inprogress')]
if excluded_categories and isinstance(excluded_categories, list):
domain.append(('category', 'not in', excluded_categories))
user = self.browse(cr, uid, uid, context=context)
challenge_ids = challenge_obj.search(cr, uid, domain, context=context)
for challenge in challenge_obj.browse(cr, uid, challenge_ids, context=context):
# serialize goals info to be able to use it in javascript
lines = challenge_obj._get_serialized_challenge_lines(cr, uid, challenge, user_id, restrict_top=MAX_VISIBILITY_RANKING, context=context)
if lines:
all_goals_info.append({
'id': challenge.id,
'name': challenge.name,
'visibility_mode': challenge.visibility_mode,
'currency': user.company_id.currency_id.id,
'lines': lines,
})
return all_goals_info
def get_challenge_suggestions(self, cr, uid, context=None):
"""Return the list of challenges suggested to the user"""
challenge_info = []
challenge_obj = self.pool.get('gamification.challenge')
challenge_ids = challenge_obj.search(cr, uid, [('invited_user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)
for challenge in challenge_obj.browse(cr, uid, challenge_ids, context=context):
values = {
'id': challenge.id,
'name': challenge.name,
'description': challenge.description,
}
challenge_info.append(values)
return challenge_info
|
agpl-3.0
|
Pablo126/SSBW
|
Tarea4/tarea4/lib/python3.5/site-packages/pip/_vendor/distlib/util.py
|
327
|
52991
|
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = r'(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
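# Illustrative sketch (not part of the original module): parse_requirement on a
# requirement with extras and version constraints. Note that REQUIREMENT_RE expects
# the extras bracket before the constraints. The sample string is an assumption.
#
#   >>> r = parse_requirement('foo [bar, baz] >= 1.2, < 2.0')
#   >>> r.name, r.extras, r.constraints
#   ('foo', ['bar', 'baz'], [('>=', '1.2'), ('<', '2.0')])
#   >>> r.requirement
#   'foo (>= 1.2, < 2.0)'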
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on macOS
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
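# Illustrative sketch (not part of the original module): cached_property runs the
# wrapped function once per instance, then stores the result as an instance attribute
# that shadows the (non-data) descriptor on later reads. The class below is an
# assumption.
#
#   >>> class Example(object):
#   ...     @cached_property
#   ...     def answer(self):
#   ...         print('computing')
#   ...         return 42
#   >>> e = Example()
#   >>> e.answer     # first access computes
#   computing
#   42
#   >>> e.answer     # second access reads the cached instance attribute
#   42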
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
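# Illustrative sketch (not part of the original module): on POSIX the path is returned
# unchanged; the values below show what a Windows host would produce, where separators
# are rewritten and absolute or trailing-slash inputs raise ValueError.
#
#   >>> convert_path('pkg/data/file.txt')
#   'pkg\\data\\file.txt'
#   >>> convert_path('/absolute/path')
#   Traceback (most recent call last):
#       ...
#   ValueError: path '/absolute/path' cannot be absolute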
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise DistlibException if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
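# Illustrative usage sketch (not part of the original source): resolve() walks
# a dotted attribute path inside a module. 'logging' and 'getLogger' are just
# example names; this helper is never called by the module itself.
def _resolve_example():
    import logging
    assert resolve('logging', 'getLogger') is logging.getLogger
    # With dotted_path=None the module object itself is returned.
    assert resolve('logging', None) is logging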
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self): # pragma: no cover
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException("Invalid specification "
"'%s'" % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
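# Illustrative usage sketch (not part of the original source): parsing an
# entry-point style specification. The names 'mycmd', 'mypkg.cli' and 'main'
# are invented for the example.
def _get_export_entry_example():
    entry = get_export_entry('mycmd = mypkg.cli:main [extra1,extra2]')
    assert entry.name == 'mycmd'
    assert entry.prefix == 'mypkg.cli'
    assert entry.suffix == 'main'
    assert entry.flags == ['extra1', 'extra2']
    # Specifications that do not match the pattern (and contain no brackets)
    # simply yield None.
    assert get_export_entry('not a valid spec!') is None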
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.path.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
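# Illustrative usage sketch (not part of the original source): on a POSIX
# system each os.sep becomes '--' and '.cache' is appended.
def _path_to_cache_dir_example():
    if os.sep == '/':
        assert (path_to_cache_dir('/home/user/project') ==
                '--home--user--project.cache')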
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
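# Illustrative usage sketch (not part of the original source): splitting
# optional credentials out of a URL netloc.
def _parse_credentials_example():
    assert parse_credentials('pypi.org') == (None, None, 'pypi.org')
    assert (parse_credentials('user:secret@pypi.org') ==
            ('user', 'secret', 'pypi.org'))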
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
filename = unquote(filename).replace(' ', '-')
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
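# Illustrative usage sketch (not part of the original source): extracting the
# name, version and optional python tag from a distribution filename.
def _split_filename_example():
    assert split_filename('pip-1.5.6') == ('pip', '1.5.6', None)
    assert (split_filename('some_project-0.1-py2.7') ==
            ('some_project', '0.1', '2.7'))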
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
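# Illustrative usage sketch (not part of the original source): values such as
# those in a Provides-Dist field are split into a (name, version) pair; the
# name is lower-cased and stripped of surrounding whitespace.
def _parse_name_and_version_example():
    assert parse_name_and_version('foo (1.0)') == ('foo', '1.0')
    assert parse_name_and_version('Twisted Core (12.3.0)') == ('twisted core',
                                                               '12.3.0')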
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
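# Illustrative usage sketch (not part of the original source): '*' requests
# every declared extra and a leading '-' removes one again.
def _get_extras_example():
    assert get_extras(['*', '-tests'], ['docs', 'tests']) == set(['docs'])
    assert get_extras(['docs'], ['docs', 'tests']) == set(['docs'])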
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get('Content-Type')
if not ct.startswith('application/json'):
logger.debug('Unexpected response for JSON request: %s', ct)
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
def get_project_data(name):
url = '%s/%s/project.json' % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish a event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
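# Illustrative usage sketch (not part of the original source): a minimal
# publish/subscribe round trip. The event name and subscriber are made up.
def _event_mixin_example():
    bus = EventMixin()
    def on_ping(event, value):
        # Subscribers receive the event name followed by the published args.
        return value * 2
    bus.add('ping', on_ping)
    assert bus.publish('ping', 21) == [42]
    bus.remove('ping', on_ping)
    assert bus.publish('ping', 21) == []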
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
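# Illustrative usage sketch (not part of the original source): ordering steps
# with the Sequencer. The step names are invented; add(pred, succ) records
# that 'pred' must run before 'succ'.
def _sequencer_example():
    seq = Sequencer()
    seq.add('build', 'test')
    seq.add('test', 'release')
    assert list(seq.get_steps('release')) == ['build', 'test', 'release']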
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else: # pragma: no cover
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else: # pragma: no cover
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent mixing HTTP traffic with HTTPS (examples: a Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing an http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
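# Illustrative usage sketch (not part of the original source): runs of '-',
# '_' and '.' collapse to a single '-' and the result is lower-cased, as in
# the PEP 503 examples.
def _normalize_name_example():
    assert normalize_name('Friendly_Bard') == 'friendly-bard'
    assert normalize_name('friendly.bard') == 'friendly-bard'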
|
gpl-3.0
|
taknevski/tensorflow-xsmm
|
tensorflow/contrib/copy_graph/python/util/copy_test.py
|
112
|
3739
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.copy_graph.python.util.copy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.copy_graph.python.util import copy_elements
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
graph1 = ops.Graph()
graph2 = ops.Graph()
class CopyVariablesTest(test.TestCase):
def testVariableCopy(self):
with graph1.as_default():
#Define a Variable in graph1
some_var = variables.Variable(2)
#Initialize session
sess1 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess1)
    #Make a copy of some_var in the default scope in graph2
copy1 = copy_elements.copy_variable_to_graph(some_var, graph2)
#Make another copy with different scope
copy2 = copy_elements.copy_variable_to_graph(some_var, graph2, "test_scope")
#Initialize both the copies
with graph2.as_default():
#Initialize Session
sess2 = session_lib.Session()
#Initialize the Variables
variables.global_variables_initializer().run(session=sess2)
#Ensure values in all three variables are the same
v1 = some_var.eval(session=sess1)
v2 = copy1.eval(session=sess2)
v3 = copy2.eval(session=sess2)
assert isinstance(copy1, variables.Variable)
assert isinstance(copy2, variables.Variable)
assert v1 == v2 == v3 == 2
class CopyOpsTest(test.TestCase):
def testOpsCopy(self):
with graph1.as_default():
#Initialize a basic expression y = ax + b
x = array_ops.placeholder("float")
a = variables.Variable(3.0)
b = constant_op.constant(4.0)
ax = math_ops.multiply(x, a)
y = math_ops.add(ax, b)
#Initialize session
sess1 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess1)
#First, initialize a as a Variable in graph2
a1 = copy_elements.copy_variable_to_graph(a, graph2)
#Initialize a1 in graph2
with graph2.as_default():
#Initialize session
sess2 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess2)
#Initialize a copy of y in graph2
y1 = copy_elements.copy_op_to_graph(y, graph2, [a1])
#Now that y has been copied, x must be copied too.
#Get that instance
x1 = copy_elements.get_copied_op(x, graph2)
#Compare values of y & y1 for a sample input
#and check if they match
v1 = y.eval({x: 5}, session=sess1)
v2 = y1.eval({x1: 5}, session=sess2)
assert v1 == v2
if __name__ == "__main__":
test.main()
|
apache-2.0
|
zyq001/ryu
|
ryu/lib/packet/igmp.py
|
19
|
19270
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Internet Group Management Protocol(IGMP) packet parser/serializer
RFC 1112
IGMP v1 format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Version| Type | Unused | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Group Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
RFC 2236
IGMP v2 format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Max Resp Time | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Group Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
RFC 3376
IGMP v3 Membership Query format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type = 0x11 | Max Resp Code | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Group Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Resv |S| QRV | QQIC | Number of Sources (N) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address [1] |
+- -+
| Source Address [2] |
+- . -+
. . .
. . .
+- -+
| Source Address [N] |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
IGMP v3 Membership Report format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type = 0x22 | Reserved | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved | Number of Group Records (M) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Group Record [1] .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Group Record [2] .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| . |
. . .
| . |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Group Record [M] .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
where each Group Record has the following internal format:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Record Type | Aux Data Len | Number of Sources (N) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Multicast Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address [1] |
+- -+
| Source Address [2] |
+- -+
. . .
. . .
. . .
+- -+
| Source Address [N] |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Auxiliary Data .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
import six
import struct
from math import trunc
from ryu.lib import addrconv
from ryu.lib import stringify
from ryu.lib.packet import packet_base
from ryu.lib.packet import packet_utils
IGMP_TYPE_QUERY = 0x11
IGMP_TYPE_REPORT_V1 = 0x12
IGMP_TYPE_REPORT_V2 = 0x16
IGMP_TYPE_LEAVE = 0x17
IGMP_TYPE_REPORT_V3 = 0x22
QUERY_RESPONSE_INTERVAL = 10.0
LAST_MEMBER_QUERY_INTERVAL = 1.0
MULTICAST_IP_ALL_HOST = '224.0.0.1'
MULTICAST_MAC_ALL_HOST = '01:00:5e:00:00:01'
# for types of IGMPv3 Report Group Records
MODE_IS_INCLUDE = 1
MODE_IS_EXCLUDE = 2
CHANGE_TO_INCLUDE_MODE = 3
CHANGE_TO_EXCLUDE_MODE = 4
ALLOW_NEW_SOURCES = 5
BLOCK_OLD_SOURCES = 6
class igmp(packet_base.PacketBase):
"""
Internet Group Management Protocol(IGMP, RFC 1112, RFC 2236)
header encoder/decoder class.
http://www.ietf.org/rfc/rfc1112.txt
http://www.ietf.org/rfc/rfc2236.txt
    An instance has at least the following attributes.
    Most of them are the same as the on-wire counterparts, but in host
    byte order.
__init__ takes the corresponding args in this order.
=============== ====================================================
Attribute Description
=============== ====================================================
msgtype a message type for v2, or a combination of
version and a message type for v1.
    maxresp         max response time in units of 1/10 second. It is
                    meaningful only in Query Message.
csum a check sum value. 0 means automatically-calculate
when encoding.
address a group address value.
=============== ====================================================
"""
_PACK_STR = '!BBH4s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': [
'address'
]
}
def __init__(self, msgtype=IGMP_TYPE_QUERY, maxresp=0, csum=0,
address='0.0.0.0'):
super(igmp, self).__init__()
self.msgtype = msgtype
self.maxresp = maxresp
self.csum = csum
self.address = address
@classmethod
def parser(cls, buf):
assert cls._MIN_LEN <= len(buf)
(msgtype, ) = struct.unpack_from('!B', buf)
if (IGMP_TYPE_QUERY == msgtype and
igmpv3_query.MIN_LEN <= len(buf)):
(instance, subclass, rest,) = igmpv3_query.parser(buf)
elif IGMP_TYPE_REPORT_V3 == msgtype:
(instance, subclass, rest,) = igmpv3_report.parser(buf)
else:
(msgtype, maxresp, csum, address
) = struct.unpack_from(cls._PACK_STR, buf)
instance = cls(msgtype, maxresp, csum,
addrconv.ipv4.bin_to_text(address))
subclass = None
rest = buf[cls._MIN_LEN:]
return instance, subclass, rest
def serialize(self, payload, prev):
hdr = bytearray(struct.pack(self._PACK_STR, self.msgtype,
trunc(self.maxresp), self.csum,
addrconv.ipv4.text_to_bin(self.address)))
if self.csum == 0:
self.csum = packet_utils.checksum(hdr)
struct.pack_into('!H', hdr, 2, self.csum)
return hdr
class igmpv3_query(igmp):
"""
Internet Group Management Protocol(IGMP, RFC 3376)
Membership Query message encoder/decoder class.
http://www.ietf.org/rfc/rfc3376.txt
    An instance has at least the following attributes.
    Most of them are the same as the on-wire counterparts, but in host
    byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
=============== ====================================================
Attribute Description
=============== ====================================================
msgtype a message type for v3.
    maxresp         max response time in units of 1/10 second.
csum a check sum value. 0 means automatically-calculate
when encoding.
address a group address value.
s_flg when set to 1, routers suppress the timer process.
qrv robustness variable for a querier.
    qqic            an interval time for a querier in units of seconds.
    num             the number of multicast servers.
srcs a list of IPv4 addresses of the multicast servers.
=============== ====================================================
"""
_PACK_STR = '!BBH4sBBH'
_MIN_LEN = struct.calcsize(_PACK_STR)
MIN_LEN = _MIN_LEN
_TYPE = {
'ascii': [
'address'
],
'asciilist': [
'srcs'
]
}
def __init__(self, msgtype=IGMP_TYPE_QUERY, maxresp=100, csum=0,
address='0.0.0.0', s_flg=0, qrv=2, qqic=0, num=0,
srcs=None):
super(igmpv3_query, self).__init__(
msgtype, maxresp, csum, address)
self.s_flg = s_flg
self.qrv = qrv
self.qqic = qqic
self.num = num
srcs = srcs or []
assert isinstance(srcs, list)
for src in srcs:
assert isinstance(src, str)
self.srcs = srcs
@classmethod
def parser(cls, buf):
(msgtype, maxresp, csum, address, s_qrv, qqic, num
) = struct.unpack_from(cls._PACK_STR, buf)
s_flg = (s_qrv >> 3) & 0b1
qrv = s_qrv & 0b111
offset = cls._MIN_LEN
srcs = []
while 0 < len(buf[offset:]) and num > len(srcs):
assert 4 <= len(buf[offset:])
(src, ) = struct.unpack_from('4s', buf, offset)
srcs.append(addrconv.ipv4.bin_to_text(src))
offset += 4
assert num == len(srcs)
return (cls(msgtype, maxresp, csum,
addrconv.ipv4.bin_to_text(address), s_flg, qrv,
qqic, num, srcs),
None,
buf[offset:])
def serialize(self, payload, prev):
s_qrv = self.s_flg << 3 | self.qrv
buf = bytearray(struct.pack(self._PACK_STR, self.msgtype,
trunc(self.maxresp), self.csum,
addrconv.ipv4.text_to_bin(self.address),
s_qrv, trunc(self.qqic), self.num))
for src in self.srcs:
buf.extend(struct.pack('4s', addrconv.ipv4.text_to_bin(src)))
if 0 == self.num:
self.num = len(self.srcs)
struct.pack_into('!H', buf, 10, self.num)
if 0 == self.csum:
self.csum = packet_utils.checksum(buf)
struct.pack_into('!H', buf, 2, self.csum)
return six.binary_type(buf)
def __len__(self):
return self._MIN_LEN + len(self.srcs) * 4
class igmpv3_report(igmp):
"""
Internet Group Management Protocol(IGMP, RFC 3376)
Membership Report message encoder/decoder class.
http://www.ietf.org/rfc/rfc3376.txt
    An instance has at least the following attributes.
    Most of them are the same as the on-wire counterparts, but in host
    byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
=============== ====================================================
Attribute Description
=============== ====================================================
msgtype a message type for v3.
csum a check sum value. 0 means automatically-calculate
when encoding.
    record_num      the number of group records.
records a list of ryu.lib.packet.igmp.igmpv3_report_group.
None if no records.
=============== ====================================================
"""
_PACK_STR = '!BxH2xH'
_MIN_LEN = struct.calcsize(_PACK_STR)
_class_prefixes = ['igmpv3_report_group']
def __init__(self, msgtype=IGMP_TYPE_REPORT_V3, csum=0, record_num=0,
records=None):
self.msgtype = msgtype
self.csum = csum
self.record_num = record_num
records = records or []
assert isinstance(records, list)
for record in records:
assert isinstance(record, igmpv3_report_group)
self.records = records
@classmethod
def parser(cls, buf):
(msgtype, csum, record_num
) = struct.unpack_from(cls._PACK_STR, buf)
offset = cls._MIN_LEN
records = []
while 0 < len(buf[offset:]) and record_num > len(records):
record = igmpv3_report_group.parser(buf[offset:])
records.append(record)
offset += len(record)
assert record_num == len(records)
return (cls(msgtype, csum, record_num, records),
None,
buf[offset:])
def serialize(self, payload, prev):
buf = bytearray(struct.pack(self._PACK_STR, self.msgtype,
self.csum, self.record_num))
for record in self.records:
buf.extend(record.serialize())
if 0 == self.record_num:
self.record_num = len(self.records)
struct.pack_into('!H', buf, 6, self.record_num)
if 0 == self.csum:
self.csum = packet_utils.checksum(buf)
struct.pack_into('!H', buf, 2, self.csum)
return six.binary_type(buf)
def __len__(self):
records_len = 0
for record in self.records:
records_len += len(record)
return self._MIN_LEN + records_len
class igmpv3_report_group(stringify.StringifyMixin):
"""
Internet Group Management Protocol(IGMP, RFC 3376)
Membership Report Group Record message encoder/decoder class.
http://www.ietf.org/rfc/rfc3376.txt
This is used with ryu.lib.packet.igmp.igmpv3_report.
    An instance has at least the following attributes.
    Most of them are the same as the on-wire counterparts, but in host
    byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
=============== ====================================================
Attribute Description
=============== ====================================================
type\_ a group record type for v3.
aux_len the length of the auxiliary data.
    num             the number of multicast servers.
address a group address value.
srcs a list of IPv4 addresses of the multicast servers.
aux the auxiliary data.
=============== ====================================================
"""
_PACK_STR = '!BBH4s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': [
'address'
],
'asciilist': [
'srcs'
]
}
def __init__(self, type_=0, aux_len=0, num=0, address='0.0.0.0',
srcs=None, aux=None):
self.type_ = type_
self.aux_len = aux_len
self.num = num
self.address = address
srcs = srcs or []
assert isinstance(srcs, list)
for src in srcs:
assert isinstance(src, str)
self.srcs = srcs
self.aux = aux
@classmethod
def parser(cls, buf):
(type_, aux_len, num, address
) = struct.unpack_from(cls._PACK_STR, buf)
offset = cls._MIN_LEN
srcs = []
while 0 < len(buf[offset:]) and num > len(srcs):
assert 4 <= len(buf[offset:])
(src, ) = struct.unpack_from('4s', buf, offset)
srcs.append(addrconv.ipv4.bin_to_text(src))
offset += 4
assert num == len(srcs)
aux = None
if aux_len:
(aux, ) = struct.unpack_from('%ds' % (aux_len * 4), buf, offset)
return cls(type_, aux_len, num,
addrconv.ipv4.bin_to_text(address), srcs, aux)
def serialize(self):
buf = bytearray(struct.pack(self._PACK_STR, self.type_,
self.aux_len, self.num,
addrconv.ipv4.text_to_bin(self.address)))
for src in self.srcs:
buf.extend(struct.pack('4s', addrconv.ipv4.text_to_bin(src)))
if 0 == self.num:
self.num = len(self.srcs)
struct.pack_into('!H', buf, 2, self.num)
if self.aux is not None:
mod = len(self.aux) % 4
if mod:
self.aux += bytearray(4 - mod)
self.aux = six.binary_type(self.aux)
buf.extend(self.aux)
if 0 == self.aux_len:
self.aux_len = len(self.aux) // 4
struct.pack_into('!B', buf, 1, self.aux_len)
return six.binary_type(buf)
def __len__(self):
return self._MIN_LEN + len(self.srcs) * 4 + self.aux_len * 4
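# Illustrative usage sketch (not part of the original Ryu source): building
# and re-parsing a plain (v1/v2 style) IGMP header. The group address is just
# an example value; this helper is not used by the library itself.
def _igmp_example():
    query = igmp(msgtype=IGMP_TYPE_QUERY, maxresp=100, address='224.0.0.1')
    wire = query.serialize(payload=None, prev=None)     # 8-byte header
    parsed, _subclass, _rest = igmp.parser(six.binary_type(wire))
    assert parsed.address == '224.0.0.1'
    assert parsed.msgtype == IGMP_TYPE_QUERY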
|
apache-2.0
|
debasishm89/burpy
|
modules/fbxsrf.py
|
3
|
1716
|
from rawweb import *
def main(raw_stream,ssl):
'''
    This Burpy module is specially written to find CSRF vulnerabilities in the Facebook application.
    It has already found a few minor CSRF vulnerabilities in the FB application; a few of them qualified for the Bug Bounty.
    It simply checks whether CSRF token validation is performed on the server side by removing the token from the request and replaying it.
    The Facebook application always shows a generic error message for a CSRF failure, which is "Please try closing and re-opening your browser".
    If this error is not present in the response after removing the token, the module reports a positive result.
'''
title = [
"Possible XSRF", #Please don't add <script>/ html tags here and report XSS :P
"CSRF Token (fb_dtsg) Removed from Raw Request"
]
csrf_error = "Please try closing and re-opening your browser" #
raw = RawWeb(raw_stream)
if "fb_dtsg" in raw_stream: # Check if request contains any CSRF token or not
final = raw.removeparameter("fb_dtsg") # Use rawweb api to remove the parameter from request
else:
return "FALSE" #CSRF token not present in request
result = raw.fire(ssl)
#result[0] => 200
#result[1] => OK
#result[2] => Respheaders => dict
#result[3] => body
    if csrf_error in result[3]: # If the CSRF error is present in the response body, everything is fine; return false
if result[0] != 500:
            # token validation is in place on the server side
            # If the test is positive, return True, response headers, response body.
#return res.status,res.reason,res_headers,self.craft_res(res.getheaders(),res.read())
return "FALSE"
else:
return title,final,result[0],result[1],result[2],result[3]
else:
        # CSRF error message absent, so report the positive finding
return title,final,result[0],result[1],result[2],result[3] # Else return the crafted request
|
gpl-2.0
|
tsl143/zamboni
|
mkt/account/views.py
|
1
|
16495
|
import hashlib
import hmac
import json
import uuid
import urlparse
from django import http
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.signals import user_logged_in
from django.db import IntegrityError
from django.utils.datastructures import MultiValueDictKeyError
import basket
import commonware.log
from django_browserid import get_audience
from django_statsd.clients import statsd
from requests_oauthlib import OAuth2Session
from rest_framework import status
from rest_framework.exceptions import AuthenticationFailed, ParseError
from rest_framework.generics import (CreateAPIView, DestroyAPIView,
RetrieveAPIView, RetrieveUpdateAPIView)
from rest_framework.mixins import DestroyModelMixin, ListModelMixin
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.throttling import UserRateThrottle
from rest_framework.viewsets import GenericViewSet
import mkt
from lib.metrics import record_action
from mkt.access.models import Group, GroupUser
from mkt.users.models import UserProfile
from mkt.users.views import browserid_authenticate
from mkt.account.serializers import (AccountSerializer, FeedbackSerializer,
FxALoginSerializer, GroupsSerializer,
LoginSerializer,
NewsletterSerializer,
PermissionsSerializer)
from mkt.api.authentication import (RestAnonymousAuthentication,
RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.base import CORSMixin, MarketplaceView
from mkt.api.permissions import AllowSelf, AllowOwner, GroupPermission
from mkt.constants.apps import INSTALL_TYPE_USER
from mkt.site.mail import send_mail_jinja
from mkt.site.utils import log_cef
from mkt.webapps.serializers import SimpleAppSerializer
from mkt.webapps.models import Installed, Webapp
log = commonware.log.getLogger('z.account')
def user_relevant_apps(user):
return {
'developed': list(user.addonuser_set.filter(
role=mkt.AUTHOR_ROLE_OWNER).values_list('addon_id', flat=True)),
'installed': list(user.installed_set.values_list(
'addon_id', flat=True)),
'purchased': list(user.purchase_ids()),
}
class MineMixin(object):
def get_object(self, queryset=None):
pk = self.kwargs.get('pk')
if pk == 'mine':
self.kwargs['pk'] = self.request.user.pk
return super(MineMixin, self).get_object(queryset)
class InstalledViewSet(CORSMixin, MarketplaceView, ListModelMixin,
GenericViewSet):
cors_allowed_methods = ['get']
serializer_class = SimpleAppSerializer
permission_classes = [AllowSelf]
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
def get_queryset(self):
return Webapp.objects.filter(
installed__user=self.request.user,
installed__install_type=INSTALL_TYPE_USER).order_by(
'-installed__created')
def remove_app(self, request, **kwargs):
self.cors_allowed_methods = ['post']
try:
to_remove = Webapp.objects.get(pk=request.DATA['app'])
except (KeyError, MultiValueDictKeyError):
raise ParseError(detail='`app` was not provided.')
except Webapp.DoesNotExist:
raise ParseError(detail='`app` does not exist.')
try:
installed = request.user.installed_set.get(
install_type=INSTALL_TYPE_USER, addon_id=to_remove.pk)
installed.delete()
except Installed.DoesNotExist:
raise ParseError(detail='`app` is not installed or not removable.')
return Response(status=status.HTTP_202_ACCEPTED)
class CreateAPIViewWithoutModel(MarketplaceView, CreateAPIView):
"""
A base class for APIs that need to support a create-like action, but
without being tied to a Django Model.
"""
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication,
RestAnonymousAuthentication]
cors_allowed_methods = ['post']
permission_classes = (AllowAny,)
def response_success(self, request, serializer, data=None):
if data is None:
data = serializer.data
return Response(data, status=status.HTTP_201_CREATED)
def response_error(self, request, serializer):
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.DATA)
if serializer.is_valid():
data = self.create_action(request, serializer)
return self.response_success(request, serializer, data=data)
return self.response_error(request, serializer)
class AccountView(MineMixin, CORSMixin, RetrieveUpdateAPIView):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
cors_allowed_methods = ['get', 'patch', 'put']
model = UserProfile
permission_classes = (AllowOwner,)
serializer_class = AccountSerializer
class AnonymousUserMixin(object):
def get_object(self, *args, **kwargs):
try:
user = super(AnonymousUserMixin, self).get_object(*args, **kwargs)
except http.Http404:
# The base get_object() will raise Http404 instead of DoesNotExist.
# Treat no object as an anonymous user (source: unknown).
user = UserProfile(is_verified=False)
return user
class FeedbackView(CORSMixin, CreateAPIViewWithoutModel):
class FeedbackThrottle(UserRateThrottle):
THROTTLE_RATES = {
'user': '30/hour',
}
serializer_class = FeedbackSerializer
throttle_classes = (FeedbackThrottle,)
throttle_scope = 'user'
def create_action(self, request, serializer):
context_data = self.get_context_data(request, serializer)
sender = getattr(request.user, 'email', settings.NOBODY_EMAIL)
send_mail_jinja(u'Marketplace Feedback', 'account/email/feedback.txt',
context_data, headers={'Reply-To': sender},
recipient_list=[settings.MKT_APPS_FEEDBACK_EMAIL])
def get_context_data(self, request, serializer):
context_data = {
'user_agent': request.META.get('HTTP_USER_AGENT', ''),
'ip_address': request.META.get('REMOTE_ADDR', '')
}
context_data.update(serializer.data)
context_data['user'] = request.user
return context_data
def commonplace_token(email):
unique_id = uuid.uuid4().hex
consumer_id = hashlib.sha1(
email + settings.SECRET_KEY).hexdigest()
hm = hmac.new(
unique_id + settings.SECRET_KEY,
consumer_id, hashlib.sha512)
return ','.join((email, hm.hexdigest(), unique_id))
def fxa_oauth_api(name):
return urlparse.urljoin(settings.FXA_OAUTH_URL, 'v1/' + name)
def find_or_create_user(email, fxa_uid):
def find_user(**kwargs):
try:
return UserProfile.objects.get(**kwargs)
except UserProfile.DoesNotExist:
return None
profile = find_user(fxa_uid=fxa_uid) or find_user(email=email)
if profile:
created = False
profile.update(fxa_uid=fxa_uid, email=email)
else:
created = True
profile = UserProfile.objects.create(
fxa_uid=fxa_uid,
email=email,
source=mkt.LOGIN_SOURCE_FXA,
display_name=email.partition('@')[0],
is_verified=True)
if profile.source != mkt.LOGIN_SOURCE_FXA:
log.info('Set account to FxA for {0}'.format(email))
statsd.incr('z.mkt.user.fxa')
profile.update(source=mkt.LOGIN_SOURCE_FXA)
return profile, created
def fxa_authorize(session, client_secret, auth_response):
token = session.fetch_token(
fxa_oauth_api('token'),
authorization_response=auth_response,
client_secret=client_secret)
res = session.post(
fxa_oauth_api('verify'),
data=json.dumps({'token': token['access_token']}),
headers={'Content-Type': 'application/json'})
return res.json()
class FxALoginView(CORSMixin, CreateAPIViewWithoutModel):
authentication_classes = []
serializer_class = FxALoginSerializer
def create_action(self, request, serializer):
client_id = request.POST.get('client_id', settings.FXA_CLIENT_ID)
secret = settings.FXA_SECRETS[client_id]
session = OAuth2Session(
client_id,
scope=u'profile',
state=serializer.data['state'])
auth_response = serializer.data['auth_response']
fxa_authorization = fxa_authorize(session, secret, auth_response)
if 'user' in fxa_authorization:
email = fxa_authorization['email']
fxa_uid = fxa_authorization['user']
profile, created = find_or_create_user(email, fxa_uid)
if created:
log_cef('New Account', 5, request, username=fxa_uid,
signature='AUTHNOTICE',
msg='User created a new account (from FxA)')
record_action('new-user', request)
auth.login(request, profile)
profile.update(last_login_ip=request.META.get('REMOTE_ADDR', ''))
auth.signals.user_logged_in.send(sender=profile.__class__,
request=request,
user=profile)
else:
raise AuthenticationFailed('No profile.')
request.user = profile
request.groups = profile.groups.all()
# Remember whether the user has logged in to highlight the register or
# sign in nav button. 31536000 == one year.
request.set_cookie('has_logged_in', '1', max_age=5 * 31536000)
# We want to return completely custom data, not the serializer's.
data = {
'error': None,
'token': commonplace_token(request.user.email),
'settings': {
'display_name': request.user.display_name,
'email': request.user.email,
'enable_recommendations': request.user.enable_recommendations,
'source': 'firefox-accounts',
}
}
# Serializers give up if they aren't passed an instance, so we
# do that here despite PermissionsSerializer not needing one
# really.
permissions = PermissionsSerializer(context={'request': request},
instance=True)
data.update(permissions.data)
# Add ids of installed/purchased/developed apps.
data['apps'] = user_relevant_apps(profile)
return data
class LoginView(CORSMixin, CreateAPIViewWithoutModel):
authentication_classes = []
serializer_class = LoginSerializer
def create_action(self, request, serializer):
with statsd.timer('auth.browserid.verify'):
profile, msg = browserid_authenticate(
request, serializer.data['assertion'],
browserid_audience=(serializer.data['audience'] or
get_audience(request)),
is_mobile=serializer.data['is_mobile'],
)
if profile is None:
# Authentication failure.
log.info('No profile: %s' % (msg or ''))
raise AuthenticationFailed('No profile.')
request.user = profile
request.groups = profile.groups.all()
auth.login(request, profile)
user_logged_in.send(sender=profile.__class__, request=request,
user=profile)
# We want to return completely custom data, not the serializer's.
data = {
'error': None,
'token': commonplace_token(request.user.email),
'settings': {
'display_name': request.user.display_name,
'email': request.user.email,
'enable_recommendations': request.user.enable_recommendations,
}
}
# Serializers give up if they aren't passed an instance, so we
# do that here despite PermissionsSerializer not needing one
# really.
permissions = PermissionsSerializer(context={'request': request},
instance=True)
data.update(permissions.data)
# Add ids of installed/purchased/developed apps.
data['apps'] = user_relevant_apps(profile)
return data
class LogoutView(CORSMixin, DestroyAPIView):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
permission_classes = (IsAuthenticated,)
cors_allowed_methods = ['delete']
def delete(self, request):
auth.logout(request)
return Response(status=status.HTTP_204_NO_CONTENT)
class NewsletterView(CORSMixin, CreateAPIViewWithoutModel):
class NewsletterThrottle(UserRateThrottle):
scope = 'newsletter'
THROTTLE_RATES = {
'newsletter': '30/hour',
}
serializer_class = NewsletterSerializer
throttle_classes = (NewsletterThrottle,)
def get_region(self):
return self.request.REGION.slug
def get_country(self):
region = self.get_region()
return '' if region == 'restofworld' else region
def response_success(self, request, serializer, data=None):
return Response({}, status=status.HTTP_204_NO_CONTENT)
def create_action(self, request, serializer):
email = serializer.data['email']
newsletter = serializer.data['newsletter']
lang = serializer.data['lang']
country = self.get_country()
basket.subscribe(email, newsletter, format='H', country=country,
lang=lang, optin='Y', trigger_welcome='Y')
class PermissionsView(CORSMixin, MineMixin, RetrieveAPIView):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
cors_allowed_methods = ['get']
permission_classes = (AllowSelf,)
model = UserProfile
serializer_class = PermissionsSerializer
class GroupsViewSet(CORSMixin, ListModelMixin, DestroyModelMixin,
GenericViewSet):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
cors_allowed_methods = ['get', 'post', 'delete']
serializer_class = GroupsSerializer
permission_classes = [GroupPermission('Admin', '%')]
def paginate_queryset(self, queryset, page_size=None):
return None
def get_queryset(self):
return self.get_user().groups.all()
def get_user(self):
try:
return UserProfile.objects.get(pk=self.kwargs.get('pk'))
except UserProfile.DoesNotExist:
raise ParseError('User must exist.')
def get_group(self):
try:
group = (self.request.DATA.get('group') or
self.request.QUERY_PARAMS.get('group'))
return Group.objects.get(pk=group)
except Group.DoesNotExist:
raise ParseError('Group does not exist.')
def get_object(self):
user = self.get_user()
group = self.get_group()
try:
obj = GroupUser.objects.get(user=user, group=group)
        except GroupUser.DoesNotExist as e:
raise ParseError('User isn\'t in that group? %s' % e)
return obj
def pre_delete(self, instance):
if instance.group.restricted:
raise ParseError('Restricted groups can\'t be unset via the API.')
def create(self, request, **kwargs):
user = self.get_user()
group = self.get_group()
if group.restricted:
raise ParseError('Restricted groups can\'t be set via the API.')
try:
GroupUser.objects.create(user=user, group=group)
        except IntegrityError as e:
raise ParseError('User is already in that group? %s' % e)
return Response(status=status.HTTP_201_CREATED)
|
bsd-3-clause
|
andhit-r/account-financial-tools
|
currency_rate_update/services/update_service_PL_NBP.py
|
41
|
4486
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 CamptoCamp. All rights reserved.
# @author Nicolas Bessi
#
# Abstract class to fetch rates from National Bank of Poland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .currency_getter_interface import Currency_getter_interface
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
import logging
_logger = logging.getLogger(__name__)
class PL_NBP_getter(Currency_getter_interface):
"""Implementation of Currency_getter_factory interface
for PL NBP service
"""
def rate_retrieve(self, dom, ns, curr):
""" Parse a dom node to retrieve
currencies data"""
res = {}
xpath_rate_currency = ("/tabela_kursow/pozycja[kod_waluty='%s']/"
"kurs_sredni/text()") % (curr.upper())
xpath_rate_ref = ("/tabela_kursow/pozycja[kod_waluty='%s']/"
"przelicznik/text()") % (curr.upper())
res['rate_currency'] = float(
dom.xpath(xpath_rate_currency, namespaces=ns)[0].replace(',', '.')
)
res['rate_ref'] = float(dom.xpath(xpath_rate_ref, namespaces=ns)[0])
return res
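    # Illustrative note (not part of the original module): each <pozycja>
    # entry in the NBP table is assumed to look roughly like
    #     <pozycja>
    #       <kod_waluty>EUR</kod_waluty>
    #       <przelicznik>1</przelicznik>
    #       <kurs_sredni>4,2511</kurs_sredni>
    #     </pozycja>
    # for which rate_retrieve() would return
    #     {'rate_currency': 4.2511, 'rate_ref': 1.0}
    # (note the decimal comma, replaced by a dot before conversion).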
def get_updated_currency(self, currency_array, main_currency,
max_delta_days):
"""implementation of abstract method of Curreny_getter_interface"""
# LastA.xml is always the most recent one
url = 'http://www.nbp.pl/kursy/xml/LastA.xml'
# We do not want to update the main currency
if main_currency in currency_array:
currency_array.remove(main_currency)
# Move to new XML lib cf Launchpad bug #645263
from lxml import etree
_logger.debug("NBP.pl currency rate service : connecting...")
rawfile = self.get_url(url)
dom = etree.fromstring(rawfile)
ns = {} # Cool, there are no namespaces !
_logger.debug("NBP.pl sent a valid XML file")
rate_date = dom.xpath('/tabela_kursow/data_publikacji/text()',
namespaces=ns)[0]
rate_date_datetime = datetime.strptime(rate_date,
DEFAULT_SERVER_DATE_FORMAT)
self.check_rate_date(rate_date_datetime, max_delta_days)
# We dynamically update supported currencies
self.supported_currency_array = dom.xpath(
'/tabela_kursow/pozycja/kod_waluty/text()',
namespaces=ns
)
self.supported_currency_array.append('PLN')
_logger.debug("Supported currencies = %s" %
self.supported_currency_array)
self.validate_cur(main_currency)
if main_currency != 'PLN':
main_curr_data = self.rate_retrieve(dom, ns, main_currency)
# 1 MAIN_CURRENCY = main_rate PLN
main_rate = (main_curr_data['rate_currency'] /
main_curr_data['rate_ref'])
for curr in currency_array:
self.validate_cur(curr)
if curr == 'PLN':
rate = main_rate
else:
curr_data = self.rate_retrieve(dom, ns, curr)
# 1 MAIN_CURRENCY = rate CURR
if main_currency == 'PLN':
rate = curr_data['rate_ref'] / curr_data['rate_currency']
else:
rate = (main_rate * curr_data['rate_ref'] /
curr_data['rate_currency'])
self.updated_currency[curr] = rate
_logger.debug("Rate retrieved : %s = %s %s" %
(main_currency, rate, curr))
return self.updated_currency, self.log_info
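# Worked example (illustrative, not part of the original module), assuming
# main_currency='EUR' quoted at 4.20 PLN (przelicznik 1) and curr='USD'
# quoted at 3.50 PLN (przelicznik 1):
#     main_rate = 4.20 / 1           # 1 EUR = 4.20 PLN
#     rate      = 4.20 * 1 / 3.50    # = 1.20, i.e. 1 EUR = 1.20 USD
# and for curr='PLN' the rate is simply main_rate, i.e. 4.20.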
|
agpl-3.0
|
mhvk/astropy
|
astropy/io/ascii/tests/test_c_reader.py
|
8
|
62931
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import functools
from io import BytesIO
from textwrap import dedent
import pytest
import numpy as np
from numpy import ma
from astropy.table import Table, MaskedColumn
from astropy.io import ascii
from astropy.io.ascii.core import ParameterError, FastOptionsError, InconsistentTableError
from astropy.io.ascii.fastbasic import (
FastBasic, FastCsv, FastTab, FastCommentedHeader, FastRdb, FastNoHeader)
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyWarning
from .common import assert_equal, assert_almost_equal, assert_true
StringIO = lambda x: BytesIO(x.encode('ascii')) # noqa
CI = os.environ.get('CI', False)
def assert_table_equal(t1, t2, check_meta=False, rtol=1.e-15, atol=1.e-300):
"""
Test equality of all columns in a table, with stricter tolerances for
float columns than the np.allclose default.
"""
assert_equal(len(t1), len(t2))
assert_equal(t1.colnames, t2.colnames)
if check_meta:
assert_equal(t1.meta, t2.meta)
for name in t1.colnames:
if len(t1) != 0:
assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)
if not isinstance(t1[name], MaskedColumn):
for i, el in enumerate(t1[name]):
try:
if not isinstance(el, str) and np.isnan(el):
assert_true(not isinstance(t2[name][i], str) and np.isnan(t2[name][i]))
elif isinstance(el, str):
assert_equal(el, t2[name][i])
else:
assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol)
except (TypeError, NotImplementedError):
pass # ignore for now
# Use this counter to create a unique filename for each file created in a test
# if this function is called more than once in a single test
_filename_counter = 0
def _read(tmpdir, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs):
# make sure we have a newline so table can't be misinterpreted as a filename
global _filename_counter
table += '\n'
reader = Reader(**kwargs)
t1 = reader.read(table)
t2 = reader.read(StringIO(table))
t3 = reader.read(table.splitlines())
t4 = ascii.read(table, format=format, guess=False, **kwargs)
t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs)
assert_table_equal(t1, t2, check_meta=check_meta)
assert_table_equal(t2, t3, check_meta=check_meta)
assert_table_equal(t3, t4, check_meta=check_meta)
assert_table_equal(t4, t5, check_meta=check_meta)
if parallel:
if CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
t6 = ascii.read(table, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t6, check_meta=check_meta)
filename = str(tmpdir.join(f'table{_filename_counter}.txt'))
_filename_counter += 1
with open(filename, 'wb') as f:
f.write(table.encode('ascii'))
f.flush()
t7 = ascii.read(filename, format=format, guess=False, **kwargs)
if parallel:
t8 = ascii.read(filename, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t7, check_meta=check_meta)
if parallel:
assert_table_equal(t1, t8, check_meta=check_meta)
return t1
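# Note (illustrative, not part of the original test module): _read() pushes the
# same table text through several code paths -- a Reader instance fed a string,
# a BytesIO, and a list of lines, plus ascii.read() with and without the fast
# reader, optionally with the parallel fast reader, and finally from a real
# file on disk -- and asserts that every path yields an identical table.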
@pytest.fixture(scope='function')
def read_basic(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastBasic, format='basic')
@pytest.fixture(scope='function')
def read_csv(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCsv, format='csv')
@pytest.fixture(scope='function')
def read_tab(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastTab, format='tab')
@pytest.fixture(scope='function')
def read_commented_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCommentedHeader,
format='commented_header')
@pytest.fixture(scope='function')
def read_rdb(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastRdb, format='rdb')
@pytest.fixture(scope='function')
def read_no_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastNoHeader,
format='no_header')
@pytest.mark.parametrize("parallel", [True, False])
def test_simple_data(parallel, read_basic):
"""
Make sure the fast reader works with basic input data.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
def test_read_types():
"""
Make sure that the read() function takes filenames,
strings, and lists of strings in addition to file-like objects.
"""
t1 = ascii.read("a b c\n1 2 3\n4 5 6", format='fast_basic', guess=False)
# TODO: also read from file
t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format='fast_basic', guess=False)
t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format='fast_basic', guess=False)
assert_table_equal(t1, t2)
assert_table_equal(t2, t3)
@pytest.mark.parametrize("parallel", [True, False])
def test_supplied_names(parallel, read_basic):
"""
If passed as a parameter, names should replace any
column names found in the header.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header(parallel, read_basic, read_no_header):
"""
The header should not be read when header_start=None. Unless names is
passed, the column names should be auto-generated.
"""
# Cannot set header_start=None for basic format
with pytest.raises(ValueError):
read_basic("A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel)
t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']],
names=('col1', 'col2', 'col3'))
assert_table_equal(t2, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header_supplied_names(parallel, read_basic, read_no_header):
"""
If header_start=None and names is passed as a parameter, header
data should not be read and names should be used instead.
"""
table = read_no_header("A B C\n1 2 3\n4 5 6",
names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_comment(parallel, read_basic):
"""
Make sure that line comments are ignored by the C reader.
"""
table = read_basic("# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_lines(parallel, read_basic):
"""
Make sure that empty lines are ignored by the C reader.
"""
table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_lstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the beginning of fields.
"""
text = """
1, 2, \t3
A,\t\t B, C
a, b, c
""" + ' \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_rstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the end of fields.
"""
text = ' 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_conversion(parallel, read_basic):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = read_basic(text, parallel=parallel)
assert_equal(table['A'].dtype.kind, 'f')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'i')
assert_equal(table['D'].dtype.kind, 'f')
assert table['E'].dtype.kind in ('S', 'U')
assert table['F'].dtype.kind in ('S', 'U')
assert table['G'].dtype.kind in ('S', 'U')
assert table['H'].dtype.kind in ('S', 'U')
@pytest.mark.parametrize("parallel", [True, False])
def test_delimiter(parallel, read_basic):
"""
Make sure that different delimiters work as expected.
"""
text = dedent("""
COL1 COL2 COL3
1 A -1
2 B -2
""")
expected = Table([[1, 2], ['A', 'B'], [-1, -2]], names=('COL1', 'COL2', 'COL3'))
for sep in ' ,\t#;':
table = read_basic(text.replace(' ', sep), delimiter=sep, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_names(parallel, read_basic):
"""
If include_names is not None, the parser should read only those columns in include_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", include_names=['A', 'D'], parallel=parallel)
expected = Table([[1, 5], [4, 8]], names=('A', 'D'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_exclude_names(parallel, read_basic):
"""
If exclude_names is not None, the parser should exclude the columns in exclude_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", exclude_names=['A', 'D'], parallel=parallel)
expected = Table([[2, 6], [3, 7]], names=('B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_exclude_names(parallel, read_basic):
"""
Make sure that include_names is applied before exclude_names if both are specified.
"""
text = dedent("""
A B C D E F G H
1 2 3 4 5 6 7 8
9 10 11 12 13 14 15 16
""")
table = read_basic(text, include_names=['A', 'B', 'D', 'F', 'H'],
exclude_names=['B', 'F'], parallel=parallel)
expected = Table([[1, 9], [4, 12], [8, 16]], names=('A', 'D', 'H'))
assert_table_equal(table, expected)
def test_doubled_quotes(read_csv):
"""
Test #8283 (fix for #8281), parsing doubled-quotes "ab""cd" in a quoted
field was incorrect.
"""
tbl = '\n'.join(['a,b',
'"d""","d""q"',
'"""q",""""'])
expected = Table([['d"', '"q'],
['d"q', '"']],
names=('a', 'b'))
dat = read_csv(tbl)
assert_table_equal(dat, expected)
# In addition to the local read_csv wrapper, check that default
# parsing with guessing gives the right answer.
for fast_reader in True, False:
dat = ascii.read(tbl, fast_reader=fast_reader)
assert_table_equal(dat, expected)
@pytest.mark.filterwarnings("ignore:OverflowError converting to IntType in column TIMESTAMP")
def test_doubled_quotes_segv():
"""
Test the exact example from #8281 which resulted in SEGV prior to #8283
(in contrast to the tests above that just gave the wrong answer).
Attempts to produce a more minimal example were unsuccessful, so the whole
thing is included.
"""
tbl = dedent("""
"ID","TIMESTAMP","addendum_id","bib_reference","bib_reference_url","client_application","client_category","client_sort_key","color","coordsys","creator","creator_did","data_pixel_bitpix","dataproduct_subtype","dataproduct_type","em_max","em_min","format","hips_builder","hips_copyright","hips_creation_date","hips_creation_date_1","hips_creator","hips_data_range","hips_estsize","hips_frame","hips_glu_tag","hips_hierarchy","hips_initial_dec","hips_initial_fov","hips_initial_ra","hips_lon_asc","hips_master_url","hips_order","hips_order_1","hips_order_4","hips_order_min","hips_overlay","hips_pixel_bitpix","hips_pixel_cut","hips_pixel_scale","hips_progenitor_url","hips_publisher","hips_release_date","hips_release_date_1","hips_rgb_blue","hips_rgb_green","hips_rgb_red","hips_sampling","hips_service_url","hips_service_url_1","hips_service_url_2","hips_service_url_3","hips_service_url_4","hips_service_url_5","hips_service_url_6","hips_service_url_7","hips_service_url_8","hips_skyval","hips_skyval_method","hips_skyval_value","hips_status","hips_status_1","hips_status_2","hips_status_3","hips_status_4","hips_status_5","hips_status_6","hips_status_7","hips_status_8","hips_tile_format","hips_tile_format_1","hips_tile_format_4","hips_tile_width","hips_version","hipsgen_date","hipsgen_date_1","hipsgen_date_10","hipsgen_date_11","hipsgen_date_12","hipsgen_date_2","hipsgen_date_3","hipsgen_date_4","hipsgen_date_5","hipsgen_date_6","hipsgen_date_7","hipsgen_date_8","hipsgen_date_9","hipsgen_params","hipsgen_params_1","hipsgen_params_10","hipsgen_params_11","hipsgen_params_12","hipsgen_params_2","hipsgen_params_3","hipsgen_params_4","hipsgen_params_5","hipsgen_params_6","hipsgen_params_7","hipsgen_params_8","hipsgen_params_9","label","maxOrder","moc_access_url","moc_order","moc_release_date","moc_sky_fraction","obs_ack","obs_collection","obs_copyrigh_url","obs_copyright","obs_copyright_1","obs_copyright_url","obs_copyright_url_1","obs_description","obs_description_url","obs_descrition_url","obs_id","obs_initial_dec","obs_initial_fov","obs_initial_ra","obs_provenance","obs_regime","obs_title","ohips_frame","pixelCut","pixelRange","prov_did","prov_progenitor","prov_progenitor_url","publisher_did","publisher_id","s_pixel_scale","t_max","t_min"
"CDS/P/2MASS/H","1524123841000","","2006AJ....131.1163S","http://cdsbib.u-strasbg.fr/cgi-bin/cdsbib?2006AJ....131.1163S","AladinDesktop","Image/Infrared/2MASS","04-001-03","","","","ivo://CDS/P/2MASS/H","","","image","1.798E-6","1.525E-6","","Aladin/HipsGen v9.017","CNRS/Unistra","2013-05-06T20:36Z","","CDS (A.Oberto)","","","equatorial","","mean","","","","","","9","","","","","","0 60","2.236E-4","","","2016-04-22T13:48Z","","","","","","http://alasky.u-strasbg.fr/2MASS/H","https://irsa.ipac.caltech.edu/data/hips/CDS/2MASS/H","http://alaskybis.u-strasbg.fr/2MASS/H","https://alaskybis.u-strasbg.fr/2MASS/H","","","","","","","","","public master clonableOnce","public mirror unclonable","public mirror clonableOnce","public mirror clonableOnce","","","","","","jpeg fits","","","512","1.31","","","","","","","","","","","","","","","","","","","","","","","","","","","","","http://alasky.u-strasbg.fr/2MASS/H/Moc.fits","9","","1","University of Massachusetts & IPAC/Caltech","The Two Micron All Sky Survey - H band (2MASS H)","","University of Massachusetts & IPAC/Caltech","","http://www.ipac.caltech.edu/2mass/","","2MASS has uniformly scanned the entire sky in three near-infrared bands to detect and characterize point sources brighter than about 1 mJy in each band, with signal-to-noise ratio (SNR) greater than 10, using a pixel size of 2.0"". This has achieved an 80,000-fold improvement in sensitivity relative to earlier surveys. 2MASS used two highly-automated 1.3-m telescopes, one at Mt. Hopkins, AZ, and one at CTIO, Chile. Each telescope was equipped with a three-channel camera, each channel consisting of a 256x256 array of HgCdTe detectors, capable of observing the sky simultaneously at J (1.25 microns), H (1.65 microns), and Ks (2.17 microns). The University of Massachusetts (UMass) was responsible for the overall management of the project, and for developing the infrared cameras and on-site computing systems at both facilities. The Infrared Processing and Analysis Center (IPAC) is responsible for all data processing through the Production Pipeline, and construction and distribution of the data products. Funding is provided primarily by NASA and the NSF","","","","+0","0.11451621372724685","0","","Infrared","2MASS H (1.66um)","","","","","IPAC/NASA","","","","","51941","50600"
""") # noqa
ascii.read(tbl, format='csv', fast_reader=True, guess=False)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_fields(parallel, read_basic):
"""
The character quotechar (default '"') should denote the start of a field which can
contain the field delimiter and newlines.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = dedent("""
"A B" C D
1.5 2.1 -37.1
a b " c
d"
""")
table = read_basic(text, parallel=parallel)
expected = Table([['1.5', 'a'], ['2.1', 'b'], ['-37.1', 'cd']], names=('A B', 'C', 'D'))
assert_table_equal(table, expected)
table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("key,val", [
('delimiter', ',,'), # multi-char delimiter
('comment', '##'), # multi-char comment
('data_start', None), # data_start=None
('data_start', -1), # data_start negative
('quotechar', '##'), # multi-char quote signifier
('header_start', -1), # negative header_start
('converters', dict((i + 1, ascii.convert_numpy(np.uint))
for i in range(3))), # passing converters
('Inputter', ascii.ContinuationLinesInputter), # passing Inputter
('header_Splitter', ascii.DefaultSplitter), # passing Splitter
('data_Splitter', ascii.DefaultSplitter)])
def test_invalid_parameters(key, val):
"""
Make sure the C reader raises an error if passed parameters it can't handle.
"""
with pytest.raises(ParameterError):
FastBasic(**{key: val}).read('1 2 3\n4 5 6')
with pytest.raises(ParameterError):
ascii.read('1 2 3\n4 5 6',
format='fast_basic', guess=False, **{key: val})
def test_invalid_parameters_other():
with pytest.raises(TypeError):
FastBasic(foo=7).read('1 2 3\n4 5 6') # unexpected argument
with pytest.raises(FastOptionsError): # don't fall back on the slow reader
ascii.read('1 2 3\n4 5 6', format='basic', fast_reader={'foo': 7})
with pytest.raises(ParameterError):
# Outputter cannot be specified in constructor
FastBasic(Outputter=ascii.TableOutputter).read('1 2 3\n4 5 6')
def test_too_many_cols1():
"""
If a row contains too many columns, the C reader should raise an error.
"""
text = dedent("""
A B C
1 2 3
4 5 6
7 8 9 10
11 12 13
""")
with pytest.raises(InconsistentTableError) as e:
FastBasic().read(text)
assert 'Number of header columns (3) ' \
'inconsistent with data columns in data line 2' in str(e.value)
def test_too_many_cols2():
text = """\
aaa,bbb
1,2,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert 'Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e.value)
def test_too_many_cols3():
text = """\
aaa,bbb
1,2,,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert 'Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e.value)
def test_too_many_cols4():
# https://github.com/astropy/astropy/issues/9922
with pytest.raises(InconsistentTableError) as e:
ascii.read(get_pkg_data_filename('data/conf_py.txt'),
fast_reader=True, guess=True)
assert 'Unable to guess table format with the guesses listed below' in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_not_enough_cols(parallel, read_csv):
"""
If a row does not have enough columns, the FastCsv reader should add empty
fields while the FastBasic reader should raise an error.
"""
text = """
A,B,C
1,2,3
4,5
6,7,8
"""
table = read_csv(text, parallel=parallel)
assert table['B'][1] is not ma.masked
assert table['C'][1] is ma.masked
with pytest.raises(InconsistentTableError):
table = FastBasic(delimiter=',').read(text)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_end(parallel, read_basic, read_rdb):
"""
The parameter data_end should specify where data reading ends.
"""
text = """
A B C
1 2 3
4 5 6
7 8 9
10 11 12
"""
table = read_basic(text, data_end=3, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# data_end supports negative indexing
table = read_basic(text, data_end=-2, parallel=parallel)
assert_table_equal(table, expected)
text = """
A\tB\tC
N\tN\tS
1\t2\ta
3\t4\tb
5\t6\tc
"""
# make sure data_end works with RDB
table = read_rdb(text, data_end=-1, parallel=parallel)
expected = Table([[1, 3], [2, 4], ['a', 'b']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# positive index
table = read_rdb(text, data_end=3, parallel=parallel)
expected = Table([[1], [2], ['a']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# empty table if data_end is too small
table = read_rdb(text, data_end=1, parallel=parallel)
expected = Table([[], [], []], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_inf_nan(parallel, read_basic):
"""
Test that inf and nan-like values are correctly parsed on all platforms.
Regression test for https://github.com/astropy/astropy/pull/3525
"""
text = dedent("""\
A
nan
+nan
-nan
inf
infinity
+inf
+infinity
-inf
-infinity
""")
expected = Table({'A': [np.nan, np.nan, np.nan,
np.inf, np.inf, np.inf, np.inf,
-np.inf, -np.inf]})
table = read_basic(text, parallel=parallel)
assert table['A'].dtype.kind == 'f'
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_values(parallel, read_basic):
"""
Make sure that the parameter fill_values works as intended. If fill_values
is not specified, the default behavior should be to convert '' to 0.
"""
text = """
A, B, C
, 2, nan
a, -999, -3.4
nan, 5, -9999
8, nan, 7.6e12
"""
table = read_basic(text, delimiter=',', parallel=parallel)
# The empty value in row A should become a masked '0'
assert isinstance(table['A'], MaskedColumn)
assert table['A'][0] is ma.masked
# '0' rather than 0 because there is a string in the column
assert_equal(table['A'].data.data[0], '0')
assert table['A'][1] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=('-999', '0'), parallel=parallel)
assert isinstance(table['B'], MaskedColumn)
assert table['A'][0] is not ma.masked # empty value unaffected
assert table['C'][2] is not ma.masked # -9999 is not an exact match
assert table['B'][1] is ma.masked
# Numeric because the rest of the column contains numeric data
assert_equal(table['B'].data.data[1], 0.0)
assert table['B'][0] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=[], parallel=parallel)
# None of the columns should be masked
for name in 'ABC':
assert not isinstance(table[name], MaskedColumn)
table = read_basic(text, delimiter=',',
fill_values=[('', '0', 'A'),
('nan', '999', 'A', 'C')], parallel=parallel)
assert np.isnan(table['B'][3]) # nan filling skips column B
assert table['B'][3] is not ma.masked # should skip masking as well as replacing nan
assert table['A'][0] is ma.masked
assert table['A'][2] is ma.masked
assert_equal(table['A'].data.data[0], '0')
assert_equal(table['A'].data.data[2], '999')
assert table['C'][0] is ma.masked
assert_almost_equal(table['C'].data.data[0], 999.0)
assert_almost_equal(table['C'][1], -3.4) # column is still of type float
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_include_exclude_names(parallel, read_csv):
"""
fill_include_names and fill_exclude_names should filter missing/empty value handling
in the same way that include_names and exclude_names filter output columns.
"""
text = """
A, B, C
, 1, 2
3, , 4
5, 5,
"""
table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is ma.masked
assert table['C'][2] is not ma.masked # C not in fill_include_names
table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel)
assert table['C'][2] is ma.masked
assert table['A'][0] is not ma.masked
assert table['B'][1] is not ma.masked # A and B excluded from fill handling
table = read_csv(text, fill_include_names=['A', 'B'],
fill_exclude_names=['B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is not ma.masked # fill_exclude_names applies after fill_include_names
assert table['C'][2] is not ma.masked
@pytest.mark.parametrize("parallel", [True, False])
def test_many_rows(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of rows
is large (so that each column string is longer than INITIAL_COL_SIZE).
"""
text = 'A B C\n'
for i in range(500): # create 500 rows
text += ' '.join([str(i) for i in range(3)])
text += '\n'
table = read_basic(text, parallel=parallel)
expected = Table([[0] * 500, [1] * 500, [2] * 500], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_many_columns(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of columns
is large (so that each header string is longer than INITIAL_HEADER_SIZE).
"""
# create a string with 500 columns and two data rows
text = ' '.join([str(i) for i in range(500)])
text += ('\n' + text + '\n' + text)
table = read_basic(text, parallel=parallel)
expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])
assert_table_equal(table, expected)
def test_fast_reader():
"""
Make sure that ascii.read() works as expected by default and with
fast_reader specified.
"""
text = 'a b c\n1 2 3\n4 5 6'
with pytest.raises(ParameterError): # C reader can't handle regex comment
ascii.read(text, format='fast_basic', guess=False, comment='##')
# Enable multiprocessing and the fast converter
try:
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': True, 'use_fast_converter': True})
except NotImplementedError:
# Might get this on Windows, try without parallel...
if os.name == 'nt':
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': False,
'use_fast_converter': True})
else:
raise
# Should raise an error if fast_reader has an invalid key
with pytest.raises(FastOptionsError):
ascii.read(text, format='fast_basic', guess=False, fast_reader={'foo': True})
# Use the slow reader instead
ascii.read(text, format='basic', guess=False, comment='##', fast_reader=False)
# Will try the slow reader afterwards by default
ascii.read(text, format='basic', guess=False, comment='##')
@pytest.mark.parametrize("parallel", [True, False])
def test_read_tab(parallel, read_tab):
"""
The fast reader for tab-separated values should not strip whitespace, unlike
the basic reader.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t '
table = read_tab(text, parallel=parallel)
assert_equal(table['1'][0], ' a') # preserve line whitespace
assert_equal(table['2'][0], ' b ') # preserve field whitespace
assert table['3'][0] is ma.masked # empty value should be masked
assert_equal(table['2'][1], ' d e') # preserve whitespace in quoted fields
assert_equal(table['3'][1], ' ') # preserve end-of-line whitespace
@pytest.mark.parametrize("parallel", [True, False])
def test_default_data_start(parallel, read_basic):
"""
    If data_start is not explicitly passed to read(), data processing should
    begin right after the header.
"""
text = 'ignore this line\na b c\n1 2 3\n4 5 6'
table = read_basic(text, header_start=1, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_commented_header(parallel, read_commented_header):
"""
The FastCommentedHeader reader should mimic the behavior of the
CommentedHeader by overriding the default header behavior of FastBasic.
"""
text = """
# A B C
1 2 3
4 5 6
"""
t1 = read_commented_header(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(t1, expected)
text = '# first commented line\n # second commented line\n\n' + text
t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel)
assert_table_equal(t2, expected)
t3 = read_commented_header(text, header_start=-1, data_start=0,
parallel=parallel) # negative indexing allowed
assert_table_equal(t3, expected)
text += '7 8 9'
t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel)
expected = Table([[7], [8], [9]], names=('A', 'B', 'C'))
assert_table_equal(t4, expected)
with pytest.raises(ParameterError):
read_commented_header(text, header_start=-1, data_start=-1,
parallel=parallel) # data_start cannot be negative
@pytest.mark.parametrize("parallel", [True, False])
def test_rdb(parallel, read_rdb):
"""
Make sure the FastRdb reader works as expected.
"""
text = """
A\tB\tC
1n\tS\t4N
1\t 9\t4.3
"""
table = read_rdb(text, parallel=parallel)
expected = Table([[1], [' 9'], [4.3]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
assert_equal(table['A'].dtype.kind, 'i')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'f')
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tS\tN\n4\tb\ta' # C column contains non-numeric data
read_rdb(text, parallel=parallel)
assert 'Column C failed to convert' in str(e.value)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\n1\t2\t3' # not enough types specified
read_rdb(text, parallel=parallel)
assert 'mismatch between number of column names and column types' in str(e.value)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\t5\n1\t2\t3' # invalid type for column C
read_rdb(text, parallel=parallel)
assert 'type definitions do not all match [num](N|S)' in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_start(parallel, read_basic):
"""
Make sure that data parsing begins at data_start (ignoring empty and
commented lines but not taking quoted values into account).
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = """
A B C
1 2 3
4 5 6
7 8 "9
\t1"
# comment
10 11 12
"""
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 91, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
table = read_basic(text, data_start=3, parallel=parallel)
# ignore empty line
expected = Table([[7, 10], [8, 11], [91, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
with pytest.raises(InconsistentTableError) as e:
# tries to begin in the middle of quoted field
read_basic(text, data_start=4, parallel=parallel)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e.value)
table = read_basic(text, data_start=5, parallel=parallel)
# ignore commented line
expected = Table([[10], [11], [12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
text = """
A B C
1 2 3
4 5 6
7 8 9
# comment
10 11 12
"""
# make sure reading works as expected in parallel
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_empty_values(parallel, read_basic):
"""
Quoted empty values spanning multiple lines should be treated correctly.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = 'a b c\n1 2 " \n "'
table = read_basic(text, parallel=parallel)
assert table['c'][0] is ma.masked # empty value masked by default
@pytest.mark.parametrize("parallel", [True, False])
def test_csv_comment_default(parallel, read_csv):
"""
Unless the comment parameter is specified, the CSV reader should
not treat any lines as comments.
"""
text = 'a,b,c\n#1,2,3\n4,5,6'
table = read_csv(text, parallel=parallel)
expected = Table([['#1', '4'], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_whitespace_before_comment(parallel, read_tab):
"""
Readers that don't strip whitespace from data (Tab, RDB)
should still treat lines with leading whitespace and then
the comment char as comment lines.
"""
text = 'a\tb\tc\n # comment line\n1\t2\t3'
table = read_tab(text, parallel=parallel)
expected = Table([[1], [2], [3]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_strip_line_trailing_whitespace(parallel, read_basic):
"""
Readers that strip whitespace from lines should ignore
trailing whitespace after the last data value of each
row.
"""
text = 'a b c\n1 2 \n3 4 5'
with pytest.raises(InconsistentTableError) as e:
ascii.read(StringIO(text), format='fast_basic', guess=False)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e.value)
text = 'a b c\n 1 2 3 \t \n 4 5 6 '
table = read_basic(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_data(parallel, read_basic):
"""
As long as column names are supplied, the C reader
should return an empty table in the absence of data.
"""
table = read_basic('a b c', parallel=parallel)
expected = Table([[], [], []], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
table = read_basic('a b c\n1 2 3', data_start=2, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_line_endings(parallel, read_basic, read_commented_header, read_rdb):
"""
Make sure the fast reader accepts CR and CR+LF
as newlines.
"""
text = 'a b c\n1 2 3\n4 5 6\n7 8 9\n'
expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'))
for newline in ('\r\n', '\r'):
table = read_basic(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
# Make sure the splitlines() method of FileString
# works with CR/CR+LF line endings
text = '#' + text
for newline in ('\r\n', '\r'):
table = read_commented_header(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
expected = Table([MaskedColumn([1, 4, 7]), [2, 5, 8], MaskedColumn([3, 6, 9])],
names=('a', 'b', 'c'))
expected['a'][0] = np.ma.masked
expected['c'][0] = np.ma.masked
text = 'a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n'
for newline in ('\r\n', '\r'):
table = read_rdb(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
assert np.all(table == expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_store_comments(parallel, read_basic):
"""
Make sure that the output Table produced by the fast
reader stores any comment lines in its meta attribute.
"""
text = """
# header comment
a b c
# comment 2
# comment 3
1 2 3
4 5 6
"""
table = read_basic(text, parallel=parallel, check_meta=True)
assert_equal(table.meta['comments'],
['header comment', 'comment 2', 'comment 3'])
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_quotes(parallel, read_basic):
"""
Make sure the C reader doesn't segfault when the
input data contains empty quotes. [#3407]
"""
table = read_basic('a b\n1 ""\n2 ""', parallel=parallel)
expected = Table([[1, 2], [0, 0]], names=('a', 'b'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fast_tab_with_names(parallel, read_tab):
"""
Make sure the C reader doesn't segfault when the header for the
first column is missing [#3545]
"""
content = """#
\tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot
-3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t""" # noqa
head = [f'A{i}' for i in range(28)]
read_tab(content, data_start=1, parallel=parallel, names=head)
@pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'),
reason='Environment variable TEST_READ_HUGE_FILE must be '
'defined to run this test')
def test_read_big_table(tmpdir):
"""Test reading of a huge file.
This test generates a huge CSV file (~2.3Gb) before reading it (see
https://github.com/astropy/astropy/pull/5319). The test is run only if the
environment variable ``TEST_READ_HUGE_FILE`` is defined. Note that running
the test requires quite a lot of memory (~18Gb when reading the file) !!
"""
NB_ROWS = 250000
NB_COLS = 500
filename = str(tmpdir.join("big_table.csv"))
print(f"Creating a {NB_ROWS} rows table ({NB_COLS} columns).")
data = np.random.random(NB_ROWS)
t = Table(data=[data] * NB_COLS, names=[str(i) for i in range(NB_COLS)])
data = None
print(f"Saving the table to {filename}")
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print("Counting the number of lines in the csv, it should be {}"
" + 1 (header).".format(NB_ROWS))
assert sum(1 for line in open(filename)) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
@pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'),
reason='Environment variable TEST_READ_HUGE_FILE must be '
'defined to run this test')
def test_read_big_table2(tmpdir):
"""Test reading of a file with a huge column.
"""
# (2**32 // 2) : max value for int
# // 10 : we use a value for rows that have 10 chars (1e9)
# + 5 : add a few lines so the length cannot be stored by an int
NB_ROWS = (2**32 // 2) // 10 + 5
filename = str(tmpdir.join("big_table.csv"))
print(f"Creating a {NB_ROWS} rows table.")
data = np.full(2**32 // 2 // 10 + 5, int(1e9), dtype=np.int32)
t = Table(data=[data], names=['a'], copy=False)
print(f"Saving the table to {filename}")
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print("Counting the number of lines in the csv, it should be {}"
" + 1 (header).".format(NB_ROWS))
assert sum(1 for line in open(filename)) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
# Test these both with guessing turned on and off
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize('fast_reader', [False, dict(use_fast_converter=False),
dict(use_fast_converter=True)])
@pytest.mark.parametrize("parallel", [False, True])
def test_data_out_of_range(parallel, fast_reader, guess):
"""
Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|)
shall be returned as 0 and +-inf respectively by the C parser, just like
the Python parser.
Test fast converter only to nominal accuracy.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader['parallel'] = parallel
if fast_reader.get('use_fast_converter'):
rtol = 1.e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
test_for_warnings = fast_reader and not parallel
fields = ['10.1E+199', '3.14e+313', '2048e+306', '0.6E-325', '-2.e345']
values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf])
# NOTE: Warning behavior varies for the parameters being passed in.
with pytest.warns(None) as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings: # Assert precision warnings for cols 2-5
assert len(w) == 4
for i in range(len(w)):
assert (f"OverflowError converting to FloatType in column col{i+2}"
in str(w[i].message))
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test some additional corner cases
fields = ['.0101E202', '0.000000314E+314', '1777E+305', '-1799E+305',
'0.2e-323', '5200e-327', ' 0.0000000000000000000001024E+330']
values = np.array([1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308])
with pytest.warns(None) as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings: # Assert precision warnings for cols 4-6
assert len(w) == 3
for i in range(len(w)):
assert (f"OverflowError converting to FloatType in column col{i+4}"
in str(w[i].message))
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test corner cases again with non-standard exponent_style (auto-detection)
if fast_reader and fast_reader.get('use_fast_converter'):
fast_reader.update({'exponent_style': 'A'})
else:
pytest.skip("Fortran exponent style only available in fast converter")
fields = ['.0101D202', '0.000000314d+314', '1777+305', '-1799E+305',
'0.2e-323', '2500-327', ' 0.0000000000000000000001024Q+330']
with pytest.warns(None) as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings:
assert len(w) == 3
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
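# Illustrative summary (not part of the original tests): values outside the
# float64 range are clamped rather than raising, e.g.
#     '3.14e+313' -> +inf    '-2.e345' -> -inf    '0.6E-325' -> 0.0
# while values just inside the range ('.0101E202', '1777E+305', ...) parse to
# the nearest representable float64, possibly with a precision warning.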
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize('fast_reader', [False, dict(use_fast_converter=False),
dict(use_fast_converter=True)])
@pytest.mark.parametrize("parallel", [False, True])
def test_data_at_range_limit(parallel, fast_reader, guess):
"""
    Test parsing of fixed-format float64 numbers near the range limits
    (|~4.94e-324 to 1.7977e+308|) - within the limit for full precision
    (|~2.5e-307| for the strtod C parser, about a factor of 10 better for the
    fast_converter) exact numbers shall be returned; beyond that an Overflow
    warning is raised. Input of exactly 0.0 must not raise an OverflowError.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader['parallel'] = parallel
if fast_reader.get('use_fast_converter'):
rtol = 1.e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
# Test very long fixed-format strings (to strtod range limit w/o Overflow)
for D in 99, 202, 305:
t = ascii.read(StringIO(99 * '0' + '.' + D * '0' + '1'), format='no_header',
guess=guess, fast_reader=fast_reader)
assert_almost_equal(t['col1'][0], 10.**-(D + 1), rtol=rtol, atol=1.e-324)
for D in 99, 202, 308:
t = ascii.read(StringIO('1' + D * '0' + '.0'), format='no_header',
guess=guess, fast_reader=fast_reader)
assert_almost_equal(t['col1'][0], 10.**D, rtol=rtol, atol=1.e-324)
# 0.0 is always exact (no Overflow warning)!
for s in '0.0', '0.0e+0', 399 * '0' + '.' + 365 * '0':
t = ascii.read(StringIO(s), format='no_header',
guess=guess, fast_reader=fast_reader)
assert t['col1'][0] == 0.0
# Test OverflowError at precision limit with laxer rtol
if parallel:
pytest.skip("Catching warnings broken in parallel mode")
elif not fast_reader:
pytest.skip("Python/numpy reader does not raise on Overflow")
with pytest.warns(None) as warning_lines:
t = ascii.read(StringIO('0.' + 314 * '0' + '1'), format='no_header',
guess=guess, fast_reader=fast_reader)
n_warns = len(warning_lines)
assert n_warns in (0, 1), f'Expected 0 or 1 warning, found {n_warns}'
if n_warns == 1:
assert 'OverflowError converting to FloatType in column col1, possibly resulting in degraded precision' in str(warning_lines[0].message) # noqa
assert_almost_equal(t['col1'][0], 1.e-315, rtol=1.e-10, atol=1.e-324)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_int_out_of_range(parallel, guess):
"""
Integer numbers outside int range shall be returned as string columns
consistent with the standard (Python) parser (no 'upcasting' to float).
"""
imin = np.iinfo(int).min + 1
imax = np.iinfo(int).max - 1
huge = f'{imax+2:d}'
text = f'P M S\n {imax:d} {imin:d} {huge:s}'
expected = Table([[imax], [imin], [huge]], names=('P', 'M', 'S'))
# NOTE: Warning behavior varies for the parameters being passed in.
with pytest.warns(None) as w:
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
if not parallel:
assert len(w) == 1
assert ("OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message))
assert_table_equal(table, expected)
# Check with leading zeroes to make sure strtol does not read them as octal
text = f'P M S\n000{imax:d} -0{-imin:d} 00{huge:s}'
expected = Table([[imax], [imin], ['00' + huge]], names=('P', 'M', 'S'))
with pytest.warns(None) as w:
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
if not parallel:
assert len(w) == 1
assert ("OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message))
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
def test_int_out_of_order(guess):
"""
Mixed columns should be returned as float, but if the out-of-range integer
shows up first, it will produce a string column - with both readers.
Broken with the parallel fast_reader.
"""
imax = np.iinfo(int).max - 1
text = f'A B\n 12.3 {imax:d}0\n {imax:d}0 45.6e7'
expected = Table([[12.3, 10. * imax], [f'{imax:d}0', '45.6e7']],
names=('A', 'B'))
with pytest.warns(AstropyWarning, match=r'OverflowError converting to '
r'IntType in column B, reverting to String'):
table = ascii.read(text, format='basic', guess=guess, fast_reader=True)
assert_table_equal(table, expected)
with pytest.warns(AstropyWarning, match=r'OverflowError converting to '
r'IntType in column B, reverting to String'):
table = ascii.read(text, format='basic', guess=guess, fast_reader=False)
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_reader(parallel, guess):
"""
Make sure that ascii.read() can read Fortran-style exponential notation
using the fast_reader.
"""
# Check for nominal np.float64 precision
rtol = 1.e-15
atol = 0.0
text = 'A B C D\n100.01{:s}99 2.0 2.0{:s}-103 3\n' + \
' 4.2{:s}-1 5.0{:s}-1 0.6{:s}4 .017{:s}+309'
expc = Table([[1.0001e101, 0.42], [2, 0.5], [2.e-103, 6.e3], [3, 1.7e307]],
names=('A', 'B', 'C', 'D'))
expstyles = {'e': 6 * ('E'),
'D': ('D', 'd', 'd', 'D', 'd', 'D'),
'Q': 3 * ('q', 'Q'),
'Fortran': ('E', '0', 'D', 'Q', 'd', '0')}
# C strtod (not-fast converter) can't handle Fortran exp
with pytest.raises(FastOptionsError) as e:
ascii.read(text.format(*(6 * ('D'))), format='basic', guess=guess,
fast_reader={'use_fast_converter': False,
'parallel': parallel, 'exponent_style': 'D'})
assert 'fast_reader: exponent_style requires use_fast_converter' in str(e.value)
# Enable multiprocessing and the fast converter iterate over
# all style-exponent combinations, with auto-detection
for s, c in expstyles.items():
table = ascii.read(text.format(*c), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': s})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
# Additional corner-case checks including triple-exponents without
# any character and mixed whitespace separators
text = 'A B\t\t C D\n1.0001+101 2.0+000\t 0.0002-099 3\n ' + \
'0.42-000 \t 0.5 6.+003 0.000000000000000000000017+330'
table = ascii.read(text, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
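# Illustrative summary (not part of the original tests): with
# fast_reader={'exponent_style': 'D'} (or 'A' for auto-detection) the fast
# converter accepts Fortran-style exponents, e.g.
#     '1.23D4'  -> 12300.0
#     '5.67D-8' -> 5.67e-08
#     '2.0+000' -> 2.0   (triple-digit exponent with no exponent character)
# which the strtod-based converter would instead reject or keep as strings.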
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_invalid_exp(parallel, guess):
"""
Test Fortran-style exponential notation in the fast_reader with invalid
exponent-like patterns (no triple-digits) to make sure they are returned
as strings instead, as with the standard C parser.
"""
if parallel and CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
formats = {'basic': ' ', 'tab': '\t', 'csv': ','}
header = ['S1', 'F2', 'S2', 'F3', 'S3', 'F4', 'F5', 'S4', 'I1', 'F6', 'F7']
# Tested entries and expected returns, first for auto-detect,
# then for different specified exponents
fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.',
'2', '4.56e-2.3', '8000', '4.2-022', '.00000145e314']
vals_e = ['1.0001+1', '.42d1', '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
vals_d = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', '.00000145e314']
vals_a = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, 4.2e-22, 1.45e308]
vals_v = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
# Iterate over supported format types and separators
for f, s in formats.items():
t1 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)),
format=f, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(t1, Table([[col] for col in vals_a], names=header))
# Non-basic separators require guessing enabled to be detected
if guess:
formats['bar'] = '|'
else:
formats = {'basic': ' '}
for s in formats.values():
t2 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'a'})
assert_table_equal(t2, Table([[col] for col in vals_a], names=header))
# Iterate for (default) expchar 'E'
for s in formats.values():
t3 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': True})
assert_table_equal(t3, Table([[col] for col in vals_e], names=header))
# Iterate for expchar 'D'
for s in formats.values():
t4 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'D'})
assert_table_equal(t4, Table([[col] for col in vals_d], names=header))
# Iterate for regular converter (strtod)
for s in formats.values():
t5 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': False})
read_values = [col[0] for col in t5.itercols()]
if os.name == 'nt':
# Apparently C strtod() on (some?) MSVC recognizes 'd' exponents!
assert read_values == vals_v or read_values == vals_e
else:
assert read_values == vals_e
def test_fortran_reader_notbasic():
"""
Check if readers without a fast option raise a value error when a
fast_reader is asked for (implies the default 'guess=True').
"""
tabstr = dedent("""
a b
1 1.23D4
2 5.67D-8
""")[1:-1]
t1 = ascii.read(tabstr.split('\n'), fast_reader=dict(exponent_style='D'))
assert t1['b'].dtype.kind == 'f'
tabrdb = dedent("""
a\tb
# A simple RDB table
N\tN
1\t 1.23D4
2\t 5.67-008
""")[1:-1]
t2 = ascii.read(tabrdb.split('\n'), format='rdb',
fast_reader=dict(exponent_style='fortran'))
assert t2['b'].dtype.kind == 'f'
tabrst = dedent("""
= =======
a b
= =======
1 1.23E4
2 5.67E-8
= =======
""")[1:-1]
t3 = ascii.read(tabrst.split('\n'), format='rst')
assert t3['b'].dtype.kind == 'f'
t4 = ascii.read(tabrst.split('\n'), guess=True)
assert t4['b'].dtype.kind == 'f'
# In the special case of fast_converter=True (the default),
# incompatibility is ignored
t5 = ascii.read(tabrst.split('\n'), format='rst', fast_reader=True)
assert t5['b'].dtype.kind == 'f'
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader='force')
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(use_fast_converter=False))
tabrst = tabrst.replace('E', 'D')
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(exponent_style='D'))
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize('fast_reader', [dict(exponent_style='D'),
dict(exponent_style='A')])
def test_dict_kwarg_integrity(fast_reader, guess):
"""
Check if dictionaries passed as kwargs (fast_reader in this test) are
left intact by ascii.read()
"""
expstyle = fast_reader.get('exponent_style', 'E')
fields = ['10.1D+199', '3.14d+313', '2048d+306', '0.6D-325', '-2.d345']
ascii.read(StringIO(' '.join(fields)), guess=guess,
fast_reader=fast_reader)
assert fast_reader.get('exponent_style', None) == expstyle
@pytest.mark.parametrize('fast_reader', [False,
dict(parallel=True),
dict(parallel=False)])
def test_read_empty_basic_table_with_comments(fast_reader):
"""
Test for reading a "basic" format table that has no data but has comments.
Tests the fix for #8267.
"""
dat = """
# comment 1
# comment 2
col1 col2
"""
t = ascii.read(dat, fast_reader=fast_reader)
assert t.meta['comments'] == ['comment 1', 'comment 2']
assert len(t) == 0
assert t.colnames == ['col1', 'col2']
@pytest.mark.parametrize('fast_reader', [dict(use_fast_converter=True),
dict(exponent_style='A')])
def test_conversion_fast(fast_reader):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = ascii.read(text, fast_reader=fast_reader)
assert_equal(table['A'].dtype.kind, 'f')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'i')
assert_equal(table['D'].dtype.kind, 'f')
assert table['E'].dtype.kind in ('S', 'U')
assert table['F'].dtype.kind in ('S', 'U')
assert table['G'].dtype.kind in ('S', 'U')
assert table['H'].dtype.kind in ('S', 'U')
@pytest.mark.parametrize('delimiter', ['\n', '\r'])
@pytest.mark.parametrize('fast_reader', [False, True, 'force'])
def test_newline_as_delimiter(delimiter, fast_reader):
"""
Check that newline characters are correctly handled as delimiters.
Tests the fix for #9928.
"""
if delimiter == '\r':
eol = '\n'
else:
eol = '\r'
inp0 = ["a | b | c ", " 1 | '2' | 3.00000 "]
inp1 = "a {0:s} b {0:s}c{1:s} 1 {0:s}'2'{0:s} 3.0".format(delimiter, eol)
inp2 = [f"a {delimiter} b{delimiter} c",
f"1{delimiter} '2' {delimiter} 3.0"]
t0 = ascii.read(inp0, delimiter='|', fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
t2 = ascii.read(inp2, delimiter=delimiter, fast_reader=fast_reader)
assert t1.colnames == t2.colnames == ['a', 'b', 'c']
assert len(t1) == len(t2) == 1
assert t1['b'].dtype.kind in ('S', 'U')
assert t2['b'].dtype.kind in ('S', 'U')
assert_table_equal(t1, t0)
assert_table_equal(t2, t0)
inp0 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format('|', eol)
inp1 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format(delimiter, eol)
t0 = ascii.read(inp0, delimiter='|', fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
if not fast_reader:
pytest.xfail("Quoted fields are not parsed correctly by BaseSplitter")
assert_equal(t1['b'].dtype.kind, 'i')
@pytest.mark.parametrize('delimiter', [' ', '|', '\n', '\r'])
@pytest.mark.parametrize('fast_reader', [False, True, 'force'])
def test_single_line_string(delimiter, fast_reader):
"""
String input without a newline character is interpreted as filename,
unless element of an iterable. Maybe not logical, but test that it is
at least treated consistently.
"""
expected = Table([[1], [2], [3.00]], names=('col1', 'col2', 'col3'))
text = "1{0:s}2{0:s}3.0".format(delimiter)
if delimiter in ('\r', '\n'):
t1 = ascii.read(text, format='no_header', delimiter=delimiter, fast_reader=fast_reader)
assert_table_equal(t1, expected)
else:
# Windows raises OSError, but not the other OSes.
with pytest.raises((FileNotFoundError, OSError)):
t1 = ascii.read(text, format='no_header', delimiter=delimiter, fast_reader=fast_reader)
t2 = ascii.read([text], format='no_header', delimiter=delimiter, fast_reader=fast_reader)
assert_table_equal(t2, expected)
|
bsd-3-clause
|
ferrants/ansible
|
contrib/inventory/vmware.py
|
59
|
16907
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
VMware Inventory Script
=======================
Retrieve information about virtual machines from a vCenter server or
standalone ESX host. When `guests_only=false` (in the INI file), host systems
are also returned in addition to VMs.
This script will attempt to read configuration from an INI file with the same
base filename if present, or `vmware.ini` if not. It is possible to create
symlinks to the inventory script to support multiple configurations, e.g.:
* `vmware.py` (this script)
* `vmware.ini` (default configuration, will be read by `vmware.py`)
* `vmware_test.py` (symlink to `vmware.py`)
* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`)
* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no
`vmware_other.ini` exists)
The path to an INI file may also be specified via the `VMWARE_INI` environment
variable, in which case the filename matching rules above will not apply.
Host and authentication parameters may be specified via the `VMWARE_HOST`,
`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will
take precedence over options present in the INI file. An INI file is not
required if these options are specified using environment variables.
'''
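# A minimal example `vmware.ini` (illustrative values only; the option names
# are the ones this script reads from the [defaults] and [auth] sections,
# everything else below is made up):
#
#   [defaults]
#   guests_only = True
#   cache_dir = /tmp/vmware_inventory_cache
#   cache_max_age = 300
#   # optional: hw_group, vm_group, prefix_filter
#
#   [auth]
#   host = vcenter.example.com
#   user = inventory_user
#   password = secret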
import collections
import json
import logging
import optparse
import os
import sys
import time
import ConfigParser
# Disable logging messages triggered by pSphere/suds.
try:
from logging import NullHandler
except ImportError:
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
logging.getLogger('psphere').addHandler(NullHandler())
logging.getLogger('suds').addHandler(NullHandler())
from psphere.client import Client
from psphere.errors import ObjectNotFoundError
from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network
from suds.sudsobject import Object as SudsObject
class VMwareInventory(object):
def __init__(self, guests_only=None):
self.config = ConfigParser.SafeConfigParser()
if os.environ.get('VMWARE_INI', ''):
config_files = [os.environ['VMWARE_INI']]
else:
            config_files = [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'vmware.ini']
for config_file in config_files:
if os.path.exists(config_file):
self.config.read(config_file)
break
# Retrieve only guest VMs, or include host systems?
if guests_only is not None:
self.guests_only = guests_only
elif self.config.has_option('defaults', 'guests_only'):
self.guests_only = self.config.getboolean('defaults', 'guests_only')
else:
self.guests_only = True
# Read authentication information from VMware environment variables
# (if set), otherwise from INI file.
auth_host = os.environ.get('VMWARE_HOST')
if not auth_host and self.config.has_option('auth', 'host'):
auth_host = self.config.get('auth', 'host')
auth_user = os.environ.get('VMWARE_USER')
if not auth_user and self.config.has_option('auth', 'user'):
auth_user = self.config.get('auth', 'user')
auth_password = os.environ.get('VMWARE_PASSWORD')
if not auth_password and self.config.has_option('auth', 'password'):
auth_password = self.config.get('auth', 'password')
# Create the VMware client connection.
self.client = Client(auth_host, auth_user, auth_password)
def _put_cache(self, name, value):
'''
Saves the value to cache with the name given.
'''
if self.config.has_option('defaults', 'cache_dir'):
cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_file = os.path.join(cache_dir, name)
with open(cache_file, 'w') as cache:
json.dump(value, cache)
def _get_cache(self, name, default=None):
'''
Retrieves the value from cache for the given name.
'''
if self.config.has_option('defaults', 'cache_dir'):
            cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
cache_file = os.path.join(cache_dir, name)
if os.path.exists(cache_file):
if self.config.has_option('defaults', 'cache_max_age'):
cache_max_age = self.config.getint('defaults', 'cache_max_age')
else:
cache_max_age = 0
cache_stat = os.stat(cache_file)
if (cache_stat.st_mtime + cache_max_age) >= time.time():
with open(cache_file) as cache:
return json.load(cache)
return default
def _flatten_dict(self, d, parent_key='', sep='_'):
'''
Flatten nested dicts by combining keys with a separator. Lists with
only string items are included as is; any other lists are discarded.
'''
items = []
for k, v in d.items():
if k.startswith('_'):
continue
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(self._flatten_dict(v, new_key, sep).items())
elif isinstance(v, (list, tuple)):
if all([isinstance(x, basestring) for x in v]):
items.append((new_key, v))
else:
items.append((new_key, v))
return dict(items)
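    # For example (illustrative input): _flatten_dict({'config': {'name': 'vm1', 'uuid': 'abc'}})
    # returns {'config_name': 'vm1', 'config_uuid': 'abc'}; keys starting with '_' are skipped.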
def _get_obj_info(self, obj, depth=99, seen=None):
'''
Recursively build a data structure for the given pSphere object (depth
only applies to ManagedObject instances).
'''
seen = seen or set()
if isinstance(obj, ManagedObject):
try:
obj_unicode = unicode(getattr(obj, 'name'))
except AttributeError:
obj_unicode = ()
if obj in seen:
return obj_unicode
seen.add(obj)
if depth <= 0:
return obj_unicode
d = {}
for attr in dir(obj):
if attr.startswith('_'):
continue
try:
val = getattr(obj, attr)
obj_info = self._get_obj_info(val, depth - 1, seen)
if obj_info != ():
d[attr] = obj_info
except Exception, e:
pass
return d
elif isinstance(obj, SudsObject):
d = {}
for key, val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
d[key] = obj_info
return d
elif isinstance(obj, (list, tuple)):
l = []
for val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
l.append(obj_info)
return l
elif isinstance(obj, (type(None), bool, int, long, float, basestring)):
return obj
else:
return ()
def _get_host_info(self, host, prefix='vmware'):
'''
Return a flattened dict with info about the given host system.
'''
host_info = {
'name': host.name,
}
for attr in ('datastore', 'network', 'vm'):
try:
value = getattr(host, attr)
host_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
host_info['%ss' % attr] = []
for k, v in self._get_obj_info(host.summary, depth=0).items():
if isinstance(v, collections.MutableMapping):
for k2, v2 in v.items():
host_info[k2] = v2
elif k != 'host':
host_info[k] = v
try:
host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress
except Exception, e:
print >> sys.stderr, e
host_info = self._flatten_dict(host_info, prefix)
if ('%s_ipAddress' % prefix) in host_info:
host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix]
return host_info
def _get_vm_info(self, vm, prefix='vmware'):
'''
Return a flattened dict with info about the given virtual machine.
'''
vm_info = {
'name': vm.name,
}
for attr in ('datastore', 'network'):
try:
value = getattr(vm, attr)
vm_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
vm_info['%ss' % attr] = []
try:
vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0)
except AttributeError:
vm_info['resourcePool'] = ''
try:
vm_info['guestState'] = vm.guest.guestState
except AttributeError:
vm_info['guestState'] = ''
for k, v in self._get_obj_info(vm.summary, depth=0).items():
if isinstance(v, collections.MutableMapping):
for k2, v2 in v.items():
if k2 == 'host':
k2 = 'hostSystem'
vm_info[k2] = v2
elif k != 'vm':
vm_info[k] = v
vm_info = self._flatten_dict(vm_info, prefix)
if ('%s_ipAddress' % prefix) in vm_info:
vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix]
return vm_info
def _add_host(self, inv, parent_group, host_name):
'''
Add the host to the parent group in the given inventory.
'''
p_group = inv.setdefault(parent_group, [])
if isinstance(p_group, dict):
group_hosts = p_group.setdefault('hosts', [])
else:
group_hosts = p_group
if host_name not in group_hosts:
group_hosts.append(host_name)
def _add_child(self, inv, parent_group, child_group):
'''
Add a child group to a parent group in the given inventory.
'''
if parent_group != 'all':
p_group = inv.setdefault(parent_group, {})
if not isinstance(p_group, dict):
inv[parent_group] = {'hosts': p_group}
p_group = inv[parent_group]
group_children = p_group.setdefault('children', [])
if child_group not in group_children:
group_children.append(child_group)
inv.setdefault(child_group, [])
def get_inventory(self, meta_hostvars=True):
'''
Reads the inventory from cache or VMware API via pSphere.
'''
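        # Shape of the returned structure (illustrative, default group names,
        # guests only; host and group names below are made-up examples):
        #   {'all': {'hosts': ['vm1']},
        #    'vmware_vm': {'hosts': ['vm1'],
        #                  'children': ['resource_pools', 'datastores', 'networks', 'guests']},
        #    'datastores': {'children': ['datastore1']}, 'datastore1': ['vm1'],
        #    '_meta': {'hostvars': {'vm1': {...}}}}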
# Use different cache names for guests only vs. all hosts.
if self.guests_only:
cache_name = '__inventory_guests__'
else:
cache_name = '__inventory_all__'
inv = self._get_cache(cache_name, None)
if inv is not None:
return inv
inv = {'all': {'hosts': []}}
if meta_hostvars:
inv['_meta'] = {'hostvars': {}}
        default_group = os.path.splitext(os.path.basename(sys.argv[0]))[0]
if not self.guests_only:
if self.config.has_option('defaults', 'hw_group'):
hw_group = self.config.get('defaults', 'hw_group')
else:
hw_group = default_group + '_hw'
if self.config.has_option('defaults', 'vm_group'):
vm_group = self.config.get('defaults', 'vm_group')
else:
vm_group = default_group + '_vm'
if self.config.has_option('defaults', 'prefix_filter'):
prefix_filter = self.config.get('defaults', 'prefix_filter')
else:
prefix_filter = None
# Loop through physical hosts:
for host in HostSystem.all(self.client):
if not self.guests_only:
self._add_host(inv, 'all', host.name)
self._add_host(inv, hw_group, host.name)
host_info = self._get_host_info(host)
if meta_hostvars:
inv['_meta']['hostvars'][host.name] = host_info
self._put_cache(host.name, host_info)
# Loop through all VMs on physical host.
for vm in host.vm:
if prefix_filter:
                    if vm.name.startswith(prefix_filter):
continue
self._add_host(inv, 'all', vm.name)
self._add_host(inv, vm_group, vm.name)
vm_info = self._get_vm_info(vm)
if meta_hostvars:
inv['_meta']['hostvars'][vm.name] = vm_info
self._put_cache(vm.name, vm_info)
# Group by resource pool.
vm_resourcePool = vm_info.get('vmware_resourcePool', None)
if vm_resourcePool:
self._add_child(inv, vm_group, 'resource_pools')
self._add_child(inv, 'resource_pools', vm_resourcePool)
self._add_host(inv, vm_resourcePool, vm.name)
# Group by datastore.
for vm_datastore in vm_info.get('vmware_datastores', []):
self._add_child(inv, vm_group, 'datastores')
self._add_child(inv, 'datastores', vm_datastore)
self._add_host(inv, vm_datastore, vm.name)
# Group by network.
for vm_network in vm_info.get('vmware_networks', []):
self._add_child(inv, vm_group, 'networks')
self._add_child(inv, 'networks', vm_network)
self._add_host(inv, vm_network, vm.name)
# Group by guest OS.
vm_guestId = vm_info.get('vmware_guestId', None)
if vm_guestId:
self._add_child(inv, vm_group, 'guests')
self._add_child(inv, 'guests', vm_guestId)
self._add_host(inv, vm_guestId, vm.name)
# Group all VM templates.
vm_template = vm_info.get('vmware_template', False)
if vm_template:
self._add_child(inv, vm_group, 'templates')
self._add_host(inv, 'templates', vm.name)
self._put_cache(cache_name, inv)
return inv
def get_host(self, hostname):
'''
Read info about a specific host or VM from cache or VMware API.
'''
inv = self._get_cache(hostname, None)
if inv is not None:
return inv
if not self.guests_only:
try:
host = HostSystem.get(self.client, name=hostname)
inv = self._get_host_info(host)
except ObjectNotFoundError:
pass
if inv is None:
try:
vm = VirtualMachine.get(self.client, name=hostname)
inv = self._get_vm_info(vm)
except ObjectNotFoundError:
pass
if inv is not None:
self._put_cache(hostname, inv)
return inv or {}
def main():
parser = optparse.OptionParser()
parser.add_option('--list', action='store_true', dest='list',
default=False, help='Output inventory groups and hosts')
parser.add_option('--host', dest='host', default=None, metavar='HOST',
help='Output variables only for the given hostname')
# Additional options for use when running the script standalone, but never
# used by Ansible.
parser.add_option('--pretty', action='store_true', dest='pretty',
default=False, help='Output nicely-formatted JSON')
parser.add_option('--include-host-systems', action='store_true',
dest='include_host_systems', default=False,
help='Include host systems in addition to VMs')
parser.add_option('--no-meta-hostvars', action='store_false',
dest='meta_hostvars', default=True,
help='Exclude [\'_meta\'][\'hostvars\'] with --list')
options, args = parser.parse_args()
if options.include_host_systems:
vmware_inventory = VMwareInventory(guests_only=False)
else:
vmware_inventory = VMwareInventory()
if options.host is not None:
inventory = vmware_inventory.get_host(options.host)
else:
inventory = vmware_inventory.get_inventory(options.meta_hostvars)
json_kwargs = {}
if options.pretty:
json_kwargs.update({'indent': 4, 'sort_keys': True})
json.dump(inventory, sys.stdout, **json_kwargs)
if __name__ == '__main__':
main()
|
gpl-3.0
|
abhishekarora12/ansible
|
lib/ansible/module_utils/redhat.py
|
324
|
10219
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), James Laska
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import types
import ConfigParser
import shlex
class RegistrationBase(object):
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
self.password = password
def configure(self):
raise NotImplementedError("Must be implemented by a sub-class")
def enable(self):
# Remove any existing redhat.repo
redhat_repo = '/etc/yum.repos.d/redhat.repo'
if os.path.isfile(redhat_repo):
os.unlink(redhat_repo)
def register(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unregister(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unsubscribe(self):
raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
if os.path.isfile(plugin_conf):
cfg = ConfigParser.ConfigParser()
cfg.read([plugin_conf])
if enabled:
cfg.set('main', 'enabled', 1)
else:
cfg.set('main', 'enabled', 0)
            # Rewrite the plugin config in place; 'w+' truncates before cfg.write().
            fd = open(plugin_conf, 'w+')
            cfg.write(fd)
            fd.close()
def subscribe(self, **kwargs):
raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.config = self._read_config()
self.module = module
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
'''
Load RHSM configuration from /etc/rhsm/rhsm.conf.
Returns:
* ConfigParser object
'''
# Read RHSM defaults ...
cp = ConfigParser.ConfigParser()
cp.read(rhsm_conf)
# Add support for specifying a default value w/o having to standup some configuration
# Yeah, I know this should be subclassed ... but, oh well
def get_option_default(self, key, default=''):
sect, opt = key.split('.', 1)
if self.has_section(sect) and self.has_option(sect, opt):
return self.get(sect, opt)
else:
return default
cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser)
return cp
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
def configure(self, **kwargs):
'''
Configure the system as directed for registration with RHN
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'config']
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
# non-configuration parameters and replace '_' with '.'. For example,
        # 'rhsm_baseurl' becomes '--rhsm.baseurl'.
for k,v in kwargs.items():
if re.search(r'^(system|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_','.'), v))
self.module.run_command(args, check_rc=True)
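        # For example (illustrative value), configure(rhsm_baseurl='https://cdn.example.com')
        # runs: subscription-manager config --rhsm.baseurl=https://cdn.example.com
        # while kwargs not prefixed with 'system_' or 'rhsm_' are ignored.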
@property
def is_registered(self):
'''
            Determine whether the current system is registered to RHN.
Returns:
* Boolean - whether the current system is currently registered to
RHN.
'''
# Quick version...
if False:
return os.path.isfile('/etc/pki/consumer/cert.pem') and \
os.path.isfile('/etc/pki/consumer/key.pem')
args = ['subscription-manager', 'identity']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
if rc == 0:
return True
else:
return False
def register(self, username, password, autosubscribe, activationkey):
'''
Register the current system to the provided RHN server
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'register']
# Generate command arguments
if activationkey:
            args.extend(['--activationkey', activationkey])
else:
if autosubscribe:
args.append('--autosubscribe')
if username:
args.extend(['--username', username])
if password:
args.extend(['--password', password])
# Do the needful...
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def unsubscribe(self):
'''
Unsubscribe a system from all subscribed channels
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unsubscribe', '--all']
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def unregister(self):
'''
Unregister a currently registered system
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unregister']
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def subscribe(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
for pool in available_pools.filter(regexp):
pool.subscribe()
class RhsmPool(object):
'''
Convenience class for housing subscription information
'''
def __init__(self, module, **kwargs):
self.module = module
for k,v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return str(self.__getattribute__('_name'))
def subscribe(self):
args = "subscription-manager subscribe --pool %s" % self.PoolId
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
if rc == 0:
return True
else:
return False
class RhsmPools(object):
"""
This class is used for manipulating pools subscriptions with RHSM
"""
def __init__(self, module):
self.module = module
self.products = self._load_product_list()
def __iter__(self):
return self.products.__iter__()
def _load_product_list(self):
"""
Loads list of all available pools for system in data structure
"""
args = "subscription-manager list --available"
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
products = []
for line in stdout.split('\n'):
# Remove leading+trailing whitespace
line = line.strip()
# An empty line implies the end of an output group
if len(line) == 0:
continue
# If a colon ':' is found, parse
elif ':' in line:
(key, value) = line.split(':',1)
key = key.strip().replace(" ", "") # To unify
value = value.strip()
if key in ['ProductName', 'SubscriptionName']:
# Remember the name for later processing
products.append(RhsmPool(self.module, _name=value, key=value))
elif products:
# Associate value with most recently recorded product
products[-1].__setattr__(key, value)
# FIXME - log some warning?
#else:
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
return products
def filter(self, regexp='^$'):
'''
Return a list of RhsmPools whose name matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product._name):
yield product
|
gpl-3.0
|
datakortet/dkmodelfields
|
tests/test_durationfield.py
|
1
|
3701
|
# -*- coding: utf-8 -*-
import sys
from datetime import timedelta, datetime
import pytest
import ttcal
from django.core.exceptions import ValidationError
from django.db import connection
from django.forms import Form
from ttcal import Duration
from dkmodelfields import DurationField
from dkmodelfields import adminforms
@pytest.fixture
def durationform():
class DurationForm(Form):
duration = adminforms.DurationField(label='Duration')
return DurationForm
def test_formfield1():
df = DurationField()
assert isinstance(df.formfield(), adminforms.durationfield.DurationField)
assert df.db_type(connection) == 'BIGINT'
def test_duration_form_field(durationform):
f = durationform()
assert str(f) == '''<tr><th><label for="id_duration">Duration:</label></th><td><input id="id_duration" name="duration" type="text" /></td></tr>'''
def test_duration_form_field_empty(durationform):
f = durationform({'duration': u''})
assert str(f) == '''<tr><th><label for="id_duration">Duration:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input id="id_duration" name="duration" type="text" /></td></tr>'''
def test_duration_form_field_strval(durationform):
f = durationform({'duration': u'2:20:00'})
assert str(f) == '''<tr><th><label for="id_duration">Duration:</label></th><td><input id="id_duration" name="duration" type="text" value="2:20:00" /></td></tr>'''
def test_duration_form_field_duration_val(durationform):
f = durationform({'duration': ttcal.Duration.parse(u'2:20:00')})
assert str(f) == '''<tr><th><label for="id_duration">Duration:</label></th><td><ul class="errorlist"><li>Enter a valid duration.</li></ul><input id="id_duration" name="duration" type="text" /></td></tr>'''
def test_duration_form_field_invalid(durationform):
f = durationform({'duration': u'asdf'})
f.full_clean()
assert f.clean() == {
'duration': ttcal.Duration()
}
def test_adminform_clean():
df = adminforms.DurationField()
with pytest.raises(ValidationError):
assert df.clean(['3.14'])
with pytest.raises(ValidationError):
assert df.to_python(3.14)
def test_create():
df = DurationField()
assert df.description == 'A duration of time'
assert df.db_type(connection) == 'BIGINT'
def test_get_internal_type():
df = DurationField()
assert df.get_internal_type() == 'DurationField'
def test_get_prep_value():
df = DurationField()
assert df.get_prep_value(None) is None
assert df.get_prep_value(60*60*2) == Duration(hours=2, minutes=0, seconds=0).seconds
def test_get_db_prep_save():
df = DurationField()
assert df.get_db_prep_save(Duration(hours=1, minutes=20), connection) == 60*80
assert df.get_db_prep_save(60*80, connection) == Duration(hours=1, minutes=20).toint()
assert df.get_db_prep_save(None, connection) == None
def test_to_python():
df = DurationField()
assert df.to_python(None) is None
assert df.to_python(Duration(hours=1, minutes=40)) == Duration(hours=1, minutes=40)
assert df.to_python(timedelta(hours=3, minutes=30)) == Duration(hours=3, minutes=30)
assert df.to_python(60*60*3) == Duration(hours=3, minutes=0, seconds=0)
assert df.to_python('2:20:0') == Duration(hours=2, minutes=20)
assert df.to_python('asdf') == Duration()
if sys.version_info < (3,):
assert df.to_python(long(687876)) == Duration(hours=191, minutes=4, seconds=36)
def test_formfield():
df = DurationField()
assert df.formfield().to_python('4:30') == Duration(hours=4, minutes=30)
def test_value_to_string():
df = DurationField()
assert df.value_to_string(None) == ''
|
mit
|
chevanlol360/Kernel_LGE_X5
|
scripts/gcc-wrapper.py
|
1276
|
3382
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
])
# Capture the name of the object file, so we can remove it if a forbidden warning is found.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
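# For example (illustrative line), "drivers/foo.c:62:10: warning: unused variable 'x'"
# matches with group(2) == "foo.c:62", the same form used in allowed_warnings above.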
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
|
gpl-2.0
|
justincely/cos_monitoring
|
docs/source/conf.py
|
1
|
8446
|
# -*- coding: utf-8 -*-
#
# smov_cos_redo documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 8 10:53:41 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'COSMO'
# noinspection PyShadowingBuiltins
copyright = u'2015, Association of Universities for Research in Astronomy.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.0'
# The full version, including alpha/beta/rc tags.
release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cosmo_redodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'cosmo_redo.tex', u'cosmo\\_redo Documentation',
u'Justin Ely, Jo Taylor, Mees Fix', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cosmo_redo', u'cosmo_redo Documentation',
[u'Justin Ely, Jo Taylor, Mees Fix'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cosmo_redo', u'cosmo_redo Documentation',
u'Justin Ely, Jo Taylor, Mees Fix', 'cosmo_redo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
bsd-3-clause
|
calixtolinux/Android-ics-kernel
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
1935
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
gpl-2.0
|
mattclay/ansible-modules-core
|
system/systemd.py
|
15
|
15771
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
module: systemd
author:
- "Ansible Core Team"
version_added: "2.2"
short_description: Manage services.
description:
- Controls systemd services on remote hosts.
options:
name:
required: true
description:
- Name of the service.
aliases: ['unit', 'service']
state:
required: false
default: null
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
enabled:
required: false
choices: [ "yes", "no" ]
default: null
description:
        - Whether the service should start on boot. B(At least one of state and enabled is required.)
masked:
required: false
choices: [ "yes", "no" ]
default: null
description:
        - Whether the unit should be masked or not; a masked unit is impossible to start.
daemon_reload:
required: false
default: no
choices: [ "yes", "no" ]
description:
- run daemon-reload before doing any other operations, to make sure systemd has read any changes.
aliases: ['daemon-reload']
user:
required: false
default: no
choices: [ "yes", "no" ]
description:
- run systemctl talking to the service manager of the calling user, rather than the service manager
of the system.
notes:
- One option other than name is required.
requirements:
- A system managed by systemd
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
- systemd: state=started name=httpd
# Example action to stop service cron on debian, if running
- systemd: name=cron state=stopped
# Example action to restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
- systemd:
state: restarted
daemon_reload: yes
name: crond
# Example action to reload service httpd, in all cases
- systemd:
name: httpd
state: reloaded
# Example action to enable service httpd and ensure it is not masked
- systemd:
name: httpd
enabled: yes
masked: no
# Example action to enable a timer for dnf-automatic
- systemd:
name: dnf-automatic.timer
state: started
enabled: True
'''
RETURN = '''
status:
description: A dictionary with the key=value pairs returned from `systemctl show`
returned: success
type: complex
sample: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
"InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"InactiveExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
"TTYVTDisallocate": "no",
"TimeoutStartUSec": "1min 30s",
"TimeoutStopUSec": "1min 30s",
"TimerSlackNSec": "50000",
"Transient": "no",
"Type": "simple",
"UMask": "0022",
"UnitFileState": "enabled",
"WantedBy": "multi-user.target",
"Wants": "system.slice",
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
from ansible.module_utils._text import to_native
# ===========================================
# Main control flow
def main():
# initialize
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='str', aliases=['unit', 'service']),
state = dict(choices=[ 'started', 'stopped', 'restarted', 'reloaded'], type='str'),
enabled = dict(type='bool'),
masked = dict(type='bool'),
daemon_reload= dict(type='bool', default=False, aliases=['daemon-reload']),
user= dict(type='bool', default=False),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
)
systemctl = module.get_bin_path('systemctl')
if module.params['user']:
systemctl = systemctl + " --user"
unit = module.params['name']
rc = 0
out = err = ''
result = {
'name': unit,
'changed': False,
'status': {},
'warnings': [],
}
# Run daemon-reload first, if requested
if module.params['daemon_reload']:
(rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
if rc != 0:
module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
found = False
is_initd = sysv_exists(unit)
is_systemd = False
# check service data, cannot error out on rc as it changes across versions, assume not found
(rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
if rc == 0:
# load return of systemctl show into dictionary for easy access and return
multival = []
if out:
k = None
for line in to_native(out).split('\n'): # systemd can have multiline values delimited with {}
if line.strip():
if k is None:
if '=' in line:
k,v = line.split('=', 1)
if v.lstrip().startswith('{'):
if not v.rstrip().endswith('}'):
multival.append(line)
continue
result['status'][k] = v.strip()
k = None
else:
if line.rstrip().endswith('}'):
result['status'][k] = '\n'.join(multival).strip()
multival = []
k = None
else:
multival.append(line)
is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
# Check for loading error
if is_systemd and 'LoadError' in result['status']:
module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
# Does service exist?
found = is_systemd or is_initd
if is_initd and not is_systemd:
result['warnings'].append('The service (%s) is actually an init script but the system is managed by systemd' % unit)
# mask/unmask the service, if requested, can operate on services before they are installed
if module.params['masked'] is not None:
# state is not masked unless systemd affirms otherwise
masked = ('LoadState' in result['status'] and result['status']['LoadState'] == 'masked')
if masked != module.params['masked']:
result['changed'] = True
if module.params['masked']:
action = 'mask'
else:
action = 'unmask'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
                    # some versions of systemd CAN mask/unmask non-existing services, we only fail on missing if they don't
fail_if_missing(module, found, unit, msg='host')
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
fail_if_missing(module, found, unit, msg='host')
# do we need to enable the service?
enabled = False
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
# check systemctl result or if it is a init script
if rc == 0:
enabled = True
elif rc == 1:
# if both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
if is_initd and (not out.startswith('disabled') or sysv_is_enabled(unit)):
enabled = True
# default to current state
result['enabled'] = enabled
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
result['enabled'] = not enabled
# set service state if requested
if module.params['state'] is not None:
fail_if_missing(module, found, unit, msg="host")
# default to desired state
result['state'] = module.params['state']
# What is current service state?
if 'ActiveState' in result['status']:
action = None
if module.params['state'] == 'started':
if result['status']['ActiveState'] != 'active':
action = 'start'
elif module.params['state'] == 'stopped':
if result['status']['ActiveState'] == 'active':
action = 'stop'
else:
if result['status']['ActiveState'] != 'active':
action = 'start'
else:
action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
if action:
result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
else:
# this should not happen?
module.fail_json(msg="Service is in unknown state", status=result['status'])
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/pip/_vendor/requests/packages/urllib3/util/retry.py
|
360
|
10664
|
from __future__ import absolute_import
import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
Set to a ``False`` value to retry on any verb.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``method_whitelist``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
_observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
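    # Worked example (illustration only): with backoff_factor=0.1 the value
    # returned after successive observed errors is
    #   errors: 1 -> 0s, 2 -> 0.2s, 3 -> 0.4s, 4 -> 0.8s, 5 -> 1.6s, ...
    # always capped at BACKOFF_MAX (120s).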
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None,
_pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
|
mit
|
alexbruy/QGIS
|
python/ext-libs/jinja2/testsuite/utils.py
|
415
|
2235
|
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.utils
~~~~~~~~~~~~~~~~~~~~~~
Tests utilities jinja uses.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import gc
import unittest
import pickle
from jinja2.testsuite import JinjaTestCase
from jinja2.utils import LRUCache, escape, object_type_repr
class LRUCacheTestCase(JinjaTestCase):
def test_simple(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
d["a"]
d["d"] = 4
assert len(d) == 3
assert 'a' in d and 'c' in d and 'd' in d and 'b' not in d
def test_pickleable(self):
cache = LRUCache(2)
cache["foo"] = 42
cache["bar"] = 23
cache["foo"]
for protocol in range(3):
copy = pickle.loads(pickle.dumps(cache, protocol))
assert copy.capacity == cache.capacity
assert copy._mapping == cache._mapping
assert copy._queue == cache._queue
class HelpersTestCase(JinjaTestCase):
def test_object_type_repr(self):
class X(object):
pass
self.assert_equal(object_type_repr(42), 'int object')
self.assert_equal(object_type_repr([]), 'list object')
self.assert_equal(object_type_repr(X()),
'jinja2.testsuite.utils.X object')
self.assert_equal(object_type_repr(None), 'None')
self.assert_equal(object_type_repr(Ellipsis), 'Ellipsis')
class MarkupLeakTestCase(JinjaTestCase):
def test_markup_leaks(self):
counts = set()
for count in range(20):
for item in range(1000):
escape("foo")
escape("<foo>")
escape(u"foo")
escape(u"<foo>")
counts.add(len(gc.get_objects()))
assert len(counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(LRUCacheTestCase))
suite.addTest(unittest.makeSuite(HelpersTestCase))
# this test only tests the c extension
if not hasattr(escape, 'func_code'):
suite.addTest(unittest.makeSuite(MarkupLeakTestCase))
return suite
|
gpl-2.0
|
cburbridge/mongodb_store
|
mongodb_log/scripts/mongodb_log.py
|
2
|
24463
|
#!/usr/bin/python
###########################################################################
# mongodb_log.py - Python based ROS to MongoDB logger (multi-process)
#
# Created: Sun Dec 05 19:45:51 2010
# Copyright 2010-2012 Tim Niemueller [www.niemueller.de]
# 2010-2011 Carnegie Mellon University
# 2010 Intel Labs Pittsburgh
###########################################################################
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# Read the full text in the LICENSE.GPL file in the doc directory.
# make sure we aren't using floor division
from __future__ import division, with_statement
PACKAGE_NAME='mongodb_log'
NODE_NAME='mongodb_log'
NODE_NAME_TEMPLATE='%smongodb_log'
WORKER_NODE_NAME = "%smongodb_log_worker_%d_%s"
QUEUE_MAXSIZE = 100
# import roslib; roslib.load_manifest(PACKAGE_NAME)
import rospy
# for msg_to_document
import mongodb_store.util
import os
import re
import sys
import time
import pprint
import string
import signal
import socket
import subprocess
from threading import Thread, Timer
from Queue import Empty
from optparse import OptionParser
from tempfile import mktemp
from datetime import datetime, timedelta
from time import sleep
from random import randint
from tf.msg import tfMessage
from sensor_msgs.msg import PointCloud, CompressedImage
from roslib.packages import find_node
#from rviz_intel.msg import TriangleMesh
use_setproctitle = True
try:
from setproctitle import setproctitle
except ImportError:
use_setproctitle = False
use_processes = False
# if use_processes:
from multiprocessing import Process, Lock, Condition, Queue, Value, current_process, Event
import multiprocessing as mp
# else:
# from threading import Lock, Condition, Event
# from Queue import Queue
# def Value(t, val, lock=None):
# return val
import genpy
import rosgraph.masterapi
import roslib.message
#from rospy import Time, Duration
import rostopic
from pymongo import SLOW_ONLY
from pymongo.errors import InvalidDocument, InvalidStringData
MongoClient = mongodb_store.util.import_MongoClient()
BACKLOG_WARN_LIMIT = 100
STATS_LOOPTIME = 10
STATS_GRAPHTIME = 60
class Counter(object):
def __init__(self, value = None, lock = True):
self.count = value or Value('i', 0, lock=lock)
self.mutex = Lock()
def increment(self, by = 1):
with self.mutex: self.count.value += by
def value(self):
with self.mutex: return self.count.value
class Barrier(object):
def __init__(self, num_threads):
self.num_threads = num_threads
self.threads_left = Value('i', num_threads, lock=True)
self.mutex = Lock()
self.waitcond = Condition(self.mutex)
def wait(self):
self.mutex.acquire()
self.threads_left.value -= 1
if self.threads_left.value == 0:
self.threads_left.value = self.num_threads
self.waitcond.notify_all()
self.mutex.release()
else:
self.waitcond.wait()
self.mutex.release()
class WorkerProcess(object):
def __init__(self, idnum, topic, collname, in_counter_value, out_counter_value,
drop_counter_value, queue_maxsize,
mongodb_host, mongodb_port, mongodb_name, nodename_prefix):
self.name = "WorkerProcess-%4d-%s" % (idnum, topic)
self.id = idnum
self.topic = topic
self.collname = collname
self.queue = Queue(queue_maxsize)
self.out_counter = Counter(out_counter_value)
self.in_counter = Counter(in_counter_value)
self.drop_counter = Counter(drop_counter_value)
self.worker_out_counter = Counter()
self.worker_in_counter = Counter()
self.worker_drop_counter = Counter()
self.mongodb_host = mongodb_host
self.mongodb_port = mongodb_port
self.mongodb_name = mongodb_name
self.nodename_prefix = nodename_prefix
self.quit = Value('i', 0)
# print "Creating process %s" % self.name
self.process = Process(name=self.name, target=self.run)
# self.process = Thread(name=self.name, target=self.run)
# print "created %s" % self.process
self.process.start()
# print "started %s" % self.process
def init(self):
global use_setproctitle
if use_setproctitle:
setproctitle("mongodb_log %s" % self.topic)
self.mongoconn = MongoClient(self.mongodb_host, self.mongodb_port)
self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level(SLOW_ONLY)
self.collection = self.mongodb[self.collname]
self.collection.count()
self.queue.cancel_join_thread()
# clear signal handlers in this child process, rospy will handle signals for us
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
worker_node_name = WORKER_NODE_NAME % (self.nodename_prefix, self.id, self.collname)
# print "Calling init_node with %s from process %s" % (worker_node_name, mp.current_process())
rospy.init_node(worker_node_name, anonymous=False)
self.subscriber = None
while not self.subscriber and not self.is_quit():
try:
msg_class, real_topic, msg_eval = rostopic.get_topic_class(self.topic, blocking=True)
self.subscriber = rospy.Subscriber(real_topic, msg_class, self.enqueue, self.topic)
except rostopic.ROSTopicIOException:
print("FAILED to subscribe, will keep trying %s" % self.name)
time.sleep(randint(1,10))
except rospy.ROSInitException:
print("FAILED to initialize, will keep trying %s" % self.name)
time.sleep(randint(1,10))
self.subscriber = None
def run(self):
self.init()
print("ACTIVE: %s" % self.name)
# run the thread
self.dequeue()
# free connection
# self.mongoconn.end_request()
def is_quit(self):
return self.quit.value == 1
def shutdown(self):
if not self.is_quit():
#print("SHUTDOWN %s qsize %d" % (self.name, self.queue.qsize()))
self.quit.value = 1
self.queue.put("shutdown")
while not self.queue.empty(): sleep(0.1)
#print("JOIN %s qsize %d" % (self.name, self.queue.qsize()))
self.process.join()
self.process.terminate()
def qsize(self):
return self.queue.qsize()
def enqueue(self, data, topic, current_time=None):
if not self.is_quit():
if self.queue.full():
try:
self.queue.get_nowait()
self.drop_counter.increment()
self.worker_drop_counter.increment()
except Empty:
pass
#self.queue.put((topic, data, current_time or datetime.now()))
self.queue.put((topic, data, rospy.get_time(), data._connection_header))
self.in_counter.increment()
self.worker_in_counter.increment()
def dequeue(self):
while not self.is_quit():
t = None
try:
t = self.queue.get(True)
except IOError:
# Anticipate Ctrl-C
#print("Quit W1: %s" % self.name)
self.quit.value = 1
break
if isinstance(t, tuple):
self.out_counter.increment()
self.worker_out_counter.increment()
topic = t[0]
msg = t[1]
ctime = t[2]
connection_header = t[3]
if isinstance(msg, rospy.Message):
try:
#print(self.sep + threading.current_thread().getName() + "@" + topic+": ")
#pprint.pprint(doc)
meta = {}
# switched to use inserted_at to match message_store
# meta["recorded"] = ctime or datetime.now()
meta["topic"] = topic
if connection_header['latching'] == '1':
meta['latch'] = True
else:
meta['latch'] = False
if ctime is not None:
meta['inserted_at'] = datetime.utcfromtimestamp(ctime)
else:
meta['inserted_at'] = datetime.utcfromtimestamp(rospy.get_rostime().to_sec())
mongodb_store.util.store_message(self.collection, msg, meta)
except InvalidDocument, e:
print("InvalidDocument " + current_process().name + "@" + topic +": \n")
print e
except InvalidStringData, e:
print("InvalidStringData " + current_process().name + "@" + topic +": \n")
print e
else:
#print("Quit W2: %s" % self.name)
self.quit.value = 1
# we must make sure to clear the queue before exiting,
# or the parent thread might deadlock otherwise
#print("Quit W3: %s" % self.name)
self.subscriber.unregister()
self.subscriber = None
while not self.queue.empty():
t = self.queue.get_nowait()
print("STOPPED: %s" % self.name)
class SubprocessWorker(object):
def __init__(self, idnum, topic, collname, in_counter_value, out_counter_value,
drop_counter_value, queue_maxsize,
mongodb_host, mongodb_port, mongodb_name, nodename_prefix, cpp_logger):
self.name = "SubprocessWorker-%4d-%s" % (idnum, topic)
self.id = idnum
self.topic = topic
self.collname = collname
self.queue = Queue(queue_maxsize)
self.out_counter = Counter(out_counter_value)
self.in_counter = Counter(in_counter_value)
self.drop_counter = Counter(drop_counter_value)
self.worker_out_counter = Counter()
self.worker_in_counter = Counter()
self.worker_drop_counter = Counter()
self.mongodb_host = mongodb_host
self.mongodb_port = mongodb_port
self.mongodb_name = mongodb_name
self.nodename_prefix = nodename_prefix
self.quit = False
        self._qsize = 0
self.thread = Thread(name=self.name, target=self.run)
mongodb_host_port = "%s:%d" % (mongodb_host, mongodb_port)
collection = "%s.%s" % (mongodb_name, collname)
nodename = WORKER_NODE_NAME % (self.nodename_prefix, self.id, self.collname)
self.process = subprocess.Popen([cpp_logger[0], "-t", topic, "-n", nodename,
"-m", mongodb_host_port, "-c", collection],
stdout=subprocess.PIPE)
self.thread.start()
    def qsize(self):
        return self._qsize
def run(self):
while not self.quit:
line = self.process.stdout.readline().rstrip()
if line == "": continue
arr = string.split(line, ":")
self.in_counter.increment(int(arr[0]))
self.out_counter.increment(int(arr[1]))
self.drop_counter.increment(int(arr[2]))
            self._qsize = int(arr[3])
self.worker_in_counter.increment(int(arr[0]))
self.worker_out_counter.increment(int(arr[1]))
self.worker_drop_counter.increment(int(arr[2]))
def shutdown(self):
self.quit = True
self.process.kill()
self.process.wait()
class MongoWriter(object):
def __init__(self, topics = [],
all_topics = False, all_topics_interval = 5,
exclude_topics = [],
mongodb_host=None, mongodb_port=None, mongodb_name="roslog",
no_specific=False, nodename_prefix=""):
self.all_topics = all_topics
self.all_topics_interval = all_topics_interval
self.exclude_topics = exclude_topics
self.mongodb_host = mongodb_host
self.mongodb_port = mongodb_port
self.mongodb_name = mongodb_name
self.no_specific = no_specific
self.nodename_prefix = nodename_prefix
self.quit = False
self.topics = set()
#self.str_fn = roslib.message.strify_message
self.sep = "\n" #'\033[2J\033[;H'
self.in_counter = Counter()
self.out_counter = Counter()
self.drop_counter = Counter()
self.workers = {}
global use_setproctitle
if use_setproctitle:
setproctitle("mongodb_log MAIN")
self.exclude_regex = []
for et in self.exclude_topics:
self.exclude_regex.append(re.compile(et))
self.exclude_already = []
self.missing_topics = self.subscribe_topics(set(topics))
self.fill_in_topics()
if self.all_topics:
print("All topics")
self.ros_master = rosgraph.masterapi.Master(NODE_NAME_TEMPLATE % self.nodename_prefix)
self.update_topics(restart=False)
self.start_all_topics_timer()
def subscribe_topics(self, topics):
# print "existing topics %s" % self.topics
# print "subscribing to topics %s" % topics
missing_topics = set()
for topic in topics:
if topic and topic[-1] == '/':
topic = topic[:-1]
if topic in self.topics: continue
if topic in self.exclude_already: continue
do_continue = False
for tre in self.exclude_regex:
if tre.match(topic):
print("*** IGNORING topic %s due to exclusion rule" % topic)
do_continue = True
self.exclude_already.append(topic)
break
if do_continue: continue
            # although the collection name conversion is not strictly necessary, since MongoDB could handle
            # pure topic names as collection names and we could then use mongodb[topic], we want
            # to have names that work better with the query tools, even though there is the theoretical
            # possibility of name clashes (hence the check)
collname = mongodb_store.util.topic_name_to_collection_name(topic)
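            # e.g. (assumed mapping, the exact rule lives in mongodb_store.util):
            # a topic such as '/robot/pose' could become a collection name like
            # 'robot_pose', which is easier to address from the query tools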
if collname in self.workers.keys():
print("Two converted topic names clash: %s, ignoring topic %s"
% (collname, topic))
else:
try:
print("Adding topic %s" % topic)
w = self.create_worker(len(self.workers), topic, collname)
self.workers[collname] = w
self.topics |= set([topic])
except Exception, e:
                    print('Failed to subscribe to %s due to %s' % (topic, e))
missing_topics.add(topic)
return missing_topics
def create_worker(self, idnum, topic, collname):
try:
msg_class, real_topic, msg_eval = rostopic.get_topic_class(topic, blocking=False)
except Exception, e:
print('Topic %s not announced, cannot get type: %s' % (topic, e))
raise
if real_topic is None:
raise rostopic.ROSTopicException('topic type was empty, probably not announced')
w = None
node_path = None
if not self.no_specific and msg_class == tfMessage:
print("DETECTED transform topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_tf")
if not node_path:
print("FAILED to detect mongodb_log_tf, falling back to generic logger (did not build package?)")
elif not self.no_specific and msg_class == PointCloud:
print("DETECTED point cloud topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_pcl")
if not node_path:
print("FAILED to detect mongodb_log_pcl, falling back to generic logger (did not build package?)")
elif not self.no_specific and msg_class == CompressedImage:
print("DETECTED compressed image topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_cimg")
if not node_path:
print("FAILED to detect mongodb_log_cimg, falling back to generic logger (did not build package?)")
"""
elif msg_class == TriangleMesh:
print("DETECTED triangle mesh topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_trimesh")
if not node_path:
print("FAILED to detect mongodb_log_trimesh, falling back to generic logger (did not build package?)")
"""
if node_path:
w = SubprocessWorker(idnum, topic, collname,
self.in_counter.count, self.out_counter.count,
self.drop_counter.count, QUEUE_MAXSIZE,
self.mongodb_host, self.mongodb_port, self.mongodb_name,
self.nodename_prefix, node_path)
if not w:
print("GENERIC Python logger used for topic %s" % topic)
w = WorkerProcess(idnum, topic, collname,
self.in_counter.count, self.out_counter.count,
self.drop_counter.count, QUEUE_MAXSIZE,
self.mongodb_host, self.mongodb_port, self.mongodb_name,
self.nodename_prefix)
return w
def run(self):
looping_threshold = timedelta(0, STATS_LOOPTIME, 0)
while not self.quit:
started = datetime.now()
# the following code makes sure we run once per STATS_LOOPTIME, taking
# varying run-times and interrupted sleeps into account
td = datetime.now() - started
while not self.quit and td < looping_threshold:
sleeptime = STATS_LOOPTIME - (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
if sleeptime > 0: sleep(sleeptime)
td = datetime.now() - started
def shutdown(self):
self.quit = True
if hasattr(self, "all_topics_timer"): self.all_topics_timer.cancel()
for name, w in self.workers.items():
#print("Shutdown %s" % name)
w.shutdown()
def start_all_topics_timer(self):
if not self.all_topics or self.quit: return
self.all_topics_timer = Timer(self.all_topics_interval, self.update_topics)
self.all_topics_timer.start()
def start_fill_in_topics_timer(self):
if len(self.missing_topics) == 0 or self.quit: return
self.fill_in_topics_timer = Timer(self.all_topics_interval, self.fill_in_topics)
self.fill_in_topics_timer.start()
def update_topics(self, restart=True):
"""
Called at a fixed interval (see start_all_topics_timer) to update the list of topics if we are logging all topics (e.g. --all-topics flag is given).
"""
if not self.all_topics or self.quit: return
ts = rospy.get_published_topics()
topics = set([t for t, t_type in ts if t != "/rosout" and t != "/rosout_agg"])
new_topics = topics - self.topics
self.subscribe_topics(new_topics)
if restart: self.start_all_topics_timer()
def fill_in_topics(self, restart=True):
"""
        Called at a fixed interval (see start_fill_in_topics_timer) to retry subscribing to topics that could not be subscribed to before (e.g. because they were not yet announced).
"""
if len(self.missing_topics) == 0 or self.quit: return
self.missing_topics = self.subscribe_topics(self.missing_topics)
if restart: self.start_fill_in_topics_timer()
def get_memory_usage_for_pid(self, pid):
scale = {'kB': 1024, 'mB': 1024 * 1024,
'KB': 1024, 'MB': 1024 * 1024}
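        # The lines of interest in /proc/<pid>/status look like, for example:
        #   VmSize:   123456 kB
        #   VmRSS:     23456 kB
        #   VmStk:       136 kB
        # tmp[1] is the numeric field, tmp[2] the unit looked up in 'scale'.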
try:
f = open("/proc/%d/status" % pid)
t = f.read()
f.close()
except:
return (0, 0, 0)
if t == "": return (0, 0, 0)
try:
tmp = t[t.index("VmSize:"):].split(None, 3)
size = int(tmp[1]) * scale[tmp[2]]
tmp = t[t.index("VmRSS:"):].split(None, 3)
rss = int(tmp[1]) * scale[tmp[2]]
tmp = t[t.index("VmStk:"):].split(None, 3)
stack = int(tmp[1]) * scale[tmp[2]]
return (size, rss, stack)
except ValueError:
return (0, 0, 0)
def get_memory_usage(self):
size, rss, stack = 0, 0, 0
for _, w in self.workers.items():
pmem = self.get_memory_usage_for_pid(w.process.pid)
size += pmem[0]
rss += pmem[1]
stack += pmem[2]
#print("Size: %d RSS: %s Stack: %s" % (size, rss, stack))
return (size, rss, stack)
def main(argv):
parser = OptionParser()
parser.usage += " [TOPICs...]"
parser.add_option("--nodename-prefix", dest="nodename_prefix",
help="Prefix for worker node names", metavar="ROS_NODE_NAME",
default="")
parser.add_option("--mongodb-host", dest="mongodb_host",
help="Hostname of MongoDB", metavar="HOST",
default=rospy.get_param("mongodb_host", "localhost"))
parser.add_option("--mongodb-port", dest="mongodb_port",
help="Hostname of MongoDB", type="int",
metavar="PORT", default=rospy.get_param("mongodb_port", 27017))
parser.add_option("--mongodb-name", dest="mongodb_name",
help="Name of DB in which to store values",
metavar="NAME", default="roslog")
parser.add_option("-a", "--all-topics", dest="all_topics", default=False,
action="store_true",
help="Log all existing topics (still excludes /rosout, /rosout_agg)")
parser.add_option("--all-topics-interval", dest="all_topics_interval", default=5,
help="Time in seconds between checks for new topics", type="int")
parser.add_option("-x", "--exclude", dest="exclude",
help="Exclude topics matching REGEX, may be given multiple times",
action="append", type="string", metavar="REGEX", default=[])
parser.add_option("--no-specific", dest="no_specific", default=False,
action="store_true", help="Disable specific loggers")
(options, args) = parser.parse_args(rospy.myargv(argv=sys.argv)[1:])
if not options.all_topics and len(args) == 0:
parser.print_help()
return
try:
rosgraph.masterapi.Master(NODE_NAME_TEMPLATE % options.nodename_prefix).getPid()
except socket.error:
print("Failed to communicate with master")
mongowriter = MongoWriter(topics=args,
all_topics=options.all_topics,
all_topics_interval = options.all_topics_interval,
exclude_topics = options.exclude,
mongodb_host=options.mongodb_host,
mongodb_port=options.mongodb_port,
mongodb_name=options.mongodb_name,
no_specific=options.no_specific,
nodename_prefix=options.nodename_prefix)
def signal_handler(signal, frame):
mongowriter.shutdown()
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
mongowriter.run()
if __name__ == "__main__":
main(sys.argv)
|
bsd-3-clause
|
matiasherranz/keyczar
|
cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/packaging/targz.py
|
19
|
1792
|
"""SCons.Tool.Packaging.targz
The targz SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/targz.py 4043 2009/02/23 09:06:45 scons"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.gz')
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
return bld(env, target, source, TARFLAGS='-zc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
apache-2.0
|
fogleman/pg
|
pg/font.py
|
3
|
4056
|
from OpenGL.GL import *
from itertools import product
from math import ceil, log
from PIL import Image, ImageDraw, ImageFont
from .core import Context, Texture, VertexBuffer, Scene
from .matrix import Matrix
from .programs import TextProgram
from .util import interleave
def float_to_byte_color(color):
return tuple(int(round(x * 255)) for x in color)
class Font(object):
def __init__(self, scene_or_window, unit, name, size, fg=None, bg=None):
window = scene_or_window
if isinstance(scene_or_window, Scene):
window = scene_or_window.window
self.fg = float_to_byte_color(fg or (1.0, 1.0, 1.0, 1.0))
self.bg = float_to_byte_color(bg or (0.0, 0.0, 0.0, 0.0))
if len(self.fg) == 3:
self.fg += (255,)
if len(self.bg) == 3:
self.bg += (255,)
self.window = window
self.kerning = {}
self.load(name, size)
self.context = Context(TextProgram())
self.context.sampler = Texture(unit, self.im)
def render(self, text, coord=(0, 0), anchor=(0, 0)):
size, positions, uvs = self.generate_vertex_data(text)
ww, wh = self.window.size
tx, ty = coord
ax, ay = anchor
tw, th = size
matrix = Matrix()
matrix = matrix.translate((tx - tw * ax, ty - th * ay, 0))
matrix = matrix.orthographic(0, ww, wh, 0, -1, 1)
self.context.matrix = matrix
vertex_buffer = VertexBuffer(interleave(positions, uvs))
self.context.position, self.context.uv = vertex_buffer.slices(2, 2)
glEnable(GL_BLEND)
glDisable(GL_DEPTH_TEST)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.context.draw(GL_TRIANGLES)
glEnable(GL_DEPTH_TEST)
glDisable(GL_BLEND)
vertex_buffer.delete()
def generate_vertex_data(self, text):
positions = []
uvs = []
data = [
(0, 0), (0, 1), (1, 0),
(0, 1), (1, 1), (1, 0),
]
x = y = 0
previous = None
for c in text:
if c not in self.sizes:
c = ' '
index = ord(c) - 32
            row = index // 10
col = index % 10
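            # printable ASCII (32..126) is packed into a 10x10 glyph grid,
            # e.g. 'A' (ord 65) -> index 33 -> row 3, column 3 of the atlas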
u = self.du * col
v = self.dv * row
sx, sy = self.sizes[c]
ox, oy = self.offsets[c]
k = self.get_kerning(previous, c) if previous else 0
x += k
for i, j in data:
cx = x + i * self.dx + ox
cy = y + j * self.dy + oy
positions.append((cx, cy))
uvs.append((u + i * self.du, 1 - v - j * self.dv))
x += ox + sx
previous = c
size = (x, self.dy)
return size, positions, uvs
def get_kerning(self, c1, c2):
key = c1 + c2
if key not in self.kerning:
a = self.sizes[c1][0] + self.sizes[c2][0]
b = self.font.getsize(key)[0]
self.kerning[key] = b - a
return self.kerning[key]
def load(self, name, size):
font = ImageFont.truetype(name, size)
chars = [chr(x) for x in range(32, 127)]
sizes = dict((c, font.getsize(c)) for c in chars)
offsets = dict((c, font.getoffset(c)) for c in chars)
mw = max(sizes[c][0] for c in chars) + 1
mh = max(sizes[c][1] for c in chars) + 1
rows = 10
cols = 10
w = mw * cols
h = mh * rows
w = int(2 ** ceil(log(w) / log(2)))
h = int(2 ** ceil(log(h) / log(2)))
im = Image.new('RGBA', (w, h), self.bg)
draw = ImageDraw.Draw(im)
for (row, col), c in zip(product(range(rows), range(cols)), chars):
x = col * mw
y = row * mh
dx, dy = offsets[c]
draw.text((x + 1 - dx, y + 1 - dy), c, self.fg, font)
self.dx = mw
self.dy = mh
self.du = float(mw) / w
self.dv = float(mh) / h
self.sizes = sizes
self.offsets = offsets
self.im = im
self.font = font
|
mit
|
Oliver2213/NVDAYoutube-dl
|
addon/globalPlugins/nvdaYoutubeDL/youtube_dl/extractor/mtv.py
|
9
|
14408
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_str,
)
from ..utils import (
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
HEADRequest,
sanitized_Request,
unescapeHTML,
url_basename,
RegexNotFoundError,
)
def _media_xml_tag(tag):
return '{http://search.yahoo.com/mrss/}%s' % tag
class MTVServicesInfoExtractor(InfoExtractor):
_MOBILE_TEMPLATE = None
_LANG = None
@staticmethod
def _id_from_uri(uri):
return uri.split(':')[-1]
# This was originally implemented for ComedyCentral, but it also works here
@staticmethod
def _transform_rtmp_url(rtmp_video_url):
m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url)
if not m:
return rtmp_video_url
base = 'http://viacommtvstrmfs.fplive.net/'
return base + m.group('finalid')
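    # Illustration with a made-up URL: an input such as
    #   rtmpe://cp12345.edgefcs.net/ondemand/gsp.comedystor/com/clip.mp4
    # would be rewritten to
    #   http://viacommtvstrmfs.fplive.net/gsp.comedystor/com/clip.mp4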
def _get_feed_url(self, uri):
return self._FEED_URL
def _get_thumbnail_url(self, uri, itemdoc):
search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail'))
thumb_node = itemdoc.find(search_path)
if thumb_node is None:
return None
else:
return thumb_node.attrib['url']
def _extract_mobile_video_formats(self, mtvn_id):
webpage_url = self._MOBILE_TEMPLATE % mtvn_id
req = sanitized_Request(webpage_url)
# Otherwise we get a webpage that would execute some javascript
req.add_header('User-Agent', 'curl/7')
webpage = self._download_webpage(req, mtvn_id,
'Downloading mobile page')
metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))
req = HEADRequest(metrics_url)
response = self._request_webpage(req, mtvn_id, 'Resolving url')
url = response.geturl()
# Transform the url to get the best quality:
url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
return [{'url': url, 'ext': 'mp4'}]
def _extract_video_formats(self, mdoc, mtvn_id):
if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4|copyright_error\.flv(?:\?geo\b.+?)?)$', mdoc.find('.//src').text) is not None:
if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
self.to_screen('The normal version is not available from your '
'country, trying with the mobile version')
return self._extract_mobile_video_formats(mtvn_id)
raise ExtractorError('This video is not available from your country.',
expected=True)
formats = []
for rendition in mdoc.findall('.//rendition'):
try:
_, _, ext = rendition.attrib['type'].partition('/')
rtmp_video_url = rendition.find('./src').text
if rtmp_video_url.endswith('siteunavail.png'):
continue
formats.append({
'ext': ext,
'url': self._transform_rtmp_url(rtmp_video_url),
'format_id': rendition.get('bitrate'),
'width': int(rendition.get('width')),
'height': int(rendition.get('height')),
})
except (KeyError, TypeError):
raise ExtractorError('Invalid rendition field.')
self._sort_formats(formats)
return formats
def _extract_subtitles(self, mdoc, mtvn_id):
subtitles = {}
for transcript in mdoc.findall('.//transcript'):
if transcript.get('kind') != 'captions':
continue
lang = transcript.get('srclang')
subtitles[lang] = [{
'url': compat_str(typographic.get('src')),
'ext': typographic.get('format')
} for typographic in transcript.findall('./typographic')]
return subtitles
def _get_video_info(self, itemdoc):
uri = itemdoc.find('guid').text
video_id = self._id_from_uri(uri)
self.report_extraction(video_id)
mediagen_url = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content'))).attrib['url']
# Remove the templates, like &device={device}
mediagen_url = re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', mediagen_url)
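        # e.g. (hypothetical) '...?uri=mgid:uma:video:mtv.com:1043906&device={device}'
        # becomes '...?uri=mgid:uma:video:mtv.com:1043906'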
if 'acceptMethods' not in mediagen_url:
mediagen_url += '&' if '?' in mediagen_url else '?'
mediagen_url += 'acceptMethods=fms'
mediagen_doc = self._download_xml(mediagen_url, video_id,
'Downloading video urls')
item = mediagen_doc.find('./video/item')
if item is not None and item.get('type') == 'text':
message = '%s returned error: ' % self.IE_NAME
if item.get('code') is not None:
message += '%s - ' % item.get('code')
message += item.text
raise ExtractorError(message, expected=True)
description_node = itemdoc.find('description')
if description_node is not None:
description = description_node.text.strip()
else:
description = None
title_el = None
if title_el is None:
title_el = find_xpath_attr(
itemdoc, './/{http://search.yahoo.com/mrss/}category',
'scheme', 'urn:mtvn:video_title')
if title_el is None:
title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title')
if title_el is None:
title_el = itemdoc.find('.//title') or itemdoc.find('./title')
if title_el.text is None:
title_el = None
title = title_el.text
if title is None:
raise ExtractorError('Could not find video title')
title = title.strip()
        # This is a short id that's used in the webpage urls
mtvn_id = None
mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category',
'scheme', 'urn:mtvn:id')
if mtvn_id_node is not None:
mtvn_id = mtvn_id_node.text
return {
'title': title,
'formats': self._extract_video_formats(mediagen_doc, mtvn_id),
'subtitles': self._extract_subtitles(mediagen_doc, mtvn_id),
'id': video_id,
'thumbnail': self._get_thumbnail_url(uri, itemdoc),
'description': description,
}
def _get_videos_info(self, uri):
video_id = self._id_from_uri(uri)
feed_url = self._get_feed_url(uri)
data = compat_urllib_parse.urlencode({'uri': uri})
info_url = feed_url + '?'
if self._LANG:
info_url += 'lang=%s&' % self._LANG
info_url += data
return self._get_videos_info_from_url(info_url, video_id)
def _get_videos_info_from_url(self, url, video_id):
idoc = self._download_xml(
url, video_id,
'Downloading info', transform_source=fix_xml_ampersands)
return self.playlist_result(
[self._get_video_info(item) for item in idoc.findall('.//item')])
def _real_extract(self, url):
title = url_basename(url)
webpage = self._download_webpage(url, title)
try:
# the url can be http://media.mtvnservices.com/fb/{mgid}.swf
# or http://media.mtvnservices.com/{mgid}
og_url = self._og_search_video_url(webpage)
mgid = url_basename(og_url)
if mgid.endswith('.swf'):
mgid = mgid[:-4]
except RegexNotFoundError:
mgid = None
if mgid is None or ':' not in mgid:
mgid = self._search_regex(
[r'data-mgid="(.*?)"', r'swfobject.embedSWF\(".*?(mgid:.*?)"'],
webpage, 'mgid', default=None)
if not mgid:
sm4_embed = self._html_search_meta(
'sm4:video:embed', webpage, 'sm4 embed', default='')
mgid = self._search_regex(
r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid')
videos_info = self._get_videos_info(mgid)
return videos_info
class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
IE_NAME = 'mtvservices:embedded'
_VALID_URL = r'https?://media\.mtvnservices\.com/embed/(?P<mgid>.+?)(\?|/|$)'
_TEST = {
# From http://www.thewrap.com/peter-dinklage-sums-up-game-of-thrones-in-45-seconds-video/
'url': 'http://media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906/cp~vid%3D1043906%26uri%3Dmgid%3Auma%3Avideo%3Amtv.com%3A1043906',
'md5': 'cb349b21a7897164cede95bd7bf3fbb9',
'info_dict': {
'id': '1043906',
'ext': 'mp4',
'title': 'Peter Dinklage Sums Up \'Game Of Thrones\' In 45 Seconds',
'description': '"Sexy sexy sexy, stabby stabby stabby, beautiful language," says Peter Dinklage as he tries summarizing "Game of Thrones" in under a minute.',
},
}
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media.mtvnservices.com/embed/.+?)\1', webpage)
if mobj:
return mobj.group('url')
def _get_feed_url(self, uri):
video_id = self._id_from_uri(uri)
site_id = uri.replace(video_id, '')
config_url = ('http://media.mtvnservices.com/pmt/e1/players/{0}/'
'context4/context5/config.xml'.format(site_id))
config_doc = self._download_xml(config_url, video_id)
feed_node = config_doc.find('.//feed')
feed_url = feed_node.text.strip().split('?')[0]
return feed_url
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
mgid = mobj.group('mgid')
return self._get_videos_info(mgid)
class MTVIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)^https?://
(?:(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$|
m\.mtv\.com/videos/video\.rbml\?.*?id=(?P<mgid>[^&]+))'''
_FEED_URL = 'http://www.mtv.com/player/embed/AS3/rss/'
_TESTS = [
{
'url': 'http://www.mtv.com/videos/misc/853555/ours-vh1-storytellers.jhtml',
'md5': '850f3f143316b1e71fa56a4edfd6e0f8',
'info_dict': {
'id': '853555',
'ext': 'mp4',
'title': 'Taylor Swift - "Ours (VH1 Storytellers)"',
'description': 'Album: Taylor Swift performs "Ours" for VH1 Storytellers at Harvey Mudd College.',
},
},
]
def _get_thumbnail_url(self, uri, itemdoc):
return 'http://mtv.mtvnimages.com/uri/' + uri
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
uri = mobj.groupdict().get('mgid')
if uri is None:
webpage = self._download_webpage(url, video_id)
# Some videos come from Vevo.com
m_vevo = re.search(
r'(?s)isVevoVideo = true;.*?vevoVideoId = "(.*?)";', webpage)
if m_vevo:
vevo_id = m_vevo.group(1)
self.to_screen('Vevo video detected: %s' % vevo_id)
return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
return self._get_videos_info(uri)
class MTVIggyIE(MTVServicesInfoExtractor):
IE_NAME = 'mtviggy.com'
_VALID_URL = r'https?://www\.mtviggy\.com/videos/.+'
_TEST = {
'url': 'http://www.mtviggy.com/videos/arcade-fire-behind-the-scenes-at-the-biggest-music-experiment-yet/',
'info_dict': {
'id': '984696',
'ext': 'mp4',
'title': 'Arcade Fire: Behind the Scenes at the Biggest Music Experiment Yet',
}
}
_FEED_URL = 'http://all.mtvworldverticals.com/feed-xml/'
class MTVDEIE(MTVServicesInfoExtractor):
IE_NAME = 'mtv.de'
_VALID_URL = r'https?://(?:www\.)?mtv\.de/(?:artists|shows|news)/(?:[^/]+/)*(?P<id>\d+)-[^/#?]+/*(?:[#?].*)?$'
_TESTS = [{
'url': 'http://www.mtv.de/artists/10571-cro/videos/61131-traum',
'info_dict': {
'id': 'music_video-a50bc5f0b3aa4b3190aa',
'ext': 'mp4',
'title': 'MusicVideo_cro-traum',
'description': 'Cro - Traum',
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
# mediagen URL without query (e.g. http://videos.mtvnn.com/mediagen/e865da714c166d18d6f80893195fcb97)
'url': 'http://www.mtv.de/shows/933-teen-mom-2/staffeln/5353/folgen/63565-enthullungen',
'info_dict': {
'id': 'local_playlist-f5ae778b9832cc837189',
'ext': 'mp4',
'title': 'Episode_teen-mom-2_shows_season-5_episode-1_full-episode_part1',
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
# single video in pagePlaylist with different id
'url': 'http://www.mtv.de/news/77491-mtv-movies-spotlight-pixels-teil-3',
'info_dict': {
'id': 'local_playlist-4e760566473c4c8c5344',
'ext': 'mp4',
'title': 'Article_mtv-movies-spotlight-pixels-teil-3_short-clips_part1',
'description': 'MTV Movies Supercut',
},
'params': {
# rtmp download
'skip_download': True,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
playlist = self._parse_json(
self._search_regex(
r'window\.pagePlaylist\s*=\s*(\[.+?\]);\n', webpage, 'page playlist'),
video_id)
# news pages contain single video in playlist with different id
if len(playlist) == 1:
return self._get_videos_info_from_url(playlist[0]['mrss'], video_id)
for item in playlist:
item_id = item.get('id')
if item_id and compat_str(item_id) == video_id:
return self._get_videos_info_from_url(item['mrss'], video_id)
|
gpl-2.0
|
jhaus/pinax
|
pinax/apps/signup_codes/models.py
|
2
|
3734
|
import datetime
from django.conf import settings
from django.core.mail import send_mail
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.utils.hashcompat import sha_constructor
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from pinax.apps.signup_codes.signals import signup_code_sent, signup_code_used
class SignupCode(models.Model):
code = models.CharField(max_length=40)
max_uses = models.PositiveIntegerField(default=0)
expiry = models.DateTimeField(null=True, blank=True)
inviter = models.ForeignKey(User, null=True, blank=True)
email = models.EmailField(blank=True)
notes = models.TextField(blank=True)
sent = models.DateTimeField(null=True, blank=True)
created = models.DateTimeField(default=datetime.datetime.now, editable=False)
# calculated
use_count = models.PositiveIntegerField(editable=False, default=0)
def __unicode__(self):
return "%s [%s]" % (self.email, self.code)
@classmethod
def create(cls, email, expiry, group=None):
expiry = datetime.datetime.now() + datetime.timedelta(hours=expiry)
bits = [
settings.SECRET_KEY,
email,
str(expiry),
]
if group:
bits.append("%s%s" % (group._meta, group.pk))
code = sha_constructor("".join(bits)).hexdigest()
return cls(code=code, email=email, max_uses=1, expiry=expiry)
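    # Typical usage (sketch, not part of the original module):
    #   code = SignupCode.create("[email protected]", expiry=24)
    #   code.save()
    #   code.send()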
@classmethod
def check(cls, code):
if code:
try:
signup_code = cls._default_manager.get(code=code)
except cls.DoesNotExist:
return False
else:
# check max uses
if signup_code.max_uses and signup_code.max_uses < signup_code.use_count + 1:
return False
else:
if signup_code.expiry and datetime.datetime.now() > signup_code.expiry:
return False
else:
return signup_code
else:
return False
def calculate_use_count(self):
self.use_count = self.signupcoderesult_set.count()
self.save()
def use(self, user):
"""
Add a SignupCode result attached to the given user.
"""
result = SignupCodeResult()
result.signup_code = self
result.user = user
result.save()
signup_code_used.send(sender=result.__class__, signup_code_result=result)
def send(self, group=None):
current_site = Site.objects.get_current()
domain = unicode(current_site.domain)
ctx = {
"group": group,
"signup_code": self,
"domain": domain,
}
subject = render_to_string("signup_codes/invite_user_subject.txt", ctx)
message = render_to_string("signup_codes/invite_user.txt", ctx)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [self.email])
self.sent = datetime.datetime.now()
self.save()
signup_code_sent.send(
sender=SignupCode,
signup_code=self
)
class SignupCodeResult(models.Model):
signup_code = models.ForeignKey(SignupCode)
user = models.ForeignKey(User)
timestamp = models.DateTimeField(default=datetime.datetime.now)
@receiver(post_save, sender=SignupCodeResult)
def signup_code_result_save(sender, instance=None, created=False, **kwargs):
if instance:
signup_code = instance.signup_code
signup_code.calculate_use_count()
|
mit
|
arnavd96/Cinemiezer
|
myvenv/lib/python3.4/site-packages/docutils/parsers/rst/languages/pl.py
|
54
|
3362
|
# $Id$
# Author: Robert Wojciechowicz <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Polish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'uwaga': 'attention',
'ostro\u017cnie': 'caution',
'code (translation required)': 'code',
'niebezpiecze\u0144stwo': 'danger',
'b\u0142\u0105d': 'error',
'wskaz\u00f3wka': 'hint',
'wa\u017cne': 'important',
'przypis': 'note',
'rada': 'tip',
'ostrze\u017cenie': 'warning',
'upomnienie': 'admonition',
'ramka': 'sidebar',
'temat': 'topic',
'blok-linii': 'line-block',
'sparsowany-litera\u0142': 'parsed-literal',
'rubryka': 'rubric',
'epigraf': 'epigraph',
'highlights': 'highlights', # FIXME no polish equivalent?
'pull-quote': 'pull-quote', # FIXME no polish equivalent?
'z\u0142o\u017cony': 'compound',
'kontener': 'container',
#'questions': 'questions',
'tabela': 'table',
'tabela-csv': 'csv-table',
'tabela-listowa': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
'meta': 'meta',
'math (translation required)': 'math',
#'imagemap': 'imagemap',
'obraz': 'image',
'rycina': 'figure',
'do\u0142\u0105cz': 'include',
'surowe': 'raw',
'zast\u0105p': 'replace',
'unikod': 'unicode',
'data': 'date',
'klasa': 'class',
'rola': 'role',
'rola-domy\u015blna': 'default-role',
'tytu\u0142': 'title',
'tre\u015b\u0107': 'contents',
'sectnum': 'sectnum',
'numeracja-sekcji': 'sectnum',
'nag\u0142\u00f3wek': 'header',
'stopka': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
'target-notes': 'target-notes', # FIXME no polish equivalent?
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Polish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
'skr\u00f3t': 'abbreviation',
'akronim': 'acronym',
'code (translation required)': 'code',
'indeks': 'index',
'indeks-dolny': 'subscript',
'indeks-g\u00f3rny': 'superscript',
'referencja-tytu\u0142': 'title-reference',
'referencja-pep': 'pep-reference',
'referencja-rfc': 'rfc-reference',
'podkre\u015blenie': 'emphasis',
'wyt\u0142uszczenie': 'strong',
'dos\u0142ownie': 'literal',
'math (translation required)': 'math',
'referencja-nazwana': 'named-reference',
'referencja-anonimowa': 'anonymous-reference',
'referencja-przypis': 'footnote-reference',
'referencja-cytat': 'citation-reference',
'referencja-podstawienie': 'substitution-reference',
'cel': 'target',
'referencja-uri': 'uri-reference',
'uri': 'uri-reference',
'url': 'uri-reference',
'surowe': 'raw',}
"""Mapping of Polish role names to canonical role names for interpreted text.
"""
|
mit
|
SteveXiSong/ECE757-SnoopingPredictions
|
src/mem/slicc/generate/dot.py
|
92
|
2077
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.util.code_formatter import code_formatter
def printDotty(sm, code):
code('digraph ${{sm.getIdent()}} {')
code.indent()
for t in sm.transitions:
# Don't print ignored transitions
if t.getActionShorthands() in ("--", "z"):
continue
        code('${{t.getStateShorthand()}} -> ${{t.getNextStateShorthand()}}')
        code(' [label="${{t.getEventShorthand()}}/${{t.getActionShorthands()}}"]')
code.dedent()
code('}')
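# The generated output looks roughly like this for a hypothetical machine
# with a single Load transition from state I to state S and action 'a':
#
#   digraph L1Cache {
#       I -> S
#        [label="Load/a"]
#   }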
|
bsd-3-clause
|
oostende/blackhole-2
|
lib/python/Plugins/SystemPlugins/SoftwareManager/SoftwareTools.py
|
47
|
9344
|
# -*- coding: iso-8859-1 -*-
from enigma import eConsoleAppContainer
from Components.Console import Console
from Components.About import about
from Components.PackageInfo import PackageInfoHandler
from Components.Language import language
from Components.Sources.List import List
from Components.Ipkg import IpkgComponent
from Components.Network import iNetwork
from Tools.Directories import pathExists, fileExists, resolveFilename, SCOPE_METADIR
from Tools.HardwareInfo import HardwareInfo
from time import time
class SoftwareTools(PackageInfoHandler):
lastDownloadDate = None
NetworkConnectionAvailable = None
list_updating = False
available_updates = 0
available_updatelist = []
available_packetlist = []
installed_packetlist = {}
def __init__(self):
aboutInfo = about.getImageVersionString()
if aboutInfo.startswith("dev-"):
self.ImageVersion = 'Experimental'
else:
self.ImageVersion = 'Stable'
self.language = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
PackageInfoHandler.__init__(self, self.statusCallback, blocking = False, neededTag = 'ALL_TAGS', neededFlag = self.ImageVersion)
self.directory = resolveFilename(SCOPE_METADIR)
self.list = List([])
self.NotifierCallback = None
self.Console = Console()
self.UpdateConsole = Console()
self.cmdList = []
self.unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', '-src')
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
def statusCallback(self, status, progress):
pass
def startSoftwareTools(self, callback = None):
if callback is not None:
self.NotifierCallback = callback
iNetwork.checkNetworkState(self.checkNetworkCB)
def checkNetworkCB(self,data):
if data is not None:
if data <= 2:
self.NetworkConnectionAvailable = True
self.getUpdates()
else:
self.NetworkConnectionAvailable = False
self.getUpdates()
def getUpdates(self, callback = None):
if self.lastDownloadDate is None:
if self.NetworkConnectionAvailable == True:
self.lastDownloadDate = time()
if self.list_updating is False and callback is None:
self.list_updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is False and callback is not None:
self.list_updating = True
self.NotifierCallback = callback
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is True and callback is not None:
self.NotifierCallback = callback
else:
self.list_updating = False
if callback is not None:
callback(False)
elif self.NotifierCallback is not None:
self.NotifierCallback(False)
else:
if self.NetworkConnectionAvailable == True:
self.lastDownloadDate = time()
if self.list_updating is False and callback is None:
self.list_updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is False and callback is not None:
self.list_updating = True
self.NotifierCallback = callback
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is True and callback is not None:
self.NotifierCallback = callback
else:
if self.list_updating and callback is not None:
self.NotifierCallback = callback
self.startIpkgListAvailable()
else:
self.list_updating = False
if callback is not None:
callback(False)
elif self.NotifierCallback is not None:
self.NotifierCallback(False)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_ERROR:
self.list_updating = False
if self.NotifierCallback is not None:
self.NotifierCallback(False)
elif event == IpkgComponent.EVENT_DONE:
if self.list_updating:
self.startIpkgListAvailable()
pass
def startIpkgListAvailable(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " list"
self.UpdateConsole.ePopen(cmd, self.IpkgListAvailableCB, callback)
def IpkgListAvailableCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
if self.list_updating:
self.available_packetlist = []
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
descr = l > 2 and tokens[2].strip() or ""
self.available_packetlist.append([name, version, descr])
if callback is None:
self.startInstallMetaPackage()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def startInstallMetaPackage(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if self.NetworkConnectionAvailable == True:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " install enigma2-meta enigma2-plugins-meta enigma2-skins-meta"
self.UpdateConsole.ePopen(cmd, self.InstallMetaPackageCB, callback)
else:
self.InstallMetaPackageCB(True)
def InstallMetaPackageCB(self, result, retval = None, extra_args = None):
(callback) = extra_args
if result:
self.fillPackagesIndexList()
if callback is None:
self.startIpkgListInstalled()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def startIpkgListInstalled(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " list_installed"
self.UpdateConsole.ePopen(cmd, self.IpkgListInstalledCB, callback)
def IpkgListInstalledCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
self.installed_packetlist = {}
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
self.installed_packetlist[name] = version
for package in self.packagesIndexlist[:]:
if not self.verifyPrerequisites(package[0]["prerequisites"]):
self.packagesIndexlist.remove(package)
for package in self.packagesIndexlist[:]:
attributes = package[0]["attributes"]
if attributes.has_key("packagetype"):
if attributes["packagetype"] == "internal":
self.packagesIndexlist.remove(package)
if callback is None:
self.countUpdates()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def countUpdates(self, callback = None):
self.available_updates = 0
self.available_updatelist = []
for package in self.packagesIndexlist[:]:
attributes = package[0]["attributes"]
packagename = attributes["packagename"]
for x in self.available_packetlist:
if x[0] == packagename:
if self.installed_packetlist.has_key(packagename):
if self.installed_packetlist[packagename] != x[1]:
self.available_updates +=1
self.available_updatelist.append([packagename])
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(True)
callback = None
elif self.NotifierCallback is not None:
self.NotifierCallback(True)
self.NotifierCallback = None
def startIpkgUpdate(self, callback = None):
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " update"
self.Console.ePopen(cmd, self.IpkgUpdateCB, callback)
def IpkgUpdateCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
if self.Console:
if len(self.Console.appContainers) == 0:
if callback is not None:
callback(True)
callback = None
def cleanupSoftwareTools(self):
self.list_updating = False
if self.NotifierCallback is not None:
self.NotifierCallback = None
self.ipkg.stop()
if self.Console is not None:
if len(self.Console.appContainers):
for name in self.Console.appContainers.keys():
self.Console.kill(name)
if self.UpdateConsole is not None:
if len(self.UpdateConsole.appContainers):
for name in self.UpdateConsole.appContainers.keys():
self.UpdateConsole.kill(name)
def verifyPrerequisites(self, prerequisites):
if prerequisites.has_key("hardware"):
hardware_found = False
for hardware in prerequisites["hardware"]:
if hardware == HardwareInfo().device_name:
hardware_found = True
if not hardware_found:
return False
return True
iSoftwareTools = SoftwareTools()
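# Standalone sketch of the ' - ' separated parsing performed by
# IpkgListAvailableCB and IpkgListInstalledCB above; the sample line in the
# comment is hypothetical.
def _parse_ipkg_list_line(line):
	unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', '-src')
	tokens = line.split(' - ')
	name = tokens[0].strip()
	if any(name.endswith(ext) for ext in unwanted_extensions):
		return None
	version = len(tokens) > 1 and tokens[1].strip() or ""
	description = len(tokens) > 2 and tokens[2].strip() or ""
	return name, version, description
# _parse_ipkg_list_line("enigma2-plugin-extensions-example - 1.0+git42 - Example plugin")
# -> ('enigma2-plugin-extensions-example', '1.0+git42', 'Example plugin')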
|
gpl-2.0
|
pfnet/chainer
|
chainerx/testing/array.py
|
8
|
6059
|
import numpy.testing
import chainerx
# NumPy-like assertion functions that accept both NumPy and ChainerX arrays
def _as_numpy(x):
if isinstance(x, chainerx.ndarray):
return chainerx.to_numpy(x)
assert isinstance(x, numpy.ndarray) or numpy.isscalar(x)
return x
def _check_dtype_and_strides(x, y, dtype_check, strides_check):
if (strides_check is not None
and dtype_check is not None
and strides_check
and not dtype_check):
raise ValueError(
'Combination of dtype_check=False and strides_check=True is not '
'allowed')
if dtype_check is None:
dtype_check = True
if strides_check is None:
strides_check = dtype_check
if (isinstance(x, (numpy.ndarray, chainerx.ndarray))
and isinstance(y, (numpy.ndarray, chainerx.ndarray))):
if strides_check:
assert x.strides == y.strides, (
'Strides mismatch: x: {}, y: {}'.format(x.strides, y.strides))
if dtype_check:
assert x.dtype.name == y.dtype.name, (
'Dtype mismatch: x: {}, y: {}'.format(x.dtype, y.dtype))
def _preprocess_input(a):
# Convert to something NumPy can handle and return
return _as_numpy(a)
def assert_allclose(
x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='', verbose=True):
"""Raises an AssertionError if two array_like objects are not equal up to a
tolerance.
Args:
x(numpy.ndarray or chainerx.ndarray): The actual object to check.
y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
rtol(float): Relative tolerance.
atol(float): Absolute tolerance.
equal_nan(bool): Allow NaN values if True. Otherwise, fail the
assertion if any NaN is found.
err_msg(str): The error message to be printed in case of failure.
verbose(bool): If ``True``, the conflicting values
are appended to the error message.
.. seealso:: :func:`numpy.testing.assert_allclose`
"""
x = _preprocess_input(x)
y = _preprocess_input(y)
numpy.testing.assert_allclose(
x, y, rtol=rtol, atol=atol, equal_nan=equal_nan, err_msg=err_msg,
verbose=verbose)
def assert_array_equal(x, y, err_msg='', verbose=True):
"""Raises an AssertionError if two array_like objects are not equal.
Args:
x(numpy.ndarray or chainerx.ndarray): The actual object to check.
y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
err_msg(str): The error message to be printed in case of failure.
verbose(bool): If ``True``, the conflicting values
are appended to the error message.
.. seealso:: :func:`numpy.testing.assert_array_equal`
"""
x = _preprocess_input(x)
y = _preprocess_input(y)
numpy.testing.assert_array_equal(x, y, err_msg=err_msg, verbose=verbose)
def assert_allclose_ex(x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='',
verbose=True, **kwargs):
"""assert_allclose_ex(
x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='', verbose=True,
*, dtype_check=True, strides_check=True)
Raises an AssertionError if two array_like objects are not equal up to a
tolerance.
Args:
x(numpy.ndarray or chainerx.ndarray): The actual object to check.
y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
rtol(float): Relative tolerance.
atol(float): Absolute tolerance.
equal_nan(bool): Allow NaN values if True. Otherwise, fail the
assertion if any NaN is found.
err_msg(str): The error message to be printed in case of failure.
verbose(bool): If ``True``, the conflicting values
are appended to the error message.
dtype_check(bool): If ``True``, consistency of dtype is also checked.
Disabling ``dtype_check`` also implies ``strides_check=False``.
strides_check(bool): If ``True``, consistency of strides is also
checked.
float16_rtol(float): Relative tolerance for float16 dtype.
float16_atol(float): Absolute tolerance for float16 dtype.
float32_rtol(float): Relative tolerance for float32 dtype.
float32_atol(float): Absolute tolerance for float32 dtype.
float64_rtol(float): Relative tolerance for float64 dtype.
float64_atol(float): Absolute tolerance for float64 dtype.
.. seealso:: :func:`numpy.testing.assert_allclose`
"""
dtype_check = kwargs.pop('dtype_check', None)
strides_check = kwargs.pop('strides_check', None)
atol = kwargs.pop(x.dtype.name + '_atol', atol)
rtol = kwargs.pop(x.dtype.name + '_rtol', rtol)
assert_allclose(x, y, rtol, atol, equal_nan, err_msg, verbose)
_check_dtype_and_strides(x, y, dtype_check, strides_check)
def assert_array_equal_ex(x, y, *args, **kwargs):
"""assert_array_equal_ex(
x, y, err_msg='', verbose=True, *, dtype_check=True,
strides_check=True)
Raises an AssertionError if two array_like objects are not equal.
Args:
x(numpy.ndarray or chainerx.ndarray): The actual object to check.
y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
err_msg(str): The error message to be printed in case of failure.
verbose(bool): If ``True``, the conflicting values
are appended to the error message.
dtype_check(bool): If ``True``, consistency of dtype is also checked.
Disabling ``dtype_check`` also implies ``strides_check=False``.
strides_check(bool): If ``True``, consistency of strides is also
checked.
.. seealso::
:func:`numpy.testing.assert_array_equal`
"""
dtype_check = kwargs.pop('dtype_check', None)
strides_check = kwargs.pop('strides_check', None)
assert_array_equal(x, y, *args, **kwargs)
_check_dtype_and_strides(x, y, dtype_check, strides_check)
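# Usage sketch for the *_ex variants above; the array values are hypothetical.
if __name__ == '__main__':
    x = chainerx.array([1.0, 2.0, 3.0], dtype='float16')
    y = numpy.array([1.0, 2.0, 3.001], dtype='float16')
    # float16_rtol/float16_atol from the docstring are picked up because
    # x.dtype is float16; strides are not compared across the two libraries.
    assert_allclose_ex(
        x, y, float16_rtol=1e-2, float16_atol=1e-2, strides_check=False)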
|
mit
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Tools/Scripts/pdeps.py
|
96
|
3937
|
#! /usr/bin/env python
# pdeps
#
# Find dependencies between a bunch of Python modules.
#
# Usage:
# pdeps file1.py file2.py ...
#
# Output:
# Four tables separated by lines like '--- Closure ---':
# 1) Direct dependencies, listing which module imports which other modules
# 2) The inverse of (1)
# 3) Indirect dependencies, or the closure of the above
# 4) The inverse of (3)
#
# To do:
# - command line options to select output type
# - option to automatically scan the Python library for referenced modules
# - option to limit output to particular modules
import sys
import re
import os
# Main program
#
def main():
args = sys.argv[1:]
if not args:
print 'usage: pdeps file.py file.py ...'
return 2
#
table = {}
for arg in args:
process(arg, table)
#
print '--- Uses ---'
printresults(table)
#
print '--- Used By ---'
inv = inverse(table)
printresults(inv)
#
print '--- Closure of Uses ---'
reach = closure(table)
printresults(reach)
#
print '--- Closure of Used By ---'
invreach = inverse(reach)
printresults(invreach)
#
return 0
# Compiled regular expressions to search for import statements.
# Note: despite the names, m_import matches "from X import ..." lines and
# m_from matches plain "import X, Y" lines; group 1 captures the module name(s).
#
m_import = re.compile('^[ \t]*from[ \t]+([^ \t]+)[ \t]+')
m_from = re.compile('^[ \t]*import[ \t]+([^#]+)')
# Collect data from one file
#
def process(filename, table):
fp = open(filename, 'r')
mod = os.path.basename(filename)
if mod[-3:] == '.py':
mod = mod[:-3]
table[mod] = list = []
while 1:
line = fp.readline()
if not line: break
while line[-1:] == '\\':
nextline = fp.readline()
if not nextline: break
line = line[:-1] + nextline
        m = m_import.match(line) or m_from.match(line)
        if m is None: continue
        words = m.group(1).split(',')
# print '#', line, words
for word in words:
word = word.strip()
if word not in list:
list.append(word)
# Compute closure (this is in fact totally general)
#
def closure(table):
modules = table.keys()
#
# Initialize reach with a copy of table
#
reach = {}
for mod in modules:
reach[mod] = table[mod][:]
#
# Iterate until no more change
#
change = 1
while change:
change = 0
for mod in modules:
for mo in reach[mod]:
if mo in modules:
for m in reach[mo]:
if m not in reach[mod]:
reach[mod].append(m)
change = 1
#
return reach
# Invert a table (this is again totally general).
# All keys of the original table are made keys of the inverse,
# so there may be empty lists in the inverse.
#
def inverse(table):
inv = {}
for key in table.keys():
if not inv.has_key(key):
inv[key] = []
for item in table[key]:
store(inv, item, key)
return inv
# Store "item" in "dict" under "key".
# The dictionary maps keys to lists of items.
# If there is no list for the key yet, it is created.
#
def store(dict, key, item):
if dict.has_key(key):
dict[key].append(item)
else:
dict[key] = [item]
# Tabulate results neatly
#
def printresults(table):
modules = table.keys()
maxlen = 0
for mod in modules: maxlen = max(maxlen, len(mod))
modules.sort()
for mod in modules:
list = table[mod]
list.sort()
print mod.ljust(maxlen), ':',
if mod in list:
print '(*)',
for ref in list:
print ref,
print
# Call main and honor exit status
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
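# Hand-checked illustration of closure() and inverse(); the module names in
# the demo table are hypothetical and not read from any file.
def _selftest():
    demo = {'a': ['b'], 'b': ['c'], 'c': []}
    assert closure(demo) == {'a': ['b', 'c'], 'b': ['c'], 'c': []}
    assert inverse(demo) == {'a': [], 'b': ['a'], 'c': ['b']}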
|
gpl-3.0
|
kaixinjxq/crosswalk-test-suite
|
cordova/cordova-lite-android-tests/lite/webapp_install.py
|
14
|
1900
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Lin, Wanming <[email protected]>
import unittest
import os
import sys
import commands
import comm
class TestWebAppInstall(unittest.TestCase):
def test_install(self):
comm.setUp()
app_name = "helloworld"
pkg_name = "com.example." + app_name.lower()
comm.app_install(app_name, pkg_name, self)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
OptiPop/external_chromium_org
|
native_client_sdk/src/build_tools/tests/update_nacl_manifest_test.py
|
43
|
25883
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import datetime
import hashlib
import logging
import os
import posixpath
import subprocess
import sys
import tempfile
import unittest
import urlparse
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import manifest_util
import update_nacl_manifest
from update_nacl_manifest import CANARY_BUNDLE_NAME, BIONIC_CANARY_BUNDLE_NAME
HTTPS_BASE_URL = 'https://storage.googleapis.com' \
'/nativeclient_mirror/nacl/nacl_sdk/'
OS_CR = ('cros',)
OS_L = ('linux',)
OS_M = ('mac',)
OS_ML = ('mac', 'linux')
OS_MW = ('mac', 'win')
OS_LW = ('linux', 'win')
OS_MLW = ('mac', 'linux', 'win')
OS_ALL = ('all',)
POST_STABLE = 'post_stable'
STABLE = 'stable'
BETA = 'beta'
DEV = 'dev'
CANARY = 'canary'
def GetArchiveURL(basename, version):
return urlparse.urljoin(HTTPS_BASE_URL, posixpath.join(version, basename))
def GetPlatformArchiveUrl(host_os, version):
basename = 'naclsdk_%s.tar.bz2' % (host_os,)
return GetArchiveURL(basename, version)
def GetBionicArchiveUrl(version):
basename = 'naclsdk_bionic.tar.bz2'
return GetArchiveURL(basename, version)
def MakeGsUrl(rel_path):
return update_nacl_manifest.GS_BUCKET_PATH + rel_path
def GetPathFromGsUrl(url):
assert url.startswith(update_nacl_manifest.GS_BUCKET_PATH)
return url[len(update_nacl_manifest.GS_BUCKET_PATH):]
def GetPathFromHttpsUrl(url):
assert url.startswith(HTTPS_BASE_URL)
return url[len(HTTPS_BASE_URL):]
def MakeArchive(url, host_os):
archive = manifest_util.Archive(host_os)
archive.url = url
# dummy values that won't succeed if we ever use them, but will pass
# validation. :)
archive.checksum = {'sha1': 'foobar'}
archive.size = 1
return archive
def MakePlatformArchive(host_os, version):
return MakeArchive(GetPlatformArchiveUrl(host_os, version), host_os)
def MakeBionicArchive(host_os, version):
return MakeArchive(GetBionicArchiveUrl(version), host_os)
def MakeNonPlatformArchive(basename, version):
return MakeArchive(GetArchiveURL(basename, version), 'all')
def MakeNonPepperBundle(name, with_archives=False):
bundle = manifest_util.Bundle(name)
bundle.version = 1
bundle.revision = 1
bundle.description = 'Dummy bundle'
bundle.recommended = 'yes'
bundle.stability = 'stable'
if with_archives:
for host_os in OS_MLW:
archive = manifest_util.Archive(host_os)
archive.url = 'http://example.com'
archive.checksum = {'sha1': 'blah'}
archive.size = 2
bundle.AddArchive(archive)
return bundle
def MakePepperBundle(major_version, revision=0, version=None, stability='dev',
bundle_name=None):
assert (version is None or
version.split('.')[0] == 'trunk' or
version.split('.')[0] == str(major_version))
if not bundle_name:
bundle_name = 'pepper_' + str(major_version)
bundle = manifest_util.Bundle(bundle_name)
bundle.version = major_version
bundle.revision = revision
bundle.description = 'Chrome %s bundle, revision %s' % (major_version,
revision)
bundle.repath = 'pepper_' + str(major_version)
bundle.recommended = 'no'
bundle.stability = stability
return bundle
def MakePlatformBundle(major_version, revision=0, version=None, host_oses=None,
stability='dev'):
bundle = MakePepperBundle(major_version, revision, version, stability)
if host_oses:
for host_os in host_oses:
bundle.AddArchive(MakePlatformArchive(host_os, version))
return bundle
def MakeBionicBundle(major_version, revision=0, version=None, host_oses=None):
bundle = MakePepperBundle(major_version, revision, version, 'dev')
if host_oses:
for host_os in host_oses:
bundle.AddArchive(MakeBionicArchive(host_os, version))
return bundle
class MakeManifest(manifest_util.SDKManifest):
def __init__(self, *args):
manifest_util.SDKManifest.__init__(self)
for bundle in args:
self.AddBundle(bundle)
def AddBundle(self, bundle):
self.MergeBundle(bundle, allow_existing=False)
class MakeHistory(object):
def __init__(self):
# used for a dummy timestamp
self.datetime = datetime.datetime.utcnow()
self.history = []
def Add(self, host_oses, channel, version):
for host_os in host_oses:
timestamp = self.datetime.strftime('%Y-%m-%d %H:%M:%S.%f')
self.history.append((host_os, channel, version, timestamp))
self.datetime += datetime.timedelta(0, -3600) # one hour earlier
self.datetime += datetime.timedelta(-1) # one day earlier
class MakeFiles(dict):
def AddOnlineManifest(self, manifest_string):
self['naclsdk_manifest2.json'] = manifest_string
def Add(self, bundle, add_archive_for_os=OS_MLW, add_json_for_os=OS_MLW):
for archive in bundle.GetArchives():
if not archive.host_os in add_archive_for_os:
continue
self.AddArchive(bundle, archive, archive.host_os in add_json_for_os)
def AddArchive(self, bundle, archive, add_json=True):
path = GetPathFromHttpsUrl(archive.url)
self[path] = 'My Dummy archive'
if add_json:
# add .json manifest snippet, it should look like a normal Bundle, but
# only has one archive.
new_bundle = manifest_util.Bundle('')
new_bundle.CopyFrom(bundle)
del new_bundle.archives[:]
new_bundle.AddArchive(archive)
self[path + '.json'] = new_bundle.GetDataAsString()
class TestDelegate(update_nacl_manifest.Delegate):
def __init__(self, manifest, history, files):
self.manifest = manifest
self.history = history
self.files = files
self.dryrun = 0
self.called_gsutil_cp = False
self.called_sendmail = False
def GetRepoManifest(self):
return self.manifest
def GetHistory(self):
return self.history
def GsUtil_ls(self, url):
path = GetPathFromGsUrl(url)
result = []
for filename in self.files.iterkeys():
if not filename.startswith(path):
continue
# Find the first slash after the prefix (path).
# +1, because if the slash is directly after path, then we want to find
# the following slash anyway.
slash = filename.find('/', len(path) + 1)
if slash != -1:
filename = filename[:slash]
result.append(MakeGsUrl(filename))
# Remove dupes.
return list(set(result))
def GsUtil_cat(self, url):
path = GetPathFromGsUrl(url)
if path not in self.files:
raise subprocess.CalledProcessError(1, 'gsutil cat %s' % (url,))
return self.files[path]
def GsUtil_cp(self, src, dest, stdin=None):
self.called_gsutil_cp = True
dest_path = GetPathFromGsUrl(dest)
if src == '-':
self.files[dest_path] = stdin
else:
src_path = GetPathFromGsUrl(src)
if src_path not in self.files:
raise subprocess.CalledProcessError(1, 'gsutil cp %s %s' % (src, dest))
self.files[dest_path] = self.files[src_path]
def SendMail(self, subject, text):
self.called_sendmail = True
# Shorthand for premade bundles/versions
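# Reading guide for the shorthands below: V<maj>_* constants are Chrome
# version strings (e.g. V18_0_1025_163 == '18.0.1025.163'); B<maj>_*_<OSes>
# constants are pepper bundles at that version with archives for the listed
# host OSes (M=mac, L=linux, W=win), and B<maj>_NONE bundles carry no archives.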
V18_0_1025_163 = '18.0.1025.163'
V18_0_1025_175 = '18.0.1025.175'
V18_0_1025_184 = '18.0.1025.184'
V19_0_1084_41 = '19.0.1084.41'
V19_0_1084_67 = '19.0.1084.67'
V21_0_1145_0 = '21.0.1145.0'
V21_0_1166_0 = '21.0.1166.0'
V26_0_1386_0 = '26.0.1386.0'
V26_0_1386_1 = '26.0.1386.1'
V37_0_2054_0 = '37.0.2054.0'
VTRUNK_140819 = 'trunk.140819'
VTRUNK_277776 = 'trunk.277776'
B18_0_1025_163_MLW = MakePlatformBundle(18, 132135, V18_0_1025_163, OS_MLW)
B18_0_1025_184_MLW = MakePlatformBundle(18, 134900, V18_0_1025_184, OS_MLW)
B18_NONE = MakePlatformBundle(18)
B19_0_1084_41_MLW = MakePlatformBundle(19, 134854, V19_0_1084_41, OS_MLW)
B19_0_1084_67_MLW = MakePlatformBundle(19, 142000, V19_0_1084_67, OS_MLW)
B19_NONE = MakePlatformBundle(19)
BCANARY_NONE = MakePepperBundle(0, stability=CANARY,
bundle_name=CANARY_BUNDLE_NAME)
B21_0_1145_0_MLW = MakePlatformBundle(21, 138079, V21_0_1145_0, OS_MLW)
B21_0_1166_0_MW = MakePlatformBundle(21, 140819, V21_0_1166_0, OS_MW)
B26_NONE = MakePlatformBundle(26)
B26_0_1386_0_MLW = MakePlatformBundle(26, 177362, V26_0_1386_0, OS_MLW)
B26_0_1386_1_MLW = MakePlatformBundle(26, 177439, V26_0_1386_1, OS_MLW)
BTRUNK_140819_MLW = MakePlatformBundle(21, 140819, VTRUNK_140819, OS_MLW)
BBIONIC_NONE = MakePepperBundle(0, stability=CANARY,
bundle_name=BIONIC_CANARY_BUNDLE_NAME)
BBIONIC_TRUNK_277776 = MakeBionicBundle(37, 277776, VTRUNK_277776, OS_L)
NON_PEPPER_BUNDLE_NOARCHIVES = MakeNonPepperBundle('foo')
NON_PEPPER_BUNDLE_ARCHIVES = MakeNonPepperBundle('bar', with_archives=True)
class TestUpdateManifest(unittest.TestCase):
def setUp(self):
self.history = MakeHistory()
self.files = MakeFiles()
self.version_mapping = {}
self.delegate = None
self.uploaded_manifest = None
self.manifest = None
def _MakeDelegate(self):
self.delegate = TestDelegate(self.manifest, self.history.history,
self.files)
def _Run(self, host_oses, extra_archives=None, fixed_bundle_versions=None):
update_nacl_manifest.Run(self.delegate, host_oses, extra_archives,
fixed_bundle_versions)
def _HasUploadedManifest(self):
return 'naclsdk_manifest2.json' in self.files
def _ReadUploadedManifest(self):
self.uploaded_manifest = manifest_util.SDKManifest()
self.uploaded_manifest.LoadDataFromString(
self.files['naclsdk_manifest2.json'])
def _AssertUploadedManifestHasBundle(self, bundle, stability,
bundle_name=None):
if not bundle_name:
bundle_name = bundle.name
uploaded_manifest_bundle = self.uploaded_manifest.GetBundle(bundle_name)
# Bundles that we create in the test (and in the manifest snippets) have
# their stability set to "dev". update_nacl_manifest correctly updates it.
# So we have to force the stability of |bundle| so they compare equal.
test_bundle = copy.copy(bundle)
test_bundle.stability = stability
if bundle_name:
test_bundle.name = bundle_name
self.assertEqual(uploaded_manifest_bundle, test_bundle)
def _AddCsvHistory(self, history):
import csv
import cStringIO
history_stream = cStringIO.StringIO(history)
self.history.history = [(platform, channel, version, date)
for platform, channel, version, date in csv.reader(history_stream)]
def testNoUpdateNeeded(self):
self.manifest = MakeManifest(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self.assertFalse(self._HasUploadedManifest())
# Add another bundle, make sure it still doesn't update
self.manifest.AddBundle(B19_0_1084_41_MLW)
self._Run(OS_MLW)
self.assertFalse(self._HasUploadedManifest())
def testSimpleUpdate(self):
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(B18_0_1025_163_MLW, BETA)
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 1)
def testOnePlatformHasNewerRelease(self):
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_M, BETA, V18_0_1025_175) # Mac has newer version
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(B18_0_1025_163_MLW, BETA)
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 1)
def testMultipleMissingPlatformsInHistory(self):
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_ML, BETA, V18_0_1025_184)
self.history.Add(OS_M, BETA, V18_0_1025_175)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(B18_0_1025_163_MLW, BETA)
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 1)
def testUpdateOnlyOneBundle(self):
self.manifest = MakeManifest(B18_NONE, B19_0_1084_41_MLW)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(B18_0_1025_163_MLW, BETA)
self._AssertUploadedManifestHasBundle(B19_0_1084_41_MLW, DEV)
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 2)
def testUpdateTwoBundles(self):
self.manifest = MakeManifest(B18_NONE, B19_NONE)
self.history.Add(OS_MLW, DEV, V19_0_1084_41)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self.files.Add(B19_0_1084_41_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(B18_0_1025_163_MLW, BETA)
self._AssertUploadedManifestHasBundle(B19_0_1084_41_MLW, DEV)
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 2)
def testUpdateWithMissingPlatformsInArchives(self):
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_MLW, BETA, V18_0_1025_184)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
self.files.Add(B18_0_1025_184_MLW, add_archive_for_os=OS_M)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(B18_0_1025_163_MLW, BETA)
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 1)
def testUpdateWithMissingManifestSnippets(self):
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_MLW, BETA, V18_0_1025_184)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
self.files.Add(B18_0_1025_184_MLW, add_json_for_os=OS_ML)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(B18_0_1025_163_MLW, BETA)
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 1)
def testRecommendedIsStable(self):
for channel in STABLE, BETA, DEV, CANARY:
self.setUp()
bundle = copy.deepcopy(B18_NONE)
self.manifest = MakeManifest(bundle)
self.history.Add(OS_MLW, channel, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 1)
uploaded_bundle = self.uploaded_manifest.GetBundle('pepper_18')
if channel == STABLE:
self.assertEqual(uploaded_bundle.recommended, 'yes')
else:
self.assertEqual(uploaded_bundle.recommended, 'no')
def testNoUpdateWithNonPepperBundle(self):
self.manifest = MakeManifest(NON_PEPPER_BUNDLE_NOARCHIVES,
B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self.assertFalse(self._HasUploadedManifest())
def testUpdateWithHistoryWithExtraneousPlatforms(self):
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_ML, BETA, V18_0_1025_184)
self.history.Add(OS_CR, BETA, V18_0_1025_184)
self.history.Add(OS_CR, BETA, V18_0_1025_175)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(B18_0_1025_163_MLW, BETA)
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 1)
def testSnippetWithStringRevisionAndVersion(self):
# This test exists because some manifest snippets were uploaded with
# strings for their revisions and versions. I want to make sure the
# resulting manifest is still consistent with the old format.
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
bundle_string_revision = MakePlatformBundle('18', '1234', V18_0_1025_163,
OS_MLW)
self.files.Add(bundle_string_revision)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
uploaded_bundle = self.uploaded_manifest.GetBundle(
bundle_string_revision.name)
self.assertEqual(uploaded_bundle.revision, 1234)
self.assertEqual(uploaded_bundle.version, 18)
def testUpdateCanary(self):
self.manifest = MakeManifest(copy.deepcopy(BCANARY_NONE))
self.files.Add(BTRUNK_140819_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(BTRUNK_140819_MLW, CANARY,
bundle_name=CANARY_BUNDLE_NAME)
def testCanaryShouldOnlyUseCanaryVersions(self):
canary_bundle = copy.deepcopy(BCANARY_NONE)
self.manifest = MakeManifest(canary_bundle)
self.history.Add(OS_MW, CANARY, V21_0_1166_0)
self.history.Add(OS_MW, BETA, V19_0_1084_41)
self.files.Add(B19_0_1084_41_MLW)
self.version_mapping[V21_0_1166_0] = VTRUNK_140819
self._MakeDelegate()
self.assertRaises(Exception, self._Run, OS_MLW)
def testExtensionWorksAsBz2(self):
# Allow old bundles with just .bz2 extension to work
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
bundle = copy.deepcopy(B18_0_1025_163_MLW)
archive_url = bundle.GetArchive('mac').url
bundle.GetArchive('mac').url = archive_url.replace('.tar', '')
self.files.Add(bundle)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(bundle, BETA)
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 1)
def testOnlyOneStableBundle(self):
# Make sure that any bundle that has an older version than STABLE is marked
# as POST_STABLE, even if the last version we found was BETA, DEV, etc.
for channel in STABLE, BETA, DEV, CANARY:
self.setUp()
self.manifest = MakeManifest(B18_NONE, B19_NONE)
self.history.Add(OS_MLW, channel, V18_0_1025_163)
self.history.Add(OS_MLW, STABLE, V19_0_1084_41)
self.files.Add(B18_0_1025_163_MLW)
self.files.Add(B19_0_1084_41_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
p18_bundle = self.uploaded_manifest.GetBundle(B18_NONE.name)
self.assertEqual(p18_bundle.stability, POST_STABLE)
self.assertEqual(p18_bundle.recommended, 'no')
p19_bundle = self.uploaded_manifest.GetBundle(B19_NONE.name)
self.assertEqual(p19_bundle.stability, STABLE)
self.assertEqual(p19_bundle.recommended, 'yes')
def testDontPushIfNoChange(self):
# Make an online manifest that already has this bundle.
online_manifest = MakeManifest(B18_0_1025_163_MLW)
self.files.AddOnlineManifest(online_manifest.GetDataAsString())
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_MLW, DEV, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self.assertFalse(self.delegate.called_gsutil_cp)
def testDontPushIfRollback(self):
# Make an online manifest that has a newer bundle
online_manifest = MakeManifest(B18_0_1025_184_MLW)
self.files.AddOnlineManifest(online_manifest.GetDataAsString())
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_MLW, DEV, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self.assertFalse(self.delegate.called_gsutil_cp)
def testRunWithFixedBundleVersions(self):
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self.files.Add(B18_0_1025_184_MLW)
self._MakeDelegate()
self._Run(OS_MLW, None, [('pepper_18', '18.0.1025.184')])
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(B18_0_1025_184_MLW, BETA)
self.assertEqual(len(self.uploaded_manifest.GetBundles()), 1)
def testRunWithMissingFixedBundleVersions(self):
self.manifest = MakeManifest(B18_NONE)
self.history.Add(OS_MLW, BETA, V18_0_1025_163)
self.files.Add(B18_0_1025_163_MLW)
self._MakeDelegate()
self._Run(OS_MLW, None, [('pepper_18', '18.0.1025.184')])
# Nothing should be uploaded if the user gives a missing fixed version.
self.assertFalse(self.delegate.called_gsutil_cp)
def testDontIncludeRandomBundles(self):
self.manifest = MakeManifest(B26_NONE)
self.history.Add(OS_MLW, BETA, V26_0_1386_0)
self.files.Add(B26_0_1386_0_MLW)
some_other_bundle = MakePepperBundle(26, 1, V26_0_1386_0, BETA)
some_other_archive = MakeNonPlatformArchive('some_other.tar.bz2',
V26_0_1386_0)
some_other_bundle.AddArchive(some_other_archive)
self.files.AddArchive(some_other_bundle, some_other_archive)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
uploaded_bundle = self.uploaded_manifest.GetBundle('pepper_26')
self.assertEqual(1, len(uploaded_bundle.GetHostOSArchives()))
def testNaclportsBundle(self):
self.manifest = MakeManifest(B26_NONE)
self.history.Add(OS_MLW, BETA, V26_0_1386_0)
self.files.Add(B26_0_1386_0_MLW)
# NaclPorts "bundle".
naclports_bundle = MakePepperBundle(26, 1, V26_0_1386_0, BETA)
naclports_archive = MakeNonPlatformArchive('naclports.tar.bz2',
V26_0_1386_0)
naclports_bundle.AddArchive(naclports_archive)
self.files.AddArchive(naclports_bundle, naclports_archive)
self._MakeDelegate()
self._Run(OS_MLW, [('naclports.tar.bz2', '26.0.1386.0')])
self._ReadUploadedManifest()
uploaded_bundle = self.uploaded_manifest.GetBundle('pepper_26')
self.assertEqual(2, len(uploaded_bundle.GetHostOSArchives()))
def testKeepBundleOrder(self):
# This is a regression test: when a bundle is skipped (because it isn't
# newer than the online bundle), it was added to the end of the list.
# Make an online manifest that already has B18.
online_manifest = MakeManifest(B18_0_1025_163_MLW)
self.files.AddOnlineManifest(online_manifest.GetDataAsString())
self.manifest = MakeManifest(B18_NONE, B19_NONE)
self.history.Add(OS_MLW, STABLE, V18_0_1025_163)
self.history.Add(OS_MLW, STABLE, V19_0_1084_41)
self.files.Add(B18_0_1025_163_MLW)
self.files.Add(B19_0_1084_41_MLW)
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
# Bundle 18 should be before bundle 19.
bundles = self.uploaded_manifest.GetBundles()
self.assertEqual(2, len(bundles))
self.assertEqual('pepper_18', bundles[0].name)
self.assertEqual('pepper_19', bundles[1].name)
def testBundleWithoutHistoryUsesOnline(self):
online_manifest = MakeManifest(B18_0_1025_163_MLW)
self.files.AddOnlineManifest(online_manifest.GetDataAsString())
self.manifest = MakeManifest(B18_NONE)
self._MakeDelegate()
# This should not raise.
self._Run(OS_MLW)
self._ReadUploadedManifest()
# But it should have sent an email nagging the users to lock this bundle
# manually.
self.assertTrue(self.delegate.called_sendmail)
uploaded_bundle = self.uploaded_manifest.GetBundle('pepper_18')
self.assertEqual(uploaded_bundle, B18_0_1025_163_MLW)
def testBundleWithoutHistoryOrOnlineRaises(self):
self.manifest = MakeManifest(B18_NONE)
self._MakeDelegate()
self.assertRaises(update_nacl_manifest.UnknownLockedBundleException,
self._Run, OS_MLW)
def testUpdateBionic(self):
bionic_bundle = copy.deepcopy(BBIONIC_NONE)
self.manifest = MakeManifest(bionic_bundle)
self.history.Add(OS_MW, CANARY, V37_0_2054_0)
self.files.Add(BBIONIC_TRUNK_277776)
self.version_mapping[V37_0_2054_0] = VTRUNK_277776
self._MakeDelegate()
self._Run(OS_MLW)
self._ReadUploadedManifest()
self._AssertUploadedManifestHasBundle(BBIONIC_TRUNK_277776, CANARY,
bundle_name=BIONIC_CANARY_BUNDLE_NAME)
class TestUpdateVitals(unittest.TestCase):
def setUp(self):
f = tempfile.NamedTemporaryFile('w', prefix="test_update_nacl_manifest")
self.test_file = f.name
f.close()
test_data = "Some test data"
self.sha1 = hashlib.sha1(test_data).hexdigest()
self.data_len = len(test_data)
with open(self.test_file, 'w') as f:
f.write(test_data)
def tearDown(self):
os.remove(self.test_file)
def testUpdateVitals(self):
archive = manifest_util.Archive(manifest_util.GetHostOS())
path = os.path.abspath(self.test_file)
if sys.platform == 'win32':
# On Windows, the path must start with three slashes, i.e.
# (file:///C:\whatever)
path = '/' + path
archive.url = 'file://' + path
bundle = MakePlatformBundle(18)
bundle.AddArchive(archive)
manifest = MakeManifest(bundle)
archive = manifest.GetBundles()[0]['archives'][0]
self.assertTrue('size' not in archive)
self.assertTrue('checksum' not in archive)
self.assertRaises(manifest_util.Error, manifest.Validate)
manifest.Validate(add_missing_info=True)
self.assertEqual(archive['size'], self.data_len)
self.assertEqual(archive['checksum']['sha1'], self.sha1)
if __name__ == '__main__':
logging.basicConfig(level=logging.CRITICAL)
# Uncomment the following line to enable more debugging info.
# logging.getLogger('update_nacl_manifest').setLevel(logging.INFO)
sys.exit(unittest.main())
|
bsd-3-clause
|
alanljj/oca_hr
|
hr_department_sequence/tests/test_hr_department.py
|
18
|
2682
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests.common import TransactionCase
class test_department(TransactionCase):
def setUp(self):
super(test_department, self).setUp()
# Clean up registries
self.registry('ir.model').clear_caches()
self.registry('ir.model.data').clear_caches()
# Get registries
self.user_model = self.registry("res.users")
self.department_model = self.registry("hr.department")
# Get context
self.context = self.user_model.context_get(self.cr, self.uid)
self.vals = {
'name': 'test',
'code': 'TEST',
'sequence': 1,
}
def test_create_department(self):
cr, uid, context = self.cr, self.uid, self.context
department_id = self.department_model.create(
self.cr, self.uid, self.vals, context=self.context)
department = self.department_model.browse(cr, uid, department_id,
context=context)
self.assertEqual(self.vals['name'], department.name)
self.assertEqual(self.vals['code'], department.code)
self.assertEqual(self.vals['sequence'], department.sequence)
def test_name_search_department(self):
cr, uid, context = self.cr, self.uid, self.context
department_id = self.department_model.create(
self.cr, self.uid, self.vals, context=self.context)
found_id = self.department_model.name_search(
cr, uid, name=self.vals['name'], context=context)[0][0]
self.assertEqual(department_id, found_id,
"Found wrong id for name=%s" % self.vals['name'])
|
agpl-3.0
|
rupakc/Kaggle-Compendium
|
Homesite Quote Conversion/home-baseline.py
|
1
|
3586
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
import numpy
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def label_encode_frame(dataframe):
columns = dataframe.columns
encoder = LabelEncoder()
for column in columns:
if type(dataframe[column][0]) is str:
dataframe[column] = encoder.fit_transform(dataframe[column].values)
return dataframe
def spilt_date(list_of_date_string,separator='-',format='yyyy-mm-dd'):
month_list = list([])
day_list = list([])
year_list = list([])
for date_string in list_of_date_string:
date_list = date_string.strip().split(separator)
month_list.append(date_list[1])
day_list.append(date_list[2])
year_list.append(date_list[0])
return month_list,day_list,year_list
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name
predicted_values = trained_model.predict(X_test)
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
filename = 'train.csv'
home_frame = pd.read_csv(filename)
class_labels = list(home_frame['QuoteConversion_Flag'].values)
del home_frame['QuoteConversion_Flag']
del home_frame['QuoteNumber']
month_list, day_list, year_list = spilt_date(list(home_frame['Original_Quote_Date'].values))
home_frame['Month'] = month_list
home_frame['Day'] = day_list
home_frame['Year'] = year_list
del home_frame['Original_Quote_Date']
label_encoded_frame = label_encode_frame(home_frame)
imputed_features = Imputer().fit_transform(label_encoded_frame.values)
X_train,X_test,y_train,y_test = train_test_split(imputed_features,class_labels,test_size=0.2,random_state=42)
classifier_list,classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
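# Tiny illustration of spilt_date() above; the dates are hypothetical and the
# call is independent of the Kaggle data loaded earlier.
months, days, years = spilt_date(['2015-03-17', '2014-11-02'])
assert months == ['03', '11'] and days == ['17', '02'] and years == ['2015', '2014']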
|
mit
|
cloud9209/cloud9209_flask
|
lib/flask/wrappers.py
|
773
|
6709
|
# -*- coding: utf-8 -*-
"""
flask.wrappers
~~~~~~~~~~~~~~
Implements the WSGI wrappers (request and response).
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
from werkzeug.exceptions import BadRequest
from .debughelpers import attach_enctype_error_multidict
from . import json
from .globals import _request_ctx_stack
_missing = object()
def _get_data(req, cache):
getter = getattr(req, 'get_data', None)
if getter is not None:
return getter(cache=cache)
return req.data
class Request(RequestBase):
"""The request object used by default in Flask. Remembers the
matched endpoint and view arguments.
It is what ends up as :class:`~flask.request`. If you want to replace
the request object used you can subclass this and set
:attr:`~flask.Flask.request_class` to your subclass.
The request object is a :class:`~werkzeug.wrappers.Request` subclass and
provides all of the attributes Werkzeug defines plus a few Flask
specific ones.
"""
#: the internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#:
#: .. versionadded:: 0.6
url_rule = None
#: a dict of view arguments that matched the request. If an exception
#: happened when matching, this will be `None`.
view_args = None
#: if matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception = None
# switched by the request context until 1.0 to opt in deprecated
# module functionality
_is_old_module = False
@property
def max_content_length(self):
"""Read-only view of the `MAX_CONTENT_LENGTH` config key."""
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app.config['MAX_CONTENT_LENGTH']
@property
def endpoint(self):
"""The endpoint that matched the request. This in combination with
:attr:`view_args` can be used to reconstruct the same or a
modified URL. If an exception happened when matching, this will
be `None`.
"""
if self.url_rule is not None:
return self.url_rule.endpoint
@property
def module(self):
"""The name of the current module if the request was dispatched
to an actual module. This is deprecated functionality, use blueprints
instead.
"""
from warnings import warn
warn(DeprecationWarning('modules were deprecated in favor of '
'blueprints. Use request.blueprint '
'instead.'), stacklevel=2)
if self._is_old_module:
return self.blueprint
@property
def blueprint(self):
"""The name of the current blueprint"""
if self.url_rule and '.' in self.url_rule.endpoint:
return self.url_rule.endpoint.rsplit('.', 1)[0]
@property
def json(self):
"""If the mimetype is `application/json` this will contain the
parsed JSON data. Otherwise this will be `None`.
The :meth:`get_json` method should be used instead.
"""
# XXX: deprecate property
return self.get_json()
def get_json(self, force=False, silent=False, cache=True):
"""Parses the incoming JSON request data and returns it. If
parsing fails the :meth:`on_json_loading_failed` method on the
request object will be invoked. By default this function will
only load the json data if the mimetype is ``application/json``
        but this can be overridden by the `force` parameter.
        :param force: if set to `True` the mimetype is ignored.
        :param silent: if set to `True` this method will fail silently
                       and return `None`.
:param cache: if set to `True` the parsed JSON data is remembered
on the request.
"""
rv = getattr(self, '_cached_json', _missing)
if rv is not _missing:
return rv
if self.mimetype != 'application/json' and not force:
return None
# We accept a request charset against the specification as
# certain clients have been using this in the past. This
# fits our general approach of being nice in what we accept
# and strict in what we send out.
request_charset = self.mimetype_params.get('charset')
try:
data = _get_data(self, cache)
if request_charset is not None:
rv = json.loads(data, encoding=request_charset)
else:
rv = json.loads(data)
except ValueError as e:
if silent:
rv = None
else:
rv = self.on_json_loading_failed(e)
if cache:
self._cached_json = rv
return rv
def on_json_loading_failed(self, e):
"""Called if decoding of the JSON data failed. The return value of
this method is used by :meth:`get_json` when an error occurred. The
default implementation just raises a :class:`BadRequest` exception.
.. versionchanged:: 0.10
Removed buggy previous behavior of generating a random JSON
response. If you want that behavior back you can trivially
add it by subclassing.
.. versionadded:: 0.8
"""
raise BadRequest()
def _load_form_data(self):
RequestBase._load_form_data(self)
# in debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.debug and \
self.mimetype != 'multipart/form-data' and not self.files:
attach_enctype_error_multidict(self)
class Response(ResponseBase):
"""The response object that is used by default in Flask. Works like the
response object from Werkzeug but is set to have an HTML mimetype by
default. Quite often you don't have to create this object yourself because
:meth:`~flask.Flask.make_response` will take care of that for you.
If you want to replace the response object used you can subclass this and
set :attr:`~flask.Flask.response_class` to your subclass.
"""
default_mimetype = 'text/html'
|
apache-2.0
|
rbreitenmoser/snapcraft
|
snapcraft/tests/test_plugin_catkin.py
|
1
|
33334
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import subprocess
import builtins
from unittest import mock
from snapcraft import tests
from snapcraft.plugins import catkin
class _CompareLists():
def __init__(self, test, expected):
self.test = test
self.expected = expected
def __eq__(self, packages):
self.test.assertEqual(len(packages), len(self.expected),
'Expected {} packages to be installed, '
'got {}'.format(len(self.expected),
len(packages)))
for expectation in self.expected:
self.test.assertTrue(expectation in packages,
'Expected "{}" to be installed'
.format(expectation))
return True
class CatkinPluginTestCase(tests.TestCase):
def setUp(self):
super().setUp()
class props:
rosdistro = 'indigo'
catkin_packages = ['my_package']
source_space = 'src'
source_subdir = None
self.properties = props()
patcher = mock.patch('snapcraft.repo.Ubuntu')
self.ubuntu_mock = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch(
'snapcraft.plugins.catkin._find_system_dependencies')
self.dependencies_mock = patcher.start()
self.addCleanup(patcher.stop)
def test_schema(self):
schema = catkin.CatkinPlugin.schema()
# Check rosdistro property
properties = schema['properties']
self.assertTrue('rosdistro' in properties,
'Expected "rosdistro" to be included in properties')
rosdistro = properties['rosdistro']
self.assertTrue('type' in rosdistro,
'Expected "type" to be included in "rosdistro"')
self.assertTrue('default' in rosdistro,
'Expected "default" to be included in "rosdistro"')
rosdistro_type = rosdistro['type']
self.assertEqual(rosdistro_type, 'string',
'Expected "rosdistro" "type" to be "string", but it '
'was "{}"'.format(rosdistro_type))
rosdistro_default = rosdistro['default']
self.assertEqual(rosdistro_default, 'indigo',
'Expected "rosdistro" "default" to be "indigo", but '
'it was "{}"'.format(rosdistro_default))
# Check catkin-packages property
self.assertTrue('catkin-packages' in properties,
'Expected "catkin-packages" to be included in '
'properties')
catkin_packages = properties['catkin-packages']
self.assertTrue('type' in catkin_packages,
'Expected "type" to be included in "catkin-packages"')
self.assertTrue('default' in catkin_packages,
'Expected "default" to be included in '
'"catkin-packages"')
self.assertTrue('minitems' in catkin_packages,
'Expected "minitems" to be included in '
'"catkin-packages"')
self.assertTrue('uniqueItems' in catkin_packages,
'Expected "uniqueItems" to be included in '
'"catkin-packages"')
self.assertTrue('items' in catkin_packages,
'Expected "items" to be included in "catkin-packages"')
catkin_packages_type = catkin_packages['type']
self.assertEqual(catkin_packages_type, 'array',
'Expected "catkin-packages" "type" to be "aray", but '
'it was "{}"'.format(catkin_packages_type))
catkin_packages_default = catkin_packages['default']
self.assertEqual(catkin_packages_default, [],
'Expected "catkin-packages" "default" to be [], but '
'it was {}'.format(catkin_packages_default))
catkin_packages_minitems = catkin_packages['minitems']
self.assertEqual(catkin_packages_minitems, 1,
'Expected "catkin-packages" "minitems" to be 1, but '
'it was {}'.format(catkin_packages_minitems))
self.assertTrue(catkin_packages['uniqueItems'])
catkin_packages_items = catkin_packages['items']
self.assertTrue('type' in catkin_packages_items,
'Expected "type" to be included in "catkin-packages" '
'"items"')
catkin_packages_items_type = catkin_packages_items['type']
self.assertEqual(catkin_packages_items_type, 'string',
'Expected "catkin-packages" "item" "type" to be '
'"string", but it was "{}"'
.format(catkin_packages_items_type))
# Check source-space property
self.assertTrue('source-space' in properties,
'Expected "source-space" to be included in properties')
source_space = properties['source-space']
        self.assertTrue('type' in source_space,
                        'Expected "type" to be included in "source-space"')
        self.assertTrue('default' in source_space,
                        'Expected "default" to be included in "source-space"')
source_space_type = source_space['type']
self.assertEqual(source_space_type, 'string',
'Expected "source-space" "type" to be "string", but '
'it was "{}"'.format(source_space_type))
source_space_default = source_space['default']
self.assertEqual(source_space_default, 'src',
'Expected "source-space" "default" to be "src", but '
'it was "{}"'.format(source_space_default))
# Check required
self.assertTrue('catkin-packages' in schema['required'],
'Expected "catkin-packages" to be included in '
'"required"')
def test_pull_debian_dependencies(self):
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.sourcedir, 'src'))
self.dependencies_mock.return_value = ['foo', 'bar', 'baz']
plugin.pull()
# Verify that dependencies were found as expected
self.dependencies_mock.assert_called_once_with(
{'my_package'}, self.properties.rosdistro,
os.path.join(plugin.sourcedir, 'src'),
os.path.join(plugin.partdir, 'rosdep'),
plugin.PLUGIN_STAGE_SOURCES)
# Verify that the dependencies were installed
self.ubuntu_mock.return_value.get.assert_called_with(
_CompareLists(self, ['foo', 'bar', 'baz']))
self.ubuntu_mock.return_value.unpack.assert_called_with(
plugin.installdir)
def test_pull_local_dependencies(self):
self.properties.catkin_packages.append('package_2')
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.sourcedir, 'src'))
# No system dependencies (only local)
self.dependencies_mock.return_value = []
plugin.pull()
# Verify that dependencies were found as expected
self.dependencies_mock.assert_called_once_with(
{'my_package', 'package_2'}, self.properties.rosdistro,
os.path.join(plugin.sourcedir, 'src'),
os.path.join(plugin.partdir, 'rosdep'),
plugin.PLUGIN_STAGE_SOURCES)
# Verify that no .deb packages were installed
self.assertTrue(mock.call().unpack(plugin.installdir) not in
self.ubuntu_mock.mock_calls)
def test_valid_catkin_workspace_src(self):
# sourcedir is expected to be the root of the Catkin workspace. Since
# it contains a 'src' directory, this is a valid Catkin workspace.
try:
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.sourcedir, 'src'))
plugin.pull()
except FileNotFoundError:
self.fail('Unexpectedly raised an exception when the Catkin '
'workspace was valid')
def test_invalid_catkin_workspace_no_src(self):
# sourcedir is expected to be the root of the Catkin workspace. Since
# it does not contain a `src` folder and `source-space` is 'src', this
# should fail.
with self.assertRaises(FileNotFoundError) as raised:
plugin = catkin.CatkinPlugin('test-part', self.properties)
plugin.pull()
self.assertEqual(
str(raised.exception),
'Unable to find package path: "{}"'.format(os.path.join(
plugin.sourcedir, 'src')))
def test_valid_catkin_workspace_source_space(self):
self.properties.source_space = 'foo'
# sourcedir is expected to be the root of the Catkin workspace.
# Normally this would mean it contained a `src` directory, but it can
# be remapped via the `source-space` key.
try:
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.sourcedir,
self.properties.source_space))
plugin.pull()
except FileNotFoundError:
self.fail('Unexpectedly raised an exception when the Catkin '
'src was remapped in a valid manner')
def test_invalid_catkin_workspace_invalid_source_space(self):
self.properties.source_space = 'foo'
# sourcedir is expected to be the root of the Catkin workspace. Since
# it does not contain a `src` folder and source_space wasn't
# specified, this should fail.
with self.assertRaises(FileNotFoundError) as raised:
plugin = catkin.CatkinPlugin('test-part', self.properties)
plugin.pull()
self.assertEqual(
str(raised.exception),
'Unable to find package path: "{}"'.format(os.path.join(
plugin.sourcedir, self.properties.source_space)))
def test_invalid_catkin_workspace_source_space_same_as_source(self):
self.properties.source_space = '.'
# sourcedir is expected to be the root of the Catkin workspace. Since
# source_space was specified to be the same as the root, this should
# fail.
with self.assertRaises(RuntimeError) as raised:
catkin.CatkinPlugin('test-part', self.properties).pull()
self.assertEqual(str(raised.exception),
'source-space cannot be the root of the Catkin '
'workspace')
@mock.patch.object(catkin.CatkinPlugin, 'run')
@mock.patch.object(catkin.CatkinPlugin, '_run_in_bash')
@mock.patch.object(catkin.CatkinPlugin, 'run_output', return_value='foo')
@mock.patch.object(catkin.CatkinPlugin, '_prepare_build')
@mock.patch.object(catkin.CatkinPlugin, '_finish_build')
def test_build(self, finish_build_mock, prepare_build_mock,
run_output_mock, bashrun_mock, run_mock):
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.sourcedir, 'src'))
plugin.build()
prepare_build_mock.assert_called_once_with()
# Matching like this for order independence (otherwise it would be
# quite fragile)
class check_build_command():
def __eq__(self, args):
command = ' '.join(args)
return (
args[0] == 'catkin_make_isolated' and
'--install' in command and
'--pkg my_package' in command and
'--directory {}'.format(plugin.builddir) in command and
'--install-space {}'.format(plugin.rosdir) in command and
'--source-space {}'.format(os.path.join(
plugin.builddir,
plugin.options.source_space)) in command)
bashrun_mock.assert_called_with(check_build_command())
self.assertFalse(
self.dependencies_mock.called,
'Dependencies should have been discovered in the pull() step')
finish_build_mock.assert_called_once_with()
@mock.patch.object(catkin.CatkinPlugin, 'run')
@mock.patch.object(catkin.CatkinPlugin, '_run_in_bash')
@mock.patch.object(catkin.CatkinPlugin, 'run_output', return_value='foo')
@mock.patch.object(catkin.CatkinPlugin, '_prepare_build')
@mock.patch.object(catkin.CatkinPlugin, '_finish_build')
def test_build_multiple(self, finish_build_mock, prepare_build_mock,
run_output_mock, bashrun_mock, run_mock):
self.properties.catkin_packages.append('package_2')
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.sourcedir, 'src'))
plugin.build()
class check_pkg_arguments():
def __init__(self, test):
self.test = test
def __eq__(self, args):
index = args.index('--pkg')
packages = args[index+1:index+3]
if 'my_package' not in packages:
self.test.fail('Expected "my_package" to be installed '
'within the same command as "package_2"')
if 'package_2' not in packages:
self.test.fail('Expected "package_2" to be installed '
'within the same command as "my_package"')
return True
bashrun_mock.assert_called_with(check_pkg_arguments(self))
self.assertFalse(
self.dependencies_mock.called,
'Dependencies should have been discovered in the pull() step')
finish_build_mock.assert_called_once_with()
@mock.patch.object(catkin.CatkinPlugin, 'run')
@mock.patch.object(catkin.CatkinPlugin, 'run_output', return_value='foo')
def test_build_runs_in_bash(self, run_output_mock, run_mock):
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.sourcedir, 'src'))
plugin.build()
run_mock.assert_has_calls([
mock.call(['/bin/bash', mock.ANY], cwd=mock.ANY)
])
@mock.patch.object(catkin.CatkinPlugin, '_prepare_build')
@mock.patch.object(catkin.CatkinPlugin, '_finish_build')
def test_build_encompasses_source_space(self, finish_mock, prepare_mock):
self.properties.catkin_packages = []
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.sourcedir, 'src'))
plugin.build()
self.assertTrue(os.path.isdir(os.path.join(plugin.builddir, 'src')))
@mock.patch.object(catkin.CatkinPlugin, '_prepare_build')
@mock.patch.object(catkin.CatkinPlugin, '_finish_build')
def test_build_encompasses_remapped_source_space(self, finish_mock,
prepare_mock):
self.properties.catkin_packages = []
self.properties.source_space = 'foo'
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.sourcedir, 'foo'))
plugin.build()
self.assertTrue(os.path.isdir(os.path.join(plugin.builddir, 'foo')))
@mock.patch.object(catkin.CatkinPlugin, '_prepare_build')
@mock.patch.object(catkin.CatkinPlugin, '_finish_build')
def test_build_accounts_for_source_subdir(self, finish_mock, prepare_mock):
self.properties.catkin_packages = []
self.properties.source_subdir = 'workspace'
self.properties.source_space = 'foo'
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.sourcedir, 'workspace', 'foo'))
plugin.build()
self.assertTrue(os.path.isdir(os.path.join(plugin.builddir, 'foo')))
def test_prepare_build(self):
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.rosdir, 'test'))
# Place a few .cmake files with incorrect paths, and some files that
# shouldn't be changed.
files = [
{
'path': 'fooConfig.cmake',
'contents': '"/usr/lib/foo"',
'expected': '"{}/usr/lib/foo"'.format(plugin.installdir),
},
{
'path': 'bar.cmake',
'contents': '"/usr/lib/bar"',
'expected': '"/usr/lib/bar"',
},
{
'path': 'test/bazConfig.cmake',
'contents': '"/test/baz;/usr/lib/baz"',
'expected': '"{0}/test/baz;{0}/usr/lib/baz"'.format(
plugin.installdir),
},
{
'path': 'test/quxConfig.cmake',
'contents': 'qux',
'expected': 'qux',
},
{
'path': 'test/installedConfig.cmake',
'contents': '"{}/foo"'.format(plugin.installdir),
'expected': '"{}/foo"'.format(plugin.installdir),
}
]
for fileInfo in files:
with open(os.path.join(plugin.rosdir, fileInfo['path']), 'w') as f:
f.write(fileInfo['contents'])
plugin._prepare_build()
for fileInfo in files:
with open(os.path.join(plugin.rosdir, fileInfo['path']), 'r') as f:
self.assertEqual(f.read(), fileInfo['expected'])
@mock.patch.object(catkin.CatkinPlugin, 'run')
@mock.patch.object(catkin.CatkinPlugin, 'run_output', return_value='foo')
def test_finish_build_python_shebangs(self, run_output_mock, run_mock):
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.rosdir, 'bin'))
# Place a few files with bad shebangs, and some files that shouldn't be
# changed.
files = [
{
'path': os.path.join(plugin.rosdir, '_setup_util.py'),
'contents': '#!/foo/bar/baz/python',
'expected': '#!/usr/bin/env python',
},
{
'path': os.path.join(plugin.rosdir, 'bin/catkin_find'),
'contents': '#!/foo/baz/python',
'expected': '#!/usr/bin/env python',
},
{
'path': os.path.join(plugin.rosdir, 'foo'),
'contents': 'foo',
'expected': 'foo',
}
]
for file_info in files:
with open(file_info['path'], 'w') as f:
f.write(file_info['contents'])
plugin._finish_build()
for file_info in files:
with open(os.path.join(plugin.rosdir,
file_info['path']), 'r') as f:
self.assertEqual(f.read(), file_info['expected'])
@mock.patch.object(catkin.CatkinPlugin, 'run')
@mock.patch.object(catkin.CatkinPlugin, 'run_output', return_value='foo')
def test_finish_build_absolute_python(self, run_output_mock, run_mock):
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(os.path.join(plugin.rosdir, 'etc', 'catkin', 'profile.d'))
ros_profile = os.path.join(plugin.rosdir, 'etc', 'catkin', 'profile.d',
'10.ros.sh')
# Place 10.ros.sh with an absolute path to python
with open(ros_profile, 'w') as f:
f.write('/usr/bin/python foo')
plugin._finish_build()
# Verify that the absolute path in 10.ros.sh was rewritten correctly
with open(ros_profile, 'r') as f:
self.assertEqual(f.read(), 'python foo',
'The absolute path to python was not replaced as '
'expected')
@mock.patch.object(catkin.CatkinPlugin, 'run')
@mock.patch.object(catkin.CatkinPlugin, 'run_output', return_value='foo')
def test_finish_build_binary(self, run_output_mock, run_mock):
plugin = catkin.CatkinPlugin('test-part', self.properties)
os.makedirs(plugin.rosdir)
# Place a file to be discovered by _finish_build().
open(os.path.join(plugin.rosdir, 'foo'), 'w').close()
file_mock = mock.mock_open()
with mock.patch.object(builtins, 'open', file_mock):
# Reading a binary file may throw a UnicodeDecodeError. Make sure
# that's handled.
file_mock.return_value.read.side_effect = UnicodeDecodeError(
'foo', b'bar', 1, 2, 'baz')
try:
plugin._finish_build()
except UnicodeDecodeError:
self.fail('Expected _finish_build to handle binary files')
@mock.patch.object(catkin.CatkinPlugin, 'run')
@mock.patch.object(catkin.CatkinPlugin, 'run_output', return_value='foo')
def test_finish_build_cmake_prefix_path(self, run_output_mock, run_mock):
plugin = catkin.CatkinPlugin('test-part', self.properties)
setup_file = os.path.join(plugin.rosdir, '_setup_util.py')
os.makedirs(os.path.dirname(setup_file))
with open(setup_file, 'w') as f:
f.write("CMAKE_PREFIX_PATH = '{0}/{1};{0}\n".format(
plugin.rosdir, plugin.options.rosdistro))
plugin._finish_build()
expected = 'CMAKE_PREFIX_PATH = []\n'
with open(setup_file, 'r') as f:
self.assertEqual(
f.read(), expected,
'The absolute path to python or the CMAKE_PREFIX_PATH '
'was not replaced as expected')
@mock.patch.object(catkin.CatkinPlugin, 'run_output', return_value='bar')
def test_run_environment(self, run_mock):
plugin = catkin.CatkinPlugin('test-part', self.properties)
environment = plugin.env('/foo')
self.assertTrue('PYTHONPATH={}'.format(os.path.join(
'/foo', 'usr', 'lib', 'bar', 'dist-packages')
in environment))
self.assertTrue('ROS_MASTER_URI=http://localhost:11311' in environment)
self.assertTrue('ROS_HOME=$SNAP_USER_DATA/ros' in environment)
self.assertTrue('_CATKIN_SETUP_DIR={}'.format(os.path.join(
'/foo', 'opt', 'ros', self.properties.rosdistro)) in environment)
self.assertTrue('. {}'.format('/foo', 'opt', 'ros', 'setup.sh') in
'\n'.join(environment),
'Expected ROS\'s setup.sh to be sourced')
class FindSystemDependenciesTestCase(tests.TestCase):
def setUp(self):
super().setUp()
patcher = mock.patch('snapcraft.plugins.catkin._Rosdep')
self.rosdep_mock = patcher.start()
self.addCleanup(patcher.stop)
def verify_rosdep_setup(self, rosdistro, package_path, rosdep_path,
sources):
self.rosdep_mock.assert_has_calls([
mock.call(rosdistro, package_path, rosdep_path, sources),
mock.call().setup()])
def test_find_system_dependencies_system_only(self):
mockInstance = self.rosdep_mock.return_value
mockInstance.get_dependencies.return_value = ['bar']
mockInstance.resolve_dependency.return_value = 'baz'
self.assertEqual(['baz'], catkin._find_system_dependencies(
{'foo'}, 'indigo', '/test/path1', '/test/path2', []))
# Verify that rosdep was setup as expected
self.verify_rosdep_setup('indigo', '/test/path1', '/test/path2', [])
mockInstance.get_dependencies.assert_called_once_with('foo')
mockInstance.resolve_dependency.assert_called_once_with('bar')
def test_find_system_dependencies_local_only(self):
mockInstance = self.rosdep_mock.return_value
mockInstance.get_dependencies.return_value = ['bar']
self.assertEqual([], catkin._find_system_dependencies(
{'foo', 'bar'}, 'indigo', '/test/path1', '/test/path2', []))
# Verify that rosdep was setup as expected
self.verify_rosdep_setup('indigo', '/test/path1', '/test/path2', [])
mockInstance.get_dependencies.assert_has_calls([mock.call('foo'),
mock.call('bar')],
any_order=True)
mockInstance.resolve_dependency.assert_not_called()
def test_find_system_dependencies_mixed(self):
mockInstance = self.rosdep_mock.return_value
mockInstance.get_dependencies.return_value = ['bar', 'baz']
mockInstance.resolve_dependency.return_value = 'qux'
self.assertEqual(['qux'], catkin._find_system_dependencies(
{'foo', 'bar'}, 'indigo', '/test/path1', '/test/path2', []))
# Verify that rosdep was setup as expected
self.verify_rosdep_setup('indigo', '/test/path1', '/test/path2', [])
mockInstance.get_dependencies.assert_has_calls([mock.call('foo'),
mock.call('bar')],
any_order=True)
mockInstance.resolve_dependency.assert_called_once_with('baz')
def test_find_system_dependencies_missing_local_dependency(self):
mockInstance = self.rosdep_mock.return_value
        # Set up a dependency on a non-existent package that doesn't resolve
        # to a system dependency.
mockInstance.get_dependencies.return_value = ['bar']
mockInstance.resolve_dependency.return_value = None
with self.assertRaises(RuntimeError) as raised:
catkin._find_system_dependencies({'foo'}, 'indigo', '/test/path1',
'/test/path2', [])
self.assertEqual(raised.exception.args[0],
'Package "bar" isn\'t a valid system dependency. Did '
'you forget to add it to catkin-packages? If not, '
'add the Ubuntu package containing it to '
'stage-packages until you can get it into the rosdep '
'database.')
def test_find_system_dependencies_roscpp_includes_gplusplus(self):
mockInstance = self.rosdep_mock.return_value
mockInstance.get_dependencies.return_value = ['roscpp']
mockInstance.resolve_dependency.return_value = 'baz'
self.assertEqual(_CompareLists(self, ['baz', 'g++']),
catkin._find_system_dependencies({'foo'}, 'indigo',
'/test/path1',
'/test/path2', []))
# Verify that rosdep was setup as expected
self.verify_rosdep_setup('indigo', '/test/path1', '/test/path2', [])
mockInstance.get_dependencies.assert_called_once_with('foo')
mockInstance.resolve_dependency.assert_called_once_with('roscpp')
class RosdepTestCase(tests.TestCase):
def setUp(self):
super().setUp()
self.rosdep = catkin._Rosdep('ros_distro', 'package_path',
'rosdep_path', 'sources')
patcher = mock.patch('snapcraft.repo.Ubuntu')
self.ubuntu_mock = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('subprocess.check_output')
self.check_output_mock = patcher.start()
self.addCleanup(patcher.stop)
def test_setup(self):
# Return something other than a Mock to ease later assertions
self.check_output_mock.return_value = b''
self.rosdep.setup()
# Verify that only rosdep was installed (no other .debs)
self.assertEqual(self.ubuntu_mock.call_count, 1)
self.assertEqual(self.ubuntu_mock.return_value.get.call_count, 1)
self.assertEqual(self.ubuntu_mock.return_value.unpack.call_count, 1)
self.ubuntu_mock.assert_has_calls([
mock.call(self.rosdep._rosdep_path, sources='sources'),
mock.call().get(['python-rosdep']),
mock.call().unpack(self.rosdep._rosdep_install_path)])
# Verify that rosdep was initialized and updated
self.assertEqual(self.check_output_mock.call_count, 2)
self.check_output_mock.assert_has_calls([
mock.call(['rosdep', 'init'], env=mock.ANY),
mock.call(['rosdep', 'update'], env=mock.ANY)
])
def test_setup_can_run_multiple_times(self):
self.rosdep.setup()
# Make sure running setup() again doesn't have problems with the old
# environment
try:
self.rosdep.setup()
except FileExistsError:
self.fail('Unexpectedly raised an exception when running setup() '
'multiple times')
def test_setup_initialization_failure(self):
def run(args, **kwargs):
if args == ['rosdep', 'init']:
raise subprocess.CalledProcessError(1, 'foo', b'bar')
return mock.DEFAULT
self.check_output_mock.side_effect = run
with self.assertRaises(RuntimeError) as raised:
self.rosdep.setup()
self.assertEqual(str(raised.exception),
'Error initializing rosdep database:\nbar')
def test_setup_update_failure(self):
def run(args, **kwargs):
if args == ['rosdep', 'update']:
raise subprocess.CalledProcessError(1, 'foo', b'bar')
return mock.DEFAULT
self.check_output_mock.side_effect = run
with self.assertRaises(RuntimeError) as raised:
self.rosdep.setup()
self.assertEqual(str(raised.exception),
'Error updating rosdep database:\nbar')
def test_get_dependencies(self):
self.check_output_mock.return_value = b'foo\nbar\nbaz'
self.assertEqual(self.rosdep.get_dependencies('foo'),
['foo', 'bar', 'baz'])
self.check_output_mock.assert_called_with(['rosdep', 'keys', 'foo'],
env=mock.ANY)
def test_get_dependencies_no_dependencies(self):
self.check_output_mock.return_value = b''
self.assertEqual(self.rosdep.get_dependencies('foo'), [])
def test_get_dependencies_invalid_package(self):
self.check_output_mock.side_effect = subprocess.CalledProcessError(
1, 'foo')
with self.assertRaises(FileNotFoundError) as raised:
self.rosdep.get_dependencies('bar')
self.assertEqual(str(raised.exception),
'Unable to find Catkin package "bar"')
def test_resolve_dependency(self):
self.check_output_mock.return_value = b'#apt\nmylib-dev'
self.assertEqual(self.rosdep.resolve_dependency('foo'), 'mylib-dev')
self.check_output_mock.assert_called_with(
['rosdep', 'resolve', 'foo', '--rosdistro', 'ros_distro', '--os',
'ubuntu:trusty'],
env=mock.ANY)
def test_resolve_invalid_dependency(self):
self.check_output_mock.side_effect = subprocess.CalledProcessError(
1, 'foo')
self.assertEqual(self.rosdep.resolve_dependency('bar'), None)
def test_resolve_dependency_weird_output(self):
self.check_output_mock.return_value = b'mylib-dev'
with self.assertRaises(RuntimeError) as raised:
self.rosdep.resolve_dependency('')
self.assertEqual(str(raised.exception),
'Unexpected rosdep resolve output:\nmylib-dev')
def test_run(self):
rosdep = self.rosdep
rosdep._run(['qux'])
class check_env():
def __eq__(self, env):
rosdep_sources_path = rosdep._rosdep_sources_path
return (
env['PATH'] == os.path.join(rosdep._rosdep_install_path,
'usr', 'bin') and
env['PYTHONPATH'] == os.path.join(
rosdep._rosdep_install_path, 'usr', 'lib', 'python2.7',
'dist-packages') and
env['ROSDEP_SOURCE_PATH'] == rosdep_sources_path and
env['ROS_HOME'] == rosdep._rosdep_cache_path and
env['ROS_PACKAGE_PATH'] == rosdep._ros_package_path)
self.check_output_mock.assert_called_with(mock.ANY, env=check_env())
|
gpl-3.0
|
adaussy/eclipse-monkey-revival
|
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_posix.py
|
6
|
17908
|
"Test posix functions"
from test import test_support
# Skip these tests if there is no posix module.
posix = test_support.import_module('posix')
import errno
import sys
import time
import os
import pwd
import shutil
import stat
import tempfile
import unittest
import warnings
_DUMMY_SYMLINK = os.path.join(tempfile.gettempdir(),
test_support.TESTFN + '-dummy-symlink')
warnings.filterwarnings('ignore', '.* potential security risk .*',
RuntimeWarning)
class PosixTester(unittest.TestCase):
def setUp(self):
# create empty file
fp = open(test_support.TESTFN, 'w+')
fp.close()
self.teardown_files = [ test_support.TESTFN ]
def tearDown(self):
for teardown_file in self.teardown_files:
os.unlink(teardown_file)
def testNoArgFunctions(self):
# test posix functions which take no arguments and have
# no side-effects which we need to cleanup (e.g., fork, wait, abort)
NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdu", "uname",
"times", "getloadavg", "tmpnam",
"getegid", "geteuid", "getgid", "getgroups",
"getpid", "getpgrp", "getppid", "getuid",
]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "", DeprecationWarning)
for name in NO_ARG_FUNCTIONS:
posix_func = getattr(posix, name, None)
if posix_func is not None:
posix_func()
self.assertRaises(TypeError, posix_func, 1)
if hasattr(posix, 'getresuid'):
def test_getresuid(self):
user_ids = posix.getresuid()
self.assertEqual(len(user_ids), 3)
for val in user_ids:
self.assertGreaterEqual(val, 0)
if hasattr(posix, 'getresgid'):
def test_getresgid(self):
group_ids = posix.getresgid()
self.assertEqual(len(group_ids), 3)
for val in group_ids:
self.assertGreaterEqual(val, 0)
if hasattr(posix, 'setresuid'):
def test_setresuid(self):
current_user_ids = posix.getresuid()
self.assertIsNone(posix.setresuid(*current_user_ids))
# -1 means don't change that value.
self.assertIsNone(posix.setresuid(-1, -1, -1))
def test_setresuid_exception(self):
# Don't do this test if someone is silly enough to run us as root.
current_user_ids = posix.getresuid()
if 0 not in current_user_ids:
new_user_ids = (current_user_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresuid, *new_user_ids)
if hasattr(posix, 'setresgid'):
def test_setresgid(self):
current_group_ids = posix.getresgid()
self.assertIsNone(posix.setresgid(*current_group_ids))
# -1 means don't change that value.
self.assertIsNone(posix.setresgid(-1, -1, -1))
def test_setresgid_exception(self):
# Don't do this test if someone is silly enough to run us as root.
current_group_ids = posix.getresgid()
if 0 not in current_group_ids:
new_group_ids = (current_group_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresgid, *new_group_ids)
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs os.initgroups()")
def test_initgroups(self):
# It takes a string and an integer; check that it raises a TypeError
# for other argument lists.
self.assertRaises(TypeError, posix.initgroups)
self.assertRaises(TypeError, posix.initgroups, None)
self.assertRaises(TypeError, posix.initgroups, 3, "foo")
self.assertRaises(TypeError, posix.initgroups, "foo", 3, object())
# If a non-privileged user invokes it, it should fail with OSError
# EPERM.
if os.getuid() != 0:
name = pwd.getpwuid(posix.getuid()).pw_name
try:
posix.initgroups(name, 13)
except OSError as e:
self.assertEqual(e.errno, errno.EPERM)
else:
self.fail("Expected OSError to be raised by initgroups")
def test_statvfs(self):
if hasattr(posix, 'statvfs'):
self.assertTrue(posix.statvfs(os.curdir))
def test_fstatvfs(self):
if hasattr(posix, 'fstatvfs'):
fp = open(test_support.TESTFN)
try:
self.assertTrue(posix.fstatvfs(fp.fileno()))
finally:
fp.close()
def test_ftruncate(self):
if hasattr(posix, 'ftruncate'):
fp = open(test_support.TESTFN, 'w+')
try:
# we need to have some data to truncate
fp.write('test')
fp.flush()
posix.ftruncate(fp.fileno(), 0)
finally:
fp.close()
def test_dup(self):
if hasattr(posix, 'dup'):
fp = open(test_support.TESTFN)
try:
fd = posix.dup(fp.fileno())
self.assertIsInstance(fd, int)
os.close(fd)
finally:
fp.close()
def test_confstr(self):
if hasattr(posix, 'confstr'):
self.assertRaises(ValueError, posix.confstr, "CS_garbage")
self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
def test_dup2(self):
if hasattr(posix, 'dup2'):
fp1 = open(test_support.TESTFN)
fp2 = open(test_support.TESTFN)
try:
posix.dup2(fp1.fileno(), fp2.fileno())
finally:
fp1.close()
fp2.close()
def fdopen_helper(self, *args):
fd = os.open(test_support.TESTFN, os.O_RDONLY)
fp2 = posix.fdopen(fd, *args)
fp2.close()
def test_fdopen(self):
if hasattr(posix, 'fdopen'):
self.fdopen_helper()
self.fdopen_helper('r')
self.fdopen_helper('r', 100)
def test_osexlock(self):
if hasattr(posix, "O_EXLOCK"):
fd = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, test_support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
if hasattr(posix, "O_SHLOCK"):
fd = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, test_support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
def test_osshlock(self):
if hasattr(posix, "O_SHLOCK"):
fd1 = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
fd2 = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
os.close(fd2)
os.close(fd1)
if hasattr(posix, "O_EXLOCK"):
fd = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, test_support.TESTFN,
os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
def test_fstat(self):
if hasattr(posix, 'fstat'):
fp = open(test_support.TESTFN)
try:
self.assertTrue(posix.fstat(fp.fileno()))
finally:
fp.close()
def test_stat(self):
if hasattr(posix, 'stat'):
self.assertTrue(posix.stat(test_support.TESTFN))
def _test_all_chown_common(self, chown_func, first_param):
"""Common code for chown, fchown and lchown tests."""
if os.getuid() == 0:
try:
# Many linux distros have a nfsnobody user as MAX_UID-2
# that makes a good test case for signedness issues.
# http://bugs.python.org/issue1747858
# This part of the test only runs when run as root.
# Only scary people run their tests as root.
ent = pwd.getpwnam('nfsnobody')
chown_func(first_param, ent.pw_uid, ent.pw_gid)
except KeyError:
pass
else:
# non-root cannot chown to root, raises OSError
self.assertRaises(OSError, chown_func,
first_param, 0, 0)
# test a successful chown call
chown_func(first_param, os.getuid(), os.getgid())
@unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
def test_chown(self):
# raise an OSError if the file does not exist
os.unlink(test_support.TESTFN)
self.assertRaises(OSError, posix.chown, test_support.TESTFN, -1, -1)
# re-create the file
open(test_support.TESTFN, 'w').close()
self._test_all_chown_common(posix.chown, test_support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
def test_fchown(self):
os.unlink(test_support.TESTFN)
# re-create the file
test_file = open(test_support.TESTFN, 'w')
try:
fd = test_file.fileno()
self._test_all_chown_common(posix.fchown, fd)
finally:
test_file.close()
@unittest.skipUnless(hasattr(posix, 'lchown'), "test needs os.lchown()")
def test_lchown(self):
os.unlink(test_support.TESTFN)
# create a symlink
os.symlink(_DUMMY_SYMLINK, test_support.TESTFN)
self._test_all_chown_common(posix.lchown, test_support.TESTFN)
def test_chdir(self):
if hasattr(posix, 'chdir'):
posix.chdir(os.curdir)
self.assertRaises(OSError, posix.chdir, test_support.TESTFN)
def test_lsdir(self):
if hasattr(posix, 'lsdir'):
self.assertIn(test_support.TESTFN, posix.lsdir(os.curdir))
def test_access(self):
if hasattr(posix, 'access'):
self.assertTrue(posix.access(test_support.TESTFN, os.R_OK))
def test_umask(self):
if hasattr(posix, 'umask'):
old_mask = posix.umask(0)
self.assertIsInstance(old_mask, int)
posix.umask(old_mask)
def test_strerror(self):
if hasattr(posix, 'strerror'):
self.assertTrue(posix.strerror(0))
def test_pipe(self):
if hasattr(posix, 'pipe'):
reader, writer = posix.pipe()
os.close(reader)
os.close(writer)
def test_tempnam(self):
if hasattr(posix, 'tempnam'):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tempnam", DeprecationWarning)
self.assertTrue(posix.tempnam())
self.assertTrue(posix.tempnam(os.curdir))
self.assertTrue(posix.tempnam(os.curdir, 'blah'))
def test_tmpfile(self):
if hasattr(posix, 'tmpfile'):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning)
fp = posix.tmpfile()
fp.close()
def test_utime(self):
if hasattr(posix, 'utime'):
now = time.time()
posix.utime(test_support.TESTFN, None)
self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (None, None))
self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (now, None))
self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (None, now))
posix.utime(test_support.TESTFN, (int(now), int(now)))
posix.utime(test_support.TESTFN, (now, now))
def _test_chflags_regular_file(self, chflags_func, target_file):
st = os.stat(target_file)
self.assertTrue(hasattr(st, 'st_flags'))
chflags_func(target_file, st.st_flags | stat.UF_IMMUTABLE)
try:
new_st = os.stat(target_file)
self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
try:
fd = open(target_file, 'w+')
except IOError as e:
self.assertEqual(e.errno, errno.EPERM)
finally:
posix.chflags(target_file, st.st_flags)
@unittest.skipUnless(hasattr(posix, 'chflags'), 'test needs os.chflags()')
def test_chflags(self):
self._test_chflags_regular_file(posix.chflags, test_support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_regular_file(self):
self._test_chflags_regular_file(posix.lchflags, test_support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_symlink(self):
testfn_st = os.stat(test_support.TESTFN)
self.assertTrue(hasattr(testfn_st, 'st_flags'))
os.symlink(test_support.TESTFN, _DUMMY_SYMLINK)
self.teardown_files.append(_DUMMY_SYMLINK)
dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
posix.lchflags(_DUMMY_SYMLINK,
dummy_symlink_st.st_flags | stat.UF_IMMUTABLE)
try:
new_testfn_st = os.stat(test_support.TESTFN)
new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
new_dummy_symlink_st.st_flags)
finally:
posix.lchflags(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
def test_getcwd_long_pathnames(self):
if hasattr(posix, 'getcwd'):
dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
curdir = os.getcwd()
base_path = os.path.abspath(test_support.TESTFN) + '.getcwd'
try:
os.mkdir(base_path)
os.chdir(base_path)
except:
# Just returning nothing instead of the SkipTest exception,
# because the test results in Error in that case.
# Is that ok?
# raise unittest.SkipTest, "cannot create directory for testing"
return
try:
def _create_and_do_getcwd(dirname, current_path_length = 0):
try:
os.mkdir(dirname)
except:
raise unittest.SkipTest, "mkdir cannot create directory sufficiently deep for getcwd test"
os.chdir(dirname)
try:
os.getcwd()
if current_path_length < 4099:
_create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
except OSError as e:
expected_errno = errno.ENAMETOOLONG
if 'sunos' in sys.platform or 'openbsd' in sys.platform:
expected_errno = errno.ERANGE # Issue 9185
self.assertEqual(e.errno, expected_errno)
finally:
os.chdir('..')
os.rmdir(dirname)
_create_and_do_getcwd(dirname)
finally:
os.chdir(curdir)
shutil.rmtree(base_path)
@unittest.skipIf(test_support.is_jython, "FIXME: not working on Jython")
@unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
def test_getgroups(self):
with os.popen('id -G') as idg:
groups = idg.read().strip()
if not groups:
raise unittest.SkipTest("need working 'id -G'")
# 'id -G' and 'os.getgroups()' should return the same
# groups, ignoring order and duplicates.
# #10822 - it is implementation defined whether posix.getgroups()
# includes the effective gid so we include it anyway, since id -G does
self.assertEqual(
set([int(x) for x in groups.split()]),
set(posix.getgroups() + [posix.getegid()]))
class PosixGroupsTester(unittest.TestCase):
def setUp(self):
if posix.getuid() != 0:
raise unittest.SkipTest("not enough privileges")
if not hasattr(posix, 'getgroups'):
raise unittest.SkipTest("need posix.getgroups")
if sys.platform == 'darwin':
raise unittest.SkipTest("getgroups(2) is broken on OSX")
self.saved_groups = posix.getgroups()
def tearDown(self):
if hasattr(posix, 'setgroups'):
posix.setgroups(self.saved_groups)
elif hasattr(posix, 'initgroups'):
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, self.saved_groups[0])
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs posix.initgroups()")
def test_initgroups(self):
# find missing group
g = max(self.saved_groups) + 1
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, g)
self.assertIn(g, posix.getgroups())
@unittest.skipUnless(hasattr(posix, 'setgroups'),
"test needs posix.setgroups()")
def test_setgroups(self):
for groups in [[0], range(16)]:
posix.setgroups(groups)
self.assertListEqual(groups, posix.getgroups())
def test_main():
test_support.run_unittest(PosixTester, PosixGroupsTester)
if __name__ == '__main__':
test_main()
|
epl-1.0
|
GoogleCloudPlatform/mlops-on-gcp
|
workshops/kfp-caip-sklearn/lab-02-kfp-pipeline/pipeline/covertype_training_pipeline.py
|
3
|
7714
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KFP pipeline orchestrating BigQuery and Cloud AI Platform services."""
import os
from helper_components import evaluate_model
from helper_components import retrieve_best_run
from jinja2 import Template
import kfp
from kfp.components import func_to_container_op
from kfp.dsl.types import Dict
from kfp.dsl.types import GCPProjectID
from kfp.dsl.types import GCPRegion
from kfp.dsl.types import GCSPath
from kfp.dsl.types import String
from kfp.gcp import use_gcp_secret
# Defaults and environment settings
BASE_IMAGE = os.getenv('BASE_IMAGE')
TRAINER_IMAGE = os.getenv('TRAINER_IMAGE')
RUNTIME_VERSION = os.getenv('RUNTIME_VERSION')
PYTHON_VERSION = os.getenv('PYTHON_VERSION')
COMPONENT_URL_SEARCH_PREFIX = os.getenv('COMPONENT_URL_SEARCH_PREFIX')
USE_KFP_SA = os.getenv('USE_KFP_SA')
TRAINING_FILE_PATH = 'datasets/training/data.csv'
VALIDATION_FILE_PATH = 'datasets/validation/data.csv'
TESTING_FILE_PATH = 'datasets/testing/data.csv'
# Parameter defaults
SPLITS_DATASET_ID = 'splits'
HYPERTUNE_SETTINGS = """
{
"hyperparameters": {
"goal": "MAXIMIZE",
"maxTrials": 6,
"maxParallelTrials": 3,
"hyperparameterMetricTag": "accuracy",
"enableTrialEarlyStopping": True,
"params": [
{
"parameterName": "max_iter",
"type": "DISCRETE",
"discreteValues": [500, 1000]
},
{
"parameterName": "alpha",
"type": "DOUBLE",
"minValue": 0.0001,
"maxValue": 0.001,
"scaleType": "UNIT_LINEAR_SCALE"
}
]
}
}
"""
# Helper functions
def generate_sampling_query(source_table_name, num_lots, lots):
"""Prepares the data sampling query."""
sampling_query_template = """
SELECT *
FROM
`{{ source_table }}` AS cover
WHERE
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), {{ num_lots }}) IN ({{ lots }})
"""
query = Template(sampling_query_template).render(
source_table=source_table_name, num_lots=num_lots, lots=str(lots)[1:-1])
return query
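# A brief sketch of how the helper above is used: FARM_FINGERPRINT hashes each
# row deterministically and MOD(..., num_lots) assigns it to a stable lot, so
# disjoint lot lists produce disjoint splits. The table name below is a
# hypothetical placeholder, not part of this pipeline.
#
#   query = generate_sampling_query(
#       source_table_name='my_project.covertype_dataset.covertype',
#       num_lots=10,
#       lots=[1, 2, 3, 4])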
# Create component factories
component_store = kfp.components.ComponentStore(
local_search_paths=None, url_search_prefixes=[COMPONENT_URL_SEARCH_PREFIX])
bigquery_query_op = component_store.load_component('bigquery/query')
mlengine_train_op = component_store.load_component('ml_engine/train')
mlengine_deploy_op = component_store.load_component('ml_engine/deploy')
retrieve_best_run_op = func_to_container_op(
retrieve_best_run, base_image=BASE_IMAGE)
evaluate_model_op = func_to_container_op(evaluate_model, base_image=BASE_IMAGE)
@kfp.dsl.pipeline(
name='Covertype Classifier Training',
    description='The pipeline training and deploying the Covertype classifier'
)
def covertype_train(project_id,
region,
source_table_name,
gcs_root,
dataset_id,
evaluation_metric_name,
evaluation_metric_threshold,
model_id,
version_id,
replace_existing_version,
hypertune_settings=HYPERTUNE_SETTINGS,
dataset_location='US'):
"""Orchestrates training and deployment of an sklearn model."""
# Create the training split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[1, 2, 3, 4])
training_file_path = '{}/{}'.format(gcs_root, TRAINING_FILE_PATH)
create_training_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=training_file_path,
dataset_location=dataset_location)
# Create the validation split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[8])
validation_file_path = '{}/{}'.format(gcs_root, VALIDATION_FILE_PATH)
create_validation_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=validation_file_path,
dataset_location=dataset_location)
# Create the testing split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[9])
testing_file_path = '{}/{}'.format(gcs_root, TESTING_FILE_PATH)
create_testing_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=testing_file_path,
dataset_location=dataset_location)
# Tune hyperparameters
tune_args = [
'--training_dataset_path',
create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path',
create_validation_split.outputs['output_gcs_path'], '--hptune', 'True'
]
job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir/hypertune',
kfp.dsl.RUN_ID_PLACEHOLDER)
hypertune = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TRAINER_IMAGE,
job_dir=job_dir,
args=tune_args,
training_input=hypertune_settings)
# Retrieve the best trial
get_best_trial = retrieve_best_run_op(
project_id, hypertune.outputs['job_id'])
# Train the model on a combined training and validation datasets
job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir', kfp.dsl.RUN_ID_PLACEHOLDER)
train_args = [
'--training_dataset_path',
create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path',
create_validation_split.outputs['output_gcs_path'], '--alpha',
get_best_trial.outputs['alpha'], '--max_iter',
get_best_trial.outputs['max_iter'], '--hptune', 'False'
]
train_model = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TRAINER_IMAGE,
job_dir=job_dir,
args=train_args)
# Evaluate the model on the testing split
eval_model = evaluate_model_op(
dataset_path=str(create_testing_split.outputs['output_gcs_path']),
model_path=str(train_model.outputs['job_dir']),
metric_name=evaluation_metric_name)
# Deploy the model if the primary metric is better than threshold
with kfp.dsl.Condition(eval_model.outputs['metric_value'] > evaluation_metric_threshold):
deploy_model = mlengine_deploy_op(
model_uri=train_model.outputs['job_dir'],
project_id=project_id,
model_id=model_id,
version_id=version_id,
runtime_version=RUNTIME_VERSION,
python_version=PYTHON_VERSION,
replace_existing_version=replace_existing_version)
# Configure the pipeline to run using the service account defined
# in the user-gcp-sa k8s secret
if USE_KFP_SA == 'True':
kfp.dsl.get_pipeline_conf().add_op_transformer(
use_gcp_secret('user-gcp-sa'))
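# A minimal, hedged sketch of compiling this pipeline with the KFP SDK outside
# of any lab tooling; the output package name is an assumption.
if __name__ == '__main__':
    from kfp.compiler import Compiler

    # Compile the pipeline function into a deployable package (assumed filename).
    Compiler().compile(covertype_train, 'covertype_training_pipeline.yaml')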
|
apache-2.0
|
gppezzi/easybuild-easyblocks
|
easybuild/easyblocks/x/xmipp.py
|
2
|
7758
|
##
# Copyright 2015-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Xmipp, implemented as an easyblock
@author: Jens Timmerman (Ghent University)
"""
import glob
import os
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir, extract_file
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.easyblocks.generic.pythonpackage import det_pylibdir
class EB_Xmipp(EasyBlock):
"""Support for building/installing Xmipp."""
def __init__(self, *args, **kwargs):
"""Easyblock constructor, enable building in installation directory."""
super(EB_Xmipp, self).__init__(*args, **kwargs)
self.build_in_installdir = True
def extract_step(self):
"""Extract Xmipp sources."""
# strip off 'xmipp' part to avoid having everything in a 'xmipp' subdirectory
self.cfg.update('unpack_options', '--strip-components=1')
super(EB_Xmipp, self).extract_step()
def configure_step(self):
"""Configure Xmipp build via a provided wrapper around scons."""
# check if all our dependencies are in place
self.python_root = get_software_root('Python')
if not self.python_root:
raise EasyBuildError("Python not loaded as a dependency, which is required for %s", self.name)
python_libdir = det_pylibdir()
self.python_short_ver = '.'.join(get_software_version('Python').split('.')[:2])
java_root = get_software_root('Java')
if not java_root:
raise EasyBuildError("Java not loaded as a dependency, which is required for %s", self.name)
        # extract some dependencies that we really need and can't find anywhere else;
        # the alglib tarball has the version in its name, so let's find it with a glob.
        # We can't do this in the extract step because these files live inside the
        # original sources tarball, so we need to know the start dir first.
external_path = os.path.join(self.cfg['start_dir'], 'external')
alglib_tar = glob.glob(os.path.join(external_path, 'alglib*.tgz'))[0]
        for src in ['bilib.tgz', 'condor.tgz', alglib_tar, 'scons.tgz']:
extract_file(os.path.join(external_path, src), external_path)
# make sure we are back in the start dir
os.chdir(self.cfg['start_dir'])
# build step expects these to exist
mkdir(os.path.join(self.cfg['start_dir'], 'bin'))
mkdir(os.path.join(self.cfg['start_dir'], 'lib'))
python_inc_dir = os.path.join(self.python_root, 'include', 'python%s' % self.python_short_ver)
numpy_inc_dir = os.path.join(self.python_root, python_libdir, 'numpy', 'core', 'include')
if self.toolchain.mpi_family() == toolchain.INTELMPI:
mpi_bindir = os.path.join(get_software_root('impi'), 'intel64', 'bin')
else:
mpi_bindir = os.path.join(get_software_root(self.toolchain.MPI_MODULE_NAME[0]), 'bin')
if not os.path.exists(numpy_inc_dir):
raise EasyBuildError("numpy 'include' directory %s not found", numpy_inc_dir)
if not os.path.exists(mpi_bindir):
raise EasyBuildError("MPI 'bin' subdir %s does not exist", mpi_bindir)
cmd = ' '.join([
self.cfg['preconfigopts'],
'python external/scons/scons.py',
'mode=configure',
'-j %s' % self.cfg['parallel'],
'--config=force',
'profile=no',
'fast=yes',
'warn=no',
'release=yes',
'gtest=no',
'cuda=no',
'debug=no',
'matlab=no',
'java=no',
'LINKERFORPROGRAMS="$CXX"',
'MPI_BINDIR=%s' % mpi_bindir,
'JAVA_HOME=%s' % java_root,
'JAVAC=javac',
'CC="$CC"',
'CXXFLAGS="$CXXFLAGS -DMPICH_IGNORE_CXX_SEEK -I%s -I%s"' % (python_inc_dir, numpy_inc_dir),
'CXX="$CXX"',
'MPI_CC="$MPICC"',
'MPI_CXX="$MPICXX"',
'MPI_INCLUDE="$MPI_INC_DIR"',
'MPI_LIBDIR="$MPI_LIB_DIR"',
'MPI_LINKERFORPROGRAMS="$MPICC"',
'LIBPATH="$LD_LIBRARY_PATH"',
self.cfg['configopts'],
])
run_cmd(cmd, log_all=True, simple=True)
def build_step(self):
"""Custom build procedure for Xmipp: call the scons wrapper with compile argument"""
cmd = ' '.join([
self.cfg['prebuildopts'],
'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/lib',
'python external/scons/scons.py',
'mode=compile',
'-j %s' % self.cfg['parallel'],
self.cfg['buildopts'],
])
run_cmd(cmd, log_all=True, simple=True)
def install_step(self):
"""install step for Xmipp, this builds a local database and seems to do some tests?"""
python_dynlib_dir = os.path.join(self.python_root, 'lib', 'python%s' % self.python_short_ver, 'lib-dynload')
if not os.path.exists(python_dynlib_dir):
raise EasyBuildError("Python lib-dynload dir %s not found", python_dynlib_dir)
extra_pythonpaths = [
os.path.join(self.cfg['start_dir'], 'protocols'),
os.path.join(self.cfg['start_dir'], 'libraries', 'bindings', 'python'),
python_dynlib_dir,
]
cmd = ' '.join([
self.cfg['preinstallopts'],
'XMIPP_HOME=%s' % self.cfg['start_dir'],
'PATH=%s:$PATH' % os.path.join(self.cfg['start_dir'], 'bin'),
'PYTHONPATH="%s"' % os.pathsep.join(['$PYTHONPATH'] + extra_pythonpaths),
'python setup.py install',
self.cfg['installopts'],
])
run_cmd(cmd, log_all=True, simple=True)
def sanity_check_step(self):
"""Custom sanity check for Xmipp."""
custom_paths = {
# incomplete list, random picks, cfr. http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/ListOfProgramsv3
'files': ['bin/xmipp_%s' % x for x in ['compile', 'imagej', 'mpi_run', 'phantom_create',
'transform_filter', 'tomo_project', 'volume_align']],
'dirs': ['lib'],
}
super(EB_Xmipp, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Define Xmipp specific variables in generated module file, i.e. XMIPP_HOME."""
txt = super(EB_Xmipp, self).make_module_extra()
txt += self.module_generator.set_environment('XMIPP_HOME', self.installdir)
return txt
|
gpl-2.0
|
mensler/ansible
|
lib/ansible/modules/network/avi/avi_network.py
|
49
|
4780
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_network
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of Network Avi RESTful Object
description:
- This module is used to configure Network object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cloud_ref:
description:
- It is a reference to an object of type cloud.
configured_subnets:
description:
- List of subnet.
dhcp_enabled:
description:
- Select the ip address management scheme for this network.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
exclude_discovered_subnets:
description:
- When selected, excludes all discovered subnets in this network from consideration for virtual service placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
name:
description:
- Name of the object.
required: true
synced_from_se:
description:
- Boolean flag to set synced_from_se.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vcenter_dvs:
description:
- Boolean flag to set vcenter_dvs.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
vimgrnw_ref:
description:
- It is a reference to an object of type vimgrnwruntime.
vrf_context_ref:
description:
- It is a reference to an object of type vrfcontext.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Network object
avi_network:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_network
"""
RETURN = '''
obj:
description: Network (api/network) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
cloud_ref=dict(type='str',),
configured_subnets=dict(type='list',),
dhcp_enabled=dict(type='bool',),
exclude_discovered_subnets=dict(type='bool',),
name=dict(type='str', required=True),
synced_from_se=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
vcenter_dvs=dict(type='bool',),
vimgrnw_ref=dict(type='str',),
vrf_context_ref=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'network',
set([]))
if __name__ == '__main__':
main()
|
gpl-3.0
|
kevalds51/sympy
|
sympy/vector/point.py
|
42
|
4853
|
from sympy.core.compatibility import range
from sympy.core.basic import Basic
from sympy.vector.vector import Vector
from sympy.vector.coordsysrect import CoordSysCartesian
from sympy.vector.functions import _path
from sympy import Symbol
from sympy.core.cache import cacheit
class Point(Basic):
"""
Represents a point in 3-D space.
"""
def __new__(cls, name, position=Vector.zero, parent_point=None):
#Check the args first
if not isinstance(position, Vector):
raise TypeError("position should be a Vector instance")
if (not isinstance(parent_point, Point)
and parent_point is not None):
raise TypeError("parent_point should be a Point instance")
#Create an object
if parent_point is None:
arg_parent = Symbol('default')
arg_self = Symbol(name)
else:
arg_parent = parent_point
arg_self = Symbol('default')
#All points that are defined as 'roots' are unequal.
#Points defined at same position wrt the same
#'parent' are equal, irrespective of the names.
obj = super(Point, cls).__new__(cls, arg_self,
position, arg_parent)
#Decide the object parameters
obj._name = name
obj._pos = position
if parent_point is None:
obj._parent = None
obj._root = obj
else:
obj._parent = parent_point
obj._root = parent_point._root
#Return object
return obj
@cacheit
def position_wrt(self, other):
"""
Returns the position vector of this Point with respect to
another Point/CoordSysCartesian.
Parameters
==========
other : Point/CoordSysCartesian
If other is a Point, the position of this Point wrt it is
        returned. If it's an instance of CoordSysCartesian, the position
wrt its origin is returned.
Examples
========
>>> from sympy.vector import Point, CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> p1 = N.origin.locate_new('p1', 10 * N.i)
>>> N.origin.position_wrt(p1)
(-10)*N.i
"""
if (not isinstance(other, Point)
and not isinstance(other, CoordSysCartesian)):
raise TypeError(str(other) +
"is not a Point or CoordSysCartesian")
if isinstance(other, CoordSysCartesian):
other = other.origin
#Handle special cases
if other == self:
return Vector.zero
elif other == self._parent:
return self._pos
elif other._parent == self:
return -1 * other._pos
#Else, use point tree to calculate position
rootindex, path = _path(self, other)
result = Vector.zero
i = -1
for i in range(rootindex):
result += path[i]._pos
i += 2
while i < len(path):
result -= path[i]._pos
i += 1
return result
def locate_new(self, name, position):
"""
Returns a new Point located at the given position wrt this
Point.
Thus, the position vector of the new Point wrt this one will
be equal to the given 'position' parameter.
Parameters
==========
name : str
Name of the new point
position : Vector
The position vector of the new Point wrt this one
Examples
========
>>> from sympy.vector import Point, CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> p1 = N.origin.locate_new('p1', 10 * N.i)
>>> p1.position_wrt(N.origin)
10*N.i
"""
return Point(name, position, self)
def express_coordinates(self, coordinate_system):
"""
Returns the Cartesian/rectangular coordinates of this point
wrt the origin of the given CoordSysCartesian instance.
Parameters
==========
coordinate_system : CoordSysCartesian
The coordinate system to express the coordinates of this
Point in.
Examples
========
>>> from sympy.vector import Point, CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> p1 = N.origin.locate_new('p1', 10 * N.i)
>>> p2 = p1.locate_new('p2', 5 * N.j)
>>> p2.express_coordinates(N)
(10, 5, 0)
"""
#Determine the position vector
pos_vect = self.position_wrt(coordinate_system.origin)
#Express it in the given coordinate system
return tuple(pos_vect.to_matrix(coordinate_system))
def __str__(self, printer=None):
return self._name
__repr__ = __str__
_sympystr = __str__
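# A minimal usage sketch (not part of the original sympy module): it relies
# only on the public API defined above -- CoordSysCartesian, locate_new,
# position_wrt and express_coordinates -- and runs when the file is executed
# directly.
if __name__ == '__main__':
    N = CoordSysCartesian('N')
    p1 = N.origin.locate_new('p1', 10 * N.i)   # 10 units along N.i
    p2 = p1.locate_new('p2', 5 * N.j)          # 5 units along N.j, wrt p1
    print(p2.position_wrt(N.origin))           # expected: 10*N.i + 5*N.j
    print(p2.express_coordinates(N))           # expected: (10, 5, 0)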
|
bsd-3-clause
|
NathanYee/ThinkBayes2
|
code/hockey.py
|
2
|
7035
|
"""This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import columns
import thinkbayes2
import thinkplot
USE_SUMMARY_DATA = True
class Hockey(thinkbayes2.Suite):
"""Represents hypotheses about the scoring rate for a team."""
def __init__(self, label=None):
"""Initializes the Hockey object.
label: string
"""
if USE_SUMMARY_DATA:
# prior based on each team's average goals scored
mu = 2.8
sigma = 0.3
else:
# prior based on each pair-wise match-up
mu = 2.8
sigma = 0.85
pmf = thinkbayes2.MakeNormalPmf(mu, sigma, 4)
thinkbayes2.Suite.__init__(self, pmf, label=label)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
Evaluates the Poisson PMF for lambda and k.
hypo: goal scoring rate in goals per game
data: goals scored in one period
"""
lam = hypo
k = data
like = thinkbayes2.EvalPoissonPmf(k, lam)
return like
def MakeGoalPmf(suite, high=10):
"""Makes the distribution of goals scored, given distribution of lam.
suite: distribution of goal-scoring rate
high: upper bound
returns: Pmf of goals per game
"""
metapmf = thinkbayes2.Pmf()
for lam, prob in suite.Items():
pmf = thinkbayes2.MakePoissonPmf(lam, high)
metapmf.Set(pmf, prob)
mix = thinkbayes2.MakeMixture(metapmf, label=suite.label)
return mix
def MakeGoalTimePmf(suite):
"""Makes the distribution of time til first goal.
suite: distribution of goal-scoring rate
    returns: Pmf of time until first goal
"""
metapmf = thinkbayes2.Pmf()
for lam, prob in suite.Items():
pmf = thinkbayes2.MakeExponentialPmf(lam, high=2, n=2001)
metapmf.Set(pmf, prob)
mix = thinkbayes2.MakeMixture(metapmf, label=suite.label)
return mix
class Game(object):
"""Represents a game.
Attributes are set in columns.read_csv.
"""
convert = dict()
def clean(self):
self.goals = self.pd1 + self.pd2 + self.pd3
def ReadHockeyData(filename='hockey_data.csv'):
"""Read game scores from the data file.
filename: string
"""
game_list = columns.read_csv(filename, Game)
# map from gameID to list of two games
games = {}
for game in game_list:
if game.season != 2011:
continue
key = game.game
games.setdefault(key, []).append(game)
# map from (team1, team2) to (score1, score2)
pairs = {}
for key, pair in games.iteritems():
t1, t2 = pair
key = t1.team, t2.team
entry = t1.total, t2.total
pairs.setdefault(key, []).append(entry)
ProcessScoresTeamwise(pairs)
ProcessScoresPairwise(pairs)
def ProcessScoresPairwise(pairs):
"""Average number of goals for each team against each opponent.
pairs: map from (team1, team2) to (score1, score2)
"""
# map from (team1, team2) to list of goals scored
goals_scored = {}
for key, entries in pairs.iteritems():
t1, t2 = key
for entry in entries:
g1, g2 = entry
goals_scored.setdefault((t1, t2), []).append(g1)
goals_scored.setdefault((t2, t1), []).append(g2)
# make a list of average goals scored
lams = []
for key, goals in goals_scored.iteritems():
if len(goals) < 3:
continue
lam = thinkbayes2.Mean(goals)
lams.append(lam)
# make the distribution of average goals scored
cdf = thinkbayes2.MakeCdfFromList(lams)
thinkplot.Cdf(cdf)
thinkplot.Show()
mu, var = thinkbayes2.MeanVar(lams)
print('mu, sig', mu, math.sqrt(var))
print('BOS v VAN', pairs['BOS', 'VAN'])
def ProcessScoresTeamwise(pairs):
"""Average number of goals for each team.
pairs: map from (team1, team2) to (score1, score2)
"""
# map from team to list of goals scored
goals_scored = {}
for key, entries in pairs.iteritems():
t1, t2 = key
for entry in entries:
g1, g2 = entry
goals_scored.setdefault(t1, []).append(g1)
goals_scored.setdefault(t2, []).append(g2)
# make a list of average goals scored
lams = []
for key, goals in goals_scored.iteritems():
lam = thinkbayes2.Mean(goals)
lams.append(lam)
# make the distribution of average goals scored
cdf = thinkbayes2.MakeCdfFromList(lams)
thinkplot.Cdf(cdf)
thinkplot.Show()
mu, var = thinkbayes2.MeanVar(lams)
print('mu, sig', mu, math.sqrt(var))
def main():
#ReadHockeyData()
#return
formats = ['pdf', 'eps']
suite1 = Hockey('bruins')
suite2 = Hockey('canucks')
thinkplot.Clf()
thinkplot.PrePlot(num=2)
thinkplot.Pmf(suite1)
thinkplot.Pmf(suite2)
thinkplot.Save(root='hockey0',
xlabel='Goals per game',
ylabel='Probability',
formats=formats)
suite1.UpdateSet([0, 2, 8, 4])
suite2.UpdateSet([1, 3, 1, 0])
thinkplot.Clf()
thinkplot.PrePlot(num=2)
thinkplot.Pmf(suite1)
thinkplot.Pmf(suite2)
thinkplot.Save(root='hockey1',
xlabel='Goals per game',
ylabel='Probability',
formats=formats)
goal_dist1 = MakeGoalPmf(suite1)
goal_dist2 = MakeGoalPmf(suite2)
thinkplot.Clf()
thinkplot.PrePlot(num=2)
thinkplot.Pmf(goal_dist1)
thinkplot.Pmf(goal_dist2)
thinkplot.Save(root='hockey2',
xlabel='Goals',
ylabel='Probability',
formats=formats)
time_dist1 = MakeGoalTimePmf(suite1)
time_dist2 = MakeGoalTimePmf(suite2)
print('MLE bruins', suite1.MaximumLikelihood())
print('MLE canucks', suite2.MaximumLikelihood())
thinkplot.Clf()
thinkplot.PrePlot(num=2)
thinkplot.Pmf(time_dist1)
thinkplot.Pmf(time_dist2)
thinkplot.Save(root='hockey3',
xlabel='Games until goal',
ylabel='Probability',
formats=formats)
diff = goal_dist1 - goal_dist2
p_win = diff.ProbGreater(0)
p_loss = diff.ProbLess(0)
p_tie = diff.Prob(0)
print(p_win, p_loss, p_tie)
p_overtime = thinkbayes2.PmfProbLess(time_dist1, time_dist2)
p_adjust = thinkbayes2.PmfProbEqual(time_dist1, time_dist2)
p_overtime += p_adjust / 2
print('p_overtime', p_overtime)
print(p_overtime * p_tie)
p_win += p_overtime * p_tie
print('p_win', p_win)
# win the next two
p_series = p_win**2
# split the next two, win the third
p_series += 2 * p_win * (1-p_win) * p_win
print('p_series', p_series)
if __name__ == '__main__':
main()
|
gpl-2.0
|
gavinfish/leetcode-share
|
python/056 Merge Intervals.py
|
1
|
1138
|
'''
Given a collection of intervals, merge all overlapping intervals.
For example,
Given [1,3],[2,6],[8,10],[15,18],
return [1,6],[8,10],[15,18].
'''
# Definition for an interval.
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
# To print the result
def __str__(self):
return "[" + str(self.start) + "," + str(self.end) + "]"
class Solution(object):
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
result = []
if not intervals:
return result
intervals.sort(key=lambda x: x.start)
result.append(intervals[0])
for interval in intervals[1:]:
prev = result[-1]
if prev.end >= interval.start:
prev.end = max(prev.end, interval.end)
else:
result.append(interval)
return result
if __name__ == "__main__":
intervals = Solution().merge([Interval(1, 3), Interval(2, 6), Interval(8, 10), Interval(15, 18)])
for interval in intervals:
print(interval)
|
mit
|
google-research/recsim
|
recsim/testing/test_environment.py
|
1
|
12670
|
# coding=utf-8
# Copyright 2019 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sequential environment with known Q-function for testing purposes.
The user dynamics in this environment follow a 6 state x 4 action MDP with
the following specification:
0 # action
0 (1 0.1) (2 0.1) (3 0.8)
1 (2 0.1) (3 0.1) (4 0.8)
2 (3 0.1) (4 0.1) (5 0.8)
3 (4 0.1) (5 0.1) (0 0.8)
4 (5 1.0)
5 (0 1.0)
reward (0 -10.0) (1 0.0) (2 0.0) (3 4.0) (4 0.0) (5 5.0)
1 # action
0 (2 0.1) (3 0.1) (4 0.8)
1 (3 0.1) (4 0.1) (5 0.8)
2 (4 0.1) (5 0.1) (0 0.8)
3 (5 0.1) (0 0.1) (1 0.8)
4 (0 1.0)
5 (1 1.0)
reward (0 -10.0) (1 0.0) (2 1.0) (3 0.0) (4 0.0) (5 0.0)
2 # action
0 (3 0.1) (4 0.1) (5 0.8)
1 (4 0.1) (5 0.1) (0 0.8)
2 (5 0.1) (0 0.1) (1 0.8)
3 (0 0.1) (1 0.1) (2 0.8)
4 (1 1.0)
5 (2 1.0)
reward (0 -10.0) (1 1.0) (2 0.0) (3 2.0) (4 0.0) (5 2.0)
3 # action
0 (4 0.1) (5 0.1) (0 0.8)
1 (5 0.1) (0 0.1) (1 0.8)
2 (0 0.1) (1 0.1) (2 0.8)
3 (1 0.1) (2 0.1) (3 0.8)
4 (2 1.0)
5 (3 1.0)
reward (0 -10.0) (1 0.0) (2 0.0) (3 0.0) (4 0.0) (5 5.0)
Known Q and value functions for:
* gamma = 0
Q-function:
Action
State 0 1 2 3
0 -10 -10 -10 -10
1 0 0 1 0
2 0 1 0 0
3 4 0 2 0
4 0 0 0 0
5 5 0 2 5
Value function:
V[0] = -10, V[1] = 1, V[2] = 1, V[3] = 4, V[4] = 0, V[5] = 5
* gamma = 0.5
Q-function:
Action
State 0 1 2 3
0 -8.53022 -8.41259 -7.10072 -12.3547
1 1.58741 2.89928 -1.35468 1.12842
2 2.89928 -1.35468 1.12842 0.94964
3 1.64532 1.12842 2.94964 1.46978
4 3.23741 -3.55036 1.44964 1.44964
5 1.44964 1.44964 3.44964 6.47482
Value function:
V[0] = -7.10072, V[1] = 2.89928, V[2] = 2.89928, V[3] = 2.94964,
V[4] = 3.23741, V[5] = 6.47482
* gamma = 0.9
Q-function:
Action
State 0 1 2 3
0 5.79888 6.59282 8.12425 -0.615864
1 16.5928 18.1242 10.3841 15.641
2 18.1242 10.3841 15.641 15.4118
3 13.3841 15.641 17.4118 15.7989
4 18.6036 7.31182 16.3118 16.3118
5 12.3118 16.3118 18.3118 20.6706
Value function:
V[0] = 8.12425, V[1] = 18.1242, V[2] = 18.1242, V[3] = 17.4118,
V[4] = 18.6036, V[5] = 20.6706
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import gin.tf
from gym import spaces
import numpy as np
from recsim import document
from recsim import user
from recsim.simulator import environment
from recsim.simulator import recsim_gym
FLAGS = flags.FLAGS
QVALUES0 = [[-10.0, -10.0, -10.0, -10.0], [0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0], [4.0, 0.0, 2.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[5.0, 0.0, 2.0, 5.0]]
QVALUES05 = [[-8.53022, -8.41259, -7.10072, -12.3547],
[1.58741, 2.89928, -1.35468, 1.12842],
[2.89928, -1.35468, 1.12842, 0.94964],
[1.64532, 1.12842, 2.94964, 1.46978],
[3.23741, -3.55036, 1.44964, 1.44964],
[1.44964, 1.44964, 3.44964, 6.47482]]
QVALUES09 = [[5.79888, 6.59282, 8.12425, -0.615864],
[16.5928, 18.1242, 10.3841, 15.641],
[18.1242, 10.3841, 15.641, 15.4118],
[13.3841, 15.641, 17.4118, 15.7989],
[18.6036, 7.31182, 16.3118, 16.3118],
[12.3118, 16.3118, 18.3118, 20.6706]]
class SimpleSequentialUserModel(user.AbstractUserModel):
"""Class to model a simple sequential user for testing.
This is a 6-state user with dynamics described above. It can consume one of 4
document types and transition according to a fixed transition matrix. To
facilitate testing of Q-estimation, the entire state is emitted, i.e. the
environment is fully observed.
Args:
seed: random seed.
"""
def __init__(self,
slate_size,
seed=0,
starting_probs=(1.0, 0.0, 0.0, 0.0, 0.0, 0.0)):
super(SimpleSequentialUserModel, self).__init__(
SimpleSequentialResponse,
SimpleSequentialUserSampler(seed=seed, starting_probs=starting_probs),
slate_size)
self._transition_matrix = np.zeros((4, 6, 6))
self._transition_matrix[0, :, :] = np.array([[0, .1, .1, .8, 0, 0],
[0, 0, .1, .1, .8, 0],
[0, 0, 0, .1, .1, .8],
[.8, 0, 0, 0, .1, .1],
[0, 0, 0, 0, 0, 1.0],
[1.0, 0, 0, 0, 0, 0]])
self._transition_matrix[1, :, :] = np.array([[0, 0, .1, .1, .8, 0],
[0, 0, 0, .1, .1, .8],
[.8, 0, 0, 0, .1, .1],
[.1, .8, 0, 0, 0, .1],
[1.0, 0, 0, 0, 0, 0],
[0, 1.0, 0, 0, 0, 0]])
self._transition_matrix[2, :, :] = np.array([[0, 0, 0, .1, .1, .8],
[.8, 0, 0, 0, .1, .1],
[.1, .8, 0, 0, 0, .1],
[.1, .1, .8, 0, 0, 0],
[0, 1.0, 0, 0, 0, 0],
[0, 0, 1.0, 0, 0, 0]])
self._transition_matrix[3, :, :] = np.array([[.8, 0, 0, 0, .1, .1],
[.1, .8, 0, 0, 0, .1],
[.1, .1, .8, 0, 0, 0],
[0, .1, .1, .8, 0, 0],
[0, 0, 1.0, 0, 0, 0],
[0, 0, 0, 1.0, 0, 0]])
self._reward_vector = np.zeros((4, 6))
self._reward_vector[0, :] = np.array([-10.0, 0.0, 0.0, 4.0, 0.0, 5.0])
self._reward_vector[1, :] = np.array([-10.0, 0.0, 1.0, 0.0, 0.0, 0.0])
self._reward_vector[2, :] = np.array([-10.0, 1.0, 0.0, 2.0, 0.0, 2.0])
self._reward_vector[3, :] = np.array([-10.0, 0.0, 0.0, 0.0, 0.0, 5.0])
def is_terminal(self):
"""Returns a boolean indicating if the session is over."""
return False
def update_state(self, slate_documents, responses):
doc = slate_documents[0]
next_state = np.random.choice(
6, p=self._transition_matrix[doc.action_id, self._user_state.state])
self._user_state = SimpleSequentialUserState(next_state)
return
def simulate_response(self, documents):
"""Simulates the user's response to a slate of documents with choice model.
Args:
documents: a list of SimpleSequentialDocument objects in the slate.
Returns:
responses: a list of SimpleSequentialResponse objects,
one for each document.
"""
# List of empty responses
responses = [self._response_model_ctor() for _ in documents]
# Always pick the first document in the slate
selected_index = 0
self._generate_response(documents[selected_index],
responses[selected_index])
return responses
def _generate_response(self, doc, response):
"""Trivial response: sets the clicked property of a clicked document.
Args:
doc: a SimpleSequentialDocument object.
response: a SimpleSequentialResponse for the document.
Updates: response, with whether the document was clicked.
"""
response.reward = self._reward_vector[doc.action_id, self._user_state.state]
class SimpleSequentialUserState(user.AbstractUserState):
"""Class to represent user state for testing. Fully observed.
Attributes:
state: integer in [0...5] representing the state of the user Markov Chain.
"""
def __init__(self, state):
"""Initializes a new user."""
self.state = state
def create_observation(self):
return self.state
def observation_space(self):
return spaces.Discrete(6)
def score_document(self, doc_obs):
del doc_obs # unused
return 1.0
@gin.configurable
class SimpleSequentialUserSampler(user.AbstractUserSampler):
"""Samples initial user state from a multinomial distribution.
Args:
probs: 6-outcome probability mass function for sampling initial state.
"""
def __init__(self, starting_probs=(1.0, 0, 0, 0, 0, 0), **kwargs):
self._probs = starting_probs
super(SimpleSequentialUserSampler, self).__init__(SimpleSequentialUserState,
**kwargs)
def sample_user(self):
starting_state = np.random.choice(6, p=self._probs)
return SimpleSequentialUserState(starting_state)
class SimpleSequentialResponse(user.AbstractResponse):
"""Class to represent a user's response to a document.
Attributes:
reward: a real number representing the state reward of the action executed
by the document.
"""
# The max possible doc ID. We assume the doc ID is in range [0, MAX_DOC_ID].
MAX_DOC_ID = None
def __init__(self, reward=0.0):
self.reward = reward
def __str__(self):
return str(self.reward)
def __repr__(self):
return self.__str__()
def create_observation(self):
return {'reward': np.array(self.reward)}
@classmethod
def response_space(cls):
return spaces.Dict({
'reward':
spaces.Box(low=-10.0, high=5.0, shape=tuple(), dtype=np.float32)
})
class SimpleSequentialDocument(document.AbstractDocument):
"""Class to represent an Simple Sequential Document.
Attributes:
doc_id: integer represents the document id.
action_id: integer represents one of the 4 available actions.
"""
def __init__(self, doc_id, action_id):
self.action_id = action_id
super(SimpleSequentialDocument, self).__init__(doc_id)
def create_observation(self):
return self.action_id
def observation_space(self):
return spaces.Discrete(4)
@gin.configurable
class SimpleSequentialDocumentSampler(document.AbstractDocumentSampler):
"""Round robin a selection of all 4 actions.
  As long as the number of candidates is at least 4, this guarantees that all
actions will be available.
"""
def __init__(self, **kwargs):
self._last_action_id = -1
self._doc_count = 0
super(SimpleSequentialDocumentSampler,
self).__init__(SimpleSequentialDocument, **kwargs)
def sample_document(self):
self._last_action_id += 1
self._last_action_id %= 4
self._doc_count += 1
return self._doc_ctor(self._doc_count, self._last_action_id)
def total_reward(responses):
"""Calculates the total reward from a list of responses.
Args:
responses: A list of SimpleSequentialResponse objects
Returns:
    reward: A float representing the total reward from the responses
"""
reward = 0.0
for r in responses:
reward += r.reward
return reward
def create_environment(env_config):
"""Creates an simple sequential testing environment."""
if env_config['num_candidates'] < 4:
raise ValueError('num_candidates must be at least 4.')
SimpleSequentialResponse.MAX_DOC_ID = env_config['num_candidates'] - 1
user_model = SimpleSequentialUserModel(
env_config['slate_size'],
seed=env_config['seed'],
starting_probs=env_config['starting_probs'])
document_sampler = SimpleSequentialDocumentSampler(seed=env_config['seed'])
simple_seq_env = environment.Environment(
user_model,
document_sampler,
env_config['num_candidates'],
env_config['slate_size'],
resample_documents=env_config['resample_documents'])
return recsim_gym.RecSimGymEnv(simple_seq_env, total_reward,
lambda _, __, ___: None, lambda _, __: None)
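# Illustrative sketch (not part of the original RecSim test module): the config
# keys below are exactly the ones read by create_environment() above; the
# values are arbitrary and the reset/step calls follow the standard Gym
# interface exposed by recsim_gym.RecSimGymEnv.
if __name__ == '__main__':
  env = create_environment({
      'num_candidates': 4,   # must be at least 4 so every action is available
      'slate_size': 1,
      'seed': 0,
      'starting_probs': (1.0, 0.0, 0.0, 0.0, 0.0, 0.0),
      'resample_documents': True,
  })
  observation = env.reset()
  observation, reward, done, info = env.step([0])  # recommend the first candidate
  print('reward after one step:', reward)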
|
apache-2.0
|
jcderr/kubernetes
|
cluster/juju/charms/trusty/kubernetes/hooks/hooks.py
|
93
|
8164
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main hook file that is called by Juju.
"""
import json
import httplib
import os
import time
import socket
import subprocess
import sys
import urlparse
from charmhelpers.core import hookenv, host
from kubernetes_installer import KubernetesInstaller
from path import path
from lib.registrator import Registrator
hooks = hookenv.Hooks()
@hooks.hook('api-relation-changed')
def api_relation_changed():
"""
    On the relation to the api server, this function determines the appropriate
    architecture and the configured version, copies the kubernetes binary files
    from the kubernetes-master charm, and installs them locally on this machine.
"""
hookenv.log('Starting api-relation-changed')
charm_dir = path(hookenv.charm_dir())
    # Get the package architecture, rather than from the kernel (uname -m).
arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
kubernetes_bin_dir = path('/opt/kubernetes/bin')
# Get the version of kubernetes to install.
version = subprocess.check_output(['relation-get', 'version']).strip()
print('Relation version: ', version)
if not version:
print('No version present in the relation.')
exit(0)
version_file = charm_dir / '.version'
if version_file.exists():
previous_version = version_file.text()
print('Previous version: ', previous_version)
if version == previous_version:
exit(0)
# Can not download binaries while the service is running, so stop it.
# TODO: Figure out a better way to handle upgraded kubernetes binaries.
for service in ('kubelet', 'proxy'):
if host.service_running(service):
host.service_stop(service)
command = ['relation-get', 'private-address']
# Get the kubernetes-master address.
server = subprocess.check_output(command).strip()
print('Kubernetes master private address: ', server)
installer = KubernetesInstaller(arch, version, server, kubernetes_bin_dir)
installer.download()
installer.install()
# Write the most recently installed version number to the file.
version_file.write_text(version)
relation_changed()
@hooks.hook('etcd-relation-changed',
'network-relation-changed')
def relation_changed():
"""Connect the parts and go :-)
"""
template_data = get_template_data()
# Check required keys
for k in ('etcd_servers', 'kubeapi_server'):
if not template_data.get(k):
print('Missing data for %s %s' % (k, template_data))
return
print('Running with\n%s' % template_data)
# Setup kubernetes supplemental group
setup_kubernetes_group()
# Register upstart managed services
for n in ('kubelet', 'proxy'):
if render_upstart(n, template_data) or not host.service_running(n):
print('Starting %s' % n)
host.service_restart(n)
# Register machine via api
print('Registering machine')
register_machine(template_data['kubeapi_server'])
# Save the marker (for restarts to detect prev install)
template_data.save()
def get_template_data():
rels = hookenv.relations()
template_data = hookenv.Config()
template_data.CONFIG_FILE_NAME = '.unit-state'
overlay_type = get_scoped_rel_attr('network', rels, 'overlay_type')
etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
# kubernetes master isn't ha yet.
if api_servers:
api_info = api_servers.pop()
api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
template_data['overlay_type'] = overlay_type
template_data['kubelet_bind_addr'] = _bind_addr(
hookenv.unit_private_ip())
template_data['proxy_bind_addr'] = _bind_addr(
hookenv.unit_get('public-address'))
template_data['kubeapi_server'] = api_servers
template_data['etcd_servers'] = ','.join([
'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
template_data['identifier'] = os.environ['JUJU_UNIT_NAME'].replace(
'/', '-')
return _encode(template_data)
def _bind_addr(addr):
if addr.replace('.', '').isdigit():
return addr
try:
return socket.gethostbyname(addr)
except socket.error:
raise ValueError('Could not resolve private address')
def _encode(d):
for k, v in d.items():
if isinstance(v, unicode):
d[k] = v.encode('utf8')
return d
def get_scoped_rel_attr(rel_name, rels, attr):
private_ip = hookenv.unit_private_ip()
for r, data in rels.get(rel_name, {}).items():
for unit_id, unit_data in data.items():
if unit_data.get('private-address') != private_ip:
continue
if unit_data.get(attr):
return unit_data.get(attr)
def get_rel_hosts(rel_name, rels, keys=('private-address',)):
hosts = []
for r, data in rels.get(rel_name, {}).items():
for unit_id, unit_data in data.items():
if unit_id == hookenv.local_unit():
continue
values = [unit_data.get(k) for k in keys]
if not all(values):
continue
hosts.append(len(values) == 1 and values[0] or values)
return hosts
def render_upstart(name, data):
tmpl_path = os.path.join(
os.environ.get('CHARM_DIR'), 'files', '%s.upstart.tmpl' % name)
with open(tmpl_path) as fh:
tmpl = fh.read()
rendered = tmpl % data
tgt_path = '/etc/init/%s.conf' % name
if os.path.exists(tgt_path):
with open(tgt_path) as fh:
contents = fh.read()
if contents == rendered:
return False
with open(tgt_path, 'w') as fh:
fh.write(rendered)
return True
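# Illustrative note (not part of the original charm code): render_upstart()
# above substitutes values with ``tmpl % data`` where ``data`` behaves like a
# dict, so the files/<name>.upstart.tmpl templates are expected to use
# ``%(key)s`` placeholders for the keys built in get_template_data(), e.g.
# %(kubelet_bind_addr)s, %(kubeapi_server)s, %(etcd_servers)s and
# %(identifier)s.  A hypothetical one-line template could look like:
#
#     exec /opt/kubernetes/bin/kubelet --address=%(kubelet_bind_addr)s
#
# The kubelet flag shown is only for illustration; the real templates ship in
# the charm's files/ directory.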
def register_machine(apiserver, retry=False):
parsed = urlparse.urlparse(apiserver)
# identity = hookenv.local_unit().replace('/', '-')
private_address = hookenv.unit_private_ip()
with open('/proc/meminfo') as fh:
info = fh.readline()
mem = info.strip().split(':')[1].strip().split()[0]
cpus = os.sysconf('SC_NPROCESSORS_ONLN')
# https://github.com/kubernetes/kubernetes/blob/master/docs/admin/node.md
registration_request = Registrator()
registration_request.data['kind'] = 'Node'
registration_request.data['id'] = private_address
registration_request.data['name'] = private_address
registration_request.data['metadata']['name'] = private_address
registration_request.data['spec']['capacity']['mem'] = mem + ' K'
registration_request.data['spec']['capacity']['cpu'] = cpus
registration_request.data['spec']['externalID'] = private_address
registration_request.data['status']['hostIP'] = private_address
response, result = registration_request.register(parsed.hostname,
parsed.port,
'/api/v1/nodes')
print(response)
try:
registration_request.command_succeeded(response, result)
except ValueError:
# This happens when we have already registered
# for now this is OK
pass
def setup_kubernetes_group():
output = subprocess.check_output(['groups', 'kubernetes'])
# TODO: check group exists
if 'docker' not in output:
subprocess.check_output(
['usermod', '-a', '-G', 'docker', 'kubernetes'])
if __name__ == '__main__':
hooks.execute(sys.argv)
|
apache-2.0
|
xya/Spark
|
src/spark/gui/filelist.py
|
1
|
9395
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009, 2010 Pierre-André Saulais <[email protected]>
#
# This file is part of the Spark File-transfer Tool.
#
# Spark is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Spark is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Spark; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from spark.gui.resource import iconPath
__all__ = ["FileList", "FileInfoWidget"]
class CustomList(QWidget):
def __init__(self, parent=None):
super(CustomList, self).__init__(parent)
self.setFocusPolicy(Qt.StrongFocus)
self.oddColor = QPalette.Base
self.evenColor = QPalette.AlternateBase
self.setBackgroundRole(self.oddColor)
self.setAutoFillBackground(True)
self.items = []
self.selectedIndex = -1
layout = QVBoxLayout()
layout.setMargin(0)
layout.setSpacing(0)
layout.addStretch()
self.setLayout(layout)
def addItem(self, widget):
self.layout().insertWidget(self.layout().count() - 1, widget)
self.items.append(widget)
self.updateItems()
def clear(self):
""" Remove all the items from the list. """
while True:
item = self.layout().takeAt(0)
if item is None:
break
# prevent the widget's parent from keeping it alive
widget = item.widget()
if widget is not None:
widget.setParent(None)
self.layout().addStretch()
self.items = []
self.selectedIndex = -1
def updateItems(self):
for i in range(0, len(self.items)):
self.updateItemPalette(i)
def updateItemPalette(self, index):
if index == self.selectedIndex:
bgColor = QPalette.Highlight
fgColor = QPalette.HighlightedText
else:
bgColor = (index % 2) and self.evenColor or self.oddColor
fgColor = QPalette.WindowText
item = self.items[index]
item.setForegroundRole(fgColor)
item.setBackgroundRole(bgColor)
def findWidgetIndex(self, e):
# the user might have clicked on a child's child widget
# find the direct child widget
widget = self.childAt(e.x(), e.y())
while widget and not (widget.parentWidget() is self):
widget = widget.parentWidget()
if (widget is None) or not (widget in self.items):
return -1
else:
return self.items.index(widget)
def mousePressEvent(self, e):
selected = self.findWidgetIndex(e)
self.updateSelectedIndex(selected)
def mouseDoubleClickEvent(self, e):
item = self.findWidgetIndex(e)
if item >= 0:
self.emit(SIGNAL("itemActivated"), item)
def keyPressEvent(self, e):
if e.key() == Qt.Key_Up:
selected = max(0, self.selectedIndex - 1)
elif e.key() == Qt.Key_Down:
selected = min(self.selectedIndex + 1, len(self.items) - 1)
elif e.key() == Qt.Key_Home:
selected = 0
elif e.key() == Qt.Key_End:
selected = len(self.items) - 1
else:
return
self.updateSelectedIndex(selected)
def updateSelectedIndex(self, newIndex):
if newIndex != self.selectedIndex:
self.selectedIndex = newIndex
self.emit(SIGNAL("selectionChanged"), newIndex)
self.updateItems()
class FileList(QWidget):
""" Lets the user manipulate a list of files. """
def __init__(self, parent=None):
super(FileList, self).__init__(parent)
self.setFocusPolicy(Qt.StrongFocus)
self.list = CustomList(self)
self.setAcceptDrops(True)
self.connect(self.list, SIGNAL("selectionChanged"), self.updateSelectedItem)
self.connect(self.list, SIGNAL("itemActivated"), self, SIGNAL("itemActivated"))
self.scrollArea = QScrollArea(self)
self.scrollArea.setFrameStyle(QFrame.NoFrame)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollArea.setWidget(self.list)
def focusInEvent(self, e):
self.list.setFocus()
def minimumSizeHint(self):
minHeight = minWidth = 0
for item in self.list.items:
minSize = item.minimumSizeHint()
minWidth = max(minWidth, minSize.width())
minHeight = max(minHeight, minSize.height())
# some horizontal buffer for the scrollbar, and 2 items visible minimum
return QSize(minWidth + 10, minHeight * 2)
def sizeHint(self):
return self.scrollArea.sizeHint()
def resizeEvent(self, e):
self.scrollArea.resize(e.size())
self.ensureItemVisible(self.list.selectedIndex)
def dragEnterEvent(self, e):
if e.mimeData().hasUrls():
e.acceptProposedAction()
def dropEvent(self, e):
if e.mimeData().hasUrls():
e.acceptProposedAction()
for url in e.mimeData().urls():
if url.scheme() == "file":
self.emit(SIGNAL("fileDropped"), unicode(url.toLocalFile()))
def __getitem__(self, index):
if index < 0 or index >= len(self.list.items):
raise IndexError()
return self.list.items[index]
def selectedIndex(self):
return self.list.selectedIndex
def setSelectedIndex(self, index):
self.list.updateSelectedIndex(index)
def updateSelectedItem(self, index):
self.ensureItemVisible(index)
self.emit(SIGNAL("selectionChanged"), index)
def ensureItemVisible(self, index):
if index >= 0:
w = self.list.items[index]
self.scrollArea.ensureWidgetVisible(w, 0, 0)
def addItem(self, widget):
self.list.addItem(widget)
def clear(self):
self.list.clear()
class FileInfoWidget(QWidget):
""" Shows the relevant information about a file or transfer to the user. """
def __init__(self, parent=None):
super(FileInfoWidget, self).__init__(parent)
self.setAutoFillBackground(True)
self.typeIcon = QLabel()
self.typeIcon.setFixedSize(QSize(48, 48))
self.statusIcons = [QLabel() for i in range(0, 3)]
self.fileName = QLabel()
self.transferSize = QLabel()
self.transferTime = QLabel()
self.transferProgress = QProgressBar()
self.transferProgress.setTextVisible(False)
self.transferProgress.setMaximumHeight(16)
self.transferProgress.hide()
status = QHBoxLayout()
for statusIcon in self.statusIcons:
statusIcon.setFixedSize(QSize(16, 16))
status.addWidget(statusIcon)
transferInfo = QHBoxLayout()
transferInfo.setSpacing(20)
transferInfo.addWidget(self.transferSize)
transferInfo.addStretch()
transferInfo.addWidget(self.transferTime)
content = QVBoxLayout()
content.setSpacing(0)
content.addWidget(self.fileName)
content.addLayout(transferInfo)
grid = QGridLayout()
grid.setMargin(8)
grid.addWidget(self.typeIcon, 0, 0, Qt.AlignCenter)
grid.addLayout(content, 0, 1, Qt.AlignVCenter)
grid.addLayout(status, 1, 0)
grid.addWidget(self.transferProgress, 1, 1)
self.setLayout(grid)
def updatePalette(self, newPalette):
self.fileName.setPalette(newPalette)
self.transferSize.setPalette(newPalette)
self.transferTime.setPalette(newPalette)
self.repaint()
def setName(self, name):
self.fileName.setText(name)
def setTransferSize(self, size):
self.transferSize.setText(size)
def setTransferTime(self, time):
self.transferTime.setText(time)
def setTypeIcon(self, icon):
if isinstance(icon, basestring):
self.typeIconSet = QIcon(iconPath(icon))
self.typeIcon.setPixmap(self.typeIconSet.pixmap(48, 48))
else:
self.typeIcon.setPixmap(icon)
def setStatusIcon(self, icon, index):
statusIcon = self.statusIcons[index]
if icon:
statusIcon.setPixmap(QPixmap(iconPath(icon, 16)))
else:
statusIcon.setPixmap(QPixmap())
def setStatusToolTip(self, text, index):
statusIcon = self.statusIcons[index]
statusIcon.setToolTip(text)
def setTransferProgress(self, progress):
if progress is not None:
self.transferProgress.setValue(progress * 100.0)
#FIXME: UI thread getting stuck here
self.transferProgress.show()
else:
self.transferProgress.hide()
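# Minimal usage sketch (not part of the original Spark module): builds a
# FileList with two FileInfoWidget entries and shows it.  It assumes a working
# PyQt4 installation; icons are left unset so no resource files are needed.
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    fileList = FileList()
    for name, size in [("report.pdf", "1.2 MiB"), ("photo.png", "640 KiB")]:
        info = FileInfoWidget()
        info.setName(name)
        info.setTransferSize(size)
        fileList.addItem(info)
    fileList.show()
    sys.exit(app.exec_())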
|
gpl-2.0
|
jaxxer/aivm
|
lib/IP.py
|
1
|
1333
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import random
from IPy import IP as ip
class DHCP:
def __hash__(self):
return hash(random.random())
def __eq__(self, other):
return hash(self) == hash(other)
def __str__(self):
return "<DHCP>"
class _IP:
def __init__(self, address, netmask, gateway):
self.address = ip(address)
self.gateway = ip(gateway)
def __hash__(self):
return hash(self.getAddress()) + hash(self.getLongNetmask()) + hash(self.getGateway())
def __eq__(self, other):
return hash(self) == hash(other)
def __str__(self):
return "<address: %s/%s, gateway: %s>" % (self.getAddress(), self.getShortNetmask(), self.getGateway())
def getAddress(self):
return str(self.address)
def getLongNetmask(self):
return str(self.netmask.netmask())
def getShortNetmask(self):
return str(self.netmask.prefixlen())
def getGateway(self):
return str(self.gateway)
class IPv4(_IP):
def __init__(self, address, netmask, gateway):
_IP.__init__(self, address, netmask, gateway)
if netmask[0] != "/":
netmask = "/%s" % netmask
self.netmask = ip("0%s" % netmask)
class IPv6(_IP):
def __init__(self, address, netmask, gateway):
_IP.__init__(self, address, netmask, gateway)
if netmask[0] != "/":
netmask = "/%s" % netmask
self.netmask = ip("::%s" % netmask)
|
gpl-2.0
|
Jordan-Zhu/RoboVision
|
unsorted/algo-NDT192139AAAD.py
|
1
|
6993
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
from skimage import morphology
from drawlinefeature import DrawLineFeature, drawconvex  # zc
from lineseg import lineseg
from drawedgelist import drawedgelist
from Lseg_to_Lfeat_v2 import Lseg_to_Lfeat_v2 #zc
from LabelLineCurveFeature_v2 import LabelLineCurveFeature_v2 #zc
from merge_lines_v2 import merge_lines# zc
from LabelLineCurveFeature import classify_curves
from zeroElimMedianHoleFill import zeroElimMedianHoleFill
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def showimg(img, im_name='image', type='cv', write=False, imagename='img.png'):
if type == 'plt':
plt.figure()
plt.imshow(img, im_name, interpolation='nearest', aspect='auto')
# plt.imshow(img, 'gray', interpolation='none')
plt.title(im_name)
plt.show()
if write:
plt.savefig(imagename, bbox_inches='tight')
elif type == 'cv':
cv2.imshow(im_name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if write:
cv2.imwrite("../../images/%s", imagename, img)
def create_img(mat):
blank_image = np.zeros((mat.shape[0], mat.shape[1], 3), np.uint8)
# print(blank_image.shape)
mask = np.array(mat * 255, dtype=np.uint8)
masked = np.ma.masked_where(mask <= 0, mask)
# plt.figure()
# plt.imshow(blank_image, 'gray', interpolation='none')
# plt.imshow(masked, 'gray_r', interpolation='none', alpha=1.0)
# plt.title('canny + morphology')
# plt.savefig('foo.png', bbox_inches='tight')
# plt.show()
return mask
def grad_dir(img):
# compute x and y derivatives
# OpenCV's Sobel operator gives better results than numpy gradient
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=-1)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=-1)
# calculate gradient direction angles
# phase needs 64-bit input
angle = cv2.phase(sobelx, sobely)
# truncates number
gradir = np.fix(180 + angle)
return gradir
# Contrast Limited Adaptive Histogram Equalization
# Improves the contrast of our image.
def clahe(img, iter=1):
for i in range(0, iter):
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
img = clahe.apply(img)
return img
def normalize_depth(depthimg, colormap=False):
# Normalize depth image
min, max, minloc, maxloc = cv2.minMaxLoc(depthimg)
adjmap = np.zeros_like(depthimg)
dst = cv2.convertScaleAbs(depthimg, adjmap, 255 / (max - min), -min)
if colormap == True:
return cv2.applyColorMap(dst, cv2.COLORMAP_JET)
else:
return dst
def morpho(img):
# kernel for dilation
kernel = np.ones((7, 7), np.uint8)
dilation = cv2.dilate(img, kernel, iterations=1)
skel = morphology.skeletonize(dilation > 0)
return skel
def edge_detect(depth):
# Gradient of depth img
graddir = grad_dir(depth)
# Threshold image to get it in the RGB color space
dimg1 = (((graddir - graddir.min()) / (graddir.max() - graddir.min())) * 255.9).astype(np.uint8)
# Eliminate salt-and-pepper noise
median = cv2.medianBlur(dimg1, 9)
# Further remove noise while keeping edges sharp
blur = cv2.bilateralFilter(median, 9, 25, 25)
dimg1 = auto_canny(blur)
skel1 = morpho(dimg1)
showimg(create_img(skel1))
# cnt1 = find_contours(create_img(skel1))
# Depth discontinuity
depthimg = normalize_depth(depth)
dimg2 = clahe(depthimg, iter=2)
showimg(dimg2)
dimg2 = auto_canny(dimg2)
skel2 = morpho(dimg2)
# cnt2 = find_contours(create_img(skel2))
# combine both images
dst = (np.logical_or(skel1, skel2)).astype('uint8')
dst = create_img(dst)
return dst
def find_contours(im, mode=cv2.RETR_CCOMP):
# im = cv2.imread('circle.png')
# error: (-215) scn == 3 || scn == 4 in function cv::ipp_cvtColor
# imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# ret, thresh = cv2.threshold(imgray, 127, 255, 0)
if mode == cv2.RETR_CCOMP:
im2, contours, hierarchy = cv2.findContours(im, mode, cv2.CHAIN_APPROX_SIMPLE)
newcontours = []
for i in range(len(contours)):
if hierarchy[0][i, 2] < 0:
newcontours.append(contours[i])
cv2.drawContours(im, newcontours, 2, (0, 255, 0), 1)
return newcontours
else:
im2, contours, hierarchy = cv2.findContours(im, mode, cv2.CHAIN_APPROX_SIMPLE)
# cv2.RETR_EXTERNAL cv2.RETR_CCOMP
# show contours
# cv2.drawContours(im, contours, -1, (0, 255, 0), 2)
#
# # Display the image.
# cv2.imshow("contours", im)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
return contours
if __name__ == '__main__':
# second argument is a flag which specifies the way
# an image should be read. -1 loads image unchanged with alpha channel
depthimg = cv2.imread('img/learn15.png', -1)
colorimg = cv2.imread('img/clearn17.png', 0)
showimg(normalize_depth(depthimg, colormap=True), 'depth')
id = depthimg[100:, 100:480] ## zc crop the region of interest
siz = id.shape ## image size of the region of interest
thresh_m = 10
label_thresh = 11
# edges = edge_detect(depthimg, colorimg)
edges = edge_detect(id) # zc
showimg(edges)
# showimg(cntr1)
# showimg(cntr2)
cntrs = np.asarray(find_contours(edges))
seglist = lineseg(cntrs, tol=2)
drawedgelist(seglist, rowscols=[])
# Get line features for later processing
[linefeature, listpoint] = Lseg_to_Lfeat_v2(seglist, cntrs, siz)
[line_new, listpoint_new, line_merged] = merge_lines(linefeature, listpoint, thresh_m, siz)
# line_new = LabelLineCurveFeature_v2(depthimg, line_new, listpoint_new, label_thresh)
line_new = classify_curves(depthimg, line_new, listpoint_new, label_thresh)
# line_new = LabelLineCurveFeature_v2(depthimg, line_new, listpoint_new, label_thresh)
DrawLineFeature(linefeature,siz,'line features')
drawconvex(line_new, siz, 'convex')
# TO-DO
# - Check LabelLineCurveFeature_v2 with python input
# - Write a function to make a window mask
# - (Section 4) Take non-zero curve features and sort by angle (index 7 in MATLAB)
# Long-term, to make the program easier to read and use
# *- Create a Line object with properties: start, end, object/background, curvature/discontinuity
# distance from another line, check if line is overlapping
|
gpl-3.0
|
KaranToor/MA450
|
google-cloud-sdk/lib/third_party/dulwich/server.py
|
7
|
40495
|
# server.py -- Implementation of the server side git protocols
# Copyright (C) 2008 John Carr <[email protected]>
# Copyright (C) 2011-2012 Jelmer Vernooij <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# or (at your option) any later version of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Git smart network protocol server implementation.
For more detail on the network protocol, see the
Documentation/technical directory in the cgit distribution, and in particular:
* Documentation/technical/protocol-capabilities.txt
* Documentation/technical/pack-protocol.txt
Currently supported capabilities:
* include-tag
* thin-pack
* multi_ack_detailed
* multi_ack
* side-band-64k
* ofs-delta
* no-progress
* report-status
* delete-refs
* shallow
"""
import collections
import os
import socket
import sys
import zlib
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
from dulwich.errors import (
ApplyDeltaError,
ChecksumMismatch,
GitProtocolError,
NotGitRepository,
UnexpectedCommandError,
ObjectFormatException,
)
from dulwich import log_utils
from dulwich.objects import (
Commit,
valid_hexsha,
)
from dulwich.pack import (
write_pack_objects,
)
from dulwich.protocol import (
BufferedPktLineWriter,
CAPABILITY_DELETE_REFS,
CAPABILITY_INCLUDE_TAG,
CAPABILITY_MULTI_ACK_DETAILED,
CAPABILITY_MULTI_ACK,
CAPABILITY_NO_DONE,
CAPABILITY_NO_PROGRESS,
CAPABILITY_OFS_DELTA,
CAPABILITY_REPORT_STATUS,
CAPABILITY_SHALLOW,
CAPABILITY_SIDE_BAND_64K,
CAPABILITY_THIN_PACK,
COMMAND_DEEPEN,
COMMAND_DONE,
COMMAND_HAVE,
COMMAND_SHALLOW,
COMMAND_UNSHALLOW,
COMMAND_WANT,
MULTI_ACK,
MULTI_ACK_DETAILED,
Protocol,
ProtocolFile,
ReceivableProtocol,
SIDE_BAND_CHANNEL_DATA,
SIDE_BAND_CHANNEL_PROGRESS,
SIDE_BAND_CHANNEL_FATAL,
SINGLE_ACK,
TCP_GIT_PORT,
ZERO_SHA,
ack_type,
extract_capabilities,
extract_want_line_capabilities,
)
from dulwich.refs import (
write_info_refs,
)
from dulwich.repo import (
Repo,
)
# cgit sends this depth to "unshallow" a previously shallow repo.
_INFINITE_DEPTH = (1 << 31) - 1
# The shallow request handler raises an exception if the client requests a
# shallow clone depth above this maximum. (However, a depth of _INFINITE_DEPTH
# is always respected.) Warehouse modifies this value at import-time to
# mitigate a performance issue.
# TODO(user): Remove once Skyloft transitions off of Dulwich.
MAX_SHALLOW_CLONE_DEPTH = _INFINITE_DEPTH
logger = log_utils.getLogger(__name__)
class Backend(object):
"""A backend for the Git smart server implementation."""
def open_repository(self, path):
"""Open the repository at a path.
:param path: Path to the repository
:raise NotGitRepository: no git repository was found at path
:return: Instance of BackendRepo
"""
raise NotImplementedError(self.open_repository)
class BackendRepo(object):
"""Repository abstraction used by the Git server.
The methods required here are a subset of those provided by
dulwich.repo.Repo.
"""
object_store = None
refs = None
def get_refs(self):
"""
Get all the refs in the repository
:return: dict of name -> sha
"""
raise NotImplementedError
def get_peeled(self, name):
"""Return the cached peeled value of a ref, if available.
:param name: Name of the ref to peel
    :return: The peeled value of the ref. If the ref is known not to point to
a tag, this will be the SHA the ref refers to. If no cached
information about a tag is available, this method may return None,
but it should attempt to peel the tag if possible.
"""
return None
def fetch_objects(self, determine_wants, graph_walker, progress,
get_tagged=None):
"""
Yield the objects required for a list of commits.
:param progress: is a callback to send progress messages to the client
:param get_tagged: Function that returns a dict of pointed-to sha -> tag
sha for including tags.
"""
raise NotImplementedError
class DictBackend(Backend):
"""Trivial backend that looks up Git repositories in a dictionary."""
def __init__(self, repos):
self.repos = repos
def open_repository(self, path):
logger.debug('Opening repository at %s', path)
try:
return self.repos[path]
except KeyError:
raise NotGitRepository(
"No git repository was found at %(path)s" % dict(path=path)
)
class FileSystemBackend(Backend):
"""Simple backend that looks up Git repositories in the local file system."""
def __init__(self, root=os.sep):
super(FileSystemBackend, self).__init__()
self.root = (os.path.abspath(root) + os.sep).replace(os.sep * 2, os.sep)
def open_repository(self, path):
logger.debug('opening repository at %s', path)
abspath = os.path.abspath(os.path.join(self.root, path)) + os.sep
normcase_abspath = os.path.normcase(abspath)
normcase_root = os.path.normcase(self.root)
if not normcase_abspath.startswith(normcase_root):
raise NotGitRepository("Path %r not inside root %r" % (path, self.root))
return Repo(abspath)
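# Illustrative usage sketch (not part of the original dulwich module): either
# backend maps a request path to a repository.  For a single local repository
# one might write
#
#     backend = DictBackend({'/': Repo('/srv/git/project.git')})
#     repo = backend.open_repository('/')
#
# while FileSystemBackend serves everything under a root directory:
#
#     backend = FileSystemBackend(root='/srv/git')
#     repo = backend.open_repository('project.git')
#
# (Depending on the dulwich version, the path keys may need to be bytes.)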
class Handler(object):
"""Smart protocol command handler base class."""
def __init__(self, backend, proto, http_req=None):
self.backend = backend
self.proto = proto
self.http_req = http_req
self._client_capabilities = None
# Flags needed for the no-done capability
self._done_received = False
@classmethod
def capability_line(cls):
return b"".join([b" " + c for c in cls.capabilities()])
@classmethod
def capabilities(cls):
raise NotImplementedError(cls.capabilities)
@classmethod
def innocuous_capabilities(cls):
return (CAPABILITY_INCLUDE_TAG, CAPABILITY_THIN_PACK,
CAPABILITY_NO_PROGRESS, CAPABILITY_OFS_DELTA)
@classmethod
def required_capabilities(cls):
"""Return a list of capabilities that we require the client to have."""
return []
def set_client_capabilities(self, caps):
allowable_caps = set(self.innocuous_capabilities())
allowable_caps.update(self.capabilities())
for cap in caps:
if cap not in allowable_caps:
raise GitProtocolError('Client asked for capability %s that '
'was not advertised.' % cap)
for cap in self.required_capabilities():
if cap not in caps:
raise GitProtocolError('Client does not support required '
'capability %s.' % cap)
self._client_capabilities = set(caps)
logger.info('Client capabilities: %s', caps)
def has_capability(self, cap):
if self._client_capabilities is None:
raise GitProtocolError('Server attempted to access capability %s '
'before asking client' % cap)
return cap in self._client_capabilities
def notify_done(self):
self._done_received = True
class UploadPackHandler(Handler):
"""Protocol handler for uploading a pack to the server."""
def __init__(self, backend, args, proto, http_req=None,
advertise_refs=False):
Handler.__init__(self, backend, proto, http_req=http_req)
self.repo = backend.open_repository(args[0])
self._graph_walker = None
self.advertise_refs = advertise_refs
# A state variable for denoting that the have list is still
# being processed, and the client is not accepting any other
# data (such as side-band, see the progress method here).
self._processing_have_lines = False
@classmethod
def capabilities(cls):
return (CAPABILITY_MULTI_ACK_DETAILED, CAPABILITY_MULTI_ACK,
CAPABILITY_SIDE_BAND_64K, CAPABILITY_THIN_PACK,
CAPABILITY_OFS_DELTA, CAPABILITY_NO_PROGRESS,
CAPABILITY_INCLUDE_TAG, CAPABILITY_SHALLOW, CAPABILITY_NO_DONE)
@classmethod
def required_capabilities(cls):
return (CAPABILITY_SIDE_BAND_64K, CAPABILITY_THIN_PACK, CAPABILITY_OFS_DELTA)
def progress(self, message):
if self.has_capability(CAPABILITY_NO_PROGRESS) or self._processing_have_lines:
return
self.proto.write_sideband(SIDE_BAND_CHANNEL_PROGRESS, message)
def get_tagged(self, refs=None, repo=None):
"""Get a dict of peeled values of tags to their original tag shas.
:param refs: dict of refname -> sha of possible tags; defaults to all of
the backend's refs.
:param repo: optional Repo instance for getting peeled refs; defaults to
the backend's repo, if available
:return: dict of peeled_sha -> tag_sha, where tag_sha is the sha of a
tag whose peeled value is peeled_sha.
"""
if not self.has_capability(CAPABILITY_INCLUDE_TAG):
return {}
if refs is None:
refs = self.repo.get_refs()
if repo is None:
repo = getattr(self.repo, "repo", None)
if repo is None:
# Bail if we don't have a Repo available; this is ok since
# clients must be able to handle if the server doesn't include
# all relevant tags.
# TODO: fix behavior when missing
return {}
tagged = {}
for name, sha in refs.items():
peeled_sha = repo.get_peeled(name)
if peeled_sha != sha:
tagged[peeled_sha] = sha
return tagged
def handle(self):
write = lambda x: self.proto.write_sideband(SIDE_BAND_CHANNEL_DATA, x)
graph_walker = ProtocolGraphWalker(self, self.repo.object_store,
self.repo.get_peeled)
objects_iter = self.repo.fetch_objects(
graph_walker.determine_wants, graph_walker, self.progress,
get_tagged=self.get_tagged)
# Note the fact that client is only processing responses related
# to the have lines it sent, and any other data (including side-
    # band) will be considered a fatal error.
self._processing_have_lines = True
# Did the process short-circuit (e.g. in a stateless RPC call)? Note
# that the client still expects a 0-object pack in most cases.
# Also, if it also happens that the object_iter is instantiated
# with a graph walker with an implementation that talks over the
# wire (which is this instance of this class) this will actually
# iterate through everything and write things out to the wire.
if len(objects_iter) == 0:
return
# The provided haves are processed, and it is safe to send side-
# band data now.
self._processing_have_lines = False
if not graph_walker.handle_done(
not self.has_capability(CAPABILITY_NO_DONE), self._done_received):
return
self.progress(b"dul-daemon says what\n")
self.progress(("counting objects: %d, done.\n" % len(objects_iter)).encode('ascii'))
write_pack_objects(ProtocolFile(None, write), objects_iter)
self.progress(b"how was that, then?\n")
# we are done
self.proto.write_pkt_line(None)
def _split_proto_line(line, allowed):
"""Split a line read from the wire.
:param line: The line read from the wire.
:param allowed: An iterable of command names that should be allowed.
Command names not listed below as possible return values will be
ignored. If None, any commands from the possible return values are
allowed.
:return: a tuple having one of the following forms:
('want', obj_id)
('have', obj_id)
('done', None)
(None, None) (for a flush-pkt)
:raise UnexpectedCommandError: if the line cannot be parsed into one of the
allowed return values.
"""
if not line:
fields = [None]
else:
fields = line.rstrip(b'\n').split(b' ', 1)
command = fields[0]
if allowed is not None and command not in allowed:
raise UnexpectedCommandError(command)
if len(fields) == 1 and command in (COMMAND_DONE, None):
return (command, None)
elif len(fields) == 2:
if command in (COMMAND_WANT, COMMAND_HAVE, COMMAND_SHALLOW,
COMMAND_UNSHALLOW):
if not valid_hexsha(fields[1]):
raise GitProtocolError("Invalid sha")
return tuple(fields)
elif command == COMMAND_DEEPEN:
return command, int(fields[1])
raise GitProtocolError('Received invalid line from client: %r' % line)
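# Illustrative examples (not part of the original dulwich module) of what
# _split_proto_line() returns for typical pkt-line payloads; b'1' * 40 stands
# in for a full 40-character hex SHA:
#
#     _split_proto_line(b'want ' + b'1' * 40 + b'\n', None)  -> (b'want', b'1' * 40)
#     _split_proto_line(b'deepen 1\n', None)                  -> (b'deepen', 1)
#     _split_proto_line(b'done\n', None)                      -> (b'done', None)
#     _split_proto_line(b'', None)                            -> (None, None)  # flush-pkt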
def _find_shallow(store, heads, depth):
"""Find shallow commits according to a given depth.
:param store: An ObjectStore for looking up objects.
:param heads: Iterable of head SHAs to start walking from.
:param depth: The depth of ancestors to include. A depth of one includes
only the heads themselves.
:return: A tuple of (shallow, not_shallow), sets of SHAs that should be
considered shallow and unshallow according to the arguments. Note that
these sets may overlap if a commit is reachable along multiple paths.
"""
parents = {}
def get_parents(sha):
result = parents.get(sha, None)
if not result:
result = store[sha].parents
parents[sha] = result
return result
todo = [] # stack of (sha, depth)
for head_sha in heads:
obj = store.peel_sha(head_sha)
if isinstance(obj, Commit):
todo.append((obj.id, 1))
not_shallow = set()
shallow = set()
while todo:
sha, cur_depth = todo.pop()
if cur_depth < depth:
not_shallow.add(sha)
new_depth = cur_depth + 1
todo.extend((p, new_depth) for p in get_parents(sha))
else:
shallow.add(sha)
return shallow, not_shallow
def _want_satisfied(store, haves, want, earliest):
o = store[want]
pending = collections.deque([o])
while pending:
commit = pending.popleft()
if commit.id in haves:
return True
if commit.type_name != b"commit":
# non-commit wants are assumed to be satisfied
continue
for parent in commit.parents:
parent_obj = store[parent]
# TODO: handle parents with later commit times than children
if parent_obj.commit_time >= earliest:
pending.append(parent_obj)
return False
def _all_wants_satisfied(store, haves, wants):
"""Check whether all the current wants are satisfied by a set of haves.
:param store: Object store to retrieve objects from
:param haves: A set of commits we know the client has.
:param wants: A set of commits the client wants
:note: Wants are specified with set_wants rather than passed in since
in the current interface they are determined outside this class.
"""
haves = set(haves)
if haves:
earliest = min([store[h].commit_time for h in haves])
else:
earliest = 0
unsatisfied_wants = set()
for want in wants:
if not _want_satisfied(store, haves, want, earliest):
return False
return True
class DisallowedShallowDepthError(ValueError):
"""A shallow clone with a disallowed depth was requested."""
def __init__(self, requested_depth, max_depth):
message = 'Client requested shallow clone depth %d (max is %d).' % (
requested_depth, max_depth)
super(DisallowedShallowDepthError, self).__init__(message)
self.requested_depth = requested_depth
self.max_depth = max_depth
class ProtocolGraphWalker(object):
"""A graph walker that knows the git protocol.
As a graph walker, this class implements ack(), next(), and reset(). It
also contains some base methods for interacting with the wire and walking
the commit tree.
The work of determining which acks to send is passed on to the
implementation instance stored in _impl. The reason for this is that we do
not know at object creation time what ack level the protocol requires. A
call to set_ack_level() is required to set up the implementation, before any
calls to next() or ack() are made.
"""
def __init__(self, handler, object_store, get_peeled):
self.handler = handler
self.store = object_store
self.get_peeled = get_peeled
self.proto = handler.proto
self.http_req = handler.http_req
self.advertise_refs = handler.advertise_refs
self._wants = []
self.shallow = set()
self.client_shallow = set()
self.unshallow = set()
self._cached = False
self._cache = []
self._cache_index = 0
self._impl = None
def determine_wants(self, heads):
"""Determine the wants for a set of heads.
The given heads are advertised to the client, who then specifies which
refs he wants using 'want' lines. This portion of the protocol is the
same regardless of ack type, and in fact is used to set the ack type of
the ProtocolGraphWalker.
If the client has the 'shallow' capability, this method also reads and
responds to the 'shallow' and 'deepen' lines from the client. These are
not part of the wants per se, but they set up necessary state for
walking the graph. Additionally, later code depends on this method
consuming everything up to the first 'have' line.
:param heads: a dict of refname->SHA1 to advertise
:return: a list of SHA1s requested by the client
"""
values = set(heads.values())
if self.advertise_refs or not self.http_req:
for i, (ref, sha) in enumerate(sorted(heads.items())):
line = sha + b' ' + ref
if not i:
line += b'\x00' + self.handler.capability_line()
self.proto.write_pkt_line(line + b'\n')
peeled_sha = self.get_peeled(ref)
if peeled_sha != sha:
self.proto.write_pkt_line(peeled_sha + b' ' + ref + b'^{}\n')
# i'm done..
self.proto.write_pkt_line(None)
if self.advertise_refs:
return []
    # Now the client will send its 'want' commands
want = self.proto.read_pkt_line()
if not want:
return []
line, caps = extract_want_line_capabilities(want)
self.handler.set_client_capabilities(caps)
self.set_ack_type(ack_type(caps))
allowed = (COMMAND_WANT, COMMAND_SHALLOW, COMMAND_DEEPEN, None)
command, sha = _split_proto_line(line, allowed)
want_revs = []
while command == COMMAND_WANT:
if sha not in values:
raise GitProtocolError(
'Client wants invalid object %s' % sha)
want_revs.append(sha)
command, sha = self.read_proto_line(allowed)
self.set_wants(want_revs)
if command in (COMMAND_SHALLOW, COMMAND_DEEPEN):
self.unread_proto_line(command, sha)
self._handle_shallow_request(want_revs)
if self.http_req and self.proto.eof():
# The client may close the socket at this point, expecting a
# flush-pkt from the server. We might be ready to send a packfile at
# this point, so we need to explicitly short-circuit in this case.
return []
return want_revs
def unread_proto_line(self, command, value):
if isinstance(value, int):
value = str(value).encode('ascii')
self.proto.unread_pkt_line(command + b' ' + value)
def ack(self, have_ref):
if len(have_ref) != 40:
raise ValueError("invalid sha %r" % have_ref)
return self._impl.ack(have_ref)
def reset(self):
self._cached = True
self._cache_index = 0
def next(self):
if not self._cached:
if not self._impl and self.http_req:
return None
return next(self._impl)
self._cache_index += 1
if self._cache_index > len(self._cache):
return None
return self._cache[self._cache_index]
__next__ = next
def read_proto_line(self, allowed):
"""Read a line from the wire.
:param allowed: An iterable of command names that should be allowed.
:return: A tuple of (command, value); see _split_proto_line.
:raise UnexpectedCommandError: If an error occurred reading the line.
"""
return _split_proto_line(self.proto.read_pkt_line(), allowed)
def _handle_shallow_request(self, wants):
shallow_commands = (COMMAND_DEEPEN, COMMAND_SHALLOW, None)
while True:
command, val = self.read_proto_line(shallow_commands)
            if command is None:
# Update self.shallow instead of reassigning it since we passed
# a reference to it before this method was called.
self.shallow.update(self.client_shallow)
# The client is done sending shallows, but it did not request a
# positive depth. So we do not send any shallow/unshallow lines.
return
if command == COMMAND_DEEPEN:
depth = val
break
self.client_shallow.add(val)
self.read_proto_line((None,)) # consume client's flush-pkt
if depth == _INFINITE_DEPTH:
# A client is performing an unshallow operation. All their shallow
# commits will be unshallowed, and no commits will become shallow.
not_shallow = self.client_shallow
shallow = set()
elif depth > MAX_SHALLOW_CLONE_DEPTH:
raise DisallowedShallowDepthError(depth, MAX_SHALLOW_CLONE_DEPTH)
else:
shallow, not_shallow = _find_shallow(self.store, wants, depth)
# Update self.shallow instead of reassigning it since we passed a
# reference to it before this method was called.
self.shallow.update(shallow - not_shallow)
new_shallow = self.shallow - self.client_shallow
unshallow = self.unshallow = not_shallow & self.client_shallow
for sha in sorted(new_shallow):
self.proto.write_pkt_line(COMMAND_SHALLOW + b' ' + sha)
for sha in sorted(unshallow):
self.proto.write_pkt_line(COMMAND_UNSHALLOW + b' ' + sha)
self.proto.write_pkt_line(None)
def notify_done(self):
# relay the message down to the handler.
self.handler.notify_done()
def send_ack(self, sha, ack_type=b''):
if ack_type:
ack_type = b' ' + ack_type
self.proto.write_pkt_line(b'ACK ' + sha + ack_type + b'\n')
def send_nak(self):
self.proto.write_pkt_line(b'NAK\n')
def handle_done(self, done_required, done_received):
# Delegate this to the implementation.
return self._impl.handle_done(done_required, done_received)
def set_wants(self, wants):
self._wants = wants
def all_wants_satisfied(self, haves):
"""Check whether all the current wants are satisfied by a set of haves.
:param haves: A set of commits we know the client has.
:note: Wants are specified with set_wants rather than passed in since
in the current interface they are determined outside this class.
"""
return _all_wants_satisfied(self.store, haves, self._wants)
def set_ack_type(self, ack_type):
impl_classes = {
MULTI_ACK: MultiAckGraphWalkerImpl,
MULTI_ACK_DETAILED: MultiAckDetailedGraphWalkerImpl,
SINGLE_ACK: SingleAckGraphWalkerImpl,
}
self._impl = impl_classes[ack_type](self)
_GRAPH_WALKER_COMMANDS = (COMMAND_HAVE, COMMAND_DONE, None)
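# Illustrative sketch (added commentary, not upstream code): an abridged
# multi-ack-detailed negotiation as driven by the walker implementations
# below. SHA values are placeholders.
#
#   C: want <sha-x> multi_ack_detailed ...   (handled by determine_wants)
#   C: have <sha-a>                          (MultiAckDetailedGraphWalkerImpl.next)
#   S: ACK <sha-a> common                    (ack)
#   C: flush-pkt
#   S: ACK <sha-a> ready / NAK
#   C: done
#   S: ACK <sha-a>                           (handle_done)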
class SingleAckGraphWalkerImpl(object):
"""Graph walker implementation that speaks the single-ack protocol."""
def __init__(self, walker):
self.walker = walker
self._common = []
def ack(self, have_ref):
if not self._common:
self.walker.send_ack(have_ref)
self._common.append(have_ref)
def next(self):
command, sha = self.walker.read_proto_line(_GRAPH_WALKER_COMMANDS)
if command in (None, COMMAND_DONE):
# defer the handling of done
self.walker.notify_done()
return None
elif command == COMMAND_HAVE:
return sha
__next__ = next
def handle_done(self, done_required, done_received):
if not self._common:
self.walker.send_nak()
if done_required and not done_received:
# we are not done, especially when done is required; skip
# the pack for this request and especially do not handle
# the done.
return False
if not done_received and not self._common:
# Okay we are not actually done then since the walker picked
# up no haves. This is usually triggered when client attempts
# to pull from a source that has no common base_commit.
# See: test_server.MultiAckDetailedGraphWalkerImplTestCase.\
# test_multi_ack_stateless_nodone
return False
return True
class MultiAckGraphWalkerImpl(object):
"""Graph walker implementation that speaks the multi-ack protocol."""
def __init__(self, walker):
self.walker = walker
self._found_base = False
self._common = []
def ack(self, have_ref):
self._common.append(have_ref)
if not self._found_base:
self.walker.send_ack(have_ref, b'continue')
if self.walker.all_wants_satisfied(self._common):
self._found_base = True
# else we blind ack within next
def next(self):
while True:
command, sha = self.walker.read_proto_line(_GRAPH_WALKER_COMMANDS)
if command is None:
self.walker.send_nak()
# in multi-ack mode, a flush-pkt indicates the client wants to
# flush but more have lines are still coming
continue
elif command == COMMAND_DONE:
self.walker.notify_done()
return None
elif command == COMMAND_HAVE:
if self._found_base:
# blind ack
self.walker.send_ack(sha, b'continue')
return sha
__next__ = next
def handle_done(self, done_required, done_received):
if done_required and not done_received:
# we are not done, especially when done is required; skip
# the pack for this request and especially do not handle
# the done.
return False
if not done_received and not self._common:
# Okay we are not actually done then since the walker picked
# up no haves. This is usually triggered when client attempts
# to pull from a source that has no common base_commit.
# See: test_server.MultiAckDetailedGraphWalkerImplTestCase.\
# test_multi_ack_stateless_nodone
return False
# don't nak unless no common commits were found, even if not
# everything is satisfied
if self._common:
self.walker.send_ack(self._common[-1])
else:
self.walker.send_nak()
return True
class MultiAckDetailedGraphWalkerImpl(object):
"""Graph walker implementation speaking the multi-ack-detailed protocol."""
def __init__(self, walker):
self.walker = walker
self._common = []
def ack(self, have_ref):
# Should only be called iff have_ref is common
self._common.append(have_ref)
self.walker.send_ack(have_ref, b'common')
def next(self):
while True:
command, sha = self.walker.read_proto_line(_GRAPH_WALKER_COMMANDS)
if command is None:
if self.walker.all_wants_satisfied(self._common):
self.walker.send_ack(self._common[-1], b'ready')
self.walker.send_nak()
if self.walker.http_req:
                    # In the HTTP version of this request a flush-pkt always
                    # signifies the end of the request, so we also return
                    # nothing here as if we were done. We may not actually be
                    # done: that depends on whether the no-done capability was
                    # specified, and is handled in handle_done (which may or
                    # may not call post_nodone_check accordingly).
return None
elif command == COMMAND_DONE:
# Let the walker know that we got a done.
self.walker.notify_done()
break
elif command == COMMAND_HAVE:
# return the sha and let the caller ACK it with the
# above ack method.
return sha
# don't nak unless no common commits were found, even if not
# everything is satisfied
__next__ = next
def handle_done(self, done_required, done_received):
if done_required and not done_received:
# we are not done, especially when done is required; skip
# the pack for this request and especially do not handle
# the done.
return False
if not done_received and not self._common:
# Okay we are not actually done then since the walker picked
# up no haves. This is usually triggered when client attempts
# to pull from a source that has no common base_commit.
# See: test_server.MultiAckDetailedGraphWalkerImplTestCase.\
# test_multi_ack_stateless_nodone
return False
# don't nak unless no common commits were found, even if not
# everything is satisfied
if self._common:
self.walker.send_ack(self._common[-1])
else:
self.walker.send_nak()
return True
class ReceivePackHandler(Handler):
"""Protocol handler for downloading a pack from the client."""
def __init__(self, backend, args, proto, http_req=None,
advertise_refs=False):
Handler.__init__(self, backend, proto, http_req=http_req)
self.repo = backend.open_repository(args[0])
self.advertise_refs = advertise_refs
@classmethod
def capabilities(cls):
return (CAPABILITY_REPORT_STATUS, CAPABILITY_DELETE_REFS,
CAPABILITY_OFS_DELTA, CAPABILITY_SIDE_BAND_64K, CAPABILITY_NO_DONE)
def _apply_pack(self, refs):
all_exceptions = (IOError, OSError, ChecksumMismatch, ApplyDeltaError,
AssertionError, socket.error, zlib.error,
ObjectFormatException)
status = []
will_send_pack = False
for command in refs:
if command[1] != ZERO_SHA:
will_send_pack = True
if will_send_pack:
# TODO: more informative error messages than just the exception string
try:
recv = getattr(self.proto, "recv", None)
self.repo.object_store.add_thin_pack(self.proto.read, recv)
status.append((b'unpack', b'ok'))
except all_exceptions as e:
status.append((b'unpack', str(e).replace('\n', '')))
# The pack may still have been moved in, but it may contain broken
# objects. We trust a later GC to clean it up.
else:
            # The git protocol expects a status entry for the unpack step
            # even if no pack data has been sent.
status.append((b'unpack', b'ok'))
for oldsha, sha, ref in refs:
ref_status = b'ok'
try:
if sha == ZERO_SHA:
                    if CAPABILITY_DELETE_REFS not in self.capabilities():
raise GitProtocolError(
'Attempted to delete refs without delete-refs '
'capability.')
try:
del self.repo.refs[ref]
except all_exceptions:
ref_status = b'failed to delete'
else:
try:
self.repo.refs[ref] = sha
except all_exceptions:
ref_status = b'failed to write'
except KeyError as e:
ref_status = b'bad ref'
status.append((ref, ref_status))
return status
def _report_status(self, status):
if self.has_capability(CAPABILITY_SIDE_BAND_64K):
writer = BufferedPktLineWriter(
lambda d: self.proto.write_sideband(SIDE_BAND_CHANNEL_DATA, d))
write = writer.write
def flush():
writer.flush()
self.proto.write_pkt_line(None)
else:
write = self.proto.write_pkt_line
flush = lambda: None
for name, msg in status:
if name == b'unpack':
write(b'unpack ' + msg + b'\n')
elif msg == b'ok':
write(b'ok ' + name + b'\n')
else:
write(b'ng ' + name + b' ' + msg + b'\n')
write(None)
flush()
def handle(self):
if self.advertise_refs or not self.http_req:
refs = sorted(self.repo.get_refs().items())
if refs:
self.proto.write_pkt_line(
refs[0][1] + b' ' + refs[0][0] + b'\0' +
self.capability_line() + b'\n')
for i in range(1, len(refs)):
ref = refs[i]
self.proto.write_pkt_line(ref[1] + b' ' + ref[0] + b'\n')
else:
self.proto.write_pkt_line(ZERO_SHA + b" capabilities^{}\0" +
self.capability_line())
self.proto.write_pkt_line(None)
if self.advertise_refs:
return
client_refs = []
ref = self.proto.read_pkt_line()
        # If ref is None, the client does not want to send us anything.
if ref is None:
return
ref, caps = extract_capabilities(ref)
self.set_client_capabilities(caps)
# client will now send us a list of (oldsha, newsha, ref)
while ref:
client_refs.append(ref.split())
ref = self.proto.read_pkt_line()
        # The backend can now deal with these refs and read a pack using self.read.
status = self._apply_pack(client_refs)
# when we have read all the pack from the client, send a status report
# if the client asked for it
if self.has_capability(CAPABILITY_REPORT_STATUS):
self._report_status(status)
# Default handler classes for git services.
DEFAULT_HANDLERS = {
b'git-upload-pack': UploadPackHandler,
b'git-receive-pack': ReceivePackHandler,
}
class TCPGitRequestHandler(SocketServer.StreamRequestHandler):
def __init__(self, handlers, *args, **kwargs):
self.handlers = handlers
SocketServer.StreamRequestHandler.__init__(self, *args, **kwargs)
def handle(self):
proto = ReceivableProtocol(self.connection.recv, self.wfile.write)
command, args = proto.read_cmd()
logger.info('Handling %s request, args=%s', command, args)
cls = self.handlers.get(command, None)
if not callable(cls):
raise GitProtocolError('Invalid service %s' % command)
h = cls(self.server.backend, args, proto)
h.handle()
class TCPGitServer(SocketServer.TCPServer):
allow_reuse_address = True
serve = SocketServer.TCPServer.serve_forever
def _make_handler(self, *args, **kwargs):
return TCPGitRequestHandler(self.handlers, *args, **kwargs)
def __init__(self, backend, listen_addr, port=TCP_GIT_PORT, handlers=None):
self.handlers = dict(DEFAULT_HANDLERS)
if handlers is not None:
self.handlers.update(handlers)
self.backend = backend
logger.info('Listening for TCP connections on %s:%d', listen_addr, port)
SocketServer.TCPServer.__init__(self, (listen_addr, port),
self._make_handler)
def verify_request(self, request, client_address):
logger.info('Handling request from %s', client_address)
return True
def handle_error(self, request, client_address):
logger.exception('Exception happened during processing of request '
'from %s', client_address)
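# Illustrative sketch (hypothetical helper; assumes this module's DictBackend
# and dulwich.repo.Repo, with a placeholder repository path): starting a TCP
# git server for a single repository.
def _example_tcp_server(path='/srv/git/example.git'):
    from dulwich.repo import Repo  # local import to keep the sketch self-contained
    backend = DictBackend({b'/': Repo(path)})
    # serve() is an alias for SocketServer.TCPServer.serve_forever (see below).
    TCPGitServer(backend, 'localhost').serve()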
def main(argv=sys.argv):
"""Entry point for starting a TCP git server."""
import optparse
parser = optparse.OptionParser()
parser.add_option("-l", "--listen_address", dest="listen_address",
default="localhost",
help="Binding IP address.")
parser.add_option("-p", "--port", dest="port", type=int,
default=TCP_GIT_PORT,
help="Binding TCP port.")
options, args = parser.parse_args(argv)
log_utils.default_logging_config()
if len(args) > 1:
gitdir = args[1]
else:
gitdir = '.'
from dulwich import porcelain
porcelain.daemon(gitdir, address=options.listen_address,
port=options.port)
def serve_command(handler_cls, argv=sys.argv, backend=None, inf=sys.stdin,
outf=sys.stdout):
"""Serve a single command.
This is mostly useful for the implementation of commands used by e.g. git+ssh.
:param handler_cls: `Handler` class to use for the request
:param argv: execv-style command-line arguments. Defaults to sys.argv.
:param backend: `Backend` to use
:param inf: File-like object to read from, defaults to standard input.
:param outf: File-like object to write to, defaults to standard output.
:return: Exit code for use with sys.exit. 0 on success, 1 on failure.
"""
if backend is None:
backend = FileSystemBackend()
def send_fn(data):
outf.write(data)
outf.flush()
proto = Protocol(inf.read, send_fn)
handler = handler_cls(backend, argv[1:], proto)
# FIXME: Catch exceptions and write a single-line summary to outf.
handler.handle()
return 0
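# Illustrative sketch (hypothetical wrapper, mirroring how a git+ssh style
# shell might invoke upload-pack): serve a single upload-pack request on
# stdin/stdout using serve_command() above. The path is a placeholder.
def _example_serve_upload_pack(path='/srv/git/example.git'):
    # serve_command defaults to a FileSystemBackend and speaks the protocol
    # on sys.stdin/sys.stdout; the return value is a process exit code.
    return serve_command(UploadPackHandler, argv=['upload-pack', path])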
def generate_info_refs(repo):
"""Generate an info refs file."""
refs = repo.get_refs()
return write_info_refs(refs, repo.object_store)
def generate_objects_info_packs(repo):
"""Generate an index for for packs."""
for pack in repo.object_store.packs:
yield b'P ' + pack.data.filename.encode(sys.getfilesystemencoding()) + b'\n'
def update_server_info(repo):
"""Generate server info for dumb file access.
This generates info/refs and objects/info/packs,
similar to "git update-server-info".
"""
repo._put_named_file(os.path.join('info', 'refs'),
b"".join(generate_info_refs(repo)))
repo._put_named_file(os.path.join('objects', 'info', 'packs'),
b"".join(generate_objects_info_packs(repo)))
if __name__ == '__main__':
main()
|
apache-2.0
|
CantemoInternal/django-comments-xtd
|
django_comments_xtd/tests/views.py
|
1
|
9462
|
from __future__ import unicode_literals
from datetime import datetime
import re
import threading
# from django.conf import settings
from django.contrib import comments
from django.contrib.comments.signals import comment_was_posted
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import mail
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import HttpResponse
from django.test import TestCase
# from django.test.utils import override_settings
from django_comments_xtd import signals, signed
from django_comments_xtd.conf import settings
from django_comments_xtd.models import XtdComment, TmpXtdComment
from django_comments_xtd.tests.models import Article
from django_comments_xtd.views import on_comment_was_posted
from django_comments_xtd.utils import mail_sent_queue
def dummy_view(request, *args, **kwargs):
return HttpResponse("Got it")
class OnCommentWasPostedTestCase(TestCase):
def setUp(self):
self.article = Article.objects.create(title="September",
slug="september",
body="What I did on September...")
self.form = comments.get_form()(self.article)
def post_valid_data(self, wait_mail=True):
data = {"name":"Bob", "email":"[email protected]", "followup": True,
"reply_to": 0, "level": 1, "order": 1,
"comment":"Es war einmal iene kleine..."}
data.update(self.form.initial)
self.response = self.client.post(reverse("comments-post-comment"),
data=data, follow=True)
if wait_mail and mail_sent_queue.get(block=True):
pass
def test_post_as_authenticated_user(self):
auth_user = User.objects.create_user("bob", "[email protected]", "pwd")
self.client.login(username="bob", password="pwd")
self.assertEqual(len(mail.outbox), 0)
self.post_valid_data(wait_mail=False)
# no confirmation email sent as user is authenticated
self.assertEqual(len(mail.outbox), 0)
def test_confirmation_email_is_sent(self):
self.assertEqual(len(mail.outbox), 0)
self.post_valid_data()
self.assertEqual(len(mail.outbox), 1)
self.assertTemplateUsed(self.response, "comments/posted.html")
class ConfirmCommentTestCase(TestCase):
def setUp(self):
self.article = Article.objects.create(title="September",
slug="september",
body="What I did on September...")
self.form = comments.get_form()(self.article)
data = {"name": "Bob", "email": "[email protected]", "followup": True,
"reply_to": 0, "level": 1, "order": 1,
"comment": "Es war einmal iene kleine..." }
data.update(self.form.initial)
self.response = self.client.post(reverse("comments-post-comment"),
data=data)
if mail_sent_queue.get(block=True):
pass
self.key = str(re.search(r'http://.+/confirm/(?P<key>[\S]+)',
mail.outbox[0].body).group("key"))
def get_confirm_comment_url(self, key):
self.response = self.client.get(reverse("comments-xtd-confirm",
kwargs={'key': key}),
follow=True)
def test_404_on_bad_signature(self):
self.get_confirm_comment_url(self.key[:-1])
self.assertContains(self.response, "404", status_code=404)
def test_consecutive_confirmation_url_visits_fail(self):
        # test that consecutive visits to the same confirmation URL produce
# an Http 404 code, as the comment has already been verified in the
# first visit
self.get_confirm_comment_url(self.key)
self.get_confirm_comment_url(self.key)
self.assertContains(self.response, "404", status_code=404)
def test_signal_receiver_may_discard_the_comment(self):
        # test that receivers of the confirmation_received signal may return
        # False and thus cause the discarded template to be rendered
def on_signal(sender, comment, request, **kwargs):
return False
self.assertEqual(len(mail.outbox), 1) # sent during setUp
signals.confirmation_received.connect(on_signal)
self.get_confirm_comment_url(self.key)
self.assertEqual(len(mail.outbox), 1) # mailing avoided by on_signal
self.assertTemplateUsed(self.response,
"django_comments_xtd/discarded.html")
def test_comment_is_created_and_view_redirect(self):
        # testing that visiting a correct confirmation URL creates an XtdComment
# and redirects to the article detail page
Site.objects.get_current().domain = "testserver" # django bug #7743
self.get_confirm_comment_url(self.key)
data = signed.loads(self.key, extra_key=settings.COMMENTS_XTD_SALT)
try:
comment = XtdComment.objects.get(
content_type=data["content_type"],
user_name=data["user_name"],
user_email=data["user_email"],
submit_date=data["submit_date"])
except:
comment = None
        self.assertTrue(comment is not None)
self.assertRedirects(self.response, self.article.get_absolute_url())
def test_notify_comment_followers(self):
# send a couple of comments to the article with followup=True and check
# that when the second comment is confirmed a followup notification
# email is sent to the user who sent the first comment
self.assertEqual(len(mail.outbox), 1)
self.get_confirm_comment_url(self.key)
self.assertEqual(len(mail.outbox), 1) # no comment followers yet
# send 2nd comment
self.form = comments.get_form()(self.article)
data = {"name":"Alice", "email":"[email protected]", "followup": True,
"reply_to": 0, "level": 1, "order": 1,
"comment":"Es war einmal iene kleine..." }
data.update(self.form.initial)
self.response = self.client.post(reverse("comments-post-comment"),
data=data)
if mail_sent_queue.get(block=True):
pass
self.assertEqual(len(mail.outbox), 2)
self.key = re.search(r'http://.+/confirm/(?P<key>[\S]+)',
mail.outbox[1].body).group("key")
self.get_confirm_comment_url(self.key)
if mail_sent_queue.get(block=True):
pass
self.assertEqual(len(mail.outbox), 3)
self.assert_(mail.outbox[2].to == ["[email protected]"])
self.assert_(mail.outbox[2].body.find("There is a new comment following up yours.") > -1)
class ReplyNoCommentTestCase(TestCase):
def test_reply_non_existing_comment_raises_404(self):
response = self.client.get(reverse("comments-xtd-reply",
kwargs={"cid": 1}))
self.assertContains(response, "404", status_code=404)
class ReplyCommentTestCase(TestCase):
def setUp(self):
article = Article.objects.create(title="September",
slug="september",
body="What I did on September...")
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 1 to article, level 0
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="comment 1 to article",
submit_date = datetime.now())
# post Comment 2 to article, level 1
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="comment 1 to comment 1",
submit_date = datetime.now(),
parent_id = 1)
# post Comment 3 to article, level 2 (max according to test settings)
XtdComment.objects.create(content_type = article_ct,
object_pk = article.id,
content_object = article,
site = site,
comment ="comment 1 to comment 1",
submit_date = datetime.now(),
parent_id = 2)
def test_reply_renders_max_thread_level_template(self):
response = self.client.get(reverse("comments-xtd-reply",
kwargs={"cid": 3}))
self.assertTemplateUsed(response,
"django_comments_xtd/max_thread_level.html")
|
bsd-2-clause
|
hbrunn/OCB
|
openerp/service/report.py
|
324
|
5148
|
# -*- coding: utf-8 -*-
import base64
import logging
import sys
import threading
import openerp
import openerp.report
from openerp import tools
import security
_logger = logging.getLogger(__name__)
# TODO: set a maximum report number per user to avoid DOS attacks
#
# Report state:
# False -> True
self_reports = {}
self_id = 0
self_id_protect = threading.Semaphore()
def dispatch(method, params):
(db, uid, passwd ) = params[0:3]
threading.current_thread().uid = uid
params = params[3:]
if method not in ['report', 'report_get', 'render_report']:
raise KeyError("Method not supported %s" % method)
security.check(db,uid,passwd)
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
fn = globals()['exp_' + method]
res = fn(db, uid, *params)
openerp.modules.registry.RegistryManager.signal_caches_change(db)
return res
def exp_render_report(db, uid, object, ids, datas=None, context=None):
if not datas:
datas={}
if not context:
context={}
self_id_protect.acquire()
global self_id
self_id += 1
id = self_id
self_id_protect.release()
self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
cr = openerp.registry(db).cursor()
try:
result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
if not result:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
self_reports[id]['result'] = result
self_reports[id]['format'] = format
self_reports[id]['state'] = True
except Exception, exception:
_logger.exception('Exception: %s\n', exception)
if hasattr(exception, 'name') and hasattr(exception, 'value'):
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
else:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
self_reports[id]['state'] = True
cr.commit()
cr.close()
return _check_report(id)
def exp_report(db, uid, object, ids, datas=None, context=None):
if not datas:
datas={}
if not context:
context={}
self_id_protect.acquire()
global self_id
self_id += 1
id = self_id
self_id_protect.release()
self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
def go(id, uid, ids, datas, context):
with openerp.api.Environment.manage():
cr = openerp.registry(db).cursor()
try:
result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
if not result:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
self_reports[id]['result'] = result
self_reports[id]['format'] = format
self_reports[id]['state'] = True
except Exception, exception:
_logger.exception('Exception: %s\n', exception)
if hasattr(exception, 'name') and hasattr(exception, 'value'):
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
else:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
self_reports[id]['state'] = True
cr.commit()
cr.close()
return True
threading.Thread(target=go, args=(id, uid, ids, datas, context)).start()
return id
def _check_report(report_id):
result = self_reports[report_id]
exc = result['exception']
if exc:
raise openerp.osv.orm.except_orm(exc.message, exc.traceback)
res = {'state': result['state']}
if res['state']:
if tools.config['reportgz']:
import zlib
res2 = zlib.compress(result['result'])
res['code'] = 'zlib'
else:
#CHECKME: why is this needed???
if isinstance(result['result'], unicode):
res2 = result['result'].encode('latin1', 'replace')
else:
res2 = result['result']
if res2:
res['result'] = base64.encodestring(res2)
res['format'] = result['format']
del self_reports[report_id]
return res
def exp_report_get(db, uid, report_id):
if report_id in self_reports:
if self_reports[report_id]['uid'] == uid:
return _check_report(report_id)
else:
raise Exception, 'AccessDenied'
else:
raise Exception, 'ReportNotFound'
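# Illustrative sketch (hypothetical helper, not part of the service API;
# credentials, model name and ids are placeholders): how a caller might drive
# the asynchronous report flow exposed by dispatch()/exp_report()/exp_report_get().
def _example_render_report_async(db, uid, passwd, ids):
    import time
    report_id = dispatch('report', (db, uid, passwd, 'sale.order', ids))
    while True:
        res = dispatch('report_get', (db, uid, passwd, report_id))
        if res['state']:
            # 'result' is base64-encoded; it may additionally be zlib-compressed
            # when res.get('code') == 'zlib' (the 'reportgz' option).
            return base64.decodestring(res['result'])
        time.sleep(1)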
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
mozilla/bedrock
|
tests/pages/regions/send_to_device.py
|
4
|
1547
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as expected
from pages.base import BaseRegion
class SendToDevice(BaseRegion):
_root_locator = (By.CSS_SELECTOR, '.send-to-device')
_email_locator = (By.CSS_SELECTOR, '.send-to-device-input')
_submit_button_locator = (By.CSS_SELECTOR, '.send-to-device .mzp-c-button')
_thank_you_locator = (By.CSS_SELECTOR, '.thank-you')
_error_list_locator = (By.CLASS_NAME, 'mzp-c-form-errors')
@property
def is_form_error_displayed(self):
return self.is_element_displayed(*self._error_list_locator)
def type_email(self, value):
self.find_element(*self._email_locator).send_keys(value)
def click_send(self, expected_result=None):
self.scroll_element_into_view(*self._submit_button_locator).click()
if expected_result == 'error':
self.wait.until(expected.visibility_of_element_located(self._error_list_locator))
else:
self.wait.until(expected.visibility_of_element_located(self._thank_you_locator))
@property
def send_successful(self):
el = self.selenium.find_element(*self._thank_you_locator)
return el.is_displayed()
@property
def is_displayed(self):
return self.page.is_element_displayed(*self._root_locator)
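# Illustrative sketch (hypothetical test usage; the page object and address
# are placeholders): a functional test might drive this region as follows.
#
#     form = page.send_to_device            # a page exposing this region
#     form.type_email('[email protected]')
#     form.click_send()
#     assert form.send_successful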
|
mpl-2.0
|
marcinzaremba/libcloud
|
libcloud/test/compute/test_ibm_sce.py
|
29
|
14632
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from libcloud.utils.py3 import httplib
import sys
from libcloud.compute.types import InvalidCredsError
from libcloud.compute.drivers.ibm_sce import IBMNodeDriver as IBM
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import IBM_PARAMS
class IBMTests(unittest.TestCase, TestCaseMixin):
"""
Tests the IBM SmartCloud Enterprise driver.
"""
def setUp(self):
IBM.connectionCls.conn_classes = (None, IBMMockHttp)
IBMMockHttp.type = None
self.driver = IBM(*IBM_PARAMS)
def test_auth(self):
IBMMockHttp.type = 'UNAUTHORIZED'
try:
self.driver.list_nodes()
except InvalidCredsError:
e = sys.exc_info()[1]
self.assertTrue(isinstance(e, InvalidCredsError))
self.assertEqual(e.value, '401: Unauthorized')
else:
self.fail('test should have thrown')
def test_list_nodes(self):
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 3)
self.assertEqual(ret[0].id, '26557')
self.assertEqual(ret[0].name, 'Insight Instance')
self.assertEqual(ret[0].public_ips, ['129.33.196.128'])
self.assertEqual(ret[0].private_ips, []) # Private IPs not supported
self.assertEqual(ret[1].public_ips, []) # Node is non-active (no IP)
self.assertEqual(ret[1].private_ips, [])
self.assertEqual(ret[1].id, '28193')
def test_list_sizes(self):
ret = self.driver.list_sizes()
self.assertEqual(len(ret), 9) # 9 instance configurations supported
self.assertEqual(ret[0].id, 'BRZ32.1/2048/60*175')
self.assertEqual(ret[1].id, 'BRZ64.2/4096/60*500*350')
self.assertEqual(ret[2].id, 'COP32.1/2048/60')
self.assertEqual(ret[0].name, 'Bronze 32 bit')
self.assertEqual(ret[0].disk, None)
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(len(ret), 21)
self.assertEqual(ret[10].name, "Rational Asset Manager 7.2.0.1")
self.assertEqual(ret[9].id, '10002573')
def test_list_locations(self):
ret = self.driver.list_locations()
self.assertEqual(len(ret), 6)
self.assertEqual(ret[0].id, '41')
self.assertEqual(ret[0].name, 'Raleigh')
self.assertEqual(ret[0].country, 'U.S.A')
def test_create_node(self):
# Test creation of node
IBMMockHttp.type = 'CREATE'
image = NodeImage(id=11, name='Rational Insight', driver=self.driver)
size = NodeSize('LARGE', 'LARGE', None, None, None, None, self.driver)
location = NodeLocation('1', 'POK', 'US', driver=self.driver)
ret = self.driver.create_node(name='RationalInsight4',
image=image,
size=size,
location=location,
publicKey='MyPublicKey',
configurationData={
'insight_admin_password': 'myPassword1',
'db2_admin_password': 'myPassword2',
'report_user_password': 'myPassword3'})
self.assertTrue(isinstance(ret, Node))
self.assertEqual(ret.name, 'RationalInsight4')
# Test creation attempt with invalid location
IBMMockHttp.type = 'CREATE_INVALID'
location = NodeLocation('3', 'DOESNOTEXIST', 'US', driver=self.driver)
try:
ret = self.driver.create_node(name='RationalInsight5',
image=image,
size=size,
location=location,
publicKey='MyPublicKey',
configurationData={
'insight_admin_password': 'myPassword1',
'db2_admin_password': 'myPassword2',
'report_user_password': 'myPassword3'})
except Exception:
e = sys.exc_info()[1]
self.assertEqual(e.args[0], 'Error 412: No DataCenter with id: 3')
else:
self.fail('test should have thrown')
def test_destroy_node(self):
# Delete existent node
nodes = self.driver.list_nodes() # retrieves 3 nodes
self.assertEqual(len(nodes), 3)
IBMMockHttp.type = 'DELETE'
toDelete = nodes[1]
ret = self.driver.destroy_node(toDelete)
self.assertTrue(ret)
# Delete non-existent node
IBMMockHttp.type = 'DELETED'
nodes = self.driver.list_nodes() # retrieves 2 nodes
self.assertEqual(len(nodes), 2)
try:
self.driver.destroy_node(toDelete) # delete non-existent node
except Exception:
e = sys.exc_info()[1]
self.assertEqual(e.args[0], 'Error 404: Invalid Instance ID 28193')
else:
self.fail('test should have thrown')
def test_reboot_node(self):
nodes = self.driver.list_nodes()
IBMMockHttp.type = 'REBOOT'
# Reboot active node
self.assertEqual(len(nodes), 3)
ret = self.driver.reboot_node(nodes[0])
self.assertTrue(ret)
# Reboot inactive node
try:
ret = self.driver.reboot_node(nodes[1])
except Exception:
e = sys.exc_info()[1]
self.assertEqual(
e.args[0], 'Error 412: Instance must be in the Active state')
else:
self.fail('test should have thrown')
def test_list_volumes(self):
ret = self.driver.list_volumes()
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0].name, 'libcloudvol')
self.assertEqual(ret[0].extra['location'], '141')
self.assertEqual(ret[0].size, '2048')
self.assertEqual(ret[0].id, '39281')
def test_attach_volume(self):
vols = self.driver.list_volumes()
nodes = self.driver.list_nodes()
IBMMockHttp.type = 'ATTACH'
ret = self.driver.attach_volume(nodes[0], vols[0])
self.assertTrue(ret)
def test_create_volume(self):
IBMMockHttp.type = 'CREATE'
ret = self.driver.create_volume('256',
'test-volume',
location='141',
format='RAW',
offering_id='20001208')
self.assertEqual(ret.id, '39293')
self.assertEqual(ret.size, '256')
self.assertEqual(ret.name, 'test-volume')
self.assertEqual(ret.extra['location'], '141')
def test_destroy_volume(self):
vols = self.driver.list_volumes()
IBMMockHttp.type = 'DESTROY'
ret = self.driver.destroy_volume(vols[0])
self.assertTrue(ret)
def test_ex_destroy_image(self):
image = self.driver.list_images()
IBMMockHttp.type = 'DESTROY'
ret = self.driver.ex_destroy_image(image[0])
self.assertTrue(ret)
def test_detach_volume(self):
nodes = self.driver.list_nodes()
vols = self.driver.list_volumes()
IBMMockHttp.type = 'DETACH'
ret = self.driver.detach_volume(nodes[0], vols[0])
self.assertTrue(ret)
def test_ex_allocate_address(self):
IBMMockHttp.type = 'ALLOCATE'
ret = self.driver.ex_allocate_address('141', '20001223')
self.assertEqual(ret.id, '292795')
self.assertEqual(ret.state, '0')
self.assertEqual(ret.options['location'], '141')
def test_ex_delete_address(self):
IBMMockHttp.type = 'DELETE'
ret = self.driver.ex_delete_address('292795')
self.assertTrue(ret)
def test_ex_list_addresses(self):
ret = self.driver.ex_list_addresses()
self.assertEqual(ret[0].ip, '170.225.160.218')
self.assertEqual(ret[0].options['location'], '141')
self.assertEqual(ret[0].id, '292795')
self.assertEqual(ret[0].state, '2')
def test_ex_list_storage_offerings(self):
ret = self.driver.ex_list_storage_offerings()
self.assertEqual(ret[0].name, 'Small')
self.assertEqual(ret[0].location, '61')
self.assertEqual(ret[0].id, '20001208')
class IBMMockHttp(MockHttp):
fixtures = ComputeFileFixtures('ibm_sce')
def _computecloud_enterprise_api_rest_20100331_instances(self, method, url, body, headers):
body = self.fixtures.load('instances.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_DELETED(self, method, url, body, headers):
body = self.fixtures.load('instances_deleted.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.UNAUTHORIZED])
def _computecloud_enterprise_api_rest_20100331_offerings_image(self, method, url, body, headers):
body = self.fixtures.load('images.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_locations(self, method, url, body, headers):
body = self.fixtures.load('locations.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_26557_REBOOT(self, method, url, body, headers):
body = self.fixtures.load('reboot_active.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_28193_REBOOT(self, method, url, body, headers):
return (412, 'Error 412: Instance must be in the Active state', {}, 'Precondition Failed')
def _computecloud_enterprise_api_rest_20100331_instances_28193_DELETE(self, method, url, body, headers):
body = self.fixtures.load('delete.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_28193_DELETED(self, method, url, body, headers):
return (404, 'Error 404: Invalid Instance ID 28193', {}, 'Precondition Failed')
def _computecloud_enterprise_api_rest_20100331_instances_CREATE(self, method, url, body, headers):
body = self.fixtures.load('create.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_CREATE_INVALID(self, method, url, body, headers):
return (412, 'Error 412: No DataCenter with id: 3', {}, 'Precondition Failed')
def _computecloud_enterprise_api_rest_20100331_storage(self, method, url, body, headers):
body = self.fixtures.load('list_volumes.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_26557_ATTACH(self, method, url, body, headers):
body = self.fixtures.load('attach_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_storage_CREATE(self, method, url, body, headers):
body = self.fixtures.load('create_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_storage_39281_DESTROY(self, method, url, body, headers):
body = self.fixtures.load('destroy_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_offerings_image_2_DESTROY(self, method, url, body, headers):
body = self.fixtures.load('destroy_image.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_instances_26557_DETACH(self, method, url, body, headers):
body = self.fixtures.load('detach_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_addresses_ALLOCATE(self, method, url, body, headers):
body = self.fixtures.load('allocate_address.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_addresses_292795_DELETE(self, method, url, body, headers):
body = self.fixtures.load('delete_address.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_addresses(self, method, url, body, headers):
body = self.fixtures.load('list_addresses.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _computecloud_enterprise_api_rest_20100331_offerings_storage(self, method, url, body, headers):
body = self.fixtures.load('list_storage_offerings.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# This is only to accommodate the response tests built into test\__init__.py
def _computecloud_enterprise_api_rest_20100331_instances_26557(self, method, url, body, headers):
if method == 'DELETE':
body = self.fixtures.load('delete.xml')
else:
body = self.fixtures.load('reboot_active.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
apache-2.0
|
Inspq/ansible
|
lib/ansible/modules/network/nxos/nxos_install_os.py
|
55
|
7774
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_install_os
extends_documentation_fragment: nxos
short_description: Set boot options like boot image and kickstart image.
description:
- Install an operating system by setting the boot options like boot
image and kickstart image.
notes:
- The module will fail due to timeout issues, but the install will go on
anyway. Ansible's block and rescue can be leveraged to handle this kind
of failure and check actual module results. See EXAMPLE for more about
this. The first task on the rescue block is needed to make sure the
device has completed all checks and it started to reboot. The second
task is needed to wait for the device to come back up. The last two tasks
are used to verify the installation process was successful.
- Do not include full file paths, just the name of the file(s) stored on
the top level flash directory.
- You must know if your platform supports taking a kickstart image as a
parameter. If supplied but not supported, errors may occur.
- This module attempts to install the software immediately,
which may trigger a reboot.
- In check mode, the module tells you if the current boot images are set
to the desired images.
author:
- Jason Edelman (@jedelman8)
    - Gabriele Gerbino (@GGabriele)
version_added: 2.2
options:
system_image_file:
description:
- Name of the system (or combined) image file on flash.
required: true
kickstart_image_file:
description:
- Name of the kickstart image file on flash.
required: false
default: null
'''
EXAMPLES = '''
- block:
- name: Install OS
nxos_install_os:
system_image_file: nxos.7.0.3.I2.2d.bin
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
transport: nxapi
rescue:
- name: Wait for device to perform checks
wait_for:
port: 22
state: stopped
timeout: 300
delay: 60
host: "{{ inventory_hostname }}"
- name: Wait for device to come back up
wait_for:
port: 22
state: started
timeout: 300
delay: 60
host: "{{ inventory_hostname }}"
- name: Check installed OS
nxos_command:
commands:
- show version
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
transport: nxapi
register: output
- assert:
that:
- output['stdout'][0]['kickstart_ver_str'] == '7.0(3)I4(1)'
'''
RETURN = '''
install_state:
description: Boot and install information.
returned: always
type: dictionary
sample: {
"kick": "n5000-uk9-kickstart.7.2.1.N1.1.bin",
"sys": "n5000-uk9.7.2.1.N1.1.bin",
"status": "This is the log of last installation.\n
Continuing with installation process, please wait.\n
The login will be disabled until the installation is completed.\n
Performing supervisor state verification. \n
SUCCESS\n
Supervisor non-disruptive upgrade successful.\n
Install has been successful.\n",
}
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module, command_type='cli_show_ascii'):
cmds = [command]
if module.params['transport'] == 'cli':
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
body = run_commands(module, cmds)
return body
def get_boot_options(module):
"""Get current boot variables
like system image and kickstart image.
Returns:
A dictionary, e.g. { 'kick': router_kick.img, 'sys': 'router_sys.img'}
"""
command = 'show boot'
body = execute_show_command(command, module)[0]
boot_options_raw_text = body.split('Boot Variables on next reload')[1]
if 'kickstart' in boot_options_raw_text:
kick_regex = r'kickstart variable = bootflash:/(\S+)'
sys_regex = r'system variable = bootflash:/(\S+)'
kick = re.search(kick_regex, boot_options_raw_text).group(1)
sys = re.search(sys_regex, boot_options_raw_text).group(1)
retdict = dict(kick=kick, sys=sys)
else:
nxos_regex = r'NXOS variable = bootflash:/(\S+)'
nxos = re.search(nxos_regex, boot_options_raw_text).group(1)
retdict = dict(sys=nxos)
command = 'show install all status'
retdict['status'] = execute_show_command(command, module)[0]
return retdict
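# Illustrative sketch (sample file names taken from the RETURN documentation
# above; the status text is abridged): the dictionary shape returned by
# get_boot_options() on a kickstart-based platform.
#
#   {'kick': 'n5000-uk9-kickstart.7.2.1.N1.1.bin',
#    'sys': 'n5000-uk9.7.2.1.N1.1.bin',
#    'status': '... output of "show install all status" ...'}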
def already_set(current_boot_options, system_image_file, kickstart_image_file):
return current_boot_options.get('sys') == system_image_file \
and current_boot_options.get('kick') == kickstart_image_file
def set_boot_options(module, image_name, kickstart=None):
"""Set boot variables
like system image and kickstart image.
    Args:
        image_name: The main system image file name.
    Keyword Args:
        kickstart: Name of the kickstart image file, for platforms that use one.
"""
commands = ['terminal dont-ask']
if kickstart is None:
commands.append('install all nxos %s' % image_name)
else:
commands.append(
'install all system %s kickstart %s' % (image_name, kickstart))
load_config(module, commands)
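# Illustrative sketch (image names reused from the documentation above): the
# CLI command sequence set_boot_options() sends in each case.
#
#   system image only:
#       terminal dont-ask
#       install all nxos nxos.7.0.3.I2.2d.bin
#   system image plus kickstart:
#       terminal dont-ask
#       install all system n5000-uk9.7.2.1.N1.1.bin kickstart n5000-uk9-kickstart.7.2.1.N1.1.bin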
def main():
argument_spec = dict(
system_image_file=dict(required=True),
kickstart_image_file=dict(required=False),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
system_image_file = module.params['system_image_file']
kickstart_image_file = module.params['kickstart_image_file']
if kickstart_image_file == 'null':
kickstart_image_file = None
current_boot_options = get_boot_options(module)
changed = False
if not already_set(current_boot_options,
system_image_file,
kickstart_image_file):
changed = True
if not module.check_mode and changed is True:
set_boot_options(module,
system_image_file,
                         kickstart=kickstart_image_file)
        # Re-read the boot variables so the verification below checks the
        # state after the install rather than an undefined value.
        install_state = get_boot_options(module)
if not already_set(install_state,
system_image_file,
kickstart_image_file):
module.fail_json(msg='Install not successful',
install_state=install_state)
else:
install_state = current_boot_options
module.exit_json(changed=changed, install_state=install_state, warnings=warnings)
if __name__ == '__main__':
main()
|
gpl-3.0
|
Anaethelion/django-mapentity
|
mapentity/decorators.py
|
1
|
6019
|
from functools import wraps
from django.utils.decorators import available_attrs, method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.http import last_modified as cache_last_modified
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import PermissionDenied
from django.core.cache import get_cache
from django.contrib.auth.decorators import user_passes_test
from django.contrib import messages
from django.views.generic.edit import BaseUpdateView
from django.views.generic.detail import BaseDetailView
from . import app_settings
from .helpers import user_has_perm
from . import models as mapentity_models
def view_permission_required(login_url=None, raise_exception=None):
if raise_exception is None:
raise_exception = (login_url is None)
def check_perms(request, user, perm):
# Check both authenticated and anonymous
if user_has_perm(user, perm):
return True
if not user.is_anonymous() and raise_exception:
raise PermissionDenied
# As the last resort, redirects
msg = _(u'Access to the requested resource is restricted. You have been redirected.')
messages.warning(request, unicode(msg))
return False
def decorator(view_func):
def _wrapped_view(self, request, *args, **kwargs):
perm = self.get_view_perm()
redirect_url = login_url
if login_url in mapentity_models.ENTITY_KINDS:
is_handle_object = issubclass(self.__class__, (BaseDetailView, BaseUpdateView))
if is_handle_object:
view_subject = self.get_object()
else:
view_subject = self.get_model()
get_url_method = getattr(view_subject, 'get_{0}_url'.format(login_url))
redirect_url = get_url_method()
has_perm_decorator = user_passes_test(lambda u: check_perms(request, u, perm),
login_url=redirect_url,
redirect_field_name=None)
cbv_user_has_perm = method_decorator(has_perm_decorator)
@cbv_user_has_perm
def decorated(self, request, *args, **kwargs):
return view_func(self, request, *args, **kwargs)
return decorated(self, request, *args, **kwargs)
return _wrapped_view
return decorator
def view_cache_latest():
def decorator(view_func):
def _wrapped_view(self, request, *args, **kwargs):
view_model = self.get_model()
cache_latest = cache_last_modified(lambda x: view_model.latest_updated())
cbv_cache_latest = method_decorator(cache_latest)
# The first decorator forces browser's cache revalidation.
# The second one allows browser's cache revalidation.
@method_decorator(never_cache)
@cbv_cache_latest
def decorated(self, request, *args, **kwargs):
return view_func(self, request, *args, **kwargs)
return decorated(self, request, *args, **kwargs)
return _wrapped_view
return decorator
def view_cache_response_content():
def decorator(view_func):
def _wrapped_method(self, *args, **kwargs):
response_class = self.response_class
response_kwargs = dict()
            # Do not (re)store cache if filters are present
params = self.request.GET.keys()
with_filters = all([not p.startswith('_') for p in params])
if len(params) > 0 and with_filters:
return view_func(self, *args, **kwargs)
# Otherwise, restore from cache or store view result
if hasattr(self, 'view_cache_key'):
geojson_lookup = self.view_cache_key()
else:
view_model = self.get_model()
language = self.request.LANGUAGE_CODE
latest_saved = view_model.latest_updated()
if latest_saved:
geojson_lookup = '%s_%s_%s_json_layer' % (
language,
view_model._meta.module_name,
latest_saved.strftime('%y%m%d%H%M%S%f')
)
else:
geojson_lookup = None
geojson_cache = get_cache(app_settings['GEOJSON_LAYERS_CACHE_BACKEND'])
if geojson_lookup:
content = geojson_cache.get(geojson_lookup)
if content:
return response_class(content=content, **response_kwargs)
response = view_func(self, *args, **kwargs)
if geojson_lookup:
geojson_cache.set(geojson_lookup, response.content)
return response
return _wrapped_method
return decorator
def save_history():
"""
A decorator for class-based views, which save navigation history in
session.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(self, request, *args, **kwargs):
result = view_func(self, request, *args, **kwargs)
# Stack list of request paths
history = request.session.get('history', [])
# Remove previous visits of this page
history = [h for h in history if h['path'] != request.path]
# Add this one and remove extras
model = self.model or self.queryset.model
history.insert(0, dict(title=unicode(self.get_title()),
path=request.path,
modelname=unicode(model._meta.object_name.lower())))
if len(history) > app_settings['HISTORY_ITEMS_MAX']:
history.pop()
request.session['history'] = history
return result
return _wrapped_view
return decorator
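# Illustrative sketch (hypothetical view; assumes it provides get_view_perm(),
# get_title() and a model attribute, as the decorators above expect): how these
# decorators are typically stacked on a class-based view's dispatch method.
#
#     class ExampleDetail(BaseDetailView):
#         @view_permission_required(login_url='detail')
#         @save_history()
#         def dispatch(self, request, *args, **kwargs):
#             return super(ExampleDetail, self).dispatch(request, *args, **kwargs)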
|
bsd-3-clause
|
wdv4758h/ZipPy
|
lib-python/3/sqlite3/test/transactions.py
|
51
|
7338
|
#-*- coding: ISO-8859-1 -*-
# pysqlite2/test/transactions.py: tests transactions
#
# Copyright (C) 2005-2007 Gerhard Häring <[email protected]>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import os, unittest
import sqlite3 as sqlite
def get_db_path():
return "sqlite_testdb"
class TransactionTests(unittest.TestCase):
def setUp(self):
try:
os.remove(get_db_path())
except OSError:
pass
self.con1 = sqlite.connect(get_db_path(), timeout=0.1)
self.cur1 = self.con1.cursor()
self.con2 = sqlite.connect(get_db_path(), timeout=0.1)
self.cur2 = self.con2.cursor()
def tearDown(self):
self.cur1.close()
self.con1.close()
self.cur2.close()
self.con2.close()
try:
os.unlink(get_db_path())
except OSError:
pass
def CheckDMLdoesAutoCommitBefore(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.cur1.execute("create table test2(j)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
def CheckInsertStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 0)
def CheckUpdateStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.commit()
self.cur1.execute("update test set i=6")
self.cur2.execute("select i from test")
res = self.cur2.fetchone()[0]
self.assertEqual(res, 5)
def CheckDeleteStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.commit()
self.cur1.execute("delete from test")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
def CheckReplaceStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.commit()
self.cur1.execute("replace into test(i) values (6)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
self.assertEqual(res[0][0], 5)
def CheckToggleAutoCommit(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.isolation_level = None
self.assertEqual(self.con1.isolation_level, None)
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
self.con1.isolation_level = "DEFERRED"
self.assertEqual(self.con1.isolation_level , "DEFERRED")
self.cur1.execute("insert into test(i) values (5)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
def CheckRaiseTimeout(self):
if sqlite.sqlite_version_info < (3, 2, 2):
# This will fail (hang) on earlier versions of sqlite.
# Determine exact version it was fixed. 3.2.1 hangs.
return
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
try:
self.cur2.execute("insert into test(i) values (5)")
self.fail("should have raised an OperationalError")
except sqlite.OperationalError:
pass
except:
self.fail("should have raised an OperationalError")
def CheckLocking(self):
"""
This tests the improved concurrency with pysqlite 2.3.4. You needed
to roll back con2 before you could commit con1.
"""
if sqlite.sqlite_version_info < (3, 2, 2):
# This will fail (hang) on earlier versions of sqlite.
# Determine exact version it was fixed. 3.2.1 hangs.
return
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
try:
self.cur2.execute("insert into test(i) values (5)")
self.fail("should have raised an OperationalError")
except sqlite.OperationalError:
pass
except:
self.fail("should have raised an OperationalError")
# NO self.con2.rollback() HERE!!!
self.con1.commit()
def CheckRollbackCursorConsistency(self):
"""
Checks if cursors on the connection are set into a "reset" state
when a rollback is done on the connection.
"""
con = sqlite.connect(":memory:")
cur = con.cursor()
cur.execute("create table test(x)")
cur.execute("insert into test(x) values (5)")
cur.execute("select 1 union select 2 union select 3")
con.rollback()
try:
cur.fetchall()
self.fail("InterfaceError should have been raised")
except sqlite.InterfaceError as e:
pass
except:
self.fail("InterfaceError should have been raised")
class SpecialCommandTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
self.cur = self.con.cursor()
def CheckVacuum(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("vacuum")
def CheckDropTable(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("drop table test")
def CheckPragma(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("pragma count_changes=1")
def tearDown(self):
self.cur.close()
self.con.close()
def suite():
default_suite = unittest.makeSuite(TransactionTests, "Check")
special_command_suite = unittest.makeSuite(SpecialCommandTests, "Check")
return unittest.TestSuite((default_suite, special_command_suite))
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
bsd-3-clause
|
jhbradley/moose
|
framework/contrib/nsiqcppstyle/rules/RULE_3_2_H_do_not_use_underbars_for_cpp_filename.py
|
43
|
1450
|
"""
Do not use underbars for cpp filename.
Only alphabets and numbers can be used for a cpp filename.
== Violation ==
/testdir/test_1.cpp <== Violation. _ is used.
/testdir1/_test1.cpp <== Violation. _ is used.
== Good ==
testdir/test.cpp
testdir1/test_1.c <== Don't care. It's a c file.
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, filename, dirname) :
if Search("[_]", filename) and filename[filename.rfind("."):] in (".cpp", ".cxx") :
nsiqcppstyle_reporter.Error(DummyToken(lexer.filename, "", 0,0), __name__,
"Do not use underbar for cpp file name (%s)." % filename)
ruleManager.AddFileStartRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddFileStartRule(RunRule)
def test1(self):
self.Analyze("test/this_file.cpp", "")
assert CheckErrorContent(__name__)
def test2(self):
self.Analyze("test/thisfile.cpp", "")
assert not CheckErrorContent(__name__)
def test3(self):
self.Analyze("test/this_file.c", "")
assert not CheckErrorContent(__name__)
|
lgpl-2.1
|
dkillick/iris
|
lib/iris/tests/integration/test_pp.py
|
5
|
31904
|
# (C) British Crown Copyright 2013 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Integration tests for loading and saving PP files."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import os
from cf_units import Unit
from iris.aux_factory import HybridHeightFactory, HybridPressureFactory
from iris.coords import AuxCoord, CellMethod, DimCoord
from iris.cube import Cube
import iris.fileformats.pp
import iris.fileformats.pp_load_rules
from iris.fileformats.pp_save_rules import verify
from iris.exceptions import IgnoreCubeException
from iris.tests import mock
from iris.fileformats.pp import load_pairs_from_fields
import iris.util
class TestVertical(tests.IrisTest):
def _test_coord(self, cube, point, bounds=None, **kwargs):
coords = cube.coords(**kwargs)
self.assertEqual(len(coords), 1, 'failed to find exactly one coord'
' using: {}'.format(kwargs))
self.assertEqual(coords[0].points, point)
if bounds is not None:
self.assertArrayEqual(coords[0].bounds, [bounds])
def test_soil_level_round_trip(self):
# Use pp.load_cubes() to convert a fake PPField into a Cube.
# NB. Use MagicMock so that SplittableInt header items, such as
# LBCODE, support len().
soil_level = 1234
mock_data = np.zeros(1)
mock_core_data = mock.MagicMock(return_value=mock_data)
field = mock.MagicMock(lbvc=6, lblev=soil_level,
stash=iris.fileformats.pp.STASH(1, 0, 9),
lbuser=[0] * 7, lbrsvd=[0] * 4,
brsvd=[0] * 4, brlev=0,
core_data=mock_core_data,
realised_dtype=mock_data.dtype)
load = mock.Mock(return_value=iter([field]))
with mock.patch('iris.fileformats.pp.load', new=load) as load:
cube = next(iris.fileformats.pp.load_cubes('DUMMY'))
self.assertIn('soil', cube.standard_name)
self._test_coord(cube, soil_level, long_name='soil_model_level_number')
# Now use the save rules to convert the Cube back into a PPField.
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field.brsvd = [None] * 4
field.brlev = None
field = verify(cube, field)
# Check the vertical coordinate is as originally specified.
self.assertEqual(field.lbvc, 6)
self.assertEqual(field.lblev, soil_level)
self.assertEqual(field.blev, soil_level)
self.assertEqual(field.brsvd[0], 0)
self.assertEqual(field.brlev, 0)
def test_soil_depth_round_trip(self):
# Use pp.load_cubes() to convert a fake PPField into a Cube.
# NB. Use MagicMock so that SplittableInt header items, such as
# LBCODE, support len().
lower, point, upper = 1.2, 3.4, 5.6
brsvd = [lower, 0, 0, 0]
mock_data = np.zeros(1)
mock_core_data = mock.MagicMock(return_value=mock_data)
field = mock.MagicMock(lbvc=6, blev=point,
stash=iris.fileformats.pp.STASH(1, 0, 9),
lbuser=[0] * 7, lbrsvd=[0] * 4,
brsvd=brsvd, brlev=upper,
core_data=mock_core_data,
realised_dtype=mock_data.dtype)
load = mock.Mock(return_value=iter([field]))
with mock.patch('iris.fileformats.pp.load', new=load) as load:
cube = next(iris.fileformats.pp.load_cubes('DUMMY'))
self.assertIn('soil', cube.standard_name)
self._test_coord(cube, point, bounds=[lower, upper],
standard_name='depth')
# Now use the save rules to convert the Cube back into a PPField.
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field.brlev = None
field.brsvd = [None] * 4
field = verify(cube, field)
# Check the vertical coordinate is as originally specified.
self.assertEqual(field.lbvc, 6)
self.assertEqual(field.blev, point)
self.assertEqual(field.brsvd[0], lower)
self.assertEqual(field.brlev, upper)
def test_potential_temperature_level_round_trip(self):
# Check save+load for data on 'potential temperature' levels.
# Use pp.load_cubes() to convert a fake PPField into a Cube.
# NB. Use MagicMock so that SplittableInt header items, such as
# LBCODE, support len().
potm_value = 22.5
mock_data = np.zeros(1)
mock_core_data = mock.MagicMock(return_value=mock_data)
field = mock.MagicMock(lbvc=19, blev=potm_value,
lbuser=[0] * 7, lbrsvd=[0] * 4,
core_data=mock_core_data,
realised_dtype=mock_data.dtype)
load = mock.Mock(return_value=iter([field]))
with mock.patch('iris.fileformats.pp.load', new=load):
cube = next(iris.fileformats.pp.load_cubes('DUMMY'))
self._test_coord(cube, potm_value,
standard_name='air_potential_temperature')
# Now use the save rules to convert the Cube back into a PPField.
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field = verify(cube, field)
# Check the vertical coordinate is as originally specified.
self.assertEqual(field.lbvc, 19)
self.assertEqual(field.blev, potm_value)
def test_hybrid_pressure_round_trip(self):
# Use pp.load_cubes() to convert fake PPFields into Cubes.
# NB. Use MagicMock so that SplittableInt header items, such as
# LBCODE, support len().
def field_with_data(scale=1):
x, y = 40, 30
mock_data = np.arange(1200).reshape(y, x) * scale
mock_core_data = mock.MagicMock(return_value=mock_data)
field = mock.MagicMock(core_data=mock_core_data,
realised_dtype=mock_data.dtype,
lbcode=[1],
lbnpt=x, lbrow=y, bzx=350, bdx=1.5,
bzy=40, bdy=1.5, lbuser=[0] * 7,
lbrsvd=[0] * 4)
field._x_coord_name = lambda: 'longitude'
field._y_coord_name = lambda: 'latitude'
field.coord_system = lambda: None
return field
# Make a fake reference surface field.
pressure_field = field_with_data(10)
pressure_field.stash = iris.fileformats.pp.STASH(1, 0, 409)
pressure_field.lbuser[3] = 409
# Make a fake data field which needs the reference surface.
model_level = 5678
sigma_lower, sigma, sigma_upper = 0.85, 0.9, 0.95
delta_lower, delta, delta_upper = 0.05, 0.1, 0.15
data_field = field_with_data()
data_field.configure_mock(lbvc=9, lblev=model_level,
bhlev=delta, bhrlev=delta_lower,
blev=sigma, brlev=sigma_lower,
brsvd=[sigma_upper, delta_upper])
# Convert both fields to cubes.
load = mock.Mock(return_value=iter([pressure_field, data_field]))
with mock.patch('iris.fileformats.pp.load', new=load) as load:
pressure_cube, data_cube = iris.fileformats.pp.load_cubes('DUMMY')
# Check the reference surface cube looks OK.
self.assertEqual(pressure_cube.standard_name, 'surface_air_pressure')
self.assertEqual(pressure_cube.units, 'Pa')
# Check the data cube is set up to use hybrid-pressure.
self._test_coord(data_cube, model_level,
standard_name='model_level_number')
self._test_coord(data_cube, delta, [delta_lower, delta_upper],
long_name='level_pressure')
self._test_coord(data_cube, sigma, [sigma_lower, sigma_upper],
long_name='sigma')
aux_factories = data_cube.aux_factories
self.assertEqual(len(aux_factories), 1)
surface_coord = aux_factories[0].dependencies['surface_air_pressure']
self.assertArrayEqual(surface_coord.points,
np.arange(12000, step=10).reshape(30, 40))
# Now use the save rules to convert the Cubes back into PPFields.
pressure_field = iris.fileformats.pp.PPField3()
pressure_field.lbfc = 0
pressure_field.lbvc = 0
pressure_field.brsvd = [None, None]
pressure_field.lbuser = [None] * 7
pressure_field = verify(pressure_cube, pressure_field)
data_field = iris.fileformats.pp.PPField3()
data_field.lbfc = 0
data_field.lbvc = 0
data_field.brsvd = [None, None]
data_field.lbuser = [None] * 7
data_field = verify(data_cube, data_field)
# The reference surface field should have STASH=409
self.assertArrayEqual(pressure_field.lbuser,
[None, None, None, 409, None, None, 1])
# Check the data field has the vertical coordinate as originally
# specified.
self.assertEqual(data_field.lbvc, 9)
self.assertEqual(data_field.lblev, model_level)
self.assertEqual(data_field.bhlev, delta)
self.assertEqual(data_field.bhrlev, delta_lower)
self.assertEqual(data_field.blev, sigma)
self.assertEqual(data_field.brlev, sigma_lower)
self.assertEqual(data_field.brsvd, [sigma_upper, delta_upper])
def test_hybrid_pressure_with_duplicate_references(self):
def field_with_data(scale=1):
x, y = 40, 30
mock_data = np.arange(1200).reshape(y, x) * scale
mock_core_data = mock.MagicMock(return_value=mock_data)
field = mock.MagicMock(core_data=mock_core_data,
realised_dtype=mock_data.dtype,
lbcode=[1],
lbnpt=x, lbrow=y, bzx=350, bdx=1.5,
bzy=40, bdy=1.5, lbuser=[0] * 7,
lbrsvd=[0] * 4)
field._x_coord_name = lambda: 'longitude'
field._y_coord_name = lambda: 'latitude'
field.coord_system = lambda: None
return field
# Make a fake reference surface field.
pressure_field = field_with_data(10)
pressure_field.stash = iris.fileformats.pp.STASH(1, 0, 409)
pressure_field.lbuser[3] = 409
# Make a fake data field which needs the reference surface.
model_level = 5678
sigma_lower, sigma, sigma_upper = 0.85, 0.9, 0.95
delta_lower, delta, delta_upper = 0.05, 0.1, 0.15
data_field = field_with_data()
data_field.configure_mock(lbvc=9, lblev=model_level,
bhlev=delta, bhrlev=delta_lower,
blev=sigma, brlev=sigma_lower,
brsvd=[sigma_upper, delta_upper])
# Convert both fields to cubes.
load = mock.Mock(return_value=iter([data_field,
pressure_field,
pressure_field]))
msg = 'Multiple reference cubes for surface_air_pressure'
with mock.patch('iris.fileformats.pp.load',
new=load) as load, mock.patch('warnings.warn') as warn:
_, _, _ = iris.fileformats.pp.load_cubes('DUMMY')
warn.assert_called_with(msg)
def test_hybrid_height_with_non_standard_coords(self):
# Check the save rules are using the AuxFactory to find the
# hybrid height coordinates and not relying on their names.
ny, nx = 30, 40
sigma_lower, sigma, sigma_upper = 0.75, 0.8, 0.75
delta_lower, delta, delta_upper = 150, 200, 250
cube = Cube(np.zeros((ny, nx)), 'air_temperature')
level_coord = AuxCoord(0, 'model_level_number')
cube.add_aux_coord(level_coord)
delta_coord = AuxCoord(delta, bounds=[[delta_lower, delta_upper]],
long_name='moog', units='m')
sigma_coord = AuxCoord(sigma, bounds=[[sigma_lower, sigma_upper]],
long_name='mavis')
surface_altitude_coord = AuxCoord(np.zeros((ny, nx)),
'surface_altitude', units='m')
cube.add_aux_coord(delta_coord)
cube.add_aux_coord(sigma_coord)
cube.add_aux_coord(surface_altitude_coord, (0, 1))
cube.add_aux_factory(HybridHeightFactory(delta_coord, sigma_coord,
surface_altitude_coord))
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field.brsvd = [None, None]
field.lbuser = [None] * 7
field = verify(cube, field)
self.assertEqual(field.blev, delta)
self.assertEqual(field.brlev, delta_lower)
self.assertEqual(field.bhlev, sigma)
self.assertEqual(field.bhrlev, sigma_lower)
self.assertEqual(field.brsvd, [delta_upper, sigma_upper])
def test_hybrid_pressure_with_non_standard_coords(self):
# Check the save rules are using the AuxFactory to find the
# hybrid pressure coordinates and not relying on their names.
ny, nx = 30, 40
sigma_lower, sigma, sigma_upper = 0.75, 0.8, 0.75
delta_lower, delta, delta_upper = 0.15, 0.2, 0.25
cube = Cube(np.zeros((ny, nx)), 'air_temperature')
level_coord = AuxCoord(0, 'model_level_number')
cube.add_aux_coord(level_coord)
delta_coord = AuxCoord(delta, bounds=[[delta_lower, delta_upper]],
long_name='moog', units='Pa')
sigma_coord = AuxCoord(sigma, bounds=[[sigma_lower, sigma_upper]],
long_name='mavis')
surface_air_pressure_coord = AuxCoord(np.zeros((ny, nx)),
'surface_air_pressure',
units='Pa')
cube.add_aux_coord(delta_coord)
cube.add_aux_coord(sigma_coord)
cube.add_aux_coord(surface_air_pressure_coord, (0, 1))
cube.add_aux_factory(HybridPressureFactory(
delta_coord, sigma_coord, surface_air_pressure_coord))
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field.brsvd = [None, None]
field.lbuser = [None] * 7
field = verify(cube, field)
self.assertEqual(field.bhlev, delta)
self.assertEqual(field.bhrlev, delta_lower)
self.assertEqual(field.blev, sigma)
self.assertEqual(field.brlev, sigma_lower)
self.assertEqual(field.brsvd, [sigma_upper, delta_upper])
def test_hybrid_height_round_trip_no_reference(self):
# Use pp.load_cubes() to convert fake PPFields into Cubes.
# NB. Use MagicMock so that SplittableInt header items, such as
# LBCODE, support len().
def field_with_data(scale=1):
x, y = 40, 30
mock_data = np.arange(1200).reshape(y, x) * scale
mock_core_data = mock.MagicMock(return_value=mock_data)
field = mock.MagicMock(core_data=mock_core_data,
realised_dtype=mock_data.dtype,
lbcode=[1],
lbnpt=x, lbrow=y, bzx=350, bdx=1.5,
bzy=40, bdy=1.5, lbuser=[0] * 7,
lbrsvd=[0] * 4)
field._x_coord_name = lambda: 'longitude'
field._y_coord_name = lambda: 'latitude'
field.coord_system = lambda: None
return field
# Make a fake data field which needs the reference surface.
model_level = 5678
sigma_lower, sigma, sigma_upper = 0.85, 0.9, 0.95
delta_lower, delta, delta_upper = 0.05, 0.1, 0.15
data_field = field_with_data()
data_field.configure_mock(lbvc=65, lblev=model_level,
bhlev=sigma, bhrlev=sigma_lower,
blev=delta, brlev=delta_lower,
brsvd=[delta_upper, sigma_upper])
# Convert field to a cube.
load = mock.Mock(return_value=iter([data_field]))
with mock.patch('iris.fileformats.pp.load', new=load) as load, \
mock.patch('warnings.warn') as warn:
data_cube, = iris.fileformats.pp.load_cubes('DUMMY')
msg = "Unable to create instance of HybridHeightFactory. " \
"The source data contains no field(s) for 'orography'."
warn.assert_called_once_with(msg)
# Check the data cube is set up to use hybrid height.
self._test_coord(data_cube, model_level,
standard_name='model_level_number')
self._test_coord(data_cube, delta, [delta_lower, delta_upper],
long_name='level_height')
self._test_coord(data_cube, sigma, [sigma_lower, sigma_upper],
long_name='sigma')
# Check that no aux factory is created (due to missing
# reference surface).
aux_factories = data_cube.aux_factories
self.assertEqual(len(aux_factories), 0)
# Now use the save rules to convert the Cube back into a PPField.
data_field = iris.fileformats.pp.PPField3()
data_field.lbfc = 0
data_field.lbvc = 0
data_field.brsvd = [None, None]
data_field.lbuser = [None] * 7
data_field = verify(data_cube, data_field)
# Check the data field has the vertical coordinate as originally
# specified.
self.assertEqual(data_field.lbvc, 65)
self.assertEqual(data_field.lblev, model_level)
self.assertEqual(data_field.bhlev, sigma)
self.assertEqual(data_field.bhrlev, sigma_lower)
self.assertEqual(data_field.blev, delta)
self.assertEqual(data_field.brlev, delta_lower)
self.assertEqual(data_field.brsvd, [delta_upper, sigma_upper])
class TestSaveLBFT(tests.IrisTest):
def create_cube(self, fp_min, fp_mid, fp_max, ref_offset, season=None):
cube = Cube(np.zeros((3, 4)))
cube.add_aux_coord(AuxCoord(standard_name='forecast_period',
units='hours',
points=fp_mid, bounds=[fp_min, fp_max]))
cube.add_aux_coord(AuxCoord(standard_name='time',
units='hours since epoch',
points=ref_offset + fp_mid,
bounds=[ref_offset + fp_min,
ref_offset + fp_max]))
if season:
cube.add_aux_coord(AuxCoord(long_name='clim_season',
points=season))
cube.add_cell_method(CellMethod('DUMMY', 'clim_season'))
return cube
def convert_cube_to_field(self, cube):
# Use the save rules to convert the Cube back into a PPField.
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
field.lbtim = 0
field = verify(cube, field)
return field
def test_time_mean_from_forecast_period(self):
cube = self.create_cube(24, 36, 48, 72)
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, 48)
def test_time_mean_from_forecast_reference_time(self):
cube = Cube(np.zeros((3, 4)))
cube.add_aux_coord(AuxCoord(standard_name='forecast_reference_time',
units='hours since epoch',
points=72))
cube.add_aux_coord(AuxCoord(standard_name='time',
units='hours since epoch',
points=72 + 36, bounds=[72 + 24, 72 + 48]))
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, 48)
def test_climatological_mean_single_year(self):
cube = Cube(np.zeros((3, 4)))
cube.add_aux_coord(AuxCoord(standard_name='forecast_period',
units='hours',
points=36, bounds=[24, 4 * 24]))
cube.add_aux_coord(AuxCoord(standard_name='time',
units='hours since epoch',
points=240 + 36, bounds=[240 + 24,
240 + 4 * 24]))
cube.add_aux_coord(AuxCoord(long_name='clim_season', points='DUMMY'))
cube.add_cell_method(CellMethod('DUMMY', 'clim_season'))
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, 4 * 24)
def test_climatological_mean_multi_year_djf(self):
delta_start = 24
delta_mid = 36
delta_end = 369 * 24
ref_offset = 10 * 24
cube = self.create_cube(delta_start, delta_mid, delta_end, ref_offset, 'djf')
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, delta_end)
def test_climatological_mean_multi_year_mam(self):
cube = self.create_cube(24, 36, 369 * 24, 240, 'mam')
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, 369 * 24)
def test_climatological_mean_multi_year_jja(self):
cube = self.create_cube(24, 36, 369 * 24, 240, 'jja')
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, 369 * 24)
def test_climatological_mean_multi_year_son(self):
cube = self.create_cube(24, 36, 369 * 24, 240, 'son')
field = self.convert_cube_to_field(cube)
self.assertEqual(field.lbft, 369 * 24)
class TestCoordinateForms(tests.IrisTest):
def _common(self, x_coord):
nx = len(x_coord.points)
ny = 2
data = np.zeros((ny, nx), dtype=np.float32)
test_cube = iris.cube.Cube(data)
y0 = np.float32(20.5)
dy = np.float32(3.72)
y_coord = iris.coords.DimCoord.from_regular(
zeroth=y0,
step=dy,
count=ny,
standard_name='latitude',
units='degrees_north')
test_cube.add_dim_coord(x_coord, 1)
test_cube.add_dim_coord(y_coord, 0)
# Write to a temporary PP file and read it back as a PPField
with self.temp_filename('.pp') as pp_filepath:
iris.save(test_cube, pp_filepath)
pp_loader = iris.fileformats.pp.load(pp_filepath)
pp_field = next(pp_loader)
return pp_field
def test_save_awkward_case_is_regular(self):
# Check that specific "awkward" values still save in a regular form.
nx = 3
x0 = np.float32(355.626)
dx = np.float32(0.0135)
x_coord = iris.coords.DimCoord.from_regular(
zeroth=x0,
step=dx,
count=nx,
standard_name='longitude',
units='degrees_east')
pp_field = self._common(x_coord)
# Check that the result has the regular coordinates as expected.
self.assertEqual(pp_field.bzx, x0)
self.assertEqual(pp_field.bdx, dx)
self.assertEqual(pp_field.lbnpt, nx)
def test_save_irregular(self):
# Check that a non-regular coordinate saves as expected.
nx = 3
x_values = [0.0, 1.1, 2.0]
x_coord = iris.coords.DimCoord(x_values,
standard_name='longitude',
units='degrees_east')
pp_field = self._common(x_coord)
# Check that the result has the regular/irregular Y and X as expected.
self.assertEqual(pp_field.bdx, 0.0)
self.assertArrayAllClose(pp_field.x, x_values)
self.assertEqual(pp_field.lbnpt, nx)
@tests.skip_data
class TestLoadLittleendian(tests.IrisTest):
def test_load_sample(self):
file_path = tests.get_data_path(
('PP', 'little_endian', 'qrparm.orog.pp'))
# Ensure it just loads.
cube = iris.load_cube(file_path, 'surface_altitude')
self.assertEqual(cube.shape, (110, 160))
# Check for sensible floating point numbers.
def check_minmax(array, expect_min, expect_max):
found = np.array([np.min(array), np.max(array)])
expected = np.array([expect_min, expect_max])
self.assertArrayAlmostEqual(found, expected, decimal=2)
lons = cube.coord('grid_longitude').points
lats = cube.coord('grid_latitude').points
data = cube.data
check_minmax(lons, 342.0, 376.98)
check_minmax(lats, -10.48, 13.5)
check_minmax(data, -30.48, 6029.1)
@tests.skip_data
class TestAsCubes(tests.IrisTest):
def setUp(self):
dpath = tests.get_data_path(['PP', 'meanMaxMin',
'200806081200__qwpb.T24.pp'])
self.ppfs = iris.fileformats.pp.load(dpath)
def test_pseudo_level_filter(self):
chosen_ppfs = []
for ppf in self.ppfs:
if ppf.lbuser[4] == 3:
chosen_ppfs.append(ppf)
cubes_fields = list(load_pairs_from_fields(chosen_ppfs))
self.assertEqual(len(cubes_fields), 8)
def test_pseudo_level_filter_none(self):
chosen_ppfs = []
for ppf in self.ppfs:
if ppf.lbuser[4] == 30:
chosen_ppfs.append(ppf)
cubes = list(load_pairs_from_fields(chosen_ppfs))
self.assertEqual(len(cubes), 0)
def test_as_pairs(self):
cube_ppf_pairs = load_pairs_from_fields(self.ppfs)
cubes = []
for cube, ppf in cube_ppf_pairs:
if ppf.lbuser[4] == 3:
cube.attributes['pseudo level'] = ppf.lbuser[4]
cubes.append(cube)
for cube in cubes:
self.assertEqual(cube.attributes['pseudo level'], 3)
class TestSaveLBPROC(tests.IrisTest):
def create_cube(self, longitude_coord='longitude'):
cube = Cube(np.zeros((2, 3, 4)))
tunit = Unit('days since epoch', calendar='gregorian')
tcoord = DimCoord(np.arange(2), standard_name='time', units=tunit)
xcoord = DimCoord(np.arange(3), standard_name=longitude_coord,
units='degrees')
ycoord = DimCoord(points=np.arange(4))
cube.add_dim_coord(tcoord, 0)
cube.add_dim_coord(xcoord, 1)
cube.add_dim_coord(ycoord, 2)
return cube
def convert_cube_to_field(self, cube):
field = iris.fileformats.pp.PPField3()
field.lbvc = 0
return verify(cube, field)
def test_time_mean_only(self):
cube = self.create_cube()
cube.add_cell_method(CellMethod(method='mean', coords='time'))
field = self.convert_cube_to_field(cube)
self.assertEqual(int(field.lbproc), 128)
def test_longitudinal_mean_only(self):
cube = self.create_cube()
cube.add_cell_method(CellMethod(method=u'mean', coords=u'longitude'))
field = self.convert_cube_to_field(cube)
self.assertEqual(int(field.lbproc), 64)
def test_grid_longitudinal_mean_only(self):
cube = self.create_cube(longitude_coord='grid_longitude')
cube.add_cell_method(CellMethod(method=u'mean',
coords=u'grid_longitude'))
field = self.convert_cube_to_field(cube)
self.assertEqual(int(field.lbproc), 64)
def test_time_mean_and_zonal_mean(self):
cube = self.create_cube()
cube.add_cell_method(CellMethod(method=u'mean', coords=u'time'))
cube.add_cell_method(CellMethod(method=u'mean', coords=u'longitude'))
field = self.convert_cube_to_field(cube)
self.assertEqual(int(field.lbproc), 192)
@tests.skip_data
class TestCallbackLoad(tests.IrisTest):
def setUp(self):
self.pass_name = 'air_potential_temperature'
def callback_wrapper(self):
# Wrap the `iris.exceptions.IgnoreCubeException`-calling callback.
def callback_ignore_cube_exception(cube, field, filename):
if cube.name() != self.pass_name:
raise IgnoreCubeException
return callback_ignore_cube_exception
def test_ignore_cube_callback(self):
test_dataset = tests.get_data_path(
['PP', 'globClim1', 'dec_subset.pp'])
exception_callback = self.callback_wrapper()
result_cubes = iris.load(test_dataset, callback=exception_callback)
n_result_cubes = len(result_cubes)
# We ignore all but one cube (the `air_potential_temperature` cube).
self.assertEqual(n_result_cubes, 1)
self.assertEqual(result_cubes[0].name(), self.pass_name)
@tests.skip_data
class TestZonalMeanBounds(tests.IrisTest):
def test_multiple_longitude(self):
# test that bounds are set for a zonal mean file with many longitude
# values
orig_file = tests.get_data_path(('PP', 'aPPglob1', 'global.pp'))
f = next(iris.fileformats.pp.load(orig_file))
f.lbproc = 192 # time and zonal mean
# Write out pp file
temp_filename = iris.util.create_temp_filename(".pp")
with open(temp_filename, 'wb') as temp_fh:
f.save(temp_fh)
# Load pp file
cube = iris.load_cube(temp_filename)
self.assertTrue(cube.coord('longitude').has_bounds())
os.remove(temp_filename)
def test_singular_longitude(self):
# test that bounds are set for a zonal mean file with a single
# longitude value
pp_file = tests.get_data_path(('PP', 'zonal_mean', 'zonal_mean.pp'))
# Load pp file
cube = iris.load_cube(pp_file)
self.assertTrue(cube.coord('longitude').has_bounds())
@tests.skip_data
class TestLoadPartialMask(tests.IrisTest):
def test_data(self):
# Ensure that fields merge correctly where one has a mask and one
# doesn't.
filename = tests.get_data_path(['PP', 'simple_pp', 'partial_mask.pp'])
expected_data = np.ma.masked_array([[[0, 1],
[11, 12]],
[[99, 100],
[-1, -1]]],
[[[0, 0],
[0, 0]],
[[0, 0],
[1, 1]]],
dtype=np.int32)
cube = iris.load_cube(filename)
self.assertEqual(expected_data.dtype, cube.data.dtype)
self.assertMaskedArrayEqual(expected_data, cube.data, strict=False)
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
cmelange/ansible
|
lib/ansible/modules/cloud/openstack/os_nova_host_aggregate.py
|
6
|
6737
|
#!/usr/bin/python
# Copyright 2016 Jakub Jursa <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_nova_host_aggregate
short_description: Manage OpenStack host aggregates
extends_documentation_fragment: openstack
author: "Jakub Jursa"
version_added: "2.3"
description:
- Create, update, or delete OpenStack host aggregates. If an aggregate
with the supplied name already exists, it will be updated with the
new name, new availability zone, new metadata and new list of hosts.
options:
name:
description: Name of the aggregate.
required: true
metadata:
description: Metadata dict.
required: false
default: None
availability_zone:
description: Availability zone to create aggregate into.
required: false
default: None
hosts:
description: List of hosts to set for an aggregate.
required: false
default: None
state:
description: Should the resource be present or absent.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a host aggregate
- os_nova_host_aggregate:
cloud: mycloud
state: present
name: db_aggregate
hosts:
- host1
- host2
metadata:
type: dbcluster
# Delete an aggregate
- os_nova_host_aggregate:
cloud: mycloud
state: absent
name: db_aggregate
'''
RETURN = '''
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _needs_update(module, aggregate):
new_metadata = (module.params['metadata'] or {})
new_metadata['availability_zone'] = module.params['availability_zone']
if (module.params['name'] != aggregate.name) or \
(module.params['hosts'] is not None and module.params['hosts'] != aggregate.hosts) or \
(module.params['availability_zone'] is not None and module.params['availability_zone'] != aggregate.availability_zone) or \
(module.params['metadata'] is not None and new_metadata != aggregate.metadata):
return True
return False
def _system_state_change(module, aggregate):
state = module.params['state']
if state == 'absent' and aggregate:
return True
if state == 'present':
if aggregate is None:
return True
return _needs_update(module, aggregate)
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
metadata=dict(required=False, default=None, type='dict'),
availability_zone=dict(required=False, default=None),
hosts=dict(required=False, default=None, type='list'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if StrictVersion(shade.__version__) < StrictVersion('1.9.0'):
module.fail_json(msg="To utilize this module, the installed version of"
"the shade library MUST be >=1.9.0")
name = module.params['name']
metadata = module.params['metadata']
availability_zone = module.params['availability_zone']
hosts = module.params['hosts']
state = module.params['state']
if metadata is not None:
metadata.pop('availability_zone', None)
try:
cloud = shade.operator_cloud(**module.params)
aggregates = cloud.search_aggregates(name_or_id=name)
if len(aggregates) == 1:
aggregate = aggregates[0]
elif len(aggregates) == 0:
aggregate = None
else:
raise Exception("Should not happen")
if module.check_mode:
module.exit_json(changed=_system_state_change(module, aggregate))
if state == 'present':
if aggregate is None:
aggregate = cloud.create_aggregate(name=name,
availability_zone=availability_zone)
if hosts:
for h in hosts:
cloud.add_host_to_aggregate(aggregate.id, h)
if metadata:
cloud.set_aggregate_metadata(aggregate.id, metadata)
changed = True
else:
if _needs_update(module, aggregate):
if availability_zone is not None:
aggregate = cloud.update_aggregate(aggregate.id,
name=name, availability_zone=availability_zone)
if metadata is not None:
metas = metadata
for i in (set(aggregate.metadata.keys()) - set(metadata.keys())):
if i != 'availability_zone':
metas[i] = None
cloud.set_aggregate_metadata(aggregate.id, metas)
if hosts is not None:
for i in (set(aggregate.hosts) - set(hosts)):
cloud.remove_host_from_aggregate(aggregate.id, i)
for i in (set(hosts) - set(aggregate.hosts)):
cloud.add_host_to_aggregate(aggregate.id, i)
changed = True
else:
changed = False
module.exit_json(changed=changed)
elif state == 'absent':
if aggregate is None:
changed = False
else:
cloud.delete_aggregate(aggregate.id)
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
twenty0ne/CocosBuilder-wxPython
|
cocos/test/test_camera_orbit_reuse.py
|
6
|
1290
|
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# total time duration * action multiplier
testinfo = "s, t 2.0, s, t 4.0, s, t 6.1, s, q"
tags = "OrbitCamera, grid"
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from pyglet.gl import *
class BackgroundLayer(cocos.layer.Layer):
def __init__(self):
super(BackgroundLayer, self).__init__()
self.img = pyglet.resource.image('background_image.png')
def draw( self ):
glColor4ub(255, 255, 255, 255)
glPushMatrix()
self.transform()
self.img.blit(0,0)
glPopMatrix()
def main():
director.init( resizable=True )
director.set_depth_test()
main_scene = cocos.scene.Scene()
main_scene.add( BackgroundLayer(), z=0 )
rot = OrbitCamera( delta_z=60, duration=2 )
# In real code after a sequence of grid actions the StopGrid() action
# should be called. Omitted here to stay in the last grid action render.
main_scene.do( rot * 3 )
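# Illustrative sketch (hedged; deliberately not exercised by this test, see the
# note above): in real code the grid would be stopped after the orbit sequence,
# for example:
#
#   main_scene.do(rot * 3 + StopGrid())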
director.run (main_scene)
if __name__ == '__main__':
main()
|
mit
|
ssvsergeyev/ZenPacks.zenoss.AWS
|
src/boto/boto/cloudsearch/domain.py
|
153
|
15607
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.cloudsearch.optionstatus import OptionStatus
from boto.cloudsearch.optionstatus import IndexFieldStatus
from boto.cloudsearch.optionstatus import ServicePoliciesStatus
from boto.cloudsearch.optionstatus import RankExpressionStatus
from boto.cloudsearch.document import DocumentServiceConnection
from boto.cloudsearch.search import SearchConnection
def handle_bool(value):
if value in [True, 'true', 'True', 'TRUE', 1]:
return True
return False
class Domain(object):
"""
A Cloudsearch domain.
:ivar name: The name of the domain.
:ivar id: The internally generated unique identifier for the domain.
:ivar created: A boolean which is True if the domain is
created. It can take several minutes to initialize a domain
when CreateDomain is called. Newly created search domains are
returned with a False value for Created until domain creation
is complete
:ivar deleted: A boolean which is True if the search domain has
been deleted. The system must clean up resources dedicated to
the search domain when delete is called. Newly deleted
search domains are returned from list_domains with a True
value for deleted for several minutes until resource cleanup
is complete.
:ivar processing: True if processing is being done to activate the
current domain configuration.
:ivar num_searchable_docs: The number of documents that have been
submitted to the domain and indexed.
:ivar requires_index_document: True if index_documents needs to be
called to activate the current domain configuration.
:ivar search_instance_count: The number of search instances that are
available to process search requests.
:ivar search_instance_type: The instance type that is being used to
process search requests.
:ivar search_partition_count: The number of partitions across which
the search index is spread.
"""
def __init__(self, layer1, data):
self.layer1 = layer1
self.update_from_data(data)
def update_from_data(self, data):
self.created = data['created']
self.deleted = data['deleted']
self.processing = data['processing']
self.requires_index_documents = data['requires_index_documents']
self.domain_id = data['domain_id']
self.domain_name = data['domain_name']
self.num_searchable_docs = data['num_searchable_docs']
self.search_instance_count = data['search_instance_count']
self.search_instance_type = data.get('search_instance_type', None)
self.search_partition_count = data['search_partition_count']
self._doc_service = data['doc_service']
self._search_service = data['search_service']
@property
def doc_service_arn(self):
return self._doc_service['arn']
@property
def doc_service_endpoint(self):
return self._doc_service['endpoint']
@property
def search_service_arn(self):
return self._search_service['arn']
@property
def search_service_endpoint(self):
return self._search_service['endpoint']
@property
def created(self):
return self._created
@created.setter
def created(self, value):
self._created = handle_bool(value)
@property
def deleted(self):
return self._deleted
@deleted.setter
def deleted(self, value):
self._deleted = handle_bool(value)
@property
def processing(self):
return self._processing
@processing.setter
def processing(self, value):
self._processing = handle_bool(value)
@property
def requires_index_documents(self):
return self._requires_index_documents
@requires_index_documents.setter
def requires_index_documents(self, value):
self._requires_index_documents = handle_bool(value)
@property
def search_partition_count(self):
return self._search_partition_count
@search_partition_count.setter
def search_partition_count(self, value):
self._search_partition_count = int(value)
@property
def search_instance_count(self):
return self._search_instance_count
@search_instance_count.setter
def search_instance_count(self, value):
self._search_instance_count = int(value)
@property
def num_searchable_docs(self):
return self._num_searchable_docs
@num_searchable_docs.setter
def num_searchable_docs(self, value):
self._num_searchable_docs = int(value)
@property
def name(self):
return self.domain_name
@property
def id(self):
return self.domain_id
def delete(self):
"""
Delete this domain and all index data associated with it.
"""
return self.layer1.delete_domain(self.name)
def get_stemming(self):
"""
Return a :class:`boto.cloudsearch.option.OptionStatus` object
representing the currently defined stemming options for
the domain.
"""
return OptionStatus(self, None,
self.layer1.describe_stemming_options,
self.layer1.update_stemming_options)
def get_stopwords(self):
"""
Return a :class:`boto.cloudsearch.option.OptionStatus` object
representing the currently defined stopword options for
the domain.
"""
return OptionStatus(self, None,
self.layer1.describe_stopword_options,
self.layer1.update_stopword_options)
def get_synonyms(self):
"""
Return a :class:`boto.cloudsearch.option.OptionStatus` object
representing the currently defined synonym options for
the domain.
"""
return OptionStatus(self, None,
self.layer1.describe_synonym_options,
self.layer1.update_synonym_options)
def get_access_policies(self):
"""
Return a :class:`boto.cloudsearch.option.OptionStatus` object
representing the currently defined access policies for
the domain.
"""
return ServicePoliciesStatus(self, None,
self.layer1.describe_service_access_policies,
self.layer1.update_service_access_policies)
def index_documents(self):
"""
Tells the search domain to start indexing its documents using
the latest text processing options and IndexFields. This
operation must be invoked to make options whose OptionStatus
has OptionState of RequiresIndexDocuments visible in search
results.
"""
self.layer1.index_documents(self.name)
def get_index_fields(self, field_names=None):
"""
Return a list of index fields defined for this domain.
"""
data = self.layer1.describe_index_fields(self.name, field_names)
return [IndexFieldStatus(self, d) for d in data]
def create_index_field(self, field_name, field_type,
default='', facet=False, result=False, searchable=False,
source_attributes=[]):
"""
Defines an ``IndexField``, either replacing an existing
definition or creating a new one.
:type field_name: string
:param field_name: The name of a field in the search index.
:type field_type: string
:param field_type: The type of field. Valid values are
uint | literal | text
:type default: string or int
:param default: The default value for the field. If the
field is of type ``uint`` this should be an integer value.
Otherwise, it's a string.
:type facet: bool
:param facet: A boolean to indicate whether facets
are enabled for this field or not. Does not apply to
fields of type ``uint``.
:type results: bool
:param results: A boolean to indicate whether values
of this field can be returned in search results or
used in ranking. Does not apply to fields of type ``uint``.
:type searchable: bool
:param searchable: A boolean to indicate whether search
is enabled for this field or not. Applies only to fields
of type ``literal``.
:type source_attributes: list of dicts
:param source_attributes: An optional list of dicts that
provide information about attributes for this index field.
A maximum of 20 source attributes can be configured for
each index field.
Each item in the list is a dict with the following keys:
* data_copy - The value is a dict with the following keys:
* default - Optional default value if the source attribute
is not specified in a document.
* name - The name of the document source field to add
to this ``IndexField``.
* data_function - Identifies the transformation to apply
when copying data from a source attribute.
* data_map - The value is a dict with the following keys:
* cases - A dict that translates source field values
to custom values.
* default - An optional default value to use if the
source attribute is not specified in a document.
* name - the name of the document source field to add
to this ``IndexField``
* data_trim_title - Trims common title words from a source
document attribute when populating an ``IndexField``.
This can be used to create an ``IndexField`` you can
use for sorting. The value is a dict with the following
fields:
* default - An optional default value.
* language - an IETF RFC 4646 language code.
* separator - The separator that follows the text to trim.
* name - The name of the document source field to add.
:raises: BaseException, InternalException, LimitExceededException,
InvalidTypeException, ResourceNotFoundException
"""
data = self.layer1.define_index_field(self.name, field_name,
field_type, default=default,
facet=facet, result=result,
searchable=searchable,
source_attributes=source_attributes)
return IndexFieldStatus(self, data,
self.layer1.describe_index_fields)
def get_rank_expressions(self, rank_names=None):
"""
Return a list of rank expressions defined for this domain.
"""
fn = self.layer1.describe_rank_expressions
data = fn(self.name, rank_names)
return [RankExpressionStatus(self, d, fn) for d in data]
def create_rank_expression(self, name, expression):
"""
Create a new rank expression.
:type rank_name: string
:param rank_name: The name of an expression computed for ranking
while processing a search request.
:type rank_expression: string
:param rank_expression: The expression to evaluate for ranking
or thresholding while processing a search request. The
RankExpression syntax is based on JavaScript expressions
and supports:
* Integer, floating point, hex and octal literals
* Shortcut evaluation of logical operators such that an
expression a || b evaluates to the value a if a is
true without evaluating b at all
* JavaScript order of precedence for operators
* Arithmetic operators: + - * / %
* Boolean operators (including the ternary operator)
* Bitwise operators
* Comparison operators
* Common mathematic functions: abs ceil erf exp floor
lgamma ln log2 log10 max min sqrt pow
* Trigonometric library functions: acosh acos asinh asin
atanh atan cosh cos sinh sin tanh tan
* Random generation of a number between 0 and 1: rand
* Current time in epoch: time
* The min max functions that operate on a variable argument list
Intermediate results are calculated as double precision
floating point values. The final return value of a
RankExpression is automatically converted from floating
point to a 32-bit unsigned integer by rounding to the
nearest integer, with a natural floor of 0 and a ceiling
of max(uint32_t), 4294967295. Mathematical errors such as
dividing by 0 will fail during evaluation and return a
value of 0.
The source data for a RankExpression can be the name of an
IndexField of type uint, another RankExpression or the
reserved name text_relevance. The text_relevance source is
defined to return an integer from 0 to 1000 (inclusive) to
indicate how relevant a document is to the search request,
taking into account repetition of search terms in the
document and proximity of search terms to each other in
each matching IndexField in the document.
For more information about using rank expressions to
customize ranking, see the Amazon CloudSearch Developer
Guide.
:raises: BaseException, InternalException, LimitExceededException,
InvalidTypeException, ResourceNotFoundException
"""
data = self.layer1.define_rank_expression(self.name, name, expression)
return RankExpressionStatus(self, data,
self.layer1.describe_rank_expressions)
def get_document_service(self):
return DocumentServiceConnection(domain=self)
def get_search_service(self):
return SearchConnection(domain=self)
def __repr__(self):
return '<Domain: %s>' % self.domain_name
|
gpl-2.0
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/hooks/external_urllib.py
|
2
|
2093
|
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import newrelic.packages.six as six
from newrelic.agent import (current_transaction,
wrap_function_wrapper, ExternalTrace)
def _nr_wrapper_factory(bind_params_fn, library):
# Wrapper functions will be similar for monkeypatching the different
# urllib functions and methods, so a factory function to create them is
# used to reduce repetitiveness.
# Parameters:
#
# bind_params_fn: Function that returns the URL.
# library: String. The library name to be used for display in the UI
# by ExternalTrace.
def _nr_wrapper(wrapped, instance, args, kwargs):
transaction = current_transaction()
if transaction is None:
return wrapped(*args, **kwargs)
url = bind_params_fn(*args, **kwargs)
details = urlparse.urlparse(url)
if details.hostname is None:
return wrapped(*args, **kwargs)
with ExternalTrace(transaction, library, url):
return wrapped(*args, **kwargs)
return _nr_wrapper
def bind_params_urlretrieve(url, *args, **kwargs):
return url
def bind_params_open(fullurl, *args, **kwargs):
if isinstance(fullurl, six.string_types):
return fullurl
else:
return fullurl.get_full_url()
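# Illustrative sketch (hypothetical, not part of the original hook): another
# URL-opening entry point could be instrumented with the same factory, e.g.
#
#   _nr_wrapper_urlopen_ = _nr_wrapper_factory(bind_params_open, 'urllib2')
#   wrap_function_wrapper(module, 'urlopen', _nr_wrapper_urlopen_)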
def instrument(module):
if hasattr(module, 'urlretrieve'):
_nr_wrapper_urlretrieve_ = _nr_wrapper_factory(
bind_params_urlretrieve, 'urllib')
wrap_function_wrapper(module, 'urlretrieve', _nr_wrapper_urlretrieve_)
if hasattr(module, 'URLopener'):
_nr_wrapper_url_opener_open_ = _nr_wrapper_factory(
bind_params_open, 'urllib')
wrap_function_wrapper(module, 'URLopener.open',
_nr_wrapper_url_opener_open_)
if hasattr(module, 'OpenerDirector'):
_nr_wrapper_opener_director_open_ = _nr_wrapper_factory(
bind_params_open, 'urllib2')
wrap_function_wrapper(module, 'OpenerDirector.open',
_nr_wrapper_opener_director_open_)
|
agpl-3.0
|
cihai/cihaidata-unihan
|
unihan_etl/util.py
|
1
|
3111
|
# -*- coding: utf8 -*-
"""Utility and helper methods for script.
util
~~~~
"""
from __future__ import absolute_import, unicode_literals
import re
import sys
from ._compat import Mapping, string_types, text_type, unichr
def ucn_to_unicode(ucn):
"""Return a python unicode value from a UCN.
Converts a Unicode Universal Character Number (e.g. "U+4E00" or "4E00") to
Python unicode (u'\\u4e00')"""
if isinstance(ucn, string_types):
ucn = ucn.strip("U+")
if len(ucn) > 4:
char = b'\U' + format(int(ucn, 16), '08x').encode('latin1')
char = char.decode('unicode_escape')
else:
char = unichr(int(ucn, 16))
else:
char = unichr(ucn)
assert isinstance(char, text_type)
return char
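# Illustrative usage (matches the docstring above; not part of the original
# module):
#
#   ucn_to_unicode("U+4E00")  # -> u'\u4e00'
#   ucn_to_unicode(0x4E00)    # -> u'\u4e00'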
def ucnstring_to_python(ucn_string):
"""Return string with Unicode UCN (e.g. "U+4E00") to native Python Unicode
(u'\\u4e00').
"""
res = re.findall(r"U\+[0-9a-fA-F]*", ucn_string)
for r in res:
ucn_string = ucn_string.replace(text_type(r), text_type(ucn_to_unicode(r)))
ucn_string = ucn_string.encode('utf-8')
assert isinstance(ucn_string, bytes)
return ucn_string
def ucnstring_to_unicode(ucn_string):
"""Return ucnstring as Unicode."""
ucn_string = ucnstring_to_python(ucn_string).decode('utf-8')
assert isinstance(ucn_string, text_type)
return ucn_string
def _dl_progress(count, block_size, total_size, out=sys.stdout):
"""
MIT License: https://github.com/okfn/dpm-old/blob/master/dpm/util.py
Modification for testing: http://stackoverflow.com/a/4220278
"""
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMb' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%iKb' % (bytes / 1000)
elif bytes > 1000:
return '%.1fKb' % (bytes / 1000.0)
else:
return '%ib' % bytes
if not count:
print('Total size: %s' % format_size(total_size))
last_percent = int((count - 1) * block_size * 100 / total_size)
# may have downloaded less if count*block_size > total_size
maxdownloaded = count * block_size
percent = min(int(maxdownloaded * 100 / total_size), 100)
out.flush()
if percent > last_percent:
# TODO: is this acceptable? Do we want to do something nicer?
out.write(
'%3d%% [%s>%s]\r'
% (
percent,
int(round(percent / 2)) * '=',
int(round(50 - percent / 2)) * ' ',
)
)
out.flush()
if maxdownloaded >= total_size:
print('\n')
def merge_dict(base, additional):
if base is None:
return additional
if additional is None:
return base
if not (isinstance(base, Mapping) and isinstance(additional, Mapping)):
return additional
merged = base
for key, value in additional.items():
if isinstance(value, Mapping):
merged[key] = merge_dict(merged.get(key), value)
else:
merged[key] = value
return merged
|
mit
|
maferelo/saleor
|
saleor/graphql/decorators.py
|
1
|
1429
|
from enum import Enum
from functools import wraps
from typing import Iterable, Union
from graphql_jwt import exceptions
from graphql_jwt.decorators import context
def account_passes_test(test_func):
"""Determine if user/service_account has permission to access to content."""
def decorator(f):
@wraps(f)
@context(f)
def wrapper(context, *args, **kwargs):
if test_func(context):
return f(*args, **kwargs)
raise exceptions.PermissionDenied()
return wrapper
return decorator
def _permission_required(perms: Iterable[Enum], context):
if context.user.has_perms(perms):
return True
service_account = getattr(context, "service_account", None)
if service_account and service_account.has_perms(perms):
return True
return False
def permission_required(perm: Union[Enum, Iterable[Enum]]):
def check_perms(context):
if isinstance(perm, Enum):
perms = (perm,)
else:
perms = perm
return _permission_required(perms, context)
return account_passes_test(check_perms)
def one_of_permissions_required(perms: Iterable[Enum]):
def check_perms(context):
for perm in perms:
has_perm = _permission_required((perm,), context)
if has_perm:
return True
return False
return account_passes_test(check_perms)
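# Illustrative usage sketch (hedged; the resolver and the permission enum below
# are hypothetical and not part of this module):
#
#   @permission_required(OrderPermissions.MANAGE_ORDERS)
#   def resolve_draft_orders(root, info, **kwargs):
#       ...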
|
bsd-3-clause
|
wking/pycalendar
|
pycalendar/component/calendar.py
|
1
|
1371
|
# Copyright (C) 2013 W. Trevor King <[email protected]>
#
# This file is part of pycalendar.
#
# pycalendar is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pycalendar is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pycalendar. If not, see <http://www.gnu.org/licenses/>.
from . import base as _base
class Calendar (_base.Component):
"""A calendar
As defined in :RFC:`5545`, section 3.4 (iCalendar Object).
"""
name = 'VCALENDAR'
# contents defined in RFC 5545, section 3.6 (Calendar Components)
required = [
'PRODID',
'VERSION',
]
optional = [
'CALSCALE',
'METHOD',
'X-PROP',
'IANA-PROP',
]
multiple = [
'X-PROP',
'IANA-PROP',
]
subcomponents = [
'VEVENT',
'VTODO',
'VJOURNAL',
'VFREEBUSY',
'VTIMEZONE',
'VIANA-COMP',
'VXCOMP',
]
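# Illustrative sketch (not part of the original module): a minimal VCALENDAR
# carrying only the two required properties listed above; the PRODID value is
# only an example.
#
#   BEGIN:VCALENDAR
#   VERSION:2.0
#   PRODID:-//example//pycalendar//EN
#   END:VCALENDAR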
|
gpl-3.0
|
LamCiuLoeng/gap
|
gapproject/util/master_helper.py
|
1
|
3918
|
# -*- coding: utf-8 -*-
import traceback
from pylons import cache
from tg import config
from gapproject.model import *
__all__ = ['getOrsayCustomerList', 'getOrsayCustomerInfo', 'getOrsayWashingList', 'getOrsayWashingDetailInfo', 'populateTranslation', 'populateTranslation1']
def getOrsayCustomerList():
def _getInfo():
'''
sql = [
"select distinct(th.CUSTOMER_CODE),th.CUSTOMER_NAME from t_sales_contract_dtl td,t_sales_contract_hdr th ",
"where td.program='ORSAY' and td.company_code='RPACEU' and brand like '%ORSAY%' and td.SALES_CONTRACT_NO=th.SALES_CONTRACT_NO",
"and td.COMPANY_CODE=th.COMPANY_CODE",
"order by th.CUSTOMER_NAME"
]
'''
sql = ["select distinct(th.CUST_CODE),th.CUST_NAME from t_cust_hdr th where th.company_code='RPACEU' and th.STATUS='1' order by th.CUST_NAME"]
return searchOracle("\n".join(sql), {})
return _getCacheOrSearch("orsay_customer_list", "all", _getInfo)
def getOrsayCustomerInfo(cn):
def _getInfo():
billto_sql = [
"select tc.ADDRESS_1||tc.ADDRESS_2||tc.ADDRESS_3||tc.ADDRESS_4 BillTo,tc.CONTACT_SALES,tc.TEL_NO,",
"tc.COMPANY_CODE,tc.CUST_NAME,tc.CUST_CODE,tc.LINE_NO,tc.STATUS,tc.CITY, tc.COUNTRY, tc.EXTENSION,tc.FAX_NO,tc.BASE_CCY,tc.PAY_TERM",
"from t_cust_hdr tc",
"where tc.CUST_CODE=:P_Customer",
"and tc.COMPANY_CODE='RPACEU' and tc.STATUS=1 "
]
shipto_sql = [
"select tcd.ADDRESS_1||tcd.ADDRESS_2||tcd.ADDRESS_3||tcd.ADDRESS_4 ShipTo,tcd.CONTACT_PERSON,tcd.TEL_NO,",
"tcd.CUST_CODE,tcd.COMPANY_CODE,tcd.CITY,tcd.COUNTRY,tcd.STATUS,tcd.FAX_NO",
"from t_cust_hdr tc, t_cust_deliv_addr tcd ",
"where tc.CUST_CODE=:P_Customer",
"and tc.LINE_NO=tcd.HDR_LINE_NO",
"and tc.COMPANY_CODE='RPACEU'",
]
return {
"billto": searchOracle("\n".join(billto_sql), {"P_Customer":str(cn)}),
"shipto": searchOracle("\n".join(shipto_sql), {"P_Customer":str(cn)}),
}
return _getCacheOrSearch("orsay_customer_head", cn, _getInfo)
def getOrsayWashingList(cat, season):
def _getInfo():
return DBSession.query(OrsayWashing).filter(OrsayWashing.active == 0).filter(OrsayWashing.type == cat).filter(OrsayWashing.season == season).order_by(OrsayWashing.englisch).all()
return _getCacheOrSearch("orsay_washing_instruction", '%s%s' % (cat, season), _getInfo)
def getOrsayWashingDetailInfo(id):
def _getInfo():
return DBSession.query(OrsayWashing).get(id)
return _getCacheOrSearch("orsay_washing_instruction", "washing_%d" % id, _getInfo)
def _getCacheOrSearch(namepace, key, fun, expiretime=60 * 5):
if config.get("use_cache", None) == "true":
print "-------- cached ----------"
c = cache.get_cache(namepace)
return c.get_value(key=key, createfunc=fun, expiretime=expiretime)
else:
print "-------- no cache --------"
return fun()
def populateTranslation(obj, style=' / ', default="", attrs=["deutsch", "franzosisch", "polnisch", "ungarisch", "tcheschisch", "englisch", "slowakisch", "rumanisch", "slowenisch"]):
if not obj:
return default
elif obj.season == 's12':
attrs = ["englisch", "deutsch", "polnisch", "franzosisch", "ungarisch", "tcheschisch", "slowakisch", "rumanisch", "slowenisch", "russisch"]
try:
content = []
for a in attrs: content.append(getattr(obj, a))
return style.join(content)
except:
traceback.print_exc()
return default
def populateTranslation1(obj, style=' / ', default=""):
return populateTranslation(obj, style, default, ["deutsch", "franzosisch", "englisch", "polnisch", "ungarisch", "tcheschisch", "slowakisch", "rumanisch", "slowenisch"])
|
mit
|
TheTypoMaster/asuswrt
|
release/src/router/wget/testenv/Test-auth-basic.py
|
23
|
1565
|
#!/usr/bin/env python3
from sys import exit
from test.http_test import HTTPTest
from misc.wget_file import WgetFile
"""
This test exercises Wget's Basic Authorization negotiation.
It also ensures that Wget remembers the host after a successful auth and
doesn't wait for a challenge the second time.
"""
TEST_NAME = "Basic Authorization"
############# File Definitions ###############################################
File1 = "I am an invisble man."
File2 = "I too am an invisible man."
File1_rules = {
"Authentication" : {
"Type" : "Basic",
"User" : "Sauron",
"Pass" : "TheEye"
}
}
File2_rules = {
"ExpectHeader" : {
"Authorization" : "Basic U2F1cm9uOlRoZUV5ZQ=="
}
}
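# Editorial check (not part of the original test): the header expected in
# File2_rules is just the Basic-auth encoding of "user:password"; with the
# standard-library base64 module the literal can be reproduced directly.
import base64
assert base64.b64encode(b"Sauron:TheEye") == b"U2F1cm9uOlRoZUV5ZQ=="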
A_File = WgetFile ("File1", File1, rules=File1_rules)
B_File = WgetFile ("File2", File2, rules=File2_rules)
WGET_OPTIONS = "--user=Sauron --password=TheEye"
WGET_URLS = [["File1", "File2"]]
Files = [[A_File, B_File]]
ExpectedReturnCode = 0
ExpectedDownloadedFiles = [A_File, B_File]
################ Pre and Post Test Hooks #####################################
pre_test = {
"ServerFiles" : Files
}
test_options = {
"WgetCommands" : WGET_OPTIONS,
"Urls" : WGET_URLS
}
post_test = {
"ExpectedFiles" : ExpectedDownloadedFiles,
"ExpectedRetcode" : ExpectedReturnCode
}
err = HTTPTest (
name=TEST_NAME,
pre_hook=pre_test,
test_params=test_options,
post_hook=post_test
).begin ()
exit (err)
|
gpl-2.0
|
python-gitlab/python-gitlab
|
tests/unit/objects/test_hooks.py
|
1
|
5269
|
"""
GitLab API: https://docs.gitlab.com/ce/api/system_hooks.html
GitLab API: https://docs.gitlab.com/ce/api/groups.html#hooks
GitLab API: https://docs.gitlab.com/ee/api/projects.html#hooks
"""
import re
import pytest
import responses
from gitlab.v4.objects import GroupHook, Hook, ProjectHook
hooks_content = [
{
"id": 1,
"url": "testurl",
"push_events": True,
"tag_push_events": True,
},
{
"id": 2,
"url": "testurl_second",
"push_events": False,
"tag_push_events": False,
},
]
hook_content = hooks_content[0]
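# Note (editorial): every fixture below registers its mock against the pattern
# r"...((groups|projects)/1/|)hooks...", which matches the system-hook endpoint
# (/hooks), the group endpoint (/groups/1/hooks) and the project endpoint
# (/projects/1/hooks), so the same canned responses serve all three test groups.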
@pytest.fixture
def resp_hooks_list():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.GET,
url=re.compile(r"http://localhost/api/v4/((groups|projects)/1/|)hooks"),
json=hooks_content,
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_hook_get():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.GET,
url=re.compile(r"http://localhost/api/v4/((groups|projects)/1/|)hooks/1"),
json=hook_content,
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_hook_create():
with responses.RequestsMock() as rsps:
rsps.add(
method=responses.POST,
url=re.compile(r"http://localhost/api/v4/((groups|projects)/1/|)hooks"),
json=hook_content,
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_hook_update():
with responses.RequestsMock() as rsps:
pattern = re.compile(r"http://localhost/api/v4/((groups|projects)/1/|)hooks/1")
rsps.add(
method=responses.GET,
url=pattern,
json=hook_content,
content_type="application/json",
status=200,
)
rsps.add(
method=responses.PUT,
url=pattern,
json=hook_content,
content_type="application/json",
status=200,
)
yield rsps
@pytest.fixture
def resp_hook_delete():
with responses.RequestsMock() as rsps:
pattern = re.compile(r"http://localhost/api/v4/((groups|projects)/1/|)hooks/1")
rsps.add(
method=responses.GET,
url=pattern,
json=hook_content,
content_type="application/json",
status=200,
)
rsps.add(
method=responses.DELETE,
url=pattern,
status=204,
)
yield rsps
def test_list_system_hooks(gl, resp_hooks_list):
hooks = gl.hooks.list()
assert hooks[0].id == 1
assert hooks[0].url == "testurl"
assert hooks[1].id == 2
assert hooks[1].url == "testurl_second"
def test_get_system_hook(gl, resp_hook_get):
data = gl.hooks.get(1)
assert isinstance(data, Hook)
assert data.url == "testurl"
assert data.id == 1
def test_create_system_hook(gl, resp_hook_create):
hook = gl.hooks.create(hook_content)
assert hook.url == "testurl"
assert hook.push_events is True
assert hook.tag_push_events is True
# there is no update method for system hooks
def test_delete_system_hook(gl, resp_hook_delete):
hook = gl.hooks.get(1)
hook.delete()
gl.hooks.delete(1)
def test_list_group_hooks(group, resp_hooks_list):
hooks = group.hooks.list()
assert hooks[0].id == 1
assert hooks[0].url == "testurl"
assert hooks[1].id == 2
assert hooks[1].url == "testurl_second"
def test_get_group_hook(group, resp_hook_get):
data = group.hooks.get(1)
assert isinstance(data, GroupHook)
assert data.url == "testurl"
assert data.id == 1
def test_create_group_hook(group, resp_hook_create):
hook = group.hooks.create(hook_content)
assert hook.url == "testurl"
assert hook.push_events is True
assert hook.tag_push_events is True
def test_update_group_hook(group, resp_hook_update):
hook = group.hooks.get(1)
assert hook.id == 1
hook.url = "testurl_more"
hook.save()
def test_delete_group_hook(group, resp_hook_delete):
hook = group.hooks.get(1)
hook.delete()
group.hooks.delete(1)
def test_list_project_hooks(project, resp_hooks_list):
hooks = project.hooks.list()
assert hooks[0].id == 1
assert hooks[0].url == "testurl"
assert hooks[1].id == 2
assert hooks[1].url == "testurl_second"
def test_get_project_hook(project, resp_hook_get):
data = project.hooks.get(1)
assert isinstance(data, ProjectHook)
assert data.url == "testurl"
assert data.id == 1
def test_create_project_hook(project, resp_hook_create):
hook = project.hooks.create(hook_content)
assert hook.url == "testurl"
assert hook.push_events is True
assert hook.tag_push_events is True
def test_update_project_hook(project, resp_hook_update):
hook = project.hooks.get(1)
assert hook.id == 1
hook.url = "testurl_more"
hook.save()
def test_delete_project_hook(project, resp_hook_delete):
hook = project.hooks.get(1)
hook.delete()
project.hooks.delete(1)
|
lgpl-3.0
|
nanolearning/edx-platform
|
common/lib/xmodule/xmodule/tests/xml/test_policy.py
|
248
|
1262
|
"""
Tests that policy json files import correctly when loading XML
"""
from nose.tools import assert_equals, assert_raises # pylint: disable=no-name-in-module
from xmodule.tests.xml.factories import CourseFactory
from xmodule.tests.xml import XModuleXmlImportTest
class TestPolicy(XModuleXmlImportTest):
"""
Tests that policy json files import correctly when loading xml
"""
def test_no_attribute_mapping(self):
# Policy files are json, and thus the values aren't passed through 'deserialize_field'
# Therefore, the string 'null' is passed unchanged to the Float field, which will trigger
# a ValueError
with assert_raises(ValueError):
course = self.process_xml(CourseFactory.build(policy={'days_early_for_beta': 'null'}))
# Trigger the exception by looking at the imported data
course.days_early_for_beta # pylint: disable=pointless-statement
def test_course_policy(self):
course = self.process_xml(CourseFactory.build(policy={'days_early_for_beta': None}))
assert_equals(None, course.days_early_for_beta)
course = self.process_xml(CourseFactory.build(policy={'days_early_for_beta': 9}))
assert_equals(9, course.days_early_for_beta)
|
agpl-3.0
|
yashodhank/erpnext
|
erpnext/manufacturing/doctype/production_planning_tool/production_planning_tool.py
|
4
|
19282
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt, cint, nowdate, add_days, comma_and
from frappe import msgprint, _
from frappe.model.document import Document
from erpnext.manufacturing.doctype.bom.bom import validate_bom_no
from erpnext.manufacturing.doctype.production_order.production_order import get_item_details
class ProductionPlanningTool(Document):
def __init__(self, arg1, arg2=None):
super(ProductionPlanningTool, self).__init__(arg1, arg2)
self.item_dict = {}
def clear_table(self, table_name):
self.set(table_name, [])
def validate_company(self):
if not self.company:
frappe.throw(_("Please enter Company"))
def get_open_sales_orders(self):
""" Pull sales orders which are pending to deliver based on criteria selected"""
so_filter = item_filter = ""
if self.from_date:
so_filter += " and so.transaction_date >= %(from_date)s"
if self.to_date:
so_filter += " and so.transaction_date <= %(to_date)s"
if self.customer:
so_filter += " and so.customer = %(customer)s"
if self.project:
so_filter += " and so.project = %(project)s"
if self.fg_item:
item_filter += " and so_item.item_code = %(item)s"
open_so = frappe.db.sql("""
select distinct so.name, so.transaction_date, so.customer, so.base_grand_total
from `tabSales Order` so, `tabSales Order Item` so_item
where so_item.parent = so.name
and so.docstatus = 1 and so.status != "Stopped"
and so.company = %(company)s
and so_item.qty > so_item.delivered_qty {0} {1}
and (exists (select name from `tabBOM` bom where bom.item=so_item.item_code
and bom.is_active = 1)
or exists (select name from `tabPacked Item` pi
where pi.parent = so.name and pi.parent_item = so_item.item_code
and exists (select name from `tabBOM` bom where bom.item=pi.item_code
and bom.is_active = 1)))
""".format(so_filter, item_filter), {
"from_date": self.from_date,
"to_date": self.to_date,
"customer": self.customer,
"project": self.project,
"item": self.fg_item,
"company": self.company
}, as_dict=1)
self.add_so_in_table(open_so)
def add_so_in_table(self, open_so):
""" Add sales orders in the table"""
self.clear_table("sales_orders")
so_list = []
for r in open_so:
if cstr(r['name']) not in so_list:
pp_so = self.append('sales_orders', {})
pp_so.sales_order = r['name']
pp_so.sales_order_date = cstr(r['transaction_date'])
pp_so.customer = cstr(r['customer'])
pp_so.grand_total = flt(r['base_grand_total'])
def get_pending_material_requests(self):
""" Pull Material Requests that are pending based on criteria selected"""
mr_filter = item_filter = ""
if self.from_date:
mr_filter += " and mr.transaction_date >= %(from_date)s"
if self.to_date:
mr_filter += " and mr.transaction_date <= %(to_date)s"
if self.warehouse:
mr_filter += " and mr_item.warehouse = %(warehouse)s"
if self.fg_item:
item_filter += " and mr_item.item_code = %(item)s"
pending_mr = frappe.db.sql("""
select distinct mr.name, mr.transaction_date
from `tabMaterial Request` mr, `tabMaterial Request Item` mr_item
where mr_item.parent = mr.name
and mr.material_request_type = "Manufacture"
and mr.docstatus = 1
and mr_item.qty > ifnull(mr_item.ordered_qty,0) {0} {1}
and (exists (select name from `tabBOM` bom where bom.item=mr_item.item_code
and bom.is_active = 1))
""".format(mr_filter, item_filter), {
"from_date": self.from_date,
"to_date": self.to_date,
"warehouse": self.warehouse,
"item": self.fg_item
}, as_dict=1)
self.add_mr_in_table(pending_mr)
def add_mr_in_table(self, pending_mr):
""" Add Material Requests in the table"""
self.clear_table("material_requests")
mr_list = []
for r in pending_mr:
if cstr(r['name']) not in mr_list:
mr = self.append('material_requests', {})
mr.material_request = r['name']
mr.material_request_date = cstr(r['transaction_date'])
def get_items(self):
if self.get_items_from == "Sales Order":
self.get_so_items()
elif self.get_items_from == "Material Request":
self.get_mr_items()
def get_so_items(self):
so_list = [d.sales_order for d in self.get('sales_orders') if d.sales_order]
if not so_list:
msgprint(_("Please enter Sales Orders in the above table"))
return []
item_condition = ""
if self.fg_item:
item_condition = ' and so_item.item_code = "{0}"'.format(frappe.db.escape(self.fg_item))
items = frappe.db.sql("""select distinct parent, item_code, warehouse,
(qty - delivered_qty) as pending_qty
from `tabSales Order Item` so_item
where parent in (%s) and docstatus = 1 and qty > delivered_qty
and exists (select name from `tabBOM` bom where bom.item=so_item.item_code
and bom.is_active = 1) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
if self.fg_item:
item_condition = ' and pi.item_code = "{0}"'.format(frappe.db.escape(self.fg_item))
packed_items = frappe.db.sql("""select distinct pi.parent, pi.item_code, pi.warehouse as warehouse,
(((so_item.qty - so_item.delivered_qty) * pi.qty) / so_item.qty)
as pending_qty
from `tabSales Order Item` so_item, `tabPacked Item` pi
where so_item.parent = pi.parent and so_item.docstatus = 1
and pi.parent_item = so_item.item_code
and so_item.parent in (%s) and so_item.qty > so_item.delivered_qty
and exists (select name from `tabBOM` bom where bom.item=pi.item_code
and bom.is_active = 1) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
self.add_items(items + packed_items)
def get_mr_items(self):
mr_list = [d.material_request for d in self.get('material_requests') if d.material_request]
if not mr_list:
msgprint(_("Please enter Material Requests in the above table"))
return []
item_condition = ""
if self.fg_item:
item_condition = ' and mr_item.item_code = "' + frappe.db.escape(self.fg_item, percent=False) + '"'
items = frappe.db.sql("""select distinct parent, name, item_code, warehouse,
(qty - ordered_qty) as pending_qty
from `tabMaterial Request Item` mr_item
where parent in (%s) and docstatus = 1 and qty > ordered_qty
and exists (select name from `tabBOM` bom where bom.item=mr_item.item_code
and bom.is_active = 1) %s""" % \
(", ".join(["%s"] * len(mr_list)), item_condition), tuple(mr_list), as_dict=1)
self.add_items(items)
def add_items(self, items):
self.clear_table("items")
for p in items:
item_details = get_item_details(p['item_code'])
pi = self.append('items', {})
pi.warehouse = p['warehouse']
pi.item_code = p['item_code']
pi.description = item_details and item_details.description or ''
pi.stock_uom = item_details and item_details.stock_uom or ''
pi.bom_no = item_details and item_details.bom_no or ''
pi.planned_qty = flt(p['pending_qty'])
pi.pending_qty = flt(p['pending_qty'])
if self.get_items_from == "Sales Order":
pi.sales_order = p['parent']
elif self.get_items_from == "Material Request":
pi.material_request = p['parent']
pi.material_request_item = p['name']
def validate_data(self):
self.validate_company()
for d in self.get('items'):
if not d.bom_no:
frappe.throw(_("Please select BOM for Item in Row {0}").format(d.idx))
else:
validate_bom_no(d.item_code, d.bom_no)
if not flt(d.planned_qty):
frappe.throw(_("Please enter Planned Qty for Item {0} at row {1}").format(d.item_code, d.idx))
def raise_production_orders(self):
"""It will raise production order (Draft) for all distinct FG items"""
self.validate_data()
from erpnext.utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self, "stock_uom", "planned_qty")
items = self.get_production_items()
pro_list = []
frappe.flags.mute_messages = True
for key in items:
production_order = self.create_production_order(items[key])
if production_order:
pro_list.append(production_order)
frappe.flags.mute_messages = False
if pro_list:
pro_list = ["""<a href="#Form/Production Order/%s" target="_blank">%s</a>""" % \
(p, p) for p in pro_list]
msgprint(_("{0} created").format(comma_and(pro_list)))
else :
msgprint(_("No Production Orders created"))
def get_production_items(self):
item_dict = {}
for d in self.get("items"):
item_details= {
"production_item" : d.item_code,
"sales_order" : d.sales_order,
"material_request" : d.material_request,
"material_request_item" : d.material_request_item,
"bom_no" : d.bom_no,
"description" : d.description,
"stock_uom" : d.stock_uom,
"company" : self.company,
"wip_warehouse" : "",
"fg_warehouse" : d.warehouse,
"status" : "Draft",
"project" : frappe.db.get_value("Sales Order", d.sales_order, "project")
}
""" Club similar BOM and item for processing in case of Sales Orders """
if self.get_items_from == "Material Request":
item_details.update({
"qty": d.planned_qty
})
item_dict[(d.item_code, d.material_request_item, d.warehouse)] = item_details
else:
item_details.update({
"qty":flt(item_dict.get((d.item_code, d.sales_order, d.warehouse),{})
.get("qty")) + flt(d.planned_qty)
})
item_dict[(d.item_code, d.sales_order, d.warehouse)] = item_details
return item_dict
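# Editorial note: the dict returned above is keyed by
# (item_code, material_request_item, warehouse) when pulling from Material
# Requests, and by (item_code, sales_order, warehouse) otherwise; in the
# sales-order case planned quantities for the same key are summed, which is
# what clubs identical BOM/item rows into a single production order each.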
def create_production_order(self, item_dict):
"""Create production order. Called from Production Planning Tool"""
from erpnext.manufacturing.doctype.production_order.production_order import OverProductionError, get_default_warehouse
warehouse = get_default_warehouse()
pro = frappe.new_doc("Production Order")
pro.update(item_dict)
pro.set_production_order_operations()
if warehouse:
pro.wip_warehouse = warehouse.get('wip_warehouse')
if not pro.fg_warehouse:
pro.fg_warehouse = warehouse.get('fg_warehouse')
try:
pro.insert()
return pro.name
except OverProductionError:
pass
def get_so_wise_planned_qty(self):
"""
Build planned quantities grouped by BOM:
bom_dict = {
bom_no: [[sales_order or material_request_item, planned_qty], ...]
}
"""
bom_dict = {}
for d in self.get("items"):
if self.get_items_from == "Material Request":
bom_dict.setdefault(d.bom_no, []).append([d.material_request_item, flt(d.planned_qty)])
else:
bom_dict.setdefault(d.bom_no, []).append([d.sales_order, flt(d.planned_qty)])
return bom_dict
def download_raw_materials(self):
""" Create csv data for required raw material to produce finished goods"""
self.validate_data()
bom_dict = self.get_so_wise_planned_qty()
self.get_raw_materials(bom_dict)
return self.get_csv()
def get_raw_materials(self, bom_dict,non_stock_item=0):
""" Get raw materials considering sub-assembly items
Populates self.item_dict as:
{
"item_code": [[qty_required, description, stock_uom, min_order_qty, sales_order], ...]
}
"""
item_list = []
for bom, so_wise_qty in bom_dict.items():
bom_wise_item_details = {}
if self.use_multi_level_bom and self.only_raw_materials and self.include_subcontracted:
# get all raw materials including sub-assembly children
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
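# Editorial note: the ("and item.is_stock_item = 1", "")[non_stock_item]
# expression below keeps the stock-item filter when non_stock_item is 0 and
# drops it (so non-stock items are included) when it is 1.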
for d in frappe.db.sql("""select fb.item_code,
ifnull(sum(fb.qty/ifnull(bom.quantity, 1)), 0) as qty,
fb.description, fb.stock_uom, item.min_order_qty
from `tabBOM Explosion Item` fb, `tabBOM` bom, `tabItem` item
where bom.name = fb.parent and item.name = fb.item_code
and (item.is_sub_contracted_item = 0 or ifnull(item.default_bom, "")="")
""" + ("and item.is_stock_item = 1","")[non_stock_item] + """
and fb.docstatus<2 and bom.name=%(bom)s
group by fb.item_code, fb.stock_uom""", {"bom":bom}, as_dict=1):
bom_wise_item_details.setdefault(d.item_code, d)
else:
# Get all raw materials considering SA items as raw materials,
# so no children of SA items
bom_wise_item_details = self.get_subitems(bom_wise_item_details, bom,1, \
self.use_multi_level_bom,self.only_raw_materials, self.include_subcontracted,non_stock_item)
for item, item_details in bom_wise_item_details.items():
for so_qty in so_wise_qty:
item_list.append([item, flt(item_details.qty) * so_qty[1], item_details.description,
item_details.stock_uom, item_details.min_order_qty, so_qty[0]])
self.make_items_dict(item_list)
def get_subitems(self,bom_wise_item_details, bom, parent_qty, include_sublevel, only_raw, supply_subs,non_stock_item=0):
for d in frappe.db.sql("""SELECT bom_item.item_code, default_material_request_type,
ifnull(%(parent_qty)s * sum(bom_item.qty/ifnull(bom.quantity, 1)), 0) as qty,
item.is_sub_contracted_item as is_sub_contracted, item.default_bom as default_bom,
bom_item.description as description, bom_item.stock_uom as stock_uom, item.min_order_qty
as min_order_qty FROM `tabBOM Item` bom_item, `tabBOM` bom, tabItem item
where bom.name = bom_item.parent and bom.name = %(bom)s and bom_item.docstatus < 2
and bom_item.item_code = item.name
""" + ("and item.is_stock_item = 1","")[non_stock_item] + """
group by bom_item.item_code""", {"bom": bom, "parent_qty": parent_qty}, as_dict=1):
if (d.default_material_request_type == "Purchase" and not (d.is_sub_contracted \
and only_raw and include_sublevel)) or (d.default_material_request_type == \
"Manufacture" and not only_raw):
if d.item_code in bom_wise_item_details:
bom_wise_item_details[d.item_code].qty = bom_wise_item_details[d.item_code].qty\
+ d.qty
else:
bom_wise_item_details[d.item_code] = d
if include_sublevel:
if (d.default_material_request_type == "Purchase" and d.is_sub_contracted \
and supply_subs) or (d.default_material_request_type == "Manufacture"):
self.get_subitems(bom_wise_item_details,d.default_bom, \
d.qty, include_sublevel, only_raw, supply_subs)
return bom_wise_item_details
def make_items_dict(self, item_list):
for i in item_list:
self.item_dict.setdefault(i[0], []).append([flt(i[1]), i[2], i[3], i[4], i[5]])
def get_csv(self):
item_list = [['Item Code', 'Description', 'Stock UOM', 'Required Qty', 'Warehouse',
'Quantity Requested for Purchase', 'Ordered Qty', 'Actual Qty']]
for item in self.item_dict:
total_qty = sum([flt(d[0]) for d in self.item_dict[item]])
item_list.append([item, self.item_dict[item][0][1], self.item_dict[item][0][2], total_qty])
item_qty = frappe.db.sql("""select warehouse, indented_qty, ordered_qty, actual_qty
from `tabBin` where item_code = %s""", item, as_dict=1)
i_qty, o_qty, a_qty = 0, 0, 0
for w in item_qty:
i_qty, o_qty, a_qty = i_qty + flt(w.indented_qty), o_qty + flt(w.ordered_qty), a_qty + flt(w.actual_qty)
item_list.append(['', '', '', '', w.warehouse, flt(w.indented_qty),
flt(w.ordered_qty), flt(w.actual_qty)])
if item_qty:
item_list.append(['', '', '', '', 'Total', i_qty, o_qty, a_qty])
return item_list
def raise_material_requests(self):
"""
Raise Material Request if projected qty is less than qty required
Requested qty should be shortage qty considering minimum order qty
"""
self.validate_data()
if not self.purchase_request_for_warehouse:
frappe.throw(_("Please enter Warehouse for which Material Request will be raised"))
bom_dict = self.get_so_wise_planned_qty()
self.get_raw_materials(bom_dict,self.create_material_requests_non_stock_request)
if self.item_dict:
self.create_material_request()
def get_requested_items(self):
items_to_be_requested = frappe._dict()
if not self.create_material_requests_for_all_required_qty:
item_projected_qty = self.get_projected_qty()
for item, so_item_qty in self.item_dict.items():
total_qty = sum([flt(d[0]) for d in so_item_qty])
requested_qty = 0
if self.create_material_requests_for_all_required_qty:
requested_qty = total_qty
elif total_qty > item_projected_qty.get(item, 0):
# shortage
requested_qty = total_qty - flt(item_projected_qty.get(item))
# consider minimum order qty
if requested_qty and requested_qty < flt(so_item_qty[0][3]):
requested_qty = flt(so_item_qty[0][3])
# distribute requested qty SO wise
for item_details in so_item_qty:
if requested_qty:
sales_order = item_details[4] or "No Sales Order"
if self.get_items_from == "Material Request":
sales_order = "No Sales Order"
if requested_qty <= item_details[0]:
adjusted_qty = requested_qty
else:
adjusted_qty = item_details[0]
items_to_be_requested.setdefault(item, {}).setdefault(sales_order, 0)
items_to_be_requested[item][sales_order] += adjusted_qty
requested_qty -= adjusted_qty
else:
break
# requested qty >= total so qty, due to minimum order qty
if requested_qty:
items_to_be_requested.setdefault(item, {}).setdefault("No Sales Order", 0)
items_to_be_requested[item]["No Sales Order"] += requested_qty
return items_to_be_requested
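# Editorial worked example of the shortage logic above (illustrative numbers):
# an item needs 10 units in total, split 6 (SO-A) / 4 (SO-B), with projected
# qty 4 and minimum order qty 8. The shortage is 10 - 4 = 6, bumped to 8 by
# the minimum order qty; distribution then gives SO-A -> 6 and SO-B -> 2.
# With a minimum order qty of 12 the leftover 2 would land under "No Sales Order".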
def get_projected_qty(self):
items = self.item_dict.keys()
item_projected_qty = frappe.db.sql("""select item_code, sum(projected_qty)
from `tabBin` where item_code in (%s) and warehouse=%s group by item_code""" %
(", ".join(["%s"]*len(items)), '%s'), tuple(items + [self.purchase_request_for_warehouse]))
return dict(item_projected_qty)
def create_material_request(self):
items_to_be_requested = self.get_requested_items()
material_request_list = []
if items_to_be_requested:
for item in items_to_be_requested:
item_wrapper = frappe.get_doc("Item", item)
material_request = frappe.new_doc("Material Request")
material_request.update({
"transaction_date": nowdate(),
"status": "Draft",
"company": self.company,
"requested_by": frappe.session.user
})
material_request.update({"material_request_type": item_wrapper.default_material_request_type})
for sales_order, requested_qty in items_to_be_requested[item].items():
material_request.append("items", {
"doctype": "Material Request Item",
"__islocal": 1,
"item_code": item,
"item_name": item_wrapper.item_name,
"description": item_wrapper.description,
"uom": item_wrapper.stock_uom,
"item_group": item_wrapper.item_group,
"brand": item_wrapper.brand,
"qty": requested_qty,
"schedule_date": add_days(nowdate(), cint(item_wrapper.lead_time_days)),
"warehouse": self.purchase_request_for_warehouse,
"sales_order": sales_order if sales_order!="No Sales Order" else None,
"project": frappe.db.get_value("Sales Order", sales_order, "project") \
if sales_order!="No Sales Order" else None
})
material_request.flags.ignore_permissions = 1
material_request.submit()
material_request_list.append(material_request.name)
if material_request_list:
message = ["""<a href="#Form/Material Request/%s" target="_blank">%s</a>""" % \
(p, p) for p in material_request_list]
msgprint(_("Material Requests {0} created").format(comma_and(message)))
else:
msgprint(_("Nothing to request"))
|
agpl-3.0
|
TangHao1987/intellij-community
|
python/lib/Lib/site-packages/django/contrib/auth/tests/views.py
|
71
|
14822
|
import os
import re
import urllib
from django.conf import settings
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.test import TestCase
from django.core import mail
from django.core.urlresolvers import reverse
from django.http import QueryDict
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the following test cases.
"""
fixtures = ['authtestdata.json']
urls = 'django.contrib.auth.tests.urls'
def setUp(self):
self.old_LANGUAGES = settings.LANGUAGES
self.old_LANGUAGE_CODE = settings.LANGUAGE_CODE
settings.LANGUAGES = (('en', 'English'),)
settings.LANGUAGE_CODE = 'en'
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
def tearDown(self):
settings.LANGUAGES = self.old_LANGUAGES
settings.LANGUAGE_CODE = self.old_LANGUAGE_CODE
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
def login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password
}
)
self.assertEquals(response.status_code, 302)
self.assert_(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
self.assert_(SESSION_KEY in self.client.session)
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"Error is raised if the provided email address isn't currently registered"
response = self.client.get('/password_reset/')
self.assertEquals(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertContains(response, "That e-mail address doesn't have an associated user account")
self.assertEquals(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEquals(response.status_code, 302)
self.assertEquals(len(mail.outbox), 1)
self.assert_("http://" in mail.outbox[0].body)
self.assertEquals(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': '[email protected]'})
self.assertEquals(response.status_code, 302)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals("[email protected]", mail.outbox[0].from_email)
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEquals(response.status_code, 302)
self.assertEquals(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assert_(urlmatch is not None, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertEquals(response.status_code, 200)
self.assert_("Please enter your new password" in response.content)
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0"*4) + path[-1]
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assert_("The password reset link was invalid" in response.content)
def test_confirm_invalid_user(self):
# Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456-1-1/')
self.assertEquals(response.status_code, 200)
self.assert_("The password reset link was invalid" in response.content)
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0"*4) + path[-1]
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2':' anewpassword'})
# Check the password has not been changed
u = User.objects.get(email='[email protected]')
self.assert_(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# It redirects us to a 'complete' page:
self.assertEquals(response.status_code, 302)
# Check the password has been changed
u = User.objects.get(email='[email protected]')
self.assert_(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assert_("The password reset link was invalid" in response.content)
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2':' x'})
self.assertEquals(response.status_code, 200)
self.assert_("The two password fields didn't match" in response.content)
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password
}
)
self.assertEquals(response.status_code, 200)
self.assert_("Please enter a correct username and password. Note that both fields are case-sensitive." in response.content)
def logout(self):
response = self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
}
)
self.assertEquals(response.status_code, 200)
self.assert_("Your old password was entered incorrectly. Please enter it again." in response.content)
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
}
)
self.assertEquals(response.status_code, 200)
self.assert_("The two password fields didn't match." in response.content)
def test_password_change_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
}
)
self.assertEquals(response.status_code, 302)
self.assert_(response['Location'].endswith('/password_change/done/'))
self.fail_login()
self.login(password='password1')
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('django.contrib.auth.views.login'))
self.assertEquals(response.status_code, 200)
site = Site.objects.get_current()
self.assertEquals(response.context['site'], site)
self.assertEquals(response.context['site_name'], site.name)
self.assert_(isinstance(response.context['form'], AuthenticationForm),
'Login form is not an AuthenticationForm')
def test_security_check(self, password='password'):
login_url = reverse('django.contrib.auth.views.login')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'https://example.com',
'ftp://exampel.com',
'//example.com'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urllib.quote(bad_url)
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
}
)
self.assertEquals(response.status_code, 302)
self.assertFalse(bad_url in response['Location'],
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https:///',
'//testserver/'):
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urllib.quote(good_url)
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
}
)
self.assertEquals(response.status_code, 302)
self.assertTrue(good_url in response['Location'],
"%s should be allowed" % good_url)
class LoginURLSettings(AuthViewsTestCase):
urls = 'django.contrib.auth.tests.urls'
def setUp(self):
super(LoginURLSettings, self).setUp()
self.old_LOGIN_URL = settings.LOGIN_URL
def tearDown(self):
super(LoginURLSettings, self).tearDown()
settings.LOGIN_URL = self.old_LOGIN_URL
def get_login_required_url(self, login_url):
settings.LOGIN_URL = login_url
response = self.client.get('/login_required/')
self.assertEquals(response.status_code, 302)
return response['Location']
def test_standard_login_url(self):
login_url = '/login/'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = '/login_required/'
self.assertEqual(login_required_url,
'http://testserver%s?%s' % (login_url, querystring.urlencode('/')))
def test_remote_login_url(self):
login_url = 'http://remote.example.com/login'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url,
'%s?%s' % (login_url, querystring.urlencode('/')))
def test_https_login_url(self):
login_url = 'https:///login/'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url,
'%s?%s' % (login_url, querystring.urlencode('/')))
def test_login_url_with_querystring(self):
login_url = '/login/?pretty=1'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('pretty=1', mutable=True)
querystring['next'] = '/login_required/'
self.assertEqual(login_required_url, 'http://testserver/login/?%s' %
querystring.urlencode('/'))
def test_remote_login_url_with_next_querystring(self):
login_url = 'http://remote.example.com/login/'
login_required_url = self.get_login_required_url('%s?next=/default/' %
login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url, '%s?%s' % (login_url,
querystring.urlencode('/')))
class LogoutTest(AuthViewsTestCase):
urls = 'django.contrib.auth.tests.urls'
def confirm_logged_out(self):
self.assert_(SESSION_KEY not in self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertEquals(200, response.status_code)
self.assert_('Logged out' in response.content)
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertTrue('site' in response.context)
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assert_(response['Location'].endswith('/somewhere/'))
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assert_(response['Location'].endswith('/login/'))
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assert_(response['Location'].endswith('/somewhere/'))
self.confirm_logged_out()
|
apache-2.0
|
jn7163/django
|
tests/admin_views/customadmin.py
|
379
|
2366
|
"""
A second, custom AdminSite -- see tests.CustomAdminSiteTests.
"""
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.http import HttpResponse
from . import admin as base_admin, forms, models
class Admin2(admin.AdminSite):
app_index_template = 'custom_admin/app_index.html'
login_form = forms.CustomAdminAuthenticationForm
login_template = 'custom_admin/login.html'
logout_template = 'custom_admin/logout.html'
index_template = ['custom_admin/index.html'] # a list, to test fix for #18697
password_change_template = 'custom_admin/password_change_form.html'
password_change_done_template = 'custom_admin/password_change_done.html'
# A custom index view.
def index(self, request, extra_context=None):
return super(Admin2, self).index(request, {'foo': '*bar*'})
def get_urls(self):
return [
url(r'^my_view/$', self.admin_view(self.my_view), name='my_view'),
] + super(Admin2, self).get_urls()
def my_view(self, request):
return HttpResponse("Django is a magical pony!")
def password_change(self, request, extra_context=None):
return super(Admin2, self).password_change(request, {'spam': 'eggs'})
class UserLimitedAdmin(UserAdmin):
# used for testing password change on a user not in queryset
def get_queryset(self, request):
qs = super(UserLimitedAdmin, self).get_queryset(request)
return qs.filter(is_superuser=False)
class CustomPwdTemplateUserAdmin(UserAdmin):
change_user_password_template = ['admin/auth/user/change_password.html'] # a list, to test fix for #18697
site = Admin2(name="admin2")
site.register(models.Article, base_admin.ArticleAdmin)
site.register(models.Section, inlines=[base_admin.ArticleInline])
site.register(models.Thing, base_admin.ThingAdmin)
site.register(models.Fabric, base_admin.FabricAdmin)
site.register(models.ChapterXtra1, base_admin.ChapterXtra1Admin)
site.register(User, UserLimitedAdmin)
site.register(models.UndeletableObject, base_admin.UndeletableObjectAdmin)
site.register(models.Simple, base_admin.AttributeErrorRaisingAdmin)
simple_site = Admin2(name='admin4')
simple_site.register(User, CustomPwdTemplateUserAdmin)
|
bsd-3-clause
|
mintoo/NetDim
|
pyNMS/graph_generation/graph_generation_window.py
|
2
|
3175
|
# Copyright (C) 2017 Antoine Fourmy <antoine dot fourmy at gmail dot com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from objects.objects import *
from os.path import join
from PyQt5.QtCore import Qt, QSize
from PyQt5.QtGui import QIcon
from .graph_dimension import GraphDimensionWindow
from PyQt5.QtWidgets import (
QGridLayout,
QGroupBox,
QWidget,
QLabel,
QLineEdit,
QComboBox,
QPushButton
)
class GraphGenerationWindow(QWidget):
classic_graph = ('ring', 'tree', 'star', 'full-mesh')
complex_graph = ('square-tiling', 'hypercube', 'kneser', 'petersen')
def __init__(self, controller):
super(GraphGenerationWindow, self).__init__()
self.controller = controller
grid = QGridLayout()
grid.addWidget(self.classic_graph_generation(), 0, 0)
grid.addWidget(self.complex_graph_generation(), 1, 0)
self.setLayout(grid)
self.setWindowTitle('Graph generation')
self.resize(480, 320)
def classic_graph_generation(self):
classic_graph_groupbox = QGroupBox('Classic graph generation')
layout = QGridLayout(classic_graph_groupbox)
for index, graph_type in enumerate(self.classic_graph):
button = QPushButton()
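# bind the current loop value via the g=graph_type default argument, so
# each button opens the dialog for its own graph type (avoids the usual
# late-binding behaviour of closures in a loop)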
button.clicked.connect(lambda _, g=graph_type: self.graph_dimension(g))
image_path = join(self.controller.path_icon, graph_type + '.png')
icon = QIcon(image_path)
button.setIcon(icon)
button.setIconSize(QSize(50, 50))
layout.addWidget(button, index // 2, index % 2)
return classic_graph_groupbox
def complex_graph_generation(self):
complex_graph_groupbox = QGroupBox('Complex graph generation')
layout = QGridLayout(complex_graph_groupbox)
for index, graph_type in enumerate(self.complex_graph):
button = QPushButton()
button.clicked.connect(lambda _, g=graph_type: self.graph_dimension(g))
image_path = join(self.controller.path_icon, graph_type + '.png')
icon = QIcon(image_path)
button.setIcon(icon)
button.setIconSize(QSize(50, 50))
layout.addWidget(button, index // 2, index % 2)
return complex_graph_groupbox
def graph_dimension(self, graph_type):
self.window = GraphDimensionWindow(graph_type, self.controller)
self.window.show()
|
gpl-3.0
|
a-e-m/psd-tools
|
src/psd_tools/decoder/image_resources.py
|
7
|
5317
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import io
import warnings
import collections
from psd_tools.utils import (read_pascal_string, unpack, read_fmt,
read_unicode_string, be_array_from_bytes,
decode_fixed_point_32bit)
from psd_tools.constants import ImageResourceID, PrintScaleStyle, DisplayResolutionUnit, DimensionUnit
from psd_tools.decoder import decoders
_image_resource_decoders, register = decoders.new_registry()
_image_resource_decoders.update({
ImageResourceID.LAYER_STATE_INFO: decoders.single_value("H"),
ImageResourceID.WATERMARK: decoders.single_value("B"),
ImageResourceID.ICC_UNTAGGED_PROFILE: decoders.boolean(),
ImageResourceID.EFFECTS_VISIBLE: decoders.boolean(),
ImageResourceID.IDS_SEED_NUMBER: decoders.single_value("I"),
ImageResourceID.INDEXED_COLOR_TABLE_COUNT: decoders.single_value("H"),
ImageResourceID.TRANSPARENCY_INDEX: decoders.single_value("H"),
ImageResourceID.GLOBAL_ALTITUDE: decoders.single_value("I"),
ImageResourceID.GLOBAL_ANGLE_OBSOLETE: decoders.single_value("I"),
ImageResourceID.COPYRIGHT_FLAG: decoders.boolean("H"),
ImageResourceID.ALPHA_NAMES_UNICODE: decoders.unicode_string,
ImageResourceID.WORKFLOW_URL: decoders.unicode_string,
})
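# Editorial note: decoding uses a simple registry pattern -- fixed-size scalar
# resources are mapped to generic decoders in the dict above, while resources
# with more structure register a dedicated function below via @register(...).
# Anything without a registered decoder is left as raw bytes by
# parse_image_resource().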
PrintScale = collections.namedtuple('PrintScale', 'style, x, y, scale')
PrintFlags = collections.namedtuple('PrintFlags', 'labels, crop_marks, color_bars, registration_marks, negative, flip, interpolate, caption, print_flags')
PrintFlagsInfo = collections.namedtuple('PrintFlagsInfo', 'version, center_crop_marks, bleed_width_value, bleed_width_scale')
VersionInfo = collections.namedtuple('VersionInfo', 'version, has_real_merged_data, writer_name, reader_name, file_version')
PixelAspectRation = collections.namedtuple('PixelAspectRatio', 'version aspect')
_ResolutionInfo = collections.namedtuple('ResolutionInfo', 'h_res, h_res_unit, width_unit, v_res, v_res_unit, height_unit')
class ResolutionInfo(_ResolutionInfo):
def __repr__(self):
return "ResolutionInfo(h_res=%s, h_res_unit=%s, v_res=%s, v_res_unit=%s, width_unit=%s, height_unit=%s)" % (
self.h_res,
DisplayResolutionUnit.name_of(self.h_res_unit),
self.v_res,
DisplayResolutionUnit.name_of(self.v_res_unit),
DimensionUnit.name_of(self.width_unit),
DimensionUnit.name_of(self.height_unit),
)
def decode(image_resource_blocks):
"""
Replaces ``data`` of image resource blocks with parsed data structures.
"""
return [parse_image_resource(res) for res in image_resource_blocks]
def parse_image_resource(resource):
"""
Replaces ``data`` of image resource block with a parsed data structure.
"""
if not ImageResourceID.is_known(resource.resource_id):
warnings.warn("Unknown resource_id (%s)" % resource.resource_id)
decoder = _image_resource_decoders.get(resource.resource_id, lambda data: data)
return resource._replace(data = decoder(resource.data))
@register(ImageResourceID.LAYER_GROUP_INFO)
def _decode_layer_group_info(data):
return be_array_from_bytes("H", data)
@register(ImageResourceID.LAYER_SELECTION_IDS)
def _decode_layer_selection(data):
return be_array_from_bytes("I", data[2:])
@register(ImageResourceID.LAYER_GROUPS_ENABLED_ID)
def _decode_layer_groups_enabled_id(data):
return be_array_from_bytes("B", data)
@register(ImageResourceID.VERSION_INFO)
def _decode_version_info(data):
fp = io.BytesIO(data)
return VersionInfo(
read_fmt("I", fp)[0],
read_fmt("?", fp)[0],
read_unicode_string(fp),
read_unicode_string(fp),
read_fmt("I", fp)[0],
)
@register(ImageResourceID.PIXEL_ASPECT_RATIO)
def _decode_pixel_aspect_ration(data):
version = unpack("I", data[:4])[0]
aspect = unpack("d", data[4:])[0]
return PixelAspectRation(version, aspect)
@register(ImageResourceID.PRINT_FLAGS)
def _decode_print_flags(data):
return PrintFlags(*(unpack("9?x", data)))
@register(ImageResourceID.PRINT_FLAGS_INFO)
def _decode_print_flags_info(data):
return PrintFlagsInfo(*(unpack("HBxIh", data)))
@register(ImageResourceID.PRINT_SCALE)
def _decode_print_scale(data):
style, x, y, scale = unpack("H3f", data)
if not PrintScaleStyle.is_known(style):
warnings.warn("Unknown print scale style (%s)" % style)
return PrintScale(style, x, y, scale)
@register(ImageResourceID.CAPTION_PASCAL)
def _decode_caption_pascal(data):
fp = io.BytesIO(data)
return read_pascal_string(fp, 'ascii')
@register(ImageResourceID.RESOLUTION_INFO)
def _decode_resolution(data):
h_res, h_res_unit, width_unit, v_res, v_res_unit, height_unit = unpack("4s HH 4s HH", data)
h_res = decode_fixed_point_32bit(h_res)
v_res = decode_fixed_point_32bit(v_res)
return ResolutionInfo(h_res, h_res_unit, width_unit, v_res, v_res_unit, height_unit)
@register(ImageResourceID.ICC_PROFILE)
def _decode_icc(data):
try:
from PIL import ImageCms
except ImportError:
return data
return ImageCms.ImageCmsProfile(io.BytesIO(data))
|
mit
|
rubennj/pvlib-python
|
pvlib/atmosphere.py
|
1
|
7702
|
"""
The ``atmosphere`` module contains methods to calculate
relative and absolute airmass and to determine
pressure from altitude or vice versa.
"""
from __future__ import division
import logging
pvl_logger = logging.getLogger('pvlib')
import numpy as np
AIRMASS_MODELS = ['kastenyoung1989', 'kasten1966', 'simple',
'pickering2002', 'youngirvine1967', 'young1994',
'gueymard1993']
def pres2alt(pressure):
'''
Determine altitude from site pressure.
Parameters
----------
Pressure : scalar or Series
Atmospheric pressure (Pascals)
Returns
-------
altitude : scalar or Series
Altitude in meters above sea level
Notes
------
The following assumptions are made
============================ ================
Parameter Value
============================ ================
Base pressure 101325 Pa
Temperature at zero altitude 288.15 K
Gravitational acceleration 9.80665 m/s^2
Lapse rate -6.5E-3 K/m
Gas constant for air 287.053 J/(kgK)
Relative Humidity 0%
============================ ================
References
-----------
"A Quick Derivation relating altitude to air pressure" from Portland
State Aerospace Society, Version 1.03, 12/22/2004.
'''
alt = 44331.5 - 4946.62 * pressure ** (0.190263)
return alt
def alt2pres(altitude):
'''
Determine site pressure from altitude.
Parameters
----------
Altitude : scalar or Series
Altitude in meters above sea level
Returns
-------
Pressure : scalar or Series
Atmospheric pressure (Pascals)
Notes
------
The following assumptions are made
============================ ================
Parameter Value
============================ ================
Base pressure 101325 Pa
Temperature at zero altitude 288.15 K
Gravitational acceleration 9.80665 m/s^2
Lapse rate -6.5E-3 K/m
Gas constant for air 287.053 J/(kgK)
Relative Humidity 0%
============================ ================
References
-----------
"A Quick Derivation relating altitude to air pressure" from Portland
State Aerospace Society, Version 1.03, 12/22/2004.
'''
press = 100 * ((44331.514 - altitude) / 11880.516) ** (1 / 0.1902632)
return press
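# Editorial worked example (not part of pvlib): the two fits above are
# approximately inverse to each other, so at standard conditions the round
# trip is the identity to within the rounding of the fitted constants:
#
#   >>> pres2alt(101325.)   # ~ -0.1 m, i.e. sea level
#   >>> alt2pres(0.)        # ~ 101325 Pa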
def absoluteairmass(AMrelative, pressure=101325.):
'''
Determine absolute (pressure corrected) airmass from relative
airmass and pressure
Gives the airmass for locations not at sea-level (i.e. not at standard
pressure). The input argument "AMrelative" is the relative airmass. The
input argument "pressure" is the pressure (in Pascals) at the location
of interest and must be greater than 0. The calculation for
absolute airmass is
.. math::
absolute airmass = (relative airmass)*pressure/101325
Parameters
----------
AMrelative : scalar or Series
The airmass at sea-level.
pressure : scalar or Series
The site pressure in Pascal.
Returns
-------
scalar or Series
Absolute (pressure corrected) airmass
References
----------
[1] C. Gueymard, "Critical analysis and performance assessment of
clear sky solar irradiance models using theoretical and measured data,"
Solar Energy, vol. 51, pp. 121-138, 1993.
'''
AMa = AMrelative * pressure / 101325.
return AMa
def relativeairmass(z, model='kastenyoung1989'):
'''
Gives the relative (not pressure-corrected) airmass
Gives the airmass at sea-level when given a sun zenith angle, z (in
degrees).
The "model" variable allows selection of different airmass models
(described below). "model" must be a valid string. If "model" is not
included or is not valid, the default model is 'kastenyoung1989'.
Parameters
----------
z : float or DataFrame
Zenith angle of the sun in degrees.
Note that some models use the apparent (refraction corrected)
zenith angle, and some models use the true (not refraction-corrected)
zenith angle. See model descriptions to determine which type of zenith
angle is required. Apparent zenith angles must be calculated at sea level.
model : String
Available models include the following:
* 'simple' - secant(apparent zenith angle) - Note that this gives -inf at zenith=90
* 'kasten1966' - See reference [1] - requires apparent sun zenith
* 'youngirvine1967' - See reference [2] - requires true sun zenith
* 'kastenyoung1989' - See reference [3] - requires apparent sun zenith
* 'gueymard1993' - See reference [4] - requires apparent sun zenith
* 'young1994' - See reference [5] - requires true sun zenith
* 'pickering2002' - See reference [6] - requires apparent sun zenith
Returns
-------
AM : float or DataFrame
Relative airmass at sea level. Will return NaN values for any
zenith angle greater than 90 degrees.
References
----------
[1] Fritz Kasten. "A New Table and Approximation Formula for the
Relative Optical Air Mass". Technical Report 136, Hanover, N.H.: U.S.
Army Material Command, CRREL.
[2] A. T. Young and W. M. Irvine, "Multicolor Photoelectric Photometry
of the Brighter Planets," The Astronomical Journal, vol. 72,
pp. 945-950, 1967.
[3] Fritz Kasten and Andrew Young. "Revised optical air mass tables and
approximation formula". Applied Optics 28:4735-4738
[4] C. Gueymard, "Critical analysis and performance assessment of
clear sky solar irradiance models using theoretical and measured data,"
Solar Energy, vol. 51, pp. 121-138, 1993.
[5] A. T. Young, "AIR-MASS AND REFRACTION," Applied Optics, vol. 33,
pp. 1108-1110, Feb 1994.
[6] Keith A. Pickering. "The Ancient Star Catalog". DIO 12:1, 20,
[7] Matthew J. Reno, Clifford W. Hansen and Joshua S. Stein,
"Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis"
Sandia Report, (2012).
'''
zenith_rad = np.radians(z)
model = model.lower()
if 'kastenyoung1989' == model:
AM = 1.0 / (np.cos(zenith_rad) + 0.50572*(((6.07995 + (90 - z)) ** - 1.6364)))
elif 'kasten1966' == model:
AM = 1.0 / (np.cos(zenith_rad) + 0.15*((93.885 - z) ** - 1.253))
elif 'simple' == model:
AM = 1.0 / np.cos(zenith_rad)
elif 'pickering2002' == model:
AM = 1.0 / (np.sin(np.radians(90 - z + 244.0 / (165 + 47.0 * (90 - z) ** 1.1))))
elif 'youngirvine1967' == model:
AM = (1.0 / np.cos(zenith_rad)) * (1 - 0.0012*( (1.0 / np.cos(zenith_rad)) ** 2) - 1)
elif 'young1994' == model:
AM = (1.002432*((np.cos(zenith_rad)) ** 2) + 0.148386*(np.cos(zenith_rad)) + 0.0096467) / (np.cos(zenith_rad) ** 3 + 0.149864*(np.cos(zenith_rad) ** 2) + 0.0102963*(np.cos(zenith_rad)) + 0.000303978)
elif 'gueymard1993' == model:
AM = 1.0 / (np.cos(zenith_rad) + 0.00176759*(z)*((94.37515 - z) ** - 1.21563))
else:
pvl_logger.warning("{} is not a valid model type for relative airmass. The 'kastenyoung1989' model was used.".format(model))
AM = 1.0 / (np.cos(zenith_rad) + 0.50572*(((6.07995 + (90 - z)) ** - 1.6364)))
try:
AM[z > 90] = np.nan
except TypeError:
AM = np.nan if z > 90 else AM
return AM
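# --- Editorial usage sketch (not part of pvlib; values are approximate) ---
# Ties the functions above together: relative airmass from zenith angles with
# the default Kasten & Young (1989) model, then pressure-corrected for a site
# at roughly 1000 m altitude via alt2pres().
if __name__ == "__main__":
    zenith = np.array([0., 60., 85.])
    am_rel = relativeairmass(zenith)                        # ~ [1.0, 2.0, 10.3]
    am_abs = absoluteairmass(am_rel, pressure=alt2pres(1000.))
    print(am_rel)
    print(am_abs)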
|
bsd-3-clause
|
kurli/blink-crosswalk
|
Tools/Scripts/webkitpy/common/checkout/scm/svn.py
|
24
|
8502
|
# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import random
import re
import shutil
import string
import sys
import tempfile
from webkitpy.common.memoized import memoized
from webkitpy.common.system.executive import Executive, ScriptError
from .scm import SCM
_log = logging.getLogger(__name__)
class SVN(SCM):
executable_name = "svn"
_svn_metadata_files = frozenset(['.svn', '_svn'])
def __init__(self, cwd, patch_directories, **kwargs):
SCM.__init__(self, cwd, **kwargs)
self._bogus_dir = None
if patch_directories == []:
raise Exception('Empty list of patch directories passed to SCM.__init__')
elif patch_directories == None:
self._patch_directories = [self._filesystem.relpath(cwd, self.checkout_root)]
else:
self._patch_directories = patch_directories
@classmethod
def in_working_directory(cls, path, executive=None):
if os.path.isdir(os.path.join(path, '.svn')):
# This is a fast shortcut for svn info that is usually correct for SVN < 1.7,
# but doesn't work for SVN >= 1.7.
return True
executive = executive or Executive()
svn_info_args = [cls.executable_name, 'info']
try:
exit_code = executive.run_command(svn_info_args, cwd=path, return_exit_code=True)
except OSError, e:
# svn is not installed
return False
return (exit_code == 0)
def _find_uuid(self, path):
if not self.in_working_directory(path):
return None
return self.value_from_svn_info(path, 'Repository UUID')
@classmethod
def value_from_svn_info(cls, path, field_name):
svn_info_args = [cls.executable_name, 'info']
# FIXME: This method should use a passed in executive or be made an instance method and use self._executive.
info_output = Executive().run_command(svn_info_args, cwd=path).rstrip()
match = re.search("^%s: (?P<value>.+)$" % field_name, info_output, re.MULTILINE)
if not match:
raise ScriptError(script_args=svn_info_args, message='svn info did not contain a %s.' % field_name)
return match.group('value').rstrip('\r')
def find_checkout_root(self, path):
uuid = self._find_uuid(path)
# If |path| is not in a working directory, we're supposed to return |path|.
if not uuid:
return path
# Search up the directory hierarchy until we find a different UUID.
last_path = None
while True:
if uuid != self._find_uuid(path):
return last_path
last_path = path
(path, last_component) = self._filesystem.split(path)
if last_path == path:
return None
def _run_svn(self, args, **kwargs):
return self._run([self.executable_name] + args, **kwargs)
@memoized
def _svn_version(self):
return self._run_svn(['--version', '--quiet'])
def has_working_directory_changes(self):
# FIXME: What about files which are not committed yet?
return self._run_svn(["diff"], cwd=self.checkout_root, decode_output=False) != ""
def status_command(self):
return [self.executable_name, 'status']
def _status_regexp(self, expected_types):
field_count = 6 if self._svn_version() > "1.6" else 5
return "^(?P<status>[%s]).{%s} (?P<filename>.+)$" % (expected_types, field_count)
def _add_parent_directories(self, path, recurse):
"""Does 'svn add' to the path and its parents."""
if self.in_working_directory(path):
return
self.add(path, recurse=recurse)
def add_list(self, paths, return_exit_code=False, recurse=True):
for path in paths:
self._add_parent_directories(os.path.dirname(os.path.abspath(path)),
recurse=False)
if recurse:
cmd = ["add"] + paths
else:
cmd = ["add", "--depth", "empty"] + paths
return self._run_svn(cmd, return_exit_code=return_exit_code)
def _delete_parent_directories(self, path):
if not self.in_working_directory(path):
return
if set(os.listdir(path)) - self._svn_metadata_files:
return # Directory has non-trivial files in it.
self.delete(path)
def delete_list(self, paths):
for path in paths:
abs_path = os.path.abspath(path)
parent, base = os.path.split(abs_path)
result = self._run_svn(["delete", "--force", base], cwd=parent)
self._delete_parent_directories(os.path.dirname(abs_path))
return result
def move(self, origin, destination):
return self._run_svn(["mv", "--force", origin, destination], return_exit_code=True)
def exists(self, path):
return not self._run_svn(["info", path], return_exit_code=True, decode_output=False)
def changed_files(self, git_commit=None):
status_command = [self.executable_name, "status"]
status_command.extend(self._patch_directories)
# ACDMR: Added, Conflicted, Deleted, Modified or Replaced
return self._run_status_and_extract_filenames(status_command, self._status_regexp("ACDMR"))
def _added_files(self):
return self._run_status_and_extract_filenames(self.status_command(), self._status_regexp("A"))
def _deleted_files(self):
return self._run_status_and_extract_filenames(self.status_command(), self._status_regexp("D"))
@staticmethod
def supports_local_commits():
return False
def display_name(self):
return "svn"
def svn_revision(self, path):
return self.value_from_svn_info(path, 'Revision')
def timestamp_of_revision(self, path, revision):
# We use --xml to get timestamps like 2013-02-08T08:18:04.964409Z
repository_root = self.value_from_svn_info(self.checkout_root, 'Repository Root')
info_output = Executive().run_command([self.executable_name, 'log', '-r', revision, '--xml', repository_root], cwd=path).rstrip()
match = re.search(r"^<date>(?P<value>.+)</date>\r?$", info_output, re.MULTILINE)
return match.group('value')
def create_patch(self, git_commit=None, changed_files=None):
"""Returns a byte array (str()) representing the patch file.
Patch files are effectively binary since they may contain
files of multiple different encodings."""
if changed_files == []:
return ""
elif changed_files == None:
changed_files = []
return self._run([self._filesystem.join(self.checkout_root, 'Tools', 'Scripts', 'svn-create-patch')] + changed_files,
cwd=self.checkout_root, return_stderr=False,
decode_output=False)
def blame(self, path):
return self._run_svn(['blame', path])
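# --- Added illustrative sketch (not part of the original file) ---
# Demonstrates the regular expression used by value_from_svn_info() against
# some made-up `svn info` output; only the already-imported re module is used.
if __name__ == '__main__':
    sample_info_output = ("Path: .\n"
                          "URL: http://svn.example.org/repo/trunk\n"
                          "Repository UUID: 268f45cc-cd09-0410-ab3c-d52691b4dbfc\n"
                          "Revision: 195000\n")
    field_name = "Repository UUID"
    match = re.search("^%s: (?P<value>.+)$" % field_name, sample_info_output, re.MULTILINE)
    print(match.group('value'))  # prints the sample UUID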
|
bsd-3-clause
|
mjs/juju
|
acceptancetests/repository/charms/statusstresser/tests/10-deploy-test.py
|
8
|
2356
|
#!/usr/bin/python3
# This is an Amulet based test.
# The goal is to ensure the Ubuntu charm
# successfully deploys and can be accessed.
# Note the Ubuntu charm does not have any
# relations or config options.
import amulet
#import os
#import requests
# Timeout value, in seconds to deploy the environment
seconds = 900
# Set up the deployer module to interact and set up the environment.
d = amulet.Deployment()
# Define the environment in terms of charms, their config, and relations.
# Add the Ubuntu charm to the deployment.
d.add('ubuntu')
# Deploy the environment currently defined
try:
# Wait the defined amount of time to deploy the environment.
# Setup makes sure the services are deployed, related, and in a
# "started" state.
d.setup(timeout=seconds)
# Use a sentry to ensure there are no remaining hooks being executed
# on any of the nodes
## d.sentry.wait()
except amulet.helpers.TimeoutError:
# Depending on the configuration the test will fail or be skipped
# if not deployed properly.
error_message = 'The environment did not deploy in %d seconds.' % seconds
amulet.raise_status(amulet.SKIP, msg=error_message)
except:
# Something else has gone wrong, raise the error so we can see it and this
# will automatically "FAIL" the test.
raise
# Access the Ubuntu instance to ensure it has been deployed correctly
# Define the commands to be ran
lsb_release_command = 'cat /etc/lsb-release'
uname_command = 'uname -a'
# Cat the release information
output, code = d.sentry.unit['ubuntu/0'].run(lsb_release_command)
# Confirm the lsb-release command was run successfully
if (code != 0):
error_message = 'The ' + lsb_release_command + ' did not return the expected return code of 0.'
print(output)
amulet.raise_status(amulet.FAIL, msg=error_message)
else:
message = 'The lsb-release command successfully executed.'
print(output)
print(message)
# Get the uname -a output
output, code = d.sentry.unit['ubuntu/0'].run(uname_command)
# Confirm the uname command was run successfully
if (code != 0):
error_message = 'The ' + uname_command + ' did not return the expected return code of 0.'
print(output)
amulet.raise_status(amulet.FAIL, msg=error_message)
else:
message = 'The uname command successfully executed.'
print(output)
print(message)
|
agpl-3.0
|
Spiderlover/Toontown
|
toontown/building/DistributedBankInterior.py
|
3
|
8875
|
from direct.distributed.ClockDelta import *
from direct.distributed.DistributedObject import DistributedObject
from direct.fsm import ClassicFSM, State
from direct.interval.IntervalGlobal import *
from pandac.PandaModules import *
import random
import ToonInteriorColors
from toontown.dna.DNAParser import DNADoor
from toontown.hood import ZoneUtil
from toontown.toon.DistributedNPCToonBase import DistributedNPCToonBase
from toontown.toonbase.ToonBaseGlobal import *
from toontown.toonbase.ToontownGlobals import *
class DistributedBankInterior(DistributedObject):
def __init__(self, cr):
DistributedObject.__init__(self, cr)
self.dnaStore = cr.playGame.dnaStore
self.inVault = False
self.vaultCollNodePath = None
self.fsm = ClassicFSM.ClassicFSM(
'DistributedBankInterior',
[
State.State('off', self.enterOff, self.exitOff,
['vaultClosed', 'vaultOpening', 'vaultOpen', 'vaultClosing']),
State.State('vaultClosed', self.enterVaultClosed, self.exitVaultClosed,
['vaultOpening']),
State.State('vaultOpening', self.enterVaultOpening, self.exitVaultOpening,
['vaultOpen']),
State.State('vaultOpen', self.enterVaultOpen, self.exitVaultOpen,
['vaultClosing']),
State.State('vaultClosing', self.enterVaultClosing, self.exitVaultClosing,
['vaultClosed'])
], 'off', 'off')
self.fsm.enterInitialState()
def announceGenerate(self):
DistributedObject.announceGenerate(self)
self.setup()
def disable(self):
self.ignoreAll()
self.interior.removeNode()
del self.interior
if self.collNodePath is not None:
self.collNodePath.removeNode()
self.collNodePath = None
del self.vaultOpenSfx
del self.vaultCloseSfx
DistributedObject.disable(self)
def setZoneIdAndBlock(self, zoneId, block):
self.zoneId = zoneId
self.block = block
def setState(self, name, timestamp):
self.fsm.request(name, [globalClockDelta.localElapsedTime(timestamp)])
def enterOff(self):
pass
def exitOff(self):
pass
def enterVaultClosed(self, timestamp):
vaultDoor = render.find('**/vault_door')
vaultDoor.setH(0)
if self.inVault:
self.clearVault()
def exitVaultClosed(self):
pass
def enterVaultOpening(self, timestamp):
vaultDoor = render.find('**/vault_door')
doorTrack = Sequence()
# First, spin the vault lock dial:
dial = vaultDoor.find('**/vault_door_front_dial')
doorTrack.append(LerpHprInterval(dial, 2, Vec3(0, 0, -2160), startHpr=(0, 0, 0), blendType='easeOut', fluid=1))
# Then, open the vault door:
doorTrack.append(LerpHprInterval(vaultDoor, 3, Vec3(-120, 0, 0), startHpr=Vec3(0, 0, 0), blendType='easeOut'))
# We need the sound effect to play in parallel:
track = Parallel(SoundInterval(self.vaultOpenSfx, node=vaultDoor), doorTrack)
track.start(timestamp)
def exitVaultOpening(self):
pass
def enterVaultOpen(self, timestamp):
vaultDoor = render.find('**/vault_door')
vaultDoor.setH(-120)
def exitVaultOpen(self):
pass
def enterVaultClosing(self, timestamp):
vaultDoor = render.find('**/vault_door')
doorTrack = Sequence()
# First, close the vault door:
doorTrack.append(LerpHprInterval(vaultDoor, 3, Vec3(0, 0, 0), startHpr=Vec3(-120, 0, 0), blendType='easeOut'))
# Then, spin the vault lock dial:
dial = vaultDoor.find('**/vault_door_front_dial')
doorTrack.append(LerpHprInterval(dial, 2, Vec3(0, 0, 2160), startHpr=(0, 0, 0), blendType='easeOut', fluid=1))
# We need the sound effect to play in parallel:
track = Parallel(SoundInterval(self.vaultCloseSfx, node=vaultDoor), doorTrack)
track.start(timestamp)
def exitVaultClosing(self):
pass
def __handleEnterVaultBox(self, collEntry=None):
self.inVault = True
if self.fsm.getCurrentState().getName() == 'vaultClosed':
self.clearVault()
def __handleExitVaultBox(self, collEntry=None):
self.inVault = False
def clearVault(self):
place = base.cr.playGame.getPlace()
place.setState('stopped')
self.teleportTrack = Sequence()
self.teleportTrack.append(Func(base.localAvatar.b_setAnimState, 'TeleportOut'))
self.teleportTrack.append(Wait(3.5))
self.teleportTrack.append(Func(base.localAvatar.setPos, Point3(0, 0, 0)))
self.teleportTrack.append(Func(base.localAvatar.b_setAnimState, 'TeleportIn'))
self.teleportTrack.append(Wait(2.25))
self.teleportTrack.append(Func(place.setState, 'walk'))
self.teleportTrack.start()
def randomDNAItem(self, category, findFunc):
codeCount = self.dnaStore.getNumCatalogCodes(category)
index = self.randomGenerator.randint(0, codeCount - 1)
code = self.dnaStore.getCatalogCode(category, index)
return findFunc(code)
def replaceRandomInModel(self, model):
baseTag = 'random_'
npc = model.findAllMatches('**/' + baseTag + '???_*')
for i in xrange(npc.getNumPaths()):
np = npc.getPath(i)
name = np.getName()
b = len(baseTag)
category = name[b + 4:]
key1 = name[b]
key2 = name[b + 1]
if key1 == 'm':
model = self.randomDNAItem(category, self.dnaStore.findNode)
newNP = model.copyTo(np)
if key2 == 'r':
self.replaceRandomInModel(newNP)
elif key1 == 't':
texture = self.randomDNAItem(category, self.dnaStore.findTexture)
np.setTexture(texture, 100)
newNP = np
if key2 == 'c':
if category == 'TI_wallpaper' or category == 'TI_wallpaper_border':
self.randomGenerator.seed(self.zoneId)
newNP.setColorScale(self.randomGenerator.choice(self.colors[category]))
else:
newNP.setColorScale(self.randomGenerator.choice(self.colors[category]))
def chooseDoor(self):
doorModelName = 'door_double_round_ul'
if doorModelName[-1:] == 'r':
doorModelName = doorModelName[:-1] + 'l'
else:
doorModelName = doorModelName[:-1] + 'r'
door = self.dnaStore.findNode(doorModelName)
return door
def setup(self):
self.dnaStore = base.cr.playGame.dnaStore
self.randomGenerator = random.Random()
self.randomGenerator.seed(self.zoneId)
self.interior = loader.loadModel('phase_4/models/modules/ttc_bank_interior.bam')
self.interior.reparentTo(render)
self.vaultOpenSfx = loader.loadSfx('phase_4/audio/sfx/vault_door_open.ogg')
self.vaultCloseSfx = loader.loadSfx('phase_4/audio/sfx/vault_door_close.ogg')
hoodId = ZoneUtil.getCanonicalHoodId(self.zoneId)
self.colors = ToonInteriorColors.colors[hoodId]
self.replaceRandomInModel(self.interior)
door = self.chooseDoor()
doorOrigin = render.find('**/door_origin;+s')
doorNP = door.copyTo(doorOrigin)
doorOrigin.setScale(0.8, 0.8, 0.8)
doorOrigin.setPos(doorOrigin, 0, -0.025, 0)
doorColor = self.randomGenerator.choice(self.colors['TI_door'])
DNADoor.setupDoor(doorNP, self.interior, doorOrigin, self.dnaStore, str(self.block), doorColor)
doorFrame = doorNP.find('door_*_flat')
doorFrame.wrtReparentTo(self.interior)
doorFrame.setColor(doorColor)
del self.colors
del self.dnaStore
del self.randomGenerator
room = render.find('**/vault_walls')
minPoint, maxPoint = room.getTightBounds()
offset = 1 # We want a slight offset
maxPoint -= offset
collBox = CollisionBox(minPoint, maxPoint)
collBox.setTangible(0)
collNode = CollisionNode(self.uniqueName('vaultBox'))
collNode.setIntoCollideMask(BitMask32(1))
collNode.addSolid(collBox)
self.collNodePath = render.attachNewNode(collNode)
radius = ((maxPoint-minPoint) / 2).getZ()
self.collNodePath.setPos(-11.2 + (offset/2), 14 + radius + offset, 0)
for npcToon in self.cr.doFindAllInstances(DistributedNPCToonBase):
npcToon.initToonState()
self.accept(self.uniqueName('entervaultBox'), self.__handleEnterVaultBox)
self.accept(self.uniqueName('exitvaultBox'), self.__handleExitVaultBox)
|
mit
|
cntnboys/410Lab6
|
build/django/build/lib.linux-x86_64-2.7/django/db/backends/mysql/client.py
|
84
|
1380
|
import os
import sys
from django.db.backends import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
executable_name = 'mysql'
def runshell(self):
settings_dict = self.connection.settings_dict
args = [self.executable_name]
db = settings_dict['OPTIONS'].get('db', settings_dict['NAME'])
user = settings_dict['OPTIONS'].get('user', settings_dict['USER'])
passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD'])
host = settings_dict['OPTIONS'].get('host', settings_dict['HOST'])
port = settings_dict['OPTIONS'].get('port', settings_dict['PORT'])
defaults_file = settings_dict['OPTIONS'].get('read_default_file')
# Seems to be no good way to set sql_mode with CLI.
if defaults_file:
args += ["--defaults-file=%s" % defaults_file]
if user:
args += ["--user=%s" % user]
if passwd:
args += ["--password=%s" % passwd]
if host:
if '/' in host:
args += ["--socket=%s" % host]
else:
args += ["--host=%s" % host]
if port:
args += ["--port=%s" % port]
if db:
args += [db]
if os.name == 'nt':
sys.exit(os.system(" ".join(args)))
else:
os.execvp(self.executable_name, args)
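# --- Added illustrative sketch (not part of the original module) ---
# Shows the OPTIONS-over-settings fallback used in runshell() above: keys in
# settings_dict['OPTIONS'] win over the top-level NAME/USER/... values.
# The settings values are made-up examples.
if __name__ == '__main__':
    example_settings = {
        'NAME': 'exampledb',
        'USER': 'alice',
        'OPTIONS': {'user': 'readonly'},
    }
    db = example_settings['OPTIONS'].get('db', example_settings['NAME'])
    user = example_settings['OPTIONS'].get('user', example_settings['USER'])
    print("%s %s" % (db, user))  # exampledb readonly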
|
apache-2.0
|
adamlwgriffiths/Pyglet
|
tools/upload/upload.py
|
28
|
2341
|
#!/usr/bin/env python
'''Upload dist/ files to code.google.com. For Alex only :-)
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import re
import sys
base = os.path.dirname(__file__)
root = os.path.join(base, '../..')
dist = os.path.join(root, 'dist')
sys.path.insert(0, root)
import pyglet
import googlecode_upload
if __name__ == '__main__':
version = 'pyglet-%s' % pyglet.version
print 'Preparing to upload %s' % version
password = open(os.path.expanduser('~/.googlecode-passwd')).read().strip()
descriptions = {}
for line in open(os.path.join(base, 'descriptions.txt')):
suffix, description = line.split(' ', 1)
descriptions[suffix] = description.strip()
files = {}
version_pattern = re.compile('%s[.-].*' % version)
for filename in os.listdir(dist):
if version_pattern.match(filename):
description = descriptions.get(filename[len(version):])
if not description:
print 'No description for %s' % filename
sys.exit(1)
description = '%s %s' % (pyglet.version, description)
labels = []
if filename.endswith('.tar.gz') or filename.endswith('.zip') and\
'docs' not in filename:
labels.append('Type-Source')
elif filename.endswith('.msi'):
labels.append('OpSys-Windows')
elif filename.endswith('.dmg'):
labels.append('OpSys-OSX')
# Don't feature 1.1 until release time
#if not filename.endswith('.egg'):
# labels.append('Featured')
files[filename] = description, labels
print filename
print ' %s' % description
print ' %s' % ', '.join(labels)
print 'Ok to upload? [type "y"]'
if raw_input().strip() != 'y':
print 'Aborted.'
sys.exit(1)
for filename, (description, labels) in files.items():
status, reason, url = googlecode_upload.upload(
os.path.join(dist, filename),
'pyglet',
'Alex.Holkner',
password,
description,
labels)
if url:
print 'OK: %s' % url
else:
print 'Error: %s (%s)' % (reason, status)
print 'Done!'
|
bsd-3-clause
|
Imaginashion/cloud-vision
|
.fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/encodings/utf_16_le.py
|
860
|
1037
|
""" Python 'utf-16-le' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_le_encode
def decode(input, errors='strict'):
return codecs.utf_16_le_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_le_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_le_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_le_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_le_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-le',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
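# --- Added illustrative sketch (not part of the original module) ---
# Round-trips a small string through the IncrementalDecoder defined above,
# feeding the UTF-16-LE bytes in two chunks split at an odd boundary.
if __name__ == '__main__':
    data = u'h\xe9llo'.encode('utf-16-le')
    decoder = IncrementalDecoder()
    text = decoder.decode(data[:3]) + decoder.decode(data[3:], final=True)
    print(text == u'h\xe9llo')  # True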
|
mit
|
miloharper/neural-network-animation
|
neural_network.py
|
4
|
4978
|
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot
from math import fabs
from formulae import sigmoid, sigmoid_derivative, random_weight, get_synapse_colour, adjust_line_to_perimeter_of_circle, layer_left_margin
import parameters
class Synapse():
def __init__(self, input_neuron_index, x1, x2, y1, y2):
self.input_neuron_index = input_neuron_index
self.weight = random_weight()
self.signal = 0
x1, x2, y1, y2 = adjust_line_to_perimeter_of_circle(x1, x2, y1, y2)
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
def draw(self):
line = pyplot.Line2D((self.x1, self.x2), (self.y1, self.y2), lw=fabs(self.weight), color=get_synapse_colour(self.weight), zorder=1)
outer_glow = pyplot.Line2D((self.x1, self.x2), (self.y1, self.y2), lw=(fabs(self.weight) * 2), color=get_synapse_colour(self.weight), zorder=2, alpha=self.signal * 0.4)
pyplot.gca().add_line(line)
pyplot.gca().add_line(outer_glow)
class Neuron():
def __init__(self, x, y, previous_layer):
self.x = x
self.y = y
self.output = 0
self.synapses = []
self.error = 0
index = 0
if previous_layer:
for input_neuron in previous_layer.neurons:
synapse = Synapse(index, x, input_neuron.x, y, input_neuron.y)
self.synapses.append(synapse)
index += 1
def train(self, previous_layer):
for synapse in self.synapses:
# Propagate the error back down the synapse to the neuron in the layer below
previous_layer.neurons[synapse.input_neuron_index].error += self.error * sigmoid_derivative(self.output) * synapse.weight
# Adjust the synapse weight
synapse.weight += synapse.signal * self.error * sigmoid_derivative(self.output)
return previous_layer
def think(self, previous_layer):
activity = 0
for synapse in self.synapses:
synapse.signal = previous_layer.neurons[synapse.input_neuron_index].output
activity += synapse.weight * synapse.signal
self.output = sigmoid(activity)
def draw(self):
circle = pyplot.Circle((self.x, self.y), radius=parameters.neuron_radius, fill=True, color=(0.2, 0.2, 0), zorder=3)
outer_glow = pyplot.Circle((self.x, self.y), radius=parameters.neuron_radius * 1.5, fill=True, color=(self.output, self.output, 0), zorder=4, alpha=self.output * 0.5)
pyplot.gca().add_patch(circle)
pyplot.gca().add_patch(outer_glow)
pyplot.text(self.x + 0.8, self.y, round(self.output, 2))
for synapse in self.synapses:
synapse.draw()
class Layer():
def __init__(self, network, number_of_neurons):
if len(network.layers) > 0:
self.is_input_layer = False
self.previous_layer = network.layers[-1]
self.y = self.previous_layer.y + parameters.vertical_distance_between_layers
else:
self.is_input_layer = True
self.previous_layer = None
self.y = parameters.bottom_margin
self.neurons = []
x = layer_left_margin(number_of_neurons)
for iteration in xrange(number_of_neurons):
neuron = Neuron(x, self.y, self.previous_layer)
self.neurons.append(neuron)
x += parameters.horizontal_distance_between_neurons
def think(self):
for neuron in self.neurons:
neuron.think(self.previous_layer)
def draw(self):
for neuron in self.neurons:
neuron.draw()
class NeuralNetwork():
def __init__(self, requested_layers):
self.layers = []
for number_of_neurons in requested_layers:
self.layers.append(Layer(self, number_of_neurons))
def train(self, example):
error = example.output - self.think(example.inputs)
self.reset_errors()
self.layers[-1].neurons[0].error = error
for l in range(len(self.layers) - 1, 0, -1):
for neuron in self.layers[l].neurons:
self.layers[l - 1] = neuron.train(self.layers[l - 1])
return fabs(error)
def do_not_think(self):
for layer in self.layers:
for neuron in layer.neurons:
neuron.output = 0
for synapse in neuron.synapses:
synapse.signal = 0
def think(self, inputs):
for layer in self.layers:
if layer.is_input_layer:
for index, value in enumerate(inputs):
self.layers[0].neurons[index].output = value
else:
layer.think()
return self.layers[-1].neurons[0].output
def draw(self):
pyplot.cla()
for layer in self.layers:
layer.draw()
def reset_errors(self):
for layer in self.layers:
for neuron in layer.neurons:
neuron.error = 0
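# --- Added illustrative sketch (not part of the original module) ---
# Replays the per-synapse arithmetic from Neuron.train() with plain floats.
# It assumes sigmoid_derivative(s) computes s * (1 - s) on the neuron's
# already-sigmoided output (the formulae module is not shown here), and the
# numbers are made-up examples.
if __name__ == '__main__':
    output = 0.7    # neuron output after the sigmoid
    error = 0.3     # error assigned to this neuron
    signal = 0.9    # signal carried by one incoming synapse
    weight = 0.1    # that synapse's current weight
    derivative = output * (1 - output)
    propagated_error = error * derivative * weight  # error pushed down to the previous layer
    weight += signal * error * derivative           # weight adjustment applied to the synapse
    print("propagated=%s new_weight=%s" % (propagated_error, weight))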
|
mit
|
schleichdi2/OpenNfr_E2_Gui-6.0
|
lib/python/Components/Converter/ValueToPixmap.py
|
50
|
1197
|
from Components.Converter.Converter import Converter
from Components.Element import cached, ElementError
from Tools.Directories import SCOPE_SKIN_IMAGE, SCOPE_ACTIVE_SKIN, resolveFilename
from Tools.LoadPixmap import LoadPixmap
class ValueToPixmap(Converter, object):
LANGUAGE_CODE = 0
PATH = 1
def __init__(self, type):
Converter.__init__(self, type)
if type == "LanguageCode":
self.type = self.LANGUAGE_CODE
elif type == "Path":
self.type = self.PATH
else:
raise ElementError("'%s' is not <LanguageCode|Path> for ValueToPixmap converter" % type)
@cached
def getPixmap(self):
if self.source:
val = self.source.text
if val in (None, ""):
return None
if self.type == self.PATH:
return LoadPixmap(val)
if self.type == self.LANGUAGE_CODE:
png = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "countries/" + val[3:].lower() + ".png"))
if png is None:
png = LoadPixmap(cached=True, path=resolveFilename(SCOPE_SKIN_IMAGE, "countries/missing.png"))
return png
return None
pixmap = property(getPixmap)
def changed(self, what):
if what[0] != self.CHANGED_SPECIFIC or what[1] == self.type:
Converter.changed(self, what)
|
gpl-2.0
|
texperience/wagtail-pythonanywhere-quickstart
|
home/migrations/0002_create_homepage.py
|
1
|
1714
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# If migration is run multiple times, it may have already been deleted
Page.objects.filter(id=2).delete()
# Create content type for homepage model
homepage_content_type, __ = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Home",
draft_title="Home",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
def remove_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# Page and Site objects CASCADE
HomePage.objects.filter(slug='home', depth=2).delete()
# Delete content type for homepage model
ContentType.objects.filter(model='homepage', app_label='home').delete()
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage, remove_homepage),
]
|
isc
|
thenewguy/wagtailplus
|
wagtailplus/wagtaillinks/tests/test_views.py
|
2
|
7163
|
"""
Contains view unit tests.
"""
from django.core.urlresolvers import reverse
from wagtailplus.tests import views
from ..models import Link
class TestLinkIndexView(views.BaseTestIndexView):
url_namespace = 'wagtaillinks'
template_dir = 'wagtaillinks/links'
def _create_sequential_instance(self, index):
Link.objects.create(
link_type = Link.LINK_TYPE_EXTERNAL,
title = 'Link #{0}'.format(index),
external_url = 'http://www.site-{0}.com'.format(index)
)
class TestLinkCreateView(views.BaseTestCreateView):
url_namespace = 'wagtaillinks'
template_dir = 'wagtaillinks/links'
model_class = Link
filter_keys = ['title']
def _get_post_data(self):
return {
'link_type': Link.LINK_TYPE_EXTERNAL,
'title': 'Test Link',
'external_url': 'http://www.test.com/'
}
class TestLinkUpdateView(views.BaseTestUpdateView):
url_namespace = 'wagtaillinks'
template_dir = 'wagtaillinks/links'
model_class = Link
def _get_instance(self):
return Link.objects.create(
link_type = Link.LINK_TYPE_EXTERNAL,
title = 'Test Link',
external_url = 'http://www.test.com/'
)
def _get_post_data(self):
return {
'link_type': Link.LINK_TYPE_EXTERNAL,
'title': 'Test Link Changed',
'external_url': 'http://www.test.com/'
}
class TestLinkDeleteView(views.BaseTestDeleteView):
url_namespace = 'wagtaillinks'
template_dir = 'wagtaillinks/links'
model_class = Link
def _get_instance(self):
return Link.objects.create(
link_type = Link.LINK_TYPE_EXTERNAL,
title = 'Test Link',
external_url = 'http://www.test.com/'
)
class TestEmailLinkChooserView(views.BaseTestChooserView):
url_namespace = 'wagtaillinks'
template_dir = 'wagtaillinks/chooser'
model_class = Link
def _create_sequential_instance(self, index):
return Link.objects.create(
link_type = Link.LINK_TYPE_EMAIL,
title = 'Test Email #{0}'.format(index),
email = 'somebody-{0}@something.com'.format(index)
)
def get(self, params=None):
if not params:
params = {}
return self.client.get(
reverse('wagtailadmin_choose_page_email_link'),
params
)
class TestExternalLinkChooserView(views.BaseTestChooserView):
url_namespace = 'wagtaillinks'
template_dir = 'wagtaillinks/chooser'
model_class = Link
def _create_sequential_instance(self, index):
return Link.objects.create(
link_type = Link.LINK_TYPE_EXTERNAL,
title = 'Test Link #{0}'.format(index),
external_url = 'http://www.site-{0}.com'.format(index)
)
def get(self, params=None):
if not params:
params = {}
return self.client.get(
reverse('wagtailadmin_choose_page_external_link'),
params
)
class TestEmailLinkChosenView(views.BaseTestChosenView):
url_namespace = 'wagtaillinks'
template_dir = 'wagtaillinks/chooser'
model_class = Link
def _get_instance(self):
return Link.objects.create(
link_type = Link.LINK_TYPE_EMAIL,
title = 'Test Email',
email = '[email protected]'
)
class TestExternalLinkChosenView(views.BaseTestChosenView):
url_namespace = 'wagtaillinks'
template_dir = 'wagtaillinks/chooser'
model_class = Link
def _get_instance(self):
return Link.objects.create(
link_type = Link.LINK_TYPE_EXTERNAL,
title = 'Test Link',
external_url = 'http://www.test.com/'
)
class TestChooserCreateEmailLinkView(views.BaseTestChooserCreateView):
url_namespace = 'wagtaillinks'
template_dir = 'wagtaillinks/chooser'
model_class = Link
def _get_post_data(self):
return {
'link_type': Link.LINK_TYPE_EMAIL,
'title': 'Test Email',
'email': '[email protected]',
}
def test_get(self):
# Generate the response.
response = self.client.get(
reverse('wagtailadmin_choose_page_email_link')
)
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/chooser.html'.format(self.template_dir)
)
self.assertTemplateUsed(
response,
'{0}/results.html'.format(self.template_dir)
)
self.assertTemplateUsed(
response,
'{0}/chooser.js'.format(self.template_dir)
)
def test_post(self):
# Get POST data.
data = self._get_post_data()
# Generate the response.
response = self.client.post(
reverse('wagtailadmin_choose_page_email_link'),
data
)
# Check assertions.
self.assertTemplateUsed(
response,
'{0}/chosen.js'.format(self.template_dir)
)
self.assertContains(
response,
'modal.respond'
)
self.assertTrue(
self.model_class.objects.filter(**data).exists()
)
class TestChooserCreateExternalLinkView(views.BaseTestChooserCreateView):
url_namespace = 'wagtaillinks'
template_dir = 'wagtaillinks/chooser'
model_class = Link
def _get_post_data(self):
return {
'link_type': Link.LINK_TYPE_EXTERNAL,
'title': 'Test Link',
'external_url': 'http://www.test.com/',
}
def test_get(self):
# Generate the response.
response = self.client.get(
reverse('wagtailadmin_choose_page_external_link')
)
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/chooser.html'.format(self.template_dir)
)
self.assertTemplateUsed(
response,
'{0}/results.html'.format(self.template_dir)
)
self.assertTemplateUsed(
response,
'{0}/chooser.js'.format(self.template_dir)
)
def test_post(self):
# Get POST data.
data = self._get_post_data()
# Generate the response.
response = self.client.post(
reverse('wagtailadmin_choose_page_external_link'),
data
)
# Check assertions.
self.assertTemplateUsed(
response,
'{0}/chosen.js'.format(self.template_dir)
)
self.assertContains(
response,
'modal.respond'
)
self.assertTrue(
self.model_class.objects.filter(**data).exists()
)
|
bsd-2-clause
|
bytor99999/vertx-web
|
src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/langhebrewmodel.py
|
2763
|
11318
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
Win1255HebrewModel = {
'charToOrderMap': win1255_CharToOrderMap,
'precedenceMatrix': HebrewLangModel,
'mTypicalPositiveRatio': 0.984004,
'keepEnglishLetter': False,
'charsetName': "windows-1255"
}
# flake8: noqa
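# --- Added illustrative sketch (not part of the original module) ---
# Shows how one windows-1255 byte is mapped to its frequency order through
# win1255_CharToOrderMap; 0xE0 is the cp1255 encoding of the letter alef.
if __name__ == '__main__':
    order = win1255_CharToOrderMap[0xE0]
    print(order)  # 9, i.e. one of the most frequent letters in the model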
|
apache-2.0
|
mgit-at/ansible
|
lib/ansible/modules/cloud/amazon/lightsail.py
|
40
|
15620
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lightsail
short_description: Create or delete a virtual machine instance in AWS Lightsail
description:
- Creates or deletes instances in AWS Lightsail and optionally waits for them to be 'running'.
version_added: "2.4"
author: "Nick Ball (@nickball)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
name:
description:
- Name of the instance
required: true
zone:
description:
- AWS availability zone in which to launch the instance. Required when state='present'
blueprint_id:
description:
- ID of the instance blueprint image. Required when state='present'
bundle_id:
description:
- Bundle of specification info for the instance. Required when state='present'
user_data:
description:
- Launch script that can configure the instance with additional data
key_pair_name:
description:
- Name of the key pair to use with the instance
wait:
description:
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
type: bool
default: 'yes'
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
requirements:
- "python >= 2.6"
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a new Lightsail instance, register the instance details
- lightsail:
state: present
name: myinstance
region: us-east-1
zone: us-east-1a
blueprint_id: ubuntu_16_04
bundle_id: nano_1_0
key_pair_name: id_rsa
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
wait_timeout: 500
register: my_instance
- debug:
msg: "Name is {{ my_instance.instance.name }}"
- debug:
msg: "IP is {{ my_instance.instance.public_ip_address }}"
# Delete an instance if present
- lightsail:
state: absent
region: us-east-1
name: myinstance
'''
RETURN = '''
changed:
description: if an instance has been modified/created
returned: always
type: bool
sample:
changed: true
instance:
description: instance data
returned: always
type: dict
sample:
arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
blueprint_id: "ubuntu_16_04"
blueprint_name: "Ubuntu"
bundle_id: "nano_1_0"
created_at: "2017-03-27T08:38:59.714000-04:00"
hardware:
cpu_count: 1
ram_size_in_gb: 0.5
is_static_ip: false
location:
availability_zone: "us-east-1a"
region_name: "us-east-1"
name: "my_instance"
networking:
monthly_transfer:
gb_per_month_allocated: 1024
ports:
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 80
protocol: tcp
to_port: 80
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 22
protocol: tcp
to_port: 22
private_ip_address: "172.26.8.14"
public_ip_address: "34.207.152.202"
resource_type: "Instance"
ssh_key_name: "keypair"
state:
code: 16
name: running
support_code: "588307843083/i-0997c97831ee21e33"
username: "ubuntu"
'''
import time
import traceback
try:
import botocore
HAS_BOTOCORE = True
except ImportError:
HAS_BOTOCORE = False
try:
import boto3
except ImportError:
# will be caught by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
HAS_BOTO3, camel_dict_to_snake_dict)
def create_instance(module, client, instance_name):
"""
Create an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the new instance.
"""
changed = False
# Check if instance already exists
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
zone = module.params.get('zone')
blueprint_id = module.params.get('blueprint_id')
bundle_id = module.params.get('bundle_id')
key_pair_name = module.params.get('key_pair_name')
user_data = module.params.get('user_data')
user_data = '' if user_data is None else user_data
resp = None
if inst is None:
try:
resp = client.create_instances(
instanceNames=[
instance_name
],
availabilityZone=zone,
blueprintId=blueprint_id,
bundleId=bundle_id,
userData=user_data,
keyPairName=key_pair_name,
)
resp = resp['operations'][0]
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
changed = True
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def delete_instance(module, client, instance_name):
"""
Terminates an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the instance deleted (pre-deletion).
If the instance to be deleted is running
"changed" will be set to False.
"""
# It looks like deleting removes the instance immediately, nothing to wait for
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before deleting
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
# sleep and retry
time.sleep(10)
# Attempt to delete
if inst is not None:
while not changed and ((wait and wait_max > time.time()) or (not wait)):
try:
client.delete_instance(instanceName=instance_name)
changed = True
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))
# Timed out
if wait and not changed and wait_max <= time.time():
module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())
return (changed, inst)
def restart_instance(module, client, instance_name):
"""
Reboot an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to reboot
Returns a dictionary of instance information
about the restarted instance
If the instance was not able to reboot,
"changed" will be set to False.
Wait will not apply here as this is an OS-level operation
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(3)
# send reboot
if inst is not None:
try:
client.reboot_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
changed = True
return (changed, inst)
def startstop_instance(module, client, instance_name, state):
"""
Starts or stops an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to start/stop
state: Target state ("running" or "stopped")
Returns a dictionary of instance information
about the instance started/stopped
If the instance was not able to state change,
"changed" will be set to False.
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(1)
# Try state change
if inst is not None and inst['state']['name'] != state:
try:
if state == 'running':
client.start_instance(instanceName=instance_name)
else:
client.stop_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
changed = True
# Grab current instance info
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def core(module):
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
client = None
try:
client = boto3_conn(module, conn_type='client', resource='lightsail',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())
changed = False
state = module.params['state']
name = module.params['name']
if state == 'absent':
changed, instance_dict = delete_instance(module, client, name)
elif state in ('running', 'stopped'):
changed, instance_dict = startstop_instance(module, client, name, state)
elif state == 'restarted':
changed, instance_dict = restart_instance(module, client, name)
elif state == 'present':
changed, instance_dict = create_instance(module, client, name)
module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
def _find_instance_info(client, instance_name):
''' handle exceptions where this function is called '''
inst = None
try:
inst = client.get_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
raise
return inst['instance']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']),
zone=dict(type='str'),
blueprint_id=dict(type='str'),
bundle_id=dict(type='str'),
key_pair_name=dict(type='str'),
user_data=dict(type='str'),
wait=dict(type='bool', default=True),
wait_timeout=dict(default=300),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='Python module "boto3" is missing, please install it')
if not HAS_BOTOCORE:
module.fail_json(msg='Python module "botocore" is missing, please install it')
try:
core(module)
except (botocore.exceptions.ClientError, Exception) as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
gpl-3.0
|