repo_name (string, 5 to 100 chars) | path (string, 4 to 299 chars) | copies (string, 990 classes) | size (string, 4 to 7 chars) | content (string, 666 to 1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
briceparent/py_vosfactures | vosfactures/utils.py | 1 | 1529 | import json
import requests
from vosfactures import settings
class HttpError(Exception):
pass
def get(**kwargs):
return query(method="GET", **kwargs)
def delete(**kwargs):
return query(method="DELETE", **kwargs)
def post(**kwargs):
return query(method="POST", **kwargs)
def put(**kwargs):
return query(method="PUT", **kwargs)
def query(json_page=None, action=None, instance_id=None, method="GET", **kwargs):
if instance_id is None:
url = "https://{}/{}.json".format(settings.HOST, json_page)
else:
url = "https://{}/{}/{}.json".format(settings.HOST, json_page, instance_id)
if method == "GET":
req_method = requests.get
elif method == "POST":
req_method = requests.post
elif method == "DELETE":
req_method = requests.delete
elif method == "PUT":
req_method = requests.put
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
}
    # Encode the passed data (api_token plus the action's kwargs) as JSON
data = json.dumps({"api_token": settings.API_TOKEN, action: kwargs})
response = req_method(url=url, headers=headers, data=data)
right_responses = {'GET': [200, 204, 205], 'POST': [201], 'DELETE': [200], 'PUT': [200]}
if response.status_code in right_responses[method]:
return response.json()
error_msg = "Error {} during the query process for {} ({}). Data : {}, response : {}"
raise HttpError(error_msg.format(response.status_code, url, method, data, response.json()))
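# A minimal usage sketch of the helpers above, assuming vosfactures.settings
# defines HOST and API_TOKEN; the "invoices" endpoint, the "invoice" action and
# the field names are illustrative guesses, not taken from this module.
if __name__ == "__main__":
    # Create an invoice, then fetch it back by id.
    created = post(json_page="invoices", action="invoice",
                   buyer_name="Example Client", kind="vat")
    fetched = get(json_page="invoices", instance_id=created["id"], action="invoice")
    print(fetched)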
| gpl-3.0 | -5,627,060,479,928,397,000 | 25.362069 | 95 | 0.620013 | false |
django-bmf/django-bmf | djangobmf/tasks/document.py | 2 | 1289 | #!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.utils import six
from djangobmf.decorators import optional_celery
import hashlib
import mimetypes
if six.PY2:
class FileNotFoundError(OSError):
pass
@optional_celery
def generate_sha1(pk):
from djangobmf.models import Document
obj = Document.objects.get(pk=pk)
hash = hashlib.sha1()
try:
obj.file.open('rb')
exists = True
size = obj.file.size
mimetype, encoding = mimetypes.guess_type(obj.file.name)
if obj.file.multiple_chunks():
for chunk in obj.file.chunks():
hash.update(chunk)
else:
hash.update(obj.file.read())
obj.file.close()
sha1 = hash.hexdigest()
except FileNotFoundError:
exists = False
size = None
mimetype = None
encoding = None
sha1 = None
if sha1 != obj.sha1 or exists != obj.file_exists or mimetype != obj.mimetype \
or encoding != obj.encoding or size != obj.size:
Document.objects.filter(pk=pk).update(
sha1=sha1,
file_exists=exists,
mimetype=mimetype,
encoding=encoding,
size=size,
)
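# A usage sketch, not from this module: how the task above might be triggered
# after a Document row changes. Whether .delay() is available depends on the
# optional_celery decorator and on Celery being configured; the Document lookup
# is purely illustrative.
#
#     from djangobmf.tasks.document import generate_sha1
#     doc = Document.objects.latest('pk')   # hypothetical existing document
#     generate_sha1(doc.pk)                 # synchronous call
#     generate_sha1.delay(doc.pk)           # deferred call if Celery is active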
| bsd-3-clause | 5,599,808,207,140,588,000 | 20.483333 | 82 | 0.591932 | false |
skolome/son-sp-infrabstract | wim-adaptor/vtn-api/newim.py | 2 | 3742 | #
# Copyright (c) 2015 SONATA-NFV, UCL, NOKIA, NCSR Demokritos
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, UCL, NOKIA, NCSR Demokritos
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
#
__author__ = "Stavros Kolometsos - NCSR Demokritos, Dario Valocchi(Ph.D.) - UCL"
import argparse
import requests
import socket
import json
import utils
from flowchart import FlowChart, Flows, Location
import logging
from flask import Flask, jsonify
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def get_info():
logging.debug("Request for info")
return username, password, host, url, headers
def get_vtn():
logging.debug("Got request for VTN name")
try:
vtn_name
logging.debug("VTN name was defined already: "+vtn_name)
except NameError:
logging.debug("VTN name not defined")
s_url = 'operational/vtn:vtns/'
username, password, host, url, headers = get_info()
r = requests.get(url + s_url, headers=headers, auth=(username, password))
json_data = json.loads(r.text)
        # at the moment there is only one VTN tenant, so one name. TODO --- think
        # about what to do if there are more
vtn_name = json_data['vtns']['vtn'][0]['name']
        logging.info("VTN name received. Sending back: "+vtn_name)
finally:
return vtn_name
parser = argparse.ArgumentParser() #handler for arguments passed
parser.add_argument("-v", "--host",help="Enter the address for the host containing VTN",type=str, required=True) # option configurations, needs to be required
parser.add_argument("-u", "--user",help="Enter Username",type=str,required=True)
parser.add_argument("-p", "--password",help="Enter Password",type=str, required=True)
parser.add_argument("-n", "--name",help="Enter VTN user name",type=str)
args = parser.parse_args()
if args.host:
host = args.host
if args.user:
username = args.user
if args.password:
password = args.password
if args.name:
vtn_name = args.name
url = 'http://'+host+':8181/restconf/' # this should always be the same
headers = {'Content-Type': 'application/json'} # also this
api.add_resource(FlowChart, '/flowchart/')
api.add_resource(Flows, '/flowchart/<string:res_name>')
api.add_resource(Location, '/location/')
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, filename='wicm.log', format='%(asctime)s - %(levelname)s - %(message)s')
try:
vtn_name
except NameError:
vtn_name = get_vtn()
        logging.debug("VTN name received: " + vtn_name)
local = get_ip()
app.run(debug=True,host=local)
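# How the service is typically started (values are placeholders): the REST
# resources registered above then listen on this machine's outbound IP, as
# returned by get_ip().
#
#     python newim.py --host 192.0.2.10 --user admin --password admin --name vtn1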
| apache-2.0 | 5,734,090,108,605,875,000 | 35.330097 | 159 | 0.694816 | false |
marcoantoniooliveira/labweb | oscar/core/context_processors.py | 1 | 2200 | import oscar
import re
import platform
import django
from six.moves.urllib import parse
from django.conf import settings
from django.utils.safestring import mark_safe
def strip_language_code(request):
"""
When using Django's i18n_patterns, we need a language-neutral variant of
the current URL to be able to use set_language to change languages.
This naive approach strips the language code from the beginning of the URL
and will likely fail if using translated URLs.
"""
path = request.path
if settings.USE_I18N and hasattr(request, 'LANGUAGE_CODE'):
return re.sub('^/%s/' % request.LANGUAGE_CODE, '/', path)
return path
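# A small illustration, assuming USE_I18N is enabled and the URL was served via
# i18n_patterns: for request.path == '/fr/catalogue/' with LANGUAGE_CODE == 'fr',
# strip_language_code(request) returns '/catalogue/'; paths without a language
# prefix are returned unchanged.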
def usage_statistics_string():
"""
For Oscar development, it is helpful to know which versions of Oscar,
Django and Python are in use, and which can be safely deprecated or
removed. If tracking is enabled, this function builds a query string with
that information. It is used in dashboard/layout.html with an invisible
tracker pixel.
If you're developing locally or tracking is disabled, the tracker pixel
does not get rendered and no information is collected.
"""
if not settings.DEBUG and getattr(settings, 'OSCAR_TRACKING', True):
params = {
'django': django.get_version(),
'python': platform.python_version(),
'oscar': oscar.get_version(),
}
return mark_safe(parse.urlencode(params))
else:
return None
def metadata(request):
"""
Add some generally useful metadata to the template context
"""
return {'display_version': getattr(settings, 'DISPLAY_VERSION', False),
'version': getattr(settings, 'VERSION', 'N/A'),
'shop_name': settings.OSCAR_SHOP_NAME,
'shop_tagline': settings.OSCAR_SHOP_TAGLINE,
'homepage_url': settings.OSCAR_HOMEPAGE,
'use_less': getattr(settings, 'USE_LESS', False),
'call_home': usage_statistics_string(),
'language_neutral_url_path': strip_language_code(request),
'google_analytics_id': getattr(settings,
'GOOGLE_ANALYTICS_ID', None)}
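# A sketch of how these context processors are wired up; the settings fragment
# below belongs to the surrounding Django project and is an assumption, not part
# of this module.
#
#     TEMPLATE_CONTEXT_PROCESSORS = (
#         # ... Django defaults ...
#         'oscar.core.context_processors.metadata',
#     )
#
# Templates rendered with a RequestContext then receive 'shop_name', 'version',
# 'language_neutral_url_path' and the other keys returned by metadata() above.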
| bsd-3-clause | 7,059,000,760,504,418,000 | 37.596491 | 78 | 0.655909 | false |
danyaqp/gr-baz | python/baudline.py | 3 | 5680 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# baudline.py
#
# Copyright 2013 Balint Seeber <balint@crawfish>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# le32f - 1 Msps
# le16 - 4 Msps
# Pipe mode kill works, FIFO doesn't
import sys, subprocess, tempfile, os, signal
from gnuradio import gr, gru, blocks
class baudline_sink(gr.hier_block2):
def __init__(self, fmt, item_size, channels, is_complex, sample_rate, aggregate_channel_count=1,
flip_complex=False, baseband_freq=None, decimation=1, scale=1.0, overlap=None, slide_size=None, fft_size=None, jump_step=None, x_slip=None,
mode='pipe', buffered=True, kill_on_del=True, memory=None, peak_hold=False, **kwds):
gr.hier_block2.__init__(self, "baudline_sink",
gr.io_signature(1, 1, item_size),
gr.io_signature(0, 0, 0))
baudline_path = gr.prefs().get_string('baudline', 'path', 'baudline')
#tf = tempfile.NamedTemporaryFile(delete=False)
#tf.write(gp)
#tf.close()
#print tf.name
self.mode = mode
self.kill_on_del = kill_on_del
if mode == 'fifo':
fifo_name = 'baudline_fifo'
self.tmpdir = tempfile.mkdtemp()
self.filename = os.path.join(self.tmpdir, fifo_name)
print self.filename
try:
os.mkfifo(self.filename)
except OSError, e:
print "Failed to create FIFO: %s" % e
raise
baudline_exec = [
baudline_path,
"-stdin",
"-record",
"-spacebar", "recordpause",
"-samplerate", str(int(sample_rate)),
"-channels", str(channels * aggregate_channel_count),
"-format", fmt,
#"-backingstore",
# #
#"-threads",
#"-pipeline",
#"-memory", # MB
#"-verticalsync"
#"-realtime",
#"-psd"
#"-reversetimeaxis",
#"-overclock",
#"-debug",
#"-debugtimer", str(1000)
#"-debugfragments",
#"-debugcadence",
#"-debugjitter",
#"-debugrate",
#"-debugmeasure
]
if is_complex:
baudline_exec += ["-quadrature"]
if flip_complex:
baudline_exec += ["-flipcomplex"]
if baseband_freq is not None and baseband_freq > 0:
baudline_exec += ["-basefrequency", str(baseband_freq)]
if decimation > 1:
baudline_exec += ["-decimateby", str(decimation)]
if scale != 1.0:
baudline_exec += ["-scaleby", str(scale)]
if overlap is not None and overlap > 0:
baudline_exec += ["-overlap", str(overlap)]
#"-slidesize"
if slide_size is not None and slide_size > 0:
baudline_exec += ["-slidesize", str(slide_size)]
if fft_size is not None and fft_size > 0:
baudline_exec += ["-fftsize", str(fft_size)]
if jump_step is not None and jump_step > 0:
baudline_exec += ["-jumpstep", str(jump_step)]
if x_slip is not None and x_slip > 0:
baudline_exec += ["-xslip", str(x_slip)]
if memory is not None and memory > 0:
baudline_exec += ["-memory", str(memory)]
if peak_hold:
baudline_exec += ["-peakhold"]
for k in kwds.keys():
arg = str(k).strip()
if arg[0] != '-':
arg = "-" + arg
baudline_exec += [arg]
val = kwds[k]
if val is not None:
val = str(val).strip()
if val.find(' ') > -1 and len(val) > 1:
if val[0] != '\"':
val = "\"" + val
if val[-1] != '\"':
val += "\""
baudline_exec += [val]
if mode == 'fifo':
baudline_exec += ["<", self.filename]
#baudline_exec = ["cat", self.filename, "|"] + baudline_exec
baudline_exec = [" ".join(baudline_exec)]
self.p = None
#res = 0
try:
#res = subprocess.call(gp_exec)
print baudline_exec
if mode == 'pipe':
self.p = subprocess.Popen(baudline_exec, stdin=subprocess.PIPE) # , stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=16384 or -1
elif mode == 'fifo':
self.p = subprocess.Popen(baudline_exec, shell=True)
#self.p.communicate(input=)
#self.p.stdin.write()
#self.p.wait()
#except KeyboardInterrupt:
# print "Caught CTRL+C"
except Exception, e:
print e
raise
#if self.p is not None and not self.p.returncode == 0:
# print "Failed to run subprocess (result: %d)" % (self.p.returncode)
#if res != 0:
# print "Failed to run subprocess (result: %d)" % (res)
if mode == 'pipe':
print "==> Using FD:", self.p.stdin.fileno()
self.file_sink = blocks.file_descriptor_sink(item_size, self.p.stdin.fileno()) # os.dup
elif mode == 'fifo':
self.file_sink = blocks.file_sink(item_size, self.filename) # os.dup
self.file_sink.set_unbuffered(not buffered) # Flowgraph won't die if baudline exits
self.connect(self, self.file_sink)
def __del__(self):
#os.unlink(tf.name)
if self.p is not None: # Won't work in FIFO mode as it blocks
if self.kill_on_del:
print "==> Killing baudline..."
#self.p.kill()
#self.p.terminate()
os.kill(self.p.pid, signal.SIGTERM)
if self.mode == 'fifo':
try:
print "==> Deleting:", self.filename
os.unlink(self.filename)
os.rmdir(self.tmpdir)
except OSError, e:
print "Failed to delete FIFO: %s" % e
raise
def main():
return 0
if __name__ == '__main__':
main()
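# A usage sketch, assuming GNU Radio 3.7-style imports and that the baudline
# binary is installed and reachable (see the 'baudline'/'path' preference read
# above); the source block and its parameters are illustrative only.
#
#     from gnuradio import gr, analog
#
#     class demo(gr.top_block):
#         def __init__(self, samp_rate=1e6):
#             gr.top_block.__init__(self)
#             src = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, 100e3, 1.0)
#             snk = baudline_sink(fmt="le32f", item_size=gr.sizeof_gr_complex,
#                                 channels=1, is_complex=True,
#                                 sample_rate=samp_rate, mode='pipe')
#             self.connect(src, snk)
#
#     demo().run()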
| gpl-3.0 | -7,117,699,006,643,680,000 | 27.979592 | 141 | 0.629401 | false |
flurischt/jedi | jedi/evaluate/__init__.py | 25 | 16430 | """
Evaluation of Python code in |jedi| is based on three assumptions:
* The code uses as few side effects as possible. Jedi understands certain
list/tuple/set modifications, but there's no guarantee that Jedi detects
everything (list.append in different modules for example).
* No magic is being used:
- metaclasses
- ``setattr()`` / ``__import__()``
- writing to ``globals()``, ``locals()``, ``object.__dict__``
* The programmer is not a total dick, e.g. like `this
<https://github.com/davidhalter/jedi/issues/24>`_ :-)
The actual algorithm is based on a principle called lazy evaluation. If you
don't know about it, google it. That said, the typical entry point for static
analysis is calling ``eval_statement``. There's separate logic for
autocompletion in the API; the evaluator is all about evaluating an expression.
Now you need to understand what follows after ``eval_statement``. Let's
walk through an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``eval_statement`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``Evaluator.eval_statement`` doesn't do much, because there's no assignment.
- ``Evaluator.eval_element`` cares for resolving the dotted path
- ``Evaluator.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``eval_element`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
calls to ``find_types``. However, the second call would be ignored, because the
first one would return nothing (there's no foo attribute in ``date``).
What if the import would contain another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``eval_statement`` recursion. It's really
easy. Python can obviously get way more complicated than this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
Jedi has been tested very well, so you can just start modifying code. It's best
to write your own test first for your "new" feature. Don't be scared of
breaking stuff. As long as the tests pass, you're most likely to be fine.
I need to mention now that lazy evaluation is really good because it
only *evaluates* what needs to be *evaluated*. All the statements and modules
that are not used are just being ignored.
"""
import copy
from itertools import chain
from jedi.parser import tree
from jedi import debug
from jedi.evaluate import representation as er
from jedi.evaluate import imports
from jedi.evaluate import recursion
from jedi.evaluate import iterable
from jedi.evaluate.cache import memoize_default
from jedi.evaluate import stdlib
from jedi.evaluate import finder
from jedi.evaluate import compiled
from jedi.evaluate import precedence
from jedi.evaluate import param
from jedi.evaluate import helpers
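# The Evaluator below is normally reached through jedi's public API; a rough
# sketch for the docstring's datetime example (the exact Script signature varies
# between jedi releases, so treat this as an approximation):
#
#     import jedi
#     source = "import datetime\ndatetime.date.toda"
#     script = jedi.Script(source, 2, len("datetime.date.toda"), 'example.py')
#     completions = script.completions()  # eventually drives Evaluator.eval_element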
class Evaluator(object):
def __init__(self, grammar):
self.grammar = grammar
self.memoize_cache = {} # for memoize decorators
# To memorize modules -> equals `sys.modules`.
self.modules = {} # like `sys.modules`.
self.compiled_cache = {} # see `compiled.create()`
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector()
self.analysis = []
def wrap(self, element):
if isinstance(element, tree.Class):
return er.Class(self, element)
elif isinstance(element, tree.Function):
if isinstance(element, tree.Lambda):
return er.LambdaWrapper(self, element)
else:
return er.Function(self, element)
elif isinstance(element, (tree.Module)) \
and not isinstance(element, er.ModuleWrapper):
return er.ModuleWrapper(self, element)
else:
return element
def find_types(self, scope, name_str, position=None, search_global=False,
is_goto=False):
"""
This is the search function. The most important part to debug.
`remove_statements` and `filter_statements` really are the core part of
this completion.
:param position: Position of the last statement -> tuple of line, column
:return: List of Names. Their parents are the types.
"""
f = finder.NameFinder(self, scope, name_str, position)
scopes = f.scopes(search_global)
if is_goto:
return f.filter_name(scopes)
return f.find(scopes, search_global)
@memoize_default(default=[], evaluator_is_first_arg=True)
@recursion.recursion_decorator
@debug.increase_indent
def eval_statement(self, stmt, seek_name=None):
"""
The starting point of the completion. A statement always owns a call
list, which are the calls, that a statement does. In case multiple
names are defined in the statement, `seek_name` returns the result for
this name.
:param stmt: A `tree.ExprStmt`.
"""
debug.dbg('eval_statement %s (%s)', stmt, seek_name)
types = self.eval_element(stmt.get_rhs())
if seek_name:
types = finder.check_tuple_assignments(types, seek_name)
first_operation = stmt.first_operation()
if first_operation not in ('=', None) and not isinstance(stmt, er.InstanceElement): # TODO don't check for this.
# `=` is always the last character in aug assignments -> -1
operator = copy.copy(first_operation)
operator.value = operator.value[:-1]
name = str(stmt.get_defined_names()[0])
parent = self.wrap(stmt.get_parent_scope())
left = self.find_types(parent, name, stmt.start_pos, search_global=True)
if isinstance(stmt.get_parent_until(tree.ForStmt), tree.ForStmt):
# Iterate through result and add the values, that's possible
# only in for loops without clutter, because they are
# predictable.
for r in types:
left = precedence.calculate(self, left, operator, [r])
types = left
else:
types = precedence.calculate(self, left, operator, types)
debug.dbg('eval_statement result %s', types)
return types
@memoize_default(evaluator_is_first_arg=True)
def eval_element(self, element):
if isinstance(element, iterable.AlreadyEvaluated):
return list(element)
elif isinstance(element, iterable.MergedNodes):
return iterable.unite(self.eval_element(e) for e in element)
debug.dbg('eval_element %s@%s', element, element.start_pos)
if isinstance(element, (tree.Name, tree.Literal)) or tree.is_node(element, 'atom'):
return self._eval_atom(element)
elif isinstance(element, tree.Keyword):
# For False/True/None
if element.value in ('False', 'True', 'None'):
return [compiled.builtin.get_by_name(element.value)]
else:
return []
elif element.isinstance(tree.Lambda):
return [er.LambdaWrapper(self, element)]
elif element.isinstance(er.LambdaWrapper):
return [element] # TODO this is no real evaluation.
elif element.type == 'expr_stmt':
return self.eval_statement(element)
elif element.type == 'power':
types = self._eval_atom(element.children[0])
for trailer in element.children[1:]:
if trailer == '**': # has a power operation.
raise NotImplementedError
types = self.eval_trailer(types, trailer)
return types
elif element.type in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements.
return [iterable.ImplicitTuple(self, element)]
elif element.type in ('not_test', 'factor'):
types = self.eval_element(element.children[-1])
for operator in element.children[:-1]:
types = list(precedence.factor_calculate(self, types, operator))
return types
elif element.type == 'test':
# `x if foo else y` case.
return (self.eval_element(element.children[0]) +
self.eval_element(element.children[-1]))
elif element.type == 'operator':
# Must be an ellipsis, other operators are not evaluated.
return [] # Ignore for now.
elif element.type == 'dotted_name':
types = self._eval_atom(element.children[0])
for next_name in element.children[2::2]:
types = list(chain.from_iterable(self.find_types(typ, next_name)
for typ in types))
return types
else:
return precedence.calculate_children(self, element.children)
def _eval_atom(self, atom):
"""
Basically to process ``atom`` nodes. The parser sometimes doesn't
generate the node (because it has just one child). In that case an atom
might be a name or a literal as well.
"""
if isinstance(atom, tree.Name):
# This is the first global lookup.
stmt = atom.get_definition()
scope = stmt.get_parent_until(tree.IsScope, include_current=True)
if isinstance(stmt, tree.CompFor):
stmt = stmt.get_parent_until((tree.ClassOrFunc, tree.ExprStmt))
if stmt.type != 'expr_stmt':
# We only need to adjust the start_pos for statements, because
# there the name cannot be used.
stmt = atom
return self.find_types(scope, atom, stmt.start_pos, search_global=True)
elif isinstance(atom, tree.Literal):
return [compiled.create(self, atom.eval())]
else:
c = atom.children
# Parentheses without commas are not tuples.
if c[0] == '(' and not len(c) == 2 \
and not(tree.is_node(c[1], 'testlist_comp')
and len(c[1].children) > 1):
return self.eval_element(c[1])
try:
comp_for = c[1].children[1]
except (IndexError, AttributeError):
pass
else:
if isinstance(comp_for, tree.CompFor):
return [iterable.Comprehension.from_atom(self, atom)]
return [iterable.Array(self, atom)]
def eval_trailer(self, types, trailer):
trailer_op, node = trailer.children[:2]
if node == ')': # `arglist` is optional.
node = ()
new_types = []
for typ in types:
debug.dbg('eval_trailer: %s in scope %s', trailer, typ)
if trailer_op == '.':
new_types += self.find_types(typ, node)
elif trailer_op == '(':
new_types += self.execute(typ, node, trailer)
elif trailer_op == '[':
try:
get = typ.get_index_types
except AttributeError:
debug.warning("TypeError: '%s' object is not subscriptable"
% typ)
else:
new_types += get(self, node)
return new_types
def execute_evaluated(self, obj, *args):
"""
Execute a function with already executed arguments.
"""
args = [iterable.AlreadyEvaluated([arg]) for arg in args]
return self.execute(obj, args)
@debug.increase_indent
def execute(self, obj, arguments=(), trailer=None):
if not isinstance(arguments, param.Arguments):
arguments = param.Arguments(self, arguments, trailer)
if obj.isinstance(er.Function):
obj = obj.get_decorated_func()
debug.dbg('execute: %s %s', obj, arguments)
try:
# Some stdlib functions like super(), namedtuple(), etc. have been
# hard-coded in Jedi to support them.
return stdlib.execute(self, obj, arguments)
except stdlib.NotInStdLib:
pass
try:
func = obj.py__call__
except AttributeError:
debug.warning("no execution possible %s", obj)
return []
else:
types = func(self, arguments)
debug.dbg('execute result: %s in %s', types, obj)
return types
def goto_definition(self, name):
def_ = name.get_definition()
if def_.type == 'expr_stmt' and name in def_.get_defined_names():
return self.eval_statement(def_, name)
call = helpers.call_of_name(name)
return self.eval_element(call)
def goto(self, name):
def resolve_implicit_imports(names):
for name in names:
if isinstance(name.parent, helpers.FakeImport):
# Those are implicit imports.
s = imports.ImportWrapper(self, name)
for n in s.follow(is_goto=True):
yield n
else:
yield name
stmt = name.get_definition()
par = name.parent
if par.type == 'argument' and par.children[1] == '=' and par.children[0] == name:
# Named param goto.
trailer = par.parent
if trailer.type == 'arglist':
trailer = trailer.parent
if trailer.type != 'classdef':
if trailer.type == 'decorator':
types = self.eval_element(trailer.children[1])
else:
i = trailer.parent.children.index(trailer)
to_evaluate = trailer.parent.children[:i]
types = self.eval_element(to_evaluate[0])
for trailer in to_evaluate[1:]:
types = self.eval_trailer(types, trailer)
param_names = []
for typ in types:
try:
params = typ.params
except AttributeError:
pass
else:
param_names += [param.name for param in params
if param.name.value == name.value]
return param_names
elif isinstance(par, tree.ExprStmt) and name in par.get_defined_names():
# Only take the parent, because if it's more complicated than just
# a name it's something you can "goto" again.
return [name]
elif isinstance(par, (tree.Param, tree.Function, tree.Class)) and par.name is name:
return [name]
elif isinstance(stmt, tree.Import):
modules = imports.ImportWrapper(self, name).follow(is_goto=True)
return list(resolve_implicit_imports(modules))
elif par.type == 'dotted_name': # Is a decorator.
index = par.children.index(name)
if index > 0:
new_dotted = helpers.deep_ast_copy(par)
new_dotted.children[index - 1:] = []
types = self.eval_element(new_dotted)
return resolve_implicit_imports(iterable.unite(
self.find_types(typ, name, is_goto=True) for typ in types
))
scope = name.get_parent_scope()
if tree.is_node(name.parent, 'trailer'):
call = helpers.call_of_name(name, cut_own_trailer=True)
types = self.eval_element(call)
return resolve_implicit_imports(iterable.unite(
self.find_types(typ, name, is_goto=True) for typ in types
))
else:
if stmt.type != 'expr_stmt':
# We only need to adjust the start_pos for statements, because
# there the name cannot be used.
stmt = name
return self.find_types(scope, name, stmt.start_pos,
search_global=True, is_goto=True)
| mit | 1,380,526,699,612,606,700 | 42.350923 | 121 | 0.591905 | false |
dirtshell/smashonline | smashonline.py | 1 | 4573 | import web
from web import form
import model
from captcha import getCaptcha
# List of valid time zones
olsonDB = [
'Pacific/Majuro',
'Pacific/Pago_Pago',
'America/Adak',
'Pacific/Honolulu',
'Pacific/Marquesas',
'Pacific/Gambier',
'America/Anchorage',
'America/Los_Angeles',
'Pacific/Pitcairn',
'America/Phoenix',
'America/Denver',
'America/Guatemala',
'America/Chicago',
'Pacific/Easter',
'America/Bogota',
'America/New_York',
'America/Caracas',
'America/Halifax',
'America/Santo_Domingo',
'America/Santiago',
'America/St_Johns',
'America/Godthab',
'America/Argentina/Buenos_Aires',
'America/Montevideo',
'America/Noronha',
'America/Noronha',
'Atlantic/Azores',
'Atlantic/Cape_Verde',
'UTC',
'Europe/London',
'Europe/Berlin',
'Africa/Lagos',
'Africa/Windhoek',
'Asia/Beirut',
'Africa/Johannesburg',
'Asia/Baghdad',
'Europe/Moscow',
'Asia/Tehran',
'Asia/Dubai',
'Asia/Baku',
'Asia/Kabul',
'Asia/Yekaterinburg',
'Asia/Karachi',
'Asia/Kolkata',
'Asia/Kathmandu',
'Asia/Dhaka',
'Asia/Omsk',
'Asia/Rangoon',
'Asia/Krasnoyarsk',
'Asia/Jakarta',
'Asia/Shanghai',
'Asia/Irkutsk',
'Australia/Eucla',
'Australia/Eucla',
'Asia/Yakutsk',
'Asia/Tokyo',
'Australia/Darwin',
'Australia/Adelaide',
'Australia/Brisbane',
'Asia/Vladivostok',
'Australia/Sydney',
'Australia/Lord_Howe',
'Asia/Kamchatka',
'Pacific/Noumea',
'Pacific/Norfolk',
'Pacific/Auckland',
'Pacific/Tarawa',
'Pacific/Chatham',
'Pacific/Tongatapu',
'Pacific/Apia',
'Pacific/Kiritimati'
]
render = web.template.render('templates/')
urls = (
'/', 'index',
'/create', 'create_match',
'/captcha.gif', 'captcha',
)
# Some session code for the captcha
app = web.application(urls, locals())
if web.config.get('_session') is None:
session = web.session.Session(app, web.session.DiskStore('sessions'), initializer={'captcha': ''})
web.config._session = session
else:
session = web.config._session
matchForm = form.Form(
form.Textbox('title', form.notnull, description="Match Title"),
form.Textbox('net_code', form.notnull, description="Net Code"),
form.Password('password', description="Password", post="<span> Optional</span>"),
form.Textbox('captcha', form.notnull, description="Validation Code", pre="<img src='/captcha.gif' valign=center><br>", style="width:70px;"),
form.Hidden('timezone', form.notnull, value="America/New_York"),
form.Button('Create Game'),
validators = [
form.Validator("Invalid net code", lambda i: len(i.net_code) == 8 ), # Check to make sure the netcode is legit
form.Validator("Title too long", lambda i: len(i.title) <= 25), # Check to make sure the title is within 25 characters
form.Validator("Invalid password", lambda i: len(i.password) <=25), # Check to make sure the password isn't too long
form.Validator("Title is required", lambda i: len(i.title) != 0), # Check to make sure a title was entered
form.Validator("You shouldn't see this error", lambda i: i.timezone in olsonDB), # Check to make sure the time zone is valid
form.Validator("Incorrect CAPTCHA", lambda i: i.captcha == session.captcha),] # Check the CAPTCHA they entered against the one in the session variable
)
class index:
# Display the index page
def GET(self):
matches = model.getMatches() # Get a list of the matches and store them in matches var
return render.index(matches)
class create_match:
# Display the create match page
def GET(self):
form = matchForm()
return render.create(form)
# Save the data
def POST(self):
form = matchForm()
if not form.validates(): # If there is an issue
return render.create(form) # Return them to the create page
else: # Otherwise save it
# form.d.boe and form['boe'].value are both ways to extract data
model.newMatch(title=form.d.title, net_code=form.d.net_code, password=form.d.password, timezone=form.d.timezone)
raise web.seeother('/') # Send em to the home page
class captcha:
# Display the captcha
def GET(self):
web.header("Content-Type", "image/gif")
captcha = getCaptcha()
session.captcha = captcha[0]
return captcha[1].read()
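# web.py reads an optional port from the command line, so a local run would look
# like "python smashonline.py 8080", serving the match list at "/", the creation
# form at "/create" and the generated CAPTCHA image at "/captcha.gif".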
if __name__ == "__main__":
app.run() | mit | 3,073,423,190,335,068,700 | 30.763889 | 161 | 0.629346 | false |
fedora-conary/conary | conary_test/clienttest/resolvetest.py | 1 | 25181 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from testrunner import testhelp
import socket
#testsuite
from conary_test import rephelp
#conary
from conary.conaryclient import update
from conary.deps import deps
from conary import conarycfg, versions
class ClientResolveTest(rephelp.RepositoryHelper):
def testAutoResolveShouldInstallNewPackage(self):
        # if we're keeping the old component (because it's needed for deps),
# we should keep its package too.
dep1 = 'trove: prov:lib(1)'
dep2 = 'trove: prov:lib(2)'
self.addComponent('prov:lib', '1.0', provides=dep1)
self.addComponent('prov:lib', '2.0', provides=dep2, filePrimer=1)
self.addComponent('req:lib', '1.0', requires=dep1, filePrimer=2)
self.addComponent('req:lib', '2.0', requires=dep2, filePrimer=3)
self.addCollection('prov', '1.0', [':lib'])
self.addCollection('prov', '2.0', [':lib'])
self.addCollection('req', '1.0', [':lib'])
self.addCollection('req', '2.0', [':lib'])
self.updatePkg(['req=1.0', 'prov=1.0'])
self.logFilter.add()
self.checkUpdate('req=--2.0', ['req=--2.0', 'req:lib=--2.0',
'prov=--2.0', 'prov:lib=2.0'],
resolve=True, keepRequired = True)
def testGroupRemovesRequiredComponent(self):
# in this scenario, you have a component "req:runtime" that
# requires "prov:runtime". group-test installed "prov:runtime"
# on the system. When moving to a new version of "group-test"
# that does not include "prov:runtime", we expect prov:runtime to
# be left behind since it satisfies a dependency
b1 = '/localhost@rpl:branch/'
b2 = '/localhost@rpl:compat/'
myDep = deps.parseDep('trove: prov:runtime file:/usr/bin/prov')
# create initial components
# we create 2 versions of req:runtime to trigger bugs related
# to sorting on untimestamped versions.
self.addComponent('req:runtime', '1.0-1-1', requires=myDep,
filePrimer=1)
self.addComponent('req:runtime', '1.0-1-2',
requires='file:/usr/bin/prov',
filePrimer=2)
self.addComponent('prov:runtime', '1.0-1-1', provides=myDep,
filePrimer=3)
self.addComponent('test:runtime', '1.0-1-1',
filePrimer=4)
# add prov:runtime and test:runtime to group-test (we have
# test:runtime so we won't have an empty group later on)
self.addCollection('group-test', '1.0-1-1', ['prov:runtime',
'test:runtime'])
# install group-test and req:runtime.
self.updatePkg(self.rootDir, 'group-test', '1.0-1-1')
self.updatePkg(self.rootDir, 'req:runtime', '1.0-1-1')
self.updatePkg(self.rootDir, 'req:runtime', '1.0-1-2',
keepExisting=True)
# now, add the trove that provides our dep into the :compat branch
self.addComponent('prov:runtime', b2+'1.0-1-1',
provides=myDep, filePrimer=2)
# make a group-test which only has test:runtime in it
self.addComponent('test:runtime', b1+'1.0-1-1',
filePrimer=3)
self.addCollection('group-test', b1+'1.0-1-1',
['test:runtime'])
# update to the group-test on the new branch
# set the installLabelPath to include the new branch
# and the compat branch. Use resolve=True to get prov:runtime from
# the :compat branch
self.cfg.installLabelPath = conarycfg.CfgLabelList(
[ versions.Label('localhost@rpl:branch'),
versions.Label('localhost@rpl:compat') ] )
# this should leave prov installed
self.logFilter.add()
self.checkUpdate('group-test=%s1.0-1-1' % b1,
['group-test=:linux--:branch',
'test:runtime=:linux--:branch'], resolve=True,
keepRequired = True)
self.logFilter.compare('warning: keeping prov:runtime - required by at least req:runtime')
self.logFilter.remove()
def testGroupDoesOneThingDepsDoesAnother(self):
# create foo:lib and group-a
# group-a=2.0-1-1
# `- foo:lib=2.1-1-1
self.addComponent('foo:lib', '2.1-1-1')
self.addCollection('group-a', '2.1-1-1', [ 'foo:lib' ])
# update to group-a
self.updatePkg(self.rootDir, 'group-a')
# group-a=1.0-1-1
# `- foo:lib=2.0-1-1
self.addComponent('foo:lib', '2.0-1-1')
self.addCollection('group-a', '2.0-1-1',
[ ('foo:lib', '2.0-1-1') ])
# create bar:runtime which requires foo:lib from 1.0-1-1
# (which does not conflict with foo:lib 2.0-1-1)
dep = deps.parseDep('soname: ELF32/libfoo.so.1(SysV x86)')
self.addComponent('foo:lib', '1.0-1-1',
provides=dep, filePrimer=1)
self.addComponent('bar:runtime', '1.0-1-1',
requires=dep)
# now try to downgrade group-a and install bar:runtime with
# dependency solving at the same time. We should get
# a job that updates foo:lib 2.1-1-1 to 2.0-1-1, and a
# new install of foo:lib=1.0-1-1
self.checkUpdate(['group-a=2.0-1-1', 'bar:runtime'],
['foo:lib=2.1-1-1--2.0-1-1',
'foo:lib=--1.0-1-1',
'bar:runtime=1.0-1-1',
'group-a=2.1-1-1--2.0-1-1'], resolve=True)
def testExistingDepResolution(self):
# something which is recursively included from the update, but
# normally wouldn't be installed, is needed to resolve a dependency
self.addQuickTestComponent("test:runtime", '1.0-1-1')
self.addQuickTestComponent("test:lib", '1.0-1-1', filePrimer = 1)
self.addQuickTestCollection("test", '1.0-1-1',
[ ("test:lib", '1.0-1-1'),
("test:runtime", '1.0-1-1') ])
self.updatePkg(self.rootDir, "test")
self.erasePkg(self.rootDir, "test:lib")
self.addQuickTestComponent("test:runtime", '2.0-1-1',
requires = deps.parseDep('trove: test:lib'))
self.addQuickTestComponent("test:lib", '2.0-1-1', filePrimer = 1)
self.addQuickTestCollection("test", '2.0-1-1',
[ ("test:lib", '2.0-1-1'),
("test:runtime", '2.0-1-1') ])
(rc, str) = self.captureOutput(self.updatePkg, self.rootDir,
"test", resolve = True)
assert(str == 'Including extra troves to resolve dependencies:\n'
' test:lib=2.0-1-1\n')
def testDepResolutionWouldSwitchBranches(self):
# we shouldn't switch branches due to dep resolution
self.addComponent('prov:lib', '1.0')
trv = self.addComponent('prov:lib', ':branch/2.0',
provides='trove: prov:lib(2)')
self.addComponent('req:lib', '1.0', requires='trove:prov:lib',
filePrimer=2)
self.addComponent('req:lib', '2.0', requires='trove:prov:lib(2)',
filePrimer=2)
self.updatePkg(['req:lib=1.0', 'prov:lib=1.0'])
self.cfg.installLabelPath.append(trv.getVersion().trailingLabel())
try:
self.checkUpdate('req:lib', ['req:lib=1.0--2.0',
'prov:lib=1.0--:branch/2.0'],
resolve=True)
raise RuntimeError
except update.DepResolutionFailure:
pass
trv = self.addComponent('prov:lib', ':branch/2.0.1',
provides='trove: prov:lib(2)',
filePrimer=3)
self.checkUpdate('req:lib', ['req:lib=1.0--2.0',
'prov:lib=--:branch/2.0.1'],
resolve=True)
def testResolveErasureFailure(self):
for v in '1.0', '1.1':
self.addComponent('foo:python', v,
provides='trove: foo:python(%s)' % v,
filePrimer=1)
self.addComponent('foo:runtime', v,
requires='trove: foo:python(%s)' % v,
filePrimer=2)
self.addCollection('foo', v, [':python', ':runtime'])
self.addComponent('foo-build:python', v,
requires='trove: foo:python(%s)' % v,
filePrimer=3)
self.addComponent('foo-build:devel', v, filePrimer=4)
if v == '1.0':
self.addCollection('foo-build', v, [':python', ':devel'])
else:
self.addCollection('foo-build', v, [':python'])
self.updatePkg(['foo=1.0', 'foo-build=1.0'])
self.checkUpdate(['foo=1.1'], ['foo=1.0--1.1',
'foo:runtime=1.0--1.1',
'foo:python=1.0--1.1',
'foo-build=1.0--1.1',
'foo-build:python=1.0--1.1',
'foo-build:devel=1.0--',
])
def testResolveErasureFailure2(self):
# we are updating foo from 1 -- 2
# bar=1 requires foo=1. So, we attempt to update bar.
# but bam requires bar=1. We don't allow that sort of recursion.
for v in '1', '2':
self.addComponent('foo:run', v, provides='trove:foo:run(%s)' % v,
filePrimer=1)
self.addComponent('bar:run', v, provides='trove:bar:run(%s)' % v,
requires='trove:foo:run(%s)' % v,
filePrimer=2)
self.addComponent('bam:run', v, provides='trove:bam:run(%s)' % v,
requires='trove:bar:run(%s)' % v,
filePrimer=3)
self.addCollection('foo', v, [':run'])
self.addCollection('bar', v, [':run'])
self.addCollection('bam', v, [':run'])
self.updatePkg(['foo=1', 'bar=1', 'bam=1'])
try:
self.checkUpdate('foo', [])
except update.EraseDepFailure, err:
# this will give some lame message
# about erasing bar:runtime=1
# cause bam:runtime=1 to fail.
pass
else:
assert(0)
v = 2
# make a copy of bar that will install side-by-side
self.addComponent('bar:run', '2.1', provides='trove:bar:run(%s)' % v,
requires='trove:foo:run(%s)' % v,
filePrimer=4)
self.addCollection('bar', '2.1', [':run'])
self.logFilter.add()
try:
self.checkUpdate('foo', ['foo=1--2', 'foo:run=1--2',
'bar=--2', 'bar:run=--2'],
keepRequired = True)
except update.EraseDepFailure, err:
# this gives a message about bar:runtime=1 requiring
# foo:runtime=1, since at some time we attempt to resolve
# a situation by leaving old bar in place and updating
# new bar.
pass
else:
assert(0)
self.logFilter.compare('warning: keeping bar:run - required by at least bam:run')
def testResolveErasureNeedsResolution(self):
# we are updating foo from 1 -- 2
# bar=1 requires foo=1. So, we attempt to update bar.
# bar needs bam to be installed.
for v in '1', '2':
self.addComponent('foo:run', v, provides='trove:foo:run(%s)' % v,
filePrimer=1)
self.addComponent('bar:run', v, provides='trove:bar:run(%s)' % v,
requires='trove:foo:run(%s) trove:bam:run(%s)' % (v, v),
filePrimer=2)
self.addComponent('bam:run', v, provides='trove:bam:run(%s)' % v,
filePrimer=3)
self.addCollection('foo', v, [':run'])
self.addCollection('bar', v, [':run'])
self.addCollection('bam', v, [':run'])
self.updatePkg(['foo=1', 'bar=1', 'bam=1'])
self.checkUpdate('foo', ['foo=1--2', 'foo:run=1--2',
'bar=1--2', 'bar:run=1--2',
'bam=1--2', 'bam:run=1--2'], resolve=True)
def testResolveAgainstDownRepository(self):
try:
socket.gethostbyname('www.rpath.com')
except:
raise testhelp.SkipTestException('Test requires networking')
trv, cs = self.Component('foo:run', requires='trove:bar:run')
self.addComponent('bar:run')
oldILP = self.cfg.installLabelPath
try:
self.cfg.installLabelPath = [versions.Label('doesnotexist@rpl:devel')] + self.cfg.installLabelPath
self.logFilter.add()
self.checkUpdate(['foo:run'], ['foo:run', 'bar:run'],
fromChangesets=[cs], resolve=True)
if self.cfg.proxy:
proxyPort = self.cfg.proxy['http'].split(':')[-1][:-1]
msg = ('warning: Could not access doesnotexist@rpl:devel'
' for dependency resolution: Error occurred'
' opening repository'
' https://test:<PASSWD>@doesnotexist/conary/:'
' Error talking to HTTP proxy localhost:%s:'
' 404 (Not Found)' % proxyPort)
else:
msg = ('warning: Could not access doesnotexist@rpl:devel'
' for dependency resolution: Error occurred'
' opening repository'
' https://test:<PASSWD>@doesnotexist/conary/:'
' Name or service not known')
self.logFilter.compare(msg)
finally:
self.cfg.installLabelPath = oldILP
def testResolveLevel2UpdatesNew(self):
# There's a new version of foo, but we've explicitly updated foo
# to a broken version - we shouldn't try to override the user's
# decision on what to update that package to.
for v in '1', '2':
self.addComponent('foo:run', v, requires="trove:gcc(1)")
self.addCollection('foo', v, [':run'])
self.addComponent('gcc:run', v, provides="trove:gcc(%s)" % v,
filePrimer=1)
self.addCollection('gcc', v, [':run'])
self.addComponent('foo:run', '3', requires="trove:gcc(2)")
self.addCollection('foo', '3', [':run'])
self.updatePkg(['foo=1', 'gcc=1'])
try:
self.updatePkg(['foo=2', 'gcc'], raiseError=True)
except update.EraseDepFailure, err:
expectedStr = """\
The following dependencies would not be met after this update:
foo:run=2-1-1 (Would be updated from 1-1-1) requires:
trove: gcc(1)
which is provided by:
gcc:run=1-1-1 (Would be updated to 2-1-1)"""
assert(str(err) == expectedStr)
else:
assert(0)
def testResolveLevel2UpdatesTwoFromSameReq(self):
# There's a new version of foo, but we've explicitly updated foo
# to a broken version - we shouldn't try to override the user's
# decision on what to update that package to.
for v in '1', '2':
self.addComponent('foo:run', v, requires="trove:gcc(%s)" % v)
self.addCollection('foo', v, [':run'])
self.addComponent('bar:run', v, requires="trove:gcc(%s)" % v,
filePrimer=1)
self.addCollection('bar', v, [':run'])
self.addComponent('gcc:run', v, provides="trove:gcc(%s)" % v,
filePrimer=2)
self.addCollection('gcc', v, [':run'])
self.updatePkg(['foo=1', 'bar=1', 'gcc=1'])
self.updatePkg(['gcc'], raiseError=True)
def testPullInX86WhenWeHaveX86_64(self):
# pull in an x86 flavor of a lib when the x86_64 flavor is already
# installed
self.addComponent('foo:lib=1[is:x86]', provides='trove:foo:lib(x86)')
self.addComponent('foo:lib=1[is:x86_64]', provides='trove:foo:lib(x86_64)', filePrimer=1)
self.addComponent('bar:lib', requires='trove:foo:lib(x86)')
self.updatePkg('foo:lib[is:x86_64]')
self.cfg.flavor = [ deps.parseFlavor('is:x86_64'),
deps.parseFlavor('is: x86 x86_64') ]
self.checkUpdate('bar:lib', ['foo:lib[is:x86]', 'bar:lib'],
resolve=True)
def testNeverAddAnArch(self):
Flavor = deps.parseFlavor
repos = self.openRepository()
self.cfg.flavorPreferences = [ Flavor('is:x86_64'), Flavor('is:x86')]
self.cfg.flavor = [Flavor('is: x86 x86_64')]
self.addComponent('foo:lib=1[is:x86]')
self.addComponent('foo:lib=2[is:x86 x86_64]',
provides='trove:foo:lib(2)')
self.addComponent('bar:lib', requires='trove:foo:lib(2)')
self.updatePkg('foo:lib=1[is:x86]')
self.assertRaises(update.NoNewTrovesError, self.checkUpdate,
'foo:lib', [])
self.assertRaises(update.DepResolutionFailure,
self.checkUpdate, 'bar:lib', [], resolve=True)
# this will install side-by-side so should work
self.addComponent('foo:lib=3[is:x86 x86_64]',
provides='trove:foo:lib(2)',
filePrimer=3)
self.checkUpdate('bar:lib', ['foo:lib=--3', 'bar:lib'], resolve=True)
def testPickLatestByLabel(self):
self.addComponent('foo:lib=/localhost@rpl:branch//linux/1:1[ssl]')
self.addComponent('foo:lib=2:2')
self.addComponent('bar:lib', requires='trove:foo:lib')
self.checkUpdate('bar:lib', ['foo:lib=--2', 'bar:lib'], resolve=True)
def testResolveFlavorPreferences(self):
Flavor = deps.parseFlavor
self.cfg.flavor = [Flavor('ssl is:x86 x86_64')]
self.cfg.flavorPreferences = [Flavor('is:x86_64'), Flavor('is:x86')]
self.addComponent('foo:lib=1-1-1[is:x86_64]')
self.addComponent('foo:lib=2-1-1[is:x86]')
self.addComponent('bar:lib', requires='trove:foo:lib')
# updates to the x86_64 even though there's an x86 available.
self.checkUpdate('bar:lib', ['foo:lib=--1', 'bar:lib'], resolve=True)
def testResolveFailsDueToErase(self):
self.addComponent('foo:lib', provides='trove:foo:lib(1)')
self.addCollection('group-foo', ['foo:lib'])
self.addComponent('bar:lib', requires='trove:foo:lib(1)', filePrimer=1)
self.updatePkg(['group-foo', 'bar:lib'])
self.addComponent('foo:lib=2', provides='trove:foo:lib(2)')
self.addCollection('group-foo=2', ['foo:lib'])
try:
self.updatePkg(['group-foo'], raiseError=True)
except Exception, e:
self.assertEquals(str(e), '''\
The following dependencies would not be met after this update:
bar:lib=1.0-1-1 (Already installed) requires:
trove: foo:lib(1)
which is provided by:
foo:lib=1.0-1-1 (Would be updated to 2-1-1)''')
def testResolveLevel3UpdatesNew(self):
# There's a new version of foo, but we've explicitly updated foo
# to a broken version - we shouldn't try to override the user's
# decision on what to update that package to.
for v in '1', '2':
self.addComponent('foo:run', v, requires="trove:gcc(1)")
self.addCollection('foo', v, [':run'])
self.addComponent('gcc:run', v, provides="trove:gcc(%s)" % v,
filePrimer=1)
self.addCollection('gcc', v, [':run'])
self.addComponent('foo:run', '3', requires="trove:gcc(2)")
self.addCollection('foo', '3', [':run'])
self.updatePkg(['foo=1', 'gcc=1'])
try:
self.updatePkg(['foo=2', 'gcc'], raiseError=True)
assert(0)
except update.EraseDepFailure, err:
assert(str(err) == '''\
The following dependencies would not be met after this update:
foo:run=2-1-1 (Would be updated from 1-1-1) requires:
trove: gcc(1)
which is provided by:
gcc:run=1-1-1 (Would be updated to 2-1-1)''')
try:
self.cfg.fullVersions = True
self.cfg.fullFlavors = True
self.updatePkg(['foo=2', 'gcc'], raiseError=True)
assert(0)
except update.EraseDepFailure, err:
expectedStr = '''\
The following dependencies would not be met after this update:
foo:run=/localhost@rpl:linux/2-1-1[] (Would be updated from \
/localhost@rpl:linux/1-1-1[]) requires:
trove: gcc(1)
which is provided by:
gcc:run=/localhost@rpl:linux/1-1-1[] (Would be updated to \
/localhost@rpl:linux/2-1-1[])'''
assert(str(err) == expectedStr)
def testResolveEncountersErased(self):
# CNY-2996
self.addComponent('foo:lib')
self.addComponent('foo:devellib',
requires='trove: foo:lib', filePrimer=1)
self.addComponent('foo:lib=:branch')
self.addComponent('foo:devellib=:branch',
requires='trove: foo:lib', filePrimer=1)
self.addComponent('bar:devellib',
requires='trove: foo:devellib trove:bam:runtime(1)',
filePrimer=2)
self.addComponent('bam:runtime=1',
requires='trove:bam:runtime(2)',
provides='trove:bam:runtime(1)', filePrimer=3)
self.addComponent('bam:runtime=2',
provides='trove:bam:runtime(2)', filePrimer=4)
self.updatePkg(['foo:lib=:branch', 'foo:devellib=:branch'],
raiseError=True)
err = self.assertRaises(update.EraseDepFailure,
self.checkUpdate,
['-foo:lib', '-foo:devellib', 'bar:devellib'], [], resolve=True)
self.assertEquals(str(err), '''\
The following dependencies would not be met after this update:
bar:devellib=1.0-1-1 (Would be newly installed) requires:
trove: foo:devellib
which is provided by:
foo:devellib=1-1-1 (Would be erased)''')
self.addComponent('baz:devellib', requires='trove:bar:devellib',
filePrimer=5)
err = self.assertRaises(update.EraseDepFailure,
self.checkUpdate,
['-foo:lib', '-foo:devellib', 'baz:devellib'], [], resolve=True)
self.assertEquals(str(err), '''\
The following dependencies would not be met after this update:
bar:devellib=1.0-1-1 (Would be added due to resolution) requires:
trove: foo:devellib
which is provided by:
foo:devellib=1-1-1 (Would be erased)''')
def testResatisfiedUsergroup(self):
"""
Info dep is resatisfied during a migrate, e.g. because of splitting
:user to :user and :group, while also being depended on by both a trove
being updated in the same operation, and by another trove not in the
operation.
@tests: CNY-3685
"""
d = 'groupinfo: nobody'
self.addComponent('info-nobody:user', provides=d, filePrimer=1)
self.addComponent('info-nobody:group', provides=d, filePrimer=1)
self.addComponent('updated:runtime', '1.0', requires=d, filePrimer=2)
self.addComponent('updated:runtime', '2.0', requires=d, filePrimer=2)
self.addComponent('leftalone:runtime', requires=d, filePrimer=3)
self.addCollection('group-foo', '1', [
'info-nobody:user=1.0',
'updated:runtime=1.0',
'leftalone:runtime=1.0',
])
self.addCollection('group-foo', '2', [
'info-nobody:group=1.0',
'updated:runtime=2.0',
'leftalone:runtime=1.0',
])
self.updatePkg(['group-foo=1'], raiseError=True)
self.updatePkg(['group-foo=2'], raiseError=True)
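# The dependency and flavor strings used throughout these tests follow conary's
# dependency grammar and are parsed with deps.parseDep / deps.parseFlavor, e.g.
#
#     from conary.deps import deps
#     provides = deps.parseDep('trove: prov:runtime file:/usr/bin/prov')
#     requires = deps.parseDep('trove: prov:runtime')
#     flavor = deps.parseFlavor('ssl is: x86 x86_64')
#
# The tests above only exercise these objects indirectly, through addComponent(),
# checkUpdate() and updatePkg().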
| apache-2.0 | 3,038,407,383,393,122,300 | 43.885918 | 110 | 0.541202 | false |
awatts/boto | boto/s3/connection.py | 16 | 26349 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import base64
from boto.compat import six, urllib
import time
from boto.auth import detect_potential_s3sigv4
import boto.utils
from boto.connection import AWSAuthConnection
from boto import handler
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.resultset import ResultSet
from boto.exception import BotoClientError, S3ResponseError
def check_lowercase_bucketname(n):
"""
Bucket names must not contain uppercase characters. We check for
this by appending a lowercase character and testing with islower().
Note this also covers cases like numeric bucket names with dashes.
>>> check_lowercase_bucketname("Aaaa")
Traceback (most recent call last):
...
BotoClientError: S3Error: Bucket names cannot contain upper-case
characters when using either the sub-domain or virtual hosting calling
format.
>>> check_lowercase_bucketname("1234-5678-9123")
True
>>> check_lowercase_bucketname("abcdefg1234")
True
"""
if not (n + 'a').islower():
raise BotoClientError("Bucket names cannot contain upper-case " \
"characters when using either the sub-domain or virtual " \
"hosting calling format.")
return True
def assert_case_insensitive(f):
def wrapper(*args, **kwargs):
if len(args) == 3 and check_lowercase_bucketname(args[2]):
pass
return f(*args, **kwargs)
return wrapper
class _CallingFormat(object):
def get_bucket_server(self, server, bucket):
return ''
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '%s://' % protocol
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
def build_host(self, server, bucket):
if bucket == '':
return server
else:
return self.get_bucket_server(server, bucket)
def build_auth_path(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path = ''
if bucket != '':
path = '/' + bucket
return path + '/%s' % urllib.parse.quote(key)
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
return '/%s' % urllib.parse.quote(key)
class SubdomainCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return '%s.%s' % (bucket, server)
class VHostCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return bucket
class OrdinaryCallingFormat(_CallingFormat):
def get_bucket_server(self, server, bucket):
return server
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path_base = '/'
if bucket:
path_base += "%s/" % bucket
return path_base + urllib.parse.quote(key)
class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat):
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '//'
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
class Location(object):
DEFAULT = '' # US Classic Region
EU = 'EU' # Ireland
EUCentral1 = 'eu-central-1' # Frankfurt
USWest = 'us-west-1'
USWest2 = 'us-west-2'
SAEast = 'sa-east-1'
APNortheast = 'ap-northeast-1'
APSoutheast = 'ap-southeast-1'
APSoutheast2 = 'ap-southeast-2'
CNNorth1 = 'cn-north-1'
class NoHostProvided(object):
# An identifying object to help determine whether the user provided a
# ``host`` or not. Never instantiated.
pass
class HostRequiredError(BotoClientError):
pass
class S3Connection(AWSAuthConnection):
DefaultHost = boto.config.get('s3', 'host', 's3.amazonaws.com')
DefaultCallingFormat = boto.config.get('s3', 'calling_format', 'boto.s3.connection.SubdomainCallingFormat')
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=NoHostProvided, debug=0, https_connection_factory=None,
calling_format=DefaultCallingFormat, path='/',
provider='aws', bucket_class=Bucket, security_token=None,
suppress_consec_slashes=True, anon=False,
validate_certs=None, profile_name=None):
no_host_provided = False
if host is NoHostProvided:
no_host_provided = True
host = self.DefaultHost
if isinstance(calling_format, six.string_types):
calling_format=boto.utils.find_class(calling_format)()
self.calling_format = calling_format
self.bucket_class = bucket_class
self.anon = anon
super(S3Connection, self).__init__(host,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
debug=debug, https_connection_factory=https_connection_factory,
path=path, provider=provider, security_token=security_token,
suppress_consec_slashes=suppress_consec_slashes,
validate_certs=validate_certs, profile_name=profile_name)
# We need to delay until after the call to ``super`` before checking
# to see if SigV4 is in use.
if no_host_provided:
if 'hmac-v4-s3' in self._required_auth_capability():
raise HostRequiredError(
"When using SigV4, you must specify a 'host' parameter."
)
@detect_potential_s3sigv4
def _required_auth_capability(self):
if self.anon:
return ['anon']
else:
return ['s3']
def __iter__(self):
for bucket in self.get_all_buckets():
yield bucket
def __contains__(self, bucket_name):
return not (self.lookup(bucket_name) is None)
def set_bucket_class(self, bucket_class):
"""
        Set the Bucket class associated with this connection. By default,
        this would be the boto.s3.bucket.Bucket class, but if you want to
        subclass that for some reason, this allows you to associate your new
        class with this connection.
:type bucket_class: class
:param bucket_class: A subclass of Bucket that can be more specific
"""
self.bucket_class = bucket_class
def build_post_policy(self, expiration_time, conditions):
"""
        Taken from the AWS book Python examples and modified for use with boto.
"""
assert isinstance(expiration_time, time.struct_time), \
'Policy document must include a valid expiration Time object'
# Convert conditions object mappings to condition statements
return '{"expiration": "%s",\n"conditions": [%s]}' % \
(time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
def build_post_form_args(self, bucket_name, key, expires_in=6000,
acl=None, success_action_redirect=None,
max_content_length=None,
http_method='http', fields=None,
conditions=None, storage_class='STANDARD',
server_side_encryption=None):
"""
        Taken from the AWS book Python examples and modified for use with
        boto. This only returns the arguments required for the post form,
        not the actual form. It does not return the file input field, which
        needs to be added to the form separately.
:type bucket_name: string
:param bucket_name: Bucket to submit to
:type key: string
:param key: Key name, optionally add ${filename} to the end to
attach the submitted filename
:type expires_in: integer
:param expires_in: Time (in seconds) before this expires, defaults
to 6000
:type acl: string
:param acl: A canned ACL. One of:
* private
* public-read
* public-read-write
* authenticated-read
* bucket-owner-read
* bucket-owner-full-control
:type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
:type max_content_length: integer
:param max_content_length: Maximum size for this file
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:type storage_class: string
:param storage_class: Storage class to use for storing the object.
Valid values: STANDARD | REDUCED_REDUNDANCY
:type server_side_encryption: string
:param server_side_encryption: Specifies server-side encryption
algorithm to use when Amazon S3 creates an object.
Valid values: None | AES256
:rtype: dict
:return: A dictionary containing field names/values as well as
a url to POST to
.. code-block:: python
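
            # A minimal sketch; the bucket name, key, and redirect URL below
            # are illustrative only.
            conn = boto.connect_s3()
            args = conn.build_post_form_args(
                'mybucket', 'uploads/${filename}',
                expires_in=3600,
                acl='public-read',
                success_action_redirect='http://example.com/done',
                max_content_length=10485760)
            # args['action'] is the URL to POST the form to; args['fields']
            # is the list of hidden form fields (policy, signature, key, ...)
            # to embed in the HTML form alongside the file input field.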
"""
if fields is None:
fields = []
if conditions is None:
conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
# Generate policy document
conditions.append('{"bucket": "%s"}' % bucket_name)
if key.endswith("${filename}"):
conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
else:
conditions.append('{"key": "%s"}' % key)
if acl:
conditions.append('{"acl": "%s"}' % acl)
fields.append({"name": "acl", "value": acl})
if success_action_redirect:
conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
fields.append({"name": "success_action_redirect", "value": success_action_redirect})
if max_content_length:
conditions.append('["content-length-range", 0, %i]' % max_content_length)
if self.provider.security_token:
fields.append({'name': 'x-amz-security-token',
'value': self.provider.security_token})
conditions.append('{"x-amz-security-token": "%s"}' % self.provider.security_token)
if storage_class:
fields.append({'name': 'x-amz-storage-class',
'value': storage_class})
conditions.append('{"x-amz-storage-class": "%s"}' % storage_class)
if server_side_encryption:
fields.append({'name': 'x-amz-server-side-encryption',
'value': server_side_encryption})
conditions.append('{"x-amz-server-side-encryption": "%s"}' % server_side_encryption)
policy = self.build_post_policy(expiration, conditions)
# Add the base64-encoded policy document as the 'policy' field
policy_b64 = base64.b64encode(policy)
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
fields.append({"name": "AWSAccessKeyId",
"value": self.aws_access_key_id})
# Add signature for encoded policy document as the
# 'signature' field
signature = self._auth_handler.sign_string(policy_b64)
fields.append({"name": "signature", "value": signature})
fields.append({"name": "key", "value": key})
# HTTPS protocol will be used if the secure HTTP option is enabled.
url = '%s://%s/' % (http_method,
self.calling_format.build_host(self.server_name(),
bucket_name))
return {"action": url, "fields": fields}
def generate_url_sigv4(self, expires_in, method, bucket='', key='',
headers=None, force_http=False,
response_headers=None, version_id=None,
iso_date=None):
path = self.calling_format.build_path_base(bucket, key)
auth_path = self.calling_format.build_auth_path(bucket, key)
host = self.calling_format.build_host(self.server_name(), bucket)
# For presigned URLs we should ignore the port if it's HTTPS
if host.endswith(':443'):
host = host[:-4]
params = {}
if version_id is not None:
params['VersionId'] = version_id
if response_headers is not None:
params.update(response_headers)
http_request = self.build_base_http_request(method, path, auth_path,
headers=headers, host=host,
params=params)
return self._auth_handler.presign(http_request, expires_in,
iso_date=iso_date)
def generate_url(self, expires_in, method, bucket='', key='', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None):
if self._auth_handler.capability[0] == 'hmac-v4-s3' and query_auth:
# Handle the special sigv4 case
return self.generate_url_sigv4(expires_in, method, bucket=bucket,
key=key, headers=headers, force_http=force_http,
response_headers=response_headers, version_id=version_id)
headers = headers or {}
if expires_in_absolute:
expires = int(expires_in)
else:
expires = int(time.time() + expires_in)
auth_path = self.calling_format.build_auth_path(bucket, key)
auth_path = self.get_path(auth_path)
# optional version_id and response_headers need to be added to
# the query param list.
extra_qp = []
if version_id is not None:
extra_qp.append("versionId=%s" % version_id)
if response_headers:
for k, v in response_headers.items():
extra_qp.append("%s=%s" % (k, urllib.parse.quote(v)))
if self.provider.security_token:
headers['x-amz-security-token'] = self.provider.security_token
if extra_qp:
delimiter = '?' if '?' not in auth_path else '&'
auth_path += delimiter + '&'.join(extra_qp)
c_string = boto.utils.canonical_string(method, auth_path, headers,
expires, self.provider)
b64_hmac = self._auth_handler.sign_string(c_string)
encoded_canonical = urllib.parse.quote(b64_hmac, safe='')
self.calling_format.build_path_base(bucket, key)
if query_auth:
query_part = '?' + self.QueryString % (encoded_canonical, expires,
self.aws_access_key_id)
else:
query_part = ''
if headers:
hdr_prefix = self.provider.header_prefix
for k, v in headers.items():
if k.startswith(hdr_prefix):
# headers used for sig generation must be
# included in the url also.
extra_qp.append("%s=%s" % (k, urllib.parse.quote(v)))
if extra_qp:
delimiter = '?' if not query_part else '&'
query_part += delimiter + '&'.join(extra_qp)
if force_http:
protocol = 'http'
port = 80
else:
protocol = self.protocol
port = self.port
return self.calling_format.build_url_base(self, protocol,
self.server_name(port),
bucket, key) + query_part
def get_all_buckets(self, headers=None):
response = self.make_request('GET', headers=headers)
body = response.read()
if response.status > 300:
raise self.provider.storage_response_error(
response.status, response.reason, body)
rs = ResultSet([('Bucket', self.bucket_class)])
h = handler.XmlHandler(rs, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return rs
def get_canonical_user_id(self, headers=None):
"""
Convenience method that returns the "CanonicalUserID" of the
        user whose credentials are associated with the connection.
The only way to get this value is to do a GET request on the
service which returns all buckets associated with the account.
As part of that response, the canonical userid is returned.
This method simply does all of that and then returns just the
user id.
:rtype: string
:return: A string containing the canonical user id.
"""
rs = self.get_all_buckets(headers=headers)
return rs.owner.id
def get_bucket(self, bucket_name, validate=True, headers=None):
"""
Retrieves a bucket by name.
If the bucket does not exist, an ``S3ResponseError`` will be raised. If
you are unsure if the bucket exists or not, you can use the
``S3Connection.lookup`` method, which will either return a valid bucket
or ``None``.
If ``validate=False`` is passed, no request is made to the service (no
charge/communication delay). This is only safe to do if you are **sure**
the bucket exists.
If the default ``validate=True`` is passed, a request is made to the
service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched
a list of keys (but with a max limit set to ``0``, always returning an empty
list) in the bucket (& included better error messages), at an
increased expense. As of Boto v2.25.0, this now performs a HEAD request
(less expensive but worse error messages).
If you were relying on parsing the error message before, you should call
something like::
bucket = conn.get_bucket('<bucket_name>', validate=False)
bucket.get_all_keys(maxkeys=0)
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:type validate: boolean
:param validate: If ``True``, it will try to verify the bucket exists
on the service-side. (Default: ``True``)
"""
if validate:
return self.head_bucket(bucket_name, headers=headers)
else:
return self.bucket_class(self, bucket_name)
def head_bucket(self, bucket_name, headers=None):
"""
Determines if a bucket exists by name.
If the bucket does not exist, an ``S3ResponseError`` will be raised.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:returns: A <Bucket> object
"""
response = self.make_request('HEAD', bucket_name, headers=headers)
body = response.read()
if response.status == 200:
return self.bucket_class(self, bucket_name)
elif response.status == 403:
# For backward-compatibility, we'll populate part of the exception
# with the most-common default.
err = self.provider.storage_response_error(
response.status,
response.reason,
body
)
err.error_code = 'AccessDenied'
err.error_message = 'Access Denied'
raise err
elif response.status == 404:
# For backward-compatibility, we'll populate part of the exception
# with the most-common default.
err = self.provider.storage_response_error(
response.status,
response.reason,
body
)
err.error_code = 'NoSuchBucket'
err.error_message = 'The specified bucket does not exist'
raise err
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def lookup(self, bucket_name, validate=True, headers=None):
"""
Attempts to get a bucket from S3.
Works identically to ``S3Connection.get_bucket``, save for that it
will return ``None`` if the bucket does not exist instead of throwing
an exception.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:type validate: boolean
:param validate: If ``True``, it will try to fetch all keys within the
given bucket. (Default: ``True``)
"""
try:
bucket = self.get_bucket(bucket_name, validate, headers=headers)
except:
bucket = None
return bucket
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None):
"""
        Creates a new bucket in the given location. By default the bucket is
        created in the US Classic region. You can pass Location.EU to create
        a European bucket (S3) or European Union bucket (GCS).
:type bucket_name: string
:param bucket_name: The name of the new bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to AWS.
:type location: str
:param location: The location of the new bucket. You can use one of the
constants in :class:`boto.s3.connection.Location` (e.g. Location.EU,
Location.USWest, etc.).
:type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new bucket in S3.
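
        A minimal sketch of creating a bucket in the EU region (the bucket
        name is illustrative only)::

            conn = boto.connect_s3()
            bucket = conn.create_bucket('my-eu-bucket', location=Location.EU)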
"""
check_lowercase_bucketname(bucket_name)
if policy:
if headers:
headers[self.provider.acl_header] = policy
else:
headers = {self.provider.acl_header: policy}
if location == Location.DEFAULT:
data = ''
else:
data = '<CreateBucketConfiguration><LocationConstraint>' + \
location + '</LocationConstraint></CreateBucketConfiguration>'
response = self.make_request('PUT', bucket_name, headers=headers,
data=data)
body = response.read()
if response.status == 409:
raise self.provider.storage_create_error(
response.status, response.reason, body)
if response.status == 200:
return self.bucket_class(self, bucket_name)
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def delete_bucket(self, bucket, headers=None):
"""
Removes an S3 bucket.
In order to remove the bucket, it must first be empty. If the bucket is
not empty, an ``S3ResponseError`` will be raised.
        :type bucket: string
        :param bucket: The name of the bucket to delete
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
"""
response = self.make_request('DELETE', bucket, headers=headers)
body = response.read()
if response.status != 204:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def make_request(self, method, bucket='', key='', headers=None, data='',
query_args=None, sender=None, override_num_retries=None,
retry_handler=None):
if isinstance(bucket, self.bucket_class):
bucket = bucket.name
if isinstance(key, Key):
key = key.name
path = self.calling_format.build_path_base(bucket, key)
boto.log.debug('path=%s' % path)
auth_path = self.calling_format.build_auth_path(bucket, key)
boto.log.debug('auth_path=%s' % auth_path)
host = self.calling_format.build_host(self.server_name(), bucket)
if query_args:
path += '?' + query_args
boto.log.debug('path=%s' % path)
auth_path += '?' + query_args
boto.log.debug('auth_path=%s' % auth_path)
return super(S3Connection, self).make_request(
method, path, headers,
data, host, auth_path, sender,
override_num_retries=override_num_retries,
retry_handler=retry_handler
)
| mit | -2,118,226,682,433,625,900 | 38.38565 | 111 | 0.597973 | false |
JPJPJPOPOP/zulip | zerver/webhooks/gitlab/view.py | 10 | 13038 | from __future__ import absolute_import
from functools import partial
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.decorator import api_key_only_webhook_view, REQ, has_request_variables
from zerver.lib.webhooks.git import get_push_commits_event_message, EMPTY_SHA,\
get_remove_branch_event_message, get_pull_request_event_message,\
get_issue_event_message, SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE,\
get_commits_comment_action_message, get_push_tag_event_message
from zerver.models import Client, UserProfile
from django.http import HttpRequest, HttpResponse
from typing import Dict, Any, Iterable, Optional, Text
class UnknownEventType(Exception):
pass
def get_push_event_body(payload):
# type: (Dict[str, Any]) -> Text
if payload.get('after') == EMPTY_SHA:
return get_remove_branch_event_body(payload)
return get_normal_push_event_body(payload)
def get_normal_push_event_body(payload):
# type: (Dict[str, Any]) -> Text
compare_url = u'{}/compare/{}...{}'.format(
get_repository_homepage(payload),
payload['before'],
payload['after']
)
commits = [
{
'sha': commit.get('id'),
'message': commit.get('message'),
'url': commit.get('url')
}
for commit in payload.get('commits')
]
return get_push_commits_event_message(
get_user_name(payload),
compare_url,
get_branch_name(payload),
commits
)
def get_remove_branch_event_body(payload):
# type: (Dict[str, Any]) -> Text
return get_remove_branch_event_message(
get_user_name(payload),
get_branch_name(payload)
)
def get_tag_push_event_body(payload):
# type: (Dict[str, Any]) -> Text
return get_push_tag_event_message(
get_user_name(payload),
get_tag_name(payload),
action="pushed" if payload.get('checkout_sha') else "removed"
)
def get_issue_created_event_body(payload):
# type: (Dict[str, Any]) -> Text
return get_issue_event_message(
get_issue_user_name(payload),
'created',
get_object_url(payload),
payload.get('object_attributes').get('iid'),
payload.get('object_attributes').get('description'),
get_objects_assignee(payload)
)
def get_issue_event_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
return get_issue_event_message(
get_issue_user_name(payload),
action,
get_object_url(payload),
payload.get('object_attributes').get('iid'),
)
def get_merge_request_updated_event_body(payload):
# type: (Dict[str, Any]) -> Text
if payload.get('object_attributes').get('oldrev'):
return get_merge_request_event_body(payload, "added commit(s) to")
return get_merge_request_open_or_updated_body(payload, "updated")
def get_merge_request_event_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
pull_request = payload.get('object_attributes')
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
pull_request.get('url'),
pull_request.get('iid'),
type='MR',
)
def get_merge_request_open_or_updated_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
pull_request = payload.get('object_attributes')
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
pull_request.get('url'),
pull_request.get('iid'),
pull_request.get('source_branch'),
pull_request.get('target_branch'),
pull_request.get('description'),
get_objects_assignee(payload),
type='MR',
)
def get_objects_assignee(payload):
# type: (Dict[str, Any]) -> Optional[Text]
assignee_object = payload.get('assignee')
if assignee_object:
return assignee_object.get('name')
return None
def get_commented_commit_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({})'.format(comment['url'])
return get_commits_comment_action_message(
get_issue_user_name(payload),
action,
payload.get('commit').get('url'),
payload.get('commit').get('id'),
comment['note'],
)
def get_commented_merge_request_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({}) on'.format(comment['url'])
url = u'{}/merge_requests/{}'.format(
payload.get('project').get('web_url'),
payload.get('merge_request').get('iid')
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload.get('merge_request').get('iid'),
message=comment['note'],
type='MR'
)
def get_commented_issue_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({}) on'.format(comment['url'])
url = u'{}/issues/{}'.format(
payload.get('project').get('web_url'),
payload.get('issue').get('iid')
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload.get('issue').get('iid'),
message=comment['note'],
type='Issue'
)
def get_commented_snippet_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({}) on'.format(comment['url'])
url = u'{}/snippets/{}'.format(
payload.get('project').get('web_url'),
payload.get('snippet').get('id')
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload.get('snippet').get('id'),
message=comment['note'],
type='Snippet'
)
def get_wiki_page_event_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
return u"{} {} [Wiki Page \"{}\"]({}).".format(
get_issue_user_name(payload),
action,
payload.get('object_attributes').get('title'),
payload.get('object_attributes').get('url'),
)
def get_build_hook_event_body(payload):
# type: (Dict[str, Any]) -> Text
build_status = payload.get('build_status')
if build_status == 'created':
action = 'was created'
elif build_status == 'running':
action = 'started'
else:
action = 'changed status to {}'.format(build_status)
return u"Build {} from {} stage {}.".format(
payload.get('build_name'),
payload.get('build_stage'),
action
)
def get_pipeline_event_body(payload):
# type: (Dict[str, Any]) -> Text
pipeline_status = payload.get('object_attributes').get('status')
if pipeline_status == 'pending':
action = 'was created'
elif pipeline_status == 'running':
action = 'started'
else:
action = 'changed status to {}'.format(pipeline_status)
builds_status = u""
for build in payload.get('builds'):
builds_status += u"* {} - {}\n".format(build.get('name'), build.get('status'))
return u"Pipeline {} with build(s):\n{}.".format(action, builds_status[:-1])
def get_repo_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['project']['name']
def get_user_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['user_name']
def get_issue_user_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['user']['name']
def get_repository_homepage(payload):
# type: (Dict[str, Any]) -> Text
return payload['repository']['homepage']
def get_branch_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['ref'].replace('refs/heads/', '')
def get_tag_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['ref'].replace('refs/tags/', '')
def get_object_iid(payload):
# type: (Dict[str, Any]) -> Text
return payload['object_attributes']['iid']
def get_object_url(payload):
# type: (Dict[str, Any]) -> Text
return payload['object_attributes']['url']
EVENT_FUNCTION_MAPPER = {
'Push Hook': get_push_event_body,
'Tag Push Hook': get_tag_push_event_body,
'Issue Hook open': get_issue_created_event_body,
'Issue Hook close': partial(get_issue_event_body, action='closed'),
'Issue Hook reopen': partial(get_issue_event_body, action='reopened'),
'Issue Hook update': partial(get_issue_event_body, action='updated'),
'Note Hook Commit': get_commented_commit_event_body,
'Note Hook MergeRequest': get_commented_merge_request_event_body,
'Note Hook Issue': get_commented_issue_event_body,
'Note Hook Snippet': get_commented_snippet_event_body,
'Merge Request Hook open': partial(get_merge_request_open_or_updated_body, action='created'),
'Merge Request Hook update': get_merge_request_updated_event_body,
'Merge Request Hook merge': partial(get_merge_request_event_body, action='merged'),
'Merge Request Hook close': partial(get_merge_request_event_body, action='closed'),
'Wiki Page Hook create': partial(get_wiki_page_event_body, action='created'),
'Wiki Page Hook update': partial(get_wiki_page_event_body, action='updated'),
'Build Hook': get_build_hook_event_body,
'Pipeline Hook': get_pipeline_event_body,
}
@api_key_only_webhook_view("Gitlab")
@has_request_variables
def api_gitlab_webhook(request, user_profile, client,
stream=REQ(default='gitlab'),
payload=REQ(argument_type='body')):
# type: (HttpRequest, UserProfile, Client, Text, Dict[str, Any]) -> HttpResponse
event = get_event(request, payload)
body = get_body_based_on_event(event)(payload)
subject = get_subject_based_on_event(event, payload)
check_send_message(user_profile, client, 'stream', [stream], subject, body)
return json_success()
def get_body_based_on_event(event):
# type: (str) -> Any
return EVENT_FUNCTION_MAPPER[event]
def get_subject_based_on_event(event, payload):
# type: (str, Dict[str, Any]) -> Text
if event == 'Push Hook':
return u"{} / {}".format(get_repo_name(payload), get_branch_name(payload))
elif event == 'Build Hook':
return u"{} / {}".format(payload.get('repository').get('name'), get_branch_name(payload))
elif event == 'Pipeline Hook':
return u"{} / {}".format(
get_repo_name(payload),
payload.get('object_attributes').get('ref').replace('refs/heads/', ''))
elif event.startswith('Merge Request Hook'):
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='MR',
id=payload.get('object_attributes').get('iid'),
title=payload.get('object_attributes').get('title')
)
elif event.startswith('Issue Hook'):
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='Issue',
id=payload.get('object_attributes').get('iid'),
title=payload.get('object_attributes').get('title')
)
elif event == 'Note Hook Issue':
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='Issue',
id=payload.get('issue').get('iid'),
title=payload.get('issue').get('title')
)
elif event == 'Note Hook MergeRequest':
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='MR',
id=payload.get('merge_request').get('iid'),
title=payload.get('merge_request').get('title')
)
elif event == 'Note Hook Snippet':
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='Snippet',
id=payload.get('snippet').get('id'),
title=payload.get('snippet').get('title')
)
return get_repo_name(payload)
def get_event(request, payload):
# type: (HttpRequest, Dict[str, Any]) -> str
event = request.META['HTTP_X_GITLAB_EVENT']
if event == 'Issue Hook':
action = payload.get('object_attributes').get('action')
event = "{} {}".format(event, action)
elif event == 'Note Hook':
action = payload.get('object_attributes').get('noteable_type')
event = "{} {}".format(event, action)
elif event == 'Merge Request Hook':
action = payload.get('object_attributes').get('action')
event = "{} {}".format(event, action)
elif event == 'Wiki Page Hook':
action = payload.get('object_attributes').get('action')
event = "{} {}".format(event, action)
if event in list(EVENT_FUNCTION_MAPPER.keys()):
return event
raise UnknownEventType(u'Event {} is unknown and cannot be handled'.format(event))
| apache-2.0 | 2,735,772,732,093,666,000 | 35.116343 | 97 | 0.616812 | false |
mvcsantos/QGIS | python/plugins/processing/gui/AlgorithmDialogBase.py | 1 | 4883 | # -*- coding: utf-8 -*-
"""
***************************************************************************
AlgorithmDialogBase.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import uic
from PyQt4.QtCore import QCoreApplication, QUrl
from PyQt4.QtGui import QApplication, QDialogButtonBox, QPushButton
from qgis.utils import iface
from processing.core.ProcessingConfig import ProcessingConfig
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgAlgorithmBase.ui'))
class AlgorithmDialogBase(BASE, WIDGET):
class InvalidParameterValue(Exception):
def __init__(self, param, widget):
(self.parameter, self.widget) = (param, widget)
def __init__(self, alg):
super(AlgorithmDialogBase, self).__init__(iface.mainWindow())
self.setupUi(self)
self.executed = False
self.mainWidget = None
self.alg = alg
# Rename OK button to Run
self.btnRun = self.buttonBox.button(QDialogButtonBox.Ok)
self.btnRun.setText(self.tr('Run'))
self.btnClose = self.buttonBox.button(QDialogButtonBox.Close)
self.btnCancel = QPushButton("Cancel")
self.btnCancel.setEnabled(False)
self.buttonBox.addButton(self.btnCancel, QDialogButtonBox.ActionRole)
self.setWindowTitle(self.alg.name)
# load algorithm help if available
isText, algHelp = self.alg.help()
if algHelp is not None:
algHelp = algHelp if isText else QUrl(algHelp)
else:
algHelp = self.tr('<h2>Sorry, no help is available for this '
'algorithm.</h2>')
try:
if isText:
self.txtHelp.setHtml(algHelp)
else:
self.txtHelp.load(algHelp)
except:
self.txtHelp.setHtml(
self.tr('<h2>Could not open help file :-( </h2>'))
self.showDebug = ProcessingConfig.getSetting(
ProcessingConfig.SHOW_DEBUG_IN_DIALOG)
def setMainWidget(self):
self.tabWidget.widget(0).layout().addWidget(self.mainWidget)
def error(self, msg):
QApplication.restoreOverrideCursor()
self.setInfo(msg, True)
self.resetGUI()
self.tabWidget.setCurrentIndex(1)
def resetGUI(self):
QApplication.restoreOverrideCursor()
self.lblProgress.setText('')
self.progressBar.setMaximum(100)
self.progressBar.setValue(0)
self.btnRun.setEnabled(True)
self.btnClose.setEnabled(True)
self.btnCancel.setEnabled(False)
def setInfo(self, msg, error=False):
if error:
self.txtLog.append('<span style="color:red">%s</span>' % msg)
else:
self.txtLog.append(msg)
QCoreApplication.processEvents()
def setCommand(self, cmd):
if self.showDebug:
            self.setInfo('<code>%s</code>' % cmd)
QCoreApplication.processEvents()
def setDebugInfo(self, msg):
if self.showDebug:
self.setInfo('<span style="color:blue">%s</span>' % msg)
QCoreApplication.processEvents()
def setConsoleInfo(self, msg):
if self.showDebug:
self.setCommand('<span style="color:darkgray">%s</span>' % msg)
QCoreApplication.processEvents()
def setPercentage(self, value):
if self.progressBar.maximum() == 0:
self.progressBar.setMaximum(100)
self.progressBar.setValue(value)
QCoreApplication.processEvents()
def setText(self, text):
self.lblProgress.setText(text)
self.setInfo(text, False)
QCoreApplication.processEvents()
def setParamValues(self):
pass
def setParamValue(self, param, widget, alg=None):
pass
def accept(self):
pass
def finish(self):
pass
| gpl-2.0 | -650,460,406,269,059,600 | 30.707792 | 77 | 0.562154 | false |
krkeegan/insteon-manager | insteon_mngr/sequences/modem.py | 3 | 3757 | from insteon_mngr.trigger import PLMTrigger
from insteon_mngr.sequences.common import WriteALDBRecord
class WriteALDBRecordModem(WriteALDBRecord):
def _perform_write(self):
super()._perform_write()
if self.in_use is True:
self.data1 = self._linked_group.device.dev_cat
self.data2 = self._linked_group.device.sub_cat
self.data3 = self._linked_group.device.firmware
msg = self._group.device.create_message('all_link_manage_rec')
msg_attributes = self._compiled_record()
msg.insert_bytes_into_raw(msg_attributes)
trigger_attributes = {
'plm_cmd': 0x6F,
'ctrl_code': msg_attributes['ctrl_code'],
'link_flags': msg_attributes['link_flags'],
'group': msg_attributes['group'],
'dev_addr_hi': msg_attributes['dev_addr_hi'],
'dev_addr_mid': msg_attributes['dev_addr_mid'],
'dev_addr_low': msg_attributes['dev_addr_low'],
'data_1': msg_attributes['data_1'],
'data_2': msg_attributes['data_2'],
'data_3': msg_attributes['data_3']
}
trigger = PLMTrigger(plm=self._group.device,
attributes=trigger_attributes)
trigger.trigger_function = lambda: self._save_record()
trigger.name = self._group.device.dev_addr_str + 'write_aldb'
trigger.queue()
self._group.device.queue_device_msg(msg)
def _ctrl_code(self, search_bytes):
records = self._group.device.aldb.get_matching_records(search_bytes)
ctrl_code = 0x20
if len(records) == 0 and self.controller is True:
ctrl_code = 0x40
if len(records) == 0 and self.controller is False:
ctrl_code = 0x41
return ctrl_code
def _compiled_record(self):
ret = super()._compiled_record()
del ret['msb']
del ret['lsb']
if not self.in_use:
record = self._group.device.aldb.get_record(self.key)
record_parsed = record.parse_record()
ret['link_flags'] = record_parsed['link_flags']
ret['group'] = record_parsed['group']
ret['dev_addr_hi'] = record_parsed['dev_addr_hi']
ret['dev_addr_mid'] = record_parsed['dev_addr_mid']
ret['dev_addr_low'] = record_parsed['dev_addr_low']
ret['ctrl_code'] = 0x80
else:
search_bytes = {
'link_flags': ret['link_flags'],
'group': ret['group'],
'dev_addr_hi': ret['dev_addr_hi'],
'dev_addr_mid': ret['dev_addr_mid'],
'dev_addr_low': ret['dev_addr_low']
}
ret['ctrl_code'] = self._ctrl_code(search_bytes)
return ret
def _save_record(self):
compiled = self._compiled_record()
aldb_entry = bytearray([
compiled['link_flags'],
compiled['group'],
compiled['dev_addr_hi'],
compiled['dev_addr_mid'],
compiled['dev_addr_low'],
compiled['data_1'],
compiled['data_2'],
compiled['data_3']
])
if self.in_use is False:
aldb_entry = bytearray(8)
record = self._group.device.aldb.get_record(self.key)
record.edit_record(aldb_entry)
self._on_success()
def _write_failure(self):
self._on_failure()
def start(self):
'''Starts the sequence to write the aldb record'''
if self.linked_group is None and self.in_use:
print('error no linked_group defined')
else:
self._perform_write()
| gpl-2.0 | 2,785,511,654,145,972,000 | 38.397849 | 76 | 0.538195 | false |
scaphilo/koalixcrm | koalixcrm/crm/migrations/0052_auto_20181014_2304.py | 2 | 2741 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-10-12 20:56
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def reverse_func(apps, schema_editor):
return 1
def restore_from_backup(apps, schema_editor):
Position = apps.get_model("crm", "Position")
ProductType = apps.get_model("crm", "ProductType")
CustomerGroupTransform = apps.get_model("crm", "CustomerGroupTransform")
Price = apps.get_model("crm", "Price")
ProductPrice = apps.get_model("crm", "ProductPrice")
UnitTransform = apps.get_model("crm", "UnitTransform")
db_alias = schema_editor.connection.alias
all_positions = Position.objects.using(db_alias).all()
for position in all_positions:
product_type = ProductType.objects.using(db_alias).get(id=position.product_backup)
position.product_type = product_type
position.save()
all_customer_group_transforms = CustomerGroupTransform.objects.using(db_alias).all()
for customer_group_transform in all_customer_group_transforms:
product_type = ProductType.objects.using(db_alias).get(id=customer_group_transform.product_backup)
customer_group_transform.product_type = product_type
customer_group_transform.save()
all_prices = Price.objects.using(db_alias).all()
for price in all_prices:
product_type = ProductType.objects.using(db_alias).get(id=price.product_backup)
new_product_price = ProductPrice.objects.using(db_alias).create(unit=price.unit,
currency=price.currency,
customer_group=price.customer_group,
price=price.price,
valid_from=price.valid_from,
valid_until=price.valid_until,
product_type=product_type)
new_product_price.save()
price.delete()
all_unit_transforms = UnitTransform.objects.using(db_alias).all()
for unit_transform in all_unit_transforms:
product_type = ProductType.objects.using(db_alias).get(id=unit_transform.product_backup)
unit_transform.product_type = product_type
unit_transform.save()
class Migration(migrations.Migration):
dependencies = [
('crm', '0051_auto_20181014_2302'),
]
operations = [
migrations.RunPython(restore_from_backup, reverse_func),
]
| bsd-3-clause | -5,343,144,506,403,943,000 | 45.457627 | 108 | 0.599051 | false |
popgengui/negui | agestrucne/pginputneestimator.py | 1 | 2282 | '''
Description
Object manages input data to be used by the pgopneestimator object.
'''
from builtins import object
__filename__ = "pginputneestimator.py"
__date__ = "20160502"
__author__ = "Ted Cosart<[email protected]>"
import copy
class PGInputNeEstimator (object):
'''
	Class to provide class PGOpNeEstimator
	with input file and params needed to run
	the ne-estimator object as coded in Tiago Antao's
	ne2.py, one of the age structure modules
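
	A minimal usage sketch (the genepop file name and the parameter
	values below are illustrative only):

		o_input=PGInputNeEstimator( "mypop.gen" )
		o_input.run_params={ "crits":[ 0.01 ], "monogamy":True }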
'''
def __init__( self, s_genepop_filename = None ):
self.__genepopfile=s_genepop_filename
#these are the default params in the signature of the run method of
#Tiago's NeEstimator2Controller
self.__run_params={ "crits":None, "LD":True,
"hets":False, "coanc":False,
"temp":None, "monogamy":False, "options":None }
'''
2018_04_28. These new params are passed to LDNe2, and are not part
of the original set of params Tiago used calling NeEstimator.
Note that NeEstimator, then or in later versions, does implement
the loci/chrom table, but we have implemented a direct call to LDNe2
customized for our program, to take these params.
'''
self.__ldne2_only_params={ "chromlocifile":"None", "allele_pairing_scheme":0 }
return
#end __init__
@property
def genepop_file(self):
return self.__genepopfile
#end genepop_file
@property
def run_params( self ):
#not to be used as a setter
#for parms, so pass a deep copy
return copy.deepcopy( self.__run_params )
#end run_params
@run_params.setter
def run_params( self, dv_params ):
for s_name in dv_params:
self.__run_params[ s_name ] = dv_params[ s_name ]
#end for each param
return
#end setRunParams
'''
2018_04_28. Added to implement new chromlocifile
and allele_pairing_scheme parameters passed to LDNe2.
'''
@property
def ldne2_only_params( self ):
#not to be used as a setter
#for parms, so pass a deep copy
return copy.deepcopy( self.__ldne2_only_params )
#end property ldne2_only_params
@ldne2_only_params.setter
def ldne2_only_params( self, dv_params ):
for s_name in dv_params:
self.__ldne2_only_params[ s_name ] = dv_params[ s_name ]
#end for each param
return
#end setter for ldne2_only_params
#end class PgInputNeEstimator
if __name__ == "__main__":
pass
#end if main
| agpl-3.0 | 6,959,040,335,069,520,000 | 26.166667 | 80 | 0.702454 | false |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/attr/_make.py | 2 | 35624 | from __future__ import absolute_import, division, print_function
import hashlib
import linecache
from operator import itemgetter
from . import _config
from ._compat import PY2, iteritems, isclass, iterkeys, metadata_proxy
from .exceptions import (
DefaultAlreadySetError,
FrozenInstanceError,
NotAnAttrsClassError,
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_convert_pat = "__attr_convert_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = " {attr_name} = property(itemgetter({index}))"
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
All instances of `_Nothing` are equal.
"""
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __eq__(self, other):
return other.__class__ == _Nothing
def __ne__(self, other):
return not self == other
def __repr__(self):
return "NOTHING"
def __hash__(self):
return 0xdeadbeef
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attr(default=NOTHING, validator=None,
repr=True, cmp=True, hash=None, init=True,
convert=None, metadata={}):
"""
Create a new attribute on a class.
.. warning::
Does *not* do anything unless the class is also decorated with
:func:`attr.s`!
:param default: A value that is used if an ``attrs``-generated ``__init__``
is used and no value is passed while instantiating or the attribute is
excluded using ``init=False``.
If the value is an instance of :class:`Factory`, its callable will be
used to construct a new value (useful for mutable datatypes like lists
or dicts).
If a default is not set (or set manually to ``attr.NOTHING``), a value
*must* be supplied when instantiating; otherwise a :exc:`TypeError`
will be raised.
The default can also be set using decorator notation as shown below.
:type default: Any value.
:param validator: :func:`callable` that is called by ``attrs``-generated
``__init__`` methods after the instance has been initialized. They
receive the initialized instance, the :class:`Attribute`, and the
passed value.
The return value is *not* inspected so the validator has to throw an
exception itself.
If a ``list`` is passed, its items are treated as validators and must
all pass.
Validators can be globally disabled and re-enabled using
:func:`get_run_validators`.
The validator can also be set using decorator notation as shown below.
:type validator: ``callable`` or a ``list`` of ``callable``\ s.
:param bool repr: Include this attribute in the generated ``__repr__``
method.
:param bool cmp: Include this attribute in the generated comparison methods
(``__eq__`` et al).
:param hash: Include this attribute in the generated ``__hash__``
        method. If ``None`` (default), mirror *cmp*'s value. This is the
        correct behavior according to the Python spec. Setting this value to
        anything other than ``None`` is *discouraged*.
:type hash: ``bool`` or ``None``
:param bool init: Include this attribute in the generated ``__init__``
method. It is possible to set this to ``False`` and set a default
value. In that case this attributed is unconditionally initialized
with the specified default value or factory.
:param callable convert: :func:`callable` that is called by
``attrs``-generated ``__init__`` methods to convert attribute's value
to the desired format. It is given the passed-in value, and the
returned value will be used as the new value of the attribute. The
value is converted before being passed to the validator, if any.
:param metadata: An arbitrary mapping, to be used by third-party
components. See :ref:`extending_metadata`.
.. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
.. versionchanged:: 17.1.0
        *hash* is ``None`` and therefore mirrors *cmp* by default.
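
    A minimal usage sketch (class and attribute names are illustrative)::

        @attr.s
        class C(object):
            x = attr.ib()
            y = attr.ib(default=42)
            z = attr.ib(default=attr.Factory(list))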
"""
if hash is not None and hash is not True and hash is not False:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
return _CountingAttr(
default=default,
validator=validator,
repr=repr,
cmp=cmp,
hash=hash,
init=init,
convert=convert,
metadata=metadata,
)
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(_tuple_property_pat.format(
index=i,
attr_name=attr_name,
))
else:
attr_class_template.append(" pass")
globs = {"itemgetter": itemgetter}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
def _transform_attrs(cls, these):
"""
Transforms all `_CountingAttr`s on a class into `Attribute`s and saves the
list in `__attrs_attrs__`.
If *these* is passed, use that and don't look for them on the class.
"""
super_cls = []
for c in reversed(cls.__mro__[1:-1]):
sub_attrs = getattr(c, "__attrs_attrs__", None)
if sub_attrs is not None:
super_cls.extend(a for a in sub_attrs if a not in super_cls)
if these is None:
ca_list = [(name, attr)
for name, attr
in cls.__dict__.items()
if isinstance(attr, _CountingAttr)]
else:
ca_list = [(name, ca)
for name, ca
in iteritems(these)]
non_super_attrs = [
Attribute.from_counting_attr(name=attr_name, ca=ca)
for attr_name, ca
in sorted(ca_list, key=lambda e: e[1].counter)
]
attr_names = [a.name for a in super_cls + non_super_attrs]
AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
cls.__attrs_attrs__ = AttrsClass(super_cls + [
Attribute.from_counting_attr(name=attr_name, ca=ca)
for attr_name, ca
in sorted(ca_list, key=lambda e: e[1].counter)
])
had_default = False
for a in cls.__attrs_attrs__:
if these is None and a not in super_cls:
setattr(cls, a.name, a)
if had_default is True and a.default is NOTHING and a.init is True:
raise ValueError(
"No mandatory attributes allowed after an attribute with a "
"default value or factory. Attribute in question: {a!r}"
.format(a=a)
)
elif had_default is False and \
a.default is not NOTHING and \
a.init is not False:
had_default = True
def _frozen_setattrs(self, name, value):
"""
Attached to frozen classes as __setattr__.
"""
raise FrozenInstanceError()
def _frozen_delattrs(self, name):
"""
Attached to frozen classes as __delattr__.
"""
raise FrozenInstanceError()
def attributes(maybe_cls=None, these=None, repr_ns=None,
repr=True, cmp=True, hash=None, init=True,
slots=False, frozen=False, str=False):
r"""
A class decorator that adds `dunder
<https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
specified attributes using :func:`attr.ib` or the *these* argument.
:param these: A dictionary of name to :func:`attr.ib` mappings. This is
useful to avoid the definition of your attributes within the class body
because you can't (e.g. if you want to add ``__repr__`` methods to
Django models) or don't want to.
If *these* is not ``None``, ``attrs`` will *not* search the class body
for attributes.
:type these: :class:`dict` of :class:`str` to :func:`attr.ib`
:param str repr_ns: When using nested classes, there's no way in Python 2
to automatically detect that. Therefore it's possible to set the
namespace explicitly for a more meaningful ``repr`` output.
    :param bool repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes.
:param bool str: Create a ``__str__`` method that is identical to
``__repr__``. This is usually not necessary except for
:class:`Exception`\ s.
:param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
``__gt__``, and ``__ge__`` methods that compare the class as if it were
a tuple of its ``attrs`` attributes. But the attributes are *only*
compared, if the type of both classes is *identical*!
:param hash: If ``None`` (default), the ``__hash__`` method is generated
according how *cmp* and *frozen* are set.
1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
None, marking it unhashable (which it is).
3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
``__hash__`` method of the superclass will be used (if superclass is
``object``, this means it will fall back to id-based hashing.).
Although not recommended, you can decide for yourself and force
``attrs`` to create one (e.g. if the class is immutable even though you
didn't freeze it programmatically) by passing ``True`` or not. Both of
these cases are rather special and should be used carefully.
See the `Python documentation \
<https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
and the `GitHub issue that led to the default behavior \
<https://github.com/python-attrs/attrs/issues/136>`_ for more details.
:type hash: ``bool`` or ``None``
    :param bool init: Create a ``__init__`` method that initializes the
``attrs`` attributes. Leading underscores are stripped for the
argument name. If a ``__attrs_post_init__`` method exists on the
class, it will be called after the class is fully initialized.
:param bool slots: Create a slots_-style class that's more
memory-efficient. See :ref:`slots` for further ramifications.
:param bool frozen: Make instances immutable after initialization. If
someone attempts to modify a frozen instance,
:exc:`attr.exceptions.FrozenInstanceError` is raised.
Please note:
1. This is achieved by installing a custom ``__setattr__`` method
on your class so you can't implement an own one.
2. True immutability is impossible in Python.
        3. This *does* have a minor runtime performance :ref:`impact
<how-frozen>` when initializing new instances. In other words:
``__init__`` is slightly slower with ``frozen=True``.
4. If a class is frozen, you cannot modify ``self`` in
``__attrs_post_init__`` or a self-written ``__init__``. You can
circumvent that limitation by using
``object.__setattr__(self, "attribute_name", value)``.
.. _slots: https://docs.python.org/3.5/reference/datamodel.html#slots
.. versionadded:: 16.0.0 *slots*
.. versionadded:: 16.1.0 *frozen*
.. versionadded:: 16.3.0 *str*, and support for ``__attrs_post_init__``.
.. versionchanged::
        17.1.0 *hash* supports ``None`` as a value, which is also the
        default now.
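
    A minimal usage sketch (names are illustrative)::

        import attr

        @attr.s
        class Point(object):
            x = attr.ib()
            y = attr.ib(default=0)

        p = Point(1)
        repr(p)                  # 'Point(x=1, y=0)'
        Point(1) == Point(1, 0)  # True -- the generated __eq__ compares fields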
"""
def wrap(cls):
if getattr(cls, "__class__", None) is None:
raise TypeError("attrs only works with new-style classes.")
if repr is False and str is True:
raise ValueError(
"__str__ can only be generated if a __repr__ exists."
)
if slots:
# Only need this later if we're using slots.
if these is None:
ca_list = [name
for name, attr
in cls.__dict__.items()
if isinstance(attr, _CountingAttr)]
else:
ca_list = list(iterkeys(these))
_transform_attrs(cls, these)
# Can't just re-use frozen name because Python's scoping. :(
# Can't compare function objects because Python 2 is terrible. :(
effectively_frozen = _has_frozen_superclass(cls) or frozen
if repr is True:
cls = _add_repr(cls, ns=repr_ns)
if str is True:
cls.__str__ = cls.__repr__
if cmp is True:
cls = _add_cmp(cls)
if hash is not True and hash is not False and hash is not None:
raise TypeError(
"Invalid value for hash. Must be True, False, or None."
)
elif hash is False or (hash is None and cmp is False):
pass
elif hash is True or (hash is None and cmp is True and frozen is True):
cls = _add_hash(cls)
else:
cls.__hash__ = None
if init is True:
cls = _add_init(cls, effectively_frozen)
if effectively_frozen is True:
cls.__setattr__ = _frozen_setattrs
cls.__delattr__ = _frozen_delattrs
if slots is True:
# slots and frozen require __getstate__/__setstate__ to work
cls = _add_pickle(cls)
if slots is True:
cls_dict = dict(cls.__dict__)
cls_dict["__slots__"] = tuple(ca_list)
for ca_name in ca_list:
# It might not actually be in there, e.g. if using 'these'.
cls_dict.pop(ca_name, None)
cls_dict.pop("__dict__", None)
qualname = getattr(cls, "__qualname__", None)
cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
if qualname is not None:
cls.__qualname__ = qualname
return cls
    # maybe_cls's type depends on the usage of the decorator. It's a class
    # if it's used as `@attributes` but ``None`` if used as `@attributes()`.
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
if PY2:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return (
getattr(
cls.__setattr__, "__module__", None
) == _frozen_setattrs.__module__ and
cls.__setattr__.__name__ == _frozen_setattrs.__name__
)
else:
def _has_frozen_superclass(cls):
"""
Check whether *cls* has a frozen ancestor by looking at its
__setattr__.
"""
return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _add_hash(cls, attrs=None):
"""
Add a hash method to *cls*.
"""
if attrs is None:
attrs = [a
for a in cls.__attrs_attrs__
if a.hash is True or (a.hash is None and a.cmp is True)]
def hash_(self):
"""
Automatically created by attrs.
"""
return hash(_attrs_to_tuple(self, attrs))
cls.__hash__ = hash_
return cls
def _add_cmp(cls, attrs=None):
"""
Add comparison methods to *cls*.
"""
if attrs is None:
attrs = [a for a in cls.__attrs_attrs__ if a.cmp]
def attrs_to_tuple(obj):
"""
Save us some typing.
"""
return _attrs_to_tuple(obj, attrs)
def eq(self, other):
"""
Automatically created by attrs.
"""
if other.__class__ is self.__class__:
return attrs_to_tuple(self) == attrs_to_tuple(other)
else:
return NotImplemented
def ne(self, other):
"""
Automatically created by attrs.
"""
result = eq(self, other)
if result is NotImplemented:
return NotImplemented
else:
return not result
def lt(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) < attrs_to_tuple(other)
else:
return NotImplemented
def le(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) <= attrs_to_tuple(other)
else:
return NotImplemented
def gt(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) > attrs_to_tuple(other)
else:
return NotImplemented
def ge(self, other):
"""
Automatically created by attrs.
"""
if isinstance(other, self.__class__):
return attrs_to_tuple(self) >= attrs_to_tuple(other)
else:
return NotImplemented
cls.__eq__ = eq
cls.__ne__ = ne
cls.__lt__ = lt
cls.__le__ = le
cls.__gt__ = gt
cls.__ge__ = ge
return cls
def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = [a for a in cls.__attrs_attrs__ if a.repr]
def repr_(self):
"""
Automatically created by attrs.
"""
real_cls = self.__class__
if ns is None:
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
else:
class_name = ns + "." + real_cls.__name__
return "{0}({1})".format(
class_name,
", ".join(a.name + "=" + repr(getattr(self, a.name))
for a in attrs)
)
cls.__repr__ = repr_
return cls
def _add_init(cls, frozen):
"""
Add a __init__ method to *cls*. If *frozen* is True, make it immutable.
"""
attrs = [a for a in cls.__attrs_attrs__
if a.init or a.default is not NOTHING]
# We cache the generated init methods for the same kinds of attributes.
sha1 = hashlib.sha1()
sha1.update(repr(attrs).encode("utf-8"))
unique_filename = "<attrs generated init {0}>".format(
sha1.hexdigest()
)
script, globs = _attrs_to_script(
attrs,
frozen,
getattr(cls, "__attrs_post_init__", False),
)
locs = {}
bytecode = compile(script, unique_filename, "exec")
attr_dict = dict((a.name, a) for a in attrs)
globs.update({
"NOTHING": NOTHING,
"attr_dict": attr_dict,
})
if frozen is True:
# Save the lookup overhead in __init__ if we need to circumvent
# immutability.
globs["_cached_setattr"] = _obj_setattr
eval(bytecode, globs, locs)
init = locs["__init__"]
    # In order for debuggers like PDB to be able to step through the code,
# we add a fake linecache entry.
linecache.cache[unique_filename] = (
len(script),
None,
script.splitlines(True),
unique_filename
)
cls.__init__ = init
return cls
def _add_pickle(cls):
"""
Add pickle helpers, needed for frozen and slotted classes
"""
def _slots_getstate__(obj):
"""
Play nice with pickle.
"""
return tuple(getattr(obj, a.name) for a in fields(obj.__class__))
def _slots_setstate__(obj, state):
"""
Play nice with pickle.
"""
__bound_setattr = _obj_setattr.__get__(obj, Attribute)
for a, value in zip(fields(obj.__class__), state):
__bound_setattr(a.name, value)
cls.__getstate__ = _slots_getstate__
cls.__setstate__ = _slots_setstate__
return cls
def fields(cls):
"""
Returns the tuple of ``attrs`` attributes for a class.
The tuple also allows accessing the fields by their names (see below for
examples).
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
    :rtype: tuple (with name accessors) of :class:`attr.Attribute`
.. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
by name.
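
    A short sketch of both access styles (the class name is illustrative)::

        @attr.s
        class C(object):
            x = attr.ib()

        attr.fields(C)[0] is attr.fields(C).x   # True -- the same Attribute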
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return attrs
def validate(inst):
"""
Validate all attributes on *inst* that have a validator.
Leaves all exceptions through.
:param inst: Instance of a class with ``attrs`` attributes.
"""
if _config._run_validators is False:
return
for a in fields(inst.__class__):
v = a.validator
if v is not None:
v(inst, a, getattr(inst, a.name))
def _attrs_to_script(attrs, frozen, post_init):
"""
Return a script of an initializer for *attrs* and a dict of globals.
The globals are expected by the generated script.
If *frozen* is True, we cannot set the attributes directly so we use
a cached ``object.__setattr__``.
"""
lines = []
if frozen is True:
lines.append(
# Circumvent the __setattr__ descriptor to save one lookup per
# assignment.
"_setattr = _cached_setattr.__get__(self, self.__class__)"
)
def fmt_setter(attr_name, value_var):
return "_setattr('%(attr_name)s', %(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_convert_pat.format(attr_name)
return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
else:
def fmt_setter(attr_name, value):
return "self.%(attr_name)s = %(value)s" % {
"attr_name": attr_name,
"value": value,
}
def fmt_setter_with_converter(attr_name, value_var):
conv_name = _init_convert_pat.format(attr_name)
return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
"attr_name": attr_name,
"value_var": value_var,
"conv": conv_name,
}
args = []
attrs_to_validate = []
# This is a dictionary of names to validator and converter callables.
# Injecting this into __init__ globals lets us avoid lookups.
names_for_globals = {}
for a in attrs:
if a.validator:
attrs_to_validate.append(a)
attr_name = a.name
arg_name = a.name.lstrip("_")
has_factory = isinstance(a.default, Factory)
if has_factory and a.default.takes_self:
maybe_self = "self"
else:
maybe_self = ""
if a.init is False:
if has_factory:
init_factory_name = _init_factory_pat.format(a.name)
if a.convert is not None:
lines.append(fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)))
conv_name = _init_convert_pat.format(a.name)
names_for_globals[conv_name] = a.convert
else:
lines.append(fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
if a.convert is not None:
lines.append(fmt_setter_with_converter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
conv_name = _init_convert_pat.format(a.name)
names_for_globals[conv_name] = a.convert
else:
lines.append(fmt_setter(
attr_name,
"attr_dict['{attr_name}'].default"
.format(attr_name=attr_name)
))
elif a.default is not NOTHING and not has_factory:
args.append(
"{arg_name}=attr_dict['{attr_name}'].default".format(
arg_name=arg_name,
attr_name=attr_name,
)
)
if a.convert is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(fmt_setter(attr_name, arg_name))
elif has_factory:
args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
lines.append("if {arg_name} is not NOTHING:"
.format(arg_name=arg_name))
init_factory_name = _init_factory_pat.format(a.name)
if a.convert is not None:
lines.append(" " + fmt_setter_with_converter(attr_name,
arg_name))
lines.append("else:")
lines.append(" " + fmt_setter_with_converter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(" " + fmt_setter(attr_name, arg_name))
lines.append("else:")
lines.append(" " + fmt_setter(
attr_name,
init_factory_name + "({0})".format(maybe_self)
))
names_for_globals[init_factory_name] = a.default.factory
else:
args.append(arg_name)
if a.convert is not None:
lines.append(fmt_setter_with_converter(attr_name, arg_name))
names_for_globals[_init_convert_pat.format(a.name)] = a.convert
else:
lines.append(fmt_setter(attr_name, arg_name))
if attrs_to_validate: # we can skip this if there are no validators.
names_for_globals["_config"] = _config
lines.append("if _config._run_validators is True:")
for a in attrs_to_validate:
val_name = "__attr_validator_{}".format(a.name)
attr_name = "__attr_{}".format(a.name)
lines.append(" {}(self, {}, self.{})".format(
val_name, attr_name, a.name))
names_for_globals[val_name] = a.validator
names_for_globals[attr_name] = a
if post_init:
lines.append("self.__attrs_post_init__()")
return """\
def __init__(self, {args}):
{lines}
""".format(
args=", ".join(args),
lines="\n ".join(lines) if lines else "pass",
), names_for_globals
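# For a plain, non-frozen class with a single mandatory attribute ``x`` and no
# converter, the generated script is roughly:
#
#     def __init__(self, x):
#         self.x = x
#
# Defaults pull from ``attr_dict``, factories are injected as globals named by
# ``_init_factory_pat``, and frozen classes assign via ``_setattr`` instead.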
class Attribute(object):
"""
*Read-only* representation of an attribute.
:attribute name: The name of the attribute.
Plus *all* arguments of :func:`attr.ib`.
"""
__slots__ = (
"name", "default", "validator", "repr", "cmp", "hash", "init",
"convert", "metadata",
)
def __init__(self, name, default, validator, repr, cmp, hash, init,
convert=None, metadata=None):
# Cache this descriptor here to speed things up later.
bound_setattr = _obj_setattr.__get__(self, Attribute)
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("cmp", cmp)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("convert", convert)
bound_setattr("metadata", (metadata_proxy(metadata) if metadata
else _empty_metadata_singleton))
def __setattr__(self, name, value):
raise FrozenInstanceError()
@classmethod
def from_counting_attr(cls, name, ca):
inst_dict = {
k: getattr(ca, k)
for k
in Attribute.__slots__
if k not in (
"name", "validator", "default",
) # exclude methods
}
return cls(name=name, validator=ca._validator, default=ca._default,
**inst_dict)
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(getattr(self, name) if name != "metadata"
else dict(self.metadata)
for name in self.__slots__)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
bound_setattr = _obj_setattr.__get__(self, Attribute)
for name, value in zip(self.__slots__, state):
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(name, metadata_proxy(value) if value else
_empty_metadata_singleton)
_a = [Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=(name != "metadata"), init=True)
for name in Attribute.__slots__]
Attribute = _add_hash(
_add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
attrs=[a for a in _a if a.hash]
)
class _CountingAttr(object):
"""
Intermediate representation of attributes that uses a counter to preserve
the order in which the attributes have been defined.
    *Internal* data structure of the attrs library. Running into it is most
likely the result of a bug like a forgotten `@attr.s` decorator.
"""
__slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
"metadata", "_validator", "convert")
__attrs_attrs__ = tuple(
Attribute(name=name, default=NOTHING, validator=None,
repr=True, cmp=True, hash=True, init=True)
for name
in ("counter", "_default", "repr", "cmp", "hash", "init",)
) + (
Attribute(name="metadata", default=None, validator=None,
repr=True, cmp=True, hash=False, init=True),
)
cls_counter = 0
def __init__(self, default, validator, repr, cmp, hash, init, convert,
metadata):
_CountingAttr.cls_counter += 1
self.counter = _CountingAttr.cls_counter
self._default = default
# If validator is a list/tuple, wrap it using helper validator.
if validator and isinstance(validator, (list, tuple)):
self._validator = and_(*validator)
else:
self._validator = validator
self.repr = repr
self.cmp = cmp
self.hash = hash
self.init = init
self.convert = convert
self.metadata = metadata
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth
def default(self, meth):
"""
Decorator that allows to set the default for an attribute.
Returns *meth* unchanged.
:raises DefaultAlreadySetError: If default has been set before.
.. versionadded:: 17.1.0
"""
if self._default is not NOTHING:
raise DefaultAlreadySetError()
self._default = Factory(meth, takes_self=True)
return meth
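# Typical use of the two decorators above, via the public API:
#
#     @attr.s
#     class C(object):
#         x = attr.ib()
#
#         @x.validator
#         def _check_x(self, attribute, value):
#             if value < 0:
#                 raise ValueError("x must be >= 0")
#
#         @x.default
#         def _default_x(self):
#             return 0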
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
@attributes(slots=True, init=False)
class Factory(object):
"""
Stores a factory callable.
If passed as the default value to :func:`attr.ib`, the factory is used to
generate a new value.
:param callable factory: A callable that takes either none or exactly one
mandatory positional argument depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
"""
factory = attr()
takes_self = attr()
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
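# For illustration: ``attr.ib(default=Factory(list))`` gives every instance a
# fresh list, while ``Factory(lambda self: self.x * 2, takes_self=True)``
# receives the partially initialized instance as its only argument.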
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
:type attrs: :class:`list` or :class:`dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = dict((a, attr()) for a in attrs)
else:
raise TypeError("attrs argument must be a dict or a list.")
return attributes(**attributes_arguments)(type(name, bases, cls_dict))
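# For illustration (using the public ``attr.ib`` alias for the attribute
# factory):
#
#     C1 = make_class("C1", ["x", "y"])
#     C2 = make_class("C2", {"x": attr.ib(default=42)}, bases=(C1,))
#
# Both are ordinary ``attrs`` classes with generated ``__init__``/``__repr__``.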
# These are required within this module, so we define them here and merely
# import them into .validators.
@attributes(slots=True, hash=True)
class _AndValidator(object):
"""
Compose many validators to a single one.
"""
_validators = attr()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
:param validators: Arbitrary number of validators.
:type validators: callables
.. versionadded:: 17.1.0
"""
vals = []
for validator in validators:
vals.extend(
validator._validators if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
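# For illustration:
# ``and_(instance_of(int), lambda inst, attr, value: value > 0)`` yields one
# composed validator; passing an existing ``_AndValidator`` flattens its
# wrapped validators instead of nesting them.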
| mit | -3,406,400,153,928,851,000 | 32.639282 | 79 | 0.56316 | false |
KaranToor/MA450 | google-cloud-sdk/platform/gsutil/gslib/__main__.py | 3 | 30144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for Google Cloud Storage command line tool."""
from __future__ import absolute_import
import ConfigParser
import datetime
import errno
import getopt
import logging
import os
import re
import signal
import socket
import sys
import textwrap
import traceback
# Load the gsutil version number and append it to boto.UserAgent so the value is
# set before anything instantiates boto. This has to run after THIRD_PARTY_DIR
# is modified (done in gsutil.py) but before any calls are made that would cause
# boto.s3.Connection to be loaded - otherwise the Connection class would end up
# with a static reference to the pre-modified version of the UserAgent field,
# so boto requests would not include gsutil/version# in the UserAgent string.
import boto
import gslib
# TODO: gsutil-beta: Cloud SDK scans for this string and performs
# substitution; ensure this works with both apitools and boto.
boto.UserAgent += ' gsutil/%s (%s)' % (gslib.VERSION, sys.platform)
if os.environ.get('CLOUDSDK_WRAPPER') == '1':
boto.UserAgent += ' google-cloud-sdk'
if os.environ.get('CLOUDSDK_VERSION'):
boto.UserAgent += '/%s' % os.environ.get('CLOUDSDK_VERSION')
# pylint: disable=g-import-not-at-top
# This module also imports boto, and will override the UserAgent global variable
# if imported above.
from gslib import metrics
if metrics.MetricsCollector.IsDisabled():
boto.UserAgent += ' analytics/disabled'
else:
boto.UserAgent += ' analytics/enabled'
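# At this point boto.UserAgent looks something like
# "Boto/<ver> Python/<ver> <os> gsutil/<ver> (<sys.platform>) google-cloud-sdk/<ver> analytics/enabled",
# with the exact fields depending on the boto version, the Cloud SDK wrapper,
# and the analytics opt-out.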
# pylint: disable=g-bad-import-order
import httplib2
import oauth2client
from gslib import wildcard_iterator
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import ProjectIdException
from gslib.cloud_api import ServiceException
from gslib.command_runner import CommandRunner
import gslib.exception
from gslib.exception import CommandException
from gslib.exception import ControlCException
import apitools.base.py.exceptions as apitools_exceptions
from gslib.util import CreateLock
from gslib.util import DEBUGLEVEL_DUMP_REQUESTS
from gslib.util import DEBUGLEVEL_DUMP_REQUESTS_AND_PAYLOADS
from gslib.util import GetBotoConfigFileList
from gslib.util import GetCertsFile
from gslib.util import GetCleanupFiles
from gslib.util import GetGsutilClientIdAndSecret
from gslib.util import GsutilStreamHandler
from gslib.util import ProxyInfoFromEnvironmentVar
from gslib.util import UTF8
from gslib.sig_handling import GetCaughtSignals
from gslib.sig_handling import InitializeSignalHandling
from gslib.sig_handling import RegisterSignalHandler
CONFIG_KEYS_TO_REDACT = ['proxy', 'proxy_port', 'proxy_user', 'proxy_pass']
# We don't use the oauth2 authentication plugin directly; importing it here
# ensures that it's loaded and available by default when an operation requiring
# authentication is performed.
try:
# pylint: disable=unused-import,g-import-not-at-top
import gcs_oauth2_boto_plugin
except ImportError:
pass
DEBUG_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with debug output enabled.
*** Be aware that debug output includes authentication credentials.
*** Make sure to remove the value of the Authorization header for
*** each HTTP request printed to the console prior to posting to
*** a public medium such as a forum post or Stack Overflow.
***************************** WARNING *****************************
""".lstrip()
TRACE_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with trace output enabled.
*** Be aware that trace output includes authentication credentials
*** and may include the contents of any files accessed during the trace.
***************************** WARNING *****************************
""".lstrip()
HTTP_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with the "https_validate_certificates" config
*** variable set to False. This option should always be set to True in
*** production environments to protect against man-in-the-middle attacks,
*** and leaking of user data.
***************************** WARNING *****************************
""".lstrip()
debug = 0
test_exception_traces = False
# pylint: disable=unused-argument
def _CleanupSignalHandler(signal_num, cur_stack_frame):
"""Cleans up if process is killed with SIGINT, SIGQUIT or SIGTERM."""
_Cleanup()
if gslib.util.CheckMultiprocessingAvailableAndInit().is_available:
gslib.command.TeardownMultiprocessingProcesses()
def _Cleanup():
for fname in GetCleanupFiles():
try:
os.unlink(fname)
except: # pylint: disable=bare-except
pass
def _OutputAndExit(message, exception=None):
"""Outputs message to stderr and exits gsutil with code 1.
This function should only be called in single-process, single-threaded mode.
Args:
message: Message to print to stderr.
exception: The exception that caused gsutil to fail.
"""
if debug >= DEBUGLEVEL_DUMP_REQUESTS or test_exception_traces:
stack_trace = traceback.format_exc()
err = ('DEBUG: Exception stack trace:\n %s\n%s\n' %
(re.sub('\\n', '\n ', stack_trace), message))
else:
err = '%s\n' % message
try:
sys.stderr.write(err.encode(UTF8))
except UnicodeDecodeError:
# Can happen when outputting invalid Unicode filenames.
sys.stderr.write(err)
if exception:
metrics.LogFatalError(exception)
sys.exit(1)
def _OutputUsageAndExit(command_runner):
command_runner.RunNamedCommand('help')
sys.exit(1)
class GsutilFormatter(logging.Formatter):
"""A logging.Formatter that supports logging microseconds (%f)."""
def formatTime(self, record, datefmt=None):
if datefmt:
return datetime.datetime.fromtimestamp(record.created).strftime(datefmt)
# Use default implementation if datefmt is not specified.
return super(GsutilFormatter, self).formatTime(record, datefmt=datefmt)
def _ConfigureLogging(level=logging.INFO):
"""Similar to logging.basicConfig() except it always adds a handler."""
log_format = '%(levelname)s %(asctime)s %(filename)s] %(message)s'
date_format = '%m%d %H:%M:%S.%f'
formatter = GsutilFormatter(fmt=log_format, datefmt=date_format)
handler = GsutilStreamHandler()
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(level)
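# With the format above, emitted records look roughly like:
#   INFO 0525 14:32:07.123456 command_runner.py] Running command ...
# (GsutilFormatter is needed because the stock Formatter does not render the
# %f microseconds directive.)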
def main():
InitializeSignalHandling()
# Any modules used in initializing multiprocessing variables must be
# imported after importing gslib.__main__.
# pylint: disable=redefined-outer-name,g-import-not-at-top
import gslib.boto_translation
import gslib.command
import gslib.util
from gslib.util import BOTO_IS_SECURE
from gslib.util import CERTIFICATE_VALIDATION_ENABLED
# pylint: disable=unused-variable
from gcs_oauth2_boto_plugin import oauth2_client
from apitools.base.py import credentials_lib
# pylint: enable=unused-variable
from gslib.util import CheckMultiprocessingAvailableAndInit
if CheckMultiprocessingAvailableAndInit().is_available:
# These setup methods must be called, and, on Windows, they can only be
# called from within an "if __name__ == '__main__':" block.
gslib.command.InitializeMultiprocessingVariables()
gslib.boto_translation.InitializeMultiprocessingVariables()
else:
gslib.command.InitializeThreadingVariables()
# This needs to be done after gslib.util.InitializeMultiprocessingVariables(),
# since otherwise we can't call gslib.util.CreateLock.
try:
# pylint: disable=unused-import,g-import-not-at-top
import gcs_oauth2_boto_plugin
gsutil_client_id, gsutil_client_secret = GetGsutilClientIdAndSecret()
gcs_oauth2_boto_plugin.oauth2_helper.SetFallbackClientIdAndSecret(
gsutil_client_id, gsutil_client_secret)
gcs_oauth2_boto_plugin.oauth2_helper.SetLock(CreateLock())
credentials_lib.SetCredentialsCacheFileLock(CreateLock())
except ImportError:
pass
global debug
global test_exception_traces
if not (2, 6) <= sys.version_info[:3] < (3,):
raise CommandException('gsutil requires python 2.6 or 2.7.')
# In gsutil 4.0 and beyond, we don't use the boto library for the JSON
# API. However, we still store gsutil configuration data in the .boto
# config file for compatibility with previous versions and user convenience.
# Many users have a .boto configuration file from previous versions, and it
# is useful to have all of the configuration for gsutil stored in one place.
command_runner = CommandRunner()
if not BOTO_IS_SECURE:
raise CommandException('\n'.join(textwrap.wrap(
'Your boto configuration has is_secure = False. Gsutil cannot be '
'run this way, for security reasons.')))
headers = {}
parallel_operations = False
quiet = False
version = False
debug = 0
trace_token = None
perf_trace_token = None
test_exception_traces = False
# If user enters no commands just print the usage info.
if len(sys.argv) == 1:
sys.argv.append('help')
# Change the default of the 'https_validate_certificates' boto option to
# True (it is currently False in boto).
if not boto.config.has_option('Boto', 'https_validate_certificates'):
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.setbool('Boto', 'https_validate_certificates', True)
gslib.util.certs_file_lock = CreateLock()
for signal_num in GetCaughtSignals():
RegisterSignalHandler(signal_num, _CleanupSignalHandler)
GetCertsFile()
try:
try:
opts, args = getopt.getopt(sys.argv[1:], 'dDvo:h:mq',
['debug', 'detailedDebug', 'version', 'option',
'help', 'header', 'multithreaded', 'quiet',
'testexceptiontraces', 'trace-token=',
'perf-trace-token='])
except getopt.GetoptError as e:
_HandleCommandException(CommandException(e.msg))
for o, a in opts:
if o in ('-d', '--debug'):
# Also causes boto to include httplib header output.
debug = DEBUGLEVEL_DUMP_REQUESTS
elif o in ('-D', '--detailedDebug'):
# We use debug level 3 to ask gsutil code to output more detailed
# debug output. This is a bit of a hack since it overloads the same
# flag that was originally implemented for boto use. And we use -DD
# to ask for really detailed debugging (i.e., including HTTP payload).
if debug == DEBUGLEVEL_DUMP_REQUESTS:
debug = DEBUGLEVEL_DUMP_REQUESTS_AND_PAYLOADS
else:
debug = DEBUGLEVEL_DUMP_REQUESTS
elif o in ('-?', '--help'):
_OutputUsageAndExit(command_runner)
elif o in ('-h', '--header'):
(hdr_name, _, hdr_val) = a.partition(':')
if not hdr_name:
_OutputUsageAndExit(command_runner)
headers[hdr_name.lower()] = hdr_val
elif o in ('-m', '--multithreaded'):
parallel_operations = True
elif o in ('-q', '--quiet'):
quiet = True
elif o in ('-v', '--version'):
version = True
elif o == '--perf-trace-token':
perf_trace_token = a
elif o == '--trace-token':
trace_token = a
elif o == '--testexceptiontraces': # Hidden flag for integration tests.
test_exception_traces = True
# Avoid printing extra warnings to stderr regarding long retries by
# setting the threshold very high.
gslib.util.LONG_RETRY_WARN_SEC = 3600
elif o in ('-o', '--option'):
(opt_section_name, _, opt_value) = a.partition('=')
if not opt_section_name:
_OutputUsageAndExit(command_runner)
(opt_section, _, opt_name) = opt_section_name.partition(':')
if not opt_section or not opt_name:
_OutputUsageAndExit(command_runner)
if not boto.config.has_section(opt_section):
boto.config.add_section(opt_section)
boto.config.set(opt_section, opt_name, opt_value)
metrics.LogCommandParams(global_opts=opts)
httplib2.debuglevel = debug
if trace_token:
sys.stderr.write(TRACE_WARNING)
if debug >= DEBUGLEVEL_DUMP_REQUESTS:
sys.stderr.write(DEBUG_WARNING)
_ConfigureLogging(level=logging.DEBUG)
command_runner.RunNamedCommand('ver', ['-l'])
config_items = []
try:
config_items.extend(boto.config.items('Boto'))
config_items.extend(boto.config.items('GSUtil'))
except ConfigParser.NoSectionError:
pass
for i in xrange(len(config_items)):
config_item_key = config_items[i][0]
if config_item_key in CONFIG_KEYS_TO_REDACT:
config_items[i] = (config_item_key, 'REDACTED')
sys.stderr.write('Command being run: %s\n' % ' '.join(sys.argv))
sys.stderr.write('config_file_list: %s\n' % GetBotoConfigFileList())
sys.stderr.write('config: %s\n' % str(config_items))
elif quiet:
_ConfigureLogging(level=logging.WARNING)
else:
_ConfigureLogging(level=logging.INFO)
# oauth2client uses info logging in places that would better
# correspond to gsutil's debug logging (e.g., when refreshing
# access tokens).
oauth2client.client.logger.setLevel(logging.WARNING)
if not CERTIFICATE_VALIDATION_ENABLED:
sys.stderr.write(HTTP_WARNING)
if version:
command_name = 'version'
elif not args:
command_name = 'help'
else:
command_name = args[0]
_CheckAndWarnForProxyDifferences()
if not test_exception_traces:
# Disable warning for tests, as it interferes with test stderr parsing.
_CheckAndWarnForPython26()
if os.environ.get('_ARGCOMPLETE', '0') == '1':
return _PerformTabCompletion(command_runner)
return _RunNamedCommandAndHandleExceptions(
command_runner, command_name, args=args[1:], headers=headers,
debug_level=debug, trace_token=trace_token,
parallel_operations=parallel_operations,
perf_trace_token=perf_trace_token)
finally:
_Cleanup()
def _CheckAndWarnForPython26():
if (2, 6) == sys.version_info[:2]:
sys.stderr.write('\n'.join(textwrap.wrap(
'Warning: You are running Python 2.6, which stopped receiving '
'security patches as of October 2013. gsutil will stop supporting '
'Python 2.6 on September 1, 2016. Please update your Python '
'installation to 2.7 to ensure compatibility with future gsutil '
'versions.\n')))
def _CheckAndWarnForProxyDifferences():
# If there are both boto config and environment variable config present for
# proxies, unset the environment variable and warn if it differs.
boto_port = boto.config.getint('Boto', 'proxy_port', 0)
if boto.config.get('Boto', 'proxy', None) or boto_port:
for proxy_env_var in ['http_proxy', 'https_proxy', 'HTTPS_PROXY']:
if proxy_env_var in os.environ and os.environ[proxy_env_var]:
differing_values = []
proxy_info = ProxyInfoFromEnvironmentVar(proxy_env_var)
if proxy_info.proxy_host != boto.config.get('Boto', 'proxy', None):
differing_values.append(
'Boto proxy host: "%s" differs from %s proxy host: "%s"' %
(boto.config.get('Boto', 'proxy', None), proxy_env_var,
proxy_info.proxy_host))
if (proxy_info.proxy_user !=
boto.config.get('Boto', 'proxy_user', None)):
differing_values.append(
'Boto proxy user: "%s" differs from %s proxy user: "%s"' %
(boto.config.get('Boto', 'proxy_user', None), proxy_env_var,
proxy_info.proxy_user))
if (proxy_info.proxy_pass !=
boto.config.get('Boto', 'proxy_pass', None)):
differing_values.append(
'Boto proxy password differs from %s proxy password' %
proxy_env_var)
# Only compare ports if at least one is present, since the
# boto logic for selecting default ports has not yet executed.
if ((proxy_info.proxy_port or boto_port) and
proxy_info.proxy_port != boto_port):
differing_values.append(
'Boto proxy port: "%s" differs from %s proxy port: "%s"' %
(boto_port, proxy_env_var, proxy_info.proxy_port))
if differing_values:
sys.stderr.write('\n'.join(textwrap.wrap(
'WARNING: Proxy configuration is present in both the %s '
'environment variable and boto configuration, but '
'configuration differs. boto configuration proxy values will '
'be used. Differences detected:' % proxy_env_var)))
sys.stderr.write('\n%s\n' % '\n'.join(differing_values))
# Regardless of whether the proxy configuration values matched,
# delete the environment variable so as not to confuse boto.
del os.environ[proxy_env_var]
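# For reference, an environment value such as
# "http://user:pass@proxy.example.com:3128" corresponds to the boto options
# proxy=proxy.example.com, proxy_user=user, proxy_pass=pass, proxy_port=3128
# in the [Boto] section of the config file.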
def _HandleUnknownFailure(e):
# Called if we fall through all known/handled exceptions.
_OutputAndExit(message='Failure: %s.' % e, exception=e)
def _HandleCommandException(e):
if e.informational:
_OutputAndExit(message=e.reason, exception=e)
else:
_OutputAndExit(message='CommandException: %s' % e.reason, exception=e)
# pylint: disable=unused-argument
def _HandleControlC(signal_num, cur_stack_frame):
"""Called when user hits ^C.
This function prints a brief message instead of the normal Python stack trace
(unless -D option is used).
Args:
signal_num: Signal that was caught.
cur_stack_frame: Unused.
"""
if debug >= 2:
stack_trace = ''.join(traceback.format_list(traceback.extract_stack()))
_OutputAndExit(
'DEBUG: Caught CTRL-C (signal %d) - Exception stack trace:\n'
' %s' % (signal_num, re.sub('\\n', '\n ', stack_trace)),
exception=ControlCException())
else:
_OutputAndExit('Caught CTRL-C (signal %d) - exiting' % signal_num,
exception=ControlCException())
def _HandleSigQuit(signal_num, cur_stack_frame):
r"""Called when user hits ^\, so we can force breakpoint a running gsutil."""
import pdb # pylint: disable=g-import-not-at-top
pdb.set_trace()
def _ConstructAccountProblemHelp(reason):
"""Constructs a help string for an access control error.
Args:
reason: e.reason string from caught exception.
Returns:
    Constructed help text.
"""
default_project_id = boto.config.get_value('GSUtil', 'default_project_id')
# pylint: disable=line-too-long, g-inconsistent-quotes
acct_help = (
"Your request resulted in an AccountProblem (403) error. Usually this "
"happens if you attempt to create a bucket without first having "
"enabled billing for the project you are using. Please ensure billing is "
"enabled for your project by following the instructions at "
"`Google Cloud Platform Console<https://support.google.com/cloud/answer/6158867>`. ")
if default_project_id:
acct_help += (
"In the project overview, ensure that the Project Number listed for "
"your project matches the project ID (%s) from your boto config file. "
% default_project_id)
acct_help += (
"If the above doesn't resolve your AccountProblem, please send mail to "
"[email protected] requesting assistance, noting the exact command you "
"ran, the fact that you received a 403 AccountProblem error, and your "
"project ID. Please do not post your project ID on StackOverflow. "
"Note: It's possible to use Google Cloud Storage without enabling "
"billing if you're only listing or reading objects for which you're "
"authorized, or if you're uploading objects to a bucket billed to a "
"project that has billing enabled. But if you're attempting to create "
"buckets or upload objects to a bucket owned by your own project, you "
"must first enable billing for that project.")
return acct_help
def _CheckAndHandleCredentialException(e, args):
# Provide detail to users who have no boto config file (who might previously
# have been using gsutil only for accessing publicly readable buckets and
# objects).
# pylint: disable=g-import-not-at-top
from gslib.util import HasConfiguredCredentials
if (not HasConfiguredCredentials() and
not boto.config.get_value('Tests', 'bypass_anonymous_access_warning',
False)):
# The check above allows tests to assert that we get a particular,
# expected failure, rather than always encountering this error message
# when there are no configured credentials. This allows tests to
# simulate a second user without permissions, without actually requiring
# two separate configured users.
if os.environ.get('CLOUDSDK_WRAPPER') == '1':
message = '\n'.join(textwrap.wrap(
'You are attempting to access protected data with no configured '
'credentials. Please visit '
'https://cloud.google.com/console#/project and sign up for an '
'account, and then run the "gcloud auth login" command to '
'configure gsutil to use these credentials.'))
else:
message = '\n'.join(textwrap.wrap(
'You are attempting to access protected data with no configured '
'credentials. Please visit '
'https://cloud.google.com/console#/project and sign up for an '
'account, and then run the "gsutil config" command to configure '
'gsutil to use these credentials.'))
_OutputAndExit(message=message, exception=e)
elif (e.reason and
(e.reason == 'AccountProblem' or e.reason == 'Account disabled.' or
'account for the specified project has been disabled' in e.reason)
and ','.join(args).find('gs://') != -1):
_OutputAndExit(
'\n'.join(textwrap.wrap(_ConstructAccountProblemHelp(e.reason))),
exception=e)
def _RunNamedCommandAndHandleExceptions(
command_runner, command_name, args=None, headers=None, debug_level=0,
trace_token=None, parallel_operations=False, perf_trace_token=None):
"""Runs the command and handles common exceptions."""
# pylint: disable=g-import-not-at-top
from gslib.util import GetConfigFilePath
from gslib.util import IS_WINDOWS
from gslib.util import IsRunningInteractively
try:
# Catch ^C so we can print a brief message instead of the normal Python
# stack trace. Register as a final signal handler because this handler kills
# the main gsutil process (so it must run last).
RegisterSignalHandler(signal.SIGINT, _HandleControlC, is_final_handler=True)
# Catch ^\ so we can force a breakpoint in a running gsutil.
if not IS_WINDOWS:
RegisterSignalHandler(signal.SIGQUIT, _HandleSigQuit)
return command_runner.RunNamedCommand(command_name, args, headers,
debug_level, trace_token,
parallel_operations,
perf_trace_token=perf_trace_token,
collect_analytics=True)
except AttributeError as e:
if str(e).find('secret_access_key') != -1:
_OutputAndExit(
'Missing credentials for the given URI(s). Does your '
'boto config file contain all needed credentials?',
exception=e)
else:
_OutputAndExit(message=str(e), exception=e)
except CommandException as e:
_HandleCommandException(e)
except getopt.GetoptError as e:
_HandleCommandException(CommandException(e.msg))
except boto.exception.InvalidUriError as e:
_OutputAndExit(message='InvalidUriError: %s.' % e.message, exception=e)
except gslib.exception.InvalidUrlError as e:
_OutputAndExit(message='InvalidUrlError: %s.' % e.message, exception=e)
  except boto.auth_handler.NotReadyToAuthenticate as e:
    _OutputAndExit(message='NotReadyToAuthenticate', exception=e)
except OSError as e:
_OutputAndExit(message='OSError: %s.' % e.strerror, exception=e)
except IOError as e:
if (e.errno == errno.EPIPE or (IS_WINDOWS and e.errno == errno.EINVAL)
and not IsRunningInteractively()):
# If we get a pipe error, this just means that the pipe to stdout or
# stderr is broken. This can happen if the user pipes gsutil to a command
# that doesn't use the entire output stream. Instead of raising an error,
# just swallow it up and exit cleanly.
sys.exit(0)
else:
raise
except wildcard_iterator.WildcardException as e:
_OutputAndExit(message=e.reason, exception=e)
except ProjectIdException as e:
_OutputAndExit(
'You are attempting to perform an operation that requires a '
'project id, with none configured. Please re-run '
'gsutil config and make sure to follow the instructions for '
'finding and entering your default project id.',
exception=e)
except BadRequestException as e:
if e.reason == 'MissingSecurityHeader':
_CheckAndHandleCredentialException(e, args)
_OutputAndExit(message=e, exception=e)
except AccessDeniedException as e:
_CheckAndHandleCredentialException(e, args)
_OutputAndExit(message=e, exception=e)
except ArgumentException as e:
_OutputAndExit(message=e, exception=e)
except ServiceException as e:
_OutputAndExit(message=e, exception=e)
except oauth2client.client.HttpAccessTokenRefreshError as e:
if os.environ.get('CLOUDSDK_WRAPPER') == '1':
_OutputAndExit('Your credentials are invalid. '
'Please run\n$ gcloud auth login',
exception=e)
else:
_OutputAndExit(
'Your credentials are invalid. For more help, see '
'"gsutil help creds", or re-run the gsutil config command (see '
'"gsutil help config").',
exception=e)
except apitools_exceptions.HttpError as e:
# These should usually be retried by the underlying implementation or
# wrapped by CloudApi ServiceExceptions, but if we do get them,
# print something useful.
_OutputAndExit('HttpError: %s, %s' %
(getattr(e.response, 'status', ''), e.content or ''),
exception=e)
except socket.error as e:
if e.args[0] == errno.EPIPE:
# Retrying with a smaller file (per suggestion below) works because
# the library code send loop (in boto/s3/key.py) can get through the
# entire file and then request the HTTP response before the socket
# gets closed and the response lost.
_OutputAndExit(
'Got a "Broken pipe" error. This can happen to clients using Python '
'2.x, when the server sends an error response and then closes the '
'socket (see http://bugs.python.org/issue5542). If you are trying to '
'upload a large object you might retry with a small (say 200k) '
'object, and see if you get a more specific error code.',
exception=e)
    elif e.args[0] == errno.ECONNRESET and 's3://' in ' '.join(args):
_OutputAndExit('\n'.join(textwrap.wrap(
'Got a "Connection reset by peer" error. One way this can happen is '
'when copying data to/from an S3 regional bucket. If you are using a '
'regional S3 bucket you could try re-running this command using the '
'regional S3 endpoint, for example '
's3://s3-<region>.amazonaws.com/your-bucket. For details about this '
'problem see https://github.com/boto/boto/issues/2207')),
exception=e)
else:
_HandleUnknownFailure(e)
except Exception as e: # pylint: disable=broad-except
# Check for two types of errors related to service accounts. These errors
# appear to be the same except for their messages, but they are caused by
# different problems and both have unhelpful error messages. Moreover,
# the error type belongs to PyOpenSSL, which is not necessarily installed.
if 'mac verify failure' in str(e):
_OutputAndExit(
'Encountered an error while refreshing access token. '
'If you are using a service account,\nplease verify that the '
'gs_service_key_file_password field in your config file,'
'\n%s, is correct.' % GetConfigFilePath(),
exception=e)
elif 'asn1 encoding routines' in str(e):
_OutputAndExit(
'Encountered an error while refreshing access token. '
'If you are using a service account,\nplease verify that the '
'gs_service_key_file field in your config file,\n%s, is correct.' %
GetConfigFilePath(),
exception=e)
_HandleUnknownFailure(e)
def _PerformTabCompletion(command_runner):
"""Performs gsutil-specific tab completion for the shell."""
# argparse and argcomplete are bundled with the Google Cloud SDK.
# When gsutil is invoked from the Google Cloud SDK, both should be available.
try:
import argcomplete
import argparse
except ImportError as e:
_OutputAndExit(
'A library required for performing tab completion was'
' not found.\nCause: %s' % e,
exception=e)
parser = argparse.ArgumentParser(add_help=False)
subparsers = parser.add_subparsers()
command_runner.ConfigureCommandArgumentParsers(subparsers)
argcomplete.autocomplete(parser, exit_method=sys.exit)
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 4,820,458,504,548,332,000 | 41.337079 | 91 | 0.677581 | false |
MotionFunProject/MotionFunProject | blender_files/autoload.py | 1 | 1325 | import bpy
import os
from math import radians
import time
print("Items in scene: " + str(len(bpy.context.scene.objects)));
bpy.ops.mesh.primitive_monkey_add(radius=1, view_align=False, enter_editmode=False, location=(0, 0, 0), layers=(
True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False))
bpy.ops.object.shade_smooth();
bpy.ops.object.modifier_add(type='SUBSURF');
bpy.context.object.modifiers["Subsurf"].levels = 3
context = bpy.context
scene = context.scene
ob = scene.objects.active
# ob.rotation_euler = (0, 0, radians(20))
dir_path = os.path.dirname(os.path.realpath(__file__))
print (dir_path)
f = open("/Users/juancarlosnavarrete/Desktop/BlenderEnv/cor.txt", "r")
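# cor.txt is read as comma-separated floats; each line is parsed below, but
# only the last parsed line survives the loop, and its values are consumed as
# consecutive (x, y, z) triples that key the object's location every 10 frames.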
for line in f:
inner_list = [float(elt.strip()) for elt in line.split(',')]
positions = [];
# set up motion of the monkey
i = 0;
while i < len(inner_list):
a, b, c = inner_list[i], inner_list[i + 1], inner_list[i + 2];
i = i + 3;
arr = [a, b, c];
positions.append(arr);
ob = bpy.context.active_object
frame_num = 0
for position in positions:
bpy.context.scene.frame_set(frame_num)
ob.location = position
print(position)
ob.keyframe_insert(data_path="location", index=-1)
frame_num += 10
f.close()
print('file is closed') | gpl-3.0 | 343,122,350,106,372,900 | 27.826087 | 117 | 0.682264 | false |
manderson23/NewsBlur | vendor/feedvalidator/demo/src/demo.py | 16 | 1786 | #!/usr/bin/python
"""$Id: demo.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import feedvalidator
import sys
import os
import urllib
import urllib2
import urlparse
if __name__ == '__main__':
# arg 1 is URL to validate
link = sys.argv[1:] and sys.argv[1] or 'http://www.intertwingly.net/blog/index.atom'
link = urlparse.urljoin('file:' + urllib.pathname2url(os.getcwd()) + '/', link)
try:
link = link.decode('utf-8').encode('idna')
except:
pass
print 'Validating %s' % link
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
basedir = urlparse.urljoin('file:' + curdir, ".")
try:
if link.startswith(basedir):
events = feedvalidator.validateStream(urllib.urlopen(link), firstOccurrenceOnly=1,base=link.replace(basedir,"http://www.feedvalidator.org/"))['loggedEvents']
else:
events = feedvalidator.validateURL(link, firstOccurrenceOnly=1)['loggedEvents']
except feedvalidator.logging.ValidationFailure, vf:
events = [vf.event]
# (optional) arg 2 is compatibility level
# "A" is most basic level
# "AA" mimics online validator
# "AAA" is experimental; these rules WILL change or disappear in future versions
from feedvalidator import compatibility
filter = sys.argv[2:] and sys.argv[2] or "AA"
filterFunc = getattr(compatibility, filter)
events = filterFunc(events)
from feedvalidator.formatter.text_plain import Formatter
output = Formatter(events)
if output:
print "\n".join(output)
sys.exit(1)
else:
print "No errors or warnings"
| mit | -1,737,553,560,961,960,200 | 32.698113 | 163 | 0.68757 | false |
Anaethelion/Geotrek | geotrek/trekking/serializers.py | 1 | 21245 | import copy
import datetime
import json
import gpxpy.gpx
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import translation
from django.utils.translation import get_language, ugettext_lazy as _
from django.utils.timezone import utc, make_aware
from django.utils.xmlutils import SimplerXMLGenerator
from rest_framework import serializers as rest_serializers
from mapentity.serializers import GPXSerializer, plain_text
from geotrek.common.serializers import (
PictogramSerializerMixin, ThemeSerializer,
TranslatedModelSerializer, PicturesSerializerMixin,
PublishableSerializerMixin, RecordSourceSerializer,
)
from geotrek.authent import models as authent_models
from geotrek.cirkwi.models import CirkwiTag
from geotrek.zoning.serializers import ZoningSerializerMixin
from geotrek.altimetry.serializers import AltimetrySerializerMixin
from geotrek.trekking import models as trekking_models
class TrekGPXSerializer(GPXSerializer):
def end_object(self, trek):
super(TrekGPXSerializer, self).end_object(trek)
for poi in trek.pois.all():
geom_3d = poi.geom_3d.transform(4326, clone=True) # GPX uses WGS84
wpt = gpxpy.gpx.GPXWaypoint(latitude=geom_3d.y,
longitude=geom_3d.x,
elevation=geom_3d.z)
wpt.name = u"%s: %s" % (poi.type, poi.name)
wpt.description = poi.description
self.gpx.waypoints.append(wpt)
class DifficultyLevelSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
label = rest_serializers.Field(source='difficulty')
class Meta:
model = trekking_models.DifficultyLevel
fields = ('id', 'pictogram', 'label')
class RouteSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
label = rest_serializers.Field(source='route')
class Meta:
model = trekking_models.Route
fields = ('id', 'pictogram', 'label')
class NetworkSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
name = rest_serializers.Field(source='network')
class Meta:
model = trekking_models.Route
fields = ('id', 'pictogram', 'name')
class PracticeSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
label = rest_serializers.Field(source='name')
class Meta:
model = trekking_models.Practice
fields = ('id', 'pictogram', 'label')
class AccessibilitySerializer(PictogramSerializerMixin, TranslatedModelSerializer):
label = rest_serializers.Field(source='name')
class Meta:
model = trekking_models.Accessibility
fields = ('id', 'pictogram', 'label')
class TypeSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
name = rest_serializers.Field(source='name')
class Meta:
model = trekking_models.Practice
fields = ('id', 'pictogram', 'name')
class WebLinkCategorySerializer(PictogramSerializerMixin, TranslatedModelSerializer):
class Meta:
model = trekking_models.WebLinkCategory
fields = ('id', 'pictogram', 'label')
class WebLinkSerializer(TranslatedModelSerializer):
category = WebLinkCategorySerializer()
class Meta:
model = trekking_models.WebLink
fields = ('id', 'name', 'category', 'url')
class CloseTrekSerializer(TranslatedModelSerializer):
category_id = rest_serializers.Field(source='prefixed_category_id')
class Meta:
model = trekking_models.Trek
fields = ('id', 'category_id')
class RelatedTrekSerializer(TranslatedModelSerializer):
pk = rest_serializers.Field(source='id')
slug = rest_serializers.Field(source='slug')
category_slug = rest_serializers.SerializerMethodField('get_category_slug')
class Meta:
model = trekking_models.Trek
fields = ('id', 'pk', 'slug', 'name', 'category_slug')
def get_category_slug(self, obj):
if settings.SPLIT_TREKS_CATEGORIES_BY_PRACTICE and obj.practice:
return obj.practice.slug
else:
# Translators: This is a slug (without space, accent or special char)
return _('trek')
class TrekRelationshipSerializer(rest_serializers.ModelSerializer):
published = rest_serializers.Field(source='trek_b.published')
trek = RelatedTrekSerializer(source='trek_b')
class Meta:
model = trekking_models.TrekRelationship
fields = ('has_common_departure', 'has_common_edge', 'is_circuit_step',
'trek', 'published')
class StructureSerializer(rest_serializers.ModelSerializer):
class Meta:
model = authent_models.Structure
fields = ('id', 'name')
class ChildSerializer(TranslatedModelSerializer):
class Meta:
model = trekking_models.Trek
fields = ('id', )
class TrekSerializer(PublishableSerializerMixin, PicturesSerializerMixin,
AltimetrySerializerMixin, ZoningSerializerMixin,
TranslatedModelSerializer):
duration_pretty = rest_serializers.Field(source='duration_pretty')
difficulty = DifficultyLevelSerializer()
route = RouteSerializer()
networks = NetworkSerializer(many=True)
themes = ThemeSerializer(many=True)
practice = PracticeSerializer()
usages = PracticeSerializer(source='usages', many=True) # Rando v1 compat
accessibilities = AccessibilitySerializer(many=True)
web_links = WebLinkSerializer(many=True)
relationships = TrekRelationshipSerializer(many=True, source='published_relationships')
treks = CloseTrekSerializer(many=True, source='published_treks')
source = RecordSourceSerializer()
children = rest_serializers.Field(source='children_id')
parents = rest_serializers.Field(source='parents_id')
previous = rest_serializers.Field(source='previous_id')
next = rest_serializers.Field(source='next_id')
# Idea: use rest-framework-gis
parking_location = rest_serializers.SerializerMethodField('get_parking_location')
points_reference = rest_serializers.SerializerMethodField('get_points_reference')
gpx = rest_serializers.SerializerMethodField('get_gpx_url')
kml = rest_serializers.SerializerMethodField('get_kml_url')
structure = StructureSerializer()
# For consistency with touristic contents
type1 = TypeSerializer(source='usages', many=True)
type2 = TypeSerializer(source='accessibilities', many=True)
category = rest_serializers.SerializerMethodField('get_category')
# Method called to retrieve relevant pictures based on settings
pictures = rest_serializers.SerializerMethodField('get_pictures')
def __init__(self, instance=None, *args, **kwargs):
# duplicate each trek for each one of its accessibilities
if instance and hasattr(instance, '__iter__') and settings.SPLIT_TREKS_CATEGORIES_BY_ACCESSIBILITY:
treks = []
for trek in instance:
treks.append(trek)
for accessibility in trek.accessibilities.all():
clone = copy.copy(trek)
clone.accessibility = accessibility
treks.append(clone)
instance = treks
super(TrekSerializer, self).__init__(instance, *args, **kwargs)
from geotrek.tourism import serializers as tourism_serializers
if settings.SPLIT_TREKS_CATEGORIES_BY_PRACTICE:
del self.fields['type1']
if settings.SPLIT_TREKS_CATEGORIES_BY_ACCESSIBILITY:
del self.fields['type2']
self.fields['information_desks'] = tourism_serializers.InformationDeskSerializer(many=True)
self.fields['touristic_contents'] = tourism_serializers.CloseTouristicContentSerializer(many=True, source='published_touristic_contents')
self.fields['touristic_events'] = tourism_serializers.CloseTouristicEventSerializer(many=True, source='published_touristic_events')
class Meta:
model = trekking_models.Trek
id_field = 'id' # By default on this model it's topo_object = OneToOneField(parent_link=True)
geo_field = 'geom'
fields = ('id', 'departure', 'arrival', 'duration',
'duration_pretty', 'description', 'description_teaser',
'networks', 'advice', 'ambiance', 'difficulty',
'information_desks', 'themes', 'practice', 'accessibilities',
'usages', 'access', 'route', 'public_transport', 'advised_parking',
'web_links', 'is_park_centered', 'disabled_infrastructure',
'parking_location', 'relationships', 'points_reference',
'gpx', 'kml', 'source', 'type1', 'type2', 'category', 'structure',
'treks', 'children', 'parents', 'previous', 'next') + \
AltimetrySerializerMixin.Meta.fields + \
ZoningSerializerMixin.Meta.fields + \
PublishableSerializerMixin.Meta.fields + \
PicturesSerializerMixin.Meta.fields
def get_pictures(self, obj):
pictures_list = []
pictures_list.extend(obj.serializable_pictures)
if settings.TREK_WITH_POIS_PICTURES:
for poi in obj.published_pois:
pictures_list.extend(poi.serializable_pictures)
return pictures_list
def get_parking_location(self, obj):
if not obj.parking_location:
return None
return obj.parking_location.transform(settings.API_SRID, clone=True).coords
def get_points_reference(self, obj):
if not obj.points_reference:
return None
geojson = obj.points_reference.transform(settings.API_SRID, clone=True).geojson
return json.loads(geojson)
def get_gpx_url(self, obj):
return reverse('trekking:trek_gpx_detail', kwargs={'lang': get_language(), 'pk': obj.pk, 'slug': obj.slug})
def get_kml_url(self, obj):
return reverse('trekking:trek_kml_detail', kwargs={'lang': get_language(), 'pk': obj.pk, 'slug': obj.slug})
def get_category(self, obj):
accessibility = getattr(obj, 'accessibility', None)
if accessibility:
data = {
'id': accessibility.prefixed_id,
'label': accessibility.name,
'pictogram': accessibility.get_pictogram_url(),
'slug': accessibility.slug,
}
elif settings.SPLIT_TREKS_CATEGORIES_BY_PRACTICE and obj.practice:
data = {
'id': obj.prefixed_category_id,
'label': obj.practice.name,
'pictogram': obj.practice.get_pictogram_url(),
'slug': obj.practice.slug,
}
else:
data = {
'id': obj.category_id_prefix,
'label': obj._meta.verbose_name,
'pictogram': '/static/trekking/trek.svg',
# Translators: This is a slug (without space, accent or special char)
'slug': _('trek'),
}
if settings.SPLIT_TREKS_CATEGORIES_BY_PRACTICE:
data['order'] = obj.practice and obj.practice.order
else:
data['order'] = settings.TREK_CATEGORY_ORDER
if not settings.SPLIT_TREKS_CATEGORIES_BY_PRACTICE:
data['type1_label'] = obj._meta.get_field('practice').verbose_name
if not settings.SPLIT_TREKS_CATEGORIES_BY_ACCESSIBILITY:
data['type2_label'] = obj._meta.get_field('accessibilities').verbose_name
return data
class POITypeSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
class Meta:
model = trekking_models.POIType
fields = ('id', 'pictogram', 'label')
class ClosePOISerializer(TranslatedModelSerializer):
slug = rest_serializers.Field(source='slug')
type = POITypeSerializer()
class Meta:
model = trekking_models.Trek
fields = ('id', 'slug', 'name', 'type')
class POISerializer(PublishableSerializerMixin, PicturesSerializerMixin,
ZoningSerializerMixin, TranslatedModelSerializer):
type = POITypeSerializer()
structure = StructureSerializer()
def __init__(self, *args, **kwargs):
super(POISerializer, self).__init__(*args, **kwargs)
from geotrek.tourism import serializers as tourism_serializers
self.fields['touristic_contents'] = tourism_serializers.CloseTouristicContentSerializer(many=True, source='published_touristic_contents')
self.fields['touristic_events'] = tourism_serializers.CloseTouristicEventSerializer(many=True, source='published_touristic_events')
class Meta:
model = trekking_models.Trek
id_field = 'id' # By default on this model it's topo_object = OneToOneField(parent_link=True)
geo_field = 'geom'
fields = ('id', 'description', 'type',) + \
('min_elevation', 'max_elevation', 'structure') + \
ZoningSerializerMixin.Meta.fields + \
PublishableSerializerMixin.Meta.fields + \
PicturesSerializerMixin.Meta.fields
class ServiceTypeSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
class Meta:
model = trekking_models.ServiceType
fields = ('id', 'pictogram', 'name')
class ServiceSerializer(rest_serializers.ModelSerializer):
type = ServiceTypeSerializer()
structure = StructureSerializer()
class Meta:
model = trekking_models.Service
id_field = 'id' # By default on this model it's topo_object = OneToOneField(parent_link=True)
geo_field = 'geom'
fields = ('id', 'type', 'structure')
def timestamp(dt):
epoch = make_aware(datetime.datetime(1970, 1, 1), utc)
return str(int((dt - epoch).total_seconds()))
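# For illustration: timestamp(make_aware(datetime.datetime(1970, 1, 2), utc))
# returns "86400" (seconds since the Unix epoch, as a string).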
class CirkwiPOISerializer:
def __init__(self, request, stream):
self.xml = SimplerXMLGenerator(stream, 'utf8')
self.request = request
self.stream = stream
def serialize_field(self, name, value, attrs={}):
if not value and not attrs:
return
value = unicode(value)
self.xml.startElement(name, attrs)
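        # Values containing markup characters are emitted as a CDATA section
        # below instead of being entity-escaped, so HTML descriptions reach
        # Cirkwi unchanged.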
if u'<' in value or u'>' in value or u'&' in value:
self.stream.write('<![CDATA[%s]]>' % value)
else:
self.xml.characters(value)
self.xml.endElement(name)
def serialize_medias(self, request, pictures):
if not pictures:
return
self.xml.startElement('medias', {})
self.xml.startElement('images', {})
for picture in pictures:
self.xml.startElement('image', {})
self.serialize_field('legende', picture['legend'])
self.serialize_field('url', request.build_absolute_uri(picture['url']))
self.serialize_field('credit', picture['author'])
self.xml.endElement('image')
self.xml.endElement('images')
self.xml.endElement('medias')
def serialize_pois(self, pois):
for poi in pois:
self.xml.startElement('poi', {
'date_creation': timestamp(poi.date_insert),
'date_modification': timestamp(poi.date_update),
'id_poi': str(poi.pk),
})
if poi.type.cirkwi:
self.xml.startElement('categories', {})
self.serialize_field('categorie', str(poi.type.cirkwi.eid), {'nom': poi.type.cirkwi.name})
self.xml.endElement('categories')
orig_lang = translation.get_language()
self.xml.startElement('informations', {})
for lang in poi.published_langs:
translation.activate(lang)
self.xml.startElement('information', {'langue': lang})
self.serialize_field('titre', poi.name)
self.serialize_field('description', plain_text(poi.description))
self.serialize_medias(self.request, poi.serializable_pictures)
self.xml.endElement('information')
translation.activate(orig_lang)
self.xml.endElement('informations')
self.xml.startElement('adresse', {})
self.xml.startElement('position', {})
coords = poi.geom.transform(4326, clone=True).coords
self.serialize_field('lat', coords[1])
self.serialize_field('lng', coords[0])
self.xml.endElement('position')
self.xml.endElement('adresse')
self.xml.endElement('poi')
def serialize(self, pois):
self.xml.startDocument()
self.xml.startElement('pois', {'version': '2'})
self.serialize_pois(pois)
self.xml.endElement('pois')
self.xml.endDocument()
class CirkwiTrekSerializer(CirkwiPOISerializer):
def serialize_additionnal_info(self, trek, name):
value = getattr(trek, name)
if not value:
return
value = plain_text(value)
self.xml.startElement('information_complementaire', {})
self.serialize_field('titre', trek._meta.get_field(name).verbose_name)
self.serialize_field('description', value)
self.xml.endElement('information_complementaire')
def serialize_trace(self, trek):
self.xml.startElement('trace', {})
for c in trek.geom.transform(4326, clone=True).coords:
self.xml.startElement('point', {})
self.serialize_field('lat', c[1])
self.serialize_field('lng', c[0])
self.xml.endElement('point')
self.xml.endElement('trace')
def serialize_locomotions(self, trek):
attrs = {}
if trek.practice and trek.practice.cirkwi:
attrs['type'] = trek.practice.cirkwi.name
attrs['id_locomotion'] = str(trek.practice.cirkwi.eid)
if trek.difficulty and trek.difficulty.cirkwi_level:
attrs['difficulte'] = str(trek.difficulty.cirkwi_level)
if trek.duration:
attrs['duree'] = str(int(trek.duration * 3600))
if attrs:
self.xml.startElement('locomotions', {})
self.serialize_field('locomotion', '', attrs)
self.xml.endElement('locomotions')
def serialize_description(self, trek):
description = trek.description_teaser
if description and trek.description:
description += u'\n\n'
description += trek.description
if description:
self.serialize_field('description', plain_text(description))
def serialize_tags(self, trek):
self.xml.startElement('tags_publics', {})
tag_ids = list(trek.themes.values_list('cirkwi_id', flat=True))
tag_ids += trek.accessibilities.values_list('cirkwi_id', flat=True)
if trek.difficulty and trek.difficulty.cirkwi_id:
tag_ids.append(trek.difficulty.cirkwi_id)
for tag in CirkwiTag.objects.filter(id__in=tag_ids):
self.serialize_field('tag_public', '', {'id': str(tag.eid), 'nom': tag.name})
self.xml.endElement('tags_publics')
# TODO: parking location (POI?), points_reference
def serialize(self, treks):
self.xml.startDocument()
self.xml.startElement('circuits', {'version': '2'})
for trek in treks:
self.xml.startElement('circuit', {
'date_creation': timestamp(trek.date_insert),
'date_modification': timestamp(trek.date_update),
'id_circuit': str(trek.pk),
})
orig_lang = translation.get_language()
self.xml.startElement('informations', {})
for lang in trek.published_langs:
translation.activate(lang)
self.xml.startElement('information', {'langue': lang})
self.serialize_field('titre', trek.name)
self.serialize_description(trek)
self.serialize_medias(self.request, trek.serializable_pictures)
self.xml.startElement('informations_complementaires', {})
self.serialize_additionnal_info(trek, 'departure')
self.serialize_additionnal_info(trek, 'arrival')
self.serialize_additionnal_info(trek, 'ambiance')
self.serialize_additionnal_info(trek, 'access')
self.serialize_additionnal_info(trek, 'disabled_infrastructure')
self.serialize_additionnal_info(trek, 'advised_parking')
self.serialize_additionnal_info(trek, 'public_transport')
self.serialize_additionnal_info(trek, 'advice')
self.xml.endElement('informations_complementaires')
self.serialize_tags(trek)
self.xml.endElement('information')
translation.activate(orig_lang)
self.xml.endElement('informations')
self.serialize_field('distance', int(trek.length))
self.serialize_locomotions(trek)
self.serialize_trace(trek)
if trek.published_pois:
self.xml.startElement('pois', {})
self.serialize_pois(trek.published_pois.transform(4326, field_name='geom'))
self.xml.endElement('pois')
self.xml.endElement('circuit')
self.xml.endElement('circuits')
self.xml.endDocument()
| bsd-2-clause | 2,639,157,288,231,134,000 | 40.413255 | 145 | 0.640056 | false |
toddheitmann/PetroPy | petropy/datasets.py | 1 | 1116 | # -*- coding: utf-8 -*-
"""
Datasets is a way to retrieve included logs with petropy. It currently
supports reading a sample log from the Permain Basin in Reagan County.
"""
import os
from .log import Log
def log_data(source):
"""
retrieves log data for a formation
Parameters
----------
source : str {'WFMP'}
source location for log data
Returns
-------
:class:`petropy.Log`
Log object of data source
Raises
------
ValueError
If source is not in dictionary key
Example
-------
>>> import petropy as ptr
>>> # reads sample Wolfcamp Log from las file
>>> log = ptr.log_data('WFMP')
"""
file_dir = os.path.dirname(__file__)
paths = {
'WFMP': os.path.join(file_dir, 'data', '42303347740000.las')
}
p = os.path.join(file_dir, 'data', 'tops.csv')
if source in paths:
las_path = paths[source]
else:
raise ValueError('%s is not valid source' % source)
log = Log(las_path)
log.tops_from_csv()
return log
| mit | -5,022,807,454,048,327,000 | 18.666667 | 70 | 0.551075 | false |
mahim97/zulip | zerver/lib/actions.py | 2 | 190670 | from typing import (
AbstractSet, Any, AnyStr, Callable, Dict, Iterable, List, Mapping, MutableMapping,
Optional, Sequence, Set, Text, Tuple, TypeVar, Union, cast
)
from mypy_extensions import TypedDict
import django.db.utils
from django.contrib.contenttypes.models import ContentType
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core import validators
from analytics.lib.counts import COUNT_STATS, do_increment_logging_stat, \
RealmCount
from zerver.lib.bugdown import (
BugdownRenderingException,
version as bugdown_version,
url_embed_preview_enabled_for_realm
)
from zerver.lib.addressee import (
Addressee,
user_profiles_from_unvalidated_emails,
)
from zerver.lib.cache import (
delete_user_profile_caches,
to_dict_cache_key_id,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.emoji import emoji_name_to_emoji_code
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.message import (
access_message,
MessageDict,
render_markdown,
)
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.retention import move_message_to_archive
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.stream_subscription import (
get_active_subscriptions_for_stream_id,
get_active_subscriptions_for_stream_ids,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
num_subscribers_for_stream_id,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.topic_mutes import (
get_topic_mutes,
add_topic_mute,
remove_topic_mute,
)
from zerver.lib.users import bulk_get_users, check_full_name
from zerver.lib.user_groups import create_user_group, access_user_group_by_id
from zerver.models import Realm, RealmEmoji, Stream, UserProfile, UserActivity, \
RealmDomain, \
Subscription, Recipient, Message, Attachment, UserMessage, RealmAuditLog, \
UserHotspot, \
Client, DefaultStream, DefaultStreamGroup, UserPresence, PushDeviceToken, \
ScheduledEmail, MAX_SUBJECT_LENGTH, \
MAX_MESSAGE_LENGTH, get_client, get_stream, get_personal_recipient, get_huddle, \
get_user_profile_by_id, PreregistrationUser, get_display_recipient, \
get_realm, bulk_get_recipients, get_stream_recipient, get_stream_recipients, \
email_allowed_for_realm, email_to_username, display_recipient_cache_key, \
get_user, get_stream_cache_key, \
UserActivityInterval, active_user_ids, get_active_streams, \
realm_filters_for_realm, RealmFilter, \
get_owned_bot_dicts, stream_name_in_use, \
get_old_unclaimed_attachments, is_cross_realm_bot_email, \
Reaction, EmailChangeStatus, CustomProfileField, \
custom_profile_fields_for_realm, get_huddle_user_ids, \
CustomProfileFieldValue, validate_attachment_request, get_system_bot, \
get_display_recipient_by_id, query_for_ids, get_huddle_recipient, \
UserGroup, UserGroupMembership, get_default_stream_groups
from zerver.lib.alert_words import alert_words_in_realm
from zerver.lib.avatar import avatar_url
from zerver.lib.stream_recipient import StreamRecipientMap
from django.db import transaction, IntegrityError, connection
from django.db.models import F, Q, Max, Sum
from django.db.models.query import QuerySet
from django.core.exceptions import ValidationError
from django.core.mail import EmailMessage
from django.utils.timezone import now as timezone_now
from confirmation.models import Confirmation, create_confirmation_link
from confirmation import settings as confirmation_settings
from six import unichr
from zerver.lib.create_user import random_api_key
from zerver.lib.timestamp import timestamp_to_datetime, datetime_to_timestamp
from zerver.lib.queue import queue_json_publish
from zerver.lib.create_user import create_user
from zerver.lib import bugdown
from zerver.lib.cache import cache_with_key, cache_set, \
user_profile_by_email_cache_key, user_profile_cache_key, \
cache_set_many, cache_delete, cache_delete_many
from zerver.decorator import statsd_increment
from zerver.lib.utils import log_statsd_event, statsd
from zerver.lib.html_diff import highlight_html_differences
from zerver.lib.alert_words import user_alert_words, add_user_alert_words, \
remove_user_alert_words, set_user_alert_words
from zerver.lib.notifications import clear_scheduled_emails, \
clear_scheduled_invitation_emails, enqueue_welcome_emails
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.exceptions import JsonableError, ErrorCode
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.upload import attachment_url_re, attachment_url_to_path_id, \
claim_attachment, delete_message_image
from zerver.lib.str_utils import NonBinaryStr, force_str
from zerver.tornado.event_queue import request_event_queue, send_event
import DNS
import ujson
import time
import traceback
import re
import datetime
import os
import platform
import logging
import itertools
from collections import defaultdict
from operator import itemgetter
# This will be used to type annotate parameters in a function if the function
# works on both str and unicode in python 2 but in python 3 it only works on str.
SizedTextIterable = Union[Sequence[Text], AbstractSet[Text]]
STREAM_ASSIGNMENT_COLORS = [
"#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
"#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
"#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
"#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
"#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
"#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
# Store an event in the log for re-importing messages
def log_event(event: MutableMapping[str, Any]) -> None:
if settings.EVENT_LOG_DIR is None:
return
if "timestamp" not in event:
event["timestamp"] = time.time()
if not os.path.exists(settings.EVENT_LOG_DIR):
os.mkdir(settings.EVENT_LOG_DIR)
template = os.path.join(settings.EVENT_LOG_DIR,
'%s.' + platform.node() +
timezone_now().strftime('.%Y-%m-%d'))
with lockfile(template % ('lock',)):
with open(template % ('events',), 'a') as log:
log.write(ujson.dumps(event) + '\n')
def can_access_stream_user_ids(stream: Stream) -> Set[int]:
# return user ids of users who can access the attributes of
# a stream, such as its name/description
if stream.is_public():
return set(active_user_ids(stream.realm_id))
else:
return private_stream_user_ids(stream.id)
def private_stream_user_ids(stream_id: int) -> Set[int]:
# TODO: Find similar queries elsewhere and de-duplicate this code.
subscriptions = get_active_subscriptions_for_stream_id(stream_id)
return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')}
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
is_private_bot = (
user_profile.default_sending_stream and
user_profile.default_sending_stream.invite_only or
user_profile.default_events_register_stream and
user_profile.default_events_register_stream.invite_only)
if is_private_bot:
return {user_profile.bot_owner_id, }
else:
users = {user.id for user in user_profile.realm.get_admin_users()}
users.add(user_profile.bot_owner_id)
return users
def realm_user_count(realm: Realm) -> int:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def get_topic_history_for_stream(user_profile: UserProfile,
recipient: Recipient) -> List[Dict[str, Any]]:
query = '''
SELECT
"zerver_message"."subject" as topic,
max("zerver_message".id) as max_message_id
FROM "zerver_message"
INNER JOIN "zerver_usermessage" ON (
"zerver_usermessage"."message_id" = "zerver_message"."id"
)
WHERE (
"zerver_usermessage"."user_profile_id" = %s AND
"zerver_message"."recipient_id" = %s
)
GROUP BY (
"zerver_message"."subject"
)
ORDER BY max("zerver_message".id) DESC
'''
cursor = connection.cursor()
cursor.execute(query, [user_profile.id, recipient.id])
rows = cursor.fetchall()
cursor.close()
canonical_topic_names = set() # type: Set[str]
history = []
for (topic_name, max_message_id) in rows:
canonical_name = topic_name.lower()
if canonical_name in canonical_topic_names:
continue
canonical_topic_names.add(canonical_name)
history.append(dict(
name=topic_name,
max_id=max_message_id))
return history
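# Illustrative sketch (not executed): the structure returned by
# get_topic_history_for_stream is a list of dicts, newest topic first, with
# the lowercased-name de-duplication applied above. The topic names and ids
# below are invented for the example.
#
#   get_topic_history_for_stream(user_profile, recipient)
#   # => [{'name': 'lunch', 'max_id': 1034},
#   #     {'name': 'standup', 'max_id': 987}]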
def send_signup_message(sender, admin_realm_signup_notifications_stream, user_profile,
internal=False, realm=None):
# type: (UserProfile, Text, UserProfile, bool, Optional[Realm]) -> None
if internal:
# When this is done using manage.py vs. the web interface
internal_blurb = " **INTERNAL SIGNUP** "
else:
internal_blurb = " "
user_count = realm_user_count(user_profile.realm)
signup_notifications_stream = user_profile.realm.get_signup_notifications_stream()
# Send notification to realm signup notifications stream if it exists
# Don't send notification for the first user in a realm
if signup_notifications_stream is not None and user_count > 1:
internal_send_message(
user_profile.realm,
sender,
"stream",
signup_notifications_stream.name,
"signups",
"%s (%s) just signed up for Zulip. (total: %i)" % (
user_profile.full_name, user_profile.email, user_count
)
)
# We also send a notification to the Zulip administrative realm
admin_realm = get_system_bot(sender).realm
try:
# Check whether the stream exists
get_stream(admin_realm_signup_notifications_stream, admin_realm)
except Stream.DoesNotExist:
# If the signups stream hasn't been created in the admin
        # realm, don't auto-create it just to send this message; do nothing.
return
internal_send_message(
admin_realm,
sender,
"stream",
admin_realm_signup_notifications_stream,
user_profile.realm.display_subdomain,
"%s <`%s`> just signed up for Zulip!%s(total: **%i**)" % (
user_profile.full_name,
user_profile.email,
internal_blurb,
user_count,
)
)
def notify_new_user(user_profile: UserProfile, internal: bool=False) -> None:
if settings.NEW_USER_BOT is not None:
send_signup_message(settings.NEW_USER_BOT, "signups", user_profile, internal)
statsd.gauge("users.signups.%s" % (user_profile.realm.string_id), 1, delta=True)
# We also clear any scheduled invitation emails to prevent them
# from being sent after the user is created.
clear_scheduled_invitation_emails(user_profile.email)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
"""Give you the last 1000 messages on your public streams, so you have
something to look at in your home view once you finish the
tutorial."""
one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
stream_ids = [stream.id for stream in streams if not stream.invite_only]
recipients = get_stream_recipients(stream_ids)
recent_messages = Message.objects.filter(recipient_id__in=recipients,
pub_date__gt=one_week_ago).order_by("-id")
message_ids_to_use = list(reversed(recent_messages.values_list('id', flat=True)[0:1000]))
if len(message_ids_to_use) == 0:
return
# Handle the race condition where a message arrives between
# bulk_add_subscriptions above and the Message query just above
already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
user_profile=user_profile).values_list("message_id",
flat=True))
ums_to_create = [UserMessage(user_profile=user_profile, message_id=message_id,
flags=UserMessage.flags.read)
for message_id in message_ids_to_use
if message_id not in already_ids]
UserMessage.objects.bulk_create(ums_to_create)
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * subscribe the user to newsletter if newsletter_data is specified
def process_new_human_user(user_profile, prereg_user=None, newsletter_data=None,
default_stream_groups=[]):
# type: (UserProfile, Optional[PreregistrationUser], Optional[Dict[str, str]], List[DefaultStreamGroup]) -> None
mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
if prereg_user is not None:
streams = prereg_user.streams.all()
acting_user = prereg_user.referred_by # type: Optional[UserProfile]
else:
streams = []
acting_user = None
# If the user's invitation didn't explicitly list some streams, we
# add the default streams
if len(streams) == 0:
streams = get_default_subs(user_profile)
for default_stream_group in default_stream_groups:
default_stream_group_streams = default_stream_group.streams.all()
for stream in default_stream_group_streams:
if stream not in streams:
streams.append(stream)
bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user)
add_new_user_history(user_profile, streams)
# mit_beta_users don't have a referred_by field
if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None \
and settings.NOTIFICATION_BOT is not None:
# This is a cross-realm private message.
internal_send_private_message(
user_profile.realm,
get_system_bot(settings.NOTIFICATION_BOT),
prereg_user.referred_by,
"%s <`%s`> accepted your invitation to join Zulip!" % (
user_profile.full_name,
user_profile.email,
)
)
# Mark any other PreregistrationUsers that are STATUS_ACTIVE as
# inactive so we can keep track of the PreregistrationUser we
# actually used for analytics
if prereg_user is not None:
PreregistrationUser.objects.filter(email__iexact=user_profile.email).exclude(
id=prereg_user.id).update(status=0)
else:
PreregistrationUser.objects.filter(email__iexact=user_profile.email).update(status=0)
notify_new_user(user_profile)
enqueue_welcome_emails(user_profile)
# We have an import loop here; it's intentional, because we want
# to keep all the onboarding code in zerver/lib/onboarding.py.
from zerver.lib.onboarding import send_initial_pms
send_initial_pms(user_profile)
if newsletter_data is not None:
# If the user was created automatically via the API, we may
# not want to register them for the newsletter
queue_json_publish(
"signups",
{
'email_address': user_profile.email,
'user_id': user_profile.id,
'merge_fields': {
'NAME': user_profile.full_name,
'REALM_ID': user_profile.realm_id,
'OPTIN_IP': newsletter_data["IP"],
'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)),
},
},
lambda event: None)
def notify_created_user(user_profile: UserProfile) -> None:
event = dict(type="realm_user", op="add",
person=dict(email=user_profile.email,
user_id=user_profile.id,
is_admin=user_profile.is_realm_admin,
full_name=user_profile.full_name,
avatar_url=avatar_url(user_profile),
timezone=user_profile.timezone,
is_bot=user_profile.is_bot))
send_event(event, active_user_ids(user_profile.realm_id))
def notify_created_bot(user_profile: UserProfile) -> None:
def stream_name(stream: Optional[Stream]) -> Optional[Text]:
if not stream:
return None
return stream.name
default_sending_stream_name = stream_name(user_profile.default_sending_stream)
default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
bot = dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name,
bot_type=user_profile.bot_type,
is_active=user_profile.is_active,
api_key=user_profile.api_key,
default_sending_stream=default_sending_stream_name,
default_events_register_stream=default_events_register_stream_name,
default_all_public_streams=user_profile.default_all_public_streams,
avatar_url=avatar_url(user_profile),
)
# Set the owner key only when the bot has an owner.
# The default bots don't have an owner. So don't
# set the owner key while reactivating them.
if user_profile.bot_owner is not None:
bot['owner'] = user_profile.bot_owner.email
event = dict(type="realm_bot", op="add", bot=bot)
send_event(event, bot_owner_user_ids(user_profile))
def do_create_user(email, password, realm, full_name, short_name,
is_realm_admin=False, bot_type=None, bot_owner=None, tos_version=None,
timezone=u"", avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
default_sending_stream=None, default_events_register_stream=None,
default_all_public_streams=None, prereg_user=None,
newsletter_data=None, default_stream_groups=[]):
# type: (Text, Optional[Text], Realm, Text, Text, bool, Optional[int], Optional[UserProfile], Optional[Text], Text, Text, Optional[Stream], Optional[Stream], bool, Optional[PreregistrationUser], Optional[Dict[str, str]], List[DefaultStreamGroup]) -> UserProfile
user_profile = create_user(email=email, password=password, realm=realm,
full_name=full_name, short_name=short_name,
is_realm_admin=is_realm_admin,
bot_type=bot_type, bot_owner=bot_owner,
tos_version=tos_version, timezone=timezone, avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams)
event_time = user_profile.date_joined
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type='user_created', event_time=event_time)
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
notify_created_user(user_profile)
if bot_type:
notify_created_bot(user_profile)
else:
process_new_human_user(user_profile, prereg_user=prereg_user,
newsletter_data=newsletter_data,
default_stream_groups=default_stream_groups)
return user_profile
def do_activate_user(user_profile: UserProfile) -> None:
user_profile.is_active = True
user_profile.is_mirror_dummy = False
user_profile.set_unusable_password()
user_profile.date_joined = timezone_now()
user_profile.tos_version = settings.TOS_VERSION
user_profile.save(update_fields=["is_active", "date_joined", "password",
"is_mirror_dummy", "tos_version"])
event_time = user_profile.date_joined
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type='user_activated', event_time=event_time)
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
# Unlike do_activate_user, this is meant for re-activating existing users,
# so it doesn't reset their password, etc.
user_profile.is_active = True
user_profile.save(update_fields=["is_active"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type='user_reactivated', event_time=event_time,
acting_user=acting_user)
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
notify_created_user(user_profile)
if user_profile.is_bot:
notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_property(realm: Realm, name: str, value: Any) -> None:
"""Takes in a realm object, the name of an attribute to update, and the
value to update.
"""
property_type = Realm.property_types[name]
assert isinstance(value, property_type), (
'Cannot update %s: %s is not an instance of %s' % (
name, value, property_type,))
setattr(realm, name, value)
realm.save(update_fields=[name])
event = dict(
type='realm',
op='update',
property=name,
value=value,
)
send_event(event, active_user_ids(realm.id))
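# Illustrative usage sketch (not executed): do_set_realm_property expects an
# attribute name from Realm.property_types; the value is type-checked by the
# assert above before being saved and broadcast. This assumes 'name' and
# 'invite_required' remain entries in Realm.property_types.
#
#   do_set_realm_property(realm, 'name', u'New Realm Name')
#   do_set_realm_property(realm, 'invite_required', True)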
def do_set_realm_authentication_methods(realm: Realm,
authentication_methods: Dict[str, bool]) -> None:
for key, value in list(authentication_methods.items()):
index = getattr(realm.authentication_methods, key).number
realm.authentication_methods.set_bit(index, int(value))
realm.save(update_fields=['authentication_methods'])
event = dict(
type="realm",
op="update_dict",
property='default',
data=dict(authentication_methods=realm.authentication_methods_dict())
)
send_event(event, active_user_ids(realm.id))
def do_set_realm_message_editing(realm: Realm,
allow_message_editing: bool,
message_content_edit_limit_seconds: int) -> None:
realm.allow_message_editing = allow_message_editing
realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
realm.save(update_fields=['allow_message_editing', 'message_content_edit_limit_seconds'])
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(allow_message_editing=allow_message_editing,
message_content_edit_limit_seconds=message_content_edit_limit_seconds),
)
send_event(event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(realm: Realm, stream: Stream, stream_id: int) -> None:
realm.notifications_stream = stream
realm.save(update_fields=['notifications_stream'])
event = dict(
type="realm",
op="update",
property="notifications_stream_id",
value=stream_id
)
send_event(event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(realm: Realm, stream: Stream,
stream_id: int) -> None:
realm.signup_notifications_stream = stream
realm.save(update_fields=['signup_notifications_stream'])
event = dict(
type="realm",
op="update",
property="signup_notifications_stream_id",
value=stream_id
)
send_event(event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm) -> None:
"""
Deactivate this realm. Do NOT deactivate the users -- we need to be able to
tell the difference between users that were intentionally deactivated,
e.g. by a realm admin, and users who can't currently use Zulip because their
realm has been deactivated.
"""
if realm.deactivated:
return
realm.deactivated = True
realm.save(update_fields=["deactivated"])
for user in active_humans_in_realm(realm):
# Don't deactivate the users, but do delete their sessions so they get
# bumped to the login screen, where they'll get a realm deactivation
# notice when they try to log in.
delete_user_sessions(user)
clear_scheduled_emails(user.id)
def do_reactivate_realm(realm: Realm) -> None:
realm.deactivated = False
realm.save(update_fields=["deactivated"])
def do_deactivate_user(user_profile: UserProfile,
acting_user: Optional[UserProfile]=None,
_cascade: bool=True) -> None:
if not user_profile.is_active:
return
user_profile.is_active = False
user_profile.save(update_fields=["is_active"])
delete_user_sessions(user_profile)
clear_scheduled_emails(user_profile.id)
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
acting_user=acting_user,
event_type='user_deactivated', event_time=event_time)
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time, increment=-1)
event = dict(type="realm_user", op="remove",
person=dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(event, active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
event = dict(type="realm_bot", op="remove",
bot=dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(event, bot_owner_user_ids(user_profile))
if _cascade:
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
for profile in bot_profiles:
do_deactivate_user(profile, acting_user=acting_user, _cascade=False)
def do_deactivate_stream(stream: Stream, log: bool=True) -> None:
# Get the affected user ids *before* we deactivate everybody.
affected_user_ids = can_access_stream_user_ids(stream)
get_active_subscriptions_for_stream_id(stream.id).update(active=False)
was_invite_only = stream.invite_only
stream.deactivated = True
stream.invite_only = True
# Preserve as much as possible the original stream name while giving it a
# special prefix that both indicates that the stream is deactivated and
# frees up the original name for reuse.
old_name = stream.name
new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
for i in range(20):
if stream_name_in_use(new_name, stream.realm_id):
            # This stream has already been deactivated; keep prepending !s until
            # we have a unique stream name or we hit the rename limit.
new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
else:
break
# If you don't have a unique name at this point, this will fail later in the
# code path.
stream.name = new_name[:Stream.MAX_NAME_LENGTH]
stream.save(update_fields=['name', 'deactivated', 'invite_only'])
# If this is a default stream, remove it, properly sending a
# notification to browser clients.
if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
do_remove_default_stream(stream)
# Remove the old stream information from remote cache.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
cache_delete(old_cache_key)
stream_dict = stream.to_dict()
stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
event = dict(type="stream", op="delete",
streams=[stream_dict])
send_event(event, affected_user_ids)
def do_change_user_email(user_profile: UserProfile, new_email: Text) -> None:
delete_user_profile_caches([user_profile])
user_profile.email = new_email
user_profile.save(update_fields=["email"])
payload = dict(user_id=user_profile.id,
new_email=new_email)
send_event(dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type='user_email_changed',
event_time=event_time)
def do_start_email_change_process(user_profile: UserProfile, new_email: Text) -> None:
old_email = user_profile.email
user_profile.email = new_email
obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email,
user_profile=user_profile, realm=user_profile.realm)
activation_url = create_confirmation_link(obj, user_profile.realm.host, Confirmation.EMAIL_CHANGE)
from zerver.context_processors import common_context
context = common_context(user_profile)
context.update({
'old_email': old_email,
'new_email': new_email,
'activate_url': activation_url
})
send_email('zerver/emails/confirm_new_email', to_email=new_email,
from_name='Zulip Account Security', from_address=FromAddress.NOREPLY,
context=context)
def compute_irc_user_fullname(email: NonBinaryStr) -> NonBinaryStr:
return email.split("@")[0] + " (IRC)"
def compute_jabber_user_fullname(email: NonBinaryStr) -> NonBinaryStr:
return email.split("@")[0] + " (XMPP)"
def compute_mit_user_fullname(email: NonBinaryStr) -> NonBinaryStr:
try:
# Input is either e.g. [email protected] or user|[email protected]
match_user = re.match(r'^([a-zA-Z0-9_.-]+)(\|.+)?@mit\.edu$', email.lower())
if match_user and match_user.group(2) is None:
answer = DNS.dnslookup(
"%s.passwd.ns.athena.mit.edu" % (match_user.group(1),),
DNS.Type.TXT)
hesiod_name = force_str(answer[0][0]).split(':')[4].split(',')[0].strip()
if hesiod_name != "":
return hesiod_name
elif match_user:
return match_user.group(1).lower() + "@" + match_user.group(2).upper()[1:]
except DNS.Base.ServerError:
pass
except Exception:
print("Error getting fullname for %s:" % (email,))
traceback.print_exc()
return email.lower()
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
timeout=3600*24*7)
def create_mirror_user_if_needed(realm: Realm, email: Text,
email_to_fullname: Callable[[Text], Text]) -> UserProfile:
try:
return get_user(email, realm)
except UserProfile.DoesNotExist:
try:
# Forge a user for this person
return create_user(email, None, realm,
email_to_fullname(email), email_to_username(email),
active=False, is_mirror_dummy=True)
except IntegrityError:
return get_user(email, realm)
def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None:
welcome_bot = get_system_bot(settings.WELCOME_BOT)
human_recipient = get_personal_recipient(message['message'].sender.id)
if Message.objects.filter(sender=welcome_bot, recipient=human_recipient).count() < 2:
internal_send_private_message(
message['realm'], welcome_bot, message['message'].sender,
"Congratulations on your first reply! :tada:\n\n"
"Feel free to continue using this space to practice your new messaging "
"skills. Or, try clicking on some of the stream names to your left!")
def render_incoming_message(message: Message,
content: Text,
user_ids: Set[int],
realm: Realm,
mention_data: Optional[bugdown.MentionData]=None,
email_gateway: Optional[bool]=False) -> Text:
realm_alert_words = alert_words_in_realm(realm)
try:
rendered_content = render_markdown(
message=message,
content=content,
realm=realm,
realm_alert_words=realm_alert_words,
user_ids=user_ids,
mention_data=mention_data,
email_gateway=email_gateway,
)
except BugdownRenderingException:
raise JsonableError(_('Unable to render message'))
return rendered_content
def get_typing_user_profiles(recipient: Recipient, sender_id: int) -> List[UserProfile]:
if recipient.type == Recipient.STREAM:
'''
        We don't support typing indicators for streams because they
        are expensive and initial user feedback was that they were too
        distracting.
'''
raise ValueError('Typing indicators not supported for streams')
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
user_ids = list({recipient.type_id, sender_id})
assert(len(user_ids) in [1, 2])
elif recipient.type == Recipient.HUDDLE:
user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError('Bad recipient type')
users = [get_user_profile_by_id(user_id) for user_id in user_ids]
return users
RecipientInfoResult = TypedDict('RecipientInfoResult', {
'active_user_ids': Set[int],
'push_notify_user_ids': Set[int],
'stream_push_user_ids': Set[int],
'um_eligible_user_ids': Set[int],
'long_term_idle_user_ids': Set[int],
'default_bot_user_ids': Set[int],
'service_bot_tuples': List[Tuple[int, int]],
})
def get_recipient_info(recipient: Recipient,
sender_id: int,
stream_topic: Optional[StreamTopicTarget],
possibly_mentioned_user_ids: Optional[Set[int]]=None) -> RecipientInfoResult:
if recipient.type == Recipient.STREAM:
# Anybody calling us w/r/t a stream message needs to supply
# stream_topic. We may eventually want to have different versions
# of this function for different message types.
assert(stream_topic is not None)
stream_push_user_ids = set() # type: Set[int]
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
message_to_user_ids = list({recipient.type_id, sender_id})
assert(len(message_to_user_ids) in [1, 2])
elif recipient.type == Recipient.STREAM:
subscription_rows = stream_topic.get_active_subscriptions().values(
'user_profile_id',
'push_notifications',
'in_home_view',
).order_by('user_profile_id')
message_to_user_ids = [
row['user_profile_id']
for row in subscription_rows
]
stream_push_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_push_notify
if row['push_notifications'] and row['in_home_view']
} - stream_topic.user_ids_muting_topic()
elif recipient.type == Recipient.HUDDLE:
message_to_user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError('Bad recipient type')
message_to_user_id_set = set(message_to_user_ids)
user_ids = set(message_to_user_id_set)
if possibly_mentioned_user_ids:
# Important note: Because we haven't rendered bugdown yet, we
# don't yet know which of these possibly-mentioned users was
# actually mentioned in the message (in other words, the
# mention syntax might have been in a code block or otherwise
# escaped). `get_ids_for` will filter these extra user rows
# for our data structures not related to bots
user_ids |= possibly_mentioned_user_ids
if user_ids:
query = UserProfile.objects.filter(
is_active=True,
).values(
'id',
'enable_online_push_notifications',
'is_bot',
'bot_type',
'long_term_idle',
)
        # query_for_ids is fast and highly optimized for large queries, and we
# need this codepath to be fast (it's part of sending messages)
query = query_for_ids(
query=query,
user_ids=sorted(list(user_ids)),
field='id'
)
rows = list(query)
else:
# TODO: We should always have at least one user_id as a recipient
# of any message we send. Right now the exception to this
# rule is `notify_new_user`, which, at least in a possibly
# contrived test scenario, can attempt to send messages
# to an inactive bot. When we plug that hole, we can avoid
# this `else` clause and just `assert(user_ids)`.
rows = []
def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
"""Only includes users on the explicit message to line"""
return {
row['id']
for row in rows
if f(row)
} & message_to_user_id_set
def is_service_bot(row: Dict[str, Any]) -> bool:
return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)
active_user_ids = get_ids_for(lambda r: True)
push_notify_user_ids = get_ids_for(
lambda r: r['enable_online_push_notifications']
)
# Service bots don't get UserMessage rows.
um_eligible_user_ids = get_ids_for(
lambda r: not is_service_bot(r)
)
long_term_idle_user_ids = get_ids_for(
lambda r: r['long_term_idle']
)
# These two bot data structures need to filter from the full set
# of users who either are receiving the message or might have been
# mentioned in it, and so can't use get_ids_for.
#
# Further in the do_send_messages code path, once
# `mentioned_user_ids` has been computed via bugdown, we'll filter
# these data structures for just those users who are either a
# direct recipient or were mentioned; for now, we're just making
# sure we have the data we need for that without extra database
# queries.
default_bot_user_ids = set([
row['id']
for row in rows
if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
])
service_bot_tuples = [
(row['id'], row['bot_type'])
for row in rows
if is_service_bot(row)
]
info = dict(
active_user_ids=active_user_ids,
push_notify_user_ids=push_notify_user_ids,
stream_push_user_ids=stream_push_user_ids,
um_eligible_user_ids=um_eligible_user_ids,
long_term_idle_user_ids=long_term_idle_user_ids,
default_bot_user_ids=default_bot_user_ids,
service_bot_tuples=service_bot_tuples
) # type: RecipientInfoResult
return info
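# Illustrative sketch (not executed) of the RecipientInfoResult shape that
# get_recipient_info returns; the user ids here are invented for the example.
#
#   info = get_recipient_info(recipient, sender_id=10, stream_topic=stream_topic)
#   # info == {
#   #     'active_user_ids': {10, 11, 12},
#   #     'push_notify_user_ids': {11},
#   #     'stream_push_user_ids': {12},
#   #     'um_eligible_user_ids': {10, 11, 12},
#   #     'long_term_idle_user_ids': set(),
#   #     'default_bot_user_ids': set(),
#   #     'service_bot_tuples': [],
#   # }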
def get_service_bot_events(sender, service_bot_tuples, mentioned_user_ids,
active_user_ids, recipient_type):
# type: (UserProfile, List[Tuple[int, int]], Set[int], Set[int], int) -> Dict[str, List[Dict[str, Any]]]
event_dict = defaultdict(list) # type: Dict[str, List[Dict[str, Any]]]
# Avoid infinite loops by preventing messages sent by bots from generating
# Service events.
if sender.is_bot:
return event_dict
for user_profile_id, bot_type in service_bot_tuples:
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
queue_name = 'outgoing_webhooks'
elif bot_type == UserProfile.EMBEDDED_BOT:
queue_name = 'embedded_bots'
else:
logging.error(
'Unexpected bot_type for Service bot id=%s: %s' %
(user_profile_id, bot_type))
continue
is_stream = (recipient_type == Recipient.STREAM)
# Important note: service_bot_tuples may contain service bots
# who were not actually mentioned in the message (e.g. if
# mention syntax for that bot appeared in a code block).
# Thus, it is important to filter any users who aren't part of
# either mentioned_user_ids (the actual mentioned users) or
# active_user_ids (the actual recipients).
#
# So even though this is implied by the logic below, we filter
        # these not-actually-mentioned users here, to help keep this
# function future-proof.
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
continue
# Mention triggers, primarily for stream messages
if user_profile_id in mentioned_user_ids:
trigger = 'mention'
        # PM triggers for personal and huddle messages
elif (not is_stream) and (user_profile_id in active_user_ids):
trigger = 'private_message'
else:
continue
event_dict[queue_name].append({
'trigger': trigger,
'user_profile_id': user_profile_id,
})
return event_dict
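# Illustrative sketch (not executed) of the mapping built by
# get_service_bot_events; the bot ids are examples only. A stream message
# mentioning an outgoing webhook bot (id 42) would yield roughly
# {'outgoing_webhooks': [{'trigger': 'mention', 'user_profile_id': 42}]},
# while a huddle that includes an embedded bot (id 43) would instead produce
# an 'embedded_bots' entry with trigger 'private_message'.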
def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]],
email_gateway: Optional[bool]=False) -> List[int]:
# Filter out messages which didn't pass internal_prep_message properly
messages = [message for message in messages_maybe_none if message is not None]
# Filter out zephyr mirror anomalies where the message was already sent
already_sent_ids = [] # type: List[int]
new_messages = [] # type: List[MutableMapping[str, Any]]
for message in messages:
if isinstance(message['message'], int):
already_sent_ids.append(message['message'])
else:
new_messages.append(message)
messages = new_messages
links_for_embed = set() # type: Set[Text]
# For consistency, changes to the default values for these gets should also be applied
# to the default args in do_send_message
for message in messages:
message['rendered_content'] = message.get('rendered_content', None)
message['stream'] = message.get('stream', None)
message['local_id'] = message.get('local_id', None)
message['sender_queue_id'] = message.get('sender_queue_id', None)
message['realm'] = message.get('realm', message['message'].sender.realm)
mention_data = bugdown.MentionData(
realm_id=message['realm'].id,
content=message['message'].content,
)
message['mention_data'] = mention_data
if message['message'].is_stream_message():
stream_id = message['message'].recipient.type_id
stream_topic = StreamTopicTarget(
stream_id=stream_id,
topic_name=message['message'].topic_name()
)
else:
stream_topic = None
info = get_recipient_info(
recipient=message['message'].recipient,
sender_id=message['message'].sender_id,
stream_topic=stream_topic,
possibly_mentioned_user_ids=mention_data.get_user_ids(),
)
message['active_user_ids'] = info['active_user_ids']
message['push_notify_user_ids'] = info['push_notify_user_ids']
message['stream_push_user_ids'] = info['stream_push_user_ids']
message['um_eligible_user_ids'] = info['um_eligible_user_ids']
message['long_term_idle_user_ids'] = info['long_term_idle_user_ids']
message['default_bot_user_ids'] = info['default_bot_user_ids']
message['service_bot_tuples'] = info['service_bot_tuples']
# Render our messages.
assert message['message'].rendered_content is None
rendered_content = render_incoming_message(
message['message'],
message['message'].content,
message['active_user_ids'],
message['realm'],
mention_data=message['mention_data'],
email_gateway=email_gateway,
)
message['message'].rendered_content = rendered_content
message['message'].rendered_content_version = bugdown_version
links_for_embed |= message['message'].links_for_preview
# Add members of the mentioned user groups into `mentions_user_ids`.
mention_data = message['mention_data']
for group_id in message['message'].mentions_user_group_ids:
members = message['mention_data'].get_group_members(group_id)
message['message'].mentions_user_ids.update(members)
'''
Once we have the actual list of mentioned ids from message
rendering, we can patch in "default bots" (aka normal bots)
who were directly mentioned in this message as eligible to
get UserMessage rows.
'''
mentioned_user_ids = message['message'].mentions_user_ids
default_bot_user_ids = message['default_bot_user_ids']
mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
message['um_eligible_user_ids'] |= mentioned_bot_user_ids
# Update calculated fields of the message
message['message'].update_calculated_fields()
# Save the message receipts in the database
user_message_flags = defaultdict(dict) # type: Dict[int, Dict[int, List[str]]]
with transaction.atomic():
Message.objects.bulk_create([message['message'] for message in messages])
ums = [] # type: List[UserMessageLite]
for message in messages:
# Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
# they will be processed later.
mentioned_user_ids = message['message'].mentions_user_ids
user_messages = create_user_messages(
message=message['message'],
um_eligible_user_ids=message['um_eligible_user_ids'],
long_term_idle_user_ids=message['long_term_idle_user_ids'],
mentioned_user_ids=mentioned_user_ids,
)
for um in user_messages:
user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
ums.extend(user_messages)
message['message'].service_queue_events = get_service_bot_events(
sender=message['message'].sender,
service_bot_tuples=message['service_bot_tuples'],
mentioned_user_ids=mentioned_user_ids,
active_user_ids=message['active_user_ids'],
recipient_type=message['message'].recipient.type,
)
bulk_insert_ums(ums)
# Claim attachments in message
for message in messages:
if Message.content_has_attachment(message['message'].content):
do_claim_attachments(message['message'])
for message in messages:
# Deliver events to the real-time push system, as well as
# enqueuing any additional processing triggered by the message.
wide_message_dict = MessageDict.wide_dict(message['message'])
user_flags = user_message_flags.get(message['message'].id, {})
sender = message['message'].sender
message_type = wide_message_dict['type']
presence_idle_user_ids = get_active_presence_idle_user_ids(
realm=sender.realm,
sender_id=sender.id,
message_type=message_type,
active_user_ids=message['active_user_ids'],
user_flags=user_flags,
)
event = dict(
type='message',
message=message['message'].id,
message_dict=wide_message_dict,
presence_idle_user_ids=presence_idle_user_ids,
)
'''
TODO: We may want to limit user_ids to only those users who have
UserMessage rows, if only for minor performance reasons.
For now we queue events for all subscribers/sendees of the
message, since downstream code may still do notifications
that don't require UserMessage rows.
Our automated tests have gotten better on this codepath,
but we may have coverage gaps, so we should be careful
about changing the next line.
'''
user_ids = message['active_user_ids'] | set(user_flags.keys())
users = [
dict(
id=user_id,
flags=user_flags.get(user_id, []),
always_push_notify=(user_id in message['push_notify_user_ids']),
stream_push_notify=(user_id in message['stream_push_user_ids']),
)
for user_id in user_ids
]
if message['message'].is_stream_message():
# Note: This is where authorization for single-stream
# get_updates happens! We only attach stream data to the
# notify new_message request if it's a public stream,
# ensuring that in the tornado server, non-public stream
# messages are only associated to their subscribed users.
if message['stream'] is None:
stream_id = message['message'].recipient.type_id
message['stream'] = Stream.objects.select_related("realm").get(id=stream_id)
assert message['stream'] is not None # assert needed because stubs for django are missing
if message['stream'].is_public():
event['realm_id'] = message['stream'].realm_id
event['stream_name'] = message['stream'].name
if message['stream'].invite_only:
event['invite_only'] = True
if message['local_id'] is not None:
event['local_id'] = message['local_id']
if message['sender_queue_id'] is not None:
event['sender_queue_id'] = message['sender_queue_id']
send_event(event, users)
if url_embed_preview_enabled_for_realm(message['message']) and links_for_embed:
event_data = {
'message_id': message['message'].id,
'message_content': message['message'].content,
'message_realm_id': message['realm'].id,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data)
if (settings.ENABLE_FEEDBACK and settings.FEEDBACK_BOT and
message['message'].recipient.type == Recipient.PERSONAL):
feedback_bot_id = get_system_bot(email=settings.FEEDBACK_BOT).id
if feedback_bot_id in message['active_user_ids']:
queue_json_publish(
'feedback_messages',
wide_message_dict,
lambda x: None
)
if message['message'].recipient.type == Recipient.PERSONAL:
welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
if (welcome_bot_id in message['active_user_ids'] and
welcome_bot_id != message['message'].sender_id):
send_welcome_bot_response(message)
for queue_name, events in message['message'].service_queue_events.items():
for event in events:
queue_json_publish(
queue_name,
{
"message": wide_message_dict,
"trigger": event['trigger'],
"user_profile_id": event["user_profile_id"],
}
)
# Note that this does not preserve the order of message ids
# returned. In practice, this shouldn't matter, as we only
# mirror single zephyr messages at a time and don't otherwise
# intermingle sending zephyr messages with other messages.
return already_sent_ids + [message['message'].id for message in messages]
class UserMessageLite:
'''
The Django ORM is too slow for bulk operations. This class
is optimized for the simple use case of inserting a bunch of
rows into zerver_usermessage.
'''
def __init__(self, user_profile_id: int, message_id: int) -> None:
self.user_profile_id = user_profile_id
self.message_id = message_id
self.flags = 0
def flags_list(self) -> List[str]:
return UserMessage.flags_list_for_flags(self.flags)
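# Illustrative sketch (not executed): UserMessageLite rows are built and
# flagged much like the ORM objects, then handed to bulk_insert_ums below.
# The ids are invented for the example.
#
#   um = UserMessageLite(user_profile_id=7, message_id=1234)
#   um.flags |= UserMessage.flags.mentioned
#   um.flags_list()  # expected to include 'mentioned'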
def create_user_messages(message: Message,
um_eligible_user_ids: Set[int],
long_term_idle_user_ids: Set[int],
mentioned_user_ids: Set[int]) -> List[UserMessageLite]:
ums_to_create = []
for user_profile_id in um_eligible_user_ids:
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message.id,
)
ums_to_create.append(um)
# These properties on the Message are set via
# render_markdown by code in the bugdown inline patterns
wildcard = message.mentions_wildcard
ids_with_alert_words = message.user_ids_with_alert_words
for um in ums_to_create:
if um.user_profile_id == message.sender.id and \
message.sent_by_human():
um.flags |= UserMessage.flags.read
if wildcard:
um.flags |= UserMessage.flags.wildcard_mentioned
if um.user_profile_id in mentioned_user_ids:
um.flags |= UserMessage.flags.mentioned
if um.user_profile_id in ids_with_alert_words:
um.flags |= UserMessage.flags.has_alert_word
user_messages = []
for um in ums_to_create:
if (um.user_profile_id in long_term_idle_user_ids and
message.is_stream_message() and
int(um.flags) == 0):
continue
user_messages.append(um)
return user_messages
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
'''
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
users shows a speedup of 0.436 -> 0.027 seconds, so we're
talking about a 15x speedup.
'''
if not ums:
return
vals = ','.join([
'(%d, %d, %d)' % (um.user_profile_id, um.message_id, um.flags)
for um in ums
])
query = '''
INSERT into
zerver_usermessage (user_profile_id, message_id, flags)
VALUES
''' + vals
with connection.cursor() as cursor:
cursor.execute(query)
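# Illustrative sketch (not executed) of the SQL that bulk_insert_ums builds
# for two rows; the ids and flag values are examples only.
#
#   INSERT into
#       zerver_usermessage (user_profile_id, message_id, flags)
#   VALUES
#   (7, 1234, 0),(8, 1234, 8)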
def notify_reaction_update(user_profile: UserProfile, message: Message,
reaction: Reaction, op: Text) -> None:
user_dict = {'user_id': user_profile.id,
'email': user_profile.email,
'full_name': user_profile.full_name}
event = {'type': 'reaction',
'op': op,
'user': user_dict,
'message_id': message.id,
'emoji_name': reaction.emoji_name,
'emoji_code': reaction.emoji_code,
'reaction_type': reaction.reaction_type} # type: Dict[str, Any]
# Update the cached message since new reaction is added.
update_to_dict_cache([message])
# Recipients for message update events, including reactions, are
# everyone who got the original message. This means reactions
# won't live-update in preview narrows, but it's the right
# performance tradeoff, since otherwise we'd need to send all
# reactions to public stream messages to every browser for every
# client in the organization, which doesn't scale.
#
# However, to ensure that reactions do live-update for any user
# who has actually participated in reacting to a message, we add a
# "historical" UserMessage row for any user who reacts to message,
# subscribing them to future notifications.
ums = UserMessage.objects.filter(message=message.id)
send_event(event, [um.user_profile_id for um in ums])
def do_add_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: Text) -> None:
(emoji_code, reaction_type) = emoji_name_to_emoji_code(user_profile.realm, emoji_name)
reaction = Reaction(user_profile=user_profile, message=message,
emoji_name=emoji_name, emoji_code=emoji_code,
reaction_type=reaction_type)
reaction.save()
notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: Text) -> None:
reaction = Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_name=emoji_name).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_add_reaction(user_profile: UserProfile, message: Message,
emoji_name: Text, emoji_code: Text, reaction_type: Text) -> None:
reaction = Reaction(user_profile=user_profile, message=message,
emoji_name=emoji_name, emoji_code=emoji_code,
reaction_type=reaction_type)
reaction.save()
notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction(user_profile: UserProfile, message: Message,
emoji_code: Text, reaction_type: Text) -> None:
reaction = Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(notification: Dict[str, Any]) -> None:
recipient_user_profiles = get_typing_user_profiles(notification['recipient'],
notification['sender'].id)
# Only deliver the notification to active user recipients
user_ids_to_notify = [profile.id for profile in recipient_user_profiles if profile.is_active]
sender_dict = {'user_id': notification['sender'].id, 'email': notification['sender'].email}
# Include a list of recipients in the event body to help identify where the typing is happening
recipient_dicts = [{'user_id': profile.id, 'email': profile.email}
for profile in recipient_user_profiles]
event = dict(
type = 'typing',
op = notification['op'],
sender = sender_dict,
recipients = recipient_dicts)
send_event(event, user_ids_to_notify)
# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender: UserProfile, notification_to: Sequence[Text],
operator: Text) -> None:
typing_notification = check_typing_notification(sender, notification_to, operator)
do_send_typing_notification(typing_notification)
# check_typing_notification:
# Returns a typing notification dict ready for sending with do_send_typing_notification
# on success; raises JsonableError if the request is malformed.
def check_typing_notification(sender: UserProfile, notification_to: Sequence[Text],
operator: Text) -> Dict[str, Any]:
if len(notification_to) == 0:
raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
elif operator not in ('start', 'stop'):
raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))
else:
try:
recipient = recipient_for_emails(notification_to, False,
sender, sender)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
if recipient.type == Recipient.STREAM:
raise ValueError('Forbidden recipient type')
return {'sender': sender, 'recipient': recipient, 'op': operator}
def stream_welcome_message(stream: Stream) -> Text:
content = _('Welcome to #**%s**.') % (stream.name,)
if stream.description:
content += '\n\n**' + _('Description') + '**: '
content += stream.description
return content
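# Illustrative sketch (not executed): for a stream named "design" with the
# description "Mockups and reviews", stream_welcome_message would produce
# roughly:
#
#   Welcome to #**design**.
#
#   **Description**: Mockups and reviews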
def prep_stream_welcome_message(stream: Stream) -> Optional[Dict[str, Any]]:
realm = stream.realm
sender = get_system_bot(settings.WELCOME_BOT)
topic = _('hello')
content = stream_welcome_message(stream)
message = internal_prep_stream_message(
realm=realm,
sender=sender,
stream_name=stream.name,
topic=topic,
content=content)
return message
def send_stream_creation_event(stream: Stream, user_ids: List[int]) -> None:
event = dict(type="stream", op="create",
streams=[stream.to_dict()])
send_event(event, user_ids)
def create_stream_if_needed(realm: Realm,
stream_name: Text,
invite_only: bool=False,
stream_description: Text="") -> Tuple[Stream, bool]:
(stream, created) = Stream.objects.get_or_create(
realm=realm,
name__iexact=stream_name,
defaults = dict(
name=stream_name,
description=stream_description,
invite_only=invite_only,
is_in_zephyr_realm=realm.is_zephyr_mirror_realm
)
)
if created:
Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
if stream.is_public():
send_stream_creation_event(stream, active_user_ids(stream.realm_id))
return stream, created
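# Illustrative usage sketch (not executed): create_stream_if_needed returns
# the stream plus whether it had to be created; the stream name and
# description are examples only.
#
#   stream, created = create_stream_if_needed(realm, u'engineering',
#                                             stream_description=u'All things code')
#   # `created` is False when a stream with that name (matched
#   # case-insensitively) already existed in the realm.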
def create_streams_if_needed(realm: Realm,
stream_dicts: List[Mapping[str, Any]]) -> Tuple[List[Stream], List[Stream]]:
"""Note that stream_dict["name"] is assumed to already be stripped of
whitespace"""
added_streams = [] # type: List[Stream]
existing_streams = [] # type: List[Stream]
for stream_dict in stream_dicts:
stream, created = create_stream_if_needed(realm,
stream_dict["name"],
invite_only=stream_dict.get("invite_only", False),
stream_description=stream_dict.get("description", ""))
if created:
added_streams.append(stream)
else:
existing_streams.append(stream)
return added_streams, existing_streams
def get_recipient_from_user_ids(recipient_profile_ids: Set[int],
not_forged_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile) -> Recipient:
# Avoid mutating the passed in set of recipient_profile_ids.
recipient_profile_ids = set(recipient_profile_ids)
# If the private message is just between the sender and
# another person, force it to be a personal internally
if not_forged_mirror_message:
assert forwarder_user_profile is not None
if forwarder_user_profile.id not in recipient_profile_ids:
raise ValidationError(_("User not authorized for this query"))
if (len(recipient_profile_ids) == 2 and sender.id in recipient_profile_ids):
recipient_profile_ids.remove(sender.id)
if len(recipient_profile_ids) > 1:
# Make sure the sender is included in huddle messages
recipient_profile_ids.add(sender.id)
return get_huddle_recipient(recipient_profile_ids)
else:
return get_personal_recipient(list(recipient_profile_ids)[0])
def validate_recipient_user_profiles(user_profiles: List[UserProfile],
sender: UserProfile) -> Set[int]:
recipient_profile_ids = set()
# We exempt cross-realm bots from the check that all the recipients
# are in the same realm.
realms = set()
if not is_cross_realm_bot_email(sender.email):
realms.add(sender.realm_id)
for user_profile in user_profiles:
if (not user_profile.is_active and not user_profile.is_mirror_dummy) or \
user_profile.realm.deactivated:
raise ValidationError(_("'%s' is no longer using Zulip.") % (user_profile.email,))
recipient_profile_ids.add(user_profile.id)
if not is_cross_realm_bot_email(user_profile.email):
realms.add(user_profile.realm_id)
if len(realms) > 1:
raise ValidationError(_("You can't send private messages outside of your organization."))
return recipient_profile_ids
def recipient_for_emails(emails, not_forged_mirror_message,
forwarder_user_profile, sender):
# type: (Iterable[Text], bool, Optional[UserProfile], UserProfile) -> Recipient
user_profiles = user_profiles_from_unvalidated_emails(emails, sender.realm)
return recipient_for_user_profiles(
user_profiles=user_profiles,
not_forged_mirror_message=not_forged_mirror_message,
forwarder_user_profile=forwarder_user_profile,
sender=sender
)
def recipient_for_user_profiles(user_profiles, not_forged_mirror_message,
forwarder_user_profile, sender):
# type: (List[UserProfile], bool, Optional[UserProfile], UserProfile) -> Recipient
recipient_profile_ids = validate_recipient_user_profiles(user_profiles, sender)
return get_recipient_from_user_ids(recipient_profile_ids, not_forged_mirror_message,
forwarder_user_profile, sender)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
if message.recipient.type == Recipient.HUDDLE:
# For huddle messages, we use a 10-second window because the
# timestamps aren't guaranteed to actually match between two
# copies of the same message.
time_window = datetime.timedelta(seconds=10)
else:
time_window = datetime.timedelta(seconds=0)
messages = Message.objects.filter(
sender=message.sender,
recipient=message.recipient,
content=message.content,
subject=message.subject,
sending_client=message.sending_client,
pub_date__gte=message.pub_date - time_window,
pub_date__lte=message.pub_date + time_window)
if messages.exists():
return messages[0].id
return None
def extract_recipients(s: Union[str, Iterable[Text]]) -> List[Text]:
# We try to accept multiple incoming formats for recipients.
# See test_extract_recipients() for examples of what we allow.
try:
data = ujson.loads(s) # type: ignore # This function has a super weird union argument.
except ValueError:
data = s
if isinstance(data, str):
data = data.split(',')
if not isinstance(data, list):
raise ValueError("Invalid data type for recipients")
recipients = data
# Strip recipients, and then remove any duplicates and any that
# are the empty string after being stripped.
recipients = [recipient.strip() for recipient in recipients]
return list(set(recipient for recipient in recipients if recipient))
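# A minimal sketch of the accepted formats (hypothetical addresses; see
# test_extract_recipients for the authoritative examples). Note that the final set()
# means the order of the returned list is not guaranteed:
#   extract_recipients('alice@example.com')            -> ['alice@example.com']
#   extract_recipients('a@example.com, b@example.com')  -> both addresses, order unspecified
#   extract_recipients('["a@example.com", " "]')        -> ['a@example.com']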
def check_send_stream_message(sender: UserProfile, client: Client, stream_name: Text,
topic: Text, body: Text) -> int:
addressee = Addressee.for_stream(stream_name, topic)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
def check_send_private_message(sender: UserProfile, client: Client,
receiving_user: UserProfile, body: Text) -> int:
addressee = Addressee.for_user_profile(receiving_user)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(sender, client, message_type_name, message_to,
topic_name, message_content, realm=None, forged=False,
forged_timestamp=None, forwarder_user_profile=None, local_id=None,
sender_queue_id=None):
# type: (UserProfile, Client, Text, Sequence[Text], Optional[Text], Text, Optional[Realm], bool, Optional[float], Optional[UserProfile], Optional[Text], Optional[Text]) -> int
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm, forged, forged_timestamp,
forwarder_user_profile, local_id, sender_queue_id)
return do_send_messages([message])[0]
def check_stream_name(stream_name: Text) -> None:
if stream_name.strip() == "":
raise JsonableError(_("Invalid stream name '%s'" % (stream_name)))
if len(stream_name) > Stream.MAX_NAME_LENGTH:
raise JsonableError(_("Stream name too long (limit: %s characters)" % (Stream.MAX_NAME_LENGTH)))
if set(stream_name).intersection(Stream.NAME_INVALID_CHARS):
raise JsonableError(_("Invalid characters in stream name (disallowed characters: %s)."
% ((', ').join(Stream.NAME_INVALID_CHARS))))
for i in stream_name:
if ord(i) == 0:
raise JsonableError(_("Stream name '%s' contains NULL (0x00) characters." % (stream_name)))
def check_default_stream_group_name(group_name: Text) -> None:
if group_name.strip() == "":
raise JsonableError(_("Invalid default stream group name '%s'" % (group_name)))
if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
raise JsonableError(_("Default stream group name too long (limit: %s characters)"
% (DefaultStreamGroup.MAX_NAME_LENGTH)))
for i in group_name:
if ord(i) == 0:
raise JsonableError(_("Default stream group name '%s' contains NULL (0x00) characters."
% (group_name)))
def send_pm_if_empty_stream(sender: UserProfile,
stream: Optional[Stream],
stream_name: Text,
realm: Realm) -> None:
"""If a bot sends a message to a stream that doesn't exist or has no
subscribers, sends a notification to the bot owner (if not a
cross-realm bot) so that the owner can correct the issue."""
if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
return
if not sender.is_bot or sender.bot_owner is None:
return
# Don't send these notifications for cross-realm bot messages
# (e.g. from EMAIL_GATEWAY_BOT) since the owner for
# EMAIL_GATEWAY_BOT is probably the server administrator, not
# the owner of the bot who could potentially fix the problem.
if sender.realm != realm:
return
if stream is not None:
num_subscribers = num_subscribers_for_stream_id(stream.id)
if num_subscribers > 0:
return
# We warn the user once every 5 minutes to avoid a flood of
# PMs on a misconfigured integration, re-using the
# UserProfile.last_reminder field, which is not used for bots.
last_reminder = sender.last_reminder
waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
if last_reminder and timezone_now() - last_reminder <= waitperiod:
return
if stream is None:
error_msg = "that stream does not yet exist. To create it, "
else:
# num_subscribers == 0
error_msg = "there are no subscribers to that stream. To join it, "
content = ("Hi there! We thought you'd like to know that your bot **%s** just "
"tried to send a message to stream `%s`, but %s"
"click the gear in the left-side stream list." %
(sender.full_name, stream_name, error_msg))
internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT),
sender.bot_owner, content)
sender.last_reminder = timezone_now()
sender.save(update_fields=['last_reminder'])
# check_message:
# Returns message ready for sending with do_send_message on success or the error message (string) on error.
def check_message(sender, client, addressee,
message_content_raw, realm=None, forged=False,
forged_timestamp=None, forwarder_user_profile=None, local_id=None,
sender_queue_id=None):
# type: (UserProfile, Client, Addressee, Text, Optional[Realm], bool, Optional[float], Optional[UserProfile], Optional[Text], Optional[Text]) -> Dict[str, Any]
stream = None
message_content = message_content_raw.rstrip()
if len(message_content) == 0:
raise JsonableError(_("Message must not be empty"))
if '\x00' in message_content:
raise JsonableError(_("Message must not contain null bytes"))
message_content = truncate_body(message_content)
if realm is None:
realm = sender.realm
if addressee.is_stream():
stream_name = addressee.stream_name()
stream_name = stream_name.strip()
check_stream_name(stream_name)
topic_name = addressee.topic()
if topic_name is None:
raise JsonableError(_("Missing topic"))
topic_name = topic_name.strip()
if topic_name == "":
raise JsonableError(_("Topic can't be empty"))
topic_name = truncate_topic(topic_name)
try:
stream = get_stream(stream_name, realm)
send_pm_if_empty_stream(sender, stream, stream_name, realm)
except Stream.DoesNotExist:
send_pm_if_empty_stream(sender, None, stream_name, realm)
raise JsonableError(_("Stream '%(stream_name)s' "
"does not exist") % {'stream_name': escape(stream_name)})
recipient = get_stream_recipient(stream.id)
if not stream.invite_only:
# This is a public stream
pass
elif subscribed_to_stream(sender, stream.id):
# Or it is private, but you are subscribed
pass
elif sender.is_api_super_user or (forwarder_user_profile is not None and
forwarder_user_profile.is_api_super_user):
# Or this request is being done on behalf of a super user
pass
elif sender.is_bot and (sender.bot_owner is not None and
subscribed_to_stream(sender.bot_owner, stream.id)):
# Or you're a bot and your owner is subscribed.
pass
elif sender.email == settings.WELCOME_BOT:
# The welcome bot welcomes folks to the stream.
pass
elif sender.email == settings.NEW_USER_BOT:
pass
else:
# All other cases are an error.
raise JsonableError(_("Not authorized to send to stream '%s'") % (stream.name,))
elif addressee.is_private():
user_profiles = addressee.user_profiles()
if user_profiles is None or len(user_profiles) == 0:
raise JsonableError(_("Message must have recipients"))
mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror",
"jabber_mirror", "JabberMirror"]
not_forged_mirror_message = mirror_message and not forged
try:
recipient = recipient_for_user_profiles(user_profiles, not_forged_mirror_message,
forwarder_user_profile, sender)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
else:
raise JsonableError(_("Invalid message type"))
message = Message()
message.sender = sender
message.content = message_content
message.recipient = recipient
if addressee.is_stream():
message.subject = topic_name
if forged and forged_timestamp is not None:
# Forged messages come with a timestamp
message.pub_date = timestamp_to_datetime(forged_timestamp)
else:
message.pub_date = timezone_now()
message.sending_client = client
# We render messages later in the process.
assert message.rendered_content is None
if client.name == "zephyr_mirror":
id = already_sent_mirrored_message_id(message)
if id is not None:
return {'message': id}
return {'message': message, 'stream': stream, 'local_id': local_id,
'sender_queue_id': sender_queue_id, 'realm': realm}
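# Typical usage (mirroring check_send_stream_message above): the dict returned by
# check_message is handed to do_send_messages, e.g.
#   message = check_message(sender, client, addressee, body)
#   do_send_messages([message])[0]  # returns the id of the sent message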
def _internal_prep_message(realm: Realm,
sender: UserProfile,
addressee: Addressee,
content: Text) -> Optional[Dict[str, Any]]:
"""
Creates and checks a message object, but doesn't send it or save it to the database.
The internal function that calls this can therefore batch send a bunch of created
messages together as one database query.
Call do_send_messages with a list of the return values of this method.
"""
# Truncate the content if the message is too long
if len(content) > MAX_MESSAGE_LENGTH:
content = content[0:3900] + "\n\n[message was too long and has been truncated]"
if realm is None:
raise RuntimeError("None is not a valid realm for internal_prep_message!")
if addressee.is_stream():
stream, _ = create_stream_if_needed(realm, addressee.stream_name())
try:
return check_message(sender, get_client("Internal"), addressee,
content, realm=realm)
except JsonableError as e:
logging.exception(u"Error queueing internal message by %s: %s" % (sender.email, e))
return None
def internal_prep_stream_message(realm: Realm, sender: UserProfile,
stream_name: Text, topic: Text,
content: Text) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream(stream_name, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: Text) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_user_profile(recipient_user)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_send_message(realm, sender_email, recipient_type_name, recipients,
topic_name, content, email_gateway=False):
# type: (Realm, Text, str, Text, Text, Text, Optional[bool]) -> None
"""internal_send_message should only be used where `sender_email` is a
system bot."""
# Verify the user is in fact a system bot
assert(is_cross_realm_bot_email(sender_email) or sender_email == settings.ERROR_BOT)
sender = get_system_bot(sender_email)
parsed_recipients = extract_recipients(recipients)
addressee = Addressee.legacy_build(
sender,
recipient_type_name,
parsed_recipients,
topic_name,
realm=realm)
msg = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if msg is None:
return
do_send_messages([msg], email_gateway=email_gateway)
def internal_send_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: Text) -> None:
message = internal_prep_private_message(realm, sender, recipient_user, content)
if message is None:
return
do_send_messages([message])
def internal_send_stream_message(realm: Realm, sender: UserProfile, stream_name: str,
topic: str, content: str) -> None:
message = internal_prep_stream_message(realm, sender, stream_name, topic, content)
if message is None:
return
do_send_messages([message])
def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str],
content: str) -> None:
addressee = Addressee.for_private(emails, realm)
message = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if message is None:
return
do_send_messages([message])
def pick_color(user_profile: UserProfile) -> Text:
subs = get_stream_subscriptions_for_user(user_profile).filter(active=True)
return pick_color_helper(user_profile, subs)
def pick_color_helper(user_profile: UserProfile, subs: Iterable[Subscription]) -> Text:
# These colors are shared with the palette in subs.js.
used_colors = [sub.color for sub in subs if sub.active]
available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]
if available_colors:
return available_colors[0]
else:
return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
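# Illustrative behavior: with three palette colors already in use, the next subscription
# gets the first unused color from STREAM_ASSIGNMENT_COLORS; once every color is in use,
# assignment wraps around via len(used_colors) % len(STREAM_ASSIGNMENT_COLORS).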
def validate_user_access_to_subscribers(user_profile: Optional[UserProfile],
stream: Stream) -> None:
""" Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is a Zephyr mirror realm (e.g. MIT) and the stream is not invite-only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
"""
validate_user_access_to_subscribers_helper(
user_profile,
{"realm_id": stream.realm_id,
"invite_only": stream.invite_only},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda: subscribed_to_stream(cast(UserProfile, user_profile), stream.id))
def validate_user_access_to_subscribers_helper(user_profile: Optional[UserProfile],
stream_dict: Mapping[str, Any],
check_user_subscribed: Callable[[], bool]) -> None:
""" Helper for validate_user_access_to_subscribers that doesn't require a full stream object
* check_user_subscribed reports whether the user is subscribed to the stream.
"""
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
if user_profile.realm.is_zephyr_mirror_realm and not stream_dict["invite_only"]:
raise JsonableError(_("You cannot get subscribers for public streams in this realm"))
if (stream_dict["invite_only"] and not check_user_subscribed()):
raise JsonableError(_("Unable to retrieve subscribers for invite-only stream"))
def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
user_profile: UserProfile,
sub_dict: Mapping[int, bool],
stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]:
"""sub_dict maps stream_id => whether the user is subscribed to that stream."""
target_stream_dicts = []
for stream_dict in stream_dicts:
try:
validate_user_access_to_subscribers_helper(user_profile, stream_dict,
lambda: sub_dict[stream_dict["id"]])
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
stream_ids = [stream['id'] for stream in target_stream_dicts]
stream_recipient.populate_for_stream_ids(stream_ids)
recipient_ids = sorted([
stream_recipient.recipient_id_for(stream_id)
for stream_id in stream_ids
])
result = dict((stream["id"], []) for stream in stream_dicts) # type: Dict[int, List[int]]
if not recipient_ids:
return result
'''
The raw SQL below leads to more than a 2x speedup when tested with
20k+ total subscribers. (For large realms with lots of default
streams, this function deals with LOTS of data, so it is important
to optimize.)
'''
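# Interpolating ids into the SQL below is safe from injection here because
# recipient_ids are integer ids taken from the database, not user-supplied strings.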
id_list = ', '.join(str(recipient_id) for recipient_id in recipient_ids)
query = '''
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
INNER JOIN zerver_userprofile ON
zerver_userprofile.id = zerver_subscription.user_profile_id
WHERE
zerver_subscription.recipient_id in (%s) AND
zerver_subscription.active AND
zerver_userprofile.is_active
ORDER BY
zerver_subscription.recipient_id
''' % (id_list,)
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
cursor.close()
recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict()
'''
Using groupby/itemgetter here is important for performance, at scale.
It makes it so that all interpreter overhead is just O(N) in nature.
'''
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result
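# Shape of the result (illustrative ids): {stream_id: [user_profile_id, ...], ...},
# with an empty list left in place for any stream whose subscribers the requesting
# user is not permitted to see.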
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
# TODO: Make a generic stub for QuerySet
""" Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
"""
validate_user_access_to_subscribers(requesting_user, stream)
# Note that non-active users may still have "active" subscriptions, because we
# want to be able to easily reactivate them with their old subscriptions. This
# is why the query here has to look at the UserProfile.is_active flag.
subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter(
user_profile__is_active=True
)
return subscriptions
def get_subscribers(stream: Stream,
requesting_user: Optional[UserProfile]=None) -> List[UserProfile]:
subscriptions = get_subscribers_query(stream, requesting_user).select_related()
return [subscription.user_profile for subscription in subscriptions]
def get_subscriber_emails(stream: Stream,
requesting_user: Optional[UserProfile]=None) -> List[Text]:
subscriptions_query = get_subscribers_query(stream, requesting_user)
subscriptions = subscriptions_query.values('user_profile__email')
return [subscription['user_profile__email'] for subscription in subscriptions]
def maybe_get_subscriber_emails(stream: Stream, user_profile: UserProfile) -> List[Text]:
""" Alternate version of get_subscriber_emails that takes a Stream object only
(not a name), and simply returns an empty list if unable to get a real
subscriber list (because we're on the MIT realm). """
try:
subscribers = get_subscriber_emails(stream, requesting_user=user_profile)
except JsonableError:
subscribers = []
return subscribers
def notify_subscriptions_added(user_profile: UserProfile,
sub_pairs: Iterable[Tuple[Subscription, Stream]],
stream_user_ids: Callable[[Stream], List[int]],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_added',
'user': user_profile.email,
'names': [stream.name for sub, stream in sub_pairs],
'realm': user_profile.realm.string_id})
# Send a notification to the user who subscribed.
payload = [dict(name=stream.name,
stream_id=stream.id,
in_home_view=subscription.in_home_view,
invite_only=stream.invite_only,
color=subscription.color,
email_address=encode_email_address(stream),
desktop_notifications=subscription.desktop_notifications,
audible_notifications=subscription.audible_notifications,
push_notifications=subscription.push_notifications,
description=stream.description,
pin_to_top=subscription.pin_to_top,
subscribers=stream_user_ids(stream))
for (subscription, stream) in sub_pairs]
event = dict(type="subscription", op="add",
subscriptions=payload)
send_event(event, [user_profile.id])
def get_peer_user_ids_for_stream_change(stream: Stream,
altered_user_ids: Iterable[int],
subscribed_user_ids: Iterable[int]) -> Set[int]:
'''
altered_user_ids is the user_ids that we are adding/removing
subscribed_user_ids is the already-subscribed user_ids
Based on stream policy, we notify the correct bystanders, while
not notifying altered_users (who get subscribers via another event)
'''
if stream.invite_only:
# PRIVATE STREAMS
return set(subscribed_user_ids) - set(altered_user_ids)
else:
# PUBLIC STREAMS
# We now do "peer_add" or "peer_remove" events even for streams
# users were never subscribed to, in order for the neversubscribed
# structure to stay up-to-date.
return set(active_user_ids(stream.realm_id)) - set(altered_user_ids)
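# Illustrative behavior: for a private stream, only the remaining subscribers are
# notified; for a public stream, every active user in the realm (minus the altered
# users themselves) receives the peer_add/peer_remove event.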
def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]:
stream_ids = [stream.id for stream in streams]
all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter(
user_profile__is_active=True,
).values(
'recipient__type_id',
'user_profile_id',
).order_by(
'recipient__type_id',
)
get_stream_id = itemgetter('recipient__type_id')
all_subscribers_by_stream = defaultdict(list) # type: Dict[int, List[int]]
for stream_id, rows in itertools.groupby(all_subs, get_stream_id):
user_ids = [row['user_profile_id'] for row in rows]
all_subscribers_by_stream[stream_id] = user_ids
return all_subscribers_by_stream
SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_add_subscriptions(streams: Iterable[Stream],
users: Iterable[UserProfile],
from_stream_creation: bool=False,
acting_user: Optional[UserProfile]=None) -> SubT:
users = list(users)
recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams]) # type: Mapping[int, Recipient]
recipients = [recipient.id for recipient in recipients_map.values()] # type: List[int]
stream_map = {} # type: Dict[int, Stream]
for stream in streams:
stream_map[recipients_map[stream.id].id] = stream
subs_by_user = defaultdict(list) # type: Dict[int, List[Subscription]]
all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile')
for sub in all_subs_query:
subs_by_user[sub.user_profile_id].append(sub)
already_subscribed = [] # type: List[Tuple[UserProfile, Stream]]
subs_to_activate = [] # type: List[Tuple[Subscription, Stream]]
new_subs = [] # type: List[Tuple[UserProfile, int, Stream]]
for user_profile in users:
needs_new_sub = set(recipients) # type: Set[int]
for sub in subs_by_user[user_profile.id]:
if sub.recipient_id in needs_new_sub:
needs_new_sub.remove(sub.recipient_id)
if sub.active:
already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
else:
subs_to_activate.append((sub, stream_map[sub.recipient_id]))
# Mark the sub as active, without saving, so that
# pick_color will consider this to be an active
# subscription when picking colors
sub.active = True
for recipient_id in needs_new_sub:
new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
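# Build the Subscription rows for brand-new subscriptions, assigning each user a
# color and copying their per-user stream notification defaults.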
subs_to_add = [] # type: List[Tuple[Subscription, Stream]]
for (user_profile, recipient_id, stream) in new_subs:
color = pick_color_helper(user_profile, subs_by_user[user_profile.id])
sub_to_add = Subscription(user_profile=user_profile, active=True,
color=color, recipient_id=recipient_id,
desktop_notifications=user_profile.enable_stream_desktop_notifications,
audible_notifications=user_profile.enable_stream_sounds,
push_notifications=user_profile.enable_stream_push_notifications,
)
subs_by_user[user_profile.id].append(sub_to_add)
subs_to_add.append((sub_to_add, stream))
# TODO: XXX: This transaction really needs to be done at the serializable
# transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(user_profile.realm))
Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
sub_ids = [sub.id for (sub, stream) in subs_to_activate]
Subscription.objects.filter(id__in=sub_ids).update(active=True)
occupied_streams_after = list(get_occupied_streams(user_profile.realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = Message.objects.aggregate(Max('id'))['id__max']
all_subscription_logs = [] # type: List[RealmAuditLog]
for (sub, stream) in subs_to_add:
all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type='subscription_created',
event_time=event_time))
for (sub, stream) in subs_to_activate:
all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type='subscription_activated',
event_time=event_time))
# Now that we have generated all the log objects, we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
new_occupied_streams = [stream for stream in
set(occupied_streams_after) - set(occupied_streams_before)
if not stream.invite_only]
if new_occupied_streams and not from_stream_creation:
event = dict(type="stream", op="occupy",
streams=[stream.to_dict()
for stream in new_occupied_streams])
send_event(event, active_user_ids(user_profile.realm_id))
# Notify all existing users on streams that users have joined.
# First, get all users subscribed to the streams that we care about.
# We fetch all subscription information upfront, as it's used throughout
# the following code and we want to minimize DB queries.
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]:
if stream.is_in_zephyr_realm and not stream.invite_only:
return []
user_ids = all_subscribers_by_stream[stream.id]
return user_ids
sub_tuples_by_user = defaultdict(list) # type: Dict[int, List[Tuple[Subscription, Stream]]]
new_streams = set() # type: Set[Tuple[int, int]]
for (sub, stream) in subs_to_add + subs_to_activate:
sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
new_streams.add((sub.user_profile.id, stream.id))
# We now send several types of events to notify browsers. The
# first batch is notifications to users on invite-only streams
# that the stream exists.
for stream in streams:
new_users = [user for user in users if (user.id, stream.id) in new_streams]
# Users newly added to invite-only streams need a `create`
# notification, since they didn't have the invite-only stream
# in their browser yet.
if not stream.is_public():
send_stream_creation_event(stream, [user.id for user in new_users])
# The second batch is events for the users themselves that they
# were subscribed to the new streams.
for user_profile in users:
if len(sub_tuples_by_user[user_profile.id]) == 0:
continue
sub_pairs = sub_tuples_by_user[user_profile.id]
notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids)
# The third batch is events for other users who are tracking the
# subscribers lists of streams in their browser; everyone for
# public streams and only existing subscribers for private streams.
for stream in streams:
if stream.is_in_zephyr_realm and not stream.invite_only:
continue
new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=new_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for new_user_id in new_user_ids:
event = dict(type="subscription", op="peer_add",
subscriptions=[stream.name],
user_id=new_user_id)
send_event(event, peer_user_ids)
return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
[(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
already_subscribed)
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_removed',
'user': user_profile.email,
'names': [stream.name for stream in streams],
'realm': user_profile.realm.string_id})
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove",
subscriptions=payload)
send_event(event, [user_profile.id])
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
streams: Iterable[Stream],
acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:
users = list(users)
streams = list(streams)
stream_dict = {stream.id: stream for stream in streams}
existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)
def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
stream_ids = {stream.id for stream in streams}
not_subscribed = [] # type: List[Tuple[UserProfile, Stream]]
for user_profile in users:
user_sub_stream_info = existing_subs_by_user[user_profile.id]
subscribed_stream_ids = {
stream.id
for (sub, stream) in user_sub_stream_info
}
not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
for stream_id in not_subscribed_stream_ids:
stream = stream_dict[stream_id]
not_subscribed.append((user_profile, stream))
return not_subscribed
not_subscribed = get_non_subscribed_tups()
subs_to_deactivate = [] # type: List[Tuple[Subscription, Stream]]
sub_ids_to_deactivate = [] # type: List[int]
# This loop just flattens out our data into big lists for
# bulk operations.
for tup_list in existing_subs_by_user.values():
for (sub, stream) in tup_list:
subs_to_deactivate.append((sub, stream))
sub_ids_to_deactivate.append(sub.id)
our_realm = users[0].realm
# TODO: XXX: This transaction really needs to be done at the serializable
# transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(our_realm))
Subscription.objects.filter(
id__in=sub_ids_to_deactivate,
).update(active=False)
occupied_streams_after = list(get_occupied_streams(our_realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = Message.objects.aggregate(Max('id'))['id__max']
all_subscription_logs = [] # type: List[RealmAuditLog]
for (sub, stream) in subs_to_deactivate:
all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type='subscription_deactivated',
event_time=event_time))
# Now that we have generated all the log objects, we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
new_vacant_streams = [stream for stream in
set(occupied_streams_before) - set(occupied_streams_after)]
new_vacant_private_streams = [stream for stream in new_vacant_streams
if stream.invite_only]
new_vacant_public_streams = [stream for stream in new_vacant_streams
if not stream.invite_only]
if new_vacant_public_streams:
event = dict(type="stream", op="vacate",
streams=[stream.to_dict()
for stream in new_vacant_public_streams])
send_event(event, active_user_ids(our_realm.id))
if new_vacant_private_streams:
# Deactivate any newly-vacant private streams
for stream in new_vacant_private_streams:
do_deactivate_stream(stream)
altered_user_dict = defaultdict(list) # type: Dict[int, List[UserProfile]]
streams_by_user = defaultdict(list) # type: Dict[int, List[Stream]]
for (sub, stream) in subs_to_deactivate:
streams_by_user[sub.user_profile_id].append(stream)
altered_user_dict[stream.id].append(sub.user_profile)
for user_profile in users:
if len(streams_by_user[user_profile.id]) == 0:
continue
notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
event = {'type': 'mark_stream_messages_as_read',
'user_profile_id': user_profile.id,
'stream_ids': [stream.id for stream in streams]}
queue_json_publish("deferred_work", event)
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
for stream in streams:
if stream.is_in_zephyr_realm and not stream.invite_only:
continue
altered_users = altered_user_dict[stream.id]
altered_user_ids = [u.id for u in altered_users]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=altered_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for removed_user in altered_users:
event = dict(type="subscription",
op="peer_remove",
subscriptions=[stream.name],
user_id=removed_user.id)
send_event(event, peer_user_ids)
return (
[(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
not_subscribed,
)
def log_subscription_property_change(user_email: Text, stream_name: Text, property: Text,
value: Any) -> None:
event = {'type': 'subscription_property',
'property': property,
'user': user_email,
'stream_name': stream_name,
'value': value}
log_event(event)
def do_change_subscription_property(user_profile, sub, stream,
property_name, value):
# type: (UserProfile, Subscription, Stream, Text, Any) -> None
setattr(sub, property_name, value)
sub.save(update_fields=[property_name])
log_subscription_property_change(user_profile.email, stream.name,
property_name, value)
event = dict(type="subscription",
op="update",
email=user_profile.email,
property=property_name,
value=value,
stream_id=stream.id,
name=stream.name)
send_event(event, [user_profile.id])
def do_change_password(user_profile, password, commit=True,
hashed_password=False):
# type: (UserProfile, Text, bool, bool) -> None
if hashed_password:
# This is a hashed password, not the password itself, so store it
# directly rather than hashing it a second time via set_password().
user_profile.password = password
else:
user_profile.set_password(password)
if commit:
user_profile.save(update_fields=["password"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type='user_change_password',
event_time=event_time)
def do_change_full_name(user_profile: UserProfile, full_name: Text,
acting_user: UserProfile) -> None:
old_name = user_profile.full_name
user_profile.full_name = full_name
user_profile.save(update_fields=["full_name"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type='user_full_name_changed',
event_time=event_time, extra_data=old_name)
payload = dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name)
send_event(dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
send_event(dict(type='realm_bot', op='update', bot=payload),
bot_owner_user_ids(user_profile))
def check_change_full_name(user_profile: UserProfile, full_name_raw: Text,
acting_user: UserProfile) -> Text:
"""Verifies that the user's proposed full name is valid. The caller
is responsible for checking permissions. Returns the new
full name, which may differ from what was passed in (because this
function strips whitespace)."""
new_full_name = check_full_name(full_name_raw)
do_change_full_name(user_profile, new_full_name, acting_user)
return new_full_name
def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile,
acting_user: UserProfile) -> None:
user_profile.bot_owner = bot_owner
user_profile.save() # Can't use update_fields because of how the foreign key works.
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type='bot_owner_changed',
event_time=event_time)
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
owner_id=user_profile.bot_owner.id,
)),
bot_owner_user_ids(user_profile))
def do_change_tos_version(user_profile: UserProfile, tos_version: Text) -> None:
user_profile.tos_version = tos_version
user_profile.save(update_fields=["tos_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type='user_tos_version_changed',
event_time=event_time)
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> None:
user_profile.api_key = random_api_key()
user_profile.save(update_fields=["api_key"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type='user_api_key_changed',
event_time=event_time)
if user_profile.is_bot:
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
api_key=user_profile.api_key,
)),
bot_owner_user_ids(user_profile))
def do_change_avatar_fields(user_profile: UserProfile, avatar_source: Text) -> None:
user_profile.avatar_source = avatar_source
user_profile.avatar_version += 1
user_profile.save(update_fields=["avatar_source", "avatar_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type='user_change_avatar_source',
extra_data={'avatar_source': avatar_source},
event_time=event_time)
if user_profile.is_bot:
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
avatar_url=avatar_url(user_profile),
)),
bot_owner_user_ids(user_profile))
payload = dict(
email=user_profile.email,
avatar_source=user_profile.avatar_source,
avatar_url=avatar_url(user_profile),
avatar_url_medium=avatar_url(user_profile, medium=True),
user_id=user_profile.id
)
send_event(dict(type='realm_user',
op='update',
person=payload),
active_user_ids(user_profile.realm_id))
def do_change_icon_source(realm: Realm, icon_source: Text, log: bool=True) -> None:
realm.icon_source = icon_source
realm.icon_version += 1
realm.save(update_fields=["icon_source", "icon_version"])
if log:
log_event({'type': 'realm_change_icon',
'realm': realm.string_id,
'icon_source': icon_source})
send_event(dict(type='realm',
op='update_dict',
property="icon",
data=dict(icon_source=realm.icon_source,
icon_url=realm_icon_url(realm))),
active_user_ids(realm.id))
def _default_stream_permision_check(user_profile: UserProfile, stream: Optional[Stream]) -> None:
# Any user can have a None default stream
if stream is not None:
if user_profile.is_bot:
user = user_profile.bot_owner
else:
user = user_profile
if stream.invite_only and (user is None or not subscribed_to_stream(user, stream.id)):
raise JsonableError(_('Insufficient permission'))
def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream],
log: bool=True) -> None:
_default_stream_permision_check(user_profile, stream)
user_profile.default_sending_stream = stream
user_profile.save(update_fields=['default_sending_stream'])
if log:
log_event({'type': 'user_change_default_sending_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name = stream.name # type: Optional[Text]
else:
stream_name = None
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_sending_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_events_register_stream(user_profile: UserProfile,
stream: Optional[Stream],
log: bool=True) -> None:
_default_stream_permision_check(user_profile, stream)
user_profile.default_events_register_stream = stream
user_profile.save(update_fields=['default_events_register_stream'])
if log:
log_event({'type': 'user_change_default_events_register_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name = stream.name # type: Optional[Text]
else:
stream_name = None
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_events_register_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_all_public_streams(user_profile: UserProfile, value: bool,
log: bool=True) -> None:
user_profile.default_all_public_streams = value
user_profile.save(update_fields=['default_all_public_streams'])
if log:
log_event({'type': 'user_change_default_all_public_streams',
'user': user_profile.email,
'value': str(value)})
if user_profile.is_bot:
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_all_public_streams=user_profile.default_all_public_streams,
)),
bot_owner_user_ids(user_profile))
def do_change_is_admin(user_profile: UserProfile, value: bool,
permission: str='administer') -> None:
if permission == "administer":
user_profile.is_realm_admin = value
user_profile.save(update_fields=["is_realm_admin"])
elif permission == "api_super_user":
user_profile.is_api_super_user = value
user_profile.save(update_fields=["is_api_super_user"])
else:
raise Exception("Unknown permission")
if permission == 'administer':
event = dict(type="realm_user", op="update",
person=dict(email=user_profile.email,
user_id=user_profile.id,
is_admin=value))
send_event(event, active_user_ids(user_profile.realm_id))
def do_change_bot_type(user_profile: UserProfile, value: int) -> None:
user_profile.bot_type = value
user_profile.save(update_fields=["bot_type"])
def do_change_stream_invite_only(stream: Stream, invite_only: bool) -> None:
stream.invite_only = invite_only
stream.save(update_fields=['invite_only'])
def do_rename_stream(stream: Stream, new_name: Text, log: bool=True) -> Dict[str, Text]:
old_name = stream.name
stream.name = new_name
stream.save(update_fields=["name"])
if log:
log_event({'type': 'stream_name_change',
'realm': stream.realm.string_id,
'new_name': new_name})
recipient = get_stream_recipient(stream.id)
messages = Message.objects.filter(recipient=recipient).only("id")
# Update the display recipient and stream, which are easy single
# items to set.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
if old_cache_key != new_cache_key:
cache_delete(old_cache_key)
cache_set(new_cache_key, stream)
cache_set(display_recipient_cache_key(recipient.id), stream.name)
# Delete cache entries for everything else, which is cheaper and
# clearer than trying to set them. display_recipient is the out of
# date field in all cases.
cache_delete_many(
to_dict_cache_key_id(message.id) for message in messages)
new_email = encode_email_address(stream)
# We will tell our users to essentially
# update stream.name = new_name where name = old_name
# and update stream.email = new_email where name = old_name.
# We could optimize this by trying to send one message, but the
# client code really wants one property update at a time, and
# updating stream names is a pretty infrequent operation.
# More importantly, we want to key these updates by id, not name,
# since id is the immutable primary key, and obviously name is not.
data_updates = [
['email_address', new_email],
['name', new_name],
]
for property, value in data_updates:
event = dict(
op="update",
type="stream",
property=property,
value=value,
stream_id=stream.id,
name=old_name,
)
send_event(event, can_access_stream_user_ids(stream))
# Even though the token doesn't change, the web client needs to update the
# email forwarding address to display the correctly-escaped new name.
return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: Text) -> None:
stream.description = new_description
stream.save(update_fields=['description'])
event = dict(
type='stream',
op='update',
property='description',
name=stream.name,
stream_id=stream.id,
value=new_description,
)
send_event(event, can_access_stream_user_ids(stream))
def do_create_realm(string_id, name, restricted_to_domain=None,
invite_required=None, org_type=None):
# type: (Text, Text, Optional[bool], Optional[bool], Optional[int]) -> Realm
existing_realm = get_realm(string_id)
if existing_realm is not None:
raise AssertionError("Realm %s already exists!" % (string_id,))
kwargs = {} # type: Dict[str, Any]
if restricted_to_domain is not None:
kwargs['restricted_to_domain'] = restricted_to_domain
if invite_required is not None:
kwargs['invite_required'] = invite_required
if org_type is not None:
kwargs['org_type'] = org_type
realm = Realm(string_id=string_id, name=name, **kwargs)
realm.save()
# Create stream once Realm object has been saved
notifications_stream, _ = create_stream_if_needed(realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME)
realm.notifications_stream = notifications_stream
signup_notifications_stream, _ = create_stream_if_needed(
realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True,
stream_description="A private stream for core team members.")
realm.signup_notifications_stream = signup_notifications_stream
realm.save(update_fields=['notifications_stream', 'signup_notifications_stream'])
# Log the event
log_event({"type": "realm_created",
"string_id": string_id,
"restricted_to_domain": restricted_to_domain,
"invite_required": invite_required,
"org_type": org_type})
# Send a notification to the admin realm (if configured)
if settings.NEW_USER_BOT is not None:
signup_message = "Signups enabled"
admin_realm = get_system_bot(settings.NEW_USER_BOT).realm
internal_send_message(admin_realm, settings.NEW_USER_BOT, "stream",
"signups", realm.display_subdomain, signup_message)
return realm
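# A minimal usage sketch (hypothetical arguments, not from the codebase):
#   realm = do_create_realm("acme", "Acme Corp", invite_required=True)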
def do_change_notification_settings(user_profile: UserProfile, name: str, value: bool,
log: bool=True) -> None:
"""Takes in a UserProfile object, the name of a global notification
preference to update, and the value to update to
"""
notification_setting_type = UserProfile.notification_setting_types[name]
assert isinstance(value, notification_setting_type), (
'Cannot update %s: %s is not an instance of %s' % (
name, value, notification_setting_type,))
setattr(user_profile, name, value)
# Disabling digest emails should clear a user's email queue
if name == 'enable_digest_emails' and not value:
clear_scheduled_emails(user_profile.id, ScheduledEmail.DIGEST)
user_profile.save(update_fields=[name])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': name,
'setting': value}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_autoscroll_forever(user_profile: UserProfile, autoscroll_forever: bool,
log: bool=True) -> None:
user_profile.autoscroll_forever = autoscroll_forever
user_profile.save(update_fields=["autoscroll_forever"])
if log:
log_event({'type': 'autoscroll_forever',
'user': user_profile.email,
'autoscroll_forever': autoscroll_forever})
def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:
user_profile.enter_sends = enter_sends
user_profile.save(update_fields=["enter_sends"])
def do_change_default_desktop_notifications(user_profile: UserProfile,
default_desktop_notifications: bool) -> None:
user_profile.default_desktop_notifications = default_desktop_notifications
user_profile.save(update_fields=["default_desktop_notifications"])
def do_set_user_display_setting(user_profile: UserProfile,
setting_name: str,
setting_value: Union[bool, Text]) -> None:
property_type = UserProfile.property_types[setting_name]
assert isinstance(setting_value, property_type)
setattr(user_profile, setting_name, setting_value)
user_profile.save(update_fields=[setting_name])
event = {'type': 'update_display_settings',
'user': user_profile.email,
'setting_name': setting_name,
'setting': setting_value}
send_event(event, [user_profile.id])
# Updates to the timezone display setting are sent to all users
if setting_name == "timezone":
payload = dict(email=user_profile.email,
user_id=user_profile.id,
timezone=user_profile.timezone)
send_event(dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
def lookup_default_stream_groups(default_stream_group_names: List[str],
realm: Realm) -> List[DefaultStreamGroup]:
default_stream_groups = []
for group_name in default_stream_group_names:
try:
default_stream_group = DefaultStreamGroup.objects.get(
name=group_name, realm=realm)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_('Invalid default stream group %s' % (group_name,)))
default_stream_groups.append(default_stream_group)
return default_stream_groups
def set_default_streams(realm: Realm, stream_dict: Dict[Text, Dict[Text, Any]]) -> None:
DefaultStream.objects.filter(realm=realm).delete()
stream_names = []
for name, options in stream_dict.items():
stream_names.append(name)
stream, _ = create_stream_if_needed(realm,
name,
invite_only = options.get("invite_only", False),
stream_description = options.get("description", ''))
DefaultStream.objects.create(stream=stream, realm=realm)
# Always include the realm's default notifications stream, if it exists
if realm.notifications_stream is not None:
DefaultStream.objects.get_or_create(stream=realm.notifications_stream, realm=realm)
log_event({'type': 'default_streams',
'realm': realm.string_id,
'streams': stream_names})
def notify_default_streams(realm_id: int) -> None:
event = dict(
type="default_streams",
default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm_id))
)
send_event(event, active_user_ids(realm_id))
def notify_default_stream_groups(realm: Realm) -> None:
event = dict(
type="default_stream_groups",
default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm))
)
send_event(event, active_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
notify_default_streams(realm_id)
def do_remove_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete()
notify_default_streams(realm_id)
def do_create_default_stream_group(realm: Realm, group_name: Text,
description: Text, streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'%(stream_name)s' is a default stream and cannot be added to '%(group_name)s'")
% {'stream_name': stream.name, 'group_name': group_name})
check_default_stream_group_name(group_name)
(group, created) = DefaultStreamGroup.objects.get_or_create(
name=group_name, realm=realm, description=description)
if not created:
raise JsonableError(_("Default stream group '%(group_name)s' already exists")
% {'group_name': group_name})
group.streams = streams
group.save()
notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'%(stream_name)s' is a default stream and cannot be added to '%(group.name)s'")
% {'stream_name': stream.name, 'group.name': group.name})
if stream in group.streams.all():
raise JsonableError(_(
"Stream '%(stream_name)s' is already present in default stream group '%(group.name)s'")
% {'stream_name': stream.name, 'group.name': group.name})
group.streams.add(stream)
group.save()
notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
for stream in streams:
if stream not in group.streams.all():
raise JsonableError(_(
"Stream '%(stream_name)s' is not present in default stream group '%(group.name)s'")
% {'stream_name': stream.name, 'group.name': group.name})
group.streams.remove(stream)
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup,
new_group_name: Text) -> None:
if group.name == new_group_name:
raise JsonableError(_("This default stream group is already named '%s'") % (new_group_name,))
if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
raise JsonableError(_("Default stream group '%s' already exists") % (new_group_name,))
group.name = new_group_name
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup,
new_description: Text) -> None:
group.description = new_description
group.save()
notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
group.delete()
notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
return [default.stream for default in
DefaultStream.objects.select_related("stream", "stream__realm").filter(
realm_id=realm_id)]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
# Right now default streams are realm-wide. This wrapper gives us flexibility
# to some day further customize how we set up default streams for new users.
return get_default_streams_for_realm(user_profile.realm_id)
# Returns default streams in a JSON-serializable format
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"])
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"])
def do_update_user_activity_interval(user_profile: UserProfile,
log_time: datetime.datetime) -> None:
effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
# This code isn't perfect, because with various races we might end
# up creating two overlapping intervals, but that shouldn't happen
# often, and can be corrected for in post-processing
try:
last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
# There are two ways our intervals could overlap:
# (1) The start of the new interval could be inside the old interval
# (2) The end of the new interval could be inside the old interval
# In either case, we just extend the old interval to include the new interval.
if ((log_time <= last.end and log_time >= last.start) or
(effective_end <= last.end and effective_end >= last.start)):
last.end = max(last.end, effective_end)
last.start = min(last.start, log_time)
last.save(update_fields=["start", "end"])
return
except IndexError:
pass
# Otherwise, the intervals don't overlap, so we should make a new one
UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
end=effective_end)
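# Illustrative merge (assumed timestamps): if the last interval is 12:00-12:15 and a
# new log_time of 12:10 arrives, the existing row is extended to cover
# 12:00 through max(12:15, 12:10 + MIN_INTERVAL_LENGTH) rather than creating a second row.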
@statsd_increment('user_activity')
def do_update_user_activity(user_profile: UserProfile,
client: Client,
query: Text,
log_time: datetime.datetime) -> None:
(activity, created) = UserActivity.objects.get_or_create(
user_profile = user_profile,
client = client,
query = query,
defaults={'last_visit': log_time, 'count': 0})
activity.count += 1
activity.last_visit = log_time
activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
presence_dict = presence.to_dict()
event = dict(type="presence", email=user_profile.email,
server_timestamp=time.time(),
presence={presence_dict['client']: presence_dict})
send_event(event, active_user_ids(user_profile.realm_id))
def consolidate_client(client: Client) -> Client:
# The web app reports its client as 'website', while the desktop app
# reports ZulipDesktop because it sets a custom user agent. We want both
# to count as web users, so alias ZulipDesktop to website.
if client.name in ['ZulipDesktop']:
return get_client('website')
else:
return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
client: Client,
log_time: datetime.datetime,
status: int) -> None:
client = consolidate_client(client)
(presence, created) = UserPresence.objects.get_or_create(
user_profile = user_profile,
client = client,
defaults = {'timestamp': log_time,
'status': status})
stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
was_idle = presence.status == UserPresence.IDLE
became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
# If an object was created, it has already been saved.
#
# We suppress changes from ACTIVE to IDLE before stale_status is reached;
# this protects us from the user having two clients open: one active, the
# other idle. Without this check, we would constantly toggle their status
# between the two states.
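# Note: `and` binds tighter than `or`, so the condition below parses as
# (not created and stale_status) or was_idle or (status == presence.status).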
if not created and stale_status or was_idle or status == presence.status:
# The following block attempts to only update the "status"
# field in the event that it actually changed. This is
# important to avoid flushing the UserPresence cache when the
# data it would return to a client hasn't actually changed
# (see the UserPresence post_save hook for details).
presence.timestamp = log_time
update_fields = ["timestamp"]
if presence.status != status:
presence.status = status
update_fields.append("status")
presence.save(update_fields=update_fields)
if not user_profile.realm.is_zephyr_mirror_realm and (created or became_online):
# Push event to all users in the realm so they see the new user
# appear in the presence list immediately, or the newly online
# user without delay. Note that we won't send an update here for a
# timestamp update, because we rely on the browser to ping us every 50
# seconds for realm-wide status updates, and those updates should have
# recent timestamps, which means the browser won't think active users
# have gone idle. If we were more aggressive in this function about
# sending timestamp updates, we could eliminate the ping responses, but
# that's not a high priority for now, considering that most of our non-MIT
# realms are pretty small.
send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
event = {'user_profile_id': user_profile.id,
'time': datetime_to_timestamp(log_time)}
queue_json_publish("user_activity_interval", event)
def update_user_presence(user_profile, client, log_time, status,
new_user_input):
# type: (UserProfile, Client, datetime.datetime, int, bool) -> None
event = {'user_profile_id': user_profile.id,
'status': status,
'time': datetime_to_timestamp(log_time),
'client': client.name}
queue_json_publish("user_presence", event)
if new_user_input:
update_user_activity_interval(user_profile, log_time)
def do_update_pointer(user_profile: UserProfile, pointer: int, update_flags: bool=False) -> None:
prev_pointer = user_profile.pointer
user_profile.pointer = pointer
user_profile.save(update_fields=["pointer"])
if update_flags:
# Until we handle the new read counts in the Android app
# natively, this is a shim that will mark as read any messages
# up until the pointer move
UserMessage.objects.filter(user_profile=user_profile,
message__id__gt=prev_pointer,
message__id__lte=pointer,
flags=~UserMessage.flags.read) \
.update(flags=F('flags').bitor(UserMessage.flags.read))
event = dict(type='pointer', pointer=pointer)
send_event(event, [user_profile.id])
def do_mark_all_as_read(user_profile: UserProfile) -> int:
log_statsd_event('bankruptcy')
msgs = UserMessage.objects.filter(
user_profile=user_profile
).extra(
where=[UserMessage.where_unread()]
)
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read)
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=[], # we don't send messages, since the client reloads anyway
all=True
)
send_event(event, [user_profile.id])
statsd.incr("mark_all_as_read", count)
return count
def do_mark_stream_messages_as_read(user_profile: UserProfile,
stream: Optional[Stream],
topic_name: Optional[Text]=None) -> int:
log_statsd_event('mark_stream_as_read')
msgs = UserMessage.objects.filter(
user_profile=user_profile
)
recipient = get_stream_recipient(stream.id)
msgs = msgs.filter(message__recipient=recipient)
if topic_name:
msgs = msgs.filter(message__subject__iexact=topic_name)
msgs = msgs.extra(
where=[UserMessage.where_unread()]
)
message_ids = list(msgs.values_list('message__id', flat=True))
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read)
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=message_ids,
all=False,
)
send_event(event, [user_profile.id])
statsd.incr("mark_stream_as_read", count)
return count
def do_update_message_flags(user_profile: UserProfile,
operation: Text,
flag: Text,
messages: Optional[Sequence[int]]) -> int:
flagattr = getattr(UserMessage.flags, flag)
assert messages is not None
msgs = UserMessage.objects.filter(user_profile=user_profile,
message__id__in=messages)
# Hack to let you star any message
if msgs.count() == 0:
if not len(messages) == 1:
raise JsonableError(_("Invalid message(s)"))
if flag != "starred":
raise JsonableError(_("Invalid message(s)"))
# Validate that the user could have read the relevant message
message = access_message(user_profile, messages[0])[0]
# OK, this is a message that you legitimately have access
# to via narrowing to the stream it is on, even though you
# didn't actually receive it. So we create a historical,
# read UserMessage message row for you to star.
UserMessage.objects.create(user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read)
if operation == 'add':
count = msgs.update(flags=F('flags').bitor(flagattr))
elif operation == 'remove':
count = msgs.update(flags=F('flags').bitand(~flagattr))
else:
raise AssertionError("Invalid message flags operation")
event = {'type': 'update_message_flags',
'operation': operation,
'flag': flag,
'messages': messages,
'all': False}
send_event(event, [user_profile.id])
statsd.incr("flags.%s.%s" % (flag, operation), count)
return count
def subscribed_to_stream(user_profile: UserProfile, stream_id: int) -> bool:
try:
if Subscription.objects.get(user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__type_id=stream_id):
return True
return False
except Subscription.DoesNotExist:
return False
def truncate_content(content: Text, max_length: int, truncation_message: Text) -> Text:
if len(content) > max_length:
content = content[:max_length - len(truncation_message)] + truncation_message
return content
def truncate_body(body: Text) -> Text:
return truncate_content(body, MAX_MESSAGE_LENGTH, "...")
def truncate_topic(topic: Text) -> Text:
return truncate_content(topic, MAX_SUBJECT_LENGTH, "...")
MessageUpdateUserInfoResult = TypedDict('MessageUpdateUserInfoResult', {
'message_user_ids': Set[int],
'mention_user_ids': Set[int],
})
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
query = UserMessage.objects.filter(
message=message_id,
flags=~UserMessage.flags.historical
).values('user_profile_id', 'flags')
rows = list(query)
message_user_ids = {
row['user_profile_id']
for row in rows
}
mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
mention_user_ids = {
row['user_profile_id']
for row in rows
if int(row['flags']) & mask
}
return dict(
message_user_ids=message_user_ids,
mention_user_ids=mention_user_ids,
)
def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None:
wildcard = message.mentions_wildcard
mentioned_ids = message.mentions_user_ids
ids_with_alert_words = message.user_ids_with_alert_words
changed_ums = set() # type: Set[UserMessage]
def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
if should_set:
if not (um.flags & flag):
um.flags |= flag
changed_ums.add(um)
else:
if (um.flags & flag):
um.flags &= ~flag
changed_ums.add(um)
for um in ums:
has_alert_word = um.user_profile_id in ids_with_alert_words
update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
mentioned = um.user_profile_id in mentioned_ids
update_flag(um, mentioned, UserMessage.flags.mentioned)
update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
for um in changed_ums:
um.save(update_fields=['flags'])
def update_to_dict_cache(changed_messages: List[Message]) -> List[int]:
"""Updates the message as stored in the to_dict cache (for serving
messages)."""
items_for_remote_cache = {}
message_ids = []
for changed_message in changed_messages:
message_ids.append(changed_message.id)
key = to_dict_cache_key_id(changed_message.id)
value = MessageDict.to_dict_uncached(changed_message)
items_for_remote_cache[key] = (value,)
cache_set_many(items_for_remote_cache)
return message_ids
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile: UserProfile,
message: Message,
content: Optional[Text],
rendered_content: Optional[Text]) -> None:
event = {
'type': 'update_message',
'sender': user_profile.email,
'message_id': message.id} # type: Dict[str, Any]
changed_messages = [message]
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(message, ums)
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = bugdown_version
event["content"] = content
event["rendered_content"] = rendered_content
message.save(update_fields=["content", "rendered_content"])
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list()
}
send_event(event, list(map(user_info, ums)))
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile, message, topic_name, propagate_mode,
content, rendered_content,
prior_mention_user_ids, mention_user_ids):
# type: (UserProfile, Message, Optional[Text], str, Optional[Text], Optional[Text], Set[int], Set[int]) -> int
event = {'type': 'update_message',
# TODO: We probably want to remove the 'sender' field
# after confirming it isn't used by any consumers.
'sender': user_profile.email,
'user_id': user_profile.id,
'message_id': message.id} # type: Dict[str, Any]
edit_history_event = {
'user_id': user_profile.id,
} # type: Dict[str, Any]
changed_messages = [message]
if message.is_stream_message():
stream_id = message.recipient.type_id
event['stream_name'] = Stream.objects.get(id=stream_id).name
# Set first_rendered_content to be the oldest version of the
# rendered content recorded; which is the current version if the
# content hasn't been edited before. Note that because one could
# have edited just the topic_name, not every edit history event
# contains a prev_rendered_content element.
first_rendered_content = message.rendered_content
if message.edit_history is not None:
edit_history = ujson.loads(message.edit_history)
for old_edit_history_event in edit_history:
if 'prev_rendered_content' in old_edit_history_event:
first_rendered_content = old_edit_history_event['prev_rendered_content']
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(message, ums)
# We are turning off diff highlighting everywhere until ticket #1532 is addressed.
if False:
# Don't highlight message edit diffs on prod
rendered_content = highlight_html_differences(first_rendered_content, rendered_content)
# One could imagine checking realm.allow_edit_history here and
# modifying the events based on that setting, but doing so
# doesn't really make sense. We need to send the edit event
# to clients regardless, and a client already had access to
# the original/pre-edit content of the message anyway. That
# setting must be enforced on the client side, and making a
# change here simply complicates the logic for clients parsing
# edit history events.
event['orig_content'] = message.content
event['orig_rendered_content'] = message.rendered_content
edit_history_event["prev_content"] = message.content
edit_history_event["prev_rendered_content"] = message.rendered_content
edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = bugdown_version
event["content"] = content
event["rendered_content"] = rendered_content
event['prev_rendered_content_version'] = message.rendered_content_version
prev_content = edit_history_event['prev_content']
if Message.content_has_attachment(prev_content) or Message.content_has_attachment(message.content):
check_attachment_reference_change(prev_content, message)
if message.is_stream_message():
if topic_name is not None:
new_topic_name = topic_name
else:
new_topic_name = message.topic_name()
stream_topic = StreamTopicTarget(
stream_id=stream_id,
topic_name=new_topic_name,
)
else:
stream_topic = None
    # TODO: We may want a slightly leaner version of this function for updates.
info = get_recipient_info(
recipient=message.recipient,
sender_id=message.sender_id,
stream_topic=stream_topic,
)
event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
event['prior_mention_user_ids'] = list(prior_mention_user_ids)
event['mention_user_ids'] = list(mention_user_ids)
event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])
if topic_name is not None:
orig_topic_name = message.topic_name()
topic_name = truncate_topic(topic_name)
event["orig_subject"] = orig_topic_name
event["propagate_mode"] = propagate_mode
message.subject = topic_name
event["stream_id"] = message.recipient.type_id
event["subject"] = topic_name
event['subject_links'] = bugdown.subject_links(message.sender.realm_id, topic_name)
edit_history_event["prev_subject"] = orig_topic_name
if propagate_mode in ["change_later", "change_all"]:
propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)
# We only change messages up to 2 days in the past, to avoid hammering our
            # DB by changing an unbounded number of messages
if propagate_mode == 'change_all':
before_bound = timezone_now() - datetime.timedelta(days=2)
propagate_query = (propagate_query & ~Q(id = message.id) &
Q(pub_date__range=(before_bound, timezone_now())))
if propagate_mode == 'change_later':
propagate_query = propagate_query & Q(id__gt = message.id)
messages = Message.objects.filter(propagate_query).select_related()
# Evaluate the query before running the update
messages_list = list(messages)
messages.update(subject=topic_name)
for m in messages_list:
# The cached ORM object is not changed by messages.update()
# and the remote cache update requires the new value
m.subject = topic_name
changed_messages += messages_list
message.last_edit_time = timezone_now()
assert message.last_edit_time is not None # assert needed because stubs for django are missing
event['edit_timestamp'] = datetime_to_timestamp(message.last_edit_time)
edit_history_event['timestamp'] = event['edit_timestamp']
if message.edit_history is not None:
edit_history.insert(0, edit_history_event)
else:
edit_history = [edit_history_event]
message.edit_history = ujson.dumps(edit_history)
message.save(update_fields=["subject", "content", "rendered_content",
"rendered_content_version", "last_edit_time",
"edit_history"])
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list()
}
send_event(event, list(map(user_info, ums)))
return len(changed_messages)
def do_delete_message(user_profile: UserProfile, message: Message) -> None:
event = {
'type': 'delete_message',
'sender': user_profile.email,
'message_id': message.id} # type: Dict[str, Any]
ums = [{'id': um.user_profile_id} for um in
UserMessage.objects.filter(message=message.id)]
move_message_to_archive(message.id)
send_event(event, ums)
def encode_email_address(stream: Stream) -> Text:
return encode_email_address_helper(stream.name, stream.email_token)
def encode_email_address_helper(name: Text, email_token: Text) -> Text:
# Some deployments may not use the email gateway
if settings.EMAIL_GATEWAY_PATTERN == '':
return ''
# Given the fact that we have almost no restrictions on stream names and
# that what characters are allowed in e-mail addresses is complicated and
# dependent on context in the address, we opt for a very simple scheme:
#
# Only encode the stream name (leave the + and token alone). Encode
# everything that isn't alphanumeric plus _ as the percent-prefixed integer
# ordinal of that character, padded with zeroes to the maximum number of
# bytes of a UTF-8 encoded Unicode character.
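    # Illustrative example (added for clarity, not part of the original
    # comment): assuming EMAIL_GATEWAY_PATTERN were "%s@streams.example.com",
    # a stream named "Denmark & co" with token "abc123" would encode roughly as
    #     Denmark%0032%0038%0032co+abc123@streams.example.com
    # since " " is ord 32 and "&" is ord 38, each zero-padded to four digits.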
    encoded_name = re.sub(r"\W", lambda x: "%" + str(ord(x.group(0))).zfill(4), name)
encoded_token = "%s+%s" % (encoded_name, email_token)
return settings.EMAIL_GATEWAY_PATTERN % (encoded_token,)
def get_email_gateway_message_string_from_address(address: Text) -> Optional[Text]:
pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
if settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK:
# Accept mails delivered to any Zulip server
pattern_parts[-1] = settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK
match_email_re = re.compile("(.*?)".join(pattern_parts))
match = match_email_re.match(address)
if not match:
return None
msg_string = match.group(1)
return msg_string
def decode_email_address(email: Text) -> Optional[Tuple[Text, Text]]:
# Perform the reverse of encode_email_address. Returns a tuple of (streamname, email_token)
msg_string = get_email_gateway_message_string_from_address(email)
if msg_string is None:
return None
elif '.' in msg_string:
# Workaround for Google Groups and other programs that don't accept emails
# that have + signs in them (see Trac #2102)
encoded_stream_name, token = msg_string.split('.')
else:
encoded_stream_name, token = msg_string.split('+')
    stream_name = re.sub(r"%\d{4}", lambda x: unichr(int(x.group(0)[1:])), encoded_stream_name)
return stream_name, token
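# Illustrative examples (added for clarity, not in the original source),
# assuming EMAIL_GATEWAY_PATTERN were the purely illustrative "%s@streams.example.com":
#     decode_email_address("denmark+abc123@streams.example.com") == ("denmark", "abc123")
#     decode_email_address("denmark.abc123@streams.example.com") == ("denmark", "abc123")
# The second form is the Google Groups workaround: "." is accepted in place of "+".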
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has significant
# performance impact for loading / for users with large numbers of
# subscriptions, so it's worth optimizing.
SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
def gather_subscriptions_helper(user_profile: UserProfile,
include_subscribers: bool=True) -> SubHelperT:
sub_dicts = get_stream_subscriptions_for_user(user_profile).values(
"recipient_id", "in_home_view", "color", "desktop_notifications",
"audible_notifications", "push_notifications", "active", "pin_to_top"
).order_by("recipient_id")
sub_dicts = list(sub_dicts)
sub_recipient_ids = [
sub['recipient_id']
for sub in sub_dicts
]
stream_recipient = StreamRecipientMap()
stream_recipient.populate_for_recipient_ids(sub_recipient_ids)
stream_ids = set() # type: Set[int]
for sub in sub_dicts:
sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id'])
stream_ids.add(sub['stream_id'])
all_streams = get_active_streams(user_profile.realm).select_related(
"realm").values("id", "name", "invite_only", "realm_id",
"email_token", "description")
stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
stream_hash = {}
for stream in stream_dicts:
stream_hash[stream["id"]] = stream
all_streams_id = [stream["id"] for stream in all_streams]
subscribed = []
unsubscribed = []
never_subscribed = []
# Deactivated streams aren't in stream_hash.
streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
if sub["stream_id"] in stream_hash]
streams_subscribed_map = dict((sub["stream_id"], sub["active"]) for sub in sub_dicts)
# Add never subscribed streams to streams_subscribed_map
streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})
if include_subscribers:
subscriber_map = bulk_get_subscriber_user_ids(
all_streams,
user_profile,
streams_subscribed_map,
stream_recipient
) # type: Mapping[int, Optional[List[int]]]
else:
# If we're not including subscribers, always return None,
# which the below code needs to check for anyway.
subscriber_map = defaultdict(lambda: None)
sub_unsub_stream_ids = set()
for sub in sub_dicts:
sub_unsub_stream_ids.add(sub["stream_id"])
stream = stream_hash.get(sub["stream_id"])
if not stream:
# This stream has been deactivated, don't include it.
continue
subscribers = subscriber_map[stream["id"]] # type: Optional[List[int]]
# Important: don't show the subscribers if the stream is invite only
# and this user isn't on it anymore.
if stream["invite_only"] and not sub["active"]:
subscribers = None
stream_dict = {'name': stream["name"],
'in_home_view': sub["in_home_view"],
'invite_only': stream["invite_only"],
'color': sub["color"],
'desktop_notifications': sub["desktop_notifications"],
'audible_notifications': sub["audible_notifications"],
'push_notifications': sub["push_notifications"],
'pin_to_top': sub["pin_to_top"],
'stream_id': stream["id"],
'description': stream["description"],
'email_address': encode_email_address_helper(stream["name"], stream["email_token"])}
if subscribers is not None:
stream_dict['subscribers'] = subscribers
if sub["active"]:
subscribed.append(stream_dict)
else:
unsubscribed.append(stream_dict)
all_streams_id_set = set(all_streams_id)
    # Listing public streams is disabled for Zephyr mirroring realms.
if user_profile.realm.is_zephyr_mirror_realm:
never_subscribed_stream_ids = set() # type: Set[int]
else:
never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
if ns_stream_dict['id'] in never_subscribed_stream_ids]
for stream in never_subscribed_streams:
is_public = (not stream['invite_only'])
if is_public or user_profile.is_realm_admin:
stream_dict = {'name': stream['name'],
'invite_only': stream['invite_only'],
'stream_id': stream['id'],
'description': stream['description']}
if is_public:
subscribers = subscriber_map[stream["id"]]
if subscribers is not None:
stream_dict['subscribers'] = subscribers
never_subscribed.append(stream_dict)
return (sorted(subscribed, key=lambda x: x['name']),
sorted(unsubscribed, key=lambda x: x['name']),
sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(user_profile: UserProfile) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
subscribed, unsubscribed, never_subscribed = gather_subscriptions_helper(user_profile)
user_ids = set()
for subs in [subscribed, unsubscribed, never_subscribed]:
for sub in subs:
if 'subscribers' in sub:
for subscriber in sub['subscribers']:
user_ids.add(subscriber)
email_dict = get_emails_from_user_ids(list(user_ids))
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
sub['subscribers'] = sorted([email_dict[user_id] for user_id in sub['subscribers']])
return (subscribed, unsubscribed)
def get_active_presence_idle_user_ids(realm: Realm,
sender_id: int,
message_type: str,
active_user_ids: Set[int],
user_flags: Dict[int, List[str]]) -> List[int]:
'''
Given a list of active_user_ids, we build up a subset
of those users who fit these criteria:
* They are likely to need notifications (either due
to mentions or being PM'ed).
* They are no longer "present" according to the
UserPresence table.
'''
if realm.presence_disabled:
return []
is_pm = message_type == 'private'
user_ids = set()
for user_id in active_user_ids:
flags = user_flags.get(user_id, []) # type: Iterable[str]
mentioned = 'mentioned' in flags
private_message = is_pm and user_id != sender_id
if mentioned or private_message:
user_ids.add(user_id)
return filter_presence_idle_user_ids(user_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
if not user_ids:
return []
# 140 seconds is consistent with presence.js:OFFLINE_THRESHOLD_SECS
recent = timezone_now() - datetime.timedelta(seconds=140)
rows = UserPresence.objects.filter(
user_profile_id__in=user_ids,
timestamp__gte=recent
).distinct('user_profile_id').values('user_profile_id')
active_user_ids = {row['user_profile_id'] for row in rows}
idle_user_ids = user_ids - active_user_ids
return sorted(list(idle_user_ids))
def get_status_dict(requesting_user_profile: UserProfile) -> Dict[Text, Dict[Text, Dict[str, Any]]]:
if requesting_user_profile.realm.presence_disabled:
# Return an empty dict if presence is disabled in this realm
return defaultdict(dict)
return UserPresence.get_status_dict_by_realm(requesting_user_profile.realm_id)
def get_cross_realm_dicts() -> List[Dict[str, Any]]:
users = bulk_get_users(list(settings.CROSS_REALM_BOT_EMAILS), None,
base_query=UserProfile.objects.filter(
realm__string_id=settings.SYSTEM_BOT_REALM)).values()
return [{'email': user.email,
'user_id': user.id,
'is_admin': user.is_realm_admin,
'is_bot': user.is_bot,
'full_name': user.full_name}
for user in users
            # Important: We filter here, in addition to in
# `base_query`, because of how bulk_get_users shares its
# cache with other UserProfile caches.
if user.realm.string_id == settings.SYSTEM_BOT_REALM]
def do_send_confirmation_email(invitee: PreregistrationUser,
referrer: UserProfile) -> None:
"""
Send the confirmation/welcome e-mail to an invited user.
"""
activation_url = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
context = {'referrer': referrer, 'activate_url': activation_url,
'referrer_realm_name': referrer.realm.name}
from_name = u"%s (via Zulip)" % (referrer.full_name,)
send_email('zerver/emails/invitation', to_email=invitee.email, from_name=from_name,
from_address=FromAddress.NOREPLY, context=context)
def email_not_system_bot(email: Text) -> None:
if is_cross_realm_bot_email(email):
raise ValidationError('%s is an email address reserved for system bots' % (email,))
def validate_email_for_realm(target_realm: Realm, email: Text) -> None:
try:
# Registering with a system bot's email is not allowed...
email_not_system_bot(email)
except ValidationError:
# ... unless this is the first user with that email. This
# should be impossible in production, because these users are
# created by initialize_voyager_db, but it happens in a test's
# setup. (This would be a good wrinkle to clean up.)
if UserProfile.objects.filter(email__iexact=email).exists():
raise
try:
existing_user_profile = get_user(email, target_realm)
except UserProfile.DoesNotExist:
return
if existing_user_profile.is_mirror_dummy:
# Mirror dummy users to be activated must be inactive
if existing_user_profile.is_active:
raise AssertionError("Mirror dummy user is already active!")
else:
# Other users should not already exist at all.
raise ValidationError('%s already has an account' % (email,))
def validate_email(user_profile: UserProfile, email: Text) -> Tuple[Optional[str], Optional[str]]:
try:
validators.validate_email(email)
except ValidationError:
return _("Invalid address."), None
if not email_allowed_for_realm(email, user_profile.realm):
return _("Outside your domain."), None
try:
validate_email_for_realm(user_profile.realm, email)
except ValidationError:
return None, _("Already has an account.")
return None, None
class InvitationError(JsonableError):
code = ErrorCode.INVITATION_FAILED
data_fields = ['errors', 'sent_invitations']
def __init__(self, msg: Text, errors: List[Tuple[Text, str]], sent_invitations: bool) -> None:
self._msg = msg # type: Text
self.errors = errors # type: List[Tuple[Text, str]]
self.sent_invitations = sent_invitations # type: bool
def estimate_recent_invites(realm: Realm, *, days: int) -> int:
'''An upper bound on the number of invites sent in the last `days` days'''
recent_invites = RealmCount.objects.filter(
realm=realm,
property='invites_sent::day',
end_time__gte=timezone_now() - datetime.timedelta(days=days)
).aggregate(Sum('value'))['value__sum']
if recent_invites is None:
return 0
return recent_invites
def check_invite_limit(user: UserProfile, num_invitees: int) -> None:
# Discourage using invitation emails as a vector for carrying spam
if settings.OPEN_REALM_CREATION:
recent_invites = estimate_recent_invites(user.realm, days=1)
if num_invitees + recent_invites > user.realm.max_invites:
raise InvitationError(
_("You do not have enough remaining invites. "
"Please contact %s to have your limit raised. "
"No invitations were sent." % (settings.ZULIP_ADMINISTRATOR)),
[], sent_invitations=False)
def do_invite_users(user_profile: UserProfile,
invitee_emails: SizedTextIterable,
streams: Iterable[Stream],
invite_as_admin: Optional[bool]=False) -> None:
check_invite_limit(user_profile, len(invitee_emails))
validated_emails = [] # type: List[Text]
errors = [] # type: List[Tuple[Text, str]]
skipped = [] # type: List[Tuple[Text, str]]
for email in invitee_emails:
if email == '':
continue
email_error, email_skipped = validate_email(user_profile, email)
if not (email_error or email_skipped):
validated_emails.append(email)
elif email_error:
errors.append((email, email_error))
elif email_skipped:
skipped.append((email, email_skipped))
if errors:
raise InvitationError(
_("Some emails did not validate, so we didn't send any invitations."),
errors + skipped, sent_invitations=False)
if skipped and len(skipped) == len(invitee_emails):
# All e-mails were skipped, so we didn't actually invite anyone.
raise InvitationError(_("We weren't able to invite anyone."),
skipped, sent_invitations=False)
# We do this here rather than in the invite queue processor since this
# is used for rate limiting invitations, rather than keeping track of
# when exactly invitations were sent
do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'],
None, timezone_now(), increment=len(validated_emails))
# Now that we are past all the possible errors, we actually create
# the PreregistrationUser objects and trigger the email invitations.
for email in validated_emails:
# The logged in user is the referrer.
prereg_user = PreregistrationUser(email=email, referred_by=user_profile,
invited_as_admin=invite_as_admin,
realm=user_profile.realm)
prereg_user.save()
stream_ids = [stream.id for stream in streams]
prereg_user.streams.set(stream_ids)
event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id}
queue_json_publish("invites", event)
if skipped:
raise InvitationError(_("Some of those addresses are already using Zulip, "
"so we didn't send them an invitation. We did send "
"invitations to everyone else!"),
skipped, sent_invitations=True)
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
days_to_activate = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', 7)
active_value = getattr(confirmation_settings, 'STATUS_ACTIVE', 1)
lowest_datetime = timezone_now() - datetime.timedelta(days=days_to_activate)
prereg_users = PreregistrationUser.objects.exclude(status=active_value).filter(
invited_at__gte=lowest_datetime,
referred_by__realm=user_profile.realm)
invites = []
for invitee in prereg_users:
invites.append(dict(email=invitee.email,
ref=invitee.referred_by.email,
invited=invitee.invited_at.strftime("%Y-%m-%d %H:%M:%S"),
id=invitee.id))
return invites
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
email = prereg_user.email
# Delete both the confirmation objects and the prereg_user object.
    # TODO: Probably we actually want to set the confirmation objects
# to a "revoked" status so that we can give the user a better
# error message.
content_type = ContentType.objects.get_for_model(PreregistrationUser)
Confirmation.objects.filter(content_type=content_type,
object_id=prereg_user.id).delete()
prereg_user.delete()
clear_scheduled_invitation_emails(email)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> str:
check_invite_limit(prereg_user.referred_by, 1)
prereg_user.invited_at = timezone_now()
prereg_user.save()
do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'],
None, prereg_user.invited_at)
clear_scheduled_invitation_emails(prereg_user.email)
# We don't store the custom email body, so just set it to None
event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None}
queue_json_publish("invites", event)
return prereg_user.invited_at.strftime("%Y-%m-%d %H:%M:%S")
def notify_realm_emoji(realm: Realm) -> None:
event = dict(type="realm_emoji", op="update",
realm_emoji=realm.get_emoji())
send_event(event, active_user_ids(realm.id))
def check_add_realm_emoji(realm: Realm,
name: Text,
file_name: Text,
author: Optional[UserProfile]=None) -> None:
emoji = RealmEmoji(realm=realm, name=name, file_name=file_name, author=author)
emoji.full_clean()
emoji.save()
notify_realm_emoji(realm)
def do_remove_realm_emoji(realm: Realm, name: Text) -> None:
emoji = RealmEmoji.objects.get(realm=realm, name=name)
emoji.deactivated = True
emoji.save(update_fields=['deactivated'])
notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Iterable[Text]) -> None:
event = dict(type="alert_words", alert_words=words)
send_event(event, [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[Text]) -> None:
words = add_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[Text]) -> None:
words = remove_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_set_alert_words(user_profile: UserProfile, alert_words: List[Text]) -> None:
set_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, alert_words)
def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str) -> None:
add_topic_mute(user_profile, stream.id, recipient.id, topic)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(event, [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
remove_topic_mute(user_profile, stream.id, topic)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(event, [user_profile.id])
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
event = dict(type="hotspots", hotspots=get_next_hotspots(user))
send_event(event, [user.id])
def notify_realm_filters(realm: Realm) -> None:
realm_filters = realm_filters_for_realm(realm.id)
event = dict(type="realm_filters", realm_filters=realm_filters)
send_event(event, active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
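# Illustrative example (not part of the original source): a filter that turns
# "#1234" into a tracker link could be registered with something like
#     do_add_realm_filter(realm, r"#(?P<id>[0-9]+)",
#                         "https://ticket.example.com/%(id)s")
# where the named group in the pattern is referenced by name in the URL
# format string; the hostname here is purely an example.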
def do_add_realm_filter(realm: Realm, pattern: Text, url_format_string: Text) -> int:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
realm_filter = RealmFilter(
realm=realm, pattern=pattern,
url_format_string=url_format_string)
realm_filter.full_clean()
realm_filter.save()
notify_realm_filters(realm)
return realm_filter.id
def do_remove_realm_filter(realm: Realm, pattern: Optional[Text]=None,
id: Optional[int]=None) -> None:
if pattern is not None:
RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
else:
RealmFilter.objects.get(realm=realm, pk=id).delete()
notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, Text]:
# We may eventually use memcached to speed this up, but the DB is fast.
return UserProfile.emails_from_ids(user_ids)
def do_add_realm_domain(realm: Realm, domain: Text, allow_subdomains: bool) -> (RealmDomain):
realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
allow_subdomains=allow_subdomains)
event = dict(type="realm_domains", op="add",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(event, active_user_ids(realm.id))
return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
realm_domain.allow_subdomains = allow_subdomains
realm_domain.save(update_fields=['allow_subdomains'])
event = dict(type="realm_domains", op="change",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(realm_domain: RealmDomain) -> None:
realm = realm_domain.realm
domain = realm_domain.domain
realm_domain.delete()
if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.restricted_to_domain:
# If this was the last realm domain, we mark the realm as no
# longer restricted to domain, because the feature doesn't do
# anything if there are no domains, and this is probably less
# confusing than the alternative.
do_set_realm_property(realm, 'restricted_to_domain', False)
event = dict(type="realm_domains", op="remove", domain=domain)
send_event(event, active_user_ids(realm.id))
def get_occupied_streams(realm: Realm) -> QuerySet:
# TODO: Make a generic stub for QuerySet
""" Get streams with subscribers """
subs_filter = Subscription.objects.filter(active=True, user_profile__realm=realm,
user_profile__is_active=True).values('recipient_id')
stream_ids = Recipient.objects.filter(
type=Recipient.STREAM, id__in=subs_filter).values('type_id')
return Stream.objects.filter(id__in=stream_ids, realm=realm, deactivated=False)
def do_get_streams(user_profile, include_public=True, include_subscribed=True,
include_all_active=False, include_default=False):
# type: (UserProfile, bool, bool, bool, bool) -> List[Dict[str, Any]]
if include_all_active and not user_profile.is_api_super_user:
raise JsonableError(_("User not authorized for this query"))
    # Listing public streams is disabled for Zephyr mirroring realms.
include_public = include_public and not user_profile.realm.is_zephyr_mirror_realm
# Start out with all streams in the realm with subscribers
query = get_occupied_streams(user_profile.realm)
if not include_all_active:
user_subs = get_stream_subscriptions_for_user(user_profile).filter(
active=True,
).select_related('recipient')
if include_subscribed:
recipient_check = Q(id__in=[sub.recipient.type_id for sub in user_subs])
if include_public:
invite_only_check = Q(invite_only=False)
if include_subscribed and include_public:
query = query.filter(recipient_check | invite_only_check)
elif include_public:
query = query.filter(invite_only_check)
elif include_subscribed:
query = query.filter(recipient_check)
else:
# We're including nothing, so don't bother hitting the DB.
query = []
streams = [(row.to_dict()) for row in query]
streams.sort(key=lambda elt: elt["name"])
if include_default:
is_default = {}
default_streams = get_default_streams_for_realm(user_profile.realm_id)
for default_stream in default_streams:
is_default[default_stream.id] = True
for stream in streams:
stream['is_default'] = is_default.get(stream["stream_id"], False)
return streams
def do_claim_attachments(message: Message) -> None:
attachment_url_list = attachment_url_re.findall(message.content)
for url in attachment_url_list:
path_id = attachment_url_to_path_id(url)
user_profile = message.sender
is_message_realm_public = False
if message.is_stream_message():
is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
if not validate_attachment_request(user_profile, path_id):
# Technically, there are 2 cases here:
# * The user put something in their message that has the form
            #     of an upload, but doesn't correspond to a file that
            #     actually exists.  validate_attachment_request will return None.
# * The user is trying to send a link to a file they don't have permission to
# access themselves. validate_attachment_request will return False.
#
# Either case is unusual and suggests a UI bug that got
# the user in this situation, so we log in these cases.
logging.warning("User %s tried to share upload %s in message %s, but lacks permission" % (
user_profile.id, path_id, message.id))
continue
claim_attachment(user_profile, path_id, message, is_message_realm_public)
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)
for attachment in old_unclaimed_attachments:
delete_message_image(attachment.path_id)
attachment.delete()
def check_attachment_reference_change(prev_content: Text, message: Message) -> None:
new_content = message.content
prev_attachments = set(attachment_url_re.findall(prev_content))
new_attachments = set(attachment_url_re.findall(new_content))
to_remove = list(prev_attachments - new_attachments)
path_ids = []
for url in to_remove:
path_id = attachment_url_to_path_id(url)
path_ids.append(path_id)
attachments_to_update = Attachment.objects.filter(path_id__in=path_ids).select_for_update()
message.attachment_set.remove(*attachments_to_update)
to_add = list(new_attachments - prev_attachments)
if len(to_add) > 0:
do_claim_attachments(message)
def notify_realm_custom_profile_fields(realm: Realm) -> None:
fields = custom_profile_fields_for_realm(realm.id)
event = dict(type="custom_profile_fields",
fields=[f.as_dict() for f in fields])
send_event(event, active_user_ids(realm.id))
def try_add_realm_custom_profile_field(realm: Realm, name: Text, field_type: int) -> CustomProfileField:
field = CustomProfileField(realm=realm, name=name, field_type=field_type)
field.save()
notify_realm_custom_profile_fields(realm)
return field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
"""
Deleting a field will also delete the user profile data
associated with it in CustomProfileFieldValue model.
"""
field.delete()
notify_realm_custom_profile_fields(realm)
def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField,
name: Text) -> None:
field.name = name
field.save(update_fields=['name'])
notify_realm_custom_profile_fields(realm)
def do_update_user_custom_profile_data(user_profile: UserProfile,
data: List[Dict[str, Union[int, Text]]]) -> None:
with transaction.atomic():
update_or_create = CustomProfileFieldValue.objects.update_or_create
for field in data:
update_or_create(user_profile=user_profile,
field_id=field['id'],
defaults={'value': field['value']})
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
event = dict(type="user_group",
op="add",
group=dict(name=user_group.name,
members=[member.id for member in members],
description=user_group.description,
id=user_group.id,
),
)
send_event(event, active_user_ids(user_group.realm_id))
def check_add_user_group(realm: Realm, name: Text, initial_members: List[UserProfile],
description: Text) -> None:
try:
user_group = create_user_group(name, initial_members, realm, description=description)
do_send_create_user_group_event(user_group, initial_members)
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '%s' already exists." % (name,)))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None:
event = dict(type="user_group", op='update', group_id=user_group.id, data=data)
send_event(event, active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: Text) -> None:
user_group.name = name
user_group.save(update_fields=['name'])
do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: Text) -> None:
user_group.description = description
user_group.save(update_fields=['description'])
do_send_user_group_update_event(user_group, dict(description=description))
def do_send_user_group_members_update_event(event_name: Text,
user_group: UserGroup,
user_ids: List[int]) -> None:
event = dict(type="user_group",
op=event_name,
group_id=user_group.id,
user_ids=user_ids)
send_event(event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
memberships = [UserGroupMembership(user_group_id=user_group.id,
user_profile=user_profile)
for user_profile in user_profiles]
UserGroupMembership.objects.bulk_create(memberships)
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('add_members', user_group, user_ids)
def remove_members_from_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
UserGroupMembership.objects.filter(
user_group_id=user_group.id,
user_profile__in=user_profiles).delete()
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('remove_members', user_group, user_ids)
def do_send_delete_user_group_event(user_group_id: int, realm_id: int) -> None:
event = dict(type="user_group",
op="remove",
group_id=user_group_id)
send_event(event, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, realm: Realm) -> None:
user_group = access_user_group_by_id(user_group_id, realm)
user_group.delete()
do_send_delete_user_group_event(user_group_id, realm.id)
| apache-2.0 | 7,379,102,020,731,750,000 | 42.5618 | 265 | 0.623375 | false |
gitaarik/adyengo | adyengo/constants.py | 1 | 7207 | # Session Types
SESSION_TYPE_HPP_REGULAR = 'hpp_regular'
SESSION_TYPE_HPP_RECURRING = 'hpp_recurring'
SESSION_TYPE_API_RECURRING = 'api_recurring'
SESSION_TYPES_HPP = {
SESSION_TYPE_HPP_REGULAR: "HPP Regular",
SESSION_TYPE_HPP_RECURRING: "HPP Recurring",
}
SESSION_TYPES_API = {
SESSION_TYPE_API_RECURRING: "API Recurring"
}
SESSION_TYPES = SESSION_TYPES_HPP.copy()
SESSION_TYPES.update(SESSION_TYPES_API)
# Page Types
PAGE_TYPE_MULTIPLE = 'multiple'
PAGE_TYPE_SINGLE = 'single'
PAGE_TYPE_SKIP = 'skip'
PAGE_TYPES = {
PAGE_TYPE_MULTIPLE: "Multiple",
PAGE_TYPE_SINGLE: "Single",
PAGE_TYPE_SKIP: "Skip"
}
# Currency Code
CURRENCY_CODE_EUR = 'EUR'
CURRENCY_CODES = {
CURRENCY_CODE_EUR: "Euro",
}
# Locales
LOCALE_DE_DE = 'de_DE'
LOCALE_EN_GB = 'en_GB'
LOCALE_FR_BE = 'fr_BE'
LOCALE_NL_BE = 'nl_BE'
LOCALE_NL_NL = 'nl_NL'
LOCALES = {
LOCALE_DE_DE: "German (Germany)",
LOCALE_EN_GB: "English (United Kingdom)",
LOCALE_FR_BE: "French (Belgium)",
LOCALE_NL_BE: "Dutch (Belgium)",
LOCALE_NL_NL: "Dutch (Holland)"
}
# Country Code
COUNTRY_CODE_BE = 'BE'
COUNTRY_CODE_DE = 'DE'
COUNTRY_CODE_GB = 'GB'
COUNTRY_CODE_NL = 'NL'
COUNTRY_CODES = {
COUNTRY_CODE_BE: "Belgium",
COUNTRY_CODE_DE: "Germany",
COUNTRY_CODE_GB: "United Kingdom",
COUNTRY_CODE_NL: "Netherlands"
}
# Payment Method
PAYMENT_METHOD_AMEX = 'amex'
PAYMENT_METHOD_BANKTRANSFER = 'bankTransfer'
PAYMENT_METHOD_BANKTRANSFER_DE = 'bankTransfer_DE'
PAYMENT_METHOD_BANKTRANSFER_IBAN = 'bankTransfer_IBAN'
PAYMENT_METHOD_BANKTRANSFER_NL = 'bankTransfer_NL'
PAYMENT_METHOD_BCMC = 'bcmc'
PAYMENT_METHOD_CARD = 'card'
PAYMENT_METHOD_DINERS = 'diners'
PAYMENT_METHOD_DIRECTDEBIT_NL = 'directdebit_NL'
PAYMENT_METHOD_DIRECT_E_BANKING = 'directEbanking'
PAYMENT_METHOD_DISCOVER = 'discover'
PAYMENT_METHOD_DOTPAY = 'dotpay'
PAYMENT_METHOD_EBANKING_FI = 'ebanking_FI'
PAYMENT_METHOD_ELV = 'elv'
PAYMENT_METHOD_GIROPAY = 'giropay'
PAYMENT_METHOD_IDEAL = 'ideal'
PAYMENT_METHOD_MAESTRO = 'maestro'
PAYMENT_METHOD_MC = 'mc'
PAYMENT_METHOD_PAYPAL = 'paypal'
PAYMENT_METHOD_SEPADIRECTDEBIT = 'sepadirectdebit'
PAYMENT_METHOD_VISA = 'visa'
PAYMENT_METHODS = {
PAYMENT_METHOD_AMEX: "Amex",
PAYMENT_METHOD_BANKTRANSFER: "All banktransfers",
PAYMENT_METHOD_BANKTRANSFER_DE: "German Banktransfer",
PAYMENT_METHOD_BANKTRANSFER_IBAN: "International Bank Transfer (IBAN)",
PAYMENT_METHOD_BANKTRANSFER_NL: "Dutch Banktransfer",
PAYMENT_METHOD_BCMC: "Bancontact card",
PAYMENT_METHOD_CARD: "All debit and credit cards",
PAYMENT_METHOD_DINERS: "Diners Club",
PAYMENT_METHOD_DIRECTDEBIT_NL: "Direct Debit (Netherlands)",
PAYMENT_METHOD_DIRECT_E_BANKING: "SofortUberweisung",
PAYMENT_METHOD_DISCOVER: "Discover",
PAYMENT_METHOD_DOTPAY: "Dotpay",
PAYMENT_METHOD_EBANKING_FI: "Finnish E-Banking",
PAYMENT_METHOD_ELV: "ELV",
PAYMENT_METHOD_GIROPAY: "GiroPay",
PAYMENT_METHOD_IDEAL: "iDEAL",
PAYMENT_METHOD_MAESTRO: "Maestro",
PAYMENT_METHOD_MC: "Master Card",
PAYMENT_METHOD_PAYPAL: "PayPal",
PAYMENT_METHOD_SEPADIRECTDEBIT: "SEPA Direct Debit",
PAYMENT_METHOD_VISA: "Visa",
}
# Recurring contract types
RECURRING_CONTRACT_TYPE_RECURRING = 'RECURRING'
RECURRING_CONTRACT_TYPE_ONECLICK = 'ONECLICK'
RECURRING_CONTRACT_TYPES = {
RECURRING_CONTRACT_TYPE_RECURRING: "Recurring",
RECURRING_CONTRACT_TYPE_ONECLICK: "One click"
}
# Recurring contract types plus combinations
RECURRING_CONTRACT_TYPES_PLUS_COMBOS = RECURRING_CONTRACT_TYPES.copy()
RECURRING_CONTRACT_TYPES_PLUS_COMBOS.update({
'{},{}'.format(
RECURRING_CONTRACT_TYPE_RECURRING,
RECURRING_CONTRACT_TYPE_ONECLICK
): "Recurring and One click (user chooses)"
})
# Recurring contract variant fields
RECURRING_CONTRACT_VARIANT_FIELDS = {
'card': (
'expiryMonth',
'expiryYear',
'holderName',
'number',
'cvc',
'issueNumber',
'startMonth',
'startYear'
),
'elv': (
'bankLocation',
'bankName',
'bankLocationId',
'accountHolderName',
'bankAccountNumber'
),
'bank': (
'bankAccountNumber',
'bankLocationId',
'bankName',
'bic',
'countryCode',
'iban',
'ownerName'
)
}
# Recurring payment result codes
RECURRING_PAYMENT_RESULT_AUTHORISED = 'Authorised'
RECURRING_PAYMENT_RESULT_REFUSED = 'Refused'
RECURRING_PAYMENT_RESULT_RECEIVED = 'Received'
RECURRING_PAYMENT_RESULT_ERROR = 'Error'
RECURRING_PAYMENT_RESULT_CODES = {
RECURRING_PAYMENT_RESULT_AUTHORISED: 'Authorised',
RECURRING_PAYMENT_RESULT_REFUSED: 'Refused',
RECURRING_PAYMENT_RESULT_RECEIVED: 'Received',
RECURRING_PAYMENT_RESULT_ERROR: 'Error'
}
# Notification event codes
NOTIFICATION_EVENT_CODE_AUTHORISATION = 'AUTHORISATION'
NOTIFICATION_EVENT_CODE_CANCELLATION = 'CANCELLATION'
NOTIFICATION_EVENT_CODE_REFUND = 'REFUND'
NOTIFICATION_EVENT_CODE_CANCEL_OR_REFUND = 'CANCEL_OR_REFUND'
NOTIFICATION_EVENT_CODE_CAPTURE = 'CAPTURE'
NOTIFICATION_EVENT_CODE_REFUNDED_REVERSED = 'REFUNDED_REVERSED'
NOTIFICATION_EVENT_CODE_CAPTURE_FAILED = 'CAPTURE_FAILED'
NOTIFICATION_EVENT_CODE_REFUND_FAILED = 'REFUND_FAILED'
NOTIFICATION_EVENT_CODE_REQUEST_FOR_INFORMATION = 'REQUEST_FOR_INFORMATION'
NOTIFICATION_EVENT_CODE_NOTIFICATION_OF_CHARGEBACK = 'NOTIFICATION_OF_CHARGEBACK'
NOTIFICATION_EVENT_CODE_ADVICE_OF_DEBIT = 'ADVICE_OF_DEBIT'
NOTIFICATION_EVENT_CODE_CHARGEBACK = 'CHARGEBACK'
NOTIFICATION_EVENT_CODE_CHARGEBACK_REVERSED = 'CHARGEBACK_REVERSED'
NOTIFICATION_EVENT_CODE_REPORT_AVAILABLE = 'REPORT_AVAILABLE'
NOTIFICATION_EVENT_CODES = {
NOTIFICATION_EVENT_CODE_AUTHORISATION: "Authorisation",
NOTIFICATION_EVENT_CODE_CANCELLATION: "Cancellation",
NOTIFICATION_EVENT_CODE_REFUND: "Refund",
NOTIFICATION_EVENT_CODE_CANCEL_OR_REFUND: "Cancel or refund",
NOTIFICATION_EVENT_CODE_CAPTURE: "Capture",
NOTIFICATION_EVENT_CODE_REFUNDED_REVERSED: "Refunded reversed",
NOTIFICATION_EVENT_CODE_CAPTURE_FAILED: "Capture failed",
NOTIFICATION_EVENT_CODE_REFUND_FAILED: "Refund failed",
NOTIFICATION_EVENT_CODE_REQUEST_FOR_INFORMATION: "Request for information",
NOTIFICATION_EVENT_CODE_NOTIFICATION_OF_CHARGEBACK: "Notification of chargeback",
NOTIFICATION_EVENT_CODE_ADVICE_OF_DEBIT: "Advice of debit",
NOTIFICATION_EVENT_CODE_CHARGEBACK: "Chargeback",
NOTIFICATION_EVENT_CODE_CHARGEBACK_REVERSED: "Chargeback reversed",
NOTIFICATION_EVENT_CODE_REPORT_AVAILABLE: "Report available"
}
# Adyen servers ip addresses
ADYEN_SERVERS_IP_ADDRESS_RANGES = (
u'82.199.87.128/26', # 82.199.87.129 to 82.199.87.191
u'82.199.90.136/29', # 82.199.90.137 to 82.199.90.142
u'82.199.90.160/27', # 82.199.90.161 to 82.199.90.190
u'91.212.42.0/24' # 91.212.42.1 to 91.212.42.254
)
# Only these IP addresses should be allowed to send notifications.
# You can check if an IP exists inside a range using ipaddress:
# https://pypi.python.org/pypi/ipaddress/1.0.4
#
# import ipaddress
# ipaddress.ip_address(u'82.199.87.135') in ipaddress.ip_network(u'82.199.87.128/26')
#
# Note that both IP addresses SHOULD be unicode.
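# A minimal sketch (not part of the original module) of how the ranges above
# could be combined with that check, assuming the `ipaddress` package is
# available and `ip` is a unicode string:
#
# import ipaddress
#
# def is_adyen_ip(ip):
#     addr = ipaddress.ip_address(ip)
#     return any(addr in ipaddress.ip_network(net)
#                for net in ADYEN_SERVERS_IP_ADDRESS_RANGES)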
| lgpl-3.0 | 3,119,429,773,629,368,300 | 29.029167 | 85 | 0.712085 | false |
kidaa30/spacewalk | client/debian/packages-already-in-debian/rhnlib/test/23-digest-auth.py | 36 | 2399 | #!/usr/bin/python
#
# Test case for digest authentication
#
#
# USAGE: (echo -n) | $0 PORT
#
# Few notes about what is done here:
# - thread AUTH sends authentication digest
# - thread NC uses netcat and grep to see results
# - The little hack with (echo -n) is much easier to use
#   than some settrace machinery
import sys
import socket
import os
import httplib
from threading import Thread
sys.path.append('..')
from rhn.rpclib import Server
SERVER = "longusername0123456789:longpassword0123456789@localhost"
PORT = "1234"
HANDLER = "/XMLRPC"
try:
PORT = sys.argv[1]
except:
pass
class killable(Thread):
"""Just Thread with a kill() method."""
def __init__(self, *args, **keywords):
Thread.__init__(self, *args, **keywords)
self.killed = False
def start(self):
self.__run_backup = self.run
self.run = self.__run # Force the Thread to install our trace.
Thread.start(self)
def __run(self):
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, why, arg):
if why == 'call':
return self.localtrace
else:
return None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
def authenticate():
global SERVER, PORT, HANDLER
s = Server("http://" + SERVER + ":" + PORT + HANDLER);
connected = False;
while not connected:
try:
connected = True;
print s.test.method()
except socket.error, e:
            # nobody is listening, try to authenticate again
connected = False;
pass;
except httplib.BadStatusLine, e:
            # This is ok, netcat does not send an appropriate response
pass
def netcat():
global auth
cmd = "nc -l " + PORT + " | grep authorization\:\ Basic\ bG9uZ3VzZXJuYW1lMDEyMzQ1Njc4OTpsb25ncGFzc3dvcmQwMTIzNDU2Nzg5"
result = os.system(cmd);
if (result == 0):
print "Tests PASSES"
else:
auth.kill();
print "Test FAILS"
if __name__ == '__main__':
global nc, auth
nc = killable(target = netcat);
auth = killable(target = authenticate);
nc.start();
auth.start();
| gpl-2.0 | 3,679,093,189,985,728,500 | 23.731959 | 122 | 0.596082 | false |
bank-netforce/netforce | netforce_general/netforce_general/models/language.py | 4 | 2606 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields
from netforce import database
from netforce import static
class Language(Model):
_name = "language"
_string = "Language"
_key = ["code"]
_fields = {
"name": fields.Char("Name", required=True, search=True),
"code": fields.Char("Code", required=True),
"num_translations": fields.Integer("Number of translations", function="get_num_translations"),
"active": fields.Boolean("Active"),
"comments": fields.One2Many("message", "related_id", "Comments"),
}
_defaults = {
"active": True,
}
def create(self, vals, **kw):
res = super().create(vals, **kw)
static.clear_translations()
return res
def write(self, ids, vals, **kw):
super().write(ids, vals, **kw)
static.clear_translations()
def delete(self, ids, **kw):
super().delete(ids, **kw)
static.clear_translations()
def get_num_translations(self, ids, context={}):
db = database.get_connection()
res = db.query(
"SELECT lang_id,COUNT(*) AS num FROM translation WHERE lang_id IN %s GROUP BY lang_id", tuple(ids))
vals = {r.lang_id: r.num for r in res}
return vals
def get_active_langs(self):
db = database.get_connection()
res = db.query("SELECT code,name FROM language WHERE active=true ORDER BY name")
active_langs = [dict(r) for r in res]
return active_langs
Language.register()
| mit | 630,789,722,860,901,400 | 37.895522 | 111 | 0.675365 | false |
spandanb/horizon | openstack_dashboard/dashboards/router/nexus1000v/views.py | 5 | 4922 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Abishek Subramanian, Cisco Systems, Inc.
# @author: Sergey Sudakovich, Cisco Systems, Inc.
import logging
from django.core import urlresolvers
from django.utils import datastructures
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.router.nexus1000v \
import forms as profileforms
from openstack_dashboard.dashboards.router.nexus1000v \
import tables as profiletables
LOG = logging.getLogger(__name__)
def _get_tenant_list(request):
tenants = []
try:
tenants, has_more = api.keystone.tenant_list(request)
except Exception:
msg = _('Unable to retrieve project information.')
exceptions.handle(request, msg)
return datastructures.SortedDict([(t.id, t) for t in tenants])
def _get_profiles(request, type_p):
try:
profiles = api.neutron.profile_list(request, type_p)
except Exception:
profiles = []
msg = _('Network Profiles could not be retrieved.')
exceptions.handle(request, msg)
if profiles:
# Set project name
tenant_dict = _get_tenant_list(request)
bindings = api.neutron.profile_bindings_list(request, type_p)
bindings_dict = datastructures.SortedDict(
[(b.profile_id, b.tenant_id) for b in bindings])
for p in profiles:
project_id = bindings_dict.get(p.id)
project = tenant_dict.get(project_id)
p.project_name = getattr(project, 'name', None)
return profiles
class NetworkProfileIndexView(tables.DataTableView):
table_class = profiletables.NetworkProfile
template_name = 'router/nexus1000v/network_profile/index.html'
def get_data(self):
return _get_profiles(self.request, 'network')
class PolicyProfileIndexView(tables.DataTableView):
table_class = profiletables.PolicyProfile
template_name = 'router/nexus1000v/policy_profile/index.html'
def get_data(self):
return _get_profiles(self.request, 'policy')
class IndexTabGroup(tabs.TabGroup):
slug = "group"
tabs = (NetworkProfileIndexView, PolicyProfileIndexView,)
class IndexView(tables.MultiTableView):
table_classes = (profiletables.NetworkProfile,
profiletables.PolicyProfile,)
template_name = 'router/nexus1000v/index.html'
def get_network_profile_data(self):
return _get_profiles(self.request, 'network')
def get_policy_profile_data(self):
return _get_profiles(self.request, 'policy')
class CreateNetworkProfileView(forms.ModalFormView):
form_class = profileforms.CreateNetworkProfile
template_name = 'router/nexus1000v/create_network_profile.html'
success_url = urlresolvers.reverse_lazy('horizon:router:nexus1000v:index')
class UpdateNetworkProfileView(forms.ModalFormView):
form_class = profileforms.UpdateNetworkProfile
template_name = 'router/nexus1000v/update_network_profile.html'
context_object_name = 'network_profile'
success_url = urlresolvers.reverse_lazy('horizon:router:nexus1000v:index')
def get_context_data(self, **kwargs):
context = super(UpdateNetworkProfileView,
self).get_context_data(**kwargs)
context["profile_id"] = self.kwargs['profile_id']
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
profile_id = self.kwargs['profile_id']
try:
profile = api.neutron.profile_get(self.request,
profile_id)
LOG.debug("Network Profile object=%s", profile)
return profile
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve network profile details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
profile = self._get_object()
return {'profile_id': profile['id'],
'name': profile['name'],
'segment_range': profile['segment_range'],
'segment_type': profile['segment_type'],
'physical_network': profile['physical_network']}
| apache-2.0 | 7,971,148,248,862,334,000 | 34.157143 | 78 | 0.678586 | false |
fvincenzo/mbed-os | tools/libraries.py | 8 | 4627 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tools.paths import MBED_RTX, RTOS_LIBRARIES, MBED_LIBRARIES, MBED_RPC,\
RTOS_ABSTRACTION, RPC_LIBRARY, USB, USB_LIBRARIES, USB_HOST,\
USB_HOST_LIBRARIES, FAT_FS, DSP_ABSTRACTION, DSP_CMSIS, DSP_LIBRARIES,\
SD_FS, FS_LIBRARY, ETH_SOURCES, LWIP_SOURCES, ETH_LIBRARY, UBLOX_SOURCES,\
UBLOX_LIBRARY, CELLULAR_SOURCES, CELLULAR_USB_SOURCES, CPPUTEST_SRC,\
CPPUTEST_PLATFORM_SRC, CPPUTEST_TESTRUNNER_SCR, CPPUTEST_LIBRARY,\
CPPUTEST_INC, CPPUTEST_PLATFORM_INC, CPPUTEST_TESTRUNNER_INC,\
CPPUTEST_INC_EXT
from tools.data.support import DEFAULT_SUPPORT
from tools.tests import TEST_MBED_LIB
LIBRARIES = [
# RTOS libraries
{
"id": "rtx",
"source_dir": MBED_RTX,
"build_dir": RTOS_LIBRARIES,
"dependencies": [MBED_LIBRARIES],
},
{
"id": "rtos",
"source_dir": RTOS_ABSTRACTION,
"build_dir": RTOS_LIBRARIES,
"dependencies": [MBED_LIBRARIES, MBED_RTX],
},
# RPC
{
"id": "rpc",
"source_dir": MBED_RPC,
"build_dir": RPC_LIBRARY,
"dependencies": [MBED_LIBRARIES],
},
# USB Device libraries
{
"id": "usb",
"source_dir": USB,
"build_dir": USB_LIBRARIES,
"dependencies": [MBED_LIBRARIES],
},
# USB Host libraries
{
"id": "usb_host",
"source_dir": USB_HOST,
"build_dir": USB_HOST_LIBRARIES,
"dependencies": [MBED_LIBRARIES, FAT_FS, MBED_RTX, RTOS_ABSTRACTION],
},
# DSP libraries
{
"id": "dsp",
"source_dir": [DSP_ABSTRACTION, DSP_CMSIS],
"build_dir": DSP_LIBRARIES,
"dependencies": [MBED_LIBRARIES]
},
# File system libraries
{
"id": "fat",
"source_dir": [FAT_FS, SD_FS],
"build_dir": FS_LIBRARY,
"dependencies": [MBED_LIBRARIES]
},
# Network libraries
{
"id": "eth",
"source_dir": [ETH_SOURCES, LWIP_SOURCES],
"build_dir": ETH_LIBRARY,
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES]
},
{
"id": "ublox",
"source_dir": [UBLOX_SOURCES, CELLULAR_SOURCES, CELLULAR_USB_SOURCES,
LWIP_SOURCES],
"build_dir": UBLOX_LIBRARY,
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES],
},
# Unit Testing library
{
"id": "cpputest",
"source_dir": [CPPUTEST_SRC, CPPUTEST_PLATFORM_SRC,
CPPUTEST_TESTRUNNER_SCR],
"build_dir": CPPUTEST_LIBRARY,
"dependencies": [MBED_LIBRARIES],
'inc_dirs': [CPPUTEST_INC, CPPUTEST_PLATFORM_INC,
CPPUTEST_TESTRUNNER_INC, TEST_MBED_LIB],
'inc_dirs_ext': [CPPUTEST_INC_EXT],
'macros': ["CPPUTEST_USE_MEM_LEAK_DETECTION=0",
"CPPUTEST_USE_STD_CPP_LIB=0", "CPPUTEST=1"],
},
]
LIBRARY_MAP = dict([(library['id'], library) for library in LIBRARIES])
class Library(object):
"""A library representation that allows for querying of support"""
def __init__(self, lib_id):
lib = LIBRARY_MAP[lib_id]
self.supported = lib.get("supported", DEFAULT_SUPPORT)
self.dependencies = lib.get("dependencies", None)
# Include dirs required by library build
self.inc_dirs = lib.get("inc_dirs", None)
# Include dirs required by others to use with this library
self.inc_dirs_ext = lib.get("inc_dirs_ext", None)
# Additional macros you want to define when building library
self.macros = lib.get("macros", None)
self.source_dir = lib["source_dir"]
self.build_dir = lib["build_dir"]
def is_supported(self, target, toolchain):
"""Check if a target toolchain combination is supported
Positional arguments:
target - the MCU or board
toolchain - the compiler
"""
if not hasattr(self, 'supported'):
return True
return (target.name in self.supported) and \
(toolchain in self.supported[target.name])
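# Illustrative sketch (not part of the upstream file): how a Library entry is meant
# to be queried. lib_id must be a key of LIBRARY_MAP; the target/toolchain pair is
# whatever the build was invoked with.
def _example_is_buildable(lib_id, target, toolchain):
    """Return True if the library identified by lib_id supports target/toolchain."""
    return Library(lib_id).is_supported(target, toolchain)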
| apache-2.0 | -2,152,962,828,375,320,300 | 30.910345 | 78 | 0.608386 | false |
geometalab/OSMDeepOD | src/data/orthofoto/other/multi_loader.py | 2 | 1892 | import logging
import time
from io import BytesIO
from multiprocessing.dummy import Pool as ThreadPool
import requests
from PIL import Image
from src.data.orthofoto.other.user_agent import UserAgent
class MultiLoader:
def __init__(self, urls, auth=None):
self.urls = urls
self.results = []
self.nb_threads = 10
self.nb_tile_per_trial = 40
self.auth = tuple() if auth is None else auth
self.user_agent = UserAgent()
self.logger = logging.getLogger(__name__)
def download(self):
results = []
nb_urls = len(self.urls)
for i in range(int(nb_urls / self.nb_tile_per_trial) + 1):
start = i * self.nb_tile_per_trial
end = start + self.nb_tile_per_trial
if end >= nb_urls:
end = nb_urls
url_part = self.urls[start:end]
result = self._try_download(url_part)
results += result
self.results = results
def _try_download(self, urls):
for i in range(4):
try:
results = self._download_async(urls)
return results
except Exception as e:
print("Tile download failed " + str(i) + " wait " + str(i * 10) + " " + str(e))
time.sleep(i * 10)
error_message = "Download of tiles have failed 4 times"
self.logger.error(error_message)
raise Exception(error_message)
def _download_async(self, urls):
pool = ThreadPool(self.nb_threads)
results = pool.map(self._download_image, urls)
pool.close()
pool.join()
return results
def _download_image(self, url):
response = requests.get(url, headers={'User-Agent': self.user_agent.random}, auth=self.auth)
img = Image.open(BytesIO(response.content))
img.filename = url
return img
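# Illustrative sketch (not part of the upstream file): typical use of MultiLoader.
# The URL below is a placeholder, not a real tile endpoint.
def _example_download(urls=None):
    """Download the given tile URLs and return the fetched PIL images."""
    loader = MultiLoader(urls or ['https://example.org/tiles/1.png'])
    loader.download()  # batches the URLs and retries each batch up to 4 times
    return loader.results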
| mit | 5,747,310,598,972,678,000 | 30.533333 | 100 | 0.576638 | false |
kubeflow/examples | github_issue_summarization/notebooks/trainer.py | 1 | 10766 | import json
import logging
import os
import sys
import numpy as np
import dill as dpickle
import pandas as pd
import tensorflow as tf
# TODO(https://github.com/kubeflow/examples/issues/280)
# TODO(https://github.com/kubeflow/examples/issues/196)
# We'd like to switch to importing keras from TensorFlow in order to support
# TF.Estimator but using tensorflow.keras we can't train a model either using
# Keras' fit function or using TF.Estimator.
import keras
from keras.callbacks import CSVLogger, ModelCheckpoint
from ktext.preprocess import processor
from sklearn.model_selection import train_test_split
from seq2seq_utils import load_decoder_inputs, load_encoder_inputs, load_text_processor, Seq2Seq_Inference # # pylint: disable=line-too-long
class Trainer(object): #pylint: disable=too-many-instance-attributes
def __init__(self, output_dir):
"""Construct the trainer.
Args:
output_dir: Directory where outputs should be written.
"""
if not output_dir:
raise ValueError("output dir can't be None.")
self.output_dir = output_dir
# Pull out the information needed for TF.Estimator.
self.tf_config = os.environ.get('TF_CONFIG', '{}')
self.tf_config_json = json.loads(self.tf_config)
self.cluster = self.tf_config_json.get('cluster')
self.job_name = self.tf_config_json.get('task', {}).get('type')
self.task_index = self.tf_config_json.get('task', {}).get('index')
# Files storing the preprocessors
self.body_pp_file = os.path.join(self.output_dir, 'body_pp.dpkl')
self.title_pp_file = os.path.join(self.output_dir, 'title_pp.dpkl')
# Files to store the processed data
self.preprocessed_titles = os.path.join(self.output_dir,
'train_title_vecs.npy')
self.preprocessed_bodies = os.path.join(self.output_dir,
'train_body_vecs.npy')
self.history = None
self.decoder_input_data = None
self.seq2seq_Model = None
self.decoder_target_data = None
self.test_df = None
self.encoder_input_data = None
self.title_pp = None
self.body_pp = None
def preprocess(self, data_file, num_samples=None):
"""Preprocess the input.
Trains preprocessors and splits the data into train and test sets.
Args:
data_file: The datafile to process
num_samples: Number of samples to use. Set to None to use
entire dataset.
"""
# We preprocess the data if we are the master or chief.
# Or if we aren't running distributed.
if self.job_name and self.job_name.lower() not in ["master", "chief"]:
return
# TODO(jlewi): The test data isn't being used for anything. How can
# we configure evaluation?
if num_samples:
sampled = pd.read_csv(data_file).sample(n=num_samples)
traindf, self.test_df = train_test_split(sampled, test_size=.10)
else:
traindf, self.test_df = train_test_split(pd.read_csv(data_file), test_size=.10)
# Print stats about the shape of the data.
logging.info('Train: %d rows %d columns', traindf.shape[0], traindf.shape[1])
train_body_raw = traindf.body.tolist()
train_title_raw = traindf.issue_title.tolist()
# Clean, tokenize, and apply padding / truncating such that each document
# length = 70. Also, retain only the top 8,000 words in the vocabulary and set
# the remaining words to 1 which will become common index for rare words.
self.body_pp = processor(keep_n=8000, padding_maxlen=70)
train_body_vecs = self.body_pp.fit_transform(train_body_raw)
logging.info('Example original body: %s', train_body_raw[0])
logging.info('Example body after pre-processing: %s', train_body_vecs[0])
self.title_pp = processor(append_indicators=True, keep_n=4500,
padding_maxlen=12, padding='post')
# process the title data
train_title_vecs = self.title_pp.fit_transform(train_title_raw)
logging.info('Example original title: %s', train_title_raw[0])
logging.info('Example title after pre-processing: %s', train_title_vecs[0])
# Save the preprocessor
with open(self.body_pp_file, 'wb') as f:
dpickle.dump(self.body_pp, f)
with open(self.title_pp_file, 'wb') as f:
dpickle.dump(self.title_pp, f)
# Save the processed data
np.save(self.preprocessed_titles, train_title_vecs)
np.save(self.preprocessed_bodies, train_body_vecs)
def build_model(self, learning_rate):
"""Build a keras model."""
logging.info("starting")
if self.job_name and self.job_name.lower() in ["ps"]:
logging.info("ps doesn't build model")
return
self.encoder_input_data, doc_length = load_encoder_inputs(
self.preprocessed_bodies)
self.decoder_input_data, self.decoder_target_data = load_decoder_inputs(
self.preprocessed_titles)
num_encoder_tokens, self.body_pp = load_text_processor(
self.body_pp_file)
num_decoder_tokens, self.title_pp = load_text_processor(
self.title_pp_file)
#arbitrarly set latent dimension for embedding and hidden units
latent_dim = 300
##### Define Model Architecture ######
########################
#### Encoder Model ####
encoder_inputs = keras.layers.Input(shape=(doc_length,), name='Encoder-Input')
# Word embeding for encoder (ex: Issue Body)
x = keras.layers.Embedding(
num_encoder_tokens, latent_dim, name='Body-Word-Embedding', mask_zero=False)(encoder_inputs)
x = keras.layers.BatchNormalization(name='Encoder-Batchnorm-1')(x)
# We do not need the `encoder_output` just the hidden state.
_, state_h = keras.layers.GRU(latent_dim, return_state=True, name='Encoder-Last-GRU')(x)
# Encapsulate the encoder as a separate entity so we can just
# encode without decoding if we want to.
encoder_model = keras.Model(inputs=encoder_inputs, outputs=state_h, name='Encoder-Model')
seq2seq_encoder_out = encoder_model(encoder_inputs)
########################
#### Decoder Model ####
decoder_inputs = keras.layers.Input(shape=(None,), name='Decoder-Input') # for teacher forcing
# Word Embedding For Decoder (ex: Issue Titles)
dec_emb = keras.layers.Embedding(
num_decoder_tokens,
latent_dim, name='Decoder-Word-Embedding',
mask_zero=False)(decoder_inputs)
dec_bn = keras.layers.BatchNormalization(name='Decoder-Batchnorm-1')(dec_emb)
# TODO(https://github.com/kubeflow/examples/issues/196):
# With TF.Estimtor we hit https://github.com/keras-team/keras/issues/9761
# and the model won't train.
decoder_gru = keras.layers.GRU(
latent_dim, return_state=True, return_sequences=True, name='Decoder-GRU')
decoder_gru_output, _ = decoder_gru(dec_bn, initial_state=[seq2seq_encoder_out])
x = keras.layers.BatchNormalization(name='Decoder-Batchnorm-2')(decoder_gru_output)
# Dense layer for prediction
decoder_dense = keras.layers.Dense(
num_decoder_tokens, activation='softmax', name='Final-Output-Dense')
decoder_outputs = decoder_dense(x)
########################
#### Seq2Seq Model ####
self.seq2seq_Model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)
self.seq2seq_Model.compile(
optimizer=keras.optimizers.Nadam(lr=learning_rate),
loss='sparse_categorical_crossentropy',)
# TODO(jlewi): Computing accuracy causes a dimension mismatch.
# tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [869] vs. [79,11] # pylint: disable=line-too-long
# [[{{node metrics/acc/Equal}} = Equal[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](metrics/acc/Reshape, metrics/acc/Cast)]] # pylint: disable=line-too-long
# metrics=['accuracy'])
self.seq2seq_Model.summary()
def train_keras(self,
output_model_h5,
base_name='tutorial_seq2seq', batch_size=1200, epochs=7):
"""Train using Keras.
This is an alternative to using the TF.Estimator API.
TODO(jlewi): The reason we added support for using Keras
was to debug whether we were hitting issue:
https://github.com/keras-team/keras/issues/9761 only with TF.Estimator.
"""
logging.info("Using base name: %s", base_name)
csv_logger = CSVLogger('{:}.log'.format(base_name))
model_checkpoint = ModelCheckpoint(
'{:}.epoch{{epoch:02d}}-val{{val_loss:.5f}}.hdf5'.format(
base_name), save_best_only=True)
self.history = self.seq2seq_Model.fit(
[self.encoder_input_data, self.decoder_input_data],
np.expand_dims(self.decoder_target_data, -1),
batch_size=batch_size,
epochs=epochs,
validation_split=0.12, callbacks=[csv_logger, model_checkpoint])
#############
# Save model.
#############
self.seq2seq_Model.save(output_model_h5)
def evaluate_keras(self):
"""Generates predictions on holdout set and calculates BLEU Score."""
seq2seq_inf = Seq2Seq_Inference(encoder_preprocessor=self.body_pp,
decoder_preprocessor=self.title_pp,
seq2seq_model=self.seq2seq_Model)
bleu_score = seq2seq_inf.evaluate_model(holdout_bodies=self.test_df.body.tolist(),
holdout_titles=self.test_df.issue_title.tolist(),
max_len_title=12)
logging.info("Bleu score: %s", bleu_score)
return bleu_score
  def train_estimator(self, max_steps=None, eval_steps=None):
    """Train the model using the TF.Estimator API.
    max_steps and eval_steps bound training and evaluation; None leaves them unbounded.
    """
if self.job_name:
cluster_spec = tf.train.ClusterSpec(self.cluster)
if self.job_name == "ps":
server = tf.train.Server(cluster_spec, job_name=self.job_name,
task_index=self.task_index)
server.join()
sys.exit(0)
cfg = tf.estimator.RunConfig(session_config=tf.ConfigProto(log_device_placement=False))
estimator = keras.estimator.model_to_estimator(
keras_model=self.seq2seq_Model, model_dir=self.output_dir,
config=cfg)
expanded = np.expand_dims(self.decoder_target_data, -1)
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'Encoder-Input': self.encoder_input_data,
'Decoder-Input': self.decoder_input_data},
y=expanded,
shuffle=False)
    train_spec = tf.estimator.TrainSpec(input_fn=input_fn,
                                        max_steps=max_steps)
    eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, throttle_secs=10,
                                      steps=eval_steps)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
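# Illustrative sketch (not part of the upstream example): the intended call sequence
# for the Keras training path. The CSV path, output directory and hyperparameters
# below are placeholders.
def _example_run(data_csv='github_issues.csv', output_dir='/tmp/issue-summary'):
  trainer = Trainer(output_dir=output_dir)
  trainer.preprocess(data_file=data_csv, num_samples=2000)
  trainer.build_model(learning_rate=0.001)
  trainer.train_keras(output_model_h5='seq2seq_model_tutorial.h5',
                      batch_size=1200, epochs=7)
  return trainer.evaluate_keras()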
| apache-2.0 | -4,417,197,483,078,284,000 | 38.874074 | 186 | 0.653632 | false |
tedi3231/openerp | openerp/addons/sale_crm/__init__.py | 69 | 1090 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import sale_crm
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,223,733,581,149,686,000 | 40.923077 | 78 | 0.620183 | false |
ElliotJH/sidereal | interface/mainview.py | 1 | 7475 | """The main view should be considered the default view, the one with all
of the spaceships and stuff around.
TODO this doc is clearly incomplete."""
# stdlib
import collections
import random
import math
# third party
import panda3d.core
import direct.task
# internal
# One thing I'm wondering about, is whether we implement the gui overlay
# in a seperate space. How does it intercept the clicks, and know which one
# is which? I guess we hope for panda magic.
# Right, in core homeworld, the camera is always focused on something, unless
# it's been destroyed. But my point that the camera always rotates and pivots
# around a central point. I'm sure we could implement "look around" later
# but first things need to rotate.
# things only rotate when the right mouse button is held down.
# Imagine we're on the surface of a sphere, which is centered around the point
# that we're looking at. Moving the mouse left rotates left from your
# perspective, and so on.
class MainView(object):
def __init__(self,showbase):
self.base = showbase
# The mainview has its own camera.
self.camera = panda3d.core.Camera('MainView camera')
self.camera_np = panda3d.core.NodePath(self.camera)
# A camera is its own node. Although we need to attach it to the
# tree to see anything.
self.camera_np.reparentTo(self.base.render)
self.focuspoint = (0,0,0)
self.zoom = 100
# where max, is maximum zoom out
# where min, is minimum zoon in
self.maxzoom = 200
self.minzoom = 10
# setting sensitivity to negative inverts the axis
self.horizontal_sensitivity = -0.5 # Higher is more precise
# As in, slower
self.vertical_sensitivity = 1
self.spherepoint = SpherePoint(self.zoom,0,0.5)
self.set_up_event_handlers()
def set_up_event_handlers(self):
self.base.accept('mouse3',self.watch_mouse)
self.base.accept('mouse3-up',self.stop_watching_mouse)
self.base.accept('wheel_up',self.adjust_zoom,[-10])
self.base.accept('wheel_down',self.adjust_zoom,[10])
#self.base.taskMgr.doMethodLater(0.5,self._randomise_spherepoint,'randomise')
def adjust_zoom(self,adjustment):
self.zoom += adjustment
self.zoom = max(self.zoom,self.minzoom)
self.zoom = min(self.zoom,self.maxzoom)
self.spherepoint.radius = self.zoom
def watch_mouse(self):
self.mouse_coords = ()
self.base.taskMgr.doMethodLater(0.01,self.mouse_monitor_task, 'main-view mouse watch')
def stop_watching_mouse(self):
self.base.taskMgr.remove('main-view mouse watch')
def mouse_monitor_task(self,task):
x = self.base.mouseWatcherNode.getMouseX()
y = self.base.mouseWatcherNode.getMouseY()
print x,y
# If the coords are empty, then skip this whole block
        if not self.mouse_coords:
self.mouse_coords = (x,y)
return direct.task.Task.cont # do the same next frame
dx = self.mouse_coords[0] - x
dy = self.mouse_coords[1] - y
print dx,dy
# then based on the dx,dy move the mainview's camera around its
# focused point. Preferable moving the mouse left, also rotates
# the camera to the left.
angle = self.spherepoint.angle
angle += dx / self.horizontal_sensitivity
angle %= math.pi * 2
self.spherepoint.angle = angle
vertical = self.spherepoint.vertical
vertical += dy / self.vertical_sensitivity
vertical = min(vertical,0.999999)
vertical = max(vertical,0.000001)
self.spherepoint.vertical = vertical
self.mouse_coords = (x,y)
print self.spherepoint
self.camera_np.setPos(*self.spherepoint.calculate())
self.camera_np.lookAt((0,0,0))
return direct.task.Task.again # do the same after delay
def _randomise_spherepoint(self,task):
self.spherepoint.vertical = random.random()
self.spherepoint.angle = random.random() * math.pi * 2
self.camera_np.setPos(*self.spherepoint.calculate())
self.camera_np.lookAt((0,0,0))
return direct.task.Task.again
class SpherePoint(object):
"""Manipulating a camera on a sphere's surface seems complicated.
As such, this class SHOULD be helpful in that.
Imagine the camera as a point on the sphere, which if you take a
2d flat horizontal slice is a circle. The camera is somewhere on
that circle, at a certain angle. Where you take the 2d slice
could be called the vertical.
The radius of the sphere should be obvious.
This sphere is centered on 0,0,0"""
def __init__(self,radius,angle,vertical):
"""
Radius is a positive number.
Angle is 0 < a < 2pi
Vertical ranges from 0 < v < 1
"""
self.radius = radius
self.angle = angle
self.vertical = vertical
def __repr__(self):
return "SpherePoint({0},{1},{2})".format(self.radius,self.angle,self.vertical)
def calculate(self):
slice_r = self.radius * math.sin(self.vertical * math.pi)
x = slice_r * math.sin(self.angle)
y = slice_r * math.cos(self.angle)
z = (2 * self.radius * self.vertical) - self.radius
return (x,y,z) # Remember, the center of this sphere is 0,0,0
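# Illustrative sketch (not part of the original module): how MainView's orbit maps
# (radius, angle, vertical) to world coordinates around the origin.
def _example_orbit_points(radius=100.0):
    """Return camera positions at the equator facing +Y, at the equator facing +X,
    and just below the top pole (vertical clamped the same way MainView does)."""
    return (SpherePoint(radius, 0.0, 0.5).calculate(),          # (0, radius, 0)
            SpherePoint(radius, math.pi / 2, 0.5).calculate(),  # (radius, ~0, 0)
            SpherePoint(radius, 0.0, 0.999999).calculate())     # near (0, 0, radius)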
class FocusManager(collections.MutableSet):
"""The FocusManager the utility for maintaining focus on one to many
game objects, with none being a special case.
It is treated as a Set of objects, which should (TODO change to MUST)
be ingame objects with coordinates. It can then determine the average
point to focus on from all of its component objects.
If objects leave the FocusManager (such as by being destroyed, or
de-focused, the FocusManager then recalculates its "average".
If all objects leave, then it will continue to stare at the last point
a la CalHomeworld behaviour."""
def __init__(self):
self._internal_set = set()
self.coord = (0,0,0)
def add(self,item):
self._internal_set.add(item)
self.coord = self._calculate_average()
def discard(self,item):
self._internal_set.discard(item)
# If len is 0, then it'll keep its last average_coord
if len(self) > 0:
self.coord = self._calculate_average()
def __len__(self):
return len(self._internal_set)
def __iter__(self):
return iter(self._internal_set)
def __contains__(self,item):
return item in self._internal_set
def _calculate_average(self):
"""Assumes that all objects in the set have a .coord attribute"""
# AVerageX and so on.
        avx = avy = avz = 0.0
for item in self:
avx += item.coord[0]
avy += item.coord[1]
avz += item.coord[2]
avx /= len(self)
avy /= len(self)
avz /= len(self)
print avx,avy,avz
return (avx,avy,avz)
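# Illustrative sketch (not part of the original module): FocusManager keeps its
# coord attribute at the average position of everything it holds; any object with
# a .coord tuple will do.
def _example_focus_average(game_objects):
    """Add each object to a FocusManager and return the averaged focus coordinate."""
    focus = FocusManager()
    for obj in game_objects:
        focus.add(obj)
    return focus.coord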
def _main():
import misc.teapot
import direct.showbase.ShowBase
base = direct.showbase.ShowBase.ShowBase()
base.disableMouse()
screen = misc.teapot.TeapotScreen(base)
screen.background()
screen.teapot()
screen.lights()
mainview = MainView(base)
base.win.getDisplayRegions()[1].setCamera(mainview.camera_np)
if __name__=='__main__':
_main()
| gpl-3.0 | -274,156,180,474,640,580 | 34.259434 | 94 | 0.641204 | false |
AlexanderWillner/BibLatex-Check | biblatex_check.py | 1 | 19201 | #!/usr/bin/env python
"""
BibLaTeX check on missing fields and consistent name conventions,
especially developed for requirements in Computer Science.
"""
__author__ = "Pez Cuckow"
__version__ = "0.1.3"
__credits__ = ["Pez Cuckow", "BibTex Check 0.2.0 by Fabian Beck"]
__license__ = "MIT"
__email__ = "email<at>pezcuckow.com"
####################################################################
# Properties (please change according to your needs)
####################################################################
# links
citeulikeUsername = "" # if no username is profided, no CiteULike links appear
citeulikeHref = "http://www.citeulike.org/user/" + \
citeulikeUsername + "/article/"
libraries = [("Scholar", "http://scholar.google.de/scholar?hl=en&q="),
("Google", "https://www.google.com/search?q="),
("DBLP", "http://dblp.org/search/index.php#query="),
("IEEE", "http://ieeexplore.ieee.org/search/searchresult.jsp?queryText="),
("ACM", "http://dl.acm.org/results.cfm?query="),
]
# fields that are required for a specific type of entry
requiredFields = {"article": ["author", "title", "journaltitle", "year/date"],
"book": ["author", "title", "year/date"],
"mvbook": "book",
"inbook": ["author", "title", "booktitle", "year/date"],
"bookinbook": "inbook",
"suppbook": "inbook",
"booklet": ["author/editor", "title", "year/date"],
"collection": ["editor", "title", "year/date"],
"mvcollection": "collection",
"incollection": ["author", "editor", "title", "booktitle", "year/date"],
"suppcollection": "incollection",
"manual": ["author/editor", "title", "year/date"],
"misc": ["author/editor", "title", "year/date"],
"online": ["author/editor", "title", "year/date", "url"],
"patent": ["author", "title", "number", "year/date"],
"periodical": ["editor", "title", "year/date"],
"suppperiodical": "article",
"proceedings": ["editor", "title", "year/date"],
"mvproceedings": "proceedings",
"inproceedings": ["author", "editor", "title", "booktitle", "year/date"],
"reference": "collection",
"mvreference": "collection",
"inreference": "incollection",
"report": ["author", "title", "type", "institution", "year/date"],
"thesis": ["author", "title", "type", "institution", "year/date"],
"unpublished": ["author", "title", "year/date"],
# semi aliases (differing fields)
"mastersthesis": ["author", "title", "institution", "year/date"],
"techreport": ["author", "title", "institution", "year/date"],
# other aliases
"conference": "inproceedings",
"electronic": "online",
"phdthesis": "mastersthesis",
"www": "online"
}
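# Illustrative helper (not part of the original script): values above that are plain
# strings are aliases, e.g. requiredFields["conference"] == "inproceedings", meaning
# the aliased type's field list applies. A standalone resolver looks like this:
def resolve_required_fields(entry_type):
    """Return the list of required fields for entry_type, following string aliases."""
    entry_fields = requiredFields.get(entry_type.lower(), [])
    while isinstance(entry_fields, str):
        entry_fields = requiredFields[entry_fields]
    return entry_fields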
####################################################################
import string
import re
import sys
from optparse import OptionParser
# Parse options
usage = sys.argv[
0] + " [-b|--bib=<input.bib>] [-a|--aux=<input.aux>] [-o|--output=<output.html>] [-v|--view] [-h|--help]"
parser = OptionParser(usage)
parser.add_option("-b", "--bib", dest="bibFile",
help="Bib File", metavar="input.bib", default="input.bib")
parser.add_option("-a", "--aux", dest="auxFile",
help="Aux File", metavar="input.aux", default="references.aux")
parser.add_option("-o", "--output", dest="htmlOutput",
help="HTML Output File", metavar="output.html", default="biblatex_check.html")
parser.add_option("-v", "--view", dest="view", action="store_true",
help="Open in Browser")
(options, args) = parser.parse_args()
auxFile = options.auxFile
bibFile = options.bibFile
htmlOutput = options.htmlOutput
view = options.view
# Backporting Python 3 open(encoding="utf-8") to Python 2
# based on http://stackoverflow.com/questions/10971033/backporting-python-3-openencoding-utf-8-to-python-2
if sys.version_info[0] > 2:
# py3k
pass
else:
# py2
import codecs
import warnings
reload(sys)
sys.setdefaultencoding('utf8')
def open(file, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True, opener=None):
if newline is not None:
warnings.warn('newline is not supported in py2')
if not closefd:
warnings.warn('closefd is not supported in py2')
if opener is not None:
warnings.warn('opener is not supported in py2')
return codecs.open(filename=file, mode=mode, encoding=encoding,
errors=errors, buffering=buffering)
# Find used refernece ID's only
usedIds = set()
try:
fInAux = open(auxFile, 'r', encoding="utf8")
for line in fInAux:
if line.startswith("\\citation"):
ids = line.split("{")[1].rstrip("} \n").split(", ")
for id in ids:
if (id != ""):
usedIds.add(id)
fInAux.close()
except IOError as e:
print ("WARNING: Aux file '" + auxFile +
"' doesn't exist -> not restricting entries")
try:
fIn = open(bibFile, 'r', encoding="utf8")
except IOError as e:
print("ERROR: Input bib file '" + bibFile +
"' doesn't exist or is not readable")
sys.exit()
# Go through and check all references
completeEntry = ""
currentId = ""
ids = []
currentType = ""
currentArticleId = ""
currentTitle = ""
fields = []
problems = []
subproblems = []
counterMissingFields = 0
counterFlawedNames = 0
counterWrongTypes = 0
counterNonUniqueId = 0
counterWrongFieldNames = 0
removePunctuationMap = dict((ord(char), None) for char in string.punctuation)
for line in fIn:
line = line.strip("\n")
if line.startswith("@"):
if currentId in usedIds or not usedIds:
            for fieldName, requiredFieldsType in requiredFields.items():
                if fieldName == currentType.lower():
                    currentrequiredFields = requiredFieldsType
                    # resolve aliases (e.g. "conference" -> "inproceedings") to a field list
                    while isinstance(currentrequiredFields, str):
                        currentrequiredFields = requiredFields[currentrequiredFields]
                    for requiredFieldsString in currentrequiredFields:
                        # support for author/editor syntax: only one of the
                        # slash-separated alternatives has to be present
                        typeFields = requiredFieldsString.split('/')
                        # none of the alternative fields is present
                        if set(typeFields).isdisjoint(fields):
                            subproblems.append(
                                "missing field '" + requiredFieldsString + "'")
                            counterMissingFields += 1
else:
subproblems = []
if currentId in usedIds or (currentId and not usedIds):
cleanedTitle = currentTitle.translate(removePunctuationMap)
problem = "<div id='" + currentId + \
"' class='problem severe" + str(len(subproblems)) + "'>"
problem += "<h2>" + currentId + " (" + currentType + ")</h2> "
problem += "<div class='links'>"
if citeulikeUsername:
problem += "<a href='" + citeulikeHref + \
currentArticleId + "' target='_blank'>CiteULike</a> |"
list = []
for name, site in libraries:
list.append(
" <a href='" + site + cleanedTitle + "' target='_blank'>" + name + "</a>")
problem += " | ".join(list)
problem += "</div>"
problem += "<div class='reference'>" + currentTitle
problem += "</div>"
problem += "<ul>"
for subproblem in subproblems:
problem += "<li>" + subproblem + "</li>"
problem += "</ul>"
problem += "<form class='problem_control'><label>checked</label><input type='checkbox' class='checked'/></form>"
problem += "<div class='bibtex_toggle'>Current BibLaTex Entry</div>"
problem += "<div class='bibtex'>" + completeEntry + "</div>"
problem += "</div>"
problems.append(problem)
fields = []
subproblems = []
currentId = line.split("{")[1].rstrip(",\n")
if currentId in ids:
subproblems.append("non-unique id: '" + currentId + "'")
counterNonUniqueId += 1
else:
ids.append(currentId)
currentType = line.split("{")[0].strip("@ ")
completeEntry = line + "<br />"
else:
if line != "":
completeEntry += line + "<br />"
if currentId in usedIds or not usedIds:
if "=" in line:
# biblatex is not case sensitive
field = line.split("=")[0].strip().lower()
fields.append(field)
value = line.split("=")[1].strip("{} ,\n")
if field == "author":
currentAuthor = filter(
lambda x: not (x in "\\\"{}"), value.split(" and ")[0])
if field == "citeulike-article-id":
currentArticleId = value
if field == "title":
currentTitle = re.sub(r'\}|\{', r'', value)
###############################################################
# Checks (please (de)activate/extend to your needs)
###############################################################
# check if type 'proceedings' might be 'inproceedings'
if currentType == "proceedings" and field == "pages":
subproblems.append(
"wrong type: maybe should be 'inproceedings' because entry has page numbers")
counterWrongTypes += 1
# check if abbreviations are used in journal titles
if currentType == "article" and (field == "journal" or field == "journaltitle"):
if field == "journal":
subproblems.append(
"wrong field: biblatex uses journaltitle, not journal")
counterWrongFieldNames += 1
if "." in line:
subproblems.append(
"flawed name: abbreviated journal title '" + value + "'")
counterFlawedNames += 1
# check booktitle format; expected format "ICBAB '13: Proceeding of the 13th International Conference on Bla and Blubb"
# if currentType == "inproceedings" and field == "booktitle":
# if ":" not in line or ("Proceedings" not in line and "Companion" not in line) or "." in line or " '" not in line or "workshop" in line or "conference" in line or "symposium" in line:
#subproblems.append("flawed name: inconsistent formatting of booktitle '"+value+"'")
#counterFlawedNames += 1
# check if title is capitalized (heuristic)
# if field == "title":
# for word in currentTitle.split(" "):
#word = word.strip(":")
# if len(word) > 7 and word[0].islower() and not "-" in word and not "_" in word and not "[" in word:
#subproblems.append("flawed name: non-capitalized title '"+currentTitle+"'")
#counterFlawedNames += 1
# break
###############################################################
fIn.close()
# Write out our HTML file
html = open(htmlOutput, 'w', encoding="utf8")
html.write("""<html>
<head>
<title>BibLatex Check</title>
<style>
body {
font-family: Calibri, Arial, Sans;
padding: 10px;
width: 1030px;
margin: 10 auto;
border-top: 1px solid black;
}
#title {
width: 720px;
border-bottom: 1px solid black;
}
#title h1 {
margin: 10px 0px;
}
#title h1 a {
color: black;
text-decoration: none;
}
#control {
clear: both;
}
#search {
float: left;
}
#search input {
width: 300px;
font-size: 14pt;
}
#mode {
text-align: right;
}
#mode label:first-child {
font-weight: bold;
}
#mode input {
margin-left: 20px;
}
.info {
margin-top: 10px;
padding: 10px;
background: #FAFADD;
width: 250px;
float: right;
box-shadow: 1px 1px 1px 1px #ccc;
clear: both;
}
.info h2 {
font-size: 12pt;
padding: 0px;
margin: 0px;
}
.problem {
margin-top: 10px;
margin-bottom: 10px;
padding: 10px;
background: #FFBBAA;
counter-increment: problem;
width: 700px;
border: 1px solid #993333;
border-left: 5px solid #993333;
box-shadow: 1px 1px 1px 1px #ccc;
float: left;
}
.active {
box-shadow: 5px 5px 3px 3px #ccc;
position: relative;
top: -2px;
}
.severe0 {
background: #FAFAFA;
border: 1px solid black;
border-left: 5px solid black;
}
.severe1 {
background: #FFEEDD;
}
.severe2 {
background: #FFDDCC;
}
.severe3 {
background: #FFCCBB;
}
.problem_checked {
border: 1px solid #339933;
border-left: 5px solid #339933;
}
.problem h2:before {
content: counter(problem) ". "; color: gray;
}
.problem h2 {
font-size: 12pt;
padding: 0px;
margin: 0px;
}
.problem .links {
float: right;
position:relative;
top: -22px;
}
.problem .links a {
color: #3333CC;
}
.problem .links a:visited {
color: #666666;
}
.problem .reference {
clear: both;
font-size: 9pt;
margin-left: 20px;
font-style:italic;
font-weight:bold;
}
.problem ul {
clear: both;
}
.problem .problem_control {
float: right;
margin: 0px;
padding: 0px;
}
.problem .bibtex_toggle{
text-decoration: underline;
font-size: 9pt;
cursor: pointer;
padding-top: 5px;
}
.problem .bibtex {
margin-top: 5px;
font-family: Monospace;
font-size: 8pt;
display: none;
border: 1px solid black;
background-color: #FFFFFF;
padding: 5px;
}
</style>
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.5/jquery.min.js"></script>
<script>
function isInProblemMode() {
return $('#mode_problems:checked').val() == 'problems'
}
function update() {
$('.problem').hide();
$('.problem[id*='+$('#search input').val()+']').show();
$('.problem .checked').each(function () {
if ($(this).attr('checked')) {
$(this).parents('.problem').addClass('problem_checked');
} else {
$(this).parents('.problem').removeClass('problem_checked');
}
});
if (isInProblemMode()) {
$('.severe0').hide();
$('.problem_checked').hide();
}
}
$(document).ready(function(){
$(".bibtex_toggle").click(function(event){
event.preventDefault();
$(this).next().slideToggle();
});
$('#search input').live('input', function() {
update();
});
$('#mode input').change(function() {
update();
});
$("#uncheck_button").click(function(){
$('.problem .checked').attr('checked',false);
localStorage.clear();
update();
});
$('.problem a').mousedown(function(event) {
$('.problem').removeClass('active');
$(this).parents('.problem').addClass('active');
});
$('.problem .checked').change(function(event) {
var problem = $(this).parents('.problem');
problem.toggleClass('problem_checked');
var checked = problem.hasClass('problem_checked');
localStorage.setItem(problem.attr('id'),checked);
if (checked && isInProblemMode()) {
problem.slideUp();
}
});
$('.problem .checked').each(function () {
$(this).attr('checked',localStorage.getItem($(this).parents('.problem').attr('id'))=='true');
});
update();
});
</script>
</head>
<body>
<div id="title">
<h1><a href='http://github.com/pezmc/BibLatex-Check'>BibLaTeX Check</a></h1>
<div id="control">
<form id="search"><input placeholder="search entry ID ..."/></form>
<form id="mode">
<label>show entries:</label>
<input type = "radio"
name = "mode"
id = "mode_problems"
value = "problems"
checked = "checked" />
<label for = "mode_problems">problems</label>
<input type = "radio"
name = "mode"
id = "mode_all"
value = "all" />
<label for = "mode_all">all</label>
<input type="button" value="uncheck all" id="uncheck_button"></button>
</form>
</div>
</div>
""")
problemCount = counterMissingFields + counterFlawedNames + counterWrongFieldNames + \
counterWrongTypes + counterNonUniqueId
html.write("<div class='info'><h2>Info</h2><ul>")
html.write("<li>bib file: " + bibFile + "</li>")
html.write("<li>aux file: " + auxFile + "</li>")
html.write("<li># entries: " + str(len(problems)) + "</li>")
html.write("<li># problems: " + str(problemCount) + "</li><ul>")
html.write("<li># missing fields: " + str(counterMissingFields) + "</li>")
html.write("<li># flawed names: " + str(counterFlawedNames) + "</li>")
html.write("<li># wrong types: " + str(counterWrongTypes) + "</li>")
html.write("<li># non-unique id: " + str(counterNonUniqueId) + "</li>")
html.write("<li># wrong field: " + str(counterWrongFieldNames) + "</li>")
html.write("</ul></ul></div>")
problems.sort()
for problem in problems:
html.write(problem)
html.write("</body></html>")
html.close()
if view:
import webbrowser
webbrowser.open(html.name)
print("SUCCESS: Report {} has been generated".format(htmlOutput))
| agpl-3.0 | -8,564,810,408,672,849,000 | 30.993127 | 204 | 0.514869 | false |
pajlada/pajbot | pajbot/tests/test_url_parser.py | 1 | 2264 | def test_is_subdomain():
from pajbot.modules.linkchecker import is_subdomain
assert is_subdomain("pajlada.se", "pajlada.se")
assert is_subdomain("test.pajlada.se", "pajlada.se")
assert not is_subdomain("test.pajlada.se", "pajlada.com")
assert not is_subdomain("kastaren.com", "roleplayer.se")
assert not is_subdomain("foo.bar.com", "foobar.com")
def test_is_subpath():
from pajbot.modules.linkchecker import is_subpath
assert is_subpath("/foo/", "/foo/")
assert is_subpath("/foo/bar", "/foo/")
assert not is_subpath("/foo/", "/bar/")
assert not is_subpath("/foo/", "/foo/bar")
def test_is_same_url():
from pajbot.modules.linkchecker import is_same_url, Url
assert is_same_url(Url("pajlada.se"), Url("pajlada.se/"))
assert not is_same_url(Url("pajlada.com"), Url("pajlada.se"))
assert not is_same_url(Url("pajlada.com"), Url("pajlada.com/abc"))
def test_find_unique_urls():
from pajbot.modules.linkchecker import find_unique_urls
assert find_unique_urls("pajlada.se test http://pajlada.se") == {"http://pajlada.se"}
assert find_unique_urls("pajlada.se pajlada.com foobar.se") == {
"http://pajlada.se",
"http://pajlada.com",
"http://foobar.se",
}
assert find_unique_urls("foobar.com foobar.com") == {"http://foobar.com"}
assert find_unique_urls("foobar.com foobar.se"), {"http://foobar.com" == "http://foobar.se"}
assert find_unique_urls("www.foobar.com foobar.se"), {"http://www.foobar.com" == "http://foobar.se"}
# TODO: Edge case, this behaviour should probably be changed. These URLs should be considered the same.
# Use is_same_url method?
assert find_unique_urls("pajlada.se/ pajlada.se"), {"http://pajlada.se/" == "http://pajlada.se"}
# TODO: The protocol of a URL is entirely thrown away, this behaviour should probably be changed.
assert find_unique_urls("https://pajlada.se/ https://pajlada.se") == {
"https://pajlada.se/",
"https://pajlada.se",
}
assert find_unique_urls("foo 192.168.0.1 bar") == {
"http://192.168.0.1",
}
assert find_unique_urls("omg this isn't chatting, this is meme-ing...my vanity") == set()
assert find_unique_urls("foo 1.40 bar") == set()
| mit | -2,901,317,349,210,714,000 | 37.372881 | 107 | 0.644876 | false |
mike820324/microProxy | microproxy/test/layer/test_manager.py | 1 | 7691 | import unittest
import sys
from mock import Mock
from tornado import gen, iostream
from microproxy.context import LayerContext, ServerContext
from microproxy.exception import (
DestStreamClosedError, SrcStreamClosedError, DestNotConnectedError
)
from microproxy.layer import manager as layer_manager
from microproxy.layer import (
SocksLayer, TransparentLayer, ReplayLayer, HttpProxyLayer,
TlsLayer, Http1Layer, Http2Layer, ForwardLayer
)
class TestLayerManager(unittest.TestCase):
def setUp(self):
super(TestLayerManager, self).setUp()
config = {
"mode": "socks",
"http_port": [],
"https_port": [],
"certfile": "microproxy/test/test.crt",
"keyfile": "microproxy/test/test.key"
}
self.server_state = ServerContext(config=config)
self.src_stream = Mock()
def test_get_socks_layer(self):
context = LayerContext(mode="socks", port=443)
layer = layer_manager.get_first_layer(context)
self.assertIsInstance(layer, SocksLayer)
@unittest.skipIf('linux' not in sys.platform, "TransparentLayer only in linux")
def test_get_transparent_layer_linux(self):
context = LayerContext(mode="transparent", port=443)
layer = layer_manager.get_first_layer(context)
self.assertIsInstance(layer, TransparentLayer)
@unittest.skipIf('linux' in sys.platform, "TransparentLayer only in linux")
def test_get_transparent_layer_non_linux(self):
context = LayerContext(mode="transparent", port=443)
with self.assertRaises(NotImplementedError):
layer_manager.get_first_layer(context)
def test_get_replay_layer(self):
context = LayerContext(mode="replay", port=443)
layer = layer_manager.get_first_layer(context)
self.assertIsInstance(layer, ReplayLayer)
def test_get_tls_layer_from_socks(self):
context = LayerContext(mode="socks", port=443)
socks_layer = SocksLayer(context)
layer = layer_manager._next_layer(self.server_state, socks_layer, context)
self.assertIsInstance(layer, TlsLayer)
@unittest.skipIf('linux' not in sys.platform, "TransparentLayer only in linux")
def test_get_tls_layer_from_transparent(self):
context = LayerContext(mode="socks", port=443)
transparent_layer = TransparentLayer(context)
layer = layer_manager._next_layer(self.server_state, transparent_layer, context)
self.assertIsInstance(layer, TlsLayer)
def test_get_http1_layer_from_socks_replay(self):
context = LayerContext(mode="socks", port=80)
socks_layer = SocksLayer(context)
layer = layer_manager._next_layer(self.server_state, socks_layer, context)
self.assertIsInstance(layer, Http1Layer)
context.scheme = "http"
replay_layer = ReplayLayer(context)
layer = layer_manager._next_layer(self.server_state, replay_layer, context)
self.assertIsInstance(layer, Http1Layer)
context.scheme = "https"
tls_layer = TlsLayer(self.server_state, context)
layer = layer_manager._next_layer(self.server_state, tls_layer, context)
self.assertIsInstance(layer, Http1Layer)
@unittest.skipIf('linux' not in sys.platform, "TransparentLayer only in linux")
def test_get_http1_layer_from_transparent(self):
context = LayerContext(mode="socks", port=80)
transparent_layer = TransparentLayer(context)
layer = layer_manager._next_layer(self.server_state, transparent_layer, context)
self.assertIsInstance(layer, Http1Layer)
def test_get_http2_layer(self):
context = LayerContext(mode="socks", port=443, scheme="h2")
replay_layer = ReplayLayer(context)
layer = layer_manager._next_layer(self.server_state, replay_layer, context)
self.assertIsInstance(layer, Http2Layer)
tls_layer = TlsLayer(self.server_state, context)
layer = layer_manager._next_layer(self.server_state, tls_layer, context)
self.assertIsInstance(layer, Http2Layer)
def test_get_forward_layer_from_socks_replay(self):
context = LayerContext(mode="socks", port=5555)
socks_layer = SocksLayer(context)
layer = layer_manager._next_layer(self.server_state, socks_layer, context)
self.assertIsInstance(layer, ForwardLayer)
context.scheme = "test"
replay_layer = ReplayLayer(context)
layer = layer_manager._next_layer(self.server_state, replay_layer, context)
self.assertIsInstance(layer, ForwardLayer)
context.scheme = "test"
tls_layer = TlsLayer(self.server_state, context)
layer = layer_manager._next_layer(self.server_state, tls_layer, context)
self.assertIsInstance(layer, ForwardLayer)
@unittest.skipIf('linux' not in sys.platform, "TransparentLayer only in linux")
def test_get_forward_layer_from_transparent(self):
context = LayerContext(mode="socks", port=5555)
transparent_layer = TransparentLayer(context)
layer = layer_manager._next_layer(self.server_state, transparent_layer, context)
self.assertIsInstance(layer, ForwardLayer)
def test_handle_layer_error(self):
context = LayerContext(
mode="socks", src_stream=self.src_stream, port=443, scheme="h2")
layer_manager._handle_layer_error(gen.TimeoutError("timeout"), context)
context.src_stream.close.assert_called_once_with()
context.src_stream.reset_mock()
layer_manager._handle_layer_error(DestNotConnectedError("stream closed"), context)
context.src_stream.close.assert_not_called()
context.src_stream.reset_mock()
layer_manager._handle_layer_error(DestStreamClosedError("stream closed"), context)
context.src_stream.close.assert_called_once_with()
context.src_stream.reset_mock()
layer_manager._handle_layer_error(SrcStreamClosedError("stream closed"), context)
context.src_stream.close.assert_not_called()
context.src_stream.reset_mock()
layer_manager._handle_layer_error(iostream.StreamClosedError("stream closed"), context)
context.src_stream.close.assert_called_once_with()
def test_handle_unhandled_layer_error(self):
context = LayerContext(
mode="socks", src_stream=Mock(), port=443, scheme="h2")
layer_manager._handle_layer_error(ValueError, context)
context.src_stream.close.assert_called_once_with()
def test_get_http_proxy_layer(self):
context = LayerContext(mode="http", port=80)
layer = layer_manager.get_first_layer(context)
self.assertIsInstance(layer, HttpProxyLayer)
def test_get_http_layer_from_http_proxy_layer(self):
context = LayerContext(mode="http", port=80)
http_proxy_layer = HttpProxyLayer(context)
layer = layer_manager._next_layer(
self.server_state, http_proxy_layer, context)
self.assertIsInstance(layer, Http1Layer)
def test_get_tls_layer_from_http_layer(self):
context = LayerContext(mode="http", scheme="https", port=80)
http_layer = Http1Layer(self.server_state, context)
layer = layer_manager._next_layer(
self.server_state, http_layer, context)
self.assertIsInstance(layer, TlsLayer)
def test_get_http_layer_from_http_layer(self):
context = LayerContext(mode="http", scheme="http", port=80)
http_layer = Http1Layer(self.server_state, context)
layer = layer_manager._next_layer(
self.server_state, http_layer, context)
self.assertIsInstance(layer, Http1Layer)
| mit | 1,768,103,212,269,279,700 | 41.258242 | 95 | 0.681966 | false |
DavidCain/mitoc-trips | ws/utils/perms.py | 1 | 3312 | import functools
from django.contrib.auth.models import Group, User
from ws import enums
# This is technically only accurate when starting the web server,
# but that's okay (new groups are created extremely rarely)
# This allows us to avoid repeatedly querying groups.
@functools.lru_cache(maxsize=None)
def all_group_names():
return set(Group.objects.values_list('name', flat=True))
def is_leader(user):
"""Return if the user is a trip leader.
Take advantage of the prefetched 'leaders' group for more efficient
querying of a user's leader status.
"""
return in_any_group(user, ['leaders'], allow_superusers=False)
def leader_on_trip(participant, trip, creator_allowed=False):
"""Return if the participant is leading this trip.
Optionally, the trip creator can be included even if they are not
leading the trip.
"""
if not participant:
return False
if participant in trip.leaders.all():
return True
return creator_allowed and participant == trip.creator
def chair_group(activity_enum):
if activity_enum == enums.Activity.WINTER_SCHOOL:
return 'WSC'
return activity_enum.value + '_chair'
def in_any_group(user, group_names, allow_superusers=True):
"""Return if the user belongs to any of the passed groups.
Group access control is used a lot in the app, so attempt to
use groups already present on the `user` object, or a cached list of all
group names. This will reduce needless queries.
"""
if not (user and user.is_authenticated):
return False
if allow_superusers and user.is_superuser:
search_groups = all_group_names()
else:
# Do this in raw Python to avoid n+1 queries
search_groups = {g.name for g in user.groups.all()}
return any(g in group_names for g in search_groups)
def make_chair(user, activity_enum):
"""Make the given user an activity chair!"""
group_name = chair_group(activity_enum) # Raises ValueError on invalid activity
Group.objects.get(name=group_name).user_set.add(user)
def is_chair(user, activity_enum, allow_superusers=True):
"""Return if the activity has chairs, and the user is one.
If the user is an admin, return True if and only if that activity
has chairs (e.g. even an admin can't be the chair of 'official events').
"""
if activity_enum is None: # (e.g. when the required activity is None)
return False
return in_any_group(user, [chair_group(activity_enum)], allow_superusers)
def chair_or_admin(user, activity_enum):
"""Return if the user is the chair of the activity, or if they're an admin.
This is needed because some activity types (open activities) don't have
any chairs by definition, but we still want to grant admins access as if
they were activity chairs.
"""
return True if user.is_superuser else is_chair(user, activity_enum, True)
def num_chairs(activity_enum):
group = chair_group(activity_enum)
return User.objects.filter(groups__name=group).count()
def chair_activities(user, allow_superusers=False):
"""All activities for which the user is the chair."""
return [
activity_enum
for activity_enum in enums.Activity
if is_chair(user, activity_enum, allow_superusers)
]
| gpl-3.0 | 7,713,661,299,016,247,000 | 32.12 | 84 | 0.697464 | false |
ryancoleman/autodock-vina | boost_1_54_0/tools/build/v2/build/alias.py | 45 | 2207 | # Copyright 2003, 2004, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Status: ported (danielw)
# Base revision: 56043
# This module defines the 'alias' rule and associated class.
#
# Alias is just a main target which returns its source targets without any
# processing. For example::
#
# alias bin : hello test_hello ;
# alias lib : helpers xml_parser ;
#
# Another important use of 'alias' is to conveniently group source files::
#
# alias platform-src : win.cpp : <os>NT ;
# alias platform-src : linux.cpp : <os>LINUX ;
# exe main : main.cpp platform-src ;
#
# Lastly, it's possible to create local alias for some target, with different
# properties::
#
# alias big_lib : : @/external_project/big_lib/<link>static ;
#
import targets
import property_set
from b2.manager import get_manager
from b2.util import metatarget
class AliasTarget(targets.BasicTarget):
def __init__(self, *args):
targets.BasicTarget.__init__(self, *args)
def construct(self, name, source_targets, properties):
return [property_set.empty(), source_targets]
def compute_usage_requirements(self, subvariant):
base = targets.BasicTarget.compute_usage_requirements(self, subvariant)
# Add source's usage requirement. If we don't do this, "alias" does not
# look like 100% alias.
return base.add(subvariant.sources_usage_requirements())
@metatarget
def alias(name, sources=[], requirements=[], default_build=[], usage_requirements=[]):
project = get_manager().projects().current()
targets = get_manager().targets()
targets.main_target_alternative(AliasTarget(
name, project,
targets.main_target_sources(sources, name, no_renaming=True),
targets.main_target_requirements(requirements or [], project),
targets.main_target_default_build(default_build, project),
targets.main_target_usage_requirements(usage_requirements or [], project)))
# Declares the 'alias' target. It will build sources, and return them unaltered.
get_manager().projects().add_rule("alias", alias)
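# Illustrative sketch (not part of the upstream module): the Jamfile declarations in
# the header comment correspond to direct calls to the 'alias' rule defined above,
# roughly:
#
#   alias('bin', sources=['hello', 'test_hello'])
#   alias('platform-src', sources=['win.cpp'], requirements=['<os>NT'])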
| apache-2.0 | -1,870,042,807,218,909,000 | 34.031746 | 86 | 0.698233 | false |
Gamebasis/3DGamebasisServer | GameData/blender-2.71-windows64/2.71/python/lib/site-packages/numpy/distutils/fcompiler/intel.py | 15 | 6536 | # http://developer.intel.com/software/products/compilers/flin/
from __future__ import division, absolute_import, print_function
import sys
from numpy.distutils.ccompiler import simple_version_match
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
compilers = ['IntelFCompiler', 'IntelVisualFCompiler',
'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler',
'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler']
def intel_version_match(type):
# Match against the important stuff in the version string
return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,))
class BaseIntelFCompiler(FCompiler):
def update_executables(self):
f = dummy_fortran_file()
self.executables['version_cmd'] = ['<F77>', '-FI', '-V', '-c',
f + '.f', '-o', f + '.o']
class IntelFCompiler(BaseIntelFCompiler):
compiler_type = 'intel'
compiler_aliases = ('ifort',)
description = 'Intel Fortran Compiler for 32-bit apps'
version_match = intel_version_match('32-bit|IA-32')
possible_executables = ['ifort', 'ifc']
executables = {
'version_cmd' : None, # set by update_executables
'compiler_f77' : [None, "-72", "-w90", "-w95"],
'compiler_f90' : [None],
'compiler_fix' : [None, "-FI"],
'linker_so' : ["<F90>", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fPIC']
module_dir_switch = '-module ' # Don't remove ending space!
module_include_switch = '-I'
def get_flags_free(self):
return ["-FR"]
def get_flags(self):
return ['-fPIC']
def get_flags_opt(self):
#return ['-i8 -xhost -openmp -fp-model strict']
return ['-xhost -openmp -fp-model strict']
def get_flags_arch(self):
return []
def get_flags_linker_so(self):
opt = FCompiler.get_flags_linker_so(self)
v = self.get_version()
if v and v >= '8.0':
opt.append('-nofor_main')
if sys.platform == 'darwin':
# Here, it's -dynamiclib
try:
idx = opt.index('-shared')
opt.remove('-shared')
except ValueError:
idx = 0
opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup', '-Wl,-framework,Python']
return opt
class IntelItaniumFCompiler(IntelFCompiler):
compiler_type = 'intele'
compiler_aliases = ()
description = 'Intel Fortran Compiler for Itanium apps'
version_match = intel_version_match('Itanium|IA-64')
possible_executables = ['ifort', 'efort', 'efc']
executables = {
'version_cmd' : None,
'compiler_f77' : [None, "-FI", "-w90", "-w95"],
'compiler_fix' : [None, "-FI"],
'compiler_f90' : [None],
'linker_so' : ['<F90>', "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
class IntelEM64TFCompiler(IntelFCompiler):
compiler_type = 'intelem'
compiler_aliases = ()
description = 'Intel Fortran Compiler for 64-bit apps'
version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit')
possible_executables = ['ifort', 'efort', 'efc']
executables = {
'version_cmd' : None,
'compiler_f77' : [None, "-FI"],
'compiler_fix' : [None, "-FI"],
'compiler_f90' : [None],
'linker_so' : ['<F90>', "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
def get_flags(self):
return ['-fPIC']
def get_flags_opt(self):
#return ['-i8 -xhost -openmp -fp-model strict']
return ['-xhost -openmp -fp-model strict']
def get_flags_arch(self):
return []
# Is there no difference in the version string between the above compilers
# and the Visual compilers?
class IntelVisualFCompiler(BaseIntelFCompiler):
compiler_type = 'intelv'
description = 'Intel Visual Fortran Compiler for 32-bit apps'
version_match = intel_version_match('32-bit|IA-32')
def update_executables(self):
f = dummy_fortran_file()
self.executables['version_cmd'] = ['<F77>', '/FI', '/c',
f + '.f', '/o', f + '.o']
ar_exe = 'lib.exe'
possible_executables = ['ifort', 'ifl']
executables = {
'version_cmd' : None,
'compiler_f77' : [None, "-FI", "-w90", "-w95"],
'compiler_fix' : [None, "-FI", "-4L72", "-w"],
'compiler_f90' : [None],
'linker_so' : ['<F90>', "-shared"],
'archiver' : [ar_exe, "/verbose", "/OUT:"],
'ranlib' : None
}
compile_switch = '/c '
object_switch = '/Fo' #No space after /Fo!
library_switch = '/OUT:' #No space after /OUT:!
module_dir_switch = '/module:' #No space after /module:
module_include_switch = '/I'
def get_flags(self):
opt = ['/nologo', '/MD', '/nbs', '/Qlowercase', '/us']
return opt
def get_flags_free(self):
return ["-FR"]
def get_flags_debug(self):
return ['/4Yb', '/d2']
def get_flags_opt(self):
return ['/O2']
def get_flags_arch(self):
return ["/arch:IA-32", "/QaxSSE3"]
class IntelItaniumVisualFCompiler(IntelVisualFCompiler):
compiler_type = 'intelev'
description = 'Intel Visual Fortran Compiler for Itanium apps'
version_match = intel_version_match('Itanium')
possible_executables = ['efl'] # XXX this is a wild guess
ar_exe = IntelVisualFCompiler.ar_exe
executables = {
'version_cmd' : None,
'compiler_f77' : [None, "-FI", "-w90", "-w95"],
'compiler_fix' : [None, "-FI", "-4L72", "-w"],
'compiler_f90' : [None],
'linker_so' : ['<F90>', "-shared"],
'archiver' : [ar_exe, "/verbose", "/OUT:"],
'ranlib' : None
}
class IntelEM64VisualFCompiler(IntelVisualFCompiler):
compiler_type = 'intelvem'
description = 'Intel Visual Fortran Compiler for 64-bit apps'
version_match = simple_version_match(start='Intel\(R\).*?64,')
def get_flags_arch(self):
return ["/arch:SSE2"]
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='intel')
compiler.customize()
print(compiler.get_version())
| gpl-3.0 | 4,380,250,213,658,595,300 | 30.882927 | 100 | 0.561812 | false |
TansyArron/pants | tests/python/pants_test/base/test_extension_loader.py | 4 | 10853 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import sys
import types
import unittest
import uuid
from contextlib import contextmanager
from pkg_resources import (Distribution, EmptyProvider, VersionConflict, WorkingSet, working_set,
yield_lines)
from pants.backend.core.tasks.task import Task
from pants.base.build_configuration import BuildConfiguration
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.exceptions import BuildConfigurationError
from pants.base.extension_loader import (PluginLoadOrderError, PluginNotFound, load_backend,
load_plugins)
from pants.base.target import Target
from pants.goal.goal import Goal
from pants.goal.task_registrar import TaskRegistrar
from pants.subsystem.subsystem import Subsystem
class MockMetadata(EmptyProvider):
def __init__(self, metadata):
self.metadata = metadata
def has_metadata(self, name):
return name in self.metadata
def get_metadata(self, name):
return self.metadata[name]
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
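# Usage sketch (illustrative, mirroring get_mock_plugin below): MockMetadata lets a
# test hand pkg_resources a fake entry_points.txt without a real dist on disk, e.g.
#   provider = MockMetadata({'entry_points.txt': '[pantsbuild.plugin]\n'
#                                                'register_goals = mymod:foo\n'})
#   dist = Distribution(project_name='demo', version='0.0.1', metadata=provider)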
class DummySubsystem1(Subsystem):
options_scope = 'dummy-subsystem1'
class DummySubsystem2(Subsystem):
options_scope = 'dummy-subsystem2'
class DummyTarget(Target):
@classmethod
def subsystems(cls):
return super(DummyTarget, cls).subsystems() + (DummySubsystem1, )
class DummyObject1(object):
# Test that registering an object with no subsystems() method succeeds.
pass
class DummyObject2(object):
@classmethod
def subsystems(cls):
return (DummySubsystem2, )
class DummyTask(Task):
def execute(self): return 42
class LoaderTest(unittest.TestCase):
def setUp(self):
self.build_configuration = BuildConfiguration()
self.working_set = WorkingSet()
for entry in working_set.entries:
self.working_set.add_entry(entry)
def tearDown(self):
Goal.clear()
@contextmanager
def create_register(self, build_file_aliases=None, register_goals=None, global_subsystems=None,
module_name='register'):
package_name = b'__test_package_{0}'.format(uuid.uuid4().hex)
self.assertFalse(package_name in sys.modules)
package_module = types.ModuleType(package_name)
sys.modules[package_name] = package_module
try:
register_module_fqn = b'{0}.{1}'.format(package_name, module_name)
register_module = types.ModuleType(register_module_fqn)
setattr(package_module, module_name, register_module)
sys.modules[register_module_fqn] = register_module
def register_entrypoint(function_name, function):
if function:
setattr(register_module, function_name, function)
register_entrypoint('build_file_aliases', build_file_aliases)
register_entrypoint('global_subsystems', global_subsystems)
register_entrypoint('register_goals', register_goals)
yield package_name
finally:
del sys.modules[package_name]
def assert_empty_aliases(self):
registered_aliases = self.build_configuration.registered_aliases()
self.assertEqual(0, len(registered_aliases.targets))
self.assertEqual(0, len(registered_aliases.objects))
self.assertEqual(0, len(registered_aliases.context_aware_object_factories))
self.assertEqual(self.build_configuration.subsystems(), set())
def test_load_valid_empty(self):
with self.create_register() as backend_package:
load_backend(self.build_configuration, backend_package)
self.assert_empty_aliases()
def test_load_valid_partial_aliases(self):
aliases = BuildFileAliases.create(targets={'bob': DummyTarget},
objects={'obj1': DummyObject1,
'obj2': DummyObject2})
with self.create_register(build_file_aliases=lambda: aliases) as backend_package:
load_backend(self.build_configuration, backend_package)
registered_aliases = self.build_configuration.registered_aliases()
self.assertEqual(DummyTarget, registered_aliases.targets['bob'])
self.assertEqual(DummyObject1, registered_aliases.objects['obj1'])
self.assertEqual(DummyObject2, registered_aliases.objects['obj2'])
self.assertEqual(self.build_configuration.subsystems(),
set([DummySubsystem1, DummySubsystem2]))
def test_load_valid_partial_goals(self):
def register_goals():
Goal.by_name('jack').install(TaskRegistrar('jill', DummyTask))
with self.create_register(register_goals=register_goals) as backend_package:
Goal.clear()
self.assertEqual(0, len(Goal.all()))
load_backend(self.build_configuration, backend_package)
self.assert_empty_aliases()
self.assertEqual(1, len(Goal.all()))
task_names = Goal.by_name('jack').ordered_task_names()
self.assertEqual(1, len(task_names))
task_name = task_names[0]
self.assertEqual('jill', task_name)
def test_load_invalid_entrypoint(self):
def build_file_aliases(bad_arg):
return BuildFileAliases.create()
with self.create_register(build_file_aliases=build_file_aliases) as backend_package:
with self.assertRaises(BuildConfigurationError):
load_backend(self.build_configuration, backend_package)
def test_load_invalid_module(self):
with self.create_register(module_name='register2') as backend_package:
with self.assertRaises(BuildConfigurationError):
load_backend(self.build_configuration, backend_package)
def test_load_missing_plugin(self):
with self.assertRaises(PluginNotFound):
self.load_plugins(['Foobar'])
def get_mock_plugin(self, name, version, reg=None, alias=None, after=None):
"""Make a fake Distribution (optionally with entry points)
Note the entry points do not actually point to code in the returned distribution --
the distribution does not even have a location and does not contain any code, just metadata.
A module is synthesized on the fly and installed into sys.modules under a random name.
If optional entry point callables are provided, those are added as methods to the module and
their name (foo/bar/baz in fake module) is added as the requested entry point to the mocked
metadata added to the returned dist.
:param str name: project_name for distribution (see pkg_resources)
:param str version: version for distribution (see pkg_resources)
:param callable reg: Optional callable for goal registration entry point
:param callable alias: Optional callable for build_file_aliases entry point
:param callable after: Optional callable for load_after list entry point
"""
plugin_pkg = b'demoplugin{0}'.format(uuid.uuid4().hex)
pkg = types.ModuleType(plugin_pkg)
sys.modules[plugin_pkg] = pkg
module_name = b'{0}.{1}'.format(plugin_pkg, 'demo')
plugin = types.ModuleType(module_name)
setattr(pkg, 'demo', plugin)
sys.modules[module_name] = plugin
metadata = {}
entry_lines = []
if reg is not None:
setattr(plugin, 'foo', reg)
entry_lines.append('register_goals = {}:foo\n'.format(module_name))
if alias is not None:
setattr(plugin, 'bar', alias)
entry_lines.append('build_file_aliases = {}:bar\n'.format(module_name))
if after is not None:
setattr(plugin, 'baz', after)
entry_lines.append('load_after = {}:baz\n'.format(module_name))
if entry_lines:
entry_data = '[pantsbuild.plugin]\n{}\n'.format('\n'.join(entry_lines))
metadata = {'entry_points.txt': entry_data}
return Distribution(project_name=name, version=version, metadata=MockMetadata(metadata))
def load_plugins(self, plugins):
load_plugins(self.build_configuration, plugins, self.working_set)
def test_plugin_load_and_order(self):
d1 = self.get_mock_plugin('demo1', '0.0.1', after=lambda: ['demo2'])
d2 = self.get_mock_plugin('demo2', '0.0.3')
self.working_set.add(d1)
# Attempting to load 'demo1' then 'demo2' should fail as 'demo1' requires 'after'=['demo2'].
with self.assertRaises(PluginLoadOrderError):
self.load_plugins(['demo1', 'demo2'])
# Attempting to load 'demo2' first should fail as it is not (yet) installed.
with self.assertRaises(PluginNotFound):
self.load_plugins(['demo2', 'demo1'])
# Installing demo2 and then loading in correct order should work though.
self.working_set.add(d2)
self.load_plugins(['demo2>=0.0.2', 'demo1'])
# But asking for a bad (not installed) version fails.
with self.assertRaises(VersionConflict):
self.load_plugins(['demo2>=0.0.5'])
def test_plugin_installs_goal(self):
def reg_goal():
Goal.by_name('plugindemo').install(TaskRegistrar('foo', DummyTask))
self.working_set.add(self.get_mock_plugin('regdemo', '0.0.1', reg=reg_goal))
# Start without the custom goal.
self.assertEqual(0, len(Goal.by_name('plugindemo').ordered_task_names()))
# Load plugin which registers custom goal.
self.load_plugins(['regdemo'])
# Now the custom goal exists.
self.assertEqual(1, len(Goal.by_name('plugindemo').ordered_task_names()))
self.assertEqual('foo', Goal.by_name('plugindemo').ordered_task_names()[0])
def test_plugin_installs_alias(self):
def reg_alias():
return BuildFileAliases.create(targets={'pluginalias': DummyTarget},
objects={'FROMPLUGIN1': DummyObject1,
'FROMPLUGIN2': DummyObject2})
self.working_set.add(self.get_mock_plugin('aliasdemo', '0.0.1', alias=reg_alias))
# Start with no aliases.
self.assert_empty_aliases()
# Now load the plugin which defines aliases.
self.load_plugins(['aliasdemo'])
# Aliases now exist.
registered_aliases = self.build_configuration.registered_aliases()
self.assertEqual(DummyTarget, registered_aliases.targets['pluginalias'])
self.assertEqual(DummyObject1, registered_aliases.objects['FROMPLUGIN1'])
self.assertEqual(DummyObject2, registered_aliases.objects['FROMPLUGIN2'])
self.assertEqual(self.build_configuration.subsystems(),
{DummySubsystem1, DummySubsystem2})
def test_subsystems(self):
def global_subsystems():
return {DummySubsystem1, DummySubsystem2}
with self.create_register(global_subsystems=global_subsystems) as backend_package:
load_backend(self.build_configuration, backend_package)
self.assertEqual(self.build_configuration.subsystems(),
{DummySubsystem1, DummySubsystem2})
| apache-2.0 | -4,638,824,095,656,135,000 | 36.815331 | 97 | 0.699622 | false |
arthur-e/OpenClimateGIS | src/openclimategis/util/ncconv/experimental/OLD_experimental/in_memory.py | 7 | 12826 | import os
from netCDF4 import Dataset
import itertools
from shapely.geometry.multipoint import MultiPoint
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
import copy
import datetime
from netcdftime.netcdftime import num2date
from collections import namedtuple
import pp
from shapely import iterops, wkt
import geojson
from numpy.ma.core import MaskedConstant
import subprocess
from shapely.ops import cascaded_union
#NC = '/home/bkoziol/git/OpenClimateGIS/bin/climate_data/wcrp_cmip3/pcmdi.ipcc4.bccr_bcm2_0.1pctto2x.run1.monthly.cl_A1_1.nc'
NC = '/home/bkoziol/git/OpenClimateGIS/bin/climate_data/maurer/bccr_bcm2_0.1.sresa1b.monthly.Prcp.1950.nc'
## all
#POLYINT = Polygon(((-99,39),(-94,38),(-94,40),(-100,39)))
## great lakes
POLYINT = [Polygon(((-90.35,40.55),(-80.80,40.55),(-80.80,49.87),(-90.35,49.87)))]
## two areas
#POLYINT = [wkt.loads('POLYGON ((-85.324076923076916 44.028020242914977,-84.280765182186229 44.16008502024291,-84.003429149797569 43.301663967611333,-83.607234817813762 42.91867611336032,-84.227939271255053 42.060255060728736,-84.941089068825903 41.307485829959511,-85.931574898785414 41.624441295546553,-85.588206477732783 43.011121457489871,-85.324076923076916 44.028020242914977))'),
# wkt.loads('POLYGON ((-89.24640080971659 46.061817813765174,-88.942651821862341 46.378773279352224,-88.454012145748976 46.431599190283393,-87.952165991902831 46.11464372469635,-88.163469635627521 45.190190283400803,-88.889825910931165 44.503453441295541,-88.770967611336033 43.552587044534405,-88.942651821862341 42.786611336032379,-89.774659919028338 42.760198380566798,-90.038789473684204 43.777097165991897,-89.735040485829956 45.097744939271251,-89.24640080971659 46.061817813765174))')]
TIMEINT = [datetime.datetime(1950,2,1),datetime.datetime(1950,4,30)]
AGGREGATE = True
CLIP = True
VAR = 'Prcp'
def make_poly(rtup,ctup):
return Polygon(((ctup[0],rtup[0]),
(ctup[0],rtup[1]),
(ctup[1],rtup[1]),
(ctup[1],rtup[0])))
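# Hedged example (not in the original script): make_poly takes (row_min, row_max)
# and (col_min, col_max) bound tuples (latitude bounds first, then longitude) and
# returns the rectangular grid-cell polygon, e.g.
#   make_poly((40.0, 41.0), (-90.0, -89.0)).bounds == (-90.0, 40.0, -89.0, 41.0)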
def is_masked(arg):
if isinstance(arg,MaskedConstant):
return None
else:
return arg
def itr_id(start=1):
while True:
try:
yield start
finally:
start += 1
#def get_numpy_data(self,time_indices=[],x_indices=[],y_indices=[]):
def get_numpy_data(variable,idxtime,idxrow,idxcol):
""" Returns multi-dimensional NumPy array extracted from a NC."""
# def _f(idx):
# return range(min(idx),max(idx)+1)
#
# idxtime = _f(idxtime)
# idxrow = _f(idxrow)
# idxcol = _f(idxcol)
data = variable[idxtime,idxrow,idxcol]
col_grid,row_grid = np.meshgrid(idxcol,idxrow)
# row_grid = np.flipud(row_grid)
return(data,row_grid,col_grid)
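# Illustrative call (an assumption about intended use, mirroring the code further
# down): npd, row_grid, col_grid = get_numpy_data(v, idxtime, idxrow, idxcol)
# slices the variable to a time x row x col block and returns index grids built
# with np.meshgrid, so npd[t, i, j] lines up with row_grid[i, j] and col_grid[i, j].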
#def itr_mask(mask):
# ix = xrange(mask.shape[0])
# jx = xrange(mask.shape[1])
# for ii,jj in itertools.product(ix,jx):
# if mask[ii,jj]:
# yield ii,jj
def itr_array(a):
ix = a.shape[0]
jx = a.shape[1]
for ii,jj in itertools.product(xrange(ix),xrange(jx)):
yield ii,jj
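# e.g. (illustrative) for an array of shape (2, 3), itr_array yields the index
# pairs (0,0), (0,1), (0,2), (1,0), (1,1), (1,2) in row-major order.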
#def weight_select(nitrs,igrid,npd,row_grid,col_grid):
# for l in igrid:
# row_select = row_grid == l['row']
# col_select = col_grid == l['col']
# select = row_select & col_select
# for ii in xrange(len(nitrs)):
# if len(idxtime) == 1:
# dd = npd[:,:]
# else:
# dd = npd[ii,:,:]
# v = float(dd[select])
# if AGGREGATE:
# v = v*l['weight']
# l['time'].update({timevec[ii]:v})
# return(igrid)
#class OcgPolygon(Polygon):
#
# def __init__(self,coords,row=None,col=None):
# self.orow = row
# self.ocol = col
# super(OcgPolygon,self).__init__(coords)
## return the rectangular boundary of the polygon
#polyenv = Polygon(list(POLYINT.envelope.exterior.coords))
## get the bounding coords
#min_col,min_row,max_col,max_row = polyenv.bounds
def dump(polygon):
ds = Dataset(NC,'r')
v = ds.variables[VAR]
timevec = num2date(ds.variables['time'][:],'days since 1950-01-01 00:00:00','proleptic_gregorian')
print('retrieving data...')
row = ds.variables['latitude'][:]
col = ds.variables['longitude'][:]
row_bnds = ds.variables['bounds_latitude'][:]
col_bnds = ds.variables['bounds_longitude'][:]
#x,y = np.meshgrid(col,row)
#plt.plot(x,y)
#plt.show()
#tdk
print('making arrays...')
min_col,min_row = np.meshgrid(col_bnds[:,0],row_bnds[:,0])
max_col,max_row = np.meshgrid(col_bnds[:,1],row_bnds[:,1])
real_col,real_row = np.meshgrid(np.arange(0,len(col)),np.arange(0,len(row)))
## make the meshgrid bounded by the envelope
#idx1 = row_bnds[:,0] >= min_row
#idx2 = row_bnds[:,1] <= max_row
#srow = row[idx1 & idx2]
#
#idx1 = col_bnds[:,0] >= min_col
#idx2 = col_bnds[:,1] <= max_col
#scol = col[idx1 & idx2]
#
#gcol,grow = np.meshgrid(scol,srow)
#Docg = namedtuple('Docg',['weight','row','col','geom'])
#class Docg(object):
#
# def __init__(self,weight=None,row=None,col=None,geom=None):
# self.weight = weight
# self.row = row
# self.col = col
# self.geom = geom
#grid_pt = MultiPoint([(c,r) for r,c in itertools.product(row,col)])
#print('spatial representation...')
#grid = MultiPolygon([make_poly(ii,jj) for ii,jj in itertools.product(row_bnds,col_bnds)])
#grid_pt = MultiPoint([(c,r) for r,c in itertools.product(row,col)])
#print(' intersects operation...')
#igrid = MultiPolygon(list(iterops.intersects(POLYINT,grid,True)))
#igrid_pt = MultiPoint(list(iterops.intersects(polyenv,grid_pt,True)))
#geoms = np.empty(min_row.shape,dtype=object)
#for ii,jj in itr_array(min_row):
# geoms[ii,jj] = make_poly((min_row[ii,jj],max_row[ii,jj]),(min_col[ii,jj],max_col[ii,jj]))
print('overlay...')
igrid = np.empty(min_row.shape,dtype=object)
weights = np.empty(min_row.shape)
for ii,jj in itr_array(min_row):
g = make_poly((min_row[ii,jj],max_row[ii,jj]),(min_col[ii,jj],max_col[ii,jj]))
if g.intersects(polygon):
prearea = g.area
if CLIP:
ng = g.intersection(polygon)
else:
ng = g
w = ng.area/prearea
if w > 0:
igrid[ii,jj] = ng
weights[ii,jj] = w
mask = weights > 0
weights = weights/weights.sum()
#for ii,jj in itertools.product(xrange(len(row_bnds)),xrange(len(col_bnds))):
# for jj in xrange(len(col_bnds)):
# grid.append(Docg(weight=None,row=ii,col=jj,geom=make_poly(row_bnds[ii],col_bnds[jj])))
# grid.append(dict(time={},weight=None,row=ii,col=jj,geom=make_poly(row_bnds[ii],col_bnds[jj])))
#grid_poly = MultiPolygon([make_poly(r,c) for r,c in itertools.product(row_bnds,col_bnds)])
#igrid = [p for p in grid if p['geom'].intersects(POLYINT)]
#tdk
#print('intersection...')
#igrid = []
#for l in grid:
# if l.geom.intersects(POLYINT):
# prearea = l.geom.area
# l.geom = l.geom.intersection(POLYINT)
## l.update(dict(weight=l['geom'].area/prearea))
# l.weight = l.geom.area/prearea
## igrid.append(l)
# if l['geom'].intersects(POLYINT):
# prearea = l['geom'].area
# l['geom'] = l['geom'].intersection(POLYINT)
## l.update(dict(weight=l['geom'].area/prearea))
# w = l['geom'].area/prearea
# if w > 0:
# l['weight'] = w
# igrid.append(l)
print('getting numpy data...')
idxtime = np.arange(0,len(timevec))[(timevec>=TIMEINT[0])*(timevec<=TIMEINT[1])]
def u(arg):
un = np.unique(arg)
return(np.arange(un.min(),un.max()+1))
idxrow = u(real_row[mask])
idxcol = u(real_col[mask])
def sub(idxrow,idxcol,arg):
return arg[idxrow.min():idxrow.max()+1,idxcol.min():idxcol.max()+1]
mask = sub(idxrow,idxcol,mask)
weights = sub(idxrow,idxcol,weights)
igrid = sub(idxrow,idxcol,igrid)
#idxrow = np.unique(real_row[mask])
#idxcol = np.unique(real_col[mask])
#for ii,jj in itr_mask(mask):
# idxrow.append(ii)
# idxcol.append(jj)
npd,row_grid,col_grid = get_numpy_data(v,idxtime,idxrow,idxcol)
### make weights array
#weights = np.empty((row_grid.shape))
#geoms = np.empty((row_grid.shape),dtype=object)
#for l in igrid:
# weights[l['row'],l['col']] = l['weight']
# geoms[l['row'],l['col']] = l['geom']
### make mask
#mask = weights > 0
## apply the mask
#mnpd = npd*mask
print('extracting data...')
features = []
ids = itr_id()
if AGGREGATE:
## provide the unioned geometry
geoms = igrid[mask]
unioned = geoms[0]
for ii,geom in enumerate(geoms):
if ii == 0: continue
unioned = unioned.union(geom)
## weight the data by area
weighted = npd*weights
# tdata = dict(zip([timevec[it] for it in idxtime],[weighted[ii,:,:].sum() for ii in range(weighted.shape[0])]))
for kk in range(len(idxtime)):
if kk == 0:
print('unioning geometry...')
## need to remove geometries that have masked data
lyr = weighted[kk,:,:]
geoms = igrid[mask*np.invert(lyr.mask)]
unioned = cascaded_union([p for p in geoms])
# unioned = geoms[0]
# for ii,geom in enumerate(geoms):
# if ii == 0: continue
# unioned = unioned.union(geom)
## generate the feature
feature = geojson.Feature(id=ids.next(),
geometry=unioned,
properties=dict({VAR:float(weighted[kk,:,:].sum()),
'timestamp':str(timevec[idxtime[kk]])}))
features.append(feature)
else:
for ii,jj in itr_array(row_grid):
if mask[ii,jj] == True:
data = npd[:,ii,jj]
data = [is_masked(da) for da in data]
# tdata = dict(zip([timevec[it] for it in idxtime],data))
# geom = igrid[ii,jj]
for kk in range(len(data)):
if data[kk] == None: continue
feature = geojson.Feature(id=ids.next(),
geometry=igrid[ii,jj],
properties=dict({VAR:float(data[kk]),
'timestamp':str(timevec[idxtime[kk]])}))
features.append(feature)
return(features)
features = []
for polygon in POLYINT:
features += dump(polygon)
print('dumping...')
fc = geojson.FeatureCollection(features)
with open('/tmp/out.geojson','w') as f:
f.write(geojson.dumps(fc))
args = ['ogr2ogr','-overwrite','-f','ESRI Shapefile', '/tmp/out.shp','/tmp/out.geojson','OGRGeoJSON']
subprocess.call(args)
## apply the weight dictionary list. this serves the dual purpose of
## removing unneeded values included in the block netCDF query.
#print('getting actual data...')
#ctr = 1
#for l in igrid:
# print(' {0} of {1} igrid...'.format(ctr,len(igrid)))
# ctr += 1
## row_select = row_grid == l['row']
## col_select = col_grid == l['col']
## select = row_select & col_select
# for ii in xrange(len(idxtime)):
# if len(idxtime) == 1:
# dd = npd[:,:]
# else:
# dd = npd[ii,:,:]
# v = float(dd[select])
# if AGGREGATE:
# v = v*l['weight']
# l['time'].update({timevec[ii]:v})
#jobs = []
#job_server = pp.Server(3)
##for rid in rids:
#job = job_server.submit(weight_select,(3,igrid,npd,row_grid,col_grid),(),("numpy",))
#jobs.append(job)
#for job in jobs:
# print job()
# log.info(job())
#log.info('success.')
#igrid_pt = MultiPoint([p for p in grid_pt if p.intersects(POLYINT)])
#igrid_poly = MultiPolygon([p for p in grid_poly if p.intersects(POLYINT)])
#
#itgrid_poly = MultiPolygon([p.intersection(POLYINT) for p in igrid_poly])
#poly = Polygon(((col_bnds[ii,0],row_bnds[jj,0]),
# (col_bnds[ii,0],row_bnds[jj,1]),
# (col_bnds[ii,1],row_bnds[jj,1]),
# (col_bnds[ii,1],row_bnds[jj,0])))
#gnp = np.array(igrid_pt)
#plt.scatter(gnp[:,0],gnp[:,1])
#plt.show() | bsd-3-clause | 6,097,657,104,443,060,000 | 35.753582 | 502 | 0.576797 | false |
nicolasnoble/grpc | src/re2/gen_build_yaml.py | 8 | 1331 | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import glob
import yaml
os.chdir(os.path.dirname(sys.argv[0]) + '/../..')
out = {}
out['libs'] = [{
#TODO @donnadionne: extracting the list of source files from bazel build to reduce duplication
'name':
're2',
'build':
'private',
'language':
'c',
'secure':
False,
'src':
sorted(
glob.glob('third_party/re2/re2/*.cc') + [
"third_party/re2/util/pcre.cc", "third_party/re2/util/rune.cc",
"third_party/re2/util/strutil.cc"
]),
'headers':
sorted(
glob.glob('third_party/re2/re2/*.h') +
glob.glob('third_party/re2/util/*.h')),
}]
print(yaml.dump(out))
| apache-2.0 | 8,217,073,239,722,683,000 | 26.729167 | 98 | 0.628099 | false |
ywcui1990/nupic.research | tests/algorithms/column_pooler_test.py | 3 | 46533 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import numpy as np
from nupic.data.generators.pattern_machine import PatternMachine
from htmresearch.algorithms.column_pooler import ColumnPooler
from htmresearch.support.column_pooler_mixin import ColumnPoolerMonitorMixin
class MonitoredColumnPooler(ColumnPoolerMonitorMixin, ColumnPooler):
pass
class ExtensiveColumnPoolerTest(unittest.TestCase):
"""
Algorithmic tests for the ColumnPooler region.
Each test actually tests multiple aspects of the algorithm. For more
atomic tests refer to column_pooler_unit_test.
The notation for objects is the following:
object{patternA, patternB, ...}
  In these tests, the proximally-fed SDRs are simulated as unique (location,
feature) pairs regardless of actual locations and features, unless stated
otherwise.
"""
inputWidth = 2048 * 8
numInputActiveBits = int(0.02 * inputWidth)
outputWidth = 4096
numOutputActiveBits = 40
seed = 42
def testNewInputs(self):
"""
    Checks that the behavior is correct when facing unseen inputs.
"""
self.init()
# feed the first input, a random SDR should be generated
initialPattern = self.generateObject(1)
self.learn(initialPattern, numRepetitions=1, newObject=True)
representation = self._getActiveRepresentation()
self.assertEqual(
len(representation),
self.numOutputActiveBits,
"The generated representation is incorrect"
)
# feed a new input for the same object, the previous SDR should persist
newPattern = self.generateObject(1)
self.learn(newPattern, numRepetitions=1, newObject=False)
newRepresentation = self._getActiveRepresentation()
self.assertNotEqual(initialPattern, newPattern)
self.assertEqual(
newRepresentation,
representation,
"The SDR did not persist when learning the same object"
)
# without sensory input, the SDR should persist as well
emptyPattern = [set()]
self.learn(emptyPattern, numRepetitions=1, newObject=False)
newRepresentation = self._getActiveRepresentation()
self.assertEqual(
newRepresentation,
representation,
"The SDR did not persist after an empty input."
)
def testLearnSinglePattern(self):
"""
A single pattern is learnt for a single object.
Objects: A{X, Y}
"""
self.init()
object = self.generateObject(1)
self.learn(object, numRepetitions=2, newObject=True)
# check that the active representation is sparse
representation = self._getActiveRepresentation()
self.assertEqual(
len(representation),
self.numOutputActiveBits,
"The generated representation is incorrect"
)
# check that the pattern was correctly learnt
self.pooler.reset()
self.infer(feedforwardPattern=object[0])
self.assertEqual(
self._getActiveRepresentation(),
representation,
"The pooled representation is not stable"
)
# present new pattern for same object
# it should be mapped to the same representation
newPattern = [self.generatePattern()]
self.learn(newPattern, numRepetitions=2, newObject=False)
    # check that the new pattern maps to the same object representation
newRepresentation = self._getActiveRepresentation()
self.assertEqual(
newRepresentation,
representation,
"The new pattern did not map to the same object representation"
)
# check that the pattern was correctly learnt and is stable
self.pooler.reset()
self.infer(feedforwardPattern=object[0])
self.assertEqual(
self._getActiveRepresentation(),
representation,
"The pooled representation is not stable"
)
def testLearnSingleObject(self):
"""
Many patterns are learnt for a single object.
Objects: A{P, Q, R, S, T}
"""
self.init()
object = self.generateObject(numPatterns=5)
self.learn(object, numRepetitions=2, randomOrder=True, newObject=True)
representation = self._getActiveRepresentation()
# check that all patterns map to the same object
for pattern in object:
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representation,
"The pooled representation is not stable"
)
# if activity stops, check that the representation persists
self.infer(feedforwardPattern=set())
self.assertEqual(
self._getActiveRepresentation(),
representation,
"The pooled representation did not persist"
)
def testLearnTwoObjectNoCommonPattern(self):
"""
Same test as before, using two objects, without common pattern.
    Objects: A{P, Q, R, S, T}   B{V, W, X, Y, Z}
"""
self.init()
objectA = self.generateObject(numPatterns=5)
self.learn(objectA, numRepetitions=3, randomOrder=True, newObject=True)
representationA = self._getActiveRepresentation()
objectB = self.generateObject(numPatterns=5)
self.learn(objectB, numRepetitions=3, randomOrder=True, newObject=True)
representationB = self._getActiveRepresentation()
self.assertNotEqual(representationA, representationB)
# check that all patterns map to the same object
for pattern in objectA:
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA,
"The pooled representation for the first object is not stable"
)
# check that all patterns map to the same object
for pattern in objectB:
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationB,
"The pooled representation for the second object is not stable"
)
# feed union of patterns in object A
pattern = objectA[0] | objectA[1]
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA,
"The active representation is incorrect"
)
# feed unions of patterns in objects A and B
pattern = objectA[0] | objectB[0]
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA | representationB,
"The active representation is incorrect"
)
def testLearnTwoObjectsOneCommonPattern(self):
"""
Same test as before, except the two objects share a pattern
    Objects: A{P, Q, R, S, T}   B{P, W, X, Y, Z}
"""
self.init()
objectA = self.generateObject(numPatterns=5)
self.learn(objectA, numRepetitions=3, randomOrder=True, newObject=True)
representationA = self._getActiveRepresentation()
objectB = self.generateObject(numPatterns=5)
objectB[0] = objectA[0]
self.learn(objectB, numRepetitions=3, randomOrder=True, newObject=True)
representationB = self._getActiveRepresentation()
self.assertNotEqual(representationA, representationB)
# very small overlap
self.assertLessEqual(len(representationA & representationB), 3)
# check that all patterns except the common one map to the same object
for pattern in objectA[1:]:
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA,
"The pooled representation for the first object is not stable"
)
# check that all patterns except the common one map to the same object
for pattern in objectB[1:]:
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationB,
"The pooled representation for the second object is not stable"
)
# feed shared pattern
pattern = objectA[0]
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA | representationB,
"The active representation is incorrect"
)
# feed union of patterns in object A
pattern = objectA[1] | objectA[2]
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA,
"The active representation is incorrect"
)
# feed unions of patterns in objects A and B
pattern = objectA[1] | objectB[1]
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA | representationB,
"The active representation is incorrect"
)
def testLearnThreeObjectsOneCommonPattern(self):
"""
Same test as before, with three objects
    Objects: A{P, Q, R, S, T}   B{P, W, X, Y, Z}   C{W, H, I, K, L}
"""
self.init()
objectA = self.generateObject(numPatterns=5)
self.learn(objectA, numRepetitions=3, randomOrder=True, newObject=True)
representationA = self._getActiveRepresentation()
objectB = self.generateObject(numPatterns=5)
objectB[0] = objectA[0]
self.learn(objectB, numRepetitions=3, randomOrder=True, newObject=True)
representationB = self._getActiveRepresentation()
objectC = self.generateObject(numPatterns=5)
objectC[0] = objectB[1]
self.learn(objectC, numRepetitions=3, randomOrder=True, newObject=True)
representationC = self._getActiveRepresentation()
    self.assertNotEqual(representationA, representationB)
    self.assertNotEqual(representationA, representationC)
    self.assertNotEqual(representationB, representationC)
# very small overlap
self.assertLessEqual(len(representationA & representationB), 3)
self.assertLessEqual(len(representationB & representationC), 3)
self.assertLessEqual(len(representationA & representationC), 3)
# check that all patterns except the common one map to the same object
for pattern in objectA[1:]:
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA,
"The pooled representation for the first object is not stable"
)
    # check that all patterns except the common ones map to the same object
for pattern in objectB[2:]:
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationB,
"The pooled representation for the second object is not stable"
)
# check that all patterns except the common one map to the same object
for pattern in objectC[1:]:
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationC,
"The pooled representation for the third object is not stable"
)
# feed shared pattern between A and B
pattern = objectA[0]
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA | representationB,
"The active representation is incorrect"
)
# feed shared pattern between B and C
pattern = objectB[1]
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationB | representationC,
"The active representation is incorrect"
)
# feed union of patterns in object A
pattern = objectA[1] | objectA[2]
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA,
"The active representation is incorrect"
)
# feed unions of patterns to activate all objects
pattern = objectA[1] | objectB[1]
self.pooler.reset()
self.infer(feedforwardPattern=pattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA | representationB | representationC,
"The active representation is incorrect"
)
def testLearnThreeObjectsOneCommonPatternSpatialNoise(self):
"""
    Same test as before, with three objects, but with 5% spatial noise added to
    the sensed patterns during inference.
    Objects: A{P, Q, R, S, T}   B{P, W, X, Y, Z}   C{W, H, I, K, L}
"""
self.init()
objectA = self.generateObject(numPatterns=5)
self.learn(objectA, numRepetitions=3, randomOrder=True, newObject=True)
representationA = self._getActiveRepresentation()
objectB = self.generateObject(numPatterns=5)
objectB[0] = objectA[0]
self.learn(objectB, numRepetitions=3, randomOrder=True, newObject=True)
representationB = self._getActiveRepresentation()
objectC = self.generateObject(numPatterns=5)
objectC[0] = objectB[1]
self.learn(objectC, numRepetitions=3, randomOrder=True, newObject=True)
representationC = self._getActiveRepresentation()
    self.assertNotEqual(representationA, representationB)
    self.assertNotEqual(representationA, representationC)
    self.assertNotEqual(representationB, representationC)
# very small overlap
self.assertLessEqual(len(representationA & representationB), 3)
self.assertLessEqual(len(representationB & representationC), 3)
self.assertLessEqual(len(representationA & representationC), 3)
# check that all patterns except the common one map to the same object
for pattern in objectA[1:]:
noisyPattern = self.proximalPatternMachine.addNoise(pattern, 0.05)
self.pooler.reset()
self.infer(feedforwardPattern=noisyPattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA,
"The pooled representation for the first object is not stable"
)
    # check that all patterns except the common ones map to the same object
for pattern in objectB[2:]:
noisyPattern = self.proximalPatternMachine.addNoise(pattern, 0.05)
self.pooler.reset()
self.infer(feedforwardPattern=noisyPattern)
self.assertEqual(
self._getActiveRepresentation(),
representationB,
"The pooled representation for the second object is not stable"
)
# check that all patterns except the common one map to the same object
for pattern in objectC[1:]:
noisyPattern = self.proximalPatternMachine.addNoise(pattern, 0.05)
self.pooler.reset()
self.infer(feedforwardPattern=noisyPattern)
self.assertEqual(
self._getActiveRepresentation(),
representationC,
"The pooled representation for the third object is not stable"
)
# feed shared pattern between A and B
pattern = objectA[0]
noisyPattern = self.proximalPatternMachine.addNoise(pattern, 0.05)
self.pooler.reset()
self.infer(feedforwardPattern=noisyPattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA | representationB,
"The active representation is incorrect"
)
# feed shared pattern between B and C
pattern = objectB[1]
noisyPattern = self.proximalPatternMachine.addNoise(pattern, 0.05)
self.pooler.reset()
self.infer(feedforwardPattern=noisyPattern)
self.assertEqual(
self._getActiveRepresentation(),
representationB | representationC,
"The active representation is incorrect"
)
# feed union of patterns in object A
pattern = objectA[1] | objectA[2]
noisyPattern = self.proximalPatternMachine.addNoise(pattern, 0.05)
self.pooler.reset()
self.infer(feedforwardPattern=noisyPattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA,
"The active representation is incorrect"
)
# feed unions of patterns to activate all objects
pattern = objectA[1] | objectB[1]
noisyPattern = self.proximalPatternMachine.addNoise(pattern, 0.05)
self.pooler.reset()
self.infer(feedforwardPattern=noisyPattern)
self.assertEqual(
self._getActiveRepresentation(),
representationA | representationB | representationC,
"The active representation is incorrect"
)
def testInferObjectOverTime(self):
"""Infer an object after touching only ambiguous points."""
self.init()
patterns = [self.generatePattern() for _ in xrange(3)]
objectA = [patterns[0], patterns[1]]
objectB = [patterns[1], patterns[2]]
objectC = [patterns[2], patterns[0]]
self.learn(objectA, numRepetitions=3, newObject=True)
representationA = set(self.pooler.getActiveCells())
self.learn(objectB, numRepetitions=3, newObject=True)
representationB = set(self.pooler.getActiveCells())
self.learn(objectC, numRepetitions=3, newObject=True)
representationC = set(self.pooler.getActiveCells())
self.pooler.reset()
self.infer(patterns[0])
self.assertEqual(set(self.pooler.getActiveCells()),
representationA | representationC)
self.infer(patterns[1])
self.assertEqual(set(self.pooler.getActiveCells()),
representationA)
def testLearnOneObjectInTwoColumns(self):
"""Learns one object in two different columns."""
self.init(numCols=2)
neighborsIndices = [[1], [0]]
objectA = self.generateObject(numPatterns=5, numCols=2)
# learn object
self.learnMultipleColumns(
objectA,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
objectARepresentations = self._getActiveRepresentations()
for pooler in self.poolers:
pooler.reset()
for patterns in objectA:
for i in xrange(3):
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=patterns,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
if i > 0:
self.assertEqual(activeRepresentations,
self._getActiveRepresentations())
self.assertEqual(objectARepresentations,
self._getActiveRepresentations())
def testLearnTwoObjectsInTwoColumnsNoCommonPattern(self):
"""Learns two objects in two different columns."""
self.init(numCols=2)
neighborsIndices = [[1], [0]]
objectA = self.generateObject(numPatterns=5, numCols=2)
objectB = self.generateObject(numPatterns=5, numCols=2)
# learn object
self.learnMultipleColumns(
objectA,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
activeRepresentationsA = self._getActiveRepresentations()
# learn object
self.learnMultipleColumns(
objectB,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True,
)
activeRepresentationsB = self._getActiveRepresentations()
for pooler in self.poolers:
pooler.reset()
# check inference for object A
for patternsA in objectA:
for i in xrange(3):
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=patternsA,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
self.assertEqual(
activeRepresentationsA,
self._getActiveRepresentations()
)
for pooler in self.poolers:
pooler.reset()
# check inference for object B
for patternsB in objectB:
for i in xrange(3):
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=patternsB,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices
)
self.assertEqual(
activeRepresentationsB,
self._getActiveRepresentations()
)
def testLearnTwoObjectsInTwoColumnsOneCommonPattern(self):
"""Learns two objects in two different columns, with a common pattern."""
self.init(numCols=2)
neighborsIndices = [[1], [0]]
objectA = self.generateObject(numPatterns=5, numCols=2)
objectB = self.generateObject(numPatterns=5, numCols=2)
# second pattern in column 0 is shared
objectB[1][0] = objectA[1][0]
# learn object
self.learnMultipleColumns(
objectA,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
activeRepresentationsA = self._getActiveRepresentations()
# learn object
self.learnMultipleColumns(
objectB,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
activeRepresentationsB = self._getActiveRepresentations()
# check inference for object A
# for the first pattern, the distal predictions won't be correct
# for the second one, the prediction will be unique thanks to the
# distal predictions from the other column which has no ambiguity
for pooler in self.poolers:
pooler.reset()
for patternsA in objectA:
for i in xrange(3):
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=patternsA,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
self.assertEqual(
activeRepresentationsA,
self._getActiveRepresentations()
)
for pooler in self.poolers:
pooler.reset()
# check inference for object B
for patternsB in objectB:
for i in xrange(3):
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=patternsB,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices
)
self.assertEqual(
activeRepresentationsB,
self._getActiveRepresentations()
)
def testLearnTwoObjectsInTwoColumnsOneCommonPatternEmptyFirstInput(self):
"""Learns two objects in two different columns, with a common pattern."""
self.init(numCols=2)
neighborsIndices = [[1], [0]]
objectA = self.generateObject(numPatterns=5, numCols=2)
objectB = self.generateObject(numPatterns=5, numCols=2)
# second pattern in column 0 is shared
objectB[1][0] = objectA[1][0]
# learn object
self.learnMultipleColumns(
objectA,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
activeRepresentationsA = self._getActiveRepresentations()
# learn object
self.learnMultipleColumns(
objectB,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
activeRepresentationsB = self._getActiveRepresentations()
# check inference for object A
for pooler in self.poolers:
pooler.reset()
firstPattern = True
for patternsA in objectA:
activeRepresentations = self._getActiveRepresentations()
if firstPattern:
self.inferMultipleColumns(
feedforwardPatterns=[set(), patternsA[1]],
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
desiredRepresentation = [set(), activeRepresentationsA[1]]
else:
self.inferMultipleColumns(
feedforwardPatterns=patternsA,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
desiredRepresentation = activeRepresentationsA
self.assertEqual(
desiredRepresentation,
self._getActiveRepresentations()
)
def testPersistence(self):
"""After learning, representation should persist in L2 without input."""
self.init(numCols=2)
neighborsIndices = [[1], [0]]
objectA = self.generateObject(numPatterns=5, numCols=2)
# learn object
self.learnMultipleColumns(
objectA,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
objectARepresentations = self._getActiveRepresentations()
for pooler in self.poolers:
pooler.reset()
for patterns in objectA:
for i in xrange(3):
# replace third pattern for column 2 by empty pattern
if i == 2:
patterns[1] = set()
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=patterns,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
if i > 0:
self.assertEqual(activeRepresentations,
self._getActiveRepresentations())
self.assertEqual(objectARepresentations,
self._getActiveRepresentations())
def testLateralDisambiguation(self):
"""Lateral disambiguation using a constant simulated distal input."""
self.init(overrides={
"lateralInputWidths": [self.inputWidth],
})
objectA = self.generateObject(numPatterns=5)
lateralInputA = [[set()]] + [[self.generatePattern()] for _ in xrange(4)]
self.learn(objectA,
lateralPatterns=lateralInputA,
numRepetitions=3,
randomOrder=True,
newObject=True)
representationA = self._getActiveRepresentation()
objectB = self.generateObject(numPatterns=5)
objectB[3] = objectA[3]
lateralInputB = [[set()]] + [[self.generatePattern()] for _ in xrange(4)]
self.learn(objectB,
lateralPatterns=lateralInputB,
numRepetitions=3,
randomOrder=True,
newObject=True)
representationB = self._getActiveRepresentation()
self.assertNotEqual(representationA, representationB)
# very small overlap
self.assertLessEqual(len(representationA & representationB), 3)
# no ambiguity with lateral input
for pattern in objectA:
self.pooler.reset()
self.infer(feedforwardPattern=pattern, lateralInputs=lateralInputA[-1])
self.assertEqual(
self._getActiveRepresentation(),
representationA,
"The pooled representation for the first object is not stable"
)
# no ambiguity with lateral input
for pattern in objectB:
self.pooler.reset()
self.infer(feedforwardPattern=pattern, lateralInputs=lateralInputB[-1])
self.assertEqual(
self._getActiveRepresentation(),
representationB,
"The pooled representation for the second object is not stable"
)
def testLateralContestResolved(self):
"""
Infer an object via lateral disambiguation even if some other columns have
similar ambiguity.
If an object SDR has at least distalSegmentInhibitionFactor times as many
votes (i.e. active distal segments) as another object, it should inhibit the
other object.
"""
self.init(overrides={"lateralInputWidths": [self.inputWidth,
self.inputWidth]})
patterns = [self.generatePattern() for _ in xrange(3)]
objectA = [patterns[0], patterns[1]]
objectB = [patterns[1], patterns[2]]
lateralInput1A = self.generatePattern()
lateralInput2A = self.generatePattern()
lateralInput1B = self.generatePattern()
lateralInput2B = self.generatePattern()
self.learn(objectA, lateralPatterns=[[lateralInput1A, lateralInput2A]]*2,
numRepetitions=3, newObject=True)
representationA = set(self.pooler.getActiveCells())
self.learn(objectB, lateralPatterns=[[lateralInput1B, lateralInput2B]]*2,
numRepetitions=3, newObject=True)
representationB = set(self.pooler.getActiveCells())
self.pooler.reset()
# This column will say A | B
# One lateral column says A | B
# Another lateral column says A
self.infer(patterns[1], lateralInputs=[(), ()])
self.infer(patterns[1], lateralInputs=[lateralInput1A | lateralInput1B,
lateralInput2A])
# 3 segments should beat 2 segments if distalSegmentInhibitionFactor is
# <= 3/2.
self.assertEqual(set(self.pooler.getActiveCells()), representationA)
def testLateralContestUnresolved(self):
"""
If an object SDR has fewer than distalSegmentInhibitionFactor times as many
votes (i.e. active distal segments) as another object, it shouldn't inhibit
the other object.
"""
self.init(overrides={"lateralInputWidths": [self.inputWidth,
self.inputWidth,
self.inputWidth],
"distalSegmentInhibitionFactor": 0.6667})
patterns = [self.generatePattern() for _ in xrange(3)]
objectA = [patterns[0], patterns[1]]
objectB = [patterns[1], patterns[2]]
lateralInput1A = self.generatePattern()
lateralInput2A = self.generatePattern()
lateralInput3A = self.generatePattern()
lateralInput1B = self.generatePattern()
lateralInput2B = self.generatePattern()
lateralInput3B = self.generatePattern()
self.learn(objectA, lateralPatterns=[[lateralInput1A,
lateralInput2A,
lateralInput3A]]*2,
numRepetitions=3, newObject=True)
representationA = set(self.pooler.getActiveCells())
self.learn(objectB, lateralPatterns=[[lateralInput1B,
lateralInput2B,
lateralInput3B]]*2,
numRepetitions=3, newObject=True)
representationB = set(self.pooler.getActiveCells())
self.pooler.reset()
# This column will say A | B
# One lateral column says A | B
# Another lateral column says A | B
# Another lateral column says A
self.infer(patterns[1], lateralInputs=[(), (), ()])
self.infer(patterns[1], lateralInputs=[lateralInput1A | lateralInput1B,
lateralInput2A | lateralInput2B,
lateralInput3A])
# 4 segments should only beat 3 segments if distalSegmentInhibitionFactor is
# <= 4/3
self.assertEqual(set(self.pooler.getActiveCells()),
representationA | representationB)
@unittest.skip("Fails, need to discuss")
def testMultiColumnCompetition(self):
"""Competition between multiple conflicting lateral inputs."""
self.init(numCols=4)
neighborsIndices = [[1, 2, 3], [0, 2, 3], [0, 1, 3], [0, 1, 2]]
objectA = self.generateObject(numPatterns=5, numCols=4)
objectB = self.generateObject(numPatterns=5, numCols=4)
# second pattern in column 0 is shared
objectB[1][0] = objectA[1][0]
# learn object
self.learnMultipleColumns(
objectA,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
activeRepresentationsA = self._getActiveRepresentations()
# learn object
self.learnMultipleColumns(
objectB,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
activeRepresentationsB = self._getActiveRepresentations()
# check inference for object A
# for the first pattern, the distal predictions won't be correct
# for the second one, the prediction will be unique thanks to the
# distal predictions from the other column which has no ambiguity
for pooler in self.poolers:
pooler.reset()
# sensed patterns will be mixed
sensedPatterns = objectA[1][:-1] + [objectA[1][-1] | objectB[1][-1]]
# feed sensed patterns first time
# every one feels the correct object, except first column which feels
    # the union (reminder: lateral inputs are delayed)
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=sensedPatterns,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
firstSensedRepresentations = [
activeRepresentationsA[0] | activeRepresentationsB[0],
activeRepresentationsA[1],
activeRepresentationsA[2],
activeRepresentationsA[3] | activeRepresentationsB[3]
]
self.assertEqual(
firstSensedRepresentations,
self._getActiveRepresentations()
)
# feed sensed patterns second time
# the distal predictions are still ambiguous in C1, but disambiguated
# in C4
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=sensedPatterns,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
secondSensedRepresentations = [
activeRepresentationsA[0] | activeRepresentationsB[0],
activeRepresentationsA[1],
activeRepresentationsA[2],
activeRepresentationsA[3]
]
self.assertEqual(
secondSensedRepresentations,
self._getActiveRepresentations()
)
# feed sensed patterns third time
# this time, it is all disambiguated
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=sensedPatterns,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
self.assertEqual(
activeRepresentationsA,
self._getActiveRepresentations()
)
def testMutualDisambiguationThroughUnions(self):
"""
Learns three object in two different columns.
Feed ambiguous sensations, A u B and B u C. The system should narrow down
to B.
"""
self.init(numCols=2)
neighborsIndices = [[1], [0]]
objectA = self.generateObject(numPatterns=5, numCols=2)
objectB = self.generateObject(numPatterns=5, numCols=2)
objectC = self.generateObject(numPatterns=5, numCols=2)
# learn object
self.learnMultipleColumns(
objectA,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
activeRepresentationsA = self._getActiveRepresentations()
# learn object
self.learnMultipleColumns(
objectB,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
activeRepresentationsB = self._getActiveRepresentations()
# learn object
self.learnMultipleColumns(
objectC,
numRepetitions=3,
neighborsIndices=neighborsIndices,
randomOrder=True,
newObject=True
)
activeRepresentationsC = self._getActiveRepresentations()
# create sensed patterns (ambiguous)
sensedPatterns = [objectA[1][0] | objectB[1][0],
objectB[2][1] | objectC[2][1]]
for pooler in self.poolers:
pooler.reset()
# feed sensed patterns first time
# the L2 representations should be ambiguous
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=sensedPatterns,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
firstRepresentations = [
activeRepresentationsA[0] | activeRepresentationsB[0],
activeRepresentationsB[1] | activeRepresentationsC[1]
]
self.assertEqual(
firstRepresentations,
self._getActiveRepresentations()
)
# feed a second time, distal predictions should disambiguate
activeRepresentations = self._getActiveRepresentations()
self.inferMultipleColumns(
feedforwardPatterns=sensedPatterns,
activeRepresentations=activeRepresentations,
neighborsIndices=neighborsIndices,
)
# check that representations are unique, being slightly tolerant
self.assertLessEqual(
len(self._getActiveRepresentations()[0] - activeRepresentationsB[0]),
5,
)
self.assertLessEqual(
len(self._getActiveRepresentations()[1] - activeRepresentationsB[1]),
5,
)
self.assertGreaterEqual(
len(self._getActiveRepresentations()[0] & activeRepresentationsB[0]),
35,
)
self.assertGreaterEqual(
len(self._getActiveRepresentations()[1] & activeRepresentationsB[1]),
35,
)
def setUp(self):
"""
Sets up the test.
"""
# single column case
self.pooler = None
# multi column case
self.poolers = []
# create pattern machine
self.proximalPatternMachine = PatternMachine(
n=self.inputWidth,
w=self.numOutputActiveBits,
num=200,
seed=self.seed
)
self.patternId = 0
np.random.seed(self.seed)
# Wrappers around ColumnPooler API
def learn(self,
feedforwardPatterns,
lateralPatterns=None,
numRepetitions=1,
randomOrder=True,
newObject=True):
"""
    Learns a single object, with the provided patterns.
    Parameters:
    ----------------------------
@param feedforwardPatterns (list(set))
List of proximal input patterns
@param lateralPatterns (list(list(iterable)))
List of distal input patterns, or None. If no lateral input is
used. The outer list is expected to have the same length as
feedforwardPatterns, whereas each inner list's length is the
number of cortical columns which are distally connected to the
pooler.
@param numRepetitions (int)
Number of times the patterns will be fed
@param randomOrder (bool)
If true, the order of patterns will be shuffled at each
repetition
"""
if newObject:
self.pooler.mmClearHistory()
self.pooler.reset()
# set-up
indices = range(len(feedforwardPatterns))
if lateralPatterns is None:
lateralPatterns = [[] for _ in xrange(len(feedforwardPatterns))]
for _ in xrange(numRepetitions):
if randomOrder:
np.random.shuffle(indices)
for idx in indices:
self.pooler.compute(sorted(feedforwardPatterns[idx]),
[sorted(lateralPattern)
for lateralPattern in lateralPatterns[idx]],
learn=True)
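  # Illustrative call (an assumption, not part of the original class): after
  # self.init() has created the pooler, a single object made of five proximal
  # SDRs could be trained with
  #
  #   patterns = self.generateObject(numPatterns=5)
  #   self.learn(patterns, numRepetitions=3, randomOrder=True, newObject=True)
  #
  # which resets the pooler once and then feeds the shuffled patterns three
  # times.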
def infer(self,
feedforwardPattern,
lateralInputs=(),
printMetrics=False):
"""
Feeds a single pattern to the column pooler (as well as an eventual lateral
pattern).
Parameters:
----------------------------
@param feedforwardPattern (set)
Input proximal pattern to the pooler
@param lateralInputs (list(set))
Input distal patterns to the pooler (one for each neighboring CC's)
@param printMetrics (bool)
If true, will print cell metrics
"""
self.pooler.compute(sorted(feedforwardPattern),
[sorted(lateralInput)
for lateralInput in lateralInputs],
learn=False)
if printMetrics:
print self.pooler.mmPrettyPrintMetrics(
self.pooler.mmGetDefaultMetrics()
)
# Helper functions
def generatePattern(self):
"""
Returns a random proximal input pattern.
"""
pattern = self.proximalPatternMachine.get(self.patternId)
self.patternId += 1
return pattern
def generateObject(self, numPatterns, numCols=1):
"""
Creates a list of patterns, for a given object.
If numCols > 1 is given, a list of list of patterns will be returned.
"""
if numCols == 1:
return [self.generatePattern() for _ in xrange(numPatterns)]
else:
patterns = []
for i in xrange(numPatterns):
patterns.append([self.generatePattern() for _ in xrange(numCols)])
return patterns
def init(self, overrides=None, numCols=1):
"""
Creates the column pooler with specified parameter overrides.
Except for the specified overrides and problem-specific parameters, used
parameters are implementation defaults.
"""
params = {
"inputWidth": self.inputWidth,
"lateralInputWidths": [self.outputWidth]*(numCols-1),
"cellCount": self.outputWidth,
"sdrSize": self.numOutputActiveBits,
"minThresholdProximal": 10,
"sampleSizeProximal": 20,
"connectedPermanenceProximal": 0.6,
"initialDistalPermanence": 0.51,
"activationThresholdDistal": 10,
"sampleSizeDistal": 20,
"connectedPermanenceDistal": 0.6,
"seed": self.seed,
}
if overrides is None:
overrides = {}
params.update(overrides)
if numCols == 1:
self.pooler = MonitoredColumnPooler(**params)
else:
# TODO: We need a different seed for each pooler otherwise each one
# outputs an identical representation. Use random seed for now but ideally
# we would set different specific seeds for each pooler
params['seed']=0
self.poolers = [MonitoredColumnPooler(**params) for _ in xrange(numCols)]
def _getActiveRepresentation(self):
"""
Retrieves the current active representation in the pooler.
"""
if self.pooler is None:
raise ValueError("No pooler has been instantiated")
return set(self.pooler.getActiveCells())
# Multi-column testing
def learnMultipleColumns(self,
feedforwardPatterns,
numRepetitions=1,
neighborsIndices=None,
randomOrder=True,
newObject=True):
"""
Learns a single object, feeding it through the multiple columns.
    Parameters:
    ----------------------------
@param feedforwardPatterns (list(list(set)))
List of proximal input patterns (one for each pooler).
@param neighborsIndices (list(list))
List of column indices each column received input from.
@param numRepetitions (int)
Number of times the patterns will be fed
@param randomOrder (bool)
If true, the order of patterns will be shuffled at each
repetition
"""
if newObject:
for pooler in self.poolers:
pooler.mmClearHistory()
pooler.reset()
# use different set of pattern indices to allow random orders
    indices = [range(len(feedforwardPatterns)) for _ in xrange(len(self.poolers))]
prevActiveCells = [set() for _ in xrange(len(self.poolers))]
# by default, all columns are neighbors
if neighborsIndices is None:
neighborsIndices = [
range(i) + range(i+1, len(self.poolers))
for i in xrange(len(self.poolers))
]
for _ in xrange(numRepetitions):
# independently shuffle pattern orders if necessary
if randomOrder:
for idx in indices:
np.random.shuffle(idx)
for i in xrange(len(indices[0])):
# Train each column
for col, pooler in enumerate(self.poolers):
# get union of relevant lateral representations
lateralInputs = [sorted(activeCells)
for presynapticCol, activeCells
in enumerate(prevActiveCells)
if col != presynapticCol]
pooler.compute(sorted(feedforwardPatterns[indices[col][i]][col]),
lateralInputs, learn=True)
prevActiveCells = self._getActiveRepresentations()
def inferMultipleColumns(self,
feedforwardPatterns,
activeRepresentations,
neighborsIndices=None,
printMetrics=False,
reset=False):
"""
    Feeds a single pattern to each column pooler (along with the lateral
    input coming from the other columns).
    Parameters:
    ----------------------------
    @param  feedforwardPatterns       (list(set))
Input proximal patterns to the pooler (one for each column)
@param activeRepresentations (list(set))
Active representations in the columns at the previous step.
@param neighborsIndices (list(list))
List of column indices each column received input from.
@param printMetrics (bool)
If true, will print cell metrics
"""
if reset:
for pooler in self.poolers:
pooler.reset()
# by default, all columns are neighbors
if neighborsIndices is None:
neighborsIndices = [
range(i) + range(i+1, len(self.poolers))
for i in xrange(len(self.poolers))
]
for col, pooler in enumerate(self.poolers):
# get union of relevant lateral representations
lateralInputs = [sorted(activeCells)
for presynapticCol, activeCells
in enumerate(activeRepresentations)
if col != presynapticCol]
pooler.compute(sorted(feedforwardPatterns[col]),
lateralInputs, learn=False)
if printMetrics:
for pooler in self.poolers:
print pooler.mmPrettyPrintMetrics(
pooler.mmGetDefaultMetrics()
)
def _getActiveRepresentations(self):
"""
Retrieves the current active representations in the poolers.
"""
if len(self.poolers) == 0:
raise ValueError("No pooler has been instantiated")
return [set(pooler.getActiveCells()) for pooler in self.poolers]
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | 1,099,693,485,113,873,500 | 31.025465 | 80 | 0.664926 | false |
spaam/svtplay-dl | lib/svtplay_dl/service/eurosport.py | 1 | 5081 | import json
import re
from urllib.parse import quote
from urllib.parse import urlparse
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.service import Service
class Eurosport(Service):
supported_domains_re = [r"^([^.]+\.)*eurosportplayer.com"]
def get(self):
parse = urlparse(self.url)
match = re.search("window.server_path = ({.*});", self.get_urldata())
if not match:
            yield ServiceError("Can't find API key")
return
janson = json.loads(match.group(1))
clientapikey = janson["sdk"]["clientApiKey"]
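        # The calls below follow the bamgrid login handshake used by Eurosport
        # Player: register a browser "device", exchange its assertion for a
        # device access token, log in with the configured credentials, exchange
        # the resulting grant for an account token, and use that token on the
        # search/media API requests further down.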
devices = "https://eu.edge.bamgrid.com/devices"
postdata = {"deviceFamily": "browser", "applicationRuntime": "firefox", "deviceProfile": "macosx", "attributes": {}}
header = {"authorization": f"Bearer {clientapikey}"}
res = self.http.post(devices, headers=header, json=postdata)
assertion = res.json()["assertion"]
token = "https://eu.edge.bamgrid.com/token"
data = {
"grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
"latitude": 0,
"longitude": 0,
"platform": "browser",
"subject_token": assertion,
"subject_token_type": "urn:bamtech:params:oauth:token-type:device",
}
res = self.http.post(token, headers=header, data=data)
access_token = res.json()["access_token"]
login = "https://eu.edge.bamgrid.com/idp/login"
header = {"authorization": f"Bearer {access_token}"}
res = self.http.post(login, headers=header, json={"email": self.config.get("username"), "password": self.config.get("password")})
if res.status_code > 400:
yield ServiceError("Wrong username or password")
return
id_token = res.json()["id_token"]
grant = "https://eu.edge.bamgrid.com/accounts/grant"
res = self.http.post(grant, headers=header, json={"id_token": id_token})
assertion = res.json()["assertion"]
token = "https://eu.edge.bamgrid.com/token"
data = {
"grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
"latitude": 0,
"longitude": 0,
"platform": "browser",
"subject_token": assertion,
"subject_token_type": "urn:bamtech:params:oauth:token-type:account",
}
header = {"authorization": f"Bearer {clientapikey}"}
res = self.http.post(token, headers=header, data=data)
access_token = res.json()["access_token"]
query = {"preferredLanguages": ["en"], "mediaRights": ["GeoMediaRight"], "uiLang": "en", "include_images": True}
if parse.path[:11] == "/en/channel":
pagetype = "channel"
match = re.search("/([^/]+)$", parse.path)
if not match:
                yield ServiceError("Can't find channel")
return
(vid,) = match.groups()
query["pageType"] = pagetype
query["channelCallsign"] = vid
query["channelCallsigns"] = vid
query["onAir"] = True
self.config.set("live", True) # lets override to true
url = (
"https://search-api.svcs.eurosportplayer.com/svc/search/v2/graphql/persisted/"
"query/eurosport/web/Airings/onAir?variables={}".format(quote(json.dumps(query)))
)
res = self.http.get(url, headers={"authorization": access_token})
vid2 = res.json()["data"]["Airings"][0]["channel"]["id"]
url = f"https://global-api.svcs.eurosportplayer.com/channels/{vid2}/scenarios/browser"
res = self.http.get(url, headers={"authorization": access_token, "Accept": "application/vnd.media-service+json; version=1"})
hls_url = res.json()["stream"]["slide"]
else:
pagetype = "event"
match = re.search("/([^/]+)/([^/]+)$", parse.path)
if not match:
                yield ServiceError("Can't find event id")
return
query["title"], query["contentId"] = match.groups()
query["pageType"] = pagetype
url = "https://search-api.svcs.eurosportplayer.com/svc/search/v2/graphql/" "persisted/query/eurosport/Airings?variables={}".format(
quote(json.dumps(query)),
)
res = self.http.get(url, headers={"authorization": access_token})
programid = res.json()["data"]["Airings"][0]["programId"]
mediaid = res.json()["data"]["Airings"][0]["mediaId"]
url = f"https://global-api.svcs.eurosportplayer.com/programs/{programid}/media/{mediaid}/scenarios/browser"
res = self.http.get(url, headers={"authorization": access_token, "Accept": "application/vnd.media-service+json; version=1"})
hls_url = res.json()["stream"]["complete"]
yield from hlsparse(self.config, self.http.request("get", hls_url), hls_url, authorization=access_token, output=self.output)
| mit | 2,435,905,693,891,856,000 | 42.42735 | 143 | 0.582169 | false |
Stederr/ESCOM | Análisis de Algoritmos/ref/algoritmos/modulo_cinco.py | 1 | 2206 | # coding=utf-8
'''
=== Instituto Politécnico Nacional
=== Escuela Superior de Cómputo
*** Análisis de Algoritmos - 2CM3
>>> ww ww ww (@ww)
<<< www.ww.com/algoritmos
Note: this code is distributed under the license specified
in the github.com/ww/ESCOM repository
'''
def crearTabla(mat):
tam = len(mat)
w = []
for i in range(tam):
v = []
for j in range(tam):
            v.append(["", 0, 0, 0, -1]) # table name, M, N, P, sum (cost) value
w.append(v)
return w
def descartarCeldas(tab, mat):
contador = 1
limite = len(tab)
for i in range(limite):
for j in range(contador):
tab[i][j][0] = "#"
tab[i][j][4] = 0
contador += 1
def initPrimeraDiagonal(tab, mat):
lim = len(tab)
i = 0
j = 1
while i < lim and j < lim:
tab[i][j][0] = mat[i][0] + mat[j][0]
tab[i][j][1] = mat[i][1] # M
tab[i][j][2] = mat[i][2] # N
tab[i][j][3] = mat[j][2] # P
tab[i][j][4] = tab[i][j][1] * tab[i][j][2] * tab[i][j][3]
i += 1
j += 1
def setMinimo(tab, mat, m, n):
if tab[m][n][1] == 0 and tab[m][n][2] == 0 and tab[m][n][3] == 0:
matrizSize_izquierda = (tab[m][n-1][1], tab[m][n-1][3])
matrizSize_abajo = (tab[m+1][n][1], tab[m+1][n][3])
multiplicacion_izquierda = matrizSize_izquierda[0] * matrizSize_izquierda[1] * mat[n][2]
multiplicacion_abajo = mat[m][1] * matrizSize_abajo[0] * matrizSize_abajo[1]
suma_izquierda = tab[m][n-1][4] + multiplicacion_izquierda
suma_abajo = tab[m+1][n][4] + multiplicacion_abajo
if suma_izquierda < suma_abajo:
tab[m][n][0] = "("+tab[m][n-1][0]+")"+mat[n][0]
tab[m][n][1] = matrizSize_izquierda[0]
tab[m][n][2] = matrizSize_izquierda[1]
tab[m][n][3] = mat[n][2]
else:
tab[m][n][0] = mat[m][0]+"("+tab[m+1][n][0]+")"
tab[m][n][1] = mat[m][1]
tab[m][n][2] = matrizSize_abajo[0]
tab[m][n][3] = matrizSize_abajo[1]
tab[m][n][4] = min(suma_izquierda, suma_abajo)
def algoritmo(tab, mat):
initPrimeraDiagonal(tab, mat)
limite = len(tab)
for i in range(limite):
m = 0
n = i
for j in range(i, limite):
if tab[m][n][4] != 0:
setMinimo(tab, mat, m, n)
m += 1
n += 1
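# Usage sketch (an assumption, not part of the original file): every entry of
# the input list describes one matrix of the chain as [name, rows, cols], e.g.
#
#   matrices = [["A1", 10, 20], ["A2", 20, 5], ["A3", 5, 30]]
#   tabla = init(matrices)  # init() is defined below
#
# tabla[0][len(matrices) - 1] then holds the best parenthesization (index 0)
# and its scalar multiplication cost (index 4) for the whole chain.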
def init(matrices):
tabla = crearTabla(matrices)
descartarCeldas(tabla, matrices)
algoritmo(tabla, matrices)
return tabla | apache-2.0 | -6,570,103,270,355,356,000 | 27.61039 | 90 | 0.587648 | false |
mahak/neutron | neutron/objects/port/extensions/port_device_profile.py | 2 | 1201 | # Copyright (c) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.objects import common_types
from oslo_versionedobjects import fields as obj_fields
from neutron.db.models import port_device_profile
from neutron.objects import base
@base.NeutronObjectRegistry.register
class PortDeviceProfile(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = port_device_profile.PortDeviceProfile
primary_keys = ['port_id']
fields = {
'port_id': common_types.UUIDField(),
'device_profile': obj_fields.StringField(nullable=True),
}
foreign_keys = {'Port': {'port_id': 'id'}}
| apache-2.0 | 5,032,134,734,344,929,000 | 32.361111 | 78 | 0.714405 | false |
kdheepak89/pypdevs | test/testGVT.py | 1 | 5077 | # Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import unittest
from testutils import *
from pypdevs.basesimulator import BaseSimulator
from pypdevs.util import DEVSException
from pypdevs.DEVS import RootDEVS
class StubBaseSimulator(BaseSimulator):
def __init__(self, name):
BaseSimulator.__init__(self, name, None)
self.reverted = False
def receiveControl(self, msg):
thrd = threading.Thread(target=BaseSimulator.receiveControl, args=[self, msg])
thrd.start()
class StubRootDEVS(RootDEVS):
def __init__(self, models, num):
scheduler = "heapset"
models[num].model_id = num
RootDEVS.__init__(self, [models[num]], models, scheduler)
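# The two stubs above provide just enough plumbing for the GVT tests below:
# StubBaseSimulator runs receiveControl() on a background thread, and
# StubRootDEVS wraps a single model in a RootDEVS using the heapset scheduler.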
class TestGVT(unittest.TestCase):
def setUp(self):
self.sim = basicSim()
def tearDown(self):
self.sim.run_gvt = False
def test_GVT_notify_receive(self):
self.assertTrue(self.sim.V == [{}, {}, {}, {}])
self.sim.notifyReceive(0)
self.assertTrue(self.sim.V == [{0: -1}, {}, {}, {}])
self.sim.notifyReceive(0)
self.sim.notifyReceive(0)
self.sim.notifyReceive(0)
self.sim.notifyReceive(0)
self.assertTrue(self.sim.V == [{0: -5}, {}, {}, {}])
self.sim.notifyReceive(1)
self.assertTrue(self.sim.V == [{0: -5}, {0: -1}, {}, {}])
self.sim.notifyReceive(1)
self.sim.notifyReceive(1)
self.sim.notifyReceive(1)
self.assertTrue(self.sim.V == [{0: -5}, {0:-4}, {}, {}])
self.sim.notifyReceive(0)
self.sim.notifyReceive(1)
self.sim.notifyReceive(3)
self.sim.notifyReceive(2)
self.sim.notifyReceive(3)
self.sim.notifyReceive(3)
self.sim.notifyReceive(0)
self.assertTrue(self.sim.V == [{0: -7}, {0: -5}, {0: -1}, {0: -3}])
self.sim.V = [{0: 10, 1: 5}, {}, {}, {}]
self.sim.notifyReceive(0)
self.assertTrue(self.sim.V == [{0: 9, 1: 5}, {}, {}, {}])
def test_GVT_notify_send(self):
self.assertTrue(self.sim.V == [{}, {}, {}, {}])
self.assertTrue(self.sim.Tmin == float('inf'))
self.sim.notifySend(1, 1, 0)
self.assertTrue(self.sim.V == [{1: 1}, {}, {}, {}])
self.assertTrue(self.sim.Tmin == float('inf'))
self.sim.notifySend(1, 1, 0)
self.assertTrue(self.sim.V == [{1: 2}, {}, {}, {}])
self.assertTrue(self.sim.Tmin == float('inf'))
self.sim.notifySend(2, 1, 0)
self.assertTrue(self.sim.V == [{1: 2, 2: 1}, {}, {}, {}])
self.assertTrue(self.sim.Tmin == float('inf'))
self.sim.notifySend(2, 3, 0)
self.sim.notifySend(1, 2, 0)
self.sim.notifySend(2, 1, 0)
self.assertTrue(self.sim.V == [{1: 3, 2: 3}, {}, {}, {}])
self.assertTrue(self.sim.Tmin == float('inf'))
self.sim.notifySend(1, 9, 1)
self.assertTrue(self.sim.V == [{1: 3, 2: 3}, {1: 1}, {}, {}])
self.assertTrue(self.sim.Tmin == 9)
self.sim.notifySend(1, 6, 1)
self.assertTrue(self.sim.V == [{1: 3, 2: 3}, {1: 2}, {}, {}])
self.assertTrue(self.sim.Tmin == 6)
self.sim.notifySend(2, 5, 1)
self.assertTrue(self.sim.V == [{1: 3, 2: 3}, {1: 2, 2: 1}, {}, {}])
self.assertTrue(self.sim.Tmin == 5)
self.sim.notifySend(1, 8, 1)
self.assertTrue(self.sim.V == [{1: 3, 2: 3}, {1: 3, 2: 1}, {}, {}])
self.assertTrue(self.sim.Tmin == 5)
self.sim.notifySend(2, 5, 1)
self.sim.notifySend(2, 1, 0)
self.sim.notifySend(2, 1, 0)
self.sim.notifySend(2, 6, 1)
self.assertTrue(self.sim.V == [{1: 3, 2: 5}, {1: 3, 2: 3}, {}, {}])
self.assertTrue(self.sim.Tmin == 5)
def test_setGVT(self):
self.sim.gvt = 0
models = [Generator()]
from pypdevs.statesavers import CopyState
models[0].old_states = [CopyState((0, 1), (2, 1), None, 0, {}, 0), CopyState((2, 1), (6, 1), None, 0, {}, 0)]
self.sim.model = StubRootDEVS(models, 0)
# Prevent a loop
self.sim.next_LP = self.sim
self.assertTrue(self.sim.gvt == 0)
self.sim.setGVT(5, [], False)
self.assertTrue(self.sim.gvt == 5)
# Try to set to a time before the current GVT
try:
self.sim.setGVT(1, [], False)
self.fail()
except DEVSException:
pass
# GVT shouldn't have changed
self.assertTrue(self.sim.gvt == 5)
| apache-2.0 | -4,365,174,655,635,157,500 | 35.52518 | 117 | 0.569628 | false |
frodrigo/osmose-backend | analysers/analyser_merge_recycling_FR_nm_glass.py | 4 | 3578 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Thomas O. 2016 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from .Analyser_Merge import Analyser_Merge, SourceOpenDataSoft, GeoJSON, Load, Conflate, Select, Mapping
class Analyser_Merge_Recycling_FR_nm_glass(Analyser_Merge):
def __init__(self, config, logger = None):
Analyser_Merge.__init__(self, config, logger)
self.def_class_missing_official(item = 8120, id = 21, level = 3, tags = ['merge', 'recycling', 'fix:survey', 'fix:picture'],
title = T_('{0} glass recycling not integrated', 'NM'))
self.def_class_possible_merge(item = 8121, id = 23, level = 3, tags = ['merge', 'recycling', 'fix:chair'],
title = T_('{0} glass recycling, integration suggestion', 'NM'))
self.def_class_update_official(item = 8122, id = 24, level = 3, tags = ['merge', 'recycling', 'fix:chair'],
title = T_('{0} glass recycling update', 'NM'))
self.init(
"https://data.nantesmetropole.fr/explore/dataset/244400404_colonnes-aeriennes-nantes-metropole",
"Colonnes aériennes de Nantes Métropole",
GeoJSON(SourceOpenDataSoft(
attribution="Nantes Métropole {0}",
url="https://data.nantesmetropole.fr/explore/dataset/244400404_colonnes-aeriennes-nantes-metropole",
format="geojson")),
Load(
"geom_x", "geom_y",
select={"type_dechet": "Verre"}),
Conflate(
select = Select(
types = ["nodes", "ways"],
tags = {"amenity": "recycling"}),
osmRef = "ref:FR:NM",
conflationDistance = 100,
mapping = Mapping(
static1 = {
"amenity": "recycling",
"recycling:glass_bottles": "yes",
"recycling_type": "container"},
static2 = {"source": self.source},
mapping1 = {"ref:FR:NM": "id_colonne"},
text = lambda tags, fields: {"en": ', '.join(filter(lambda x: x is not None, [fields["type_dechet"], fields["adresse"], fields["observation"]]))} )))
| gpl-3.0 | -2,546,495,715,946,034,700 | 58.583333 | 169 | 0.478881 | false |
joemicro/Manufacturing | ui_forms/ui_batchform.py | 1 | 27806 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'batchform.ui'
#
# Created: Sun Jul 07 15:53:38 2013
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_BatchForm(object):
def setupUi(self, BatchForm):
BatchForm.setObjectName(_fromUtf8("BatchForm"))
BatchForm.resize(763, 591)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/batch")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
BatchForm.setWindowIcon(icon)
self.detailView = QtGui.QTableView(BatchForm)
self.detailView.setGeometry(QtCore.QRect(20, 184, 728, 214))
self.detailView.setObjectName(_fromUtf8("detailView"))
self.memoTextedit = QtGui.QTextEdit(BatchForm)
self.memoTextedit.setGeometry(QtCore.QRect(528, 464, 215, 106))
self.memoTextedit.setObjectName(_fromUtf8("memoTextedit"))
self.label_8 = QtGui.QLabel(BatchForm)
self.label_8.setGeometry(QtCore.QRect(530, 444, 46, 25))
self.label_8.setMinimumSize(QtCore.QSize(0, 25))
self.label_8.setMaximumSize(QtCore.QSize(16777215, 25))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.label_9 = QtGui.QLabel(BatchForm)
self.label_9.setGeometry(QtCore.QRect(250, 408, 203, 20))
self.label_9.setMinimumSize(QtCore.QSize(0, 20))
self.label_9.setMaximumSize(QtCore.QSize(16777215, 20))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
self.label_9.setPalette(palette)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_9.setFont(font)
self.label_9.setAutoFillBackground(True)
self.label_9.setFrameShape(QtGui.QFrame.NoFrame)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.multFactorLineedit = QtGui.QLineEdit(BatchForm)
self.multFactorLineedit.setGeometry(QtCore.QRect(358, 429, 96, 20))
self.multFactorLineedit.setMinimumSize(QtCore.QSize(96, 20))
self.multFactorLineedit.setMaximumSize(QtCore.QSize(96, 20))
self.multFactorLineedit.setFrame(True)
self.multFactorLineedit.setObjectName(_fromUtf8("multFactorLineedit"))
self.label_10 = QtGui.QLabel(BatchForm)
self.label_10.setGeometry(QtCore.QRect(250, 429, 109, 20))
self.label_10.setMinimumSize(QtCore.QSize(0, 20))
self.label_10.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_10.setFrameShape(QtGui.QFrame.Box)
self.label_10.setLineWidth(1)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.frame = QtGui.QFrame(BatchForm)
self.frame.setGeometry(QtCore.QRect(20, 90, 730, 86))
self.frame.setFrameShape(QtGui.QFrame.Box)
self.frame.setFrameShadow(QtGui.QFrame.Sunken)
self.frame.setObjectName(_fromUtf8("frame"))
self.mult_Label = QtGui.QLabel(self.frame)
self.mult_Label.setGeometry(QtCore.QRect(228, 7, 41, 25))
self.mult_Label.setMinimumSize(QtCore.QSize(0, 25))
self.mult_Label.setMaximumSize(QtCore.QSize(16777215, 25))
self.mult_Label.setTextFormat(QtCore.Qt.PlainText)
self.mult_Label.setWordWrap(True)
self.mult_Label.setObjectName(_fromUtf8("mult_Label"))
self.desc_Label = QtGui.QLabel(self.frame)
self.desc_Label.setGeometry(QtCore.QRect(13, 47, 61, 25))
self.desc_Label.setMinimumSize(QtCore.QSize(0, 25))
self.desc_Label.setMaximumSize(QtCore.QSize(16777215, 25))
self.desc_Label.setObjectName(_fromUtf8("desc_Label"))
self.multLineedit = QtGui.QLineEdit(self.frame)
self.multLineedit.setGeometry(QtCore.QRect(275, 7, 96, 25))
self.multLineedit.setMinimumSize(QtCore.QSize(96, 25))
self.multLineedit.setMaximumSize(QtCore.QSize(96, 25))
self.multLineedit.setObjectName(_fromUtf8("multLineedit"))
self.weight_Label = QtGui.QLabel(self.frame)
self.weight_Label.setGeometry(QtCore.QRect(550, 41, 34, 25))
self.weight_Label.setMinimumSize(QtCore.QSize(0, 25))
self.weight_Label.setMaximumSize(QtCore.QSize(16777215, 25))
self.weight_Label.setObjectName(_fromUtf8("weight_Label"))
self.baseno_Label = QtGui.QLabel(self.frame)
self.baseno_Label.setGeometry(QtCore.QRect(15, 7, 42, 25))
self.baseno_Label.setMinimumSize(QtCore.QSize(0, 25))
self.baseno_Label.setMaximumSize(QtCore.QSize(16777215, 25))
self.baseno_Label.setObjectName(_fromUtf8("baseno_Label"))
self.baseCombo = QtGui.QComboBox(self.frame)
self.baseCombo.setGeometry(QtCore.QRect(83, 7, 96, 25))
self.baseCombo.setMinimumSize(QtCore.QSize(96, 25))
self.baseCombo.setMaximumSize(QtCore.QSize(96, 25))
self.baseCombo.setObjectName(_fromUtf8("baseCombo"))
self.volume_Label = QtGui.QLabel(self.frame)
self.volume_Label.setGeometry(QtCore.QRect(550, 7, 59, 25))
self.volume_Label.setMinimumSize(QtCore.QSize(0, 25))
self.volume_Label.setMaximumSize(QtCore.QSize(16777215, 25))
self.volume_Label.setObjectName(_fromUtf8("volume_Label"))
self.volumeLineedit = QtGui.QLineEdit(self.frame)
self.volumeLineedit.setGeometry(QtCore.QRect(615, 7, 96, 25))
self.volumeLineedit.setMinimumSize(QtCore.QSize(96, 25))
self.volumeLineedit.setMaximumSize(QtCore.QSize(96, 25))
self.volumeLineedit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.volumeLineedit.setObjectName(_fromUtf8("volumeLineedit"))
self.date_Label = QtGui.QLabel(self.frame)
self.date_Label.setGeometry(QtCore.QRect(390, 41, 23, 25))
self.date_Label.setMinimumSize(QtCore.QSize(0, 25))
self.date_Label.setMaximumSize(QtCore.QSize(16777215, 25))
self.date_Label.setObjectName(_fromUtf8("date_Label"))
self.descLineedit = QtGui.QLineEdit(self.frame)
self.descLineedit.setGeometry(QtCore.QRect(83, 47, 288, 25))
self.descLineedit.setMinimumSize(QtCore.QSize(288, 25))
self.descLineedit.setMaximumSize(QtCore.QSize(288, 25))
self.descLineedit.setObjectName(_fromUtf8("descLineedit"))
self.v_weight_Label = QtGui.QLabel(self.frame)
self.v_weight_Label.setGeometry(QtCore.QRect(615, 41, 96, 25))
self.v_weight_Label.setMinimumSize(QtCore.QSize(96, 25))
self.v_weight_Label.setMaximumSize(QtCore.QSize(96, 25))
self.v_weight_Label.setFrameShape(QtGui.QFrame.Box)
self.v_weight_Label.setText(_fromUtf8(""))
self.v_weight_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.v_weight_Label.setObjectName(_fromUtf8("v_weight_Label"))
self.baseID_Label = QtGui.QLabel(self.frame)
self.baseID_Label.setGeometry(QtCore.QRect(390, 7, 48, 25))
self.baseID_Label.setMinimumSize(QtCore.QSize(0, 25))
self.baseID_Label.setMaximumSize(QtCore.QSize(16777215, 25))
self.baseID_Label.setObjectName(_fromUtf8("baseID_Label"))
self.dateEdit = QtGui.QDateEdit(self.frame)
self.dateEdit.setGeometry(QtCore.QRect(438, 41, 96, 25))
self.dateEdit.setMinimumSize(QtCore.QSize(96, 25))
self.dateEdit.setMaximumSize(QtCore.QSize(96, 25))
self.dateEdit.setCalendarPopup(True)
self.dateEdit.setObjectName(_fromUtf8("dateEdit"))
self.v_baseID_Label = QtGui.QLabel(self.frame)
self.v_baseID_Label.setGeometry(QtCore.QRect(438, 7, 96, 25))
self.v_baseID_Label.setMinimumSize(QtCore.QSize(96, 25))
self.v_baseID_Label.setMaximumSize(QtCore.QSize(96, 25))
self.v_baseID_Label.setFrameShape(QtGui.QFrame.Box)
self.v_baseID_Label.setText(_fromUtf8(""))
self.v_baseID_Label.setObjectName(_fromUtf8("v_baseID_Label"))
self.v_mult114lt_Label = QtGui.QLabel(BatchForm)
self.v_mult114lt_Label.setGeometry(QtCore.QRect(358, 549, 94, 20))
self.v_mult114lt_Label.setMinimumSize(QtCore.QSize(0, 20))
self.v_mult114lt_Label.setMaximumSize(QtCore.QSize(16777215, 20))
self.v_mult114lt_Label.setFrameShape(QtGui.QFrame.Box)
self.v_mult114lt_Label.setText(_fromUtf8(""))
self.v_mult114lt_Label.setObjectName(_fromUtf8("v_mult114lt_Label"))
self.v_mult4lt_Label = QtGui.QLabel(BatchForm)
self.v_mult4lt_Label.setGeometry(QtCore.QRect(358, 529, 94, 20))
self.v_mult4lt_Label.setMinimumSize(QtCore.QSize(0, 20))
self.v_mult4lt_Label.setMaximumSize(QtCore.QSize(16777215, 20))
self.v_mult4lt_Label.setFrameShape(QtGui.QFrame.Box)
self.v_mult4lt_Label.setText(_fromUtf8(""))
self.v_mult4lt_Label.setObjectName(_fromUtf8("v_mult4lt_Label"))
self.v_mult15lt_Label = QtGui.QLabel(BatchForm)
self.v_mult15lt_Label.setGeometry(QtCore.QRect(358, 489, 94, 20))
self.v_mult15lt_Label.setMinimumSize(QtCore.QSize(0, 20))
self.v_mult15lt_Label.setMaximumSize(QtCore.QSize(16777215, 20))
self.v_mult15lt_Label.setFrameShape(QtGui.QFrame.Box)
self.v_mult15lt_Label.setText(_fromUtf8(""))
self.v_mult15lt_Label.setObjectName(_fromUtf8("v_mult15lt_Label"))
self.v_multFlu_Label = QtGui.QLabel(BatchForm)
self.v_multFlu_Label.setGeometry(QtCore.QRect(358, 449, 94, 20))
self.v_multFlu_Label.setMinimumSize(QtCore.QSize(0, 20))
self.v_multFlu_Label.setMaximumSize(QtCore.QSize(16777215, 20))
self.v_multFlu_Label.setFrameShape(QtGui.QFrame.Box)
self.v_multFlu_Label.setText(_fromUtf8(""))
self.v_multFlu_Label.setObjectName(_fromUtf8("v_multFlu_Label"))
self.v_mult2lt_Label = QtGui.QLabel(BatchForm)
self.v_mult2lt_Label.setGeometry(QtCore.QRect(358, 509, 94, 20))
self.v_mult2lt_Label.setMinimumSize(QtCore.QSize(0, 20))
self.v_mult2lt_Label.setMaximumSize(QtCore.QSize(16777215, 20))
self.v_mult2lt_Label.setFrameShape(QtGui.QFrame.Box)
self.v_mult2lt_Label.setText(_fromUtf8(""))
self.v_mult2lt_Label.setObjectName(_fromUtf8("v_mult2lt_Label"))
self.v_mult1lt_Label = QtGui.QLabel(BatchForm)
self.v_mult1lt_Label.setGeometry(QtCore.QRect(358, 469, 94, 20))
self.v_mult1lt_Label.setMinimumSize(QtCore.QSize(0, 20))
self.v_mult1lt_Label.setMaximumSize(QtCore.QSize(16777215, 20))
self.v_mult1lt_Label.setFrameShape(QtGui.QFrame.Box)
self.v_mult1lt_Label.setText(_fromUtf8(""))
self.v_mult1lt_Label.setObjectName(_fromUtf8("v_mult1lt_Label"))
self.label_26 = QtGui.QLabel(BatchForm)
self.label_26.setGeometry(QtCore.QRect(250, 549, 109, 20))
self.label_26.setMinimumSize(QtCore.QSize(0, 20))
self.label_26.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_26.setFrameShape(QtGui.QFrame.Box)
self.label_26.setLineWidth(1)
self.label_26.setObjectName(_fromUtf8("label_26"))
self.label_27 = QtGui.QLabel(BatchForm)
self.label_27.setGeometry(QtCore.QRect(250, 469, 109, 20))
self.label_27.setMinimumSize(QtCore.QSize(0, 20))
self.label_27.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_27.setFrameShape(QtGui.QFrame.Box)
self.label_27.setLineWidth(1)
self.label_27.setObjectName(_fromUtf8("label_27"))
self.label_28 = QtGui.QLabel(BatchForm)
self.label_28.setGeometry(QtCore.QRect(250, 529, 109, 20))
self.label_28.setMinimumSize(QtCore.QSize(0, 20))
self.label_28.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_28.setFrameShape(QtGui.QFrame.Box)
self.label_28.setLineWidth(1)
self.label_28.setObjectName(_fromUtf8("label_28"))
self.label_29 = QtGui.QLabel(BatchForm)
self.label_29.setGeometry(QtCore.QRect(250, 509, 109, 20))
self.label_29.setMinimumSize(QtCore.QSize(0, 20))
self.label_29.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_29.setFrameShape(QtGui.QFrame.Box)
self.label_29.setLineWidth(1)
self.label_29.setObjectName(_fromUtf8("label_29"))
self.label_30 = QtGui.QLabel(BatchForm)
self.label_30.setGeometry(QtCore.QRect(250, 489, 109, 20))
self.label_30.setMinimumSize(QtCore.QSize(0, 20))
self.label_30.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_30.setFrameShape(QtGui.QFrame.Box)
self.label_30.setLineWidth(1)
self.label_30.setObjectName(_fromUtf8("label_30"))
self.label_31 = QtGui.QLabel(BatchForm)
self.label_31.setGeometry(QtCore.QRect(250, 449, 109, 20))
self.label_31.setMinimumSize(QtCore.QSize(0, 20))
self.label_31.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_31.setFrameShape(QtGui.QFrame.Box)
self.label_31.setLineWidth(1)
self.label_31.setObjectName(_fromUtf8("label_31"))
self.frame_2 = QtGui.QFrame(BatchForm)
self.frame_2.setGeometry(QtCore.QRect(0, 41, 769, 37))
self.frame_2.setAutoFillBackground(False)
self.frame_2.setStyleSheet(_fromUtf8("border: none;\n"
"background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
"stop: 0 #BBBBBB, \n"
"stop: 0.4 #EEEEEE,\n"
"stop: 0.9 #CCCCCC,\n"
" stop: 1 #666666);"))
self.frame_2.setFrameShape(QtGui.QFrame.Box)
self.frame_2.setFrameShadow(QtGui.QFrame.Raised)
self.frame_2.setObjectName(_fromUtf8("frame_2"))
self.saveButton = QtGui.QPushButton(self.frame_2)
self.saveButton.setGeometry(QtCore.QRect(111, 5, 90, 27))
self.saveButton.setMinimumSize(QtCore.QSize(90, 0))
self.saveButton.setMaximumSize(QtCore.QSize(90, 16777215))
self.saveButton.setStyleSheet(_fromUtf8("QPushButton {\n"
"background-color: rgb(250, 250, 250);\n"
"color: #333;\n"
"border: 2px solid #555;\n"
"padding: 5px;}\n"
"QPushButton:hover {\n"
"background: qradialgradient(cx: 0.3, cy: -0.4,\n"
"fx: 0.3, fy: -0.4,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #ccc);}\n"
"QPushButton:pressed {\n"
"background: qradialgradient(cx: 0.4, cy: -0.1,\n"
"fx: 0.4, fy: -0.1,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #eee);}"))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/save")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.saveButton.setIcon(icon1)
self.saveButton.setIconSize(QtCore.QSize(20, 20))
self.saveButton.setFlat(False)
self.saveButton.setObjectName(_fromUtf8("saveButton"))
self.newButton = QtGui.QPushButton(self.frame_2)
self.newButton.setGeometry(QtCore.QRect(15, 5, 90, 27))
self.newButton.setMinimumSize(QtCore.QSize(90, 0))
self.newButton.setMaximumSize(QtCore.QSize(90, 16777215))
self.newButton.setStyleSheet(_fromUtf8("QPushButton {\n"
"background-color: rgb(250, 250, 250);\n"
"color: #333;\n"
"border: 2px solid #555;\n"
"padding: 5px;}\n"
"QPushButton:hover {\n"
"background: qradialgradient(cx: 0.3, cy: -0.4,\n"
"fx: 0.3, fy: -0.4,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #ccc);}\n"
"QPushButton:pressed {\n"
"background: qradialgradient(cx: 0.4, cy: -0.1,\n"
"fx: 0.4, fy: -0.1,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #eee);}"))
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/new")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.newButton.setIcon(icon2)
self.newButton.setIconSize(QtCore.QSize(20, 20))
self.newButton.setFlat(False)
self.newButton.setObjectName(_fromUtf8("newButton"))
self.printButton = QtGui.QPushButton(self.frame_2)
self.printButton.setGeometry(QtCore.QRect(303, 5, 90, 27))
self.printButton.setMinimumSize(QtCore.QSize(90, 0))
self.printButton.setMaximumSize(QtCore.QSize(90, 16777215))
self.printButton.setStyleSheet(_fromUtf8("QPushButton {\n"
"background-color: rgb(250, 250, 250);\n"
"color: #333;\n"
"border: 2px solid #555;\n"
"padding: 5px;}\n"
"QPushButton:hover {\n"
"background: qradialgradient(cx: 0.3, cy: -0.4,\n"
"fx: 0.3, fy: -0.4,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #ccc);}\n"
"QPushButton:pressed {\n"
"background: qradialgradient(cx: 0.4, cy: -0.1,\n"
"fx: 0.4, fy: -0.1,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #eee);}"))
self.printButton.setIconSize(QtCore.QSize(20, 20))
self.printButton.setFlat(False)
self.printButton.setObjectName(_fromUtf8("printButton"))
self.deleteButton = QtGui.QPushButton(self.frame_2)
self.deleteButton.setGeometry(QtCore.QRect(207, 5, 90, 27))
self.deleteButton.setMinimumSize(QtCore.QSize(90, 0))
self.deleteButton.setMaximumSize(QtCore.QSize(90, 16777215))
self.deleteButton.setStyleSheet(_fromUtf8("QPushButton {\n"
"background-color: rgb(250, 250, 250);\n"
"color: #333;\n"
"border: 2px solid #555;\n"
"padding: 5px;}\n"
"QPushButton:hover {\n"
"background: qradialgradient(cx: 0.3, cy: -0.4,\n"
"fx: 0.3, fy: -0.4,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #ccc);}\n"
"QPushButton:pressed {\n"
"background: qradialgradient(cx: 0.4, cy: -0.1,\n"
"fx: 0.4, fy: -0.1,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #eee);}"))
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/delete")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.deleteButton.setIcon(icon3)
self.deleteButton.setIconSize(QtCore.QSize(20, 20))
self.deleteButton.setFlat(False)
self.deleteButton.setObjectName(_fromUtf8("deleteButton"))
self.closeButton = QtGui.QPushButton(self.frame_2)
self.closeButton.setGeometry(QtCore.QRect(399, 5, 90, 27))
self.closeButton.setMinimumSize(QtCore.QSize(90, 0))
self.closeButton.setMaximumSize(QtCore.QSize(90, 16777215))
self.closeButton.setStyleSheet(_fromUtf8("QPushButton {\n"
"background-color: rgb(250, 250, 250);\n"
"color: #333;\n"
"border: 2px solid #555;\n"
"padding: 5px;}\n"
"QPushButton:hover {\n"
"background: qradialgradient(cx: 0.3, cy: -0.4,\n"
"fx: 0.3, fy: -0.4,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #ccc);}\n"
"QPushButton:pressed {\n"
"background: qradialgradient(cx: 0.4, cy: -0.1,\n"
"fx: 0.4, fy: -0.1,\n"
"radius: 1.35, stop: 0 #fff, stop: 1 #eee);}"))
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/exit")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.closeButton.setIcon(icon4)
self.closeButton.setIconSize(QtCore.QSize(20, 20))
self.closeButton.setFlat(False)
self.closeButton.setObjectName(_fromUtf8("closeButton"))
self.groupBox = QtGui.QGroupBox(self.frame_2)
self.groupBox.setGeometry(QtCore.QRect(546, 6, 188, 24))
self.groupBox.setStyleSheet(_fromUtf8("background-color: rgb(212, 212, 212);"))
self.groupBox.setTitle(_fromUtf8(""))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.baseRadio = QtGui.QRadioButton(self.groupBox)
self.baseRadio.setGeometry(QtCore.QRect(20, 5, 55, 17))
self.baseRadio.setObjectName(_fromUtf8("baseRadio"))
self.batchRadio = QtGui.QRadioButton(self.groupBox)
self.batchRadio.setGeometry(QtCore.QRect(90, 4, 55, 17))
self.batchRadio.setObjectName(_fromUtf8("batchRadio"))
self.header_label = QtGui.QLabel(BatchForm)
self.header_label.setGeometry(QtCore.QRect(85, -1, 259, 32))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Calibri"))
font.setPointSize(20)
font.setBold(True)
font.setItalic(True)
font.setUnderline(True)
font.setWeight(75)
self.header_label.setFont(font)
self.header_label.setObjectName(_fromUtf8("header_label"))
self.layoutWidget = QtGui.QWidget(BatchForm)
self.layoutWidget.setGeometry(QtCore.QRect(620, 408, 126, 27))
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.layoutWidget)
self.horizontalLayout_2.setMargin(0)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_12 = QtGui.QLabel(self.layoutWidget)
self.label_12.setMinimumSize(QtCore.QSize(0, 25))
self.label_12.setMaximumSize(QtCore.QSize(16777215, 25))
self.label_12.setObjectName(_fromUtf8("label_12"))
self.horizontalLayout_2.addWidget(self.label_12)
self.v_cost_Label = QtGui.QLabel(self.layoutWidget)
self.v_cost_Label.setMinimumSize(QtCore.QSize(96, 25))
self.v_cost_Label.setMaximumSize(QtCore.QSize(96, 25))
self.v_cost_Label.setFrameShape(QtGui.QFrame.Box)
self.v_cost_Label.setText(_fromUtf8(""))
self.v_cost_Label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.v_cost_Label.setObjectName(_fromUtf8("v_cost_Label"))
self.horizontalLayout_2.addWidget(self.v_cost_Label)
self.layoutWidget1 = QtGui.QWidget(BatchForm)
self.layoutWidget1.setGeometry(QtCore.QRect(20, 400, 219, 169))
self.layoutWidget1.setObjectName(_fromUtf8("layoutWidget1"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.layoutWidget1)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.associatedLabel = QtGui.QLabel(self.layoutWidget1)
self.associatedLabel.setMinimumSize(QtCore.QSize(0, 25))
self.associatedLabel.setMaximumSize(QtCore.QSize(16777215, 25))
self.associatedLabel.setObjectName(_fromUtf8("associatedLabel"))
self.verticalLayout_2.addWidget(self.associatedLabel)
self.associatedView = QtGui.QTableView(self.layoutWidget1)
self.associatedView.setObjectName(_fromUtf8("associatedView"))
self.verticalLayout_2.addWidget(self.associatedView)
self.label_8.setBuddy(self.memoTextedit)
self.label_10.setBuddy(self.multFactorLineedit)
self.mult_Label.setBuddy(self.multLineedit)
self.desc_Label.setBuddy(self.descLineedit)
self.baseno_Label.setBuddy(self.baseCombo)
self.volume_Label.setBuddy(self.volumeLineedit)
self.date_Label.setBuddy(self.dateEdit)
self.associatedLabel.setBuddy(self.associatedView)
self.retranslateUi(BatchForm)
QtCore.QMetaObject.connectSlotsByName(BatchForm)
BatchForm.setTabOrder(self.baseCombo, self.dateEdit)
BatchForm.setTabOrder(self.dateEdit, self.detailView)
BatchForm.setTabOrder(self.detailView, self.memoTextedit)
BatchForm.setTabOrder(self.memoTextedit, self.multLineedit)
BatchForm.setTabOrder(self.multLineedit, self.descLineedit)
BatchForm.setTabOrder(self.descLineedit, self.volumeLineedit)
BatchForm.setTabOrder(self.volumeLineedit, self.multFactorLineedit)
BatchForm.setTabOrder(self.multFactorLineedit, self.associatedView)
BatchForm.setTabOrder(self.associatedView, self.baseRadio)
BatchForm.setTabOrder(self.baseRadio, self.batchRadio)
def retranslateUi(self, BatchForm):
BatchForm.setWindowTitle(_translate("BatchForm", "Dialog", None))
self.label_8.setText(_translate("BatchForm", "Notes", None))
self.label_9.setText(_translate("BatchForm", "Projected Results", None))
self.label_10.setText(_translate("BatchForm", "Multiplication Factor", None))
self.mult_Label.setText(_translate("BatchForm", "Multply Batch", None))
self.desc_Label.setText(_translate("BatchForm", "Description", None))
self.weight_Label.setText(_translate("BatchForm", "Weight", None))
self.baseno_Label.setText(_translate("BatchForm", "Base no.", None))
self.volume_Label.setText(_translate("BatchForm", "Base Volume", None))
self.date_Label.setText(_translate("BatchForm", "Date", None))
self.baseID_Label.setText(_translate("BatchForm", "Base ID", None))
self.label_26.setText(_translate("BatchForm", " TOTAL 11.4 LITRE:", None))
self.label_27.setText(_translate("BatchForm", " TOTAL 1 LITRE:", None))
self.label_28.setText(_translate("BatchForm", " TOTAL 4 LITRE :", None))
self.label_29.setText(_translate("BatchForm", " TOTAL 2 LITRE:", None))
self.label_30.setText(_translate("BatchForm", " TOTAL 1.5 LITRE:", None))
self.label_31.setText(_translate("BatchForm", "TOTAL FLU. LITRE:", None))
self.saveButton.setText(_translate("BatchForm", "&Save", None))
self.newButton.setText(_translate("BatchForm", "&New", None))
self.printButton.setText(_translate("BatchForm", "&Print", None))
self.deleteButton.setText(_translate("BatchForm", "&Delete", None))
self.closeButton.setText(_translate("BatchForm", "Close", None))
self.baseRadio.setText(_translate("BatchForm", "Base", None))
self.batchRadio.setText(_translate("BatchForm", "Batch", None))
self.header_label.setText(_translate("BatchForm", "Find Transactions:", None))
self.label_12.setText(_translate("BatchForm", "Cost", None))
self.associatedLabel.setText(_translate("BatchForm", "Associated Bases", None))
import images_rc
| mit | -4,558,092,067,687,230,000 | 54.170635 | 109 | 0.682515 | false |
isralopez/geonode | geonode/catalogue/views.py | 14 | 1811 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from pycsw import server
from geonode.catalogue.backends.pycsw_local import CONFIGURATION
@csrf_exempt
def csw_global_dispatch(request):
"""pycsw wrapper"""
# this view should only operate if pycsw_local is the backend
# else, redirect to the URL of the non-pycsw_local backend
if settings.CATALOGUE['default']['ENGINE'] != 'geonode.catalogue.backends.pycsw_local':
return HttpResponseRedirect(settings.CATALOGUE['default']['URL'])
mdict = dict(settings.PYCSW['CONFIGURATION'], **CONFIGURATION)
env = request.META.copy()
env.update({'local.app_root': os.path.dirname(__file__),
'REQUEST_URI': request.build_absolute_uri()})
csw = server.Csw(mdict, env)
content = csw.dispatch_wsgi()
return HttpResponse(content, content_type=csw.contenttype)
| gpl-3.0 | -2,713,650,765,973,121,000 | 36.729167 | 91 | 0.674213 | false |
riga/law | law/contrib/root/formatter.py | 1 | 3388 | # coding: utf-8
"""
ROOT target formatters.
"""
__all__ = [
"GuardedTFile", "ROOTFormatter", "ROOTNumpyFormatter", "ROOTPandasFormatter", "UprootFormatter",
]
from law.target.formatter import Formatter
from law.target.file import get_path
from law.contrib.root.util import import_ROOT
class GuardedTFile(object):
@classmethod
def Open(cls, *args, **kwargs):
ROOT = import_ROOT()
return cls(ROOT.TFile.Open(*args, **kwargs))
def __init__(self, *args, **kwargs):
super(GuardedTFile, self).__init__()
self._guarded_tfile = None
ROOT = import_ROOT()
if len(args) == 1 and isinstance(args[0], ROOT.TFile) and not kwargs:
self._guarded_tfile = args[0]
elif args or kwargs:
self._guarded_tfile = ROOT.TFile(*args, **kwargs)
def __enter__(self):
return self._guarded_tfile
def __exit__(self, exc_type, exc_value, traceback):
if self.IsOpen():
self.Close()
def __getattr__(self, attr):
if self._guarded_tfile is not None:
return getattr(self._guarded_tfile, attr)
else:
raise AttributeError("cannot forward attribute '{}' to undefined guarded tfile".format(
attr))
def __setattr__(self, attr, value):
if attr != "_guarded_tfile":
setattr(self._guarded_tfile, attr, value)
else:
super(GuardedTFile, self).__setattr__(attr, value)
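# Minimal usage sketch (an assumption, not part of the original module): the
# wrapper turns a ROOT TFile into a context manager, e.g.
#
#   with GuardedTFile.Open("/some/path/file.root", "READ") as f:  # hypothetical path
#       hist = f.Get("some_histogram")  # f is the underlying ROOT TFile here
#
# and guarantees that the underlying TFile is closed when the block exits.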
class ROOTFormatter(Formatter):
name = "root"
@classmethod
def accepts(cls, path, mode):
return get_path(path).endswith(".root")
@classmethod
def load(cls, path, *args, **kwargs):
return GuardedTFile(get_path(path), *args, **kwargs)
@classmethod
def dump(cls, path, *args, **kwargs):
return GuardedTFile(get_path(path), *args, **kwargs)
class ROOTNumpyFormatter(Formatter):
name = "root_numpy"
@classmethod
def accepts(cls, path, mode):
return get_path(path).endswith(".root")
@classmethod
def load(cls, path, *args, **kwargs):
ROOT = import_ROOT() # noqa: F841
import root_numpy
return root_numpy.root2array(get_path(path), *args, **kwargs)
@classmethod
def dump(cls, path, arr, *args, **kwargs):
ROOT = import_ROOT() # noqa: F841
import root_numpy
return root_numpy.array2root(arr, get_path(path), *args, **kwargs)
class ROOTPandasFormatter(Formatter):
name = "root_pandas"
@classmethod
def accepts(cls, path, mode):
return get_path(path).endswith(".root")
@classmethod
def load(cls, path, *args, **kwargs):
ROOT = import_ROOT() # noqa: F841
import root_pandas
return root_pandas.read_root(get_path(path), *args, **kwargs)
@classmethod
def dump(cls, path, df, *args, **kwargs):
ROOT = import_ROOT() # noqa: F841
# importing root_pandas adds the to_root() method to data frames
import root_pandas # noqa: F401
return df.to_root(get_path(path), *args, **kwargs)
class UprootFormatter(Formatter):
name = "uproot"
@classmethod
def accepts(cls, path, mode):
return get_path(path).endswith(".root")
@classmethod
def load(cls, path, *args, **kwargs):
import uproot
return uproot.open(get_path(path), *args, **kwargs)
| bsd-3-clause | 1,715,753,316,678,142,200 | 24.473684 | 100 | 0.603896 | false |
larsks/blivet | tests/devices_test/partition_test.py | 6 | 6832 | # vim:set fileencoding=utf-8
import os
import unittest
from blivet.devices import DiskFile
from blivet.devices import PartitionDevice
from blivet.formats import getFormat
from blivet.size import Size
from blivet.util import sparsetmpfile
class PartitionDeviceTestCase(unittest.TestCase):
def testTargetSize(self):
with sparsetmpfile("targetsizetest", Size("10 MiB")) as disk_file:
disk = DiskFile(disk_file)
disk.format = getFormat("disklabel", device=disk.path)
grain_size = Size(disk.format.alignment.grainSize)
sector_size = Size(disk.format.partedDevice.sectorSize)
start = int(grain_size)
orig_size = Size("6 MiB")
end = start + int(orig_size / sector_size) - 1
disk.format.addPartition(start, end)
partition = disk.format.partedDisk.getPartitionBySector(start)
self.assertNotEqual(partition, None)
self.assertEqual(orig_size, Size(partition.getLength(unit='B')))
device = PartitionDevice(os.path.basename(partition.path),
size=orig_size)
device.disk = disk
device.exists = True
device.partedPartition = partition
device.format = getFormat("ext4", device=device.path)
device.format.exists = True
# grain size should be 1 MiB
device.format._minInstanceSize = Size("2 MiB") + (grain_size / 2)
device.format._resizable = True
# Make sure things are as expected to begin with.
self.assertEqual(device.size, orig_size)
self.assertEqual(device.minSize, Size("3 MiB"))
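            # (minimum size arithmetic: 2 MiB + 0.5 MiB = 2.5 MiB, rounded up
            #  to the next 1 MiB alignment grain, gives the 3 MiB just asserted)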
            # start sector is at 1 MiB
self.assertEqual(device.maxSize, Size("9 MiB"))
# ValueError if not Size
with self.assertRaisesRegex(ValueError,
"new size must.*type Size"):
device.targetSize = 22
self.assertEqual(device.targetSize, orig_size)
# ValueError if size smaller than minSize
with self.assertRaisesRegex(ValueError,
"size.*smaller than the minimum"):
device.targetSize = Size("1 MiB")
self.assertEqual(device.targetSize, orig_size)
# ValueError if size larger than maxSize
with self.assertRaisesRegex(ValueError,
"size.*larger than the maximum"):
device.targetSize = Size("11 MiB")
self.assertEqual(device.targetSize, orig_size)
# ValueError if unaligned
with self.assertRaisesRegex(ValueError, "new size.*not.*aligned"):
device.targetSize = Size("3.1 MiB")
self.assertEqual(device.targetSize, orig_size)
# successfully set a new target size
new_target = device.maxSize
device.targetSize = new_target
self.assertEqual(device.targetSize, new_target)
self.assertEqual(device.size, new_target)
parted_size = Size(device.partedPartition.getLength(unit='B'))
self.assertEqual(parted_size, device.targetSize)
# reset target size to original size
device.targetSize = orig_size
self.assertEqual(device.targetSize, orig_size)
self.assertEqual(device.size, orig_size)
parted_size = Size(device.partedPartition.getLength(unit='B'))
self.assertEqual(parted_size, device.targetSize)
def testMinMaxSizeAlignment(self):
with sparsetmpfile("minsizetest", Size("10 MiB")) as disk_file:
disk = DiskFile(disk_file)
disk.format = getFormat("disklabel", device=disk.path)
grain_size = Size(disk.format.alignment.grainSize)
sector_size = Size(disk.format.partedDevice.sectorSize)
start = int(grain_size)
end = start + int(Size("6 MiB") / sector_size)
disk.format.addPartition(start, end)
partition = disk.format.partedDisk.getPartitionBySector(start)
self.assertNotEqual(partition, None)
device = PartitionDevice(os.path.basename(partition.path))
device.disk = disk
device.exists = True
device.partedPartition = partition
# Typical sector size is 512 B.
# Default optimum alignment grain size is 2048 sectors, or 1 MiB.
device.format = getFormat("ext4", device=device.path)
device.format.exists = True
device.format._minInstanceSize = Size("2 MiB") + (grain_size / 2)
device.format._resizable = True
##
## minSize
##
# The end sector based only on format min size should be unaligned.
min_sectors = int(device.format.minSize / sector_size)
min_end_sector = partition.geometry.start + min_sectors - 1
self.assertEqual(
disk.format.endAlignment.isAligned(partition.geometry,
min_end_sector),
False)
# The end sector based on device min size should be aligned.
min_sectors = int(device.minSize / sector_size)
min_end_sector = partition.geometry.start + min_sectors - 1
self.assertEqual(
disk.format.endAlignment.isAligned(partition.geometry,
min_end_sector),
True)
##
## maxSize
##
# Add a partition starting three sectors past an aligned sector and
# extending to the end of the disk so that there's a free region
# immediately following the first partition with an unaligned end
# sector.
free = disk.format.partedDisk.getFreeSpaceRegions()[-1]
raw_start = int(Size("9 MiB") / sector_size)
start = disk.format.alignment.alignUp(free, raw_start) + 3
disk.format.addPartition(start, disk.format.partedDevice.length - 1)
# Verify the end of the free region immediately following the first
# partition is unaligned.
free = disk.format.partedDisk.getFreeSpaceRegions()[1]
self.assertEqual(disk.format.endAlignment.isAligned(free, free.end),
False)
# The end sector based on device min size should be aligned.
max_sectors = int(device.maxSize / sector_size)
max_end_sector = partition.geometry.start + max_sectors - 1
self.assertEqual(
disk.format.endAlignment.isAligned(free, max_end_sector),
True)
| gpl-2.0 | -2,977,701,306,439,629,300 | 42.794872 | 80 | 0.588407 | false |
mmmatthew/raycast | code/rasterclipper.py | 1 | 5145 | from osgeo import gdal, gdalnumeric, ogr
from PIL import Image, ImageDraw
import os
import numpy as np
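# Illustrative usage (hypothetical paths; the raster and the clip shapefile are
# assumed to share the same coordinate reference system):
#
#   rast = gdal.Open('elevation.tif')
#   clipped, ulX, ulY, gt2 = clip_raster(rast, 'boundary.shp', nodata=-9999)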
def clip_raster(rast, features_path, gt=None, nodata=-9999):
'''
Copyright: http://karthur.org/2015/clipping-rasters-in-python.html
Clips a raster (given as either a gdal.Dataset or as a numpy.array
instance) to a polygon layer provided by a Shapefile (or other vector
layer). If a numpy.array is given, a "GeoTransform" must be provided
(via dataset.GetGeoTransform() in GDAL). Returns an array. Clip features
must be a dissolved, single-part geometry (not multi-part). Modified from:
http://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html
#clip-a-geotiff-with-shapefile
Arguments:
rast A gdal.Dataset or a NumPy array
features_path The path to the clipping features
gt An optional GDAL GeoTransform to use instead
nodata The NoData value; defaults to -9999.
'''
def array_to_image(a):
'''
Converts a gdalnumeric array to a Python Imaging Library (PIL) Image.
'''
        i = Image.frombytes('L', (a.shape[1], a.shape[0]),
                            (a.astype('b')).tobytes())
return i
def image_to_array(i):
'''
Converts a Python Imaging Library (PIL) array to a gdalnumeric image.
'''
a = gdalnumeric.fromstring(i.tobytes(), 'b')
a.shape = i.im.size[1], i.im.size[0]
return a
def world_to_pixel(geo_matrix, x, y):
'''
Uses a gdal geomatrix (gdal.GetGeoTransform()) to calculate
the pixel location of a geospatial coordinate; from:
http://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html#clip-a-geotiff-with-shapefile
'''
ulX = geo_matrix[0]
ulY = geo_matrix[3]
xDist = geo_matrix[1]
yDist = geo_matrix[5]
rtnX = geo_matrix[2]
rtnY = geo_matrix[4]
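        # Note: the row offset below also divides by xDist, which assumes
        # square pixels (|yDist| == xDist); yDist, rtnX and rtnY are unused here.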
pixel = int((x - ulX) / xDist)
line = int((ulY - y) / xDist)
return (pixel, line)
# Can accept either a gdal.Dataset or numpy.array instance
if not isinstance(rast, np.ndarray):
gt = rast.GetGeoTransform()
rast = rast.ReadAsArray()
# Create an OGR layer from a boundary shapefile
features = ogr.Open(features_path)
if features.GetDriver().GetName() == 'ESRI Shapefile':
lyr = features.GetLayer(os.path.split(os.path.splitext(features_path)[0])[1])
else:
lyr = features.GetLayer()
# Get the first feature
poly = lyr.GetNextFeature()
# Convert the layer extent to image pixel coordinates
minX, maxX, minY, maxY = lyr.GetExtent()
ulX, ulY = world_to_pixel(gt, minX, maxY)
lrX, lrY = world_to_pixel(gt, maxX, minY)
# Calculate the pixel size of the new image
pxWidth = int(lrX - ulX)
pxHeight = int(lrY - ulY)
# If the clipping features extend out-of-bounds and ABOVE the raster...
if gt[3] < maxY:
# In such a case... ulY ends up being negative--can't have that!
iY = ulY
ulY = 0
# Multi-band image?
try:
clip = rast[:, ulY:lrY, ulX:lrX]
except IndexError:
clip = rast[ulY:lrY, ulX:lrX]
# Create a new geomatrix for the image
gt2 = list(gt)
gt2[0] = minX
gt2[3] = maxY
# Map points to pixels for drawing the boundary on a blank 8-bit,
# black and white, mask image.
points = []
pixels = []
geom = poly.GetGeometryRef()
pts = geom.GetGeometryRef(0)
for p in range(pts.GetPointCount()):
points.append((pts.GetX(p), pts.GetY(p)))
for p in points:
pixels.append(world_to_pixel(gt2, p[0], p[1]))
raster_poly = Image.new('L', (pxWidth, pxHeight), 1)
rasterize = ImageDraw.Draw(raster_poly)
rasterize.polygon(pixels, 0) # Fill with zeroes
# If the clipping features extend out-of-bounds and ABOVE the raster...
if gt[3] < maxY:
# The clip features were "pushed down" to match the bounds of the
# raster; this step "pulls" them back up
premask = image_to_array(raster_poly)
# We slice out the piece of our clip features that are "off the map"
mask = np.ndarray((premask.shape[-2] - abs(iY), premask.shape[-1]), premask.dtype)
mask[:] = premask[abs(iY):, :]
mask.resize(premask.shape) # Then fill in from the bottom
# Most importantly, push the clipped piece down
gt2[3] = maxY - (maxY - gt[3])
else:
mask = image_to_array(raster_poly)
# Clip the image using the mask
try:
clip = gdalnumeric.choose(mask, (clip, nodata))
# If the clipping features extend out-of-bounds and BELOW the raster...
except ValueError:
# We have to cut the clipping features to the raster!
rshp = list(mask.shape)
if mask.shape[-2] != clip.shape[-2]:
rshp[0] = clip.shape[-2]
if mask.shape[-1] != clip.shape[-1]:
rshp[1] = clip.shape[-1]
mask.resize(*rshp, refcheck=False)
clip = gdalnumeric.choose(mask, (clip, nodata))
return (clip, ulX, ulY, gt2) | apache-2.0 | 3,801,077,543,901,111,000 | 33.07947 | 103 | 0.612828 | false |
mikechan0731/RaspBerryPi_MPU9250_data_read | IMU_GUI.py | 2 | 34305 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Author : MikeChan
# Email : [email protected]
# Date : 06/21/2016
import time, sys, os, datetime, threading
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#===== Global Varibles =====
now = datetime.datetime.now()
today = str( str(now).split(" ")[0].split("-")[0] + str(now).split(" ")[0].split("-")[1] + str(now).split(" ")[0].split("-")[2] )
tonow = str( str(now).split(" ")[1].split(":")[0] + str(now).split(" ")[1].split(":")[1] )
countdown_sec = 0
#===== for raspberry PI =====
import smbus
import picamera
i2c = smbus.SMBus(1)
#enable pi-camera
try:
camera = picamera.PiCamera()
except:
pass
addr = 0x68
raw_data = []
#====== QT area ======
class IMU_GUI(QWidget):
global countdown_sec
def __init__(self, parent = None):
super(IMU_GUI, self).__init__(parent)
self.imu_start_record = imu_start_record()
self.imu_start_record_stable = imu_start_record_stable()
self.save_record = save_record()
# create Layout and connection
self.createLayout()
self.createConnection()
# window properties adjustment
self.setGeometry(140,70,300,400)
#self.resize(300,400)
#self.move(140,35)
self.setWindowIcon(QIcon('/home/pi/Self_IMU/icon/QIMU.png'))
self.setWindowTitle("MPU9250 & PiCamera GUI")
# varibles
self.preview_switch = False
self.film_switch = False
self.IMU_KEY = False
self.PAUSE_KEY = False
self.photo_count = 0
self.video_count = 0
self.path = "%s_%s" % (today, tonow)
if not os.path.exists(self.path):
try:
os.makedirs(self.path)
except OSError:
if not os.path.isdir(self.path):
raise
def createLayout(self):
#===== IMU Layout =====
self.IMU_Label = QLabel("@ IMU Area @")
self.stableCheckBox = QCheckBox(u"即時慢速寫入")
self.stableCheckBox.setChecked(True)
h0=QHBoxLayout()
h0.addWidget(self.IMU_Label)
h0.addWidget(self.stableCheckBox)
self.record_sec_le = QLineEdit()
self.sec_label = QLabel(u'分')
self.set_record_sec_btn = QPushButton(u"輸入幾分鐘(0=無限)")
h01 =QHBoxLayout()
h01.addWidget(self.record_sec_le)
h01.addWidget(self.sec_label)
h01.addWidget(self.set_record_sec_btn)
self.record_sec_le.setText(str(0))
self.statusBrowser = QTextBrowser()
h1 = QHBoxLayout()
h1.addWidget(self.statusBrowser)
self.startButton = QPushButton("Start IMU")
self.pauseButton = QPushButton("Pause/Resume")
self.stopButton = QPushButton("Stop IMU")
self.saveButton = QPushButton("Save Data")
h2 = QHBoxLayout()
h2.addWidget(self.startButton)
#h2.addWidget(self.pauseButton)
h2.addWidget(self.stopButton)
h2.addWidget(self.saveButton)
#===== PiCamera layout ======
self.PiCamera_Label = QLabel("@ PiCamera Area @")
h3=QHBoxLayout()
h3.addWidget(self.PiCamera_Label)
self.camBrowser = QTextBrowser()
h4 = QHBoxLayout()
h4.addWidget(self.camBrowser)
self.previewButton = QPushButton("Preview")
self.filmButton = QPushButton("Filming")
self.photoButton = QPushButton("Photo Take")
h5 = QHBoxLayout()
h5.addWidget(self.previewButton)
h5.addWidget(self.filmButton)
h5.addWidget(self.photoButton)
# setting layout
layout1 = QVBoxLayout()
layout2 = QVBoxLayout()
layout0 = QVBoxLayout()
# IMU Layout
layout1.addLayout(h0)
layout1.addLayout(h01)
layout1.addLayout(h1)
layout1.addLayout(h2)
# PiCamera Layout
layout2.addLayout(h3)
layout2.addLayout(h4)
layout2.addLayout(h5)
layout0.addLayout(layout1)
layout0.addLayout(layout2)
self.setLayout(layout0)
self.previewButton.setEnabled(True)
self.filmButton.setEnabled(True)
self.photoButton.setEnabled(True)
self.startButton.setEnabled(True)
self.stopButton.setEnabled(False)
self.saveButton.setEnabled(False)
def createConnection(self):
#===== IMU related =====
self.set_record_sec_btn.clicked.connect(self.set_record_sec)
self.startButton.clicked.connect(self.start)
self.pauseButton.clicked.connect(self.pause)
self.stopButton.clicked.connect(self.stop)
self.saveButton.clicked.connect(self.save)
#self.connect(self.save_record, SIGNAL("finished()"), self.finished)
#===== Picamera related =====
self.previewButton.clicked.connect(self.preview)
self.filmButton.clicked.connect(self.film)
self.photoButton.clicked.connect(self.take_photo)
#====== Stable CheckBox related =====
self.stableCheckBox.stateChanged.connect(self.enable_stable)
#===== IMU Func. area =====
def set_record_sec(self):
num,ok = QInputDialog.getInt(self,u"預計IMU紀錄時間",u"輸入想要紀錄幾分鐘")
if ok:
self.record_sec_le.setText(str(num))
def start(self):
test_t0 = time.time()
        countdown_sec = int(self.record_sec_le.text()) * 60
self.statusBrowser.append("MPU9250 BootUp...")
#===== IMU initial =====
try:
i2c.write_byte_data(0x68, 0x6a, 0x00) # Clear sleep mode bit (6), enable all sensors
time.sleep(0.1) # Wait for all registers to reset
i2c.write_byte_data(0x68, 0x1a, 0x03)
# Configure Gyro and Thermometer
# Disable FSYNC and set thermometer and gyro bandwidth to 41 and 42 Hz, respectively;
# minimum delay time for this setting is 5.9 ms, which means sensor fusion update rates cannot
# be higher than 1 / 0.0059 = 170 Hz
# DLPF_CFG = bits 2:0 = 011; this limits the sample rate to 1000 Hz for both
# With the MPU9250, it is possible to get gyro sample rates of 32 kHz (!), 8 kHz, or 1 kHz
i2c.write_byte_data(0x68, 0x19, 0x04)
# sample_rate = internal_sample_rate/ (1+SMPLRT_DIV)
# Use a 200 Hz rate; a rate consistent with the filter update rate
#determined inset in CONFIG above
#ACC_CFG = i2c.read_byte_data(0x68, 0x1c)
#ACC_CFG_2 = i2c.read_byte_data(0x68, 0x1D)
#GYRO_CFG = i2c.read_byte_data(0x68, 0x1b)
i2c.write_byte_data(0x68, 0x37, 0x02) # set pass-through mode
time.sleep(0.1)
i2c.write_byte_data(0x0c, 0x0a, 0x16) # enable AKM
time.sleep(0.1)
self.statusBrowser.append("Initialization Done!")
self.IMU_KEY = True
except:
self.statusBrowser.append("Bootup Failed! Check Wiring...")
self.IMU_KEY = False
self.startButton.setEnabled(True)
self.stopButton.setEnabled(False)
self.saveButton.setEnabled(False)
#===== end initial ======
countdown_sec = int(self.record_sec_le.text()) *60
s = u"預定記錄秒數: %ss" %countdown_sec
self.statusBrowser.append(s)
        # Check whether the stable checkbox is ticked
if self.IMU_KEY and not self.stableCheckBox.isChecked():
self.tt0 = time.time()
self.statusBrowser.append(u"開始紀錄...")
self.imu_start_record.start()
elif self.IMU_KEY and self.stableCheckBox.isChecked():
self.tt0 = time.time()
self.statusBrowser.append(u"即時存檔狀態->開始紀錄...")
self.imu_start_record_stable.start()
        # Lock buttons to prevent accidental clicks
self.startButton.setEnabled(False)
self.stopButton.setEnabled(True)
self.saveButton.setEnabled(False)
def pause(self):
if not self.stableCheckBox.isChecked():
if self.PAUSE_KEY == False:
self.imu_start_record.stop()
elif self.PAUSE_KEY == True:
self.imu_start_record.start()
elif self.stableCheckBox.isChecked():
if self.PAUSE_KEY == False:
self.imu_start_record_stable.stop()
elif self.PAUSE_KEY == True:
self.imu_start_record_stable.start()
self.PAUSE_KEY = not self.PAUSE_KEY
def stop(self):
global raw_data
self.duringTime = time.time() - self.tt0
self.statusBrowser.append(u"停止紀錄!")
self.statusBrowser.append("During Time: " + "%.2f" %self.duringTime)
if not self.stableCheckBox.isChecked():
self.imu_start_record.stop()
self.save()
self.startButton.setEnabled(True)
self.stopButton.setEnabled(False)
self.saveButton.setEnabled(True)
self.statusBrowser.append(u"=檔案儲存完畢,清除 IMU 連線=")
i2c.write_byte_data(addr, 0x6A, 0x07)
elif self.stableCheckBox.isChecked():
self.imu_start_record_stable.stop()
self.startButton.setEnabled(True)
self.stopButton.setEnabled(False)
self.saveButton.setEnabled(False)
self.statusBrowser.append(u"=檔案儲存完畢,清除 IMU 連線=")
i2c.write_byte_data(addr, 0x6A, 0x07)
def save(self):
self.statusBrowser.append(u"檔案儲存中")
time.sleep(0.5)
self.save_record.start()
self.startButton.setEnabled(False)
self.stopButton.setEnabled(False)
self.saveButton.setEnabled(False)
def finished(self): # will be call when save_record was finished
self.statusBrowser.append(u"檔案已儲存,清空記憶體")
self.statusBrowser.append("===== End Section =====")
self.saveButton.setEnabled(False)
self.stopButton.setEnabled(False)
self.startButton.setEnabled(True)
# show stable checkbox option
def enable_stable(self):
check_stable = QMessageBox.question(self, u'啟動即時寫入', \
u"此選項將延遲每筆資料速度,並保證資料即時記錄於檔案中,確定嗎?\n 按'Yes'啟動即時檔案寫入,\n 按'NO'取消即時檔案寫入", \
QMessageBox.Yes | QMessageBox.No)
if check_stable == QMessageBox.Yes:
self.statusBrowser.append(u"*IMU即時寫入已啟動")
self.stableCheckBox.setCheckState(2)
self.saveButton.setEnabled(False)
else:
self.statusBrowser.append(u"*取消即時寫入")
self.stableCheckBox.setCheckState(0)
self.saveButton.setEnabled(True)
#===== Picamera Func. area =====
def preview(self):
camera.stop_preview()
if not self.preview_switch:
self.camBrowser.append("Preview on.")
camera.start_preview(fullscreen = False, window = (450, 10, 400, 300) )
self.preview_switch = True
elif self.preview_switch:
self.camBrowser.append("Preview off.")
self.camBrowser.append("==========")
camera.stop_preview()
self.preview_switch = False
else:
self.camBrowser.append("Prview Booom!")
def film(self):
#camera.stop_recording()
film_path = self.path
if not os.path.exists(film_path):
try:
os.makedirs(film_path)
except OSError:
if not os.path.isdir(film_path):
raise
if not self.film_switch:
self.camBrowser.append("Start Filming...")
self.video_count += 1
camera.start_recording(film_path + '/video%d.h264' %(self.video_count) )
self.film_switch = True
self.photoButton.setEnabled(False)
elif self.film_switch:
self.camBrowser.append("Stop Filming...")
camera.stop_recording()
self.camBrowser.append("Film saved.")
self.film_switch = False
self.camBrowser.append("==========")
self.photoButton.setEnabled(True)
else:
self.camBrowser.append("Film Booom!")
def take_photo(self):
self.photo_count += 1
self.filmButton.setEnabled(False)
self.photoButton.setEnabled(False)
# Create "Photo" folder if not exist
photo_path = self.path
if not os.path.exists(photo_path):
try:
os.makedirs(photo_path)
except OSError:
if not os.path.isdir(photo_path):
raise
camera.capture(photo_path + '/image%d.jpg' %self.photo_count )
self.camBrowser.append("image%d saved" %self.photo_count )
self.photoButton.setEnabled(True)
self.filmButton.setEnabled(True)
class imu_start_record(QThread):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent)
self.stoped = False
self.mutex = QMutex()
#===== QTimer =====
self.timer = QTimer()
self.timer.setInterval(10)
self.connect(self.timer, SIGNAL("timeout()"), self.mpu9250_data_get_and_write)
def run(self):
global raw_data
# varibles
self.t0 = time.time()
self.t_a_g = []
with QMutexLocker(self.mutex):
self.stoped = False
self.timer.start()
raw_data = list(self.t_a_g)
#print "data copy!"
def stop(self):
with QMutexLocker(self.mutex):
self.stoped = True
def isStop(self):
with QMutexLocker(self.mutex):
return self.stoped
def mpu9250_data_get_and_write(self):
# keep AKM pointer on continue measuring
i2c.write_byte_data(0x0c, 0x0a, 0x16)
# get MPU9250 smbus block data
#xyz_g_offset = i2c.read_i2c_block_data(addr, 0x13, 6)
xyz_a_out = i2c.read_i2c_block_data(addr, 0x3B, 6)
xyz_g_out = i2c.read_i2c_block_data(addr, 0x43, 6)
#xyz_a_offset = i2c.read_i2c_block_data(addr, 0x77, 6)
# get AK8963 smbus data (by pass-through way)
xyz_mag = i2c.read_i2c_block_data(0x0c, 0x03, 6)
#xyz_mag_adj = i2c.read_i2c_block_data(0x0c, 0x10, 3)
# get real time
t1 = time.time() - self.t0
# save file to list buffer
self.t_a_g.append(t1)
self.t_a_g.append(xyz_a_out)
self.t_a_g.append(xyz_g_out)
self.t_a_g.append(xyz_mag)
#self.t_a_g.append(xyz_mag_adj)
time.sleep(0.00001)
class imu_start_record_stable(QThread):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent)
self.stoped = False
self.mutex = QMutex()
self.t0 = time.time()
self.imu_count_stable = 0
self.countdown_sec = 60
#===== QTimer =====
self.big_timer = QTimer()
self.timer = QTimer()
#self.timer.setInterval(10)
self.connect(self.timer, SIGNAL("timeout()"), self.mpu9250_data_get_and_write)
def run(self):
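        # Poll the sensor every 7 ms (roughly 140 Hz at best; the real rate is
        # limited by the I2C block reads in mpu9250_data_get_and_write).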
self.timer.start(7)
self.big_timer.singleShot(self.countdown_sec *1000, self.stop)
with QMutexLocker(self.mutex):
self.stoped = False
def stop(self):
with QMutexLocker(self.mutex):
self.stoped = True
self.timer.stop()
print 'Done'
def isStop(self):
with QMutexLocker(self.mutex):
return self.stoped
def mpu9250_data_get_and_write(self):
# keep AKM pointer on continue measuring
i2c.write_byte_data(0x0c, 0x0a, 0x16)
# get MPU9250 smbus block data
#xyz_g_offset = i2c.read_i2c_block_data(addr, 0x13, 6)
xyz_a_out = i2c.read_i2c_block_data(addr, 0x3B, 6)
xyz_g_out = i2c.read_i2c_block_data(addr, 0x43, 6)
#xyz_a_offset = i2c.read_i2c_block_data(addr, 0x77, 6)
# get AK8963 smbus data (by pass-through way)
xyz_mag = i2c.read_i2c_block_data(0x0c, 0x03, 6)
#xyz_mag_adj = i2c.read_i2c_block_data(0x0c, 0x10, 3)
# get real time
t1 = time.time() - self.t0
filename = "IMU_LOG_REALTIME_%s.txt" %(self.imu_count_stable)
file_s = open(filename, "a")
print >> file_s, "%.6f" %t1
print >> file_s, xyz_a_out
print >> file_s, xyz_g_out
print >> file_s, xyz_mag
file_s.close()
time.sleep(0.00001)
class save_record(QThread):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent)
self.imu_count = 0
def run(self):
global raw_data
self.imu_count += 1
try:
filename = "IMU_LOG_%s.txt" %(self.imu_count)
self.data_f = open(filename, "w")
for i in raw_data:
if isinstance(i, float):
print >> self.data_f ,"%f" %i
else:
print >> self.data_f , i
except:
print "Data saving failed"
finally:
raw_data = []
self.data_f.close()
if __name__ == "__main__":
app=QApplication(sys.argv)
IMU_GUI = IMU_GUI()
IMU_GUI.show()
sys.exit(app.exec_())
| mit | 8,342,836,070,605,105,000 | 30.246948 | 129 | 0.563008 | false |
MihaiMoldovanu/ansible | lib/ansible/modules/cloud/cloudstack/cs_host.py | 26 | 18433 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_host
short_description: Manages hosts on Apache CloudStack based clouds.
description:
- Create, update and remove hosts.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the host.
required: true
aliases: [ 'ip_address' ]
url:
description:
- Url of the host used to create a host.
- If not provided, C(http://) and param C(name) is used as url.
- Only considered if C(state=present) and host does not yet exist.
username:
description:
- Username for the host.
- Required if C(state=present) and host does not yet exist.
password:
description:
- Password for the host.
- Required if C(state=present) and host does not yet exist.
pod:
description:
- Name of the pod.
- Required if C(state=present) and host does not yet exist.
cluster:
description:
- Name of the cluster.
hypervisor:
description:
      - Name of the hypervisor.
- Required if C(state=present) and host does not yet exist.
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM', 'Simulator' ]
allocation_state:
description:
- Allocation state of the host.
    choices: [ 'enabled', 'disabled', 'maintenance' ]
host_tags:
description:
- Tags of the host.
aliases: [ host_tag ]
state:
description:
- State of the host.
default: 'present'
choices: [ 'present', 'absent' ]
zone:
description:
- Name of the zone in which the host should be deployed.
- If not set, default zone is used.
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Ensure a host is present but disabled
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
cluster: vcenter.example.com/ch-zrh-ix/pod01-cluster01
pod: pod01
zone: ch-zrh-ix-01
hypervisor: VMware
allocation_state: disabled
host_tags:
- perf
- gpu
- name: Ensure an existing host is disabled
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
allocation_state: disabled
- name: Ensure an existing host is enabled
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
allocation_state: enabled
- name: Ensure a host is absent
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
state: absent
'''
RETURN = '''
---
capabilities:
description: Capabilities of the host.
returned: success
type: string
sample: hvm
cluster:
description: Cluster of the host.
returned: success
type: string
sample: vcenter.example.com/zone/cluster01
cluster_type:
description: Type of the cluster of the host.
returned: success
type: string
sample: ExternalManaged
cpu_allocated:
description: Amount in percent of the host's CPU currently allocated.
returned: success
type: string
sample: 166.25%
cpu_number:
description: Number of CPUs of the host.
returned: success
type: string
sample: 24
cpu_sockets:
description: Number of CPU sockets of the host.
returned: success
type: int
sample: 2
cpu_speed:
description: CPU speed in Mhz
returned: success
type: int
sample: 1999
cpu_used:
description: Amount of the host's CPU currently used.
returned: success
type: string
sample: 33.6%
cpu_with_overprovisioning:
description: Amount of the host's CPU after applying the cpu.overprovisioning.factor.
returned: success
type: string
sample: 959520.0
created:
description: Date when the host was created.
returned: success
type: string
sample: 2015-05-03T15:05:51+0200
disconnected:
description: Date when the host was disconnected.
returned: success
type: string
sample: 2015-05-03T15:05:51+0200
disk_size_allocated:
description: Host's currently allocated disk size.
returned: success
type: int
sample: 2593
disk_size_total:
description: Total disk size of the host
returned: success
type: int
sample: 259300
events:
description: Events available for the host
returned: success
type: string
sample: "Ping; HostDown; AgentConnected; AgentDisconnected; PingTimeout; ShutdownRequested; Remove; StartAgentRebalance; ManagementServerDown"
ha_host:
description: Whether the host is a HA host.
returned: success
type: bool
sample: false
has_enough_capacity:
description: Whether the host has enough CPU and RAM capacity to migrate a VM to it.
returned: success
type: bool
sample: true
host_tags:
description: Comma-separated list of tags for the host.
returned: success
type: string
sample: "perf"
hypervisor:
description: Host's hypervisor.
returned: success
type: string
sample: VMware
hypervisor_version:
description: Hypervisor version.
returned: success
type: string
sample: 5.1
ip_address:
description: IP address of the host
returned: success
type: string
sample: 10.10.10.1
is_local_storage_active:
description: Whether the local storage is available or not.
returned: success
type: bool
sample: false
last_pinged:
description: Date and time the host was last pinged.
returned: success
type: string
sample: "1970-01-17T17:27:32+0100"
management_server_id:
description: Management server ID of the host.
returned: success
type: int
sample: 345050593418
memory_allocated:
description: Amount of the host's memory currently allocated.
returned: success
type: int
sample: 69793218560
memory_total:
description: Total of memory of the host.
returned: success
type: int
sample: 206085263360
memory_used:
description: Amount of the host's memory currently used.
returned: success
type: int
sample: 65504776192
name:
description: Name of the host.
returned: success
type: string
sample: esx32.example.com
network_kbs_read:
description: Incoming network traffic on the host.
returned: success
type: int
sample: 0
network_kbs_write:
description: Outgoing network traffic on the host.
returned: success
type: int
sample: 0
os_category:
description: OS category name of the host.
returned: success
type: string
sample: ...
out_of_band_management:
description: Host out-of-band management information.
returned: success
type: string
sample: ...
pod:
description: Pod name of the host.
returned: success
type: string
sample: Pod01
removed:
description: Date and time the host was removed.
returned: success
type: string
sample: "1970-01-17T17:27:32+0100"
resource_state:
description: Resource state of the host.
returned: success
type: string
sample: Enabled
allocation_state:
description: Allocation state of the host.
returned: success
type: string
sample: enabled
state:
description: State of the host.
returned: success
type: string
sample: Up
suitable_for_migration:
description: Whether this host is suitable (has enough capacity and satisfies all conditions like hosttags, max guests VM limit, etc) to migrate a VM
to it or not.
returned: success
type: string
sample: true
host_type:
description: Type of the host.
returned: success
type: string
sample: Routing
host_version:
description: Version of the host.
returned: success
type: string
sample: 4.5.2
gpu_group:
description: GPU cards present in the host.
returned: success
type: list
sample: []
zone:
description: Zone of the host.
returned: success
type: string
sample: zone01
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
CS_HYPERVISORS
)
import time
class AnsibleCloudStackHost(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackHost, self).__init__(module)
self.returns = {
'averageload': 'average_load',
'capabilities': 'capabilities',
'clustername': 'cluster',
'clustertype': 'cluster_type',
'cpuallocated': 'cpu_allocated',
'cpunumber': 'cpu_number',
'cpusockets': 'cpu_sockets',
'cpuspeed': 'cpu_speed',
'cpuused': 'cpu_used',
'cpuwithoverprovisioning': 'cpu_with_overprovisioning',
'disconnected': 'disconnected',
'details': 'details',
'disksizeallocated': 'disk_size_allocated',
'disksizetotal': 'disk_size_total',
'events': 'events',
'hahost': 'ha_host',
'hasenoughcapacity': 'has_enough_capacity',
'hypervisor': 'hypervisor',
'hypervisorversion': 'hypervisor_version',
'ipaddress': 'ip_address',
'islocalstorageactive': 'is_local_storage_active',
'lastpinged': 'last_pinged',
'managementserverid': 'management_server_id',
'memoryallocated': 'memory_allocated',
'memorytotal': 'memory_total',
'memoryused': 'memory_used',
'networkkbsread': 'network_kbs_read',
'networkkbswrite': 'network_kbs_write',
'oscategoryname': 'os_category',
'outofbandmanagement': 'out_of_band_management',
'podname': 'pod',
'removed': 'removed',
'resourcestate': 'resource_state',
'suitableformigration': 'suitable_for_migration',
'type': 'host_type',
'version': 'host_version',
'gpugroup': 'gpu_group',
}
# States only usable by the updateHost API
self.allocation_states_for_update = {
'enabled': 'Enable',
'disabled': 'Disable',
}
self.host = None
def get_pod(self, key=None):
pod_name = self.module.params.get('pod')
if not pod_name:
return None
args = {
'name': pod_name,
'zoneid': self.get_zone(key='id'),
}
pods = self.query_api('listPods', **args)
if pods:
return self._get_by_key(key, pods['pod'][0])
self.module.fail_json(msg="Pod %s not found" % pod_name)
def get_cluster(self, key=None):
cluster_name = self.module.params.get('cluster')
if not cluster_name:
return None
args = {
'name': cluster_name,
'zoneid': self.get_zone(key='id'),
}
clusters = self.query_api('listClusters', **args)
if clusters:
return self._get_by_key(key, clusters['cluster'][0])
self.module.fail_json(msg="Cluster %s not found" % cluster_name)
def get_host_tags(self):
host_tags = self.module.params.get('host_tags')
if host_tags is None:
return None
return ','.join(host_tags)
def get_host(self, refresh=False):
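        # Cache the lookup result; pass refresh=True to force a new listHosts query.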
if self.host is not None and not refresh:
return self.host
name = self.module.params.get('name')
args = {
'zoneid': self.get_zone(key='id'),
}
res = self.query_api('listHosts', **args)
if res:
for h in res['host']:
if name in [h['ipaddress'], h['name']]:
self.host = h
return self.host
def _handle_allocation_state(self, host):
allocation_state = self.module.params.get('allocation_state')
if not allocation_state:
return host
host = self._set_host_allocation_state(host)
# In case host in maintenance and target is maintenance
if host['allocationstate'].lower() == allocation_state and allocation_state == 'maintenance':
return host
# Cancel maintenance if target state is enabled/disabled
elif allocation_state in list(self.allocation_states_for_update.keys()):
host = self.disable_maintenance(host)
host = self._update_host(host, self.allocation_states_for_update[allocation_state])
# Only an enabled host can put in maintenance
elif allocation_state == 'maintenance':
host = self._update_host(host, 'Enable')
host = self.enable_maintenance(host)
return host
def _set_host_allocation_state(self, host):
if host is None:
            host = {'allocationstate': 'Enable'}
# Set host allocationstate to be disabled/enabled
elif host['resourcestate'].lower() in list(self.allocation_states_for_update.keys()):
host['allocationstate'] = self.allocation_states_for_update[host['resourcestate'].lower()]
else:
host['allocationstate'] = host['resourcestate']
return host
def present_host(self):
host = self.get_host()
if not host:
host = self._create_host(host)
else:
host = self._update_host(host)
if host:
host = self._handle_allocation_state(host)
return host
def _get_url(self):
url = self.module.params.get('url')
if url:
return url
else:
return "http://%s" % self.module.params.get('name')
def _create_host(self, host):
required_params = [
'password',
'username',
'hypervisor',
'pod',
]
self.module.fail_on_missing_params(required_params=required_params)
self.result['changed'] = True
args = {
'hypervisor': self.module.params.get('hypervisor'),
'url': self._get_url(),
'username': self.module.params.get('username'),
'password': self.module.params.get('password'),
'podid': self.get_pod(key='id'),
'zoneid': self.get_zone(key='id'),
'clusterid': self.get_cluster(key='id'),
'hosttags': self.get_host_tags(),
}
if not self.module.check_mode:
host = self.query_api('addHost', **args)
host = host['host'][0]
return host
def _update_host(self, host, allocation_state=None):
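        # updateHost only adjusts host tags and the Enable/Disable allocation
        # state; maintenance transitions are handled by the *_maintenance methods.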
args = {
'id': host['id'],
'hosttags': self.get_host_tags(),
'allocationstate': allocation_state,
}
if allocation_state is not None:
host = self._set_host_allocation_state(host)
if self.has_changed(args, host):
self.result['changed'] = True
if not self.module.check_mode:
host = self.query_api('updateHost', **args)
host = host['host']
return host
def absent_host(self):
host = self.get_host()
if host:
self.result['changed'] = True
args = {
'id': host['id'],
}
if not self.module.check_mode:
res = self.enable_maintenance(host)
if res:
res = self.query_api('deleteHost', **args)
return host
def enable_maintenance(self, host):
if host['resourcestate'] not in ['PrepareForMaintenance', 'Maintenance']:
self.result['changed'] = True
args = {
'id': host['id'],
}
if not self.module.check_mode:
res = self.query_api('prepareHostForMaintenance', **args)
self.poll_job(res, 'host')
host = self._poll_for_maintenance()
return host
def disable_maintenance(self, host):
if host['resourcestate'] in ['PrepareForMaintenance', 'Maintenance']:
self.result['changed'] = True
args = {
'id': host['id'],
}
if not self.module.check_mode:
res = self.query_api('cancelHostMaintenance', **args)
host = self.poll_job(res, 'host')
return host
def _poll_for_maintenance(self):
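        # Poll every 2 seconds, up to 300 times (~10 minutes), until the host
        # leaves the PrepareForMaintenance state.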
for i in range(0, 300):
time.sleep(2)
host = self.get_host(refresh=True)
if not host:
return None
elif host['resourcestate'] != 'PrepareForMaintenance':
return host
        self.module.fail_json(msg="Polling for maintenance timed out")
def get_result(self, host):
super(AnsibleCloudStackHost, self).get_result(host)
if host:
self.result['allocation_state'] = host['resourcestate'].lower()
self.result['host_tags'] = host['hosttags'].split(',') if host.get('hosttags') else []
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True, aliases=['ip_address']),
url=dict(),
password=dict(no_log=True),
username=dict(),
hypervisor=dict(choices=CS_HYPERVISORS),
allocation_state=dict(choices=['enabled', 'disabled', 'maintenance']),
pod=dict(),
cluster=dict(),
host_tags=dict(type='list', aliases=['host_tag']),
zone=dict(),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_host = AnsibleCloudStackHost(module)
state = module.params.get('state')
if state == 'absent':
host = acs_host.absent_host()
else:
host = acs_host.present_host()
result = acs_host.get_result(host)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,734,906,756,448,105,500 | 28.442492 | 151 | 0.619825 | false |
awhite40/pymks | pymks/bases/gsh.py | 1 | 6022 | import numpy as np
import gsh_hex_tri_L0_16 as gsh_hex
import gsh_cub_tri_L0_16 as gsh_cub
import gsh_tri_tri_L0_13 as gsh_tri
from .abstract import _AbstractMicrostructureBasis
class GSHBasis(_AbstractMicrostructureBasis):
r"""
Discretize a continuous field into continuous local states using a
Generalized Spherical Harmonic (GSH) basis such that,
.. math::
\frac{1}{\Delta} \int_s m(g, x) dx =
\sum_{l, m, n} m[l, \tilde{m}, n, s] T_l^{\tilde{m}n}(g)
where the :math:`T_l^{\tilde{m}n}` are GSH basis functions and the
local state space :math:`H` is mapped into the orthogonal, periodic
domain of the GSH functions
The mapping of :math:`H` into some desired periodic domain is done
automatically in PyMKS by using the `domain` key work argument to
select the desired crystal symmetry.
>>> X = np.array([[0.1, 0.2, 0.3],
... [6.5, 2.3, 3.4]])
>>> gsh_basis = GSHBasis(n_states = [3], domain='hexagonal')
>>> def test_gsh(x):
... phi = x[:, 1]
... t915 = np.cos(phi)
... return 0.15e2 / 0.2e1 * t915 ** 2 - 0.5e1 / 0.2e1
>>> assert(np.allclose(np.squeeze(gsh_basis.discretize(X)), test_gsh(X)))
If you select an invalid crystal symmetry PyMKS will give an error
>>> gsh_basis = GSHBasis(n_states=[3], domain='squishy') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: invalid crystal symmetry
>>> gsh_basis = GSHBasis(n_states=[3], domain='hex') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: invalid crystal symmetry
"""
def __init__(self, n_states=np.arange(15), domain=None):
"""
Instantiate a `Basis`
Args:
            n_states (int, array): An array of local states to be used, or the
                number of states requested. If an integer is provided, all
                local states up to that number will be used.
domain (list, optional): indicate the desired crystal symmetry for
the GSH. Valid choices for symmetry are "hexagonal", "cubic" or
"triclinic" if no symmetry is desired (not specifying any
symmetry has the same effect)
"""
self.n_states = n_states
if isinstance(self.n_states, int):
self.n_states = np.arange(n_states)
if domain in [None, 'triclinic']:
self.domain = 'triclinic'
self._symmetry = gsh_tri
elif domain in ['hexagonal']:
self.domain = 'hexagonal'
self._symmetry = gsh_hex
elif domain in ['cubic']:
self.domain = 'cubic'
self._symmetry = gsh_cub
else:
raise RuntimeError("invalid crystal symmetry")
full_indx = self._symmetry.gsh_basis_info()
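        # Keep only the index rows that correspond to the requested basis functions.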
self.basis_indices = full_indx[self.n_states, :]
def check(self, X):
"""Warns the user if Euler angles apear to be defined in degrees
instead of radians"""
if (np.min(X) < -90.) or (np.max(X) > 90.):
print "Warning: X may be defined in degrees instead of radians"
def _shape_check(self, X, y):
"""
Checks the shape of the microstructure and response data to
ensure that they are correct.
Firstly, the response data "y" must have a dimension to index the
microstructure instantiation and at least one dimension to index the
local microstructural information.
        Second, the shape of X and y must be equal except for the last
dimension of X.
Finally, the length of the final dimension of X must be 3.
This is because we assume that Bunge Euler angles are assigned for
each location in the microstructure
"""
if not len(y.shape) > 1:
raise RuntimeError("The shape of y is incorrect.")
if y.shape != X.shape[:-1]:
raise RuntimeError("X and y must have the same number of " +
"samples and microstructure shape.")
if X.shape[-1] != 3:
raise RuntimeError("X must have 3 continuous local states " +
"(euler angles)")
def _pred_shape(self, X):
"""
Function to describe the expected output shape of a given
microstructure X.
"""
return X.shape[:-1] # X has Euler angles, while output is scalar
def discretize(self, X):
"""
Discretize `X`.
Args:
X (ND array): The microstructure, an `(n_samples, n_x, ..., 3)`
shaped array where `n_samples` is the number of samples,
`n_x` is the spatial discretization and the last dimension
contains the Bunge Euler angles.
Returns:
Float valued field of of Generalized Spherical Harmonics
coefficients.
>>> X = np.array([[0.1, 0.2, 0.3],
... [6.5, 2.3, 3.4]])
>>> gsh_basis = GSHBasis(n_states = [1])
>>> def q(x):
... phi1 = x[:, 0]
... phi = x[:, 1]
... phi2 = x[:, 2]
... x_GSH = ((0.3e1 / 0.2e1) * (0.1e1 + np.cos(phi)) *
... np.exp((-1*1j) * (phi1 + phi2)))
... return x_GSH
>>> assert(np.allclose(np.squeeze(gsh_basis.discretize(X)), q(X)))
"""
self.check(X)
return self._symmetry.gsh_eval(X, self.n_states)
def _reshape_feature(self, X, size):
"""
Helper function used to check the shape of the microstructure,
and change to appropriate shape.
Args:
X: The microstructure, an `(n_samples, n_x, ...)` shaped array
                where `n_samples` is the number of samples and `n_x` is the
                spatial discretization.
Returns:
microstructure with shape (n_samples, size)
"""
new_shape = (X.shape[0],) + size + (X.shape[-1],)
return X.reshape(new_shape)
| mit | -3,618,581,478,012,761,600 | 36.403727 | 81 | 0.566921 | false |
maxikov/nyaafs | nyaafs/trunk/nyafs.py | 1 | 5443 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
#
# Copyright (C) 2009 Vladimir Badaev
#
# This file is part of NyaaFS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $Id: nyafs.py 7 2009-05-15 19:13:07Z Vladimir.Badaev $
# imports
# import errno
import fuse
fuse.fuse_python_api = (0,2)
import sqlite3 as db
#
# NyaDB
#
class NyaDB(object):
"""
    Class for working with the database.
    Pre-alpha :)
"""
def __init__(self):
"""
        Constructor. A couple of parameters could be added here, e.g. the database name.
"""
pass
def __del__(self):
"""
        Destructor.
"""
pass
def getFileAttr(self, path):
"""
        Returns the NyaStat for the file at path.
"""
pass
def setFileAttr(self, path, nyaStat):
"""
        Changes the file's attributes.
"""
pass
def getRealFile(self, path, mode=None):
"""
        Returns the path to the file on the host FS for the given path.
        Permission checks and the like may also be needed here.
"""
pass
def getFilesFromDir(self, path):
"""
        Returns the list of files in the directory path.
        Possibly as (name, NyaStat) pairs.
"""
pass
def newFile(self, path, type, stat):
"""
        Not settled yet; this will probably be reworked several times.
        Roughly: create a new file described by a path, a type (file, directory, etc.) and a stat.
"""
pass
def getFileXAttr(self, *hz):
"""
        Get the file's extended attributes. Still need to work out why this exists at all.
        By the way, a function for listing the xattrs may be needed later.
"""
pass
def setFileXAttr(self, *hz):
"""
        Change the xattrs.
"""
pass
#
# NyaStat
#
class NyaStat(fuse.Stat):
"""
    Standard file information, see stat(2).
    Contains:
struct stat {
dev_t st_dev; /* ID of device containing file */
ino_t st_ino; /* inode number */
mode_t st_mode; /* protection */
nlink_t st_nlink; /* number of hard links */
uid_t st_uid; /* user ID of owner */
gid_t st_gid; /* group ID of owner */
dev_t st_rdev; /* device ID (if special file) */
off_t st_size; /* total size, in bytes */
blksize_t st_blksize; /* blocksize for file system I/O */
blkcnt_t st_blocks; /* number of 512B blocks allocated */
time_t st_atime; /* time of last access */
time_t st_mtime; /* time of last modification */
time_t st_ctime; /* time of last status change */
};
"""
pass
#
# NyaFile
#
class NyaFile(object):
"""
    This class provides the interface to a file.
"""
def __init__(self, path, flags, mode=None):
"""
"""
pass
def read(self, length, offset, fh=None):
"""
        Read from the file.
"""
pass
def write(self, buf, offset, fh=None):
"""
        Write to the file.
"""
pass
def fgetattr(self, fh=None):
"""
"""
pass
def ftruncate(self, len, fh=None):
"""
"""
pass
def flush(self, fh=None):
"""
"""
pass
def release(self, fh=None):
"""
"""
pass
def fsync(self, fdatasync, fh=None):
"""
"""
pass
#
# NyaFS
#
class NyaFS(object):
"""
"""
def __init__(self):
"""
"""
pass
def getattr(self, path):
"""
"""
pass
def readlink(self, path):
"""
"""
pass
def mknod(self, path, mode, rdev):
"""
"""
pass
def mkdir(self, path, mode):
"""
"""
pass
def unlink(self, path):
"""
"""
pass
def symlink(self, target, name):
"""
"""
pass
def rename(self, old, new):
"""
"""
pass
def link(self, target, name):
"""
"""
pass
def fsinit(self):
"""
"""
pass
| gpl-3.0 | -5,718,833,874,135,542,000 | 20.886878 | 106 | 0.523878 | false |
sephalon/python-ivi | ivi/agilent/agilent8596E.py | 6 | 1556 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8590E import *
class agilent8596E(agilentBase8590E):
"Agilent 8596E IVI spectrum analyzer driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'HP8596E')
super(agilent8596E, self).__init__(*args, **kwargs)
self._input_impedance = 50
self._frequency_low = 9e3
self._frequency_high = 12.8e9
| mit | -4,970,438,373,828,452,000 | 36.047619 | 77 | 0.74036 | false |
franek/weboob | modules/gdcvault/test.py | 5 | 1595 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
# Copyright(C) 2012 François Revol
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
#from weboob.capabilities.video import BaseVideo
class GDCVaultTest(BackendTest):
BACKEND = 'gdcvault'
# def test_search(self):
# l = list(self.backend.search_videos('linux'))
# self.assertTrue(len(l) > 0)
# v = l[0]
# self.backend.fillobj(v, ('url',))
# self.assertTrue(v.url and v.url.startswith('http://'), 'URL for video "%s" not found: %s' % (v.id, v.url))
# self.backend.browser.openurl(v.url)
# def test_latest(self):
# l = list(self.backend.iter_resources([BaseVideo], [u'latest']))
# self.assertTrue(len(l) > 0)
# v = l[0]
# self.backend.fillobj(v, ('url',))
# self.assertTrue(v.url and v.url.startswith('http://'), 'URL for video "%s" not found: %s' % (v.id, v.url))
| agpl-3.0 | 7,942,737,984,229,299,000 | 36.952381 | 116 | 0.666248 | false |
gedare/gem5 | src/python/m5/simulate.py | 5 | 13132 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Steve Reinhardt
import atexit
import os
import sys
# import the wrapped C++ functions
import _m5.drain
import _m5.core
from _m5.stats import updateEvents as updateStatEvents
import stats
import SimObject
import ticks
import objects
from m5.util.dot_writer import do_dot, do_dvfs_dot
from util import fatal
from util import attrdict
# define a MaxTick parameter, unsigned 64 bit
MaxTick = 2**64 - 1
_memory_modes = {
"atomic" : objects.params.atomic,
"timing" : objects.params.timing,
"atomic_noncaching" : objects.params.atomic_noncaching,
}
_drain_manager = _m5.drain.DrainManager.instance()
# The final hook to generate .ini files. Called from the user script
# once the config is built.
def instantiate(ckpt_dir=None):
from m5 import options
root = objects.Root.getInstance()
if not root:
fatal("Need to instantiate Root() before calling instantiate()")
# we need to fix the global frequency
ticks.fixGlobalFrequency()
# Make sure SimObject-valued params are in the configuration
# hierarchy so we catch them with future descendants() walks
for obj in root.descendants(): obj.adoptOrphanParams()
# Unproxy in sorted order for determinism
for obj in root.descendants(): obj.unproxyParams()
if options.dump_config:
ini_file = file(os.path.join(options.outdir, options.dump_config), 'w')
# Print ini sections in sorted order for easier diffing
for obj in sorted(root.descendants(), key=lambda o: o.path()):
obj.print_ini(ini_file)
ini_file.close()
if options.json_config:
try:
import json
json_file = file(os.path.join(options.outdir, options.json_config), 'w')
d = root.get_config_as_dict()
json.dump(d, json_file, indent=4)
json_file.close()
except ImportError:
pass
do_dot(root, options.outdir, options.dot_config)
# Initialize the global statistics
stats.initSimStats()
# Create the C++ sim objects and connect ports
for obj in root.descendants(): obj.createCCObject()
for obj in root.descendants(): obj.connectPorts()
# Do a second pass to finish initializing the sim objects
for obj in root.descendants(): obj.init()
# Do a third pass to initialize statistics
for obj in root.descendants(): obj.regStats()
# Do a fourth pass to initialize probe points
for obj in root.descendants(): obj.regProbePoints()
# Do a fifth pass to connect probe listeners
for obj in root.descendants(): obj.regProbeListeners()
# We want to generate the DVFS diagram for the system. This can only be
# done once all of the CPP objects have been created and initialised so
# that we are able to figure out which object belongs to which domain.
if options.dot_dvfs_config:
do_dvfs_dot(root, options.outdir, options.dot_dvfs_config)
# We're done registering statistics. Enable the stats package now.
stats.enable()
# Restore checkpoint (if any)
if ckpt_dir:
_drain_manager.preCheckpointRestore()
ckpt = _m5.core.getCheckpoint(ckpt_dir)
_m5.core.unserializeGlobals(ckpt);
for obj in root.descendants(): obj.loadState(ckpt)
else:
for obj in root.descendants(): obj.initState()
# Check to see if any of the stat events are in the past after resuming from
# a checkpoint, If so, this call will shift them to be at a valid time.
updateStatEvents()
need_startup = True
def simulate(*args, **kwargs):
global need_startup
if need_startup:
root = objects.Root.getInstance()
for obj in root.descendants(): obj.startup()
need_startup = False
# Python exit handlers happen in reverse order.
# We want to dump stats last.
atexit.register(stats.dump)
# register our C++ exit callback function with Python
atexit.register(_m5.core.doExitCleanup)
# Reset to put the stats in a consistent state.
stats.reset()
if _drain_manager.isDrained():
_drain_manager.resume()
return _m5.event.simulate(*args, **kwargs)
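# Illustrative run-script sketch (hypothetical, not part of this module): a
# typical configuration script instantiates the Root object and then drives
# the event loop with simulate(), e.g.
#
#     m5.instantiate()
#     exit_event = m5.simulate()
#     print 'Exiting @ tick %i because %s' % (m5.curTick(),
#                                             exit_event.getCause())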
def drain():
"""Drain the simulator in preparation of a checkpoint or memory mode
switch.
This operation is a no-op if the simulator is already in the
Drained state.
"""
# Try to drain all objects. Draining might not be completed unless
# all objects return that they are drained on the first call. This
# is because as objects drain they may cause other objects to no
# longer be drained.
def _drain():
# Try to drain the system. The drain is successful if all
# objects are done without simulation. We need to simulate
# more if not.
if _drain_manager.tryDrain():
return True
# WARNING: if a valid exit event occurs while draining, it
# will not get returned to the user script
exit_event = _m5.event.simulate()
while exit_event.getCause() != 'Finished drain':
exit_event = simulate()
return False
# Don't try to drain a system that is already drained
is_drained = _drain_manager.isDrained()
while not is_drained:
is_drained = _drain()
assert _drain_manager.isDrained(), "Drain state inconsistent"
def memWriteback(root):
for obj in root.descendants():
obj.memWriteback()
def memInvalidate(root):
for obj in root.descendants():
obj.memInvalidate()
def checkpoint(dir):
root = objects.Root.getInstance()
if not isinstance(root, objects.Root):
raise TypeError, "Checkpoint must be called on a root object."
drain()
memWriteback(root)
print "Writing checkpoint"
_m5.core.serializeAll(dir)
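# Illustrative usage sketch (hypothetical paths): a run script would normally
# call this through the m5 package, e.g. after simulate() returns a
# checkpoint exit event:
#
#     if exit_event.getCause() == 'checkpoint':
#         m5.checkpoint(os.path.join(outdir, 'cpt.%d' % m5.curTick()))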
def _changeMemoryMode(system, mode):
if not isinstance(system, (objects.Root, objects.System)):
raise TypeError, "Parameter of type '%s'. Must be type %s or %s." % \
(type(system), objects.Root, objects.System)
if system.getMemoryMode() != mode:
system.setMemoryMode(mode)
else:
print "System already in target mode. Memory mode unchanged."
def switchCpus(system, cpuList, verbose=True):
"""Switch CPUs in a system.
Note: This method may switch the memory mode of the system if that
is required by the CPUs. It may also flush all caches in the
system.
Arguments:
system -- Simulated system.
cpuList -- (old_cpu, new_cpu) tuples
"""
if verbose:
print "switching cpus"
if not isinstance(cpuList, list):
raise RuntimeError, "Must pass a list to this function"
for item in cpuList:
if not isinstance(item, tuple) or len(item) != 2:
raise RuntimeError, "List must have tuples of (oldCPU,newCPU)"
old_cpus = [old_cpu for old_cpu, new_cpu in cpuList]
new_cpus = [new_cpu for old_cpu, new_cpu in cpuList]
old_cpu_set = set(old_cpus)
memory_mode_name = new_cpus[0].memory_mode()
for old_cpu, new_cpu in cpuList:
if not isinstance(old_cpu, objects.BaseCPU):
raise TypeError, "%s is not of type BaseCPU" % old_cpu
if not isinstance(new_cpu, objects.BaseCPU):
raise TypeError, "%s is not of type BaseCPU" % new_cpu
if new_cpu in old_cpu_set:
raise RuntimeError, \
"New CPU (%s) is in the list of old CPUs." % (old_cpu,)
if not new_cpu.switchedOut():
raise RuntimeError, \
"New CPU (%s) is already active." % (new_cpu,)
if not new_cpu.support_take_over():
raise RuntimeError, \
"New CPU (%s) does not support CPU handover." % (old_cpu,)
if new_cpu.memory_mode() != memory_mode_name:
raise RuntimeError, \
"%s and %s require different memory modes." % (new_cpu,
new_cpus[0])
if old_cpu.switchedOut():
raise RuntimeError, \
"Old CPU (%s) is inactive." % (new_cpu,)
if not old_cpu.support_take_over():
raise RuntimeError, \
"Old CPU (%s) does not support CPU handover." % (old_cpu,)
try:
memory_mode = _memory_modes[memory_mode_name]
except KeyError:
raise RuntimeError, "Invalid memory mode (%s)" % memory_mode_name
drain()
# Now all of the CPUs are ready to be switched out
for old_cpu, new_cpu in cpuList:
old_cpu.switchOut()
# Change the memory mode if required. We check if this is needed
# to avoid printing a warning if no switch was performed.
if system.getMemoryMode() != memory_mode:
# Flush the memory system if we are switching to a memory mode
# that disables caches. This typically happens when switching to a
# hardware virtualized CPU.
if memory_mode == objects.params.atomic_noncaching:
memWriteback(system)
memInvalidate(system)
_changeMemoryMode(system, memory_mode)
for old_cpu, new_cpu in cpuList:
new_cpu.takeOverFrom(old_cpu)
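# Illustrative usage sketch (hypothetical attribute names): switching from
# warm-up CPUs to detailed CPUs in a run script might look like
#
#     switch_pairs = [(system.cpu[i], system.switch_cpus[i])
#                     for i in xrange(len(system.cpu))]
#     m5.switchCpus(system, switch_pairs)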
def notifyFork(root):
for obj in root.descendants():
obj.notifyFork()
fork_count = 0
def fork(simout="%(parent)s.f%(fork_seq)i"):
"""Fork the simulator.
This function forks the simulator. After forking the simulator,
the child process gets its output files redirected to a new output
directory. The default name of the output directory is the same as
the parent with the suffix ".fN" added where N is the fork
sequence number. The name of the output directory can be
overridden using the simout keyword argument.
Output file formatting dictionary:
parent -- Path to the parent process's output directory.
fork_seq -- Fork sequence number.
pid -- PID of the child process.
Keyword Arguments:
simout -- New simulation output directory.
Return Value:
pid of the child process or 0 if running in the child.
"""
from m5 import options
global fork_count
if not _m5.core.listenersDisabled():
raise RuntimeError, "Can not fork a simulator with listeners enabled"
drain()
try:
pid = os.fork()
except OSError, e:
raise e
if pid == 0:
# In child, notify objects of the fork
root = objects.Root.getInstance()
notifyFork(root)
# Setup a new output directory
parent = options.outdir
options.outdir = simout % {
"parent" : parent,
"fork_seq" : fork_count,
"pid" : os.getpid(),
}
_m5.core.setOutputDir(options.outdir)
else:
fork_count += 1
return pid
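# Illustrative usage sketch (hypothetical): forking a child simulator from a
# run script, with the parent waiting for the child to finish:
#
#     pid = m5.fork()
#     if pid == 0:
#         # child: carries on simulating, writing to its own outdir
#         m5.simulate()
#         sys.exit(0)
#     else:
#         os.waitpid(pid, 0)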
from _m5.core import disableAllListeners, listenersDisabled
from _m5.core import listenersLoopbackOnly
from _m5.core import curTick
| bsd-3-clause | -3,885,045,055,174,306,300 | 34.301075 | 84 | 0.668672 | false |
Kjwon15/libearth | libearth/parser/autodiscovery.py | 2 | 6439 | """:mod:`libearth.parser.autodiscovery` --- Autodiscovery
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides functions to autodiscovery feed url in document.
"""
try:
import HTMLParser
except ImportError:
import html.parser as HTMLParser
import collections
import logging
import re
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from ..compat import text
from ..compat.etree import fromstring
from .atom import parse_atom
from .rss1 import parse_rss1
from .rss2 import parse_rss2
from .util import normalize_xml_encoding
__all__ = ('ATOM_TYPE', 'RSS_TYPE', 'TYPE_TABLE', 'AutoDiscovery', 'FeedLink',
'FeedUrlNotFoundError', 'autodiscovery', 'get_format')
#: (:class:`str`) The MIME type of RSS 2.0 format
#: (:mimetype:`application/rss+xml`).
RSS_TYPE = 'application/rss+xml'
#: (:class:`str`) The MIME type of Atom format
#: (:mimetype:`application/atom+xml`).
ATOM_TYPE = 'application/atom+xml'
#: (:class:`collections.Set`) The set of supported feed MIME types.
#:
#: .. versionadded:: 0.3.0
FEED_TYPES = frozenset([RSS_TYPE, ATOM_TYPE])
#: (:class:`collections.Mapping`) The mapping table of feed types
TYPE_TABLE = {parse_atom: ATOM_TYPE, parse_rss2: RSS_TYPE, parse_rss1: RSS_TYPE}
#: Namedtuple which is a pair of ``type`` and ``url``
FeedLink = collections.namedtuple('FeedLink', 'type url')
def autodiscovery(document, url):
"""If the given url refers an actual feed, it returns the given url
without any change.
If the given url is a url of an ordinary web page
(i.e. :mimetype:`text/html`), it finds the urls of the corresponding feed.
It returns feed urls in feed types' lexicographical order.
If autodiscovery failed, it raise :exc:`FeedUrlNotFoundError`.
:param document: html, or xml strings
:type document: :class:`str`
:param url: the url used to retrieve the ``document``.
if feed url is in html and represented in relative url,
it will be rebuilt on top of the ``url``
:type url: :class:`str`
:returns: list of :class:`FeedLink` objects
:rtype: :class:`collections.MutableSequence`
"""
document = text(document)
document_type = get_format(document)
if document_type is None:
parser = AutoDiscovery()
feed_links, _ = parser.find(document)
if not feed_links:
raise FeedUrlNotFoundError('Cannot find feed url')
for link in feed_links:
if link.url.startswith('/'):
absolute_url = urlparse.urljoin(url, link.url)
feed_links[feed_links.index(link)] = \
FeedLink(link.type, absolute_url)
return feed_links
else:
return [FeedLink(TYPE_TABLE[document_type], url)]
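# Illustrative example (hypothetical URL and markup, not part of the module):
#
#     html = ('<html><head><link rel="alternate" '
#             'type="application/atom+xml" href="/feed.xml"></head></html>')
#     autodiscovery(html, 'http://example.com/')
#     # -> [FeedLink(type='application/atom+xml',
#     #              url='http://example.com/feed.xml')]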
class AutoDiscovery(HTMLParser.HTMLParser):
"""Parse the given HTML and try finding the actual feed urls from it.
.. versionchanged:: 0.3.0
It became to find icon links as well, and :meth:`find_feed_url()`
method (that returned only feed links) was gone, instead :meth:`find()`
(that return a pair of feed links and icon links) was introduced.
"""
LINK_PATTERN = re.compile(r'''rel\s?=\s?(?:'|")?([^'">]+)''')
LINK_HREF_PATTERN = re.compile(r'''href\s?=\s?(?:'|")?([^'"\s>]+)''')
LINK_TYPE_PATTERN = re.compile(r'''type\s?=\s?(?:'|")?([^'"\s>]+)''')
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.feed_links = []
self.icon_links = []
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if not (tag == 'link' and 'rel' in attrs and 'href' in attrs):
return
if attrs['rel'] == 'alternate' and 'type' in attrs and \
attrs['type'] in FEED_TYPES:
self.feed_links.append(FeedLink(attrs['type'], attrs['href']))
elif 'icon' in attrs['rel'].split():
self.icon_links.append(attrs['href'])
def find(self, document):
document = text(document)
match = re.match('.+</head>', document)
if match:
head = match.group(0)
else:
head = document
chunks = re.findall('[^>]*(?:>|$)', head)
for chunk in chunks:
try:
self.feed(chunk)
except Exception:
self.find_link_with_regex(chunk)
self.feed_links = sorted(self.feed_links, key=lambda link: link.type)
return self.feed_links, self.icon_links
def find_link_with_regex(self, chunk):
match = self.LINK_PATTERN.search(chunk)
if not match:
return
href_match = self.LINK_HREF_PATTERN.search(chunk)
if not href_match:
return
rels = match.group(1).split()
href = href_match.group(1)
if 'alternate' in rels:
type_match = self.LINK_TYPE_PATTERN.search(chunk)
if type_match:
type_ = type_match.group(1)
if type_ in FEED_TYPES:
self.feed_links.append(FeedLink(type_, href))
if 'icon' in rels:
self.icon_links.append(href)
class FeedUrlNotFoundError(Exception):
"""Exception raised when feed url cannot be found in html."""
def __init__(self, msg):
self.msg = msg
def get_format(document):
"""Guess the syndication format of an arbitrary ``document``.
:param document: document string to guess
:type document: :class:`str`, :class:`bytes`
:returns: the function possible to parse the given ``document``
:rtype: :class:`collections.Callable`
.. versionchanged:: 0.2.0
The function was in :mod:`libearth.parser.heuristic` module (which is
removed now) before 0.2.0, but now it's moved to
:mod:`libearth.parser.autodiscovery`.
"""
document = normalize_xml_encoding(document)
try:
root = fromstring(document)
except Exception as e:
logger = logging.getLogger(__name__ + '.get_format')
logger.debug('document = %r', document)
logger.warning(e, exc_info=True)
return None
if root.tag in ('{http://www.w3.org/2005/Atom}feed',
'{http://purl.org/atom/ns#}feed'):
return parse_atom
elif root.tag == '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF':
return parse_rss1
elif root.tag == 'rss':
return parse_rss2
else:
return None
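# Illustrative examples (hypothetical documents, not part of the module):
#
#     get_format('<rss version="2.0"><channel/></rss>')          # parse_rss2
#     get_format('<feed xmlns="http://www.w3.org/2005/Atom"/>')  # parse_atom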
| gpl-2.0 | 3,931,579,882,290,926,000 | 33.068783 | 80 | 0.608324 | false |
huntxu/fuel-web | nailgun/nailgun/test/unit/test_task.py | 2 | 29158 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from oslo_serialization import jsonutils
import yaml
from nailgun import consts
from nailgun.db.sqlalchemy.models import Task
from nailgun.errors import errors
from nailgun.extensions.volume_manager.manager import VolumeManager
from nailgun import objects
from nailgun.task import task
from nailgun.test.base import BaseTestCase
from nailgun.utils import reverse
class TestClusterDeletionTask(BaseTestCase):
def create_cluster_and_execute_deletion_task(
self, attributes=None, os=consts.RELEASE_OS.centos):
self.env.create(
cluster_kwargs={
'editable_attributes': attributes,
},
release_kwargs={
'operating_system': os,
'version': '2025-7.0',
},
)
self.fake_task = Task(name=consts.TASK_NAMES.cluster_deletion,
cluster=self.env.clusters[0])
task.ClusterDeletionTask.execute(self.fake_task)
@mock.patch('nailgun.task.task.DeletionTask', autospec=True)
@mock.patch.object(task.DeleteIBPImagesTask, 'execute')
def test_target_images_deletion_skipped_empty_attributes(
self, mock_img_task, mock_del):
self.create_cluster_and_execute_deletion_task({})
self.assertTrue(mock_del.execute.called)
self.assertFalse(mock_img_task.called)
@mock.patch('nailgun.task.task.DeletionTask', autospec=True)
@mock.patch.object(task.DeleteIBPImagesTask, 'execute')
def test_target_images_deletion_skipped_os_centos(
self, mock_img_task, mock_del):
attributes = {'provision': {
'method': consts.PROVISION_METHODS.image,
}}
self.create_cluster_and_execute_deletion_task(attributes)
self.assertTrue(mock_del.execute.called)
self.assertFalse(mock_img_task.called)
@mock.patch('nailgun.task.task.DeletionTask', autospec=True)
@mock.patch.object(task.DeleteIBPImagesTask, 'execute')
def test_target_images_deletion_skipped_os_ubuntu_cobbler(
self, mock_img_task, mock_del):
os = consts.RELEASE_OS.ubuntu
attributes = {'provision': {
'method': consts.PROVISION_METHODS.cobbler,
}}
self.create_cluster_and_execute_deletion_task(attributes, os)
self.assertTrue(mock_del.execute.called)
self.assertFalse(mock_img_task.called)
@mock.patch('nailgun.task.task.DeletionTask', autospec=True)
@mock.patch.object(task.DeleteIBPImagesTask, 'execute')
def test_target_images_deletion_executed(self, mock_img_task, mock_del):
os = consts.RELEASE_OS.ubuntu
attributes = {'provision': {
'method': consts.PROVISION_METHODS.image,
}}
self.create_cluster_and_execute_deletion_task(attributes, os)
self.assertTrue(mock_del.execute.called)
self.assertTrue(mock_img_task.called)
fake_attrs = objects.Attributes.merged_attrs_values(
self.fake_task.cluster.attributes)
mock_img_task.assert_called_once_with(
mock.ANY, fake_attrs['provision']['image_data'])
class TestDeleteIBPImagesTask(BaseTestCase):
@mock.patch('nailgun.task.task.settings')
@mock.patch('nailgun.task.task.make_astute_message')
def test_message(self, mock_astute, mock_settings):
mock_settings.PROVISIONING_IMAGES_PATH = '/fake/path'
mock_settings.REMOVE_IMAGES_TIMEOUT = 'fake_timeout'
task_mock = mock.Mock()
task_mock.cluster.id = '123'
task_mock.uuid = 'fake_uuid'
fake_image_data = {'/': {'uri': 'http://a.b/fake.img'},
'/boot': {'uri': 'http://c.d/fake-boot.img'}}
task.DeleteIBPImagesTask.message(task_mock, fake_image_data)
rpc_message = mock_astute.call_args[0][3]
rm_cmd = rpc_message['tasks'][0]['parameters'].pop('cmd')
mock_astute.assert_called_once_with(
mock.ANY, 'execute_tasks', 'remove_images_resp', mock.ANY)
self.assertEqual(rpc_message, {
'tasks': [{
'id': None,
'type': 'shell',
'uids': [consts.MASTER_NODE_UID],
'parameters': {
'retries': 3,
'cwd': '/',
'timeout': 'fake_timeout',
'interval': 1}}]})
self.assertTrue(rm_cmd.startswith('rm -f'))
self.assertIn('/fake/path/fake-boot.img', rm_cmd)
self.assertIn('/fake/path/fake.img', rm_cmd)
class TestHelperUpdateClusterStatus(BaseTestCase):
def setUp(self):
super(TestHelperUpdateClusterStatus, self).setUp()
self.env.create(
nodes_kwargs=[
{'roles': ['controller']},
{'roles': ['compute', 'virt']},
{'roles': ['cinder']}])
def node_should_be_error_with_type(self, node, error_type):
self.assertEqual(node.status, 'error')
self.assertEqual(node.error_type, error_type)
self.assertEqual(node.progress, 0)
def nodes_should_not_be_error(self, nodes):
for node in nodes:
self.assertEqual(node.status, 'discover')
@property
def cluster(self):
return self.env.clusters[0]
def test_update_nodes_to_error_if_deployment_task_failed(self):
self.cluster.nodes[0].status = 'deploying'
self.cluster.nodes[0].progress = 12
deployment_task = Task(name='deployment', cluster=self.cluster,
status='error')
self.db.add(deployment_task)
self.db.commit()
objects.Task._update_cluster_data(deployment_task)
self.db.flush()
self.assertEqual(self.cluster.status, 'error')
self.node_should_be_error_with_type(self.cluster.nodes[0], 'deploy')
self.nodes_should_not_be_error(self.cluster.nodes[1:])
def test_update_cluster_to_error_if_deploy_task_failed(self):
deploy_task = Task(name='deploy', cluster=self.cluster, status='error')
self.db.add(deploy_task)
self.db.commit()
objects.Task._update_cluster_data(deploy_task)
self.db.flush()
self.assertEqual(self.cluster.status, 'error')
def test_update_nodes_to_error_if_provision_task_failed(self):
self.cluster.nodes[0].status = 'provisioning'
self.cluster.nodes[0].progress = 12
provision_task = Task(name='provision', cluster=self.cluster,
status='error')
self.db.add(provision_task)
self.db.commit()
objects.Task._update_cluster_data(provision_task)
self.db.flush()
self.assertEqual(self.cluster.status, 'error')
self.node_should_be_error_with_type(self.cluster.nodes[0], 'provision')
self.nodes_should_not_be_error(self.cluster.nodes[1:])
def test_update_cluster_to_operational(self):
deploy_task = Task(name='deploy', cluster=self.cluster, status='ready')
self.db.add(deploy_task)
self.db.commit()
objects.Task._update_cluster_data(deploy_task)
self.db.flush()
self.assertEqual(self.cluster.status, 'operational')
def test_update_if_parent_task_is_ready_all_nodes_should_be_ready(self):
for node in self.cluster.nodes:
node.status = 'ready'
node.progress = 100
self.cluster.nodes[0].status = 'deploying'
self.cluster.nodes[0].progress = 24
deploy_task = Task(name='deploy', cluster=self.cluster, status='ready')
self.db.add(deploy_task)
self.db.commit()
objects.Task._update_cluster_data(deploy_task)
self.db.flush()
self.assertEqual(self.cluster.status, 'operational')
for node in self.cluster.nodes:
self.assertEqual(node.status, 'ready')
self.assertEqual(node.progress, 100)
def test_update_cluster_status_if_task_was_already_in_error_status(self):
for node in self.cluster.nodes:
node.status = 'provisioning'
node.progress = 12
provision_task = Task(name='provision', cluster=self.cluster,
status='error')
self.db.add(provision_task)
self.db.commit()
data = {'status': 'error', 'progress': 100}
objects.Task.update(provision_task, data)
self.db.flush()
self.assertEqual(self.cluster.status, 'error')
self.assertEqual(provision_task.status, 'error')
for node in self.cluster.nodes:
self.assertEqual(node.status, 'error')
self.assertEqual(node.progress, 0)
def test_do_not_set_cluster_to_error_if_validation_failed(self):
for task_name in ['check_before_deployment', 'check_networks']:
supertask = Task(
name='deploy',
cluster=self.cluster,
status='error')
check_task = Task(
name=task_name,
cluster=self.cluster,
status='error')
supertask.subtasks.append(check_task)
self.db.add(check_task)
self.db.commit()
objects.Task._update_cluster_data(supertask)
self.db.flush()
self.assertEqual(self.cluster.status, 'new')
class TestCheckBeforeDeploymentTask(BaseTestCase):
def setUp(self):
super(TestCheckBeforeDeploymentTask, self).setUp()
self.env.create(
release_kwargs={'version': '1111-8.0'},
cluster_kwargs={
'net_provider': 'neutron',
'net_segment_type': 'gre'
},
nodes_kwargs=[{'roles': ['controller']}])
self.env.create_node()
self.node = self.env.nodes[0]
self.cluster = self.env.clusters[0]
self.task = Task(cluster_id=self.env.clusters[0].id)
self.env.db.add(self.task)
self.env.db.commit()
def set_node_status(self, status):
self.node.status = status
self.env.db.commit()
self.assertEqual(self.node.status, status)
def set_node_error_type(self, error_type):
self.node.error_type = error_type
self.env.db.commit()
self.assertEqual(self.node.error_type, error_type)
def is_checking_required(self):
return task.CheckBeforeDeploymentTask._is_disk_checking_required(
self.node)
def test_is_disk_checking_required(self):
self.set_node_status('ready')
self.assertFalse(self.is_checking_required())
self.set_node_status('deploying')
self.assertFalse(self.is_checking_required())
self.set_node_status('discover')
self.assertTrue(self.is_checking_required())
self.set_node_status('provisioned')
self.assertFalse(self.is_checking_required())
def test_is_disk_checking_required_in_case_of_error(self):
self.set_node_status('error')
self.set_node_error_type('provision')
self.assertTrue(self.is_checking_required())
self.set_node_error_type('deploy')
self.assertFalse(self.is_checking_required())
def test_check_volumes_and_disks_do_not_run_if_node_ready(self):
self.set_node_status('ready')
with mock.patch.object(
VolumeManager,
'check_disk_space_for_deployment') as check_mock:
task.CheckBeforeDeploymentTask._check_disks(self.task)
self.assertFalse(check_mock.called)
with mock.patch.object(
VolumeManager,
'check_volume_sizes_for_deployment') as check_mock:
task.CheckBeforeDeploymentTask._check_volumes(self.task)
self.assertFalse(check_mock.called)
def test_check_volumes_and_disks_run_if_node_not_ready(self):
self.set_node_status('discover')
with mock.patch.object(
VolumeManager,
'check_disk_space_for_deployment') as check_mock:
task.CheckBeforeDeploymentTask._check_disks(self.task)
self.assertEqual(check_mock.call_count, 1)
with mock.patch.object(
VolumeManager,
'check_volume_sizes_for_deployment') as check_mock:
task.CheckBeforeDeploymentTask._check_volumes(self.task)
self.assertEqual(check_mock.call_count, 1)
def test_check_nodes_online_raises_exception(self):
self.node.online = False
self.env.db.commit()
self.assertRaises(
errors.NodeOffline,
task.CheckBeforeDeploymentTask._check_nodes_are_online,
self.task)
def test_check_nodes_online_do_not_raise_exception_node_to_deletion(self):
self.node.online = False
self.node.pending_deletion = True
self.env.db.commit()
task.CheckBeforeDeploymentTask._check_nodes_are_online(self.task)
def test_check_controllers_count_operational_cluster(self):
self.cluster.status = consts.CLUSTER_STATUSES.operational
# remove old controller and add new one
self.node.pending_deletion = True
new_controller = self.env.create_node()
        new_controller.pending_addition = True
self.assertRaises(
errors.NotEnoughControllers,
task.CheckBeforeDeploymentTask._check_controllers_count,
self.task)
def test_check_controllers_count_new_cluster(self):
self.cluster.status = consts.CLUSTER_STATUSES.new
# check there's not exceptions with one controller
self.assertNotRaises(
errors.NotEnoughControllers,
task.CheckBeforeDeploymentTask._check_controllers_count,
self.task)
# check there's exception with one non-controller node
self.node.roles = ['compute']
self.env.db.flush()
self.assertRaises(
errors.NotEnoughControllers,
task.CheckBeforeDeploymentTask._check_controllers_count,
self.task)
def find_net_by_name(self, nets, name):
for net in nets['networks']:
if net['name'] == name:
return net
def test_missing_network_group_with_template(self):
net_template = self.env.read_fixtures(['network_template_80'])[0]
objects.Cluster.set_network_template(
self.cluster,
net_template
)
public = [n for n in self.cluster.network_groups
if n.name == consts.NETWORKS.public][0]
self.env._delete_network_group(public.id)
self.assertRaisesRegexp(
errors.NetworkTemplateMissingNetworkGroup,
"The following network groups are missing: public",
task.CheckBeforeDeploymentTask._validate_network_template,
self.task)
def test_missing_node_role_from_template(self):
net_template = self.env.read_fixtures(['network_template_80'])[0]
objects.Cluster.set_network_template(
self.cluster,
net_template
)
cluster_assigned_roles = \
objects.Cluster.get_assigned_roles(self.cluster)
conf_template = self.cluster.network_config.configuration_template
for net_group in six.itervalues(conf_template['adv_net_template']):
template_node_roles = net_group['templates_for_node_role']
for assigned_role in cluster_assigned_roles:
if assigned_role in template_node_roles:
del template_node_roles[assigned_role]
self.assertRaises(
errors.NetworkTemplateMissingRoles,
task.CheckBeforeDeploymentTask._validate_network_template,
self.task
)
def test_missing_network_group_with_template_multi_ng(self):
net_template = self.env.read_fixtures(['network_template_80'])[0]
resp = self.env.create_node_group(name='group-custom-1',
cluster_id=self.cluster.id)
del self.cluster.nodes[0]
ng = objects.NodeGroup.get_by_uid(resp.json_body['id'])
self.env.create_nodes_w_interfaces_count(
1, 5,
roles=['controller'],
cluster_id=self.cluster.id,
group_id=ng.id
)
objects.Cluster.set_network_template(
self.cluster,
net_template
)
public = [n for n in ng.networks
if n.name == consts.NETWORKS.public][0]
self.env._delete_network_group(public.id)
self.assertRaisesRegexp(
errors.NetworkTemplateMissingNetworkGroup,
("The following network groups are missing: public "
".* group-custom-1"),
task.CheckBeforeDeploymentTask._validate_network_template,
self.task)
def test_default_net_data_used_for_checking_absent_node_groups(self):
self.env.create_node_group(api=False, name='new_group',
cluster_id=self.cluster.id)
# template validation should pass without errors
# as the 'default' sub-template must be used for 'new_group'
# (same as for 'default' node group)
self.assertNotRaises(
Exception,
task.CheckBeforeDeploymentTask._validate_network_template,
self.task
)
def test_check_public_networks(self):
cluster = self.env.clusters[0]
self.env.create_nodes(
2, api=True, roles=['controller'], cluster_id=cluster.id)
self.env.create_nodes(
2, api=True, roles=['compute'], cluster_id=cluster.id)
# we have 3 controllers now
self.assertEqual(
sum('controller' in n.all_roles for n in self.env.nodes),
3
)
attrs = cluster.attributes.editable
self.assertEqual(
attrs['public_network_assignment']['assign_to_all_nodes']['value'],
False
)
self.assertFalse(
objects.Cluster.should_assign_public_to_all_nodes(cluster))
resp = self.env.neutron_networks_get(cluster.id)
nets = resp.json_body
# not enough IPs for 3 nodes and 2 VIPs
self.find_net_by_name(nets, 'public')['ip_ranges'] = \
[["172.16.0.2", "172.16.0.5"]]
resp = self.env.neutron_networks_put(cluster.id, nets)
self.assertEqual(resp.status_code, 200)
self.assertRaises(
errors.NetworkCheckError,
task.CheckBeforeDeploymentTask._check_public_network,
self.task)
# enough IPs for 3 nodes and 2 VIPs
self.find_net_by_name(nets, 'public')['ip_ranges'] = \
[["172.16.0.2", "172.16.0.6"]]
resp = self.env.neutron_networks_put(cluster.id, nets)
self.assertEqual(resp.status_code, 200)
self.assertNotRaises(
errors.NetworkCheckError,
task.CheckBeforeDeploymentTask._check_public_network,
self.task)
attrs['public_network_assignment']['assign_to_all_nodes']['value'] = \
True
resp = self.app.patch(
reverse(
'ClusterAttributesHandler',
kwargs={'cluster_id': cluster.id}),
params=jsonutils.dumps({'editable': attrs}),
headers=self.default_headers
)
self.assertEqual(200, resp.status_code)
self.assertTrue(
objects.Cluster.should_assign_public_to_all_nodes(cluster))
self.assertRaises(
errors.NetworkCheckError,
task.CheckBeforeDeploymentTask._check_public_network,
self.task)
def test_check_deployment_graph_with_correct_data(self):
correct_yaml_tasks = """
- id: test-controller
type: group
role: [test-controller]
requires: [primary-controller]
required_for: [deploy_end]
parameters:
strategy:
type: parallel
amount: 2
"""
tasks = yaml.load(correct_yaml_tasks)
deployment_tasks = objects.Cluster.get_deployment_tasks(self.cluster)
deployment_tasks.extend(tasks)
objects.Cluster.update(
self.cluster,
{'deployment_tasks': deployment_tasks})
task.CheckBeforeDeploymentTask.\
_check_deployment_graph_for_correctness(
self.task)
def test_check_deployment_graph_with_incorrect_dependencies_data(self):
incorrect_dependencies_yaml_tasks = """
- id: test-controller
type: group
role: [primary-controller]
required_for: [non_existing_stage]
parameters:
strategy:
type: one_by_one
"""
tasks = yaml.load(incorrect_dependencies_yaml_tasks)
deployment_tasks = objects.Cluster.get_deployment_tasks(self.cluster)
deployment_tasks.extend(tasks)
objects.Cluster.update(
self.cluster,
{'deployment_tasks': deployment_tasks})
with self.assertRaisesRegexp(
errors.InvalidData,
"Tasks 'non_existing_stage' can't be in requires|required_for|"
"groups|tasks for \['test-controller'\] because they don't "
"exist in the graph"):
task.CheckBeforeDeploymentTask.\
_check_deployment_graph_for_correctness(
self.task)
def test_check_deployment_graph_with_cycling_dependencies_data(self):
incorrect_cycle_yaml_tasks = """
- id: test-controller-1
type: role
requires: [test-controller-2]
- id: test-controller-2
type: role
requires: [test-controller-1]
"""
tasks = yaml.load(incorrect_cycle_yaml_tasks)
deployment_tasks = objects.Cluster.get_deployment_tasks(self.cluster)
deployment_tasks.extend(tasks)
objects.Cluster.update(
self.cluster,
{'deployment_tasks': deployment_tasks})
with self.assertRaisesRegexp(
errors.InvalidData,
"Tasks can not be processed because it contains cycles in it"):
task.CheckBeforeDeploymentTask.\
_check_deployment_graph_for_correctness(
self.task)
def test_check_missed_nodes_vmware_nova_computes(self):
operational_node = self.env.create_node(
roles=['compute-vmware'],
cluster_id=self.cluster.id,
name='node-1'
)
pending_addition_node = self.env.create_node(
roles=['compute-vmware'],
cluster_id=self.cluster.id,
pending_addition=True,
name='node-2'
)
msg = ("The following compute-vmware nodes are not assigned to "
"any vCenter cluster: {0}").format(', '.join(
sorted([operational_node.name, pending_addition_node.name])
))
with self.assertRaisesRegexp(errors.CheckBeforeDeploymentError, msg):
task.CheckBeforeDeploymentTask._check_vmware_consistency(self.task)
@mock.patch('objects.VmwareAttributes.get_nova_computes_target_nodes')
def test_check_not_deleted_nodes_vmware_nova_computes(self, target_nodes):
operational_node = self.env.create_node(
roles=['compute-vmware'],
cluster_id=self.cluster.id,
name='node-1'
)
pending_deletion_node = self.env.create_node(
roles=['compute-vmware'],
cluster_id=self.cluster.id,
pending_deletion=True,
name='node-2'
)
target_nodes.return_value = [{
'id': operational_node.hostname,
'label': operational_node.name
}, {
'id': pending_deletion_node.hostname,
'label': pending_deletion_node.name
}]
msg = ("The following nodes are prepared for deletion and couldn't be "
"assigned to any vCenter cluster: {0}".format(
pending_deletion_node.name))
with self.assertRaisesRegexp(errors.CheckBeforeDeploymentError, msg):
task.CheckBeforeDeploymentTask._check_vmware_consistency(self.task)
@mock.patch('objects.VmwareAttributes.get_nova_computes_target_nodes')
def test_check_extra_nodes_vmware_nova_computes(self, target_nodes):
operational_node = self.env.create_node(
roles=['compute-vmware'],
cluster_id=self.cluster.id,
name='node-1'
)
non_cluster_node = self.env.create_node(
roles=['compute-vmware'],
name='node-2'
)
other_role_node = self.env.create_node(
cluster_id=self.cluster.id,
name='node-3'
)
target_nodes.return_value = [{
'id': operational_node.hostname,
'label': operational_node.name
}, {
'id': non_cluster_node.hostname,
'label': non_cluster_node.name
}, {
'id': other_role_node.hostname,
'label': other_role_node.name
}]
msg = ("The following nodes don't belong to compute-vmware nodes of "
"environment and couldn't be assigned to any vSphere cluster: "
"{0}".format(', '.join(
sorted([non_cluster_node.name, other_role_node.name]))
))
with self.assertRaisesRegexp(errors.CheckBeforeDeploymentError, msg):
task.CheckBeforeDeploymentTask._check_vmware_consistency(self.task)
class TestDeployTask(BaseTestCase):
def create_deploy_tasks(self):
self.env.create()
cluster = self.env.clusters[0]
deploy_task = Task(name=consts.TASK_NAMES.deploy,
cluster_id=cluster.id,
status=consts.TASK_STATUSES.pending)
self.db.add(deploy_task)
self.db.flush()
provision_task = Task(name=consts.TASK_NAMES.provision,
status=consts.TASK_STATUSES.pending,
parent_id=deploy_task.id, cluster_id=cluster.id)
self.db.add(provision_task)
deployment_task = Task(name=consts.TASK_NAMES.deployment,
status=consts.TASK_STATUSES.pending,
parent_id=deploy_task.id, cluster_id=cluster.id)
self.db.add(deployment_task)
self.db.flush()
return deploy_task, provision_task, deployment_task
def test_running_status_bubble_for_deploy_task(self):
deploy_task, provision_task, deployment_task = \
self.create_deploy_tasks()
objects.Task.update(provision_task,
{'status': consts.TASK_STATUSES.running})
# Only deploy and provision tasks are running now
self.assertEqual(consts.TASK_STATUSES.running, deploy_task.status)
self.assertEqual(consts.TASK_STATUSES.running, provision_task.status)
self.assertEqual(consts.TASK_STATUSES.pending, deployment_task.status)
def test_error_status_bubble_for_deploy_task(self):
deploy_task, provision_task, deployment_task = \
self.create_deploy_tasks()
objects.Task.update(provision_task,
{'status': consts.TASK_STATUSES.error})
# All tasks have error status
self.assertEqual(consts.TASK_STATUSES.error, deploy_task.status)
self.assertEqual(consts.TASK_STATUSES.error, provision_task.status)
self.assertEqual(consts.TASK_STATUSES.error, deployment_task.status)
def test_ready_status_bubble_for_deploy_task(self):
deploy_task, provision_task, deployment_task = \
self.create_deploy_tasks()
objects.Task.update(provision_task,
{'status': consts.TASK_STATUSES.ready})
# Not all child bugs in ready state
self.assertEqual(consts.TASK_STATUSES.running, deploy_task.status)
self.assertEqual(consts.TASK_STATUSES.ready, provision_task.status)
self.assertEqual(consts.TASK_STATUSES.pending, deployment_task.status)
# All child bugs in ready state
objects.Task.update(deployment_task,
{'status': consts.TASK_STATUSES.ready})
self.assertEqual(consts.TASK_STATUSES.ready, deploy_task.status)
self.assertEqual(consts.TASK_STATUSES.ready, provision_task.status)
self.assertEqual(consts.TASK_STATUSES.ready, deployment_task.status)
| apache-2.0 | 936,353,081,573,012,200 | 37.365789 | 79 | 0.607964 | false |
drphilmarshall/SpaceWarps | analysis/SWAP.py | 2 | 29808 | #!/usr/bin/env python
# ======================================================================
import swap
import sys,getopt,datetime,os,subprocess
import numpy as np
import cPickle
# ======================================================================
def SWAP(argv):
"""
NAME
SWAP.py
PURPOSE
Space Warps Analysis Pipeline
Read in a Space Warps classification database from a MongoDB
database, and analyse it.
COMMENTS
The SW analysis is "online" in the statistical sense: we step
through the classifications one by one, updating each
classifier's agent's confusion matrix, and each subject's lens
probability. The main reason for taking this approach is that
it is the most logical one; secondarily, it opens up the
possibility of performing the analysis in real time (and maybe even
with this piece of python).
Currently, the agents' confusion matrices only depend on the
classifications of training subjects. Upgrading this would be a
nice piece of further work. Likewise, neither the Marker
positions, the classification durations, nor any other
parameters are used in estimating lens probability - but they
could be. In this version, it's LENS or NOT.
Standard operation is to update the candidate list by making a
new, timestamped catalog of candidates - and the classifications
that led to them. This means we have to know when the last
update was made - this is done by SWAP writing its own next
config file, and by reading in a pickle of the last
classification to be SWAPped. The bureau has to always be read
in in its entirety, because a classifier can reappear any time
to have their agent update its confusion matrix.
FLAGS
-h Print this message
INPUTS
configfile Plain text file containing SW experiment configuration
OUTPUTS
stdout
*_bureau.pickle
*_collection.pickle
EXAMPLE
cd workspace
SWAP.py startup.config > CFHTLS-beta-day01.log
BUGS
AUTHORS
This file is part of the Space Warps project, and is distributed
under the MIT license by the Space Warps Science Team.
http://spacewarps.org/
HISTORY
2013-04-03 started. Marshall (Oxford)
2013-04-17 implemented v1 "LENS or NOT" analysis. Marshall (Oxford)
2013-05-.. "fuzzy" trajectories. S. More (IPMU)
"""
# ------------------------------------------------------------------
try:
opts, args = getopt.getopt(argv,"h",["help"])
except getopt.GetoptError, err:
print str(err) # will print something like "option -a not recognized"
print SWAP.__doc__ # will print the big comment above.
return
for o,a in opts:
if o in ("-h", "--help"):
print SWAP.__doc__
return
else:
assert False, "unhandled option"
# Check for setup file in array args:
if len(args) == 1:
configfile = args[0]
print swap.doubledashedline
print swap.hello
print swap.doubledashedline
print "SWAP: taking instructions from",configfile
else:
print SWAP.__doc__
return
# ------------------------------------------------------------------
# Read in run configuration:
tonights = swap.Configuration(configfile)
# Read the pickled random state file
random_file = open(tonights.parameters['random_file'],"r");
random_state = cPickle.load(random_file);
random_file.close();
np.random.set_state(random_state);
practise = (tonights.parameters['dbspecies'] == 'Toy')
if practise:
print "SWAP: doing a dry run using a Toy database"
else:
print "SWAP: data will be read from the current live Mongo database"
stage = str(int(tonights.parameters['stage']))
survey = tonights.parameters['survey']
print "SWAP: looks like we are on Stage "+stage+" of the ",survey," survey project"
try: supervised = tonights.parameters['supervised']
except: supervised = False
try: supervised_and_unsupervised = tonights.parameters['supervised_and_unsupervised']
except: supervised_and_unsupervised = False
# will agents be able to learn?
try: agents_willing_to_learn = tonights.parameters['agents_willing_to_learn']
except: agents_willing_to_learn = False
if agents_willing_to_learn:
if supervised_and_unsupervised:
print "SWAP: agents will use both training AND test data to update their confusion matrices"
elif supervised:
print "SWAP: agents will use training data to update their confusion matrices"
else:
print "SWAP: agents will only use test data to update their confusion matrices"
a_few_at_the_start = tonights.parameters['a_few_at_the_start']
if a_few_at_the_start > 0:
print "SWAP: but at first they'll ignore their volunteer until "
print "SWAP: they've done ",int(a_few_at_the_start)," images"
else:
a_few_at_the_start = 0
print "SWAP: agents will use fixed confusion matrices without updating them"
waste = tonights.parameters['hasty']
if waste:
print "SWAP: agents will ignore the classifications of rejected subjects"
else:
print "SWAP: agents will use all classifications, even of rejected subjects"
vb = tonights.parameters['verbose']
if not vb: print "SWAP: only reporting minimal stdout"
one_by_one = tonights.parameters['one_by_one']
report = tonights.parameters['report']
if report:
print "SWAP: will make plots and write report at the end"
else:
print "SWAP: postponing reporting until the last minute"
# From when shall we take classifications to analyze?
if tonights.parameters['start'] == 'the_beginning':
t1 = datetime.datetime(1978, 2, 28, 12, 0, 0, 0)
elif tonights.parameters['start'] == 'dont_bother':
print "SWAP: looks like there is nothing more to do!"
swap.set_cookie(False)
print swap.doubledashedline
return
else:
t1 = datetime.datetime.strptime(tonights.parameters['start'], '%Y-%m-%d_%H:%M:%S')
print "SWAP: updating all subjects classified between "+tonights.parameters['start']
# When will we stop considering classifications?
if tonights.parameters['end'] == 'the_end_of_time':
t2 = datetime.datetime(2100, 1, 1, 12, 0, 0, 0)
else:
t2 = datetime.datetime.strptime(tonights.parameters['end'], '%Y-%m-%d_%H:%M:%S')
print "SWAP: and "+tonights.parameters['end']
# How many classifications do we look at per batch?
try: N_per_batch = tonights.parameters['N_per_batch']
except: N_per_batch = 5000000
print "SWAP: setting the number of classifications made in this batch to ",N_per_batch
# How will we decide if a sim has been seen?
try: use_marker_positions = tonights.parameters['use_marker_positions']
except: use_marker_positions = False
print "SWAP: should we use the marker positions on sims? ",use_marker_positions
try: prior = tonights.parameters['prior']
except: prior = 2e-4
print "SWAP: set prior for analysis to ",prior
# Will we do offline analysis?
try: offline = tonights.parameters['offline']
except: offline = False
print "SWAP: should we do offline analysis? ",offline
# How will we make decisions based on probability?
thresholds = {}
thresholds['detection'] = tonights.parameters['detection_threshold']
thresholds['rejection'] = tonights.parameters['rejection_threshold']
# ------------------------------------------------------------------
# Read in, or create, a bureau of agents who will represent the
# volunteers:
bureau = swap.read_pickle(tonights.parameters['bureaufile'],'bureau')
# ------------------------------------------------------------------
# Read in, or create, an object representing the candidate list:
sample = swap.read_pickle(tonights.parameters['samplefile'],'collection')
# ------------------------------------------------------------------
# Open up database:
if practise:
db = swap.read_pickle(tonights.parameters['dbfile'],'database')
if db is None:
print "SWAP: making a new Toy database..."
db = swap.ToyDB(pars=tonights.parameters)
print "SWAP: database has ",db.size()," Toy classifications"
print "SWAP: of ",db.surveysize," Toy subjects"
print "SWAP: made by ",db.population," Toy classifiers"
print "SWAP: where each classifier makes ",db.enthusiasm," classifications, on average"
else:
db = swap.MongoDB()
# Read in a batch of classifications, made since the aforementioned
# start time:
batch = db.find('since',t1)
# Actually, batch is a cursor, now set to the first classification
# after time t1. Maybe this could be a Kafka cursor instead? And then
# all of this could be in an infinite loop? Hmm - we'd still want to
# produce some output periodically - but this should be done by querying
# the bureau and sample databases, separately from SWAP.
# ------------------------------------------------------------------
count_max = N_per_batch
print "SWAP: interpreting up to",count_max," classifications..."
if one_by_one: print "SWAP: ...one by one - hit return for the next one..."
count = 0
for classification in batch:
if one_by_one: next = raw_input()
# Get the vitals for this classification:
items = db.digest(classification,survey,method=use_marker_positions)
if vb: print "#"+str(count+1)+". items = ",items
if items is None:
continue # Tutorial subjects fail, as do stage/project mismatches!
# t,Name,ID,ZooID,category,kind,X,Y,location,thisstage,P = items
# X, Y: result,truth (LENS,NOT,UNKNOWN)
# CPD 31.5.14: added annotation_x, annotation_y : locations of clicks
# PJM 20014-08-21: added "flavor" of subject, 'lensing cluster', len
tstring,Name,ID,ZooID,category,kind,flavor,X,Y,location,classification_stage,at_x,at_y = items
# this is probably bad form:
at_x = eval(at_x)
at_y = eval(at_y)
t = datetime.datetime.strptime(tstring, '%Y-%m-%d_%H:%M:%S')
# If the stage of this classification does not match the stage we are
# on, skip to the next one!
if classification_stage != stage:
if vb:
print "Found classification from different stage: ",classification_stage," cf. ",stage,", items = ",items
print " "
continue
else:
if vb:
print "Found classification from this stage: ",items
print " "
# Break out if we've reached the time limit:
if t > t2:
break
# Register new volunteers, and create an agent for each one:
# Old, slow code: if Name not in bureau.list():
try: test = bureau.member[Name]
except: bureau.member[Name] = swap.Agent(Name,tonights.parameters)
# Register newly-classified subjects:
# Old, slow code: if ID not in sample.list():
try: test = sample.member[ID]
except: sample.member[ID] = swap.Subject(ID,ZooID,category,kind,flavor,Y,thresholds,location,prior=prior)
# Update the subject's lens probability using input from the
# classifier. We send that classifier's agent to the subject
# to do this.
sample.member[ID].was_described(by=bureau.member[Name],as_being=X,at_time=tstring,while_ignoring=a_few_at_the_start,haste=waste,at_x=at_x,at_y=at_y)
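        # For orientation, a rough sketch of the update performed inside
        # was_described (see swap.Subject for the authoritative version):
        # with PL = Pr("LENS"|LENS) and PD = Pr("NOT"|NOT) for this agent,
        # and P the subject's current probability,
        #
        #     said "LENS":  P -> PL*P / (PL*P + (1.0-PD)*(1.0-P))
        #     said "NOT" :  P -> (1.0-PL)*P / ((1.0-PL)*P + PD*(1.0-P))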
# Update the agent's confusion matrix, based on what it heard:
P = sample.member[ID].mean_probability
if supervised_and_unsupervised:
# use both training and test images
if agents_willing_to_learn * ((category == 'test') + (category == 'training')):
bureau.member[Name].heard(it_was=X,actually_it_was=Y,with_probability=P,ignore=False,ID=ID,at_time=tstring)
elif ((category == 'test') + (category == 'training')):
bureau.member[Name].heard(it_was=X,actually_it_was=Y,with_probability=P,ignore=True,ID=ID,at_time=tstring)
elif supervised:
# Only use training images!
if category == 'training' and agents_willing_to_learn:
bureau.member[Name].heard(it_was=X,actually_it_was=Y,with_probability=P,ignore=False,ID=ID,at_time=tstring)
elif category == 'training':
bureau.member[Name].heard(it_was=X,actually_it_was=Y,with_probability=P,ignore=True,ID=ID,at_time=tstring)
else:
# Unsupervised: ignore all the training images...
if category == 'test' and agents_willing_to_learn:
bureau.member[Name].heard(it_was=X,actually_it_was=Y,with_probability=P,ignore=False,ID=ID,at_time=tstring)
elif category == 'test':
bureau.member[Name].heard(it_was=X,actually_it_was=Y,with_probability=P,ignore=True,ID=ID,at_time=tstring)
# Brag about it:
count += 1
if vb:
print swap.dashedline
print "SWAP: Subject "+ID+" was classified by "+Name+" during Stage ",stage
print "SWAP: he/she said "+X+" when it was actually "+Y+", with Pr(LENS) = "+str(P)
print "SWAP: their agent reckons their contribution (in bits) = ",bureau.member[Name].contribution
print "SWAP: while estimating their PL,PD as ",bureau.member[Name].PL,bureau.member[Name].PD
print "SWAP: and the subject's new probability as ",sample.member[ID].probability
else:
# Count up to 74 in dots:
if count == 1: sys.stdout.write('SWAP: ')
elif np.mod(count,int(count_max/73.0)) == 0: sys.stdout.write('.')
# elif count == db.size(): sys.stdout.write('\n')
sys.stdout.flush()
# When was the first classification made?
if count == 1:
t1 = t
# Did we at least manage to do 1?
elif count == 2:
swap.set_cookie(True)
# Have we done enough for this run?
elif count == count_max:
break
sys.stdout.write('\n')
if vb: print swap.dashedline
print "SWAP: total no. of classifications processed: ",count
#-------------------------------------------------------------------------
# Now do offline analysis
if offline:
# for each step in EM algorithm, construct agents and classifications
# list
# will also need to set probabilities to prior_probability and such
# before the algorithm is run
# some settings that I guess you could configure but these work fine enough
initialPL = tonights.parameters['initialPL']
initialPD = tonights.parameters['initialPD']
N_min = 40 # min number of EM steps required
N_max = 100 # max number of EM steps allowed
# TODO: make the epsilons be in logit terms?
epsilon_min = 1e-6 # average change in probabilities before we claim convergence
epsilon_taus = 10 # initial value
N_try = 0 # initial value
epsilon_list = []
print "SWAP: offline: resetting prior probability to ",prior
for ID in sample.list():
sample.member[ID].probability = prior
sample.member[ID].update_state()
print "SWAP: offline: resetting PL and PDs to ",initialPL,initialPD
for ID in bureau.list():
bureau.member[ID].PD = initialPD
bureau.member[ID].PL = initialPL
print "SWAP: offline: running EM"
while (epsilon_taus > epsilon_min) * (N_try < N_max) + (N_try < N_min):
# do E step
epsilon_taus = 0
num_taus = 0
for ID in sample.list():
annotationhistory = sample.member[ID].annotationhistory
names = annotationhistory['Name']
classifications = annotationhistory['ItWas']
if len(names) > 0:
old_probability = sample.member[ID].mean_probability
if supervised_and_unsupervised:
laplace_smoothing = 0
elif supervised:
laplace_smoothing = 0
else:
laplace_smoothing = 0
sample.member[ID].was_described_many_times(bureau, names, classifications, realize_confusion=False, laplace_smoothing=laplace_smoothing) # not doing the binomial realization
epsilon_taus += (sample.member[ID].mean_probability - old_probability) ** 2
num_taus += 1
# divide epsilon_taus by the number of taus
epsilon_taus = np.sqrt(epsilon_taus) * 1. / num_taus
# do M step
# I am PRETTY sure this is inefficient!
for ID in bureau.list():
agent = bureau.member[ID]
if supervised_and_unsupervised:
# supervised learning AND unsupervised
# use perfect training in M step
# use test info in M step
classifications_train = agent.traininghistory['ItWas']
probabilities_train = []
for Subj_ID in agent.traininghistory['ID']:
if sample.member[Subj_ID].kind == 'test':
probabilities_train.append(sample.member[Subj_ID].mean_probability)
elif sample.member[Subj_ID].kind == 'sim':
probabilities_train.append(1.0)
elif sample.member[Subj_ID].kind == 'dud':
probabilities_train.append(0)
probabilities_train = np.array(probabilities_train)
classifications_test = agent.testhistory['ItWas']
probabilities_test = []
for Subj_ID in agent.testhistory['ID']:
probabilities_test.append(sample.member[Subj_ID].mean_probability)
probabilities_test = np.array(probabilities_test)
probabilities = np.append(probabilities_train, probabilities_test)
classifications = np.append(classifications_train, classifications_test)
elif supervised:
# supervised learning
# use perfect training in M step
# DONT use test info in M step
probabilities = agent.traininghistory['ActuallyItWas']
classifications = agent.traininghistory['ItWas']
else:
# totally unsupervised
# DONT use perfect training in M step
# use test info in M step
classifications = agent.testhistory['ItWas']
probabilities = []
for Subj_ID in agent.testhistory['ID']:
probabilities.append(sample.member[Subj_ID].mean_probability)
probabilities = np.array(probabilities)
bureau.member[ID].heard_many_times(probabilities, classifications)
# done with the EM steps! add one to the tally of tries
N_try += 1
# done with EM! collect probabilities in the bureau
bureau.collect_probabilities()
# repeat for the sample
for kind in ['sim', 'dud', 'test']:
sample.collect_probabilities(kind)
# All good things come to an end:
if count == 0:
print "SWAP: if we're not plotting, something might be wrong: 0 classifications found."
t = t1
more_to_do = False
# return
elif count < count_max: # ie we didn't make it through the whole batch this time!
more_to_do = False
else:
more_to_do = True
# ------------------------------------------------------------------
# Set up outputs based on where we got to.
# And what will we call the new files we make? Use the first
# classification timestamp!
tonights.parameters['finish'] = t1.strftime('%Y-%m-%d_%H:%M:%S')
# Let's also update the start parameter, ready for next time:
tonights.parameters['start'] = tstring
# Use the following directory for output lists and plots:
tonights.parameters['trunk'] = \
tonights.parameters['survey']+'_'+tonights.parameters['finish']
tonights.parameters['dir'] = os.getcwd()+'/'+tonights.parameters['trunk']
if not os.path.exists(tonights.parameters['dir']):
os.makedirs(tonights.parameters['dir'])
# ------------------------------------------------------------------
# Pickle the bureau, sample, and database, if required. If we do
    # this, it's because we want to pick up from where we left off
# (ie with SWAPSHOP) - so save the pickles in the $cwd. This is
# taken care of in io.py. Note that we update the parameters as
# we go - this will be useful later when we write update.config.
if tonights.parameters['repickle'] and count > 0:
new_bureaufile = swap.get_new_filename(tonights.parameters,'bureau')
print "SWAP: saving agents to "+new_bureaufile
swap.write_pickle(bureau,new_bureaufile)
tonights.parameters['bureaufile'] = new_bureaufile
new_samplefile = swap.get_new_filename(tonights.parameters,'collection')
print "SWAP: saving subjects to "+new_samplefile
swap.write_pickle(sample,new_samplefile)
tonights.parameters['samplefile'] = new_samplefile
if practise:
new_dbfile = swap.get_new_filename(tonights.parameters,'database')
print "SWAP: saving database to "+new_dbfile
swap.write_pickle(db,new_dbfile)
tonights.parameters['dbfile'] = new_dbfile
# ------------------------------------------------------------------
if report:
# Output list of subjects to retire, based on this batch of
# classifications. Note that what is needed here is the ZooID,
# not the subject ID:
new_retirementfile = swap.get_new_filename(tonights.parameters,'retire_these')
print "SWAP: saving retiree subject Zooniverse IDs..."
N = swap.write_list(sample,new_retirementfile,item='retired_subject')
print "SWAP: "+str(N)+" lines written to "+new_retirementfile
# Also print out lists of detections etc! These are urls of images.
new_samplefile = swap.get_new_filename(tonights.parameters,'candidates')
print "SWAP: saving lens candidates..."
N = swap.write_list(sample,new_samplefile,item='candidate')
print "SWAP: "+str(N)+" lines written to "+new_samplefile
# Now save the training images, for inspection:
new_samplefile = swap.get_new_filename(tonights.parameters,'training_true_positives')
print "SWAP: saving true positives..."
N = swap.write_list(sample,new_samplefile,item='true_positive')
print "SWAP: "+str(N)+" lines written to "+new_samplefile
new_samplefile = swap.get_new_filename(tonights.parameters,'training_false_positives')
print "SWAP: saving false positives..."
N = swap.write_list(sample,new_samplefile,item='false_positive')
print "SWAP: "+str(N)+" lines written to "+new_samplefile
new_samplefile = swap.get_new_filename(tonights.parameters,'training_false_negatives')
print "SWAP: saving false negatives..."
N = swap.write_list(sample,new_samplefile,item='false_negative')
print "SWAP: "+str(N)+" lines written to "+new_samplefile
# Also write out catalogs of subjects, including the ZooID, subject ID,
# how many classifications, and probability:
catalog = swap.get_new_filename(tonights.parameters,'candidate_catalog')
print "SWAP: saving catalog of high probability subjects..."
Nlenses,Nsubjects = swap.write_catalog(sample,catalog,thresholds,kind='test')
print "SWAP: From "+str(Nsubjects)+" subjects classified,"
print "SWAP: "+str(Nlenses)+" candidates (with P > rejection) written to "+catalog
catalog = swap.get_new_filename(tonights.parameters,'sim_catalog')
print "SWAP: saving catalog of high probability subjects..."
Nsims,Nsubjects = swap.write_catalog(sample,catalog,thresholds,kind='sim')
print "SWAP: From "+str(Nsubjects)+" subjects classified,"
print "SWAP: "+str(Nsims)+" sim 'candidates' (with P > rejection) written to "+catalog
catalog = swap.get_new_filename(tonights.parameters,'dud_catalog')
print "SWAP: saving catalog of high probability subjects..."
Nduds,Nsubjects = swap.write_catalog(sample,catalog,thresholds,kind='dud')
print "SWAP: From "+str(Nsubjects)+" subjects classified,"
print "SWAP: "+str(Nduds)+" dud 'candidates' (with P > rejection) written to "+catalog
# ------------------------------------------------------------------
# Now, if there is more to do, over-write the update.config file so
# that we can carry on where we left off. Note that the pars are
# already updated! :-)
if not more_to_do:
tonights.parameters['start'] = tstring
swap.set_cookie(False)
# SWAPSHOP will read this cookie and act accordingly.
configfile = 'update.config'
# Random_file needs updating, else we always start from the same random
# state when update.config is reread!
random_file = open(tonights.parameters['random_file'],"w");
random_state = np.random.get_state();
cPickle.dump(random_state,random_file);
random_file.close();
swap.write_config(configfile, tonights.parameters)
# ------------------------------------------------------------------
if report:
# Make plots! Can't plot everything - uniformly sample 200 of each
# thing (agent or subject).
# Agent histories:
fig1 = bureau.start_history_plot()
pngfile = swap.get_new_filename(tonights.parameters,'histories')
Nc = np.min([200,bureau.size()])
print "SWAP: plotting "+str(Nc)+" agent histories in "+pngfile
for Name in bureau.shortlist(Nc):
bureau.member[Name].plot_history(fig1)
bureau.finish_history_plot(fig1,t,pngfile)
tonights.parameters['historiesplot'] = pngfile
# Agent probabilities:
pngfile = swap.get_new_filename(tonights.parameters,'probabilities')
print "SWAP: plotting "+str(Nc)+" agent probabilities in "+pngfile
bureau.plot_probabilities(Nc,t,pngfile)
tonights.parameters['probabilitiesplot'] = pngfile
# Subject trajectories:
fig3 = sample.start_trajectory_plot()
pngfile = swap.get_new_filename(tonights.parameters,'trajectories')
# Random 500 for display purposes:
Ns = np.min([500,sample.size()])
print "SWAP: plotting "+str(Ns)+" subject trajectories in "+pngfile
for ID in sample.shortlist(Ns):
sample.member[ID].plot_trajectory(fig3)
# To plot only false negatives, or only true positives:
# for ID in sample.shortlist(Ns,kind='sim',status='rejected'):
# sample.member[ID].plot_trajectory(fig3)
# for ID in sample.shortlist(Ns,kind='sim',status='detected'):
# sample.member[ID].plot_trajectory(fig3)
sample.finish_trajectory_plot(fig3,pngfile,t=t)
tonights.parameters['trajectoriesplot'] = pngfile
# Candidates! Plot all undecideds or detections:
fig4 = sample.start_trajectory_plot(final=True)
pngfile = swap.get_new_filename(tonights.parameters,'sample')
# BigN = 100000 # Would get them all...
BigN = 500 # Can't see them all!
candidates = []
candidates += sample.shortlist(BigN,kind='test',status='detected')
candidates += sample.shortlist(BigN,kind='test',status='undecided')
sims = []
sims += sample.shortlist(BigN,kind='sim',status='detected')
sims += sample.shortlist(BigN,kind='sim',status='undecided')
duds = []
duds += sample.shortlist(BigN,kind='dud',status='detected')
duds += sample.shortlist(BigN,kind='dud',status='undecided')
print "SWAP: plotting "+str(len(sims))+" sims in "+pngfile
for ID in sims:
sample.member[ID].plot_trajectory(fig4)
print "SWAP: plotting "+str(len(duds))+" duds in "+pngfile
for ID in duds:
sample.member[ID].plot_trajectory(fig4)
print "SWAP: plotting "+str(len(candidates))+" candidates in "+pngfile
for ID in candidates:
sample.member[ID].plot_trajectory(fig4)
# They will all show up in the histogram though:
sample.finish_trajectory_plot(fig4,pngfile,final=True)
tonights.parameters['candidatesplot'] = pngfile
# ------------------------------------------------------------------
# Finally, write a PDF report:
swap.write_report(tonights.parameters,bureau,sample)
# ------------------------------------------------------------------
print swap.doubledashedline
return
# ======================================================================
if __name__ == '__main__':
SWAP(sys.argv[1:])
# ======================================================================
| mit | 8,545,231,875,970,111,000 | 40.924051 | 194 | 0.59994 | false |
vecnet/vnetsource | ts_om/tests/execution_util_tests.py | 2 | 2978 | import mock
import sys
from django.test import TestCase
from vecnet.simulation import sim_model
from .data import EMPTY_SCENARIO
from ts_om.submit import add_simulation
from data_services.models import DimUser, Simulation, SimulationGroup
class AddSimulationTests(TestCase):
"""
Tests for the add_simulation function in the execution_util module.
"""
@classmethod
def setUpClass(cls):
super(AddSimulationTests, cls).setUpClass()
cls.test_user = DimUser.objects.create(username='test-user')
cls.sim_group = SimulationGroup.objects.create(submitted_by=cls.test_user)
        # Set up a mock for sys.stdout so "print" statements don't print anything
cls.sys_stdout = sys.stdout
sys.stdout = mock.Mock()
@classmethod
def tearDownClass(cls):
sys.stdout = cls.sys_stdout
cls.sys_stdout = None
super(AddSimulationTests, cls).tearDownClass()
def setUp(self):
Simulation.objects.all().delete()
def test_wrong_type_for_group(self):
"""
        Test the add_simulation function when the wrong data type is passed for the simulation group.
"""
self.assertRaises(AssertionError, add_simulation, None, "")
self.assertRaises(AssertionError, add_simulation, list(), "")
self.assertRaises(AssertionError, add_simulation, self, "")
def check_simulation(self, simulation, expected_contents):
"""
Check that the simulation belongs to the test simulation group, and has 1 input file named "scenario.xml" with
the expected file contents.
"""
self.assertIsInstance(simulation, Simulation)
self.assertIs(simulation.group, self.sim_group)
self.assertEqual(simulation.model, sim_model.OPEN_MALARIA)
self.assertEqual(simulation.input_files.count(), 1)
input_file = simulation.input_files.all()[0]
self.assertEqual(input_file.name, 'scenario.xml')
self.assertEqual(input_file.created_by, self.test_user)
self.assertEqual(input_file.get_contents(), expected_contents)
def test_with_empty_xml(self):
"""
        Test the add_simulation function when the XML content is an empty string.
"""
simulation = add_simulation(self.sim_group, "", version="30")
self.check_simulation(simulation, "")
def test_with_3_simulations(self):
"""
        Test the add_simulation function by adding 3 simulations to a group.
"""
xml_contents = [
"<?xml version='1.0'>", # Just XML header
"", # Empty file,
EMPTY_SCENARIO,
]
expected_count = 0
for xml in xml_contents:
simulation = add_simulation(self.sim_group, xml, version="30")
self.check_simulation(simulation, xml)
expected_count += 1
self.assertEqual(self.sim_group.simulations.count(), expected_count)
| mpl-2.0 | -820,474,965,824,943,400 | 36.225 | 118 | 0.649429 | false |
jalexvig/tensorflow | tensorflow/contrib/model_pruning/python/pruning_test.py | 11 | 9125 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the key functions in pruning library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
class PruningHParamsTest(test.TestCase):
PARAM_LIST = [
"name=test", "threshold_decay=0.9", "pruning_frequency=10",
"do_not_prune=[conv1,conv2]", "sparsity_function_end_step=100",
"target_sparsity=0.9"
]
TEST_HPARAMS = ",".join(PARAM_LIST)
def setUp(self):
super(PruningHParamsTest, self).setUp()
# Add global step variable to the graph
self.global_step = training_util.get_or_create_global_step()
# Add sparsity
self.sparsity = variables.Variable(0.5, name="sparsity")
# Parse hparams
self.pruning_hparams = pruning.get_pruning_hparams().parse(
self.TEST_HPARAMS)
def testInit(self):
p = pruning.Pruning(self.pruning_hparams)
self.assertEqual(p._spec.name, "test")
self.assertAlmostEqual(p._spec.threshold_decay, 0.9)
self.assertEqual(p._spec.pruning_frequency, 10)
self.assertAllEqual(p._spec.do_not_prune, ["conv1", "conv2"])
self.assertEqual(p._spec.sparsity_function_end_step, 100)
self.assertAlmostEqual(p._spec.target_sparsity, 0.9)
def testInitWithExternalSparsity(self):
with self.test_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
def testInitWithVariableReuse(self):
with self.test_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
p_copy = pruning.Pruning(
spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
self.assertEqual(p._sparsity.eval(), p_copy._sparsity.eval())
class PruningTest(test.TestCase):
def setUp(self):
super(PruningTest, self).setUp()
self.global_step = training_util.get_or_create_global_step()
def testCreateMask2D(self):
width = 10
height = 20
with self.test_session():
weights = variables.Variable(
random_ops.random_normal([width, height], stddev=1), name="weights")
masked_weights = pruning.apply_mask(weights,
variable_scope.get_variable_scope())
variables.global_variables_initializer().run()
weights_val = weights.eval()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(weights_val, masked_weights_val)
def testUpdateSingleMask(self):
with self.test_session() as session:
weights = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.Variable(0.5, name="sparsity")
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 100)
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 50)
def _blockMasking(self, hparams, weights, expected_mask):
threshold = variables.Variable(0.0, name="threshold")
sparsity = variables.Variable(0.5, name="sparsity")
test_spec = ",".join(hparams)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
with self.test_session():
variables.global_variables_initializer().run()
_, new_mask = p._maybe_update_block_mask(weights, threshold)
# Check if the mask is the same size as the weights
self.assertAllEqual(new_mask.get_shape(), weights.get_shape())
mask_val = new_mask.eval()
self.assertAllEqual(mask_val, expected_mask)
def testBlockMasking(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
weights_avg = constant_op.constant(
[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]])
weights_max = constant_op.constant(
[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]])
expected_mask = [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"], weights_avg,
expected_mask)
def testBlockMaskingWithHigherDimensions(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
# Weights as in testBlockMasking, but with one extra dimension.
weights_avg = constant_op.constant(
[[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]]])
weights_max = constant_op.constant(
[[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]]])
expected_mask = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"],
weights_avg, expected_mask)
def testPartitionedVariableMasking(self):
partitioner = partitioned_variables.variable_axis_size_partitioner(40)
with self.test_session() as session:
with variable_scope.variable_scope("", partitioner=partitioner):
sparsity = variables.Variable(0.5, name="Sparsity")
weights = variable_scope.get_variable(
"weights", initializer=math_ops.linspace(1.0, 100.0, 100))
masked_weights = pruning.apply_mask(
weights, scope=variable_scope.get_variable_scope())
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 50)
def testConditionalMaskUpdate(self):
param_list = [
"pruning_frequency=2", "begin_pruning_step=1", "end_pruning_step=6",
"nbins=100"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
weights = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.Variable(0.00, name="sparsity")
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.conditional_mask_update_op()
sparsity_val = math_ops.linspace(0.0, 0.9, 10)
increment_global_step = state_ops.assign_add(self.global_step, 1)
non_zero_count = []
with self.test_session() as session:
variables.global_variables_initializer().run()
for i in range(10):
session.run(state_ops.assign(sparsity, sparsity_val[i]))
session.run(mask_update_op)
session.run(increment_global_step)
non_zero_count.append(np.count_nonzero(masked_weights.eval()))
      # Weights pruned at steps 0, 2, 4, and 6
expected_non_zero_count = [100, 100, 80, 80, 60, 60, 40, 40, 40, 40]
self.assertAllEqual(expected_non_zero_count, non_zero_count)
if __name__ == "__main__":
test.main()
| apache-2.0 | -3,094,381,627,248,387,000 | 41.24537 | 80 | 0.653041 | false |
impmihai/coala | tests/results/result_actions/ShowPatchActionTest.py | 16 | 6464 | import unittest
from os.path import join
from coala_utils.ContextManagers import retrieve_stdout
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.result_actions.ShowPatchAction import ShowPatchAction
from coalib.settings.Section import Section, Setting
class ShowPatchActionTest(unittest.TestCase):
def setUp(self):
self.uut = ShowPatchAction()
self.file_dict = {'a': ['a\n', 'b\n', 'c\n'], 'b': ['old_first\n']}
self.diff_dict = {'a': Diff(self.file_dict['a']),
'b': Diff(self.file_dict['b'])}
self.diff_dict['a'].add_lines(1, ['test\n'])
self.diff_dict['a'].delete_line(3)
self.diff_dict['b'].add_lines(0, ['first\n'])
self.test_result = Result('origin', 'message', diffs=self.diff_dict)
self.section = Section('name')
self.section.append(Setting('colored', 'false'))
def test_is_applicable(self):
diff = Diff([], rename='new_name')
result = Result('', '', diffs={'f': diff})
        # Two renames do not result in any change
self.assertEqual(
self.uut.is_applicable(result, {}, {'f': diff}),
'The given patches do not change anything anymore.'
)
with self.assertRaises(TypeError):
self.uut.is_applicable(1, None, None)
self.assertEqual(
self.uut.is_applicable(Result('o', 'm'), None, None),
'This result has no patch attached.')
self.assertTrue(self.uut.is_applicable(self.test_result, {}, {}))
self.assertIn(
'Two or more patches conflict with each other: ',
self.uut.is_applicable(self.test_result, {}, self.diff_dict))
def test_apply(self):
with retrieve_stdout() as stdout:
self.assertEqual(self.uut.apply_from_section(self.test_result,
self.file_dict,
{},
self.section),
{})
self.assertEqual(stdout.getvalue(),
'|----| | a\n'
'| |++++| a\n'
'| 1| 1| a\n'
'| | 2|+test\n'
'| 2| 3| b\n'
'| 3| |-c\n'
'|----| | b\n'
'| |++++| b\n'
'| | 1|+first\n'
'| 1| 2| old_first\n')
def test_apply_renaming_only(self):
with retrieve_stdout() as stdout:
test_result = Result('origin', 'message',
diffs={'a': Diff([], rename='b')})
file_dict = {'a': []}
self.assertEqual(self.uut.apply_from_section(test_result,
file_dict,
{},
self.section),
{})
self.assertEqual(stdout.getvalue(),
'|----| | ' + join('a', 'a') + '\n'
'| |++++| ' + join('b', 'b') + '\n')
def test_apply_empty(self):
with retrieve_stdout() as stdout:
test_result = Result('origin', 'message',
diffs={'a': Diff([])})
file_dict = {'a': []}
self.assertEqual(self.uut.apply_from_section(test_result,
file_dict,
{},
self.section),
{})
self.assertEqual(stdout.getvalue(), '')
def test_apply_with_previous_patches(self):
with retrieve_stdout() as stdout:
previous_diffs = {'a': Diff(self.file_dict['a'])}
previous_diffs['a'].modify_line(2, 'b_changed\n')
self.assertEqual(self.uut.apply_from_section(self.test_result,
self.file_dict,
previous_diffs,
self.section),
previous_diffs)
self.assertEqual(stdout.getvalue(),
'|----| | a\n'
'| |++++| a\n'
'| 1| 1| a\n'
'| | 2|+test\n'
'| 2| 3| b_changed\n'
'| 3| |-c\n'
'|----| | b\n'
'| |++++| b\n'
'| | 1|+first\n'
'| 1| 2| old_first\n')
def test_apply_with_rename(self):
with retrieve_stdout() as stdout:
previous_diffs = {'a': Diff(self.file_dict['a'])}
previous_diffs['a'].modify_line(2, 'b_changed\n')
diff_dict = {'a': Diff(self.file_dict['a'], rename='a.rename'),
'b': Diff(self.file_dict['b'], delete=True)}
diff_dict['a'].add_lines(1, ['test\n'])
diff_dict['a'].delete_line(3)
diff_dict['b'].add_lines(0, ['first\n'])
test_result = Result('origin', 'message', diffs=diff_dict)
self.assertEqual(self.uut.apply_from_section(test_result,
self.file_dict,
previous_diffs,
self.section),
previous_diffs)
self.assertEqual(stdout.getvalue(),
'|----| | a\n'
'| |++++| a.rename\n'
'| 1| 1| a\n'
'| | 2|+test\n'
'| 2| 3| b_changed\n'
'| 3| |-c\n'
'|----| | b\n'
'| |++++| /dev/null\n'
'| 1| |-old_first\n')
| agpl-3.0 | 3,208,941,245,466,998,300 | 44.521127 | 76 | 0.376856 | false |
NLnetLabs/unbound | pythonmod/examples/log.py | 25 | 4345 | import os
'''
   log.py: Response packet logger
Copyright (c) 2009, Zdenek Vasicek (vasicek AT fit.vutbr.cz)
Marek Vavrusa (xvavru00 AT stud.fit.vutbr.cz)
This software is open source.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the organization nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
def dataHex(data, prefix=""):
"""Converts binary string data to display representation form"""
res = ""
for i in range(0, (len(data)+15)/16):
res += "%s0x%02X | " % (prefix, i*16)
d = map(lambda x:ord(x), data[i*16:i*16+17])
for ch in d:
res += "%02X " % ch
for i in range(0,17-len(d)):
res += " "
res += "| "
for ch in d:
if (ch < 32) or (ch > 127):
res += ". "
else:
res += "%c " % ch
res += "\n"
return res
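# Illustrative sketch of the layout produced by dataHex() (hypothetical bytes):
#   <prefix>0x00 | 48 65 6C 6C 6F ... | H e l l o ...
# i.e. one row per 16-byte chunk: the offset, the bytes in hex, then the
# printable characters (non-printable bytes are shown as '.').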
def logDnsMsg(qstate):
"""Logs response"""
r = qstate.return_msg.rep
q = qstate.return_msg.qinfo
print "-"*100
print("Query: %s, type: %s (%d), class: %s (%d) " % (
qstate.qinfo.qname_str, qstate.qinfo.qtype_str, qstate.qinfo.qtype,
qstate.qinfo.qclass_str, qstate.qinfo.qclass))
print "-"*100
print "Return reply :: flags: %04X, QDcount: %d, Security:%d, TTL=%d" % (r.flags, r.qdcount, r.security, r.ttl)
print " qinfo :: qname: %s %s, qtype: %s, qclass: %s" % (str(q.qname_list), q.qname_str, q.qtype_str, q.qclass_str)
if (r):
print "Reply:"
for i in range(0, r.rrset_count):
rr = r.rrsets[i]
rk = rr.rk
print i,":",rk.dname_list, rk.dname_str, "flags: %04X" % rk.flags,
print "type:",rk.type_str,"(%d)" % ntohs(rk.type), "class:",rk.rrset_class_str,"(%d)" % ntohs(rk.rrset_class)
d = rr.entry.data
for j in range(0,d.count+d.rrsig_count):
print " ",j,":","TTL=",d.rr_ttl[j],
if (j >= d.count): print "rrsig",
print
print dataHex(d.rr_data[j]," ")
print "-"*100
def init(id, cfg):
log_info("pythonmod: init called, module id is %d port: %d script: %s" % (id, cfg.port, cfg.python_script))
return True
def deinit(id):
log_info("pythonmod: deinit called, module id is %d" % id)
return True
def inform_super(id, qstate, superqstate, qdata):
return True
def operate(id, event, qstate, qdata):
log_info("pythonmod: operate called, id: %d, event:%s" % (id, strmodulevent(event)))
if (event == MODULE_EVENT_NEW) or (event == MODULE_EVENT_PASS):
#Pass on the new event to the iterator
qstate.ext_state[id] = MODULE_WAIT_MODULE
return True
if event == MODULE_EVENT_MODDONE:
#Iterator finished, show response (if any)
if (qstate.return_msg):
logDnsMsg(qstate)
qstate.ext_state[id] = MODULE_FINISHED
return True
qstate.ext_state[id] = MODULE_ERROR
return True
| bsd-3-clause | -2,534,265,965,040,965,000 | 35.512605 | 128 | 0.624396 | false |
krinart/AutobahnPython | examples/twisted/wamp/beginner/client.py | 7 | 2729 | ###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.endpoints import clientFromString
from autobahn.twisted import wamp, websocket
class MyFrontendComponent(wamp.ApplicationSession):
"""
Application code goes here. This is an example component that calls
a remote procedure on a WAMP peer, subscribes to a topic to receive
events, and then stops the world after some events.
"""
@inlineCallbacks
def onJoin(self, details):
## call a remote procedure
##
try:
now = yield self.call(u'com.timeservice.now')
except Exception as e:
print("Error: {}".format(e))
else:
print("Current time from time service: {}".format(now))
## subscribe to a topic
##
self.received = 0
def on_event(i):
print("Got event: {}".format(i))
self.received += 1
if self.received > 5:
self.leave()
sub = yield self.subscribe(on_event, u'com.myapp.topic1')
print("Subscribed with subscription ID {}".format(sub.id))
def onDisconnect(self):
reactor.stop()
if __name__ == '__main__':
## 0) start logging to console
log.startLogging(sys.stdout)
## 1) create a WAMP application session factory
session_factory = wamp.ApplicationSessionFactory()
session_factory.session = MyFrontendComponent
## 2) create a WAMP-over-WebSocket transport client factory
transport_factory = websocket.WampWebSocketClientFactory(session_factory, \
debug = False, \
debug_wamp = False)
## 3) start the client from a Twisted endpoint
client = clientFromString(reactor, "tcp:127.0.0.1:8080")
client.connect(transport_factory)
## 4) now enter the Twisted reactor loop
reactor.run()
| apache-2.0 | 4,921,761,950,285,915,000 | 30.367816 | 79 | 0.616343 | false |
dqnykamp/sympy | sympy/physics/mechanics/kane.py | 13 | 35025 | from __future__ import print_function, division
__all__ = ['KanesMethod']
from sympy import zeros, Matrix, diff, solve_linear_system_LU, eye
from sympy.utilities import default_sort_key
from sympy.physics.vector import (ReferenceFrame, dynamicsymbols,
partial_velocity)
from sympy.physics.mechanics.particle import Particle
from sympy.physics.mechanics.rigidbody import RigidBody
from sympy.physics.mechanics.functions import (msubs, find_dynamicsymbols,
_f_list_parser)
from sympy.physics.mechanics.linearize import Linearizer
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import iterable
class KanesMethod(object):
"""Kane's method object.
This object is used to do the "book-keeping" as you go through and form
equations of motion in the way Kane presents in:
Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill
The attributes are for equations in the form [M] udot = forcing.
Attributes
==========
q, u : Matrix
Matrices of the generalized coordinates and speeds
bodylist : iterable
        Iterable of Particle and RigidBody objects in the system.
forcelist : iterable
Iterable of (Point, vector) or (ReferenceFrame, vector) tuples
describing the forces on the system.
auxiliary : Matrix
If applicable, the set of auxiliary Kane's
equations used to solve for non-contributing
forces.
mass_matrix : Matrix
The system's mass matrix
forcing : Matrix
The system's forcing vector
mass_matrix_full : Matrix
The "mass matrix" for the u's and q's
forcing_full : Matrix
The "forcing vector" for the u's and q's
Examples
========
This is a simple example for a one degree of freedom translational
spring-mass-damper.
In this example, we first need to do the kinematics.
This involves creating generalized speeds and coordinates and their
derivatives.
Then we create a point and set its velocity in a frame.
>>> from sympy import symbols
>>> from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame
>>> from sympy.physics.mechanics import Point, Particle, KanesMethod
>>> q, u = dynamicsymbols('q u')
>>> qd, ud = dynamicsymbols('q u', 1)
>>> m, c, k = symbols('m c k')
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> P.set_vel(N, u * N.x)
Next we need to arrange/store information in the way that KanesMethod
requires. The kinematic differential equations need to be stored in a
dict. A list of forces/torques must be constructed, where each entry in
the list is a (Point, Vector) or (ReferenceFrame, Vector) tuple, where the
Vectors represent the Force or Torque.
Next a particle needs to be created, and it needs to have a point and mass
assigned to it.
Finally, a list of all bodies and particles needs to be created.
>>> kd = [qd - u]
>>> FL = [(P, (-k * q - c * u) * N.x)]
>>> pa = Particle('pa', P, m)
>>> BL = [pa]
Finally we can generate the equations of motion.
First we create the KanesMethod object and supply an inertial frame,
coordinates, generalized speeds, and the kinematic differential equations.
Additional quantities such as configuration and motion constraints,
dependent coordinates and speeds, and auxiliary speeds are also supplied
here (see the online documentation).
Next we form FR* and FR to complete: Fr + Fr* = 0.
We have the equations of motion at this point.
    It makes sense to rearrange them though, so we calculate the mass matrix and
the forcing terms, for E.o.M. in the form: [MM] udot = forcing, where MM is
the mass matrix, udot is a vector of the time derivatives of the
generalized speeds, and forcing is a vector representing "forcing" terms.
>>> KM = KanesMethod(N, q_ind=[q], u_ind=[u], kd_eqs=kd)
>>> (fr, frstar) = KM.kanes_equations(FL, BL)
>>> MM = KM.mass_matrix
>>> forcing = KM.forcing
>>> rhs = MM.inv() * forcing
>>> rhs
Matrix([[(-c*u(t) - k*q(t))/m]])
>>> KM.linearize(A_and_B=True, new_method=True)[0]
Matrix([
[ 0, 1],
[-k/m, -c/m]])
Please look at the documentation pages for more information on how to
perform linearization and how to deal with dependent coordinates & speeds,
and how do deal with bringing non-contributing forces into evidence.
"""
def __init__(self, frame, q_ind, u_ind, kd_eqs=None, q_dependent=None,
configuration_constraints=None, u_dependent=None,
velocity_constraints=None, acceleration_constraints=None,
u_auxiliary=None):
"""Please read the online documentation. """
if not isinstance(frame, ReferenceFrame):
            raise TypeError('An inertial ReferenceFrame must be supplied')
self._inertial = frame
self._fr = None
self._frstar = None
self._forcelist = None
self._bodylist = None
self._initialize_vectors(q_ind, q_dependent, u_ind, u_dependent,
u_auxiliary)
self._initialize_constraint_matrices(configuration_constraints,
velocity_constraints, acceleration_constraints)
self._initialize_kindiffeq_matrices(kd_eqs)
def _initialize_vectors(self, q_ind, q_dep, u_ind, u_dep, u_aux):
"""Initialize the coordinate and speed vectors."""
none_handler = lambda x: Matrix(x) if x else Matrix()
# Initialize generalized coordinates
q_dep = none_handler(q_dep)
if not iterable(q_ind):
raise TypeError('Generalized coordinates must be an iterable.')
if not iterable(q_dep):
raise TypeError('Dependent coordinates must be an iterable.')
q_ind = Matrix(q_ind)
self._qdep = q_dep
self._q = Matrix([q_ind, q_dep])
self._qdot = self.q.diff(dynamicsymbols._t)
# Initialize generalized speeds
u_dep = none_handler(u_dep)
if not iterable(u_ind):
raise TypeError('Generalized speeds must be an iterable.')
if not iterable(u_dep):
raise TypeError('Dependent speeds must be an iterable.')
u_ind = Matrix(u_ind)
self._udep = u_dep
self._u = Matrix([u_ind, u_dep])
self._udot = self.u.diff(dynamicsymbols._t)
self._uaux = none_handler(u_aux)
def _initialize_constraint_matrices(self, config, vel, acc):
"""Initializes constraint matrices."""
# Define vector dimensions
o = len(self.u)
m = len(self._udep)
p = o - m
none_handler = lambda x: Matrix(x) if x else Matrix()
# Initialize configuration constraints
config = none_handler(config)
if len(self._qdep) != len(config):
raise ValueError('There must be an equal number of dependent '
'coordinates and configuration constraints.')
self._f_h = none_handler(config)
# Initialize velocity and acceleration constraints
vel = none_handler(vel)
acc = none_handler(acc)
if len(vel) != m:
raise ValueError('There must be an equal number of dependent '
'speeds and velocity constraints.')
if acc and (len(acc) != m):
raise ValueError('There must be an equal number of dependent '
'speeds and acceleration constraints.')
if vel:
u_zero = dict((i, 0) for i in self.u)
udot_zero = dict((i, 0) for i in self._udot)
self._f_nh = vel.subs(u_zero)
self._k_nh = (vel - self._f_nh).jacobian(self.u)
# If no acceleration constraints given, calculate them.
if not acc:
self._f_dnh = (self._k_nh.diff(dynamicsymbols._t) * self.u +
self._f_nh.diff(dynamicsymbols._t))
self._k_dnh = self._k_nh
else:
self._f_dnh = acc.subs(udot_zero)
self._k_dnh = (acc - self._f_dnh).jacobian(self._udot)
# Form of non-holonomic constraints is B*u + C = 0.
# We partition B into independent and dependent columns:
# Ars is then -B_dep.inv() * B_ind, and it relates dependent speeds
# to independent speeds as: udep = Ars*uind, neglecting the C term.
B_ind = self._k_nh[:, :p]
B_dep = self._k_nh[:, p:o]
self._Ars = -B_dep.LUsolve(B_ind)
else:
self._f_nh = Matrix()
self._k_nh = Matrix()
self._f_dnh = Matrix()
self._k_dnh = Matrix()
self._Ars = Matrix()
def _initialize_kindiffeq_matrices(self, kdeqs):
"""Initialize the kinematic differential equation matrices."""
if kdeqs:
if len(self.q) != len(kdeqs):
raise ValueError('There must be an equal number of kinematic '
'differential equations and coordinates.')
kdeqs = Matrix(kdeqs)
u = self.u
qdot = self._qdot
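            # The kinematic differential equations are taken to be linear in
            # qdot and u, i.e. kdeqs = k_kqdot*qdot + k_ku*u + f_k = 0; the
            # code below extracts these matrices and solves for qdot in
            # terms of u.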
# Dictionaries setting things to zero
u_zero = dict((i, 0) for i in u)
uaux_zero = dict((i, 0) for i in self._uaux)
qdot_zero = dict((i, 0) for i in qdot)
f_k = kdeqs.subs(u_zero).subs(qdot_zero)
k_ku = (kdeqs.subs(qdot_zero) - f_k).jacobian(u)
k_kqdot = (kdeqs.subs(u_zero) - f_k).jacobian(qdot)
f_k = k_kqdot.LUsolve(f_k)
k_ku = k_kqdot.LUsolve(k_ku)
k_kqdot = eye(len(qdot))
self._qdot_u_map = solve_linear_system_LU(
Matrix([k_kqdot.T, -(k_ku * u + f_k).T]).T, qdot)
self._f_k = f_k.subs(uaux_zero)
self._k_ku = k_ku.subs(uaux_zero)
self._k_kqdot = k_kqdot
else:
self._qdot_u_map = None
self._f_k = Matrix()
self._k_ku = Matrix()
self._k_kqdot = Matrix()
def _form_fr(self, fl):
"""Form the generalized active force."""
if not iterable(fl):
raise TypeError('Force pairs must be supplied in an iterable.')
N = self._inertial
# pull out relevant velocities for constructing partial velocities
vel_list, f_list = _f_list_parser(fl, N)
vel_list = [i.subs(self._qdot_u_map) for i in vel_list]
# Fill Fr with dot product of partial velocities and forces
o = len(self.u)
b = len(f_list)
FR = zeros(o, 1)
partials = partial_velocity(vel_list, self.u, N)
for i in range(o):
FR[i] = sum(partials[j][i] & f_list[j] for j in range(b))
# In case there are dependent speeds
if self._udep:
p = o - len(self._udep)
FRtilde = FR[:p, 0]
FRold = FR[p:o, 0]
FRtilde += self._Ars.T * FRold
FR = FRtilde
self._forcelist = fl
self._fr = FR
return FR
def _form_frstar(self, bl):
"""Form the generalized inertia force."""
if not iterable(bl):
raise TypeError('Bodies must be supplied in an iterable.')
t = dynamicsymbols._t
N = self._inertial
# Dicts setting things to zero
udot_zero = dict((i, 0) for i in self._udot)
uaux_zero = dict((i, 0) for i in self._uaux)
uauxdot = [diff(i, t) for i in self._uaux]
uauxdot_zero = dict((i, 0) for i in uauxdot)
# Dictionary of q' and q'' to u and u'
q_ddot_u_map = dict((k.diff(t), v.diff(t)) for (k, v) in
self._qdot_u_map.items())
q_ddot_u_map.update(self._qdot_u_map)
# Fill up the list of partials: format is a list with num elements
# equal to number of entries in body list. Each of these elements is a
# list - either of length 1 for the translational components of
# particles or of length 2 for the translational and rotational
# components of rigid bodies. The inner most list is the list of
# partial velocities.
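        # Schematically, for bl = [a_rigid_body, a_particle] this gives:
        #   [[v_partials, omega_partials],    # rigid body: linear & angular
        #    [v_partials]]                    # particle: linear only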
def get_partial_velocity(body):
if isinstance(body, RigidBody):
vlist = [body.masscenter.vel(N), body.frame.ang_vel_in(N)]
elif isinstance(body, Particle):
vlist = [body.point.vel(N),]
else:
raise TypeError('The body list may only contain either '
'RigidBody or Particle as list elements.')
v = [vel.subs(self._qdot_u_map) for vel in vlist]
return partial_velocity(v, self.u, N)
partials = [get_partial_velocity(body) for body in bl]
# Compute fr_star in two components:
# fr_star = -(MM*u' + nonMM)
o = len(self.u)
MM = zeros(o, o)
nonMM = zeros(o, 1)
zero_uaux = lambda expr: expr.subs(uaux_zero)
zero_udot_uaux = lambda expr: expr.subs(udot_zero).subs(uaux_zero)
for i, body in enumerate(bl):
if isinstance(body, RigidBody):
M = zero_uaux(body.mass)
I = zero_uaux(body.central_inertia)
vel = zero_uaux(body.masscenter.vel(N))
omega = zero_uaux(body.frame.ang_vel_in(N))
acc = zero_udot_uaux(body.masscenter.acc(N))
inertial_force = (M.diff(t) * vel + M * acc)
inertial_torque = zero_uaux((I.dt(body.frame) & omega) +
(I & body.frame.ang_acc_in(N)).subs(udot_zero) +
(omega ^ (I & omega)))
for j in range(o):
tmp_vel = zero_uaux(partials[i][0][j])
tmp_ang = zero_uaux(I & partials[i][1][j])
for k in range(o):
# translational
MM[j, k] += M * (tmp_vel & partials[i][0][k])
# rotational
MM[j, k] += (tmp_ang & partials[i][1][k])
nonMM[j] += inertial_force & partials[i][0][j]
nonMM[j] += inertial_torque & partials[i][1][j]
else:
M = zero_uaux(body.mass)
vel = zero_uaux(body.point.vel(N))
acc = zero_udot_uaux(body.point.acc(N))
inertial_force = (M.diff(t) * vel + M * acc)
for j in range(o):
temp = zero_uaux(partials[i][0][j])
for k in range(o):
MM[j, k] += M * (temp & partials[i][0][k])
nonMM[j] += inertial_force & partials[i][0][j]
# Compose fr_star out of MM and nonMM
MM = zero_uaux(msubs(MM, q_ddot_u_map))
nonMM = msubs(msubs(nonMM, q_ddot_u_map),
udot_zero, uauxdot_zero, uaux_zero)
fr_star = -(MM * Matrix(self._udot).subs(uauxdot_zero) + nonMM)
# If there are dependent speeds, we need to find fr_star_tilde
if self._udep:
p = o - len(self._udep)
fr_star_ind = fr_star[:p, 0]
fr_star_dep = fr_star[p:o, 0]
fr_star = fr_star_ind + (self._Ars.T * fr_star_dep)
# Apply the same to MM
MMi = MM[:p, :]
MMd = MM[p:o, :]
MM = MMi + (self._Ars.T * MMd)
self._bodylist = bl
self._frstar = fr_star
self._k_d = MM
self._f_d = -msubs(self._fr + self._frstar, udot_zero)
return fr_star
def to_linearizer(self):
"""Returns an instance of the Linearizer class, initiated from the
data in the KanesMethod class. This may be more desirable than using
the linearize class method, as the Linearizer object will allow more
efficient recalculation (i.e. about varying operating points)."""
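        # Typical pattern (sketch): linearizer = KM.to_linearizer(), then call
        # linearizer.linearize(...) repeatedly (e.g. with different op_point
        # values) without having to re-form Kane's equations each time.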
if (self._fr is None) or (self._frstar is None):
raise ValueError('Need to compute Fr, Fr* first.')
# Get required equation components. The Kane's method class breaks
# these into pieces. Need to reassemble
f_c = self._f_h
if self._f_nh and self._k_nh:
f_v = self._f_nh + self._k_nh*Matrix(self.u)
else:
f_v = Matrix()
if self._f_dnh and self._k_dnh:
f_a = self._f_dnh + self._k_dnh*Matrix(self._udot)
else:
f_a = Matrix()
# Dicts to sub to zero, for splitting up expressions
u_zero = dict((i, 0) for i in self.u)
ud_zero = dict((i, 0) for i in self._udot)
qd_zero = dict((i, 0) for i in self._qdot)
qd_u_zero = dict((i, 0) for i in Matrix([self._qdot, self.u]))
# Break the kinematic differential eqs apart into f_0 and f_1
f_0 = msubs(self._f_k, u_zero) + self._k_kqdot*Matrix(self._qdot)
f_1 = msubs(self._f_k, qd_zero) + self._k_ku*Matrix(self.u)
# Break the dynamic differential eqs into f_2 and f_3
f_2 = msubs(self._frstar, qd_u_zero)
f_3 = msubs(self._frstar, ud_zero) + self._fr
f_4 = zeros(len(f_2), 1)
# Get the required vector components
q = self.q
u = self.u
if self._qdep:
q_i = q[:-len(self._qdep)]
else:
q_i = q
q_d = self._qdep
if self._udep:
u_i = u[:-len(self._udep)]
else:
u_i = u
u_d = self._udep
# Form dictionary to set auxiliary speeds & their derivatives to 0.
uaux = self._uaux
uauxdot = uaux.diff(dynamicsymbols._t)
uaux_zero = dict((i, 0) for i in Matrix([uaux, uauxdot]))
# Checking for dynamic symbols outside the dynamic differential
# equations; throws error if there is.
sym_list = set(Matrix([q, self._qdot, u, self._udot, uaux, uauxdot]))
if any(find_dynamicsymbols(i, sym_list) for i in [self._k_kqdot,
self._k_ku, self._f_k, self._k_dnh, self._f_dnh, self._k_d]):
raise ValueError('Cannot have dynamicsymbols outside dynamic \
forcing vector.')
# Find all other dynamic symbols, forming the forcing vector r.
# Sort r to make it canonical.
r = list(find_dynamicsymbols(self._f_d.subs(uaux_zero), sym_list))
r.sort(key=default_sort_key)
# Check for any derivatives of variables in r that are also found in r.
for i in r:
if diff(i, dynamicsymbols._t) in r:
raise ValueError('Cannot have derivatives of specified \
quantities when linearizing forcing terms.')
return Linearizer(f_0, f_1, f_2, f_3, f_4, f_c, f_v, f_a, q, u, q_i,
q_d, u_i, u_d, r)
def linearize(self, **kwargs):
""" Linearize the equations of motion about a symbolic operating point.
If kwarg A_and_B is False (default), returns M, A, B, r for the
linearized form, M*[q', u']^T = A*[q_ind, u_ind]^T + B*r.
If kwarg A_and_B is True, returns A, B, r for the linearized form
dx = A*x + B*r, where x = [q_ind, u_ind]^T. Note that this is
computationally intensive if there are many symbolic parameters. For
this reason, it may be more desirable to use the default A_and_B=False,
        returning M, A, and B. Values may then be substituted into these
matrices, and the state space form found as
A = P.T*M.inv()*A, B = P.T*M.inv()*B, where P = Linearizer.perm_mat.
In both cases, r is found as all dynamicsymbols in the equations of
motion that are not part of q, u, q', or u'. They are sorted in
canonical form.
The operating points may be also entered using the ``op_point`` kwarg.
        This takes a dictionary of {symbol: value}, or an iterable of such
        dictionaries. The values may be numeric or symbolic. The more values
you can specify beforehand, the faster this computation will run.
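        As an illustrative sketch (reusing the spring-mass-damper example from
        the class docstring, so ``q`` and ``u`` are assumed to exist), one
        could pass ``op_point={q: 0, u: 0}`` and then call
        ``KM.linearize(op_point=op_point, new_method=True)`` to obtain
        M, A, B and r.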
As part of the deprecation cycle, the new method will not be used unless
the kwarg ``new_method`` is set to True. If the kwarg is missing, or set
to false, the old linearization method will be used. After next release
the need for this kwarg will be removed.
For more documentation, please see the ``Linearizer`` class."""
if 'new_method' not in kwargs or not kwargs['new_method']:
# User is still using old code.
SymPyDeprecationWarning('The linearize class method has changed '
'to a new interface, the old method is deprecated. To '
'use the new method, set the kwarg `new_method=True`. '
'For more information, read the docstring '
'of `linearize`.').warn()
return self._old_linearize()
# Remove the new method flag, before passing kwargs to linearize
kwargs.pop('new_method')
linearizer = self.to_linearizer()
result = linearizer.linearize(**kwargs)
return result + (linearizer.r,)
def _old_linearize(self):
"""Old method to linearize the equations of motion. Returns a tuple of
(f_lin_A, f_lin_B, y) for forming [M]qudot = [f_lin_A]qu + [f_lin_B]y.
Deprecated in favor of new method using Linearizer class. Please change
your code to use the new `linearize` method."""
if (self._fr is None) or (self._frstar is None):
raise ValueError('Need to compute Fr, Fr* first.')
        # Note that this is now unnecessary, and it should never be
# encountered; I still think it should be in here in case the user
# manually sets these matrices incorrectly.
for i in self.q:
if self._k_kqdot.diff(i) != 0 * self._k_kqdot:
raise ValueError('Matrix K_kqdot must not depend on any q.')
t = dynamicsymbols._t
uaux = self._uaux
uauxdot = [diff(i, t) for i in uaux]
# dictionary of auxiliary speeds & derivatives which are equal to zero
subdict = dict(list(zip(uaux + uauxdot, [0] * (len(uaux) + len(uauxdot)))))
# Checking for dynamic symbols outside the dynamic differential
        # equations; throws an error if there are any.
insyms = set(Matrix([self.q, self._qdot, self.u, self._udot, uaux, uauxdot]))
if any(find_dynamicsymbols(i, insyms) for i in [self._k_kqdot,
self._k_ku, self._f_k, self._k_dnh, self._f_dnh, self._k_d]):
raise ValueError('Cannot have dynamicsymbols outside dynamic \
forcing vector.')
other_dyns = list(find_dynamicsymbols(self._f_d.subs(subdict), insyms))
# make it canonically ordered so the jacobian is canonical
other_dyns.sort(key=default_sort_key)
for i in other_dyns:
if diff(i, dynamicsymbols._t) in other_dyns:
raise ValueError('Cannot have derivatives of specified '
'quantities when linearizing forcing terms.')
o = len(self.u) # number of speeds
n = len(self.q) # number of coordinates
l = len(self._qdep) # number of configuration constraints
m = len(self._udep) # number of motion constraints
qi = Matrix(self.q[: n - l]) # independent coords
qd = Matrix(self.q[n - l: n]) # dependent coords; could be empty
ui = Matrix(self.u[: o - m]) # independent speeds
ud = Matrix(self.u[o - m: o]) # dependent speeds; could be empty
qdot = Matrix(self._qdot) # time derivatives of coordinates
# with equations in the form MM udot = forcing, expand that to:
        # MM_full [q,u].T = forcing_full. This combines coordinates and
        # speeds together, which is necessary for the linearization process
        # due to dependent coordinates. f1 is the rows
# from the kinematic differential equations, f2 is the rows from the
# dynamic differential equations (and differentiated non-holonomic
# constraints).
f1 = self._k_ku * Matrix(self.u) + self._f_k
f2 = self._f_d
# Only want to do this if these matrices have been filled in, which
# occurs when there are dependent speeds
if m != 0:
f2 = self._f_d.col_join(self._f_dnh)
fnh = self._f_nh + self._k_nh * Matrix(self.u)
f1 = f1.subs(subdict)
f2 = f2.subs(subdict)
fh = self._f_h.subs(subdict)
fku = (self._k_ku * Matrix(self.u)).subs(subdict)
fkf = self._f_k.subs(subdict)
# In the code below, we are applying the chain rule by hand on these
# things. All the matrices have been changed into vectors (by
# multiplying the dynamic symbols which it is paired with), so we can
# take the jacobian of them. The basic operation is take the jacobian
# of the f1, f2 vectors wrt all of the q's and u's. f1 is a function of
# q, u, and t; f2 is a function of q, qdot, u, and t. In the code
# below, we are not considering perturbations in t. So if f1 is a
# function of the q's, u's but some of the q's or u's could be
# dependent on other q's or u's (qd's might be dependent on qi's, ud's
        # might be dependent on ui's or qi's), so what we do is take the
        # jacobian of the f1 term wrt qi's and qd's; the jacobian wrt the qd's
        # gets multiplied by the jacobian of qd wrt qi, and this is extended
        # for the ud's as well. dqd_dqi is computed by taking a Taylor
        # expansion of the holonomic constraint equations about q*, treating
        # q* - q as dq, separating into dqd (dependent q's) and dqi
        # (independent q's), and then rearranging for dqd/dqi. This is again
        # extended for the speeds.
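        # In the notation of the code below, the first (fully constrained)
        # case computes, via LUsolve:
        #   dqd_dqi = -(d fh/d qd)^-1  * (d fh/d qi)
        #   dud_dui = -(d fnh/d ud)^-1 * (d fnh/d ui)
        #   dud_dqi =  (d fnh/d ud)^-1 * (d fnh/d qd * dqd_dqi - d fnh/d qi)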
# First case: configuration and motion constraints
if (l != 0) and (m != 0):
fh_jac_qi = fh.jacobian(qi)
fh_jac_qd = fh.jacobian(qd)
fnh_jac_qi = fnh.jacobian(qi)
fnh_jac_qd = fnh.jacobian(qd)
fnh_jac_ui = fnh.jacobian(ui)
fnh_jac_ud = fnh.jacobian(ud)
fku_jac_qi = fku.jacobian(qi)
fku_jac_qd = fku.jacobian(qd)
fku_jac_ui = fku.jacobian(ui)
fku_jac_ud = fku.jacobian(ud)
fkf_jac_qi = fkf.jacobian(qi)
fkf_jac_qd = fkf.jacobian(qd)
f1_jac_qi = f1.jacobian(qi)
f1_jac_qd = f1.jacobian(qd)
f1_jac_ui = f1.jacobian(ui)
f1_jac_ud = f1.jacobian(ud)
f2_jac_qi = f2.jacobian(qi)
f2_jac_qd = f2.jacobian(qd)
f2_jac_ui = f2.jacobian(ui)
f2_jac_ud = f2.jacobian(ud)
f2_jac_qdot = f2.jacobian(qdot)
dqd_dqi = - fh_jac_qd.LUsolve(fh_jac_qi)
dud_dqi = fnh_jac_ud.LUsolve(fnh_jac_qd * dqd_dqi - fnh_jac_qi)
dud_dui = - fnh_jac_ud.LUsolve(fnh_jac_ui)
dqdot_dui = - self._k_kqdot.inv() * (fku_jac_ui +
fku_jac_ud * dud_dui)
dqdot_dqi = - self._k_kqdot.inv() * (fku_jac_qi + fkf_jac_qi +
(fku_jac_qd + fkf_jac_qd) * dqd_dqi + fku_jac_ud * dud_dqi)
f1_q = f1_jac_qi + f1_jac_qd * dqd_dqi + f1_jac_ud * dud_dqi
f1_u = f1_jac_ui + f1_jac_ud * dud_dui
f2_q = (f2_jac_qi + f2_jac_qd * dqd_dqi + f2_jac_qdot * dqdot_dqi +
f2_jac_ud * dud_dqi)
f2_u = f2_jac_ui + f2_jac_ud * dud_dui + f2_jac_qdot * dqdot_dui
# Second case: configuration constraints only
elif l != 0:
dqd_dqi = - fh.jacobian(qd).LUsolve(fh.jacobian(qi))
dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi) + (fku.jacobian(qd) + fkf.jacobian(qd)) *
dqd_dqi)
f1_q = (f1.jacobian(qi) + f1.jacobian(qd) * dqd_dqi)
f1_u = f1.jacobian(ui)
f2_jac_qdot = f2.jacobian(qdot)
f2_q = (f2.jacobian(qi) + f2.jacobian(qd) * dqd_dqi +
                    f2_jac_qdot * dqdot_dqi)
f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui
# Third case: motion constraints only
elif m != 0:
dud_dqi = fnh.jacobian(ud).LUsolve(- fnh.jacobian(qi))
dud_dui = - fnh.jacobian(ud).LUsolve(fnh.jacobian(ui))
dqdot_dui = - self._k_kqdot.inv() * (fku.jacobian(ui) +
fku.jacobian(ud) * dud_dui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi) + fku.jacobian(ud) * dud_dqi)
f1_jac_ud = f1.jacobian(ud)
f2_jac_qdot = f2.jacobian(qdot)
f2_jac_ud = f2.jacobian(ud)
f1_q = f1.jacobian(qi) + f1_jac_ud * dud_dqi
f1_u = f1.jacobian(ui) + f1_jac_ud * dud_dui
f2_q = (f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi + f2_jac_ud
* dud_dqi)
f2_u = (f2.jacobian(ui) + f2_jac_ud * dud_dui + f2_jac_qdot *
dqdot_dui)
# Fourth case: No constraints
else:
dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi))
f1_q = f1.jacobian(qi)
f1_u = f1.jacobian(ui)
f2_jac_qdot = f2.jacobian(qdot)
f2_q = f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi
f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui
f_lin_A = -(f1_q.row_join(f1_u)).col_join(f2_q.row_join(f2_u))
if other_dyns:
f1_oths = f1.jacobian(other_dyns)
f2_oths = f2.jacobian(other_dyns)
f_lin_B = -f1_oths.col_join(f2_oths)
else:
f_lin_B = Matrix()
return (f_lin_A, f_lin_B, Matrix(other_dyns))
def kanes_equations(self, FL, BL):
""" Method to form Kane's equations, Fr + Fr* = 0.
Returns (Fr, Fr*). In the case where auxiliary generalized speeds are
present (say, s auxiliary speeds, o generalized speeds, and m motion
constraints) the length of the returned vectors will be o - m + s in
length. The first o - m equations will be the constrained Kane's
equations, then the s auxiliary Kane's equations. These auxiliary
        equations can be accessed via the auxiliary_eqs property.
Parameters
==========
FL : list
Takes in a list of (Point, Vector) or (ReferenceFrame, Vector)
tuples which represent the force at a point or torque on a frame.
BL : list
A list of all RigidBody's and Particle's in the system.
"""
if not self._k_kqdot:
raise AttributeError('Create an instance of KanesMethod with '
'kinematic differential equations to use this method.')
fr = self._form_fr(FL)
frstar = self._form_frstar(BL)
if self._uaux:
if not self._udep:
km = KanesMethod(self._inertial, self.q, self._uaux,
u_auxiliary=self._uaux)
else:
km = KanesMethod(self._inertial, self.q, self._uaux,
u_auxiliary=self._uaux, u_dependent=self._udep,
velocity_constraints=(self._k_nh * self.u +
self._f_nh))
km._qdot_u_map = self._qdot_u_map
self._km = km
fraux = km._form_fr(FL)
frstaraux = km._form_frstar(BL)
self._aux_eq = fraux + frstaraux
self._fr = fr.col_join(fraux)
self._frstar = frstar.col_join(frstaraux)
return (self._fr, self._frstar)
def rhs(self, inv_method=None):
""" Returns the system's equations of motion in first order form.
The output of this will be the right hand side of:
[qdot, udot].T = f(q, u, t)
Or, the equations of motion in first order form. The right hand side
is what is needed by most numerical ODE integrators.
Parameters
==========
inv_method : str
The specific sympy inverse matrix calculation method to use. For a
list of valid methods, see
:meth:`~sympy.matrices.matrices.MatrixBase.inv`
"""
if inv_method is None:
self._rhs = self.mass_matrix_full.LUsolve(self.forcing_full)
else:
self._rhs = (self.mass_matrix_full.inv(inv_method,
try_block_diag=True) * self.forcing_full)
return self._rhs
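    # Sketch of typical numerical use (assumes kanes_equations() has already
    # been called and that lambdify plus an ODE integrator such as
    # scipy.integrate.odeint are available):
    #   rhs = KM.rhs()                        # Matrix of q' and u' expressions
    #   f = lambdify([q, u, m, c, k], rhs)    # then wrap f for the integrator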
def kindiffdict(self):
"""Returns a dictionary mapping q' to u."""
if not self._qdot_u_map:
raise AttributeError('Create an instance of KanesMethod with '
'kinematic differential equations to use this method.')
return self._qdot_u_map
@property
def auxiliary_eqs(self):
"""A matrix containing the auxiliary equations."""
if not self._fr or not self._frstar:
raise ValueError('Need to compute Fr, Fr* first.')
if not self._uaux:
raise ValueError('No auxiliary speeds have been declared.')
return self._aux_eq
@property
def mass_matrix(self):
"""The mass matrix of the system."""
if not self._fr or not self._frstar:
raise ValueError('Need to compute Fr, Fr* first.')
return Matrix([self._k_d, self._k_dnh])
@property
def mass_matrix_full(self):
"""The mass matrix of the system, augmented by the kinematic
differential equations."""
if not self._fr or not self._frstar:
raise ValueError('Need to compute Fr, Fr* first.')
o = len(self.u)
n = len(self.q)
return ((self._k_kqdot).row_join(zeros(n, o))).col_join((zeros(o,
n)).row_join(self.mass_matrix))
@property
def forcing(self):
"""The forcing vector of the system."""
if not self._fr or not self._frstar:
raise ValueError('Need to compute Fr, Fr* first.')
return -Matrix([self._f_d, self._f_dnh])
@property
def forcing_full(self):
"""The forcing vector of the system, augmented by the kinematic
differential equations."""
if not self._fr or not self._frstar:
raise ValueError('Need to compute Fr, Fr* first.')
f1 = self._k_ku * Matrix(self.u) + self._f_k
return -Matrix([f1, self._f_d, self._f_dnh])
@property
def q(self):
return self._q
@property
def u(self):
return self._u
@property
def bodylist(self):
return self._bodylist
@property
def forcelist(self):
return self._forcelist
| bsd-3-clause | 3,973,334,214,438,429,700 | 42.347772 | 85 | 0.567709 | false |
mpenkov/smart_open | smart_open/tests/test_bytebuffer.py | 1 | 5334 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Radim Rehurek <[email protected]>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
#
import random
import unittest
import six
import smart_open.bytebuffer
CHUNK_SIZE = 1024
def random_byte_string(length=CHUNK_SIZE):
rand_bytes = [six.int2byte(random.randint(0, 255)) for _ in range(length)]
return b''.join(rand_bytes)
def bytebuffer_and_random_contents():
buf = smart_open.bytebuffer.ByteBuffer(CHUNK_SIZE)
contents = random_byte_string(CHUNK_SIZE)
content_reader = six.BytesIO(contents)
buf.fill(content_reader)
return [buf, contents]
class ByteBufferTest(unittest.TestCase):
def test_len(self):
buf = smart_open.bytebuffer.ByteBuffer(CHUNK_SIZE)
self.assertEqual(len(buf), 0)
contents = b'foo bar baz'
buf._bytes = contents
self.assertEqual(len(buf), len(contents))
pos = 4
buf._pos = pos
self.assertEqual(len(buf), len(contents) - pos)
def test_fill_from_reader(self):
buf = smart_open.bytebuffer.ByteBuffer(CHUNK_SIZE)
contents = random_byte_string(CHUNK_SIZE)
content_reader = six.BytesIO(contents)
bytes_filled = buf.fill(content_reader)
self.assertEqual(bytes_filled, CHUNK_SIZE)
self.assertEqual(len(buf), CHUNK_SIZE)
self.assertEqual(buf._bytes, contents)
def test_fill_from_iterable(self):
buf = smart_open.bytebuffer.ByteBuffer(CHUNK_SIZE)
contents = random_byte_string(CHUNK_SIZE)
contents_iter = (contents[i:i+8] for i in range(0, CHUNK_SIZE, 8))
bytes_filled = buf.fill(contents_iter)
self.assertEqual(bytes_filled, CHUNK_SIZE)
self.assertEqual(len(buf), CHUNK_SIZE)
self.assertEqual(buf._bytes, contents)
def test_fill_from_list(self):
buf = smart_open.bytebuffer.ByteBuffer(CHUNK_SIZE)
contents = random_byte_string(CHUNK_SIZE)
contents_list = [contents[i:i+7] for i in range(0, CHUNK_SIZE, 7)]
bytes_filled = buf.fill(contents_list)
self.assertEqual(bytes_filled, CHUNK_SIZE)
self.assertEqual(len(buf), CHUNK_SIZE)
self.assertEqual(buf._bytes, contents)
def test_fill_multiple(self):
buf = smart_open.bytebuffer.ByteBuffer(CHUNK_SIZE)
long_contents = random_byte_string(CHUNK_SIZE * 4)
long_content_reader = six.BytesIO(long_contents)
first_bytes_filled = buf.fill(long_content_reader)
self.assertEqual(first_bytes_filled, CHUNK_SIZE)
second_bytes_filled = buf.fill(long_content_reader)
self.assertEqual(second_bytes_filled, CHUNK_SIZE)
self.assertEqual(len(buf), 2 * CHUNK_SIZE)
def test_fill_size(self):
buf = smart_open.bytebuffer.ByteBuffer(CHUNK_SIZE)
contents = random_byte_string(CHUNK_SIZE * 2)
content_reader = six.BytesIO(contents)
fill_size = int(CHUNK_SIZE / 2)
bytes_filled = buf.fill(content_reader, size=fill_size)
self.assertEqual(bytes_filled, fill_size)
self.assertEqual(len(buf), fill_size)
second_bytes_filled = buf.fill(content_reader, size=CHUNK_SIZE+1)
self.assertEqual(second_bytes_filled, CHUNK_SIZE)
self.assertEqual(len(buf), fill_size + CHUNK_SIZE)
def test_fill_reader_exhaustion(self):
buf = smart_open.bytebuffer.ByteBuffer(CHUNK_SIZE)
short_content_size = int(CHUNK_SIZE / 4)
short_contents = random_byte_string(short_content_size)
short_content_reader = six.BytesIO(short_contents)
bytes_filled = buf.fill(short_content_reader)
self.assertEqual(bytes_filled, short_content_size)
self.assertEqual(len(buf), short_content_size)
def test_fill_iterable_exhaustion(self):
buf = smart_open.bytebuffer.ByteBuffer(CHUNK_SIZE)
short_content_size = int(CHUNK_SIZE / 4)
short_contents = random_byte_string(short_content_size)
short_contents_iter = (short_contents[i:i+8]
for i in range(0, short_content_size, 8))
bytes_filled = buf.fill(short_contents_iter)
self.assertEqual(bytes_filled, short_content_size)
self.assertEqual(len(buf), short_content_size)
def test_empty(self):
buf, _ = bytebuffer_and_random_contents()
self.assertEqual(len(buf), CHUNK_SIZE)
buf.empty()
self.assertEqual(len(buf), 0)
def test_peek(self):
buf, contents = bytebuffer_and_random_contents()
self.assertEqual(buf.peek(), contents)
self.assertEqual(len(buf), CHUNK_SIZE)
self.assertEqual(buf.peek(64), contents[0:64])
self.assertEqual(buf.peek(CHUNK_SIZE * 10), contents)
def test_read(self):
buf, contents = bytebuffer_and_random_contents()
self.assertEqual(buf.read(), contents)
self.assertEqual(len(buf), 0)
self.assertEqual(buf.read(), b'')
def test_read_size(self):
buf, contents = bytebuffer_and_random_contents()
read_size = 128
self.assertEqual(buf.read(read_size), contents[:read_size])
self.assertEqual(len(buf), CHUNK_SIZE - read_size)
self.assertEqual(buf.read(CHUNK_SIZE*2), contents[read_size:])
self.assertEqual(len(buf), 0)
| mit | 6,110,070,565,927,979,000 | 33.636364 | 78 | 0.652793 | false |
t-artistik/browserscope | models/result_ranker_storage.py | 9 | 3914 | #!/usr/bin/python2.5
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (Stephen Lamm)'
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
import score_ranker
class ScoreDatastore(score_ranker.StorageBase):
def __init__(self, parent_key):
"""Initialize the ScoreDatastore.
Args:
parent_key: the parent key of all the node entities.
"""
self.parent_key = parent_key
def RunInTransaction(self, func, *args, **kwds):
"""Run the pass function in a transaction.
Blocks other changes to the storage.
Args:
func: a function reference
args: the positional arguments list
kwds: the keyword arguments dict
Raises:
score_ranker.TransactionFailedError if transaction failed
"""
return datastore.RunInTransaction(func, *args, **kwds)
def SetMultiple(self, nodes):
"""Set multiple nodes at once.
    Node indexes that do not exist are created.
    Existing nodes are updated.
Args:
nodes: {node_index: [child_count_1, ...], ...}
"""
datastore.Put([self._CreateRankerNodeEntity(node)
for node in nodes.items()])
def Get(self, node_index):
"""Get a single node
Args:
node_index: an integer (0 is the root node)
Returns:
[child_count_1, ...] or None
"""
try:
node_entity = datastore.Get(self._RankerNodeKey(node_index))
except datastore_errors.EntityNotFoundError:
return None
return node_entity["child_counts"]
def GetMultiple(self, node_indexes):
"""Get multiple nodes at once.
Args:
node_indexes: [node index, ...] # where node_index is an integer
Returns:
{node_index_1: [child_count_1, ...], ...}
"""
node_entities = datastore.Get([self._RankerNodeKey(node_index)
for node_index in node_indexes])
return dict((node_index, node["child_counts"])
for node_index, node in zip(node_indexes, node_entities)
if node)
def DeleteMultiple(self, node_indexes):
"""Delete multiple nodes at once.
Args:
node_indexes: [node index, ...] # where node_index is an integer
"""
db_nodes = datastore.Delete([self._RankerNodeKey(node_index)
for node_index in set(node_indexes)])
def DeleteAll(self):
query = datastore.Query('ranker_node', keys_only=True)
query.Ancestor(self.parent_key)
datastore.Delete(list(query.Run()))
def _RankerNodeKey(self, node_index):
"""Creates a (named) key for the node with a given id.
The key will have the ranker as a parent element to guarantee
uniqueness (in the presence of multiple rankers) and to put all
nodes in a single entity group.
Args:
node_index: The node's id as an integer.
Returns:
A (named) key for the node with the id 'node_index'.
"""
return datastore_types.Key.from_path(
"ranker_node", "node_%s" % node_index, parent=self.parent_key)
def _CreateRankerNodeEntity(self, node):
node_index, child_counts = node
node_entity = datastore.Entity(
"ranker_node", parent=self.parent_key,
name=self._RankerNodeKey(node_index).name())
node_entity["child_counts"] = child_counts
return node_entity
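# Minimal wiring sketch (hypothetical call; the actual ranker construction
# lives elsewhere in this codebase): the adapter above is what gets handed to
# the score_ranker module, e.g. score_ranker.Ranker(ScoreDatastore(parent_key), ...).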
| apache-2.0 | 2,059,538,155,266,419,000 | 30.312 | 74 | 0.661472 | false |
erikdejonge/newsrivr | daemons/python2/stats.py | 1 | 1984 | from __future__ import print_function
from builtins import str
#!/usr/bin/env python
import os
import time
import pymongo
import datetime
from pymongo import ASCENDING, DESCENDING
if "Darwin" in os.popen("uname").read():
MONGOSERVER = 'localhost'
else:
MONGOSERVER = '192.168.167.192'
MONGOPORT = 27017
def getDB():
cnt = 0
db = None
while not db:
try:
conn = pymongo.Connection(MONGOSERVER, MONGOPORT)
db = conn.newsrivr
except Exception as e:
time.sleep(2)
cnt += 1
if cnt>60:
raise e
return db
def getCollUsers():
db = getDB()
coll = db.users
return coll
def getCollUnprocessedTweets():
db = getDB()
coll = db.tweets
return coll
def getCollDrops():
db = getDB()
coll = db.drops
return coll
def getCollStream():
db = getDB()
coll = db.stream
return coll
def getCollImageMd5s():
db = getDB()
coll = db.imagemd5
return coll
def getCollYoutubeTags():
db = getDB()
coll = db.youtubetags
return coll
def main():
while True:
print(datetime.datetime.now().strftime("%A, %d %B %Y, %I:%M:%S"))
print()
print("users:", getCollUsers().find({"screen_name":{'$exists':True}}).count())
print("tweets:", getCollUnprocessedTweets().count())
print("drops:", getCollDrops().count())
print("stream:", getCollStream().count())
print("imagemd5:", getCollImageMd5s().count())
print("youtubetags:", getCollYoutubeTags().count())
print()
print("users:")
for u in getCollUsers().find(sort=[("last_login",-1)]).limit(100):
if "screen_name" in u:
s = "\t<b>"+u["screen_name"] + "</b> - " + str(u["id_str"]) + " - " + str(u["newsrivr_userid_md5"])
if "last_login" in u:
s += " - Last pagerefresh:"+u["last_login"] + " - pagerefreshes:"+str(u["login_count"])
print(s)
#if "agent" in u:
# for a in u["agent"]:
# s = a.split(" ")
# if len(s)>2:
# print "\t\t", s[1].replace("(", "").replace(";", ""), s[len(s)-1], s[len(s)-2]
print()
return
if __name__=='__main__':
main() | gpl-2.0 | -3,438,365,905,066,072,600 | 21.556818 | 104 | 0.620968 | false |
silentfuzzle/calibre | src/tinycss/tests/media3.py | 16 | 3065 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.media3 import CSSMedia3Parser, MediaQuery as MQ
from tinycss.tests import BaseTest, jsonify
def jsonify_expr(e):
if e is None:
return None
return next(jsonify([e]))
def jsonify_expressions(mqlist):
for mq in mqlist:
mq.expressions = tuple(
(k, jsonify_expr(e)) for k, e in mq.expressions)
return mqlist
class TestFonts3(BaseTest):
def test_media_queries(self):
'Test parsing of media queries from the CSS 3 media module'
for css, media_query_list, expected_errors in [
# CSS 2.1 (simple media queries)
('@media {}', [MQ()], []),
('@media all {}', [MQ()], []),
('@media screen {}', [MQ('screen')], []),
('@media , screen {}', [MQ(), MQ('screen')], []),
('@media screen, {}', [MQ('screen'), MQ()], []),
# Examples from the CSS 3 specs
('@media screen and (color) {}', [MQ('screen', (('color', None),))], []),
('@media all and (min-width:500px) {}', [
MQ('all', (('min-width', ('DIMENSION', 500)),))], []),
('@media (min-width:500px) {}', [
MQ('all', (('min-width', ('DIMENSION', 500)),))], []),
('@media (orientation: portrait) {}', [
MQ('all', (('orientation', ('IDENT', 'portrait')),))], []),
('@media screen and (color), projection and (color) {}', [
MQ('screen', (('color', None),)), MQ('projection', (('color', None),)),], []),
('@media not screen and (color) {}', [
MQ('screen', (('color', None),), True)], []),
('@media only screen and (color) {}', [
MQ('screen', (('color', None),))], []),
('@media aural and (device-aspect-ratio: 16/9) {}', [
MQ('aural', (('device-aspect-ratio', ('RATIO', (16, 9))),))], []),
('@media (resolution: 166dpi) {}', [
MQ('all', (('resolution', ('DIMENSION', 166)),))], []),
('@media (min-resolution: 166DPCM) {}', [
MQ('all', (('min-resolution', ('DIMENSION', 166)),))], []),
# Malformed media queries
('@media (example, all,), speech {}', [MQ(negated=True), MQ('speech')], ['expected a :']),
('@media &test, screen {}', [MQ(negated=True), MQ('screen')], ['expected a media expression not a DELIM']),
]:
stylesheet = CSSMedia3Parser().parse_stylesheet(css)
self.assert_errors(stylesheet.errors, expected_errors)
self.ae(len(stylesheet.rules), 1)
rule = stylesheet.rules[0]
self.ae(jsonify_expressions(rule.media), media_query_list)
| gpl-3.0 | 6,072,671,689,688,287,000 | 45.439394 | 123 | 0.48385 | false |
boundary/pulse-api-cli | tests/unit/boundary/measurement_create_test.py | 4 | 1463 | #!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from boundary import MeasurementCreate
from cli_runner import CLIRunner
from cli_test import CLITest
class MeasurementCreateTest(TestCase):
def setUp(self):
self.cli = MeasurementCreate()
def test_get_description(self):
CLITest.check_description(self, self.cli)
def test_cli_help(self):
CLITest.check_cli_help(self, self.cli)
def test_create_curl(self):
runner = CLIRunner(self.cli)
metric = 'CPU'
measurement = 0.5
timestamp = 1452643455
curl = runner.get_output(['-n', metric,
'-m', str(measurement),
'-s', 'source1',
'-d', str(timestamp),
'-z'])
CLITest.check_curl(self, self.cli, curl)
| apache-2.0 | -3,944,174,863,018,193,400 | 29.479167 | 74 | 0.632946 | false |
authman/Python201609 | Wright_Will/Assignments/login_and_registration_pylot/Pylot/app/config/database.py | 1 | 1049 | """
Database Specific Configuration File
"""
""" Put Generic Database Configurations here """
import os
class DBConfig(object):
""" DB_ON must be True to use the DB! """
DB_ON = True
DB_DRIVER = 'mysql'
DB_ORM = False
""" Put Development Specific Configurations here """
class DevelopmentDBConfig(DBConfig):
DB_USERNAME = 'root'
DB_PASSWORD = 'root'
DB_DATABASE_NAME = 'login_reg'
DB_HOST = 'localhost'
DB_PORT = 3306
# """ unix_socket is used for connecting with MAMP. Take this out if you aren't using MAMP """
# DB_OPTIONS = {
# 'unix_socket': '/Applications/MAMP/tmp/mysql/mysql.sock'
# }
""" Put Staging Specific Configurations here """
class StagingDBConfig(DBConfig):
DB_USERNAME = 'root'
DB_PASSWORD = 'root'
DB_DATABASE_NAME = 'login_reg'
DB_HOST = 'localhost'
""" Put Production Specific Configurations here """
class ProductionDBConfig(DBConfig):
DB_USERNAME = 'root'
DB_PASSWORD = 'root'
DB_DATABASE_NAME = 'login_reg'
DB_HOST = 'localhost'
| mit | 1,118,535,375,860,781,600 | 27.351351 | 98 | 0.64919 | false |
wscullin/spack | var/spack/repos/builtin/packages/python/package.py | 2 | 26410 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import ast
import os
import platform
import re
import sys
from contextlib import closing
import spack
import llnl.util.tty as tty
from llnl.util.lang import match_predicate
from llnl.util.filesystem import force_remove
from spack import *
from spack.util.environment import *
from spack.util.prefix import Prefix
import spack.util.spack_json as sjson
class Python(AutotoolsPackage):
"""The Python programming language."""
homepage = "http://www.python.org"
url = "http://www.python.org/ftp/python/2.7.8/Python-2.7.8.tgz"
list_url = "https://www.python.org/downloads/"
list_depth = 1
version('3.6.2', 'e1a36bfffdd1d3a780b1825daf16e56c')
version('3.6.1', '2d0fc9f3a5940707590e07f03ecb08b9')
version('3.6.0', '3f7062ccf8be76491884d0e47ac8b251')
version('3.5.2', '3fe8434643a78630c61c6464fe2e7e72')
version('3.5.1', 'be78e48cdfc1a7ad90efff146dce6cfe')
version('3.5.0', 'a56c0c0b45d75a0ec9c6dee933c41c36')
version('3.4.3', '4281ff86778db65892c05151d5de738d')
version('3.3.6', 'cdb3cd08f96f074b3f3994ccb51063e9')
version('3.2.6', '23815d82ae706e9b781ca65865353d39')
version('3.1.5', '02196d3fc7bc76bdda68aa36b0dd16ab')
version('2.7.14', 'cee2e4b33ad3750da77b2e85f2f8b724', preferred=True)
version('2.7.13', '17add4bf0ad0ec2f08e0cae6d205c700')
version('2.7.12', '88d61f82e3616a4be952828b3694109d')
version('2.7.11', '6b6076ec9e93f05dd63e47eb9c15728b')
version('2.7.10', 'd7547558fd673bd9d38e2108c6b42521')
version('2.7.9', '5eebcaa0030dc4061156d3429657fb83')
version('2.7.8', 'd4bca0159acb0b44a781292b5231936f')
extendable = True
# --enable-shared is known to cause problems for some users on macOS
# See http://bugs.python.org/issue29846
variant('shared', default=sys.platform != 'darwin',
description='Enable shared libraries')
variant('tk', default=False, description='Provide support for Tkinter')
variant('ucs4', default=False,
description='Enable UCS4 (wide) unicode strings')
# From https://docs.python.org/2/c-api/unicode.html: Python's default
# builds use a 16-bit type for Py_UNICODE and store Unicode values
# internally as UCS2. It is also possible to build a UCS4 version of Python
# (most recent Linux distributions come with UCS4 builds of Python). These
# builds then use a 32-bit type for Py_UNICODE and store Unicode data
# internally as UCS4. Note that UCS2 and UCS4 Python builds are not binary
# compatible.
variant('pic', default=True,
description='Produce position-independent code (for shared libs)')
depends_on("openssl")
depends_on("bzip2")
depends_on("readline")
depends_on("ncurses")
depends_on("sqlite")
depends_on("zlib")
depends_on("tk", when="+tk")
depends_on("tcl", when="+tk")
# Patch does not work for Python 3.1
patch('ncurses.patch', when='@:2.8,3.2:')
# Ensure that distutils chooses correct compiler option for RPATH on cray:
patch('cray-rpath-2.3.patch', when="@2.3:3.0.1 platform=cray")
patch('cray-rpath-3.1.patch', when="@3.1:3.99 platform=cray")
_DISTUTIL_VARS_TO_SAVE = ['LDSHARED']
_DISTUTIL_CACHE_FILENAME = 'sysconfig.json'
_distutil_vars = None
@when('@2.7:2.8,3.4:')
def patch(self):
# NOTE: Python's default installation procedure makes it possible for a
# user's local configurations to change the Spack installation. In
# order to prevent this behavior for a full installation, we must
# modify the installation script so that it ignores user files.
ff = FileFilter('Makefile.pre.in')
ff.filter(
r'^(.*)setup\.py(.*)((build)|(install))(.*)$',
r'\1setup.py\2 --no-user-cfg \3\6'
)
def setup_environment(self, spack_env, run_env):
spec = self.spec
prefix = self.prefix
# TODO: The '--no-user-cfg' option for Python installation is only in
# Python v2.7 and v3.4+ (see https://bugs.python.org/issue1180) and
# adding support for ignoring user configuration will require
# significant changes to this package for other Python versions.
if not spec.satisfies('@2.7,3.4:'):
tty.warn(('Python v{0} may not install properly if Python '
'user configurations are present.').format(self.version))
# Need this to allow python build to find the Python installation.
spack_env.set('PYTHONHOME', prefix)
spack_env.set('PYTHONPATH', prefix)
spack_env.set('MACOSX_DEPLOYMENT_TARGET', platform.mac_ver()[0])
def configure_args(self):
spec = self.spec
# setup.py needs to be able to read the CPPFLAGS and LDFLAGS
# as it scans for the library and headers to build
dep_pfxs = [dspec.prefix for dspec in spec.dependencies('link')]
config_args = [
'--with-threads',
'CPPFLAGS=-I{0}'.format(' -I'.join(dp.include for dp in dep_pfxs)),
'LDFLAGS=-L{0}'.format(' -L'.join(dp.lib for dp in dep_pfxs)),
]
if spec.satisfies('%gcc platform=darwin'):
config_args.append('--disable-toolbox-glue')
if '+shared' in spec:
config_args.append('--enable-shared')
else:
config_args.append('--disable-shared')
if '+ucs4' in spec:
if spec.satisfies('@:2.7'):
config_args.append('--enable-unicode=ucs4')
elif spec.satisfies('@3.0:3.2'):
config_args.append('--with-wide-unicode')
elif spec.satisfies('@3.3:'):
# https://docs.python.org/3.3/whatsnew/3.3.html
raise ValueError(
'+ucs4 variant not compatible with Python 3.3 and beyond')
if spec.satisfies('@3:'):
config_args.append('--without-ensurepip')
if '+pic' in spec:
config_args.append('CFLAGS={0}'.format(self.compiler.pic_flag))
return config_args
@run_after('install')
def post_install(self):
spec = self.spec
prefix = self.prefix
self.sysconfigfilename = '_sysconfigdata.py'
if spec.satisfies('@3.6:'):
# Python 3.6.0 renamed the sys config file
sc = 'import sysconfig; print(sysconfig._get_sysconfigdata_name())'
cf = self.command('-c', sc, output=str).strip()
self.sysconfigfilename = '{0}.py'.format(cf)
self._save_distutil_vars(prefix)
self.filter_compilers(prefix)
# TODO:
# On OpenSuse 13, python uses <prefix>/lib64/python2.7/lib-dynload/*.so
# instead of <prefix>/lib/python2.7/lib-dynload/*.so. Oddly enough the
# result is that Python can not find modules like cPickle. A workaround
# for now is to symlink to `lib`:
src = os.path.join(prefix.lib64,
'python{0}'.format(self.version.up_to(2)),
'lib-dynload')
dst = os.path.join(prefix.lib,
'python{0}'.format(self.version.up_to(2)),
'lib-dynload')
if os.path.isdir(src) and not os.path.isdir(dst):
mkdirp(dst)
for f in os.listdir(src):
os.symlink(os.path.join(src, f),
os.path.join(dst, f))
# TODO: Once better testing support is integrated, add the following tests
# https://wiki.python.org/moin/TkInter
#
# Note: Only works if ForwardX11Trusted is enabled, i.e. `ssh -Y`
#
# if '+tk' in spec:
# env['TK_LIBRARY'] = join_path(spec['tk'].prefix.lib,
# 'tk{0}'.format(spec['tk'].version.up_to(2)))
# env['TCL_LIBRARY'] = join_path(spec['tcl'].prefix.lib,
# 'tcl{0}'.format(spec['tcl'].version.up_to(2)))
#
# $ python
# >>> import _tkinter
#
# if spec.satisfies('@3:')
# >>> import tkinter
# >>> tkinter._test()
# else:
# >>> import Tkinter
# >>> Tkinter._test()
def _save_distutil_vars(self, prefix):
"""
Run before changing automatically generated contents of the
_sysconfigdata.py, which is used by distutils to figure out what
executables to use while compiling and linking extensions. If we build
extensions with spack those executables should be spack's wrappers.
Spack partially covers this by setting environment variables that
are also accounted for by distutils. Currently there is one more known
variable that must be set, which is LDSHARED, so the method saves its
autogenerated value to pass it to the dependent package's setup script.
"""
self._distutil_vars = {}
input_filename = None
for filename in [join_path(lib_dir,
'python{0}'.format(self.version.up_to(2)),
self.sysconfigfilename)
for lib_dir in [prefix.lib, prefix.lib64]]:
if os.path.isfile(filename):
input_filename = filename
break
if not input_filename:
return
input_dict = None
try:
with open(input_filename) as input_file:
match = re.search(r'build_time_vars\s*=\s*(?P<dict>{.*})',
input_file.read(),
flags=re.DOTALL)
if match:
input_dict = ast.literal_eval(match.group('dict'))
except (IOError, SyntaxError):
pass
if not input_dict:
tty.warn("Failed to find 'build_time_vars' dictionary in file "
"'%s'. This might cause the extensions that are "
"installed with distutils to call compilers directly "
"avoiding Spack's wrappers." % input_filename)
return
for var_name in Python._DISTUTIL_VARS_TO_SAVE:
if var_name in input_dict:
self._distutil_vars[var_name] = input_dict[var_name]
else:
tty.warn("Failed to find key '%s' in 'build_time_vars' "
"dictionary in file '%s'. This might cause the "
"extensions that are installed with distutils to "
"call compilers directly avoiding Spack's wrappers."
% (var_name, input_filename))
if len(self._distutil_vars) > 0:
output_filename = None
try:
output_filename = join_path(
spack.store.layout.metadata_path(self.spec),
Python._DISTUTIL_CACHE_FILENAME)
with open(output_filename, 'w') as output_file:
sjson.dump(self._distutil_vars, output_file)
except:
tty.warn("Failed to save metadata for distutils. This might "
"cause the extensions that are installed with "
"distutils to call compilers directly avoiding "
"Spack's wrappers.")
# We make the cache empty if we failed to save it to file
# to provide the same behaviour as in the case when the cache
# is initialized by the method load_distutils_data().
self._distutil_vars = {}
if output_filename:
force_remove(output_filename)
def _load_distutil_vars(self):
# We update and keep the cache unchanged only if the package is
# installed.
if not self._distutil_vars and self.installed:
try:
input_filename = join_path(
spack.store.layout.metadata_path(self.spec),
Python._DISTUTIL_CACHE_FILENAME)
if os.path.isfile(input_filename):
with open(input_filename) as input_file:
self._distutil_vars = sjson.load(input_file)
except:
pass
if not self._distutil_vars:
self._distutil_vars = {}
return self._distutil_vars
def filter_compilers(self, prefix):
"""Run after install to tell the configuration files and Makefiles
to use the compilers that Spack built the package with.
If this isn't done, they'll have CC and CXX set to Spack's generic
cc and c++. We want them to be bound to whatever compiler
they were built with."""
kwargs = {'ignore_absent': True, 'backup': False, 'string': True}
lib_dirnames = [
join_path(lib_dir, 'python{0}'.format(self.version.up_to(2))) for
lib_dir in [prefix.lib, prefix.lib64]]
config_dirname = 'config-{0}m'.format(
self.version.up_to(2)) if self.spec.satisfies('@3:') else 'config'
rel_filenames = [self.sysconfigfilename,
join_path(config_dirname, 'Makefile')]
abs_filenames = [join_path(dirname, filename) for dirname in
lib_dirnames for filename in rel_filenames]
filter_file(env['CC'], self.compiler.cc, *abs_filenames, **kwargs)
filter_file(env['CXX'], self.compiler.cxx, *abs_filenames, **kwargs)
# ========================================================================
# Set up environment to make install easy for python extensions.
# ========================================================================
@property
def command(self):
"""Returns the Python command, which may vary depending
on the version of Python and how it was installed.
In general, Python 2 comes with ``python`` and ``python2`` commands,
while Python 3 only comes with a ``python3`` command.
:returns: The Python command
:rtype: Executable
"""
# We need to be careful here. If the user is using an externally
# installed python, all 3 commands could be in the same directory.
# Search for `python2` iff using Python 2
if (self.spec.satisfies('@:2') and
os.path.exists(os.path.join(self.prefix.bin, 'python2'))):
command = 'python2'
# Search for `python3` iff using Python 3
elif (self.spec.satisfies('@3:') and
os.path.exists(os.path.join(self.prefix.bin, 'python3'))):
command = 'python3'
# If neither were found, try `python`
elif os.path.exists(os.path.join(self.prefix.bin, 'python')):
command = 'python'
else:
msg = 'Unable to locate {0} command in {1}'
raise RuntimeError(msg.format(self.name, self.prefix.bin))
# The python command may be a symlink if it was installed
# with Homebrew. Since some packages try to determine the
# location of libraries and headers based on the path,
# return the realpath
path = os.path.realpath(os.path.join(self.prefix.bin, command))
return Executable(path)
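    # Illustrative call of the returned Executable (the exact output naturally
    # depends on the interpreter that was built):
    #   self.command('-c', 'import sys; print(sys.version)', output=str)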
def print_string(self, string):
"""Returns the appropriate print string depending on the
version of Python.
Examples:
* Python 2
.. code-block:: python
>>> self.print_string('sys.prefix')
'print sys.prefix'
* Python 3
.. code-block:: python
>>> self.print_string('sys.prefix')
'print(sys.prefix)'
"""
if self.spec.satisfies('@:2'):
return 'print {0}'.format(string)
else:
return 'print({0})'.format(string)
def get_config_var(self, key):
"""Returns the value of a single variable. Wrapper around
``distutils.sysconfig.get_config_var()``."""
cmd = 'from distutils.sysconfig import get_config_var; '
cmd += self.print_string("get_config_var('{0}')".format(key))
return self.command('-c', cmd, output=str).strip()
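    # For instance, self.get_config_var('LIBDIR') typically yields something
    # like '<python prefix>/lib'; treat the exact value as installation-specific.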
def get_config_h_filename(self):
"""Returns the full path name of the configuration header.
Wrapper around ``distutils.sysconfig.get_config_h_filename()``."""
cmd = 'from distutils.sysconfig import get_config_h_filename; '
cmd += self.print_string('get_config_h_filename()')
return self.command('-c', cmd, output=str).strip()
@property
def home(self):
"""Most of the time, ``PYTHONHOME`` is simply
``spec['python'].prefix``. However, if the user is using an
externally installed python, it may be symlinked. For example,
Homebrew installs python in ``/usr/local/Cellar/python/2.7.12_2``
and symlinks it to ``/usr/local``. Users may not know the actual
installation directory and add ``/usr/local`` to their
``packages.yaml`` unknowingly. Query the python executable to
determine exactly where it is installed."""
prefix = self.get_config_var('prefix')
return Prefix(prefix)
@property
def libs(self):
# Spack installs libraries into lib, except on openSUSE where it
# installs them into lib64. If the user is using an externally
# installed package, it may be in either lib or lib64, so we need
# to ask Python where its LIBDIR is.
libdir = self.get_config_var('LIBDIR')
# The system Python installation on macOS and Homebrew installations
# install libraries into a Frameworks directory
frameworkprefix = self.get_config_var('PYTHONFRAMEWORKPREFIX')
if '+shared' in self.spec:
ldlibrary = self.get_config_var('LDLIBRARY')
if os.path.exists(os.path.join(libdir, ldlibrary)):
return LibraryList(os.path.join(libdir, ldlibrary))
elif os.path.exists(os.path.join(frameworkprefix, ldlibrary)):
return LibraryList(os.path.join(frameworkprefix, ldlibrary))
else:
msg = 'Unable to locate {0} libraries in {1}'
raise RuntimeError(msg.format(self.name, libdir))
else:
library = self.get_config_var('LIBRARY')
if os.path.exists(os.path.join(libdir, library)):
return LibraryList(os.path.join(libdir, library))
elif os.path.exists(os.path.join(frameworkprefix, library)):
return LibraryList(os.path.join(frameworkprefix, library))
else:
msg = 'Unable to locate {0} libraries in {1}'
raise RuntimeError(msg.format(self.name, libdir))
@property
def headers(self):
config_h = self.get_config_h_filename()
if os.path.exists(config_h):
return HeaderList(config_h)
else:
includepy = self.get_config_var('INCLUDEPY')
msg = 'Unable to locate {0} headers in {1}'
raise RuntimeError(msg.format(self.name, includepy))
@property
def python_lib_dir(self):
return join_path('lib', 'python{0}'.format(self.version.up_to(2)))
@property
def python_include_dir(self):
return join_path('include', 'python{0}'.format(self.version.up_to(2)))
@property
def site_packages_dir(self):
return join_path(self.python_lib_dir, 'site-packages')
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
"""Set PYTHONPATH to include the site-packages directory for the
extension and any other python extensions it depends on."""
spack_env.set('PYTHONHOME', self.home)
python_paths = []
for d in dependent_spec.traverse(
deptype=('build', 'run'), deptype_query='run'):
if d.package.extends(self.spec):
python_paths.append(join_path(d.prefix,
self.site_packages_dir))
pythonpath = ':'.join(python_paths)
spack_env.set('PYTHONPATH', pythonpath)
# For run time environment set only the path for
# dependent_spec and prepend it to PYTHONPATH
if dependent_spec.package.extends(self.spec):
run_env.prepend_path('PYTHONPATH', join_path(
dependent_spec.prefix, self.site_packages_dir))
def setup_dependent_package(self, module, dependent_spec):
"""Called before python modules' install() methods.
In most cases, extensions will only need to have one line::
setup_py('install', '--prefix={0}'.format(prefix))"""
module.python = self.command
module.setup_py = Executable(
self.command.path + ' setup.py --no-user-cfg')
distutil_vars = self._load_distutil_vars()
if distutil_vars:
for key, value in distutil_vars.items():
module.setup_py.add_default_env(key, value)
# Add variables for lib/pythonX.Y and lib/pythonX.Y/site-packages dirs.
module.python_lib_dir = join_path(dependent_spec.prefix,
self.python_lib_dir)
module.python_include_dir = join_path(dependent_spec.prefix,
self.python_include_dir)
module.site_packages_dir = join_path(dependent_spec.prefix,
self.site_packages_dir)
self.spec.home = self.home
# Make the site packages directory for extensions
if dependent_spec.package.is_extension:
mkdirp(module.site_packages_dir)
# ========================================================================
# Handle specifics of activating and deactivating python modules.
# ========================================================================
def python_ignore(self, ext_pkg, args):
"""Add some ignore files to activate/deactivate args."""
ignore_arg = args.get('ignore', lambda f: False)
# Always ignore easy-install.pth, as it needs to be merged.
patterns = [r'site-packages/easy-install\.pth$']
# Ignore pieces of setuptools installed by other packages.
# Must include directory name or it will remove all site*.py files.
if ext_pkg.name != 'py-setuptools':
patterns.extend([
r'bin/easy_install[^/]*$',
r'site-packages/setuptools[^/]*\.egg$',
r'site-packages/setuptools\.pth$',
r'site-packages/site[^/]*\.pyc?$',
r'site-packages/__pycache__/site[^/]*\.pyc?$'
])
if ext_pkg.name != 'py-pygments':
patterns.append(r'bin/pygmentize$')
if ext_pkg.name != 'py-numpy':
patterns.append(r'bin/f2py[0-9.]*$')
return match_predicate(ignore_arg, patterns)
def write_easy_install_pth(self, exts):
paths = []
for ext in sorted(exts.values()):
ext_site_packages = join_path(ext.prefix, self.site_packages_dir)
easy_pth = join_path(ext_site_packages, "easy-install.pth")
if not os.path.isfile(easy_pth):
continue
with closing(open(easy_pth)) as f:
for line in f:
line = line.rstrip()
# Skip lines matching these criteria
if not line:
continue
if re.search(r'^(import|#)', line):
continue
if (ext.name != 'py-setuptools' and
re.search(r'setuptools.*egg$', line)):
continue
paths.append(line)
site_packages = join_path(self.home, self.site_packages_dir)
main_pth = join_path(site_packages, "easy-install.pth")
if not paths:
if os.path.isfile(main_pth):
os.remove(main_pth)
else:
with closing(open(main_pth, 'w')) as f:
f.write("import sys; sys.__plen = len(sys.path)\n")
for path in paths:
f.write("{0}\n".format(path))
f.write("import sys; new=sys.path[sys.__plen:]; "
"del sys.path[sys.__plen:]; "
"p=getattr(sys,'__egginsert',0); "
"sys.path[p:p]=new; "
"sys.__egginsert = p+len(new)\n")
def activate(self, ext_pkg, **args):
ignore = self.python_ignore(ext_pkg, args)
args.update(ignore=ignore)
super(Python, self).activate(ext_pkg, **args)
exts = spack.store.layout.extension_map(self.spec)
exts[ext_pkg.name] = ext_pkg.spec
self.write_easy_install_pth(exts)
def deactivate(self, ext_pkg, **args):
args.update(ignore=self.python_ignore(ext_pkg, args))
super(Python, self).deactivate(ext_pkg, **args)
exts = spack.store.layout.extension_map(self.spec)
# Make deactivate idempotent
if ext_pkg.name in exts:
del exts[ext_pkg.name]
self.write_easy_install_pth(exts)
| lgpl-2.1 | -3,751,054,935,770,177,000 | 40.201248 | 79 | 0.580083 | false |
veger/ansible | lib/ansible/modules/network/fortios/fortios_firewall_vipgrp64.py | 3 | 8717 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_vipgrp64
short_description: Configure IPv6 to IPv4 virtual IP groups in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and vipgrp64 category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
            - FortiOS or FortiGate IP address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_vipgrp64:
description:
- Configure IPv6 to IPv4 virtual IP groups.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
color:
description:
- Integer value to determine the color of the icon in the GUI (range 1 to 32, default = 0, which sets the value to 1).
comments:
description:
- Comment.
member:
description:
- Member VIP objects of the group (Separate multiple objects with a space).
suboptions:
name:
description:
- VIP64 name. Source firewall.vip64.name.
required: true
name:
description:
- VIP64 group name.
required: true
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv6 to IPv4 virtual IP groups.
fortios_firewall_vipgrp64:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_vipgrp64:
state: "present"
color: "3"
comments: "<your_own_value>"
member:
-
name: "default_name_6 (source firewall.vip64.name)"
name: "default_name_7"
uuid: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: string
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: string
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: string
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: string
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: string
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: string
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: string
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: string
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: string
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: string
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: string
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_vipgrp64_data(json):
option_list = ['color', 'comments', 'member',
'name', 'uuid']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
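# Illustrative behaviour (made-up input): for {'name': 'grp1', 'color': 3,
# 'comments': None} the filter returns {'name': 'grp1', 'color': 3} -- keys
# outside option_list and None values are dropped.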
def firewall_vipgrp64(data, fos):
vdom = data['vdom']
firewall_vipgrp64_data = data['firewall_vipgrp64']
filtered_data = filter_firewall_vipgrp64_data(firewall_vipgrp64_data)
if firewall_vipgrp64_data['state'] == "present":
return fos.set('firewall',
'vipgrp64',
data=filtered_data,
vdom=vdom)
elif firewall_vipgrp64_data['state'] == "absent":
return fos.delete('firewall',
'vipgrp64',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_vipgrp64']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"firewall_vipgrp64": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"color": {"required": False, "type": "int"},
"comments": {"required": False, "type": "str"},
"member": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"name": {"required": True, "type": "str"},
"uuid": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -11,581,835,324,222,444 | 28.750853 | 138 | 0.585637 | false |
Woile/commitizen | commitizen/config/yaml_config.py | 1 | 1337 | from pathlib import Path
from typing import Union
import yaml
from .base_config import BaseConfig
class YAMLConfig(BaseConfig):
def __init__(self, *, data: Union[bytes, str], path: Union[Path, str]):
super(YAMLConfig, self).__init__()
self.is_empty_config = False
self._parse_setting(data)
self.add_path(path)
def init_empty_config_content(self):
with open(self.path, "a") as json_file:
yaml.dump({"commitizen": {}}, json_file)
def _parse_setting(self, data: Union[bytes, str]):
"""We expect to have a section in cz.yaml looking like
```
commitizen:
name: cz_conventional_commits
```
"""
doc = yaml.safe_load(data)
try:
self.settings.update(doc["commitizen"])
except (KeyError, TypeError):
self.is_empty_config = True
def set_key(self, key, value):
"""Set or update a key in the conf.
For now only strings are supported.
We use to update the version number.
"""
with open(self.path, "rb") as yaml_file:
parser = yaml.load(yaml_file, Loader=yaml.FullLoader)
parser["commitizen"][key] = value
with open(self.path, "w") as yaml_file:
yaml.dump(parser, yaml_file)
return self
| mit | -6,270,069,536,542,927,000 | 27.446809 | 75 | 0.5819 | false |
mtndesign/myVim | myvim/bundle/ropevim/ftplugin/python/libs/ropemode/decorators.py | 24 | 2479 | import traceback
from rope.base import exceptions
class Logger(object):
message = None
only_short = False
def __call__(self, message, short=None):
if short is None or not self.only_short:
self._show(message)
if short is not None:
self._show(short)
def _show(self, message):
        if self.message is None:
print message
else:
self.message(message)
logger = Logger()
def lisphook(func):
def newfunc(*args, **kwds):
try:
func(*args, **kwds)
except Exception, e:
trace = str(traceback.format_exc())
short = 'Ignored an exception in ropemode hook: %s' % \
_exception_message(e)
logger(trace, short)
newfunc.lisp = None
newfunc.__name__ = func.__name__
newfunc.__doc__ = func.__doc__
return newfunc
def lispfunction(func):
func.lisp = None
return func
input_exceptions = (exceptions.RefactoringError,
exceptions.ModuleSyntaxError,
exceptions.BadIdentifierError)
def _exception_handler(func):
def newfunc(*args, **kwds):
try:
return func(*args, **kwds)
except exceptions.RopeError, e:
short = None
if isinstance(e, input_exceptions):
short = _exception_message(e)
logger(str(traceback.format_exc()), short)
newfunc.__name__ = func.__name__
newfunc.__doc__ = func.__doc__
return newfunc
def _exception_message(e):
return '%s: %s' % (e.__class__.__name__, str(e))
def rope_hook(hook):
def decorator(func):
func = lisphook(func)
func.name = func.__name__
func.kind = 'hook'
func.hook = hook
return func
return decorator
def local_command(key=None, prefix=False, shortcut=None, name=None):
def decorator(func, name=name):
func = _exception_handler(func)
func.kind = 'local'
func.prefix = prefix
func.local_key = key
func.shortcut_key = shortcut
if name is None:
name = func.__name__
func.name = name
return func
return decorator
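# Illustrative registration (hypothetical key binding and function name):
#   @local_command('a g', shortcut='C-c g')
#   def goto_definition(self):
#       ...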
def global_command(key=None, prefix=False):
def decorator(func):
func = _exception_handler(func)
func.kind = 'global'
func.prefix = prefix
func.global_key = key
func.name = func.__name__
return func
return decorator
| mit | -1,750,259,612,598,305,500 | 24.295918 | 68 | 0.564744 | false |
Yannig/ansible | lib/ansible/modules/source_control/git.py | 2 | 44955 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: git
author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.0.1"
short_description: Deploy software (or files) from git checkouts
description:
- Manage I(git) checkouts of repositories to deploy files or software.
options:
repo:
required: true
aliases: [ name ]
description:
- git, SSH, or HTTP(S) protocol address of the git repository.
dest:
required: true
description:
- The path of where the repository should be checked out. This
parameter is required, unless C(clone) is set to C(no).
version:
required: false
default: "HEAD"
description:
- What version of the repository to check out. This can be the
              literal string C(HEAD), a branch name, a tag name.
It can also be a I(SHA-1) hash, in which case C(refspec) needs
to be specified if the given revision is not already available.
accept_hostkey:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.5"
description:
- if C(yes), ensure that "-o StrictHostKeyChecking=no" is
              present as an ssh option.
ssh_opts:
required: false
default: None
version_added: "1.5"
description:
- Creates a wrapper script and exports the path as GIT_SSH
which git then automatically uses to override ssh arguments.
An example value could be "-o StrictHostKeyChecking=no"
key_file:
required: false
default: None
version_added: "1.5"
description:
- Specify an optional private key file to use for the checkout.
reference:
required: false
default: null
version_added: "1.4"
description:
- Reference repository (see "git clone --reference ...")
remote:
required: false
default: "origin"
description:
- Name of the remote.
refspec:
required: false
default: null
version_added: "1.9"
description:
- Add an additional refspec to be fetched.
If version is set to a I(SHA-1) not reachable from any branch
or tag, this option may be necessary to specify the ref containing
the I(SHA-1).
Uses the same syntax as the 'git fetch' command.
An example value could be "refs/meta/config".
force:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "0.7"
description:
- If C(yes), any modified files in the working
repository will be discarded. Prior to 0.7, this was always
'yes' and could not be disabled. Prior to 1.9, the default was
`yes`
depth:
required: false
default: null
version_added: "1.2"
description:
- Create a shallow clone with a history truncated to the specified
              number of revisions. The minimum possible value is C(1), otherwise
ignored. Needs I(git>=1.9.1) to work correctly.
clone:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.9"
description:
- If C(no), do not clone the repository if it does not exist locally
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.2"
description:
- If C(no), do not retrieve new revisions from the origin repository
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to git executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
bare:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.4"
description:
- if C(yes), repository will be created as a bare repo, otherwise
it will be a standard repo with a workspace.
umask:
required: false
default: null
version_added: "2.2"
description:
- The umask to set before doing any checkouts, or any other
repository maintenance.
recursive:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.6"
description:
- if C(no), repository will be cloned without the --recursive
option, skipping sub-modules.
track_submodules:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "1.8"
description:
- if C(yes), submodules will track the latest commit on their
master branch (or other branch specified in .gitmodules). If
C(no), submodules will be kept at the revision specified by the
main project. This is equivalent to specifying the --remote flag
to git submodule update.
verify_commit:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.0"
description:
- if C(yes), when cloning or checking out a C(version) verify the
signature of a GPG signed commit. This requires C(git) version>=2.1.0
to be installed. The commit MUST be signed and the public key MUST
be present in the GPG keyring.
archive:
required: false
version_added: "2.4"
description:
- Specify archive file path with extension. If specified, creates an
archive file of the specified format containing the tree structure
for the source tree.
Allowed archive formats ["zip", "tar.gz", "tar", "tgz"]
requirements:
- git>=1.7.1 (the command line tool)
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to use the option accept_hostkey. Another solution is to
add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
'''
EXAMPLES = '''
# Example git checkout from Ansible Playbooks
- git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
version: release-0.22
# Example read-write git checkout from github
- git:
repo: ssh://[email protected]/mylogin/hello.git
dest: /home/mylogin/hello
# Example just ensuring the repo checkout exists
- git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
update: no
# Example just get information about the repository whether or not it has
# already been cloned locally.
- git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
clone: no
update: no
# Example checkout a github repo and use refspec to fetch all pull requests
- git:
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
refspec: '+refs/pull/*:refs/heads/*'
# Example Create git archive from repo
- git:
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
archive: /tmp/ansible-examples.zip
'''
RETURN = '''
after:
description: last commit revision of the repository retrieved during the update
returned: success
type: string
sample: 4c020102a9cd6fe908c9a4a326a38f972f63a903
before:
description: commit revision before the repository was updated, "null" for new repository
returned: success
type: string
sample: 67c04ebe40a003bda0efb34eacfb93b0cafdf628
remote_url_changed:
description: Contains True or False whether or not the remote URL was changed.
returned: success
type: boolean
sample: True
warnings:
description: List of warnings if requested features were not available due to a too old git version.
returned: error
type: string
sample: Your git version is too old to fully support the depth argument. Falling back to full checkouts.
'''
import filecmp
import os
import re
import shlex
import stat
import sys
import shutil
import tempfile
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, get_module_path
from ansible.module_utils.six import b, string_types
from ansible.module_utils._text import to_native
def head_splitter(headfile, remote, module=None, fail_on_error=False):
'''Extract the head reference'''
# https://github.com/ansible/ansible-modules-core/pull/907
res = None
if os.path.exists(headfile):
rawdata = None
try:
f = open(headfile, 'r')
rawdata = f.readline()
f.close()
except:
if fail_on_error and module:
module.fail_json(msg="Unable to read %s" % headfile)
if rawdata:
try:
rawdata = rawdata.replace('refs/remotes/%s' % remote, '', 1)
refparts = rawdata.split(' ')
newref = refparts[-1]
nrefparts = newref.split('/', 2)
res = nrefparts[-1].rstrip('\n')
except:
if fail_on_error and module:
module.fail_json(msg="Unable to split head from '%s'" % rawdata)
return res
def unfrackgitpath(path):
if path is None:
return None
# copied from ansible.utils.path
return os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(path))))
def get_submodule_update_params(module, git_path, cwd):
# or: git submodule [--quiet] update [--init] [-N|--no-fetch]
# [-f|--force] [--rebase] [--reference <repository>] [--merge]
# [--recursive] [--] [<path>...]
params = []
# run a bad submodule command to get valid params
cmd = "%s submodule update --help" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
lines = stderr.split('\n')
update_line = None
for line in lines:
if 'git submodule [--quiet] update ' in line:
update_line = line
if update_line:
update_line = update_line.replace('[', '')
update_line = update_line.replace(']', '')
update_line = update_line.replace('|', ' ')
parts = shlex.split(update_line)
for part in parts:
if part.startswith('--'):
part = part.replace('--', '')
params.append(part)
return params
def write_ssh_wrapper():
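    # Write a temporary shell wrapper around ssh that forces BatchMode and
    # honors $GIT_KEY / $GIT_SSH_OPTS; set_git_ssh() later points git at it
    # through the GIT_SSH / GIT_SSH_COMMAND environment variables.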
module_dir = get_module_path()
try:
# make sure we have full permission to the module_dir, which
# may not be the case if we're sudo'ing to a non-root user
if os.access(module_dir, os.W_OK | os.R_OK | os.X_OK):
fd, wrapper_path = tempfile.mkstemp(prefix=module_dir + '/')
else:
raise OSError
except (IOError, OSError):
fd, wrapper_path = tempfile.mkstemp()
fh = os.fdopen(fd, 'w+b')
template = b("""#!/bin/sh
if [ -z "$GIT_SSH_OPTS" ]; then
BASEOPTS=""
else
BASEOPTS=$GIT_SSH_OPTS
fi
# Let ssh fail rather than prompt
BASEOPTS="$BASEOPTS -o BatchMode=yes"
if [ -z "$GIT_KEY" ]; then
ssh $BASEOPTS "$@"
else
ssh -i "$GIT_KEY" -o IdentitiesOnly=yes $BASEOPTS "$@"
fi
""")
fh.write(template)
fh.close()
st = os.stat(wrapper_path)
os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
return wrapper_path
def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
# git_ssh_command will override git_ssh, so only older git needs it
os.environ["GIT_SSH"] = ssh_wrapper
# using shell to avoid 'noexec' issues if module temp dir is located in such a mount
os.environ["GIT_SSH_COMMAND"] = '%s %s' % (os.environ.get('SHELL', '/bin/sh'), ssh_wrapper)
if os.environ.get("GIT_KEY"):
del os.environ["GIT_KEY"]
if key_file:
os.environ["GIT_KEY"] = key_file
if os.environ.get("GIT_SSH_OPTS"):
del os.environ["GIT_SSH_OPTS"]
if ssh_opts:
os.environ["GIT_SSH_OPTS"] = ssh_opts
def get_version(module, git_path, dest, ref="HEAD"):
''' samples the version of the git repo '''
cmd = "%s rev-parse %s" % (git_path, ref)
rc, stdout, stderr = module.run_command(cmd, cwd=dest)
sha = to_native(stdout).rstrip('\n')
return sha
def get_submodule_versions(git_path, module, dest, version='HEAD'):
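    '''Return a dict mapping each submodule path to the sha that
    version resolves to inside that submodule.'''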
cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(
msg='Unable to determine hashes of submodules',
stdout=out,
stderr=err,
rc=rc)
submodules = {}
subm_name = None
for line in out.splitlines():
if line.startswith("Entering '"):
subm_name = line[10:-1]
elif len(line.strip()) == 40:
if subm_name is None:
module.fail_json()
submodules[subm_name] = line.strip()
subm_name = None
else:
module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
if subm_name is not None:
module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)
return submodules
def clone(git_path, module, repo, dest, remote, depth, version, bare,
reference, refspec, verify_commit):
''' makes a new git repo if it does not already exist '''
dest_dirname = os.path.dirname(dest)
try:
os.makedirs(dest_dirname)
except:
pass
cmd = [git_path, 'clone']
if bare:
cmd.append('--bare')
else:
cmd.extend(['--origin', remote])
if depth:
if version == 'HEAD' or refspec:
cmd.extend(['--depth', str(depth)])
elif is_remote_branch(git_path, module, dest, repo, version) \
or is_remote_tag(git_path, module, dest, repo, version):
cmd.extend(['--depth', str(depth)])
cmd.extend(['--branch', version])
else:
# only use depth if the remote object is branch or tag (i.e. fetchable)
module.warn("Ignoring depth argument. "
"Shallow clones are only available for "
"HEAD, branches, tags or in combination with refspec.")
if reference:
cmd.extend(['--reference', str(reference)])
cmd.extend([repo, dest])
module.run_command(cmd, check_rc=True, cwd=dest_dirname)
if bare:
if remote != 'origin':
module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
if refspec:
cmd = [git_path, 'fetch']
if depth:
cmd.extend(['--depth', str(depth)])
cmd.extend([remote, refspec])
module.run_command(cmd, check_rc=True, cwd=dest)
if verify_commit:
verify_commit_sign(git_path, module, dest, version)
def has_local_mods(module, git_path, dest, bare):
if bare:
return False
cmd = "%s status --porcelain" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=dest)
lines = stdout.splitlines()
lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
return len(lines) > 0
def reset(git_path, module, dest):
'''
Resets the index and working tree to HEAD.
Discards any changes to tracked files in working
tree since that commit.
'''
cmd = "%s reset --hard HEAD" % (git_path,)
return module.run_command(cmd, check_rc=True, cwd=dest)
def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
''' Return the difference between 2 versions '''
if before is None:
return {'prepared': '>> Newly checked out %s' % after}
elif before != after:
# Ensure we have the object we are referring to during git diff !
git_version_used = git_version(git_path, module)
fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)
cmd = '%s diff %s %s' % (git_path, before, after)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc == 0 and out:
return {'prepared': out}
elif rc == 0:
return {'prepared': '>> No visual differences between %s and %s' % (before, after)}
elif err:
return {'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err)}
else:
return {'prepared': '>> Failed to get proper diff between %s and %s' % (before, after)}
return {}
def get_remote_head(git_path, module, dest, version, remote, bare):
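    '''Resolve version (HEAD, branch, tag or sha) to the sha the remote
    currently has for it, using ls-remote.'''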
cloning = False
cwd = None
tag = False
if remote == module.params['repo']:
cloning = True
else:
cwd = dest
if version == 'HEAD':
if cloning:
# cloning the repo, just get the remote's HEAD version
cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
else:
head_branch = get_head_branch(git_path, module, dest, remote, bare)
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
elif is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
elif is_remote_tag(git_path, module, dest, remote, version):
tag = True
cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
else:
        # appears to be a sha1. Return it as-is, since there is no way to
        # check for a specific sha1 on the remote.
return version
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
if len(out) < 1:
module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc)
out = to_native(out)
if tag:
# Find the dereferenced tag if this is an annotated tag.
for tag in out.split('\n'):
if tag.endswith(version + '^{}'):
out = tag
break
elif tag.endswith(version):
out = tag
rev = out.split()[0]
return rev
def is_remote_tag(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if to_native(version, errors='surrogate_or_strict') in out:
return True
else:
return False
def get_branches(git_path, module, dest):
branches = []
cmd = '%s branch --no-color -a' % (git_path,)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err)
for line in out.split('\n'):
if line.strip():
branches.append(line.strip())
return branches
def get_annotated_tags(git_path, module, dest):
tags = []
cmd = [git_path, 'for-each-ref', 'refs/tags/', '--format', '%(objecttype):%(refname:short)']
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err)
for line in to_native(out).split('\n'):
if line.strip():
tagtype, tagname = line.strip().split(':')
if tagtype == 'tag':
tags.append(tagname)
return tags
def is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if to_native(version, errors='surrogate_or_strict') in out:
return True
else:
return False
def is_local_branch(git_path, module, dest, branch):
branches = get_branches(git_path, module, dest)
lbranch = '%s' % branch
if lbranch in branches:
return True
elif '* %s' % branch in branches:
return True
else:
return False
def is_not_a_branch(git_path, module, dest):
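    '''Return True if the checkout is currently on a detached HEAD.'''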
branches = get_branches(git_path, module, dest)
for branch in branches:
if branch.startswith('* ') and ('no branch' in branch or 'detached from' in branch):
return True
return False
def get_head_branch(git_path, module, dest, remote, bare=False):
'''
Determine what branch HEAD is associated with. This is partly
taken from lib/ansible/utils/__init__.py. It finds the correct
path to .git/HEAD and reads from that file the branch that HEAD is
associated with. In the case of a detached HEAD, this will look
up the branch in .git/refs/remotes/<remote>/HEAD.
'''
if bare:
repo_path = dest
else:
repo_path = os.path.join(dest, '.git')
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
git_conf = open(repo_path, 'rb')
for line in git_conf:
config_val = line.split(b(':'), 1)
if config_val[0].strip() == b('gitdir'):
gitdir = to_native(config_val[1].strip(), errors='surrogate_or_strict')
break
else:
# No repo path found
return ''
# There is a possibility the .git file to have an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
except (IOError, AttributeError):
# No repo path found
return ''
# Read .git/HEAD for the name of the branch.
# If we're in a detached HEAD state, look up the branch associated with
# the remote HEAD in .git/refs/remotes/<remote>/HEAD
headfile = os.path.join(repo_path, "HEAD")
if is_not_a_branch(git_path, module, dest):
headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
branch = head_splitter(headfile, remote, module=module, fail_on_error=True)
return branch
def get_remote_url(git_path, module, dest, remote):
'''Return URL of remote source for repo.'''
command = [git_path, 'ls-remote', '--get-url', remote]
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
# There was an issue getting remote URL, most likely
# command is not available in this version of Git.
return None
return to_native(out).rstrip('\n')
def set_remote_url(git_path, module, repo, dest, remote):
    '''Set the remote's URL to repo, returning whether the URL was changed.'''
# Return if remote URL isn't changing.
remote_url = get_remote_url(git_path, module, dest, remote)
if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo):
return False
command = [git_path, 'remote', 'set-url', remote, repo]
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
label = "set a new url %s for %s" % (repo, remote)
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
# Return False if remote_url is None to maintain previous behavior
# for Git versions prior to 1.7.5 that lack required functionality.
return remote_url is not None
def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used):
''' updates repo from remote sources '''
set_remote_url(git_path, module, repo, dest, remote)
commands = []
fetch_str = 'download remote objects and refs'
fetch_cmd = [git_path, 'fetch']
refspecs = []
if depth:
# try to find the minimal set of refs we need to fetch to get a
# successful checkout
currenthead = get_head_branch(git_path, module, dest, remote)
if refspec:
refspecs.append(refspec)
elif version == 'HEAD':
refspecs.append(currenthead)
elif is_remote_branch(git_path, module, dest, repo, version):
if currenthead != version:
# this workaround is only needed for older git versions
# 1.8.3 is broken, 1.9.x works
# ensure that remote branch is available as both local and remote ref
refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version))
else:
refspecs.append(version)
elif is_remote_tag(git_path, module, dest, repo, version):
refspecs.append('+refs/tags/' + version + ':refs/tags/' + version)
if refspecs:
            # depth is only honored when there are concrete refspecs to fetch;
            # if refspecs is empty (version is neither a branch nor a tag, so
            # it is presumably a commit hash) we fall back to a full fetch
            # below, otherwise we might not be able to check out that version
fetch_cmd.extend(['--depth', str(depth)])
if not depth or not refspecs:
# don't try to be minimalistic but do a full clone
# also do this if depth is given, but version is something that can't be fetched directly
if bare:
refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
else:
# ensure all tags are fetched
if git_version_used >= LooseVersion('1.9'):
fetch_cmd.append('--tags')
else:
# old git versions have a bug in --tags that prevents updating existing tags
commands.append((fetch_str, fetch_cmd + [remote]))
refspecs = ['+refs/tags/*:refs/tags/*']
if refspec:
refspecs.append(refspec)
fetch_cmd.extend([remote])
commands.append((fetch_str, fetch_cmd + refspecs))
for (label, command) in commands:
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command)
def submodules_fetch(git_path, module, remote, track_submodules, dest):
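    '''Fetch changes for all submodules and return True if any submodule
    is new or has updates that still need to be applied.'''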
changed = False
if not os.path.exists(os.path.join(dest, '.gitmodules')):
# no submodules
return changed
gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r')
for line in gitmodules_file:
# Check for new submodules
if not changed and line.strip().startswith('path'):
path = line.split('=', 1)[1].strip()
# Check that dest/path/.git exists
if not os.path.exists(os.path.join(dest, path, '.git')):
changed = True
# Check for updates to existing modules
if not changed:
# Fetch updates
begin = get_submodule_versions(git_path, module, dest)
cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to fetch submodules: %s" % out + err)
if track_submodules:
# Compare against submodule HEAD
# FIXME: determine this from .gitmodules
version = 'master'
after = get_submodule_versions(git_path, module, dest, '%s/%s' % (remote, version))
if begin != after:
changed = True
else:
# Compare against the superproject's expectation
cmd = [git_path, 'submodule', 'status']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err)
for line in out.splitlines():
if line[0] != ' ':
changed = True
break
return changed
def submodule_update(git_path, module, dest, track_submodules, force=False):
''' init and update any submodules '''
# get the valid submodule params
params = get_submodule_update_params(module, git_path, dest)
# skip submodule commands if .gitmodules is not present
if not os.path.exists(os.path.join(dest, '.gitmodules')):
return (0, '', '')
cmd = [git_path, 'submodule', 'sync']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if 'remote' in params and track_submodules:
cmd = [git_path, 'submodule', 'update', '--init', '--recursive', '--remote']
else:
cmd = [git_path, 'submodule', 'update', '--init', '--recursive']
if force:
cmd.append('--force')
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
return (rc, out, err)
def set_remote_branch(git_path, module, dest, remote, version, depth):
"""set refs for the remote branch version
This assumes the branch does not yet exist locally and is therefore also not checked out.
Can't use git remote set-branches, as it is not available in git 1.7.1 (centos6)
"""
branchref = "+refs/heads/%s:refs/heads/%s" % (version, version)
branchref += ' +refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version)
cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, branchref)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)
def switch_version(git_path, module, dest, remote, version, verify_commit, depth):
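    '''Check out the requested version (branch, tag or sha), resetting
    tracked branches to the state of the remote.'''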
cmd = ''
if version == 'HEAD':
branch = get_head_branch(git_path, module, dest, remote)
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % branch,
stdout=out, stderr=err, rc=rc)
cmd = "%s reset --hard %s/%s --" % (git_path, remote, branch)
else:
# FIXME check for local_branch first, should have been fetched already
if is_remote_branch(git_path, module, dest, remote, version):
if depth and not is_local_branch(git_path, module, dest, version):
# git clone --depth implies --single-branch, which makes
# the checkout fail if the version changes
# fetch the remote branch, to be able to check it out next
set_remote_branch(git_path, module, dest, remote, version, depth)
if not is_local_branch(git_path, module, dest, version):
cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
else:
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % version, stdout=out, stderr=err, rc=rc)
cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
else:
cmd = "%s checkout --force %s" % (git_path, version)
(rc, out1, err1) = module.run_command(cmd, cwd=dest)
if rc != 0:
if version != 'HEAD':
module.fail_json(msg="Failed to checkout %s" % (version),
stdout=out1, stderr=err1, rc=rc, cmd=cmd)
else:
module.fail_json(msg="Failed to checkout branch %s" % (branch),
stdout=out1, stderr=err1, rc=rc, cmd=cmd)
if verify_commit:
verify_commit_sign(git_path, module, dest, version)
return (rc, out1, err1)
def verify_commit_sign(git_path, module, dest, version):
if version in get_annotated_tags(git_path, module, dest):
git_sub = "verify-tag"
else:
git_sub = "verify-commit"
cmd = "%s %s %s" % (git_path, git_sub, version)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
return (rc, out, err)
def git_version(git_path, module):
"""return the installed version of git"""
cmd = "%s --version" % git_path
(rc, out, err) = module.run_command(cmd)
if rc != 0:
# one could fail_json here, but the version info is not that important,
# so let's try to fail only on actual git commands
return None
rematch = re.search('git version (.*)$', to_native(out))
if not rematch:
return None
return LooseVersion(rematch.groups()[0])
def git_archive(git_path, module, dest, archive, archive_fmt, version):
""" Create git archive in given source directory """
cmd = "%s archive --format=%s --output=%s %s" \
% (git_path, archive_fmt, archive, version)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to perform archive operation",
details="Git archive command failed to create "
"archive %s using %s directory."
"Error: %s" % (archive, dest, err))
return rc, out, err
def create_archive(git_path, module, dest, archive, version, repo, result):
""" Helper function for creating archive using git_archive """
all_archive_fmt = {'.zip': 'zip', '.gz': 'tar.gz', '.tar': 'tar',
'.tgz': 'tgz'}
_, archive_ext = os.path.splitext(archive)
archive_fmt = all_archive_fmt.get(archive_ext, None)
if archive_fmt is None:
module.fail_json(msg="Unable to get file extension from "
"archive file name : %s" % archive,
details="Please specify archive as filename with "
"extension. File extension can be one "
"of ['tar', 'tar.gz', 'zip', 'tgz']")
repo_name = repo.split("/")[-1].replace(".git", "")
if os.path.exists(archive):
# If git archive file exists, then compare it with new git archive file.
# if match, do nothing
# if does not match, then replace existing with temp archive file.
tempdir = tempfile.mkdtemp()
new_archive_dest = os.path.join(tempdir, repo_name)
new_archive = new_archive_dest + '.' + archive_fmt
git_archive(git_path, module, dest, new_archive, archive_fmt, version)
        # filecmp is supposed to be more efficient than an md5sum checksum
if filecmp.cmp(new_archive, archive):
result.update(changed=False)
# Cleanup before exiting
try:
                shutil.rmtree(tempdir)
except OSError:
pass
else:
try:
shutil.move(new_archive, archive)
                shutil.rmtree(tempdir)
result.update(changed=True)
except OSError as e:
module.fail_json(msg="Failed to move %s to %s" %
(new_archive, archive),
details="Error occured while moving : %s"
% to_native(e))
else:
# Perform archive from local directory
git_archive(git_path, module, dest, archive, archive_fmt, version)
result.update(changed=True)
# ===========================================
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(type='path'),
repo=dict(required=True, aliases=['name']),
version=dict(default='HEAD'),
remote=dict(default='origin'),
refspec=dict(default=None),
reference=dict(default=None),
force=dict(default='no', type='bool'),
depth=dict(default=None, type='int'),
clone=dict(default='yes', type='bool'),
update=dict(default='yes', type='bool'),
verify_commit=dict(default='no', type='bool'),
accept_hostkey=dict(default='no', type='bool'),
key_file=dict(default=None, type='path', required=False),
ssh_opts=dict(default=None, required=False),
executable=dict(default=None, type='path'),
bare=dict(default='no', type='bool'),
recursive=dict(default='yes', type='bool'),
track_submodules=dict(default='no', type='bool'),
umask=dict(default=None, type='raw'),
archive=dict(type='path'),
),
supports_check_mode=True
)
dest = module.params['dest']
repo = module.params['repo']
version = module.params['version']
remote = module.params['remote']
refspec = module.params['refspec']
force = module.params['force']
depth = module.params['depth']
update = module.params['update']
allow_clone = module.params['clone']
bare = module.params['bare']
verify_commit = module.params['verify_commit']
reference = module.params['reference']
git_path = module.params['executable'] or module.get_bin_path('git', True)
key_file = module.params['key_file']
ssh_opts = module.params['ssh_opts']
umask = module.params['umask']
archive = module.params['archive']
result = dict(changed=False, warnings=list())
if module.params['accept_hostkey']:
if ssh_opts is not None:
if "-o StrictHostKeyChecking=no" not in ssh_opts:
ssh_opts += " -o StrictHostKeyChecking=no"
else:
ssh_opts = "-o StrictHostKeyChecking=no"
# evaluate and set the umask before doing anything else
if umask is not None:
if not isinstance(umask, string_types):
module.fail_json(msg="umask must be defined as a quoted octal integer")
try:
umask = int(umask, 8)
except:
module.fail_json(msg="umask must be an octal integer",
details=str(sys.exc_info()[1]))
os.umask(umask)
# Certain features such as depth require a file:/// protocol for path based urls
# so force a protocol here ...
if repo.startswith('/'):
repo = 'file://' + repo
# We screenscrape a huge amount of git commands so use C locale anytime we
# call run_command()
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
gitconfig = None
if not dest and allow_clone:
module.fail_json(msg="the destination directory must be specified unless clone=no")
elif dest:
dest = os.path.abspath(dest)
if bare:
gitconfig = os.path.join(dest, 'config')
else:
gitconfig = os.path.join(dest, '.git', 'config')
# create a wrapper script and export
# GIT_SSH=<path> as an environment variable
# for git to use the wrapper script
ssh_wrapper = write_ssh_wrapper()
set_git_ssh(ssh_wrapper, key_file, ssh_opts)
module.add_cleanup_file(path=ssh_wrapper)
git_version_used = git_version(git_path, module)
if depth is not None and git_version_used < LooseVersion('1.9.1'):
result['warnings'].append("Your git version is too old to fully support the depth argument. Falling back to full checkouts.")
depth = None
recursive = module.params['recursive']
track_submodules = module.params['track_submodules']
result.update(before=None)
local_mods = False
need_fetch = True
if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
# if there is no git configuration, do a clone operation unless:
# * the user requested no clone (they just want info)
# * we're doing a check mode test
# In those cases we do an ls-remote
if module.check_mode or not allow_clone:
remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
result.update(changed=True, after=remote_head)
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
module.exit_json(**result)
# there's no git config, so clone
clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit)
need_fetch = False
elif not update:
# Just return having found a repo already in the dest path
# this does no checking that the repo is the actual repo
# requested.
result['before'] = get_version(module, git_path, dest)
result.update(after=result['before'])
module.exit_json(**result)
else:
# else do a pull
local_mods = has_local_mods(module, git_path, dest, bare)
result['before'] = get_version(module, git_path, dest)
if local_mods:
# failure should happen regardless of check mode
if not force:
module.fail_json(msg="Local modifications exist in repository (force=no).", **result)
# if force and in non-check mode, do a reset
if not module.check_mode:
reset(git_path, module, dest)
result.update(changed=True, msg='Local modifications exist.')
# exit if already at desired sha version
if module.check_mode:
remote_url = get_remote_url(git_path, module, dest, remote)
remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(remote_url) != unfrackgitpath(repo)
else:
remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
result.update(remote_url_changed=remote_url_changed)
if module.check_mode:
remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
result.update(changed=(result['before'] != remote_head or remote_url_changed), after=remote_head)
# FIXME: This diff should fail since the new remote_head is not fetched yet?!
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
module.exit_json(**result)
else:
fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used)
result['after'] = get_version(module, git_path, dest)
# switch to version specified regardless of whether
# we got new revisions from the repository
if not bare:
switch_version(git_path, module, dest, remote, version, verify_commit, depth)
# Deal with submodules
submodules_updated = False
if recursive and not bare:
submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
if submodules_updated:
result.update(submodules_changed=submodules_updated)
if module.check_mode:
result.update(changed=True, after=remote_head)
module.exit_json(**result)
# Switch to version specified
submodule_update(git_path, module, dest, track_submodules, force=force)
# determine if we changed anything
result['after'] = get_version(module, git_path, dest)
if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
result.update(changed=True)
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
if archive:
# Git archive is not supported by all git servers, so
# we will first clone and perform git archive from local directory
if module.check_mode:
result.update(changed=True)
module.exit_json(**result)
create_archive(git_path, module, dest, archive, version, repo, result)
# cleanup the wrapper script
if ssh_wrapper:
try:
os.remove(ssh_wrapper)
except OSError:
# No need to fail if the file already doesn't exist
pass
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,742,812,333,051,799,000 | 37.097458 | 133 | 0.592059 | false |
tectronics/cortex-vfx | test/IECore/ThreadingTest.py | 5 | 10410 | ##########################################################################
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import time
import threading
import random
import os
import IECore
class ThreadingTest( unittest.TestCase ) :
def callSomeThings( self, things, args=(), kwArgs=(), threaded=False, iterations=1 ) :
for i in range( 0, iterations ) :
threads = []
for j in range( 0, len( things ) ) :
a = args[j] if args else ()
kwa = kwArgs[j] if kwArgs else {}
if threaded :
t = threading.Thread( target=things[j], args=a, kwargs=kwa )
t.start()
threads.append( t )
else :
things[j]( *a, **kwa )
for t in threads :
t.join()
def testThreadedOpGains( self ) :
## Checks that we actually get a speedup by running a bunch of slow
# C++ ops in parallel.
ops = []
kwArgs = []
for i in range( 0, 4 ) :
ops.append( IECore.ParticleMeshOp() )
kwArgs.append( {
"filename" : "test/IECore/data/pdcFiles/particleMesh.pdc",
"useRadiusAttribute" : False,
"radius" : 0.25,
"threshold" : 1,
"bound" : IECore.Box3f( IECore.V3f( -5, -7, -2 ), IECore.V3f( 3, 3, 3 ) ),
"resolution" : IECore.V3i( 80, 80, 80 ),
} )
tStart = time.time()
self.callSomeThings( ops, kwArgs=kwArgs, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( ops, kwArgs=kwArgs, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # may fail on single core machines or machines under varying load
def testThreadedReaders( self ) :
## Checks that we can read a bunch of files in parallel, even when one
# of the Readers is implemented in python. We're using the CachedReader
# here as it forces a call to Reader::create when the GIL isn't held yet.
args = [
( "test/IECore/data/exrFiles/ramp.exr", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/exrFiles/checkerAnimated.0006.exr", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/tiff/toTrace.tif", ),
( "test/IECore/data/tiff/toTraceThinned.tif", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/exrFiles/checkerAnimated.0006.exr", ),
( "test/IECore/data/exrFiles/checkerAnimated.0006.exr", ),
( "test/IECore/data/tiff/toTraceThinned.tif", ),
]
sp = IECore.SearchPath( "./", ":" )
calls = [ lambda f : IECore.CachedReader( sp, 1024 * 1024 * 10 ).read( f ) ] * len( args )
self.callSomeThings( calls, args, threaded=True )
def testMixedCPPAndPython( self ) :
## Checks that we can mix a bunch of C++ and python ops concurrently
# without crashing
ops = []
kwArgs = []
for i in range( 0, 4 ) :
ops.append( IECore.ParticleMeshOp() )
kwArgs.append( {
"filename" : "test/IECore/data/pdcFiles/particleMesh.pdc",
"useRadiusAttribute" : False,
"radius" : 0.25,
"threshold" : 1,
"bound" : IECore.Box3f( IECore.V3f( -5, -7, -2 ), IECore.V3f( 3, 3, 3 ) ),
"resolution" : IECore.V3i( 80, 80, 80 ),
} )
ops.append( IECore.ClassLsOp() )
kwArgs.append( { "type" : "op" } )
self.callSomeThings( ops, kwArgs=kwArgs, threaded=True, iterations=5 )
def testReadingGains( self ) :
## Checks that we can use a bunch of readers in different threads and
# that we get a speedup of some sort doing that.
args = [
( "test/IECore/data/exrFiles/ramp.exr", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/pdcFiles/particleMesh.pdc", ),
( "test/IECore/data/cobFiles/ball.cob", ),
( "test/IECore/data/jpg/21mm.jpg", ),
( "test/IECore/data/jpg/exif.jpg", ),
( "test/IECore/data/dpx/ramp.dpx", ),
]
calls = [ lambda f : IECore.Reader.create( f ).read() ] * len( args )
tStart = time.time()
self.callSomeThings( calls, args, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( calls, args, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def testWritingGains( self ) :
image = IECore.Reader.create( "test/IECore/data/jpg/21mm.jpg" ).read()
def write( o, f ) :
IECore.Writer.create( o, f ).write()
calls = []
for i in range( 0, 4 ) :
fileName = "test/IECore/test%d.jpg" % i
calls.append( IECore.curry( write, image, fileName ) )
tStart = time.time()
self.callSomeThings( calls, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( calls, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def testCachedReaderConcurrency( self ) :
args = [
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/cobFiles/intDataTen.cob", ),
( "test/IECore/data/cobFiles/intDataTen.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
]
cachedReader = IECore.CachedReader( IECore.SearchPath( "./", ":" ), 1024 * 1024 * 50 )
calls = [ lambda f : cachedReader.read( f ) ] * len( args )
for i in range( 0, 5 ) :
cachedReader.clear()
self.callSomeThings( calls, args=args, threaded=True )
def testCachedReaderGains( self ) :
args = [
( "test/IECore/data/jpg/21mm.jpg", ),
( "test/IECore/data/jpg/exif.jpg", ),
( "test/IECore/data/jpg/greyscaleCheckerBoard.jpg", ),
( "test/IECore/data/dpx/ramp.dpx", ),
] * 4
cachedReader = IECore.CachedReader( IECore.SearchPath( "./", ":" ), 1024 * 1024 * 50 )
calls = [ lambda f : cachedReader.read( f ) ] * len( args )
tStart = time.time()
cachedReader.clear()
self.callSomeThings( calls, args=args, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
cachedReader.clear()
self.callSomeThings( calls, args=args, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def testPythonColorConverterWithThread( self ) :
def NewSRGBToLinear( inputColorSpace, outputColorSpace ) :
converter = IECore.SRGBToLinearOp()
return converter
IECore.ColorSpaceTransformOp.registerConversion(
"newSRGB", "linear", NewSRGBToLinear
)
runThread = True
def test():
while runThread :
pass
newThread = threading.Thread(target=test)
newThread.start()
reader = IECore.Reader.create( "test/IECore/data/cinFiles/uvMap.512x256.cin" )
reader['colorSpace'] = 'newSRGB'
reader.read()
runThread = False
newThread.join()
def testInterpolatedCacheGains( self ) :
numObjects = 100
numAttrs = 2
def createCache( fileName ) :
data = IECore.V3fVectorData( [ IECore.V3f( 1 ) ] * 50000 )
cache = IECore.AttributeCache( fileName, IECore.IndexedIO.OpenMode.Write )
for i in range( 0, numObjects ) :
for j in range( 0, numAttrs ) :
cache.write( "object%d" % i, "attr%d" % j, data )
createCache( "test/IECore/interpolatedCache.0250.fio" )
createCache( "test/IECore/interpolatedCache.0500.fio" )
cache = IECore.InterpolatedCache(
"test/IECore/interpolatedCache.####.fio",
IECore.InterpolatedCache.Interpolation.Linear,
)
calls = []
for i in range( 0, 200 ) :
calls.append(
IECore.curry(
cache.read,
1.5,
"object%d" % random.uniform( 0, numObjects ),
"attr%d" % random.uniform( 0, numAttrs )
)
)
tStart = time.time()
self.callSomeThings( calls, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( calls, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def tearDown( self ) :
for f in [
"test/IECore/test0.jpg",
"test/IECore/test1.jpg",
"test/IECore/test2.jpg",
"test/IECore/test3.jpg",
"test/IECore/interpolatedCache.0250.fio",
"test/IECore/interpolatedCache.0500.fio",
] :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -3,644,985,725,074,588,700 | 30.737805 | 143 | 0.658405 | false |
ngnrsaa/qflex | qflexcirq/qflex_cirq_example.py | 1 | 4364 | # The interface between Cirq and the Python interface to the C++ QFlex
import sys, os
sys.path.insert(
1, os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + '/../'))
import qflexcirq.interface.qflex_simulator as qsim
import qflexcirq.interface.qflex_virtual_device as qdevice
import qflexcirq.interface.qflex_grid as qgrid
import qflexcirq.interface.qflex_circuit as qcirc
import qflexcirq.interface.qflex_order as qorder
import qflexcirq.utils as qflexutils
from qflexcirq import qflex
"""
Example HOWTO
Running the example requires a compiled version of QFlex.
Two possibilities are available:
1. compilation from a clone of the github repository
2. pip installing the qflexcirq package
In the examples below different simulation configurations are illustrated.
A configuration includes three types of information: grid, order of tensor
contractions, quantum circuit. For example, config_small or config_large use
different grid arrangements, different circuits and different input states.
When using the pip-installed version of qflexcirq, the
!!! config files are not copied onto the local machine !!!
Config files have to be explicitly downloaded from the
github repository. Correspondingly, the paths in the configurations below need
to be adapted to where the config files are stored.
"""
config_small = {
'circuit_filename': 'config/circuits/rectangular_2x2_1-2-1_0.txt',
'ordering_filename': 'config/ordering/rectangular_2x2.txt',
'grid_filename': 'config/grid/rectangular_2x2.txt',
'final_state': "0110"
}
config_mem_crash = {
'circuit_filename': "config/circuits/bristlecone_70_1-40-1_0.txt",
'ordering_filename': "config/ordering/bristlecone_70.txt",
'grid_filename': 'config/grid/bristlecone_70.txt',
'final_state': "1" * 70
}
config_large = {
'circuit_filename': "config/circuits/bristlecone_70_1-16-1_0.txt",
'ordering_filename': "config/ordering/bristlecone_70.txt",
'grid_filename': 'config/grid/bristlecone_70.txt',
'final_state': "1" * 70
}
config_sycamore = {
'circuit_filename': "config/circuits/sycamore_53_4_0.txt",
'ordering_filename': "config/ordering/sycamore_53.txt",
'grid_filename': 'config/grid/sycamore_53.txt',
'final_state': "1" * 53
}
def run_qflex_simulator(config):
my_grid = qgrid.QFlexGrid.from_existing_file(config['grid_filename'])
my_device = qdevice.QFlexVirtualDevice(qflex_grid=my_grid)
# The qubits are collected and indexed from the underlying grid_string
# that was passed as constructor to the Device
my_qubits = my_device.get_indexed_grid_qubits()
# Take a QFlex circuit and generate a Cirq circuit from it
# The Cirq circuit will be afterwards transformed into a Qflex circuit
# You can construct a Cirq circuit from an existing QFlex circuit
# Note that circuits provided in files were designed for a specific arrangement
my_circuit = qflexutils.GetCircuitOfMoments(config["circuit_filename"],
my_qubits)
my_order = qorder.QFlexOrder.from_existing_file(config["ordering_filename"])
circuit_on_device = qcirc.QFlexCircuit(cirq_circuit=my_circuit,
device=my_device,
qflex_order=my_order)
print("\nRunning QFlex simulation\n")
my_sim = qsim.QFlexSimulator()
myres = my_sim.compute_amplitudes(program=circuit_on_device,
bitstrings=[config['final_state']])
print(myres)
def run_pybind_interface(config):
print("\nRunning Pybind Interface\n")
print(qflex.simulate(config))
def main():
#
print("\n\n === Simulation 1" + str(config_small))
run_qflex_simulator(config_small)
run_pybind_interface(config_small)
print("\n\n === Simulation 2" + str(config_large))
run_qflex_simulator(config_large)
run_pybind_interface(config_large)
print("\n\n === Simulation 3" + str(config_sycamore))
run_qflex_simulator(config_sycamore)
run_pybind_interface(config_sycamore)
#
# TODO: This simulation fails due to insufficient memory
#
# print(" === Simulation 3" + str(config_mem_crash))
# run_qflex_simulator(config_mem_crash)
# run_pybind_interface(config_mem_crash)
if __name__ == "__main__":
main()
| apache-2.0 | 5,580,065,536,052,286,000 | 34.193548 | 83 | 0.695921 | false |
areitz/pants | src/python/pants/goal/workspace.py | 17 | 1878 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from abc import abstractmethod
from pants.base.build_environment import get_buildroot
from pants.scm.scm import Scm
from pants.util.meta import AbstractClass
class Workspace(AbstractClass):
"""Tracks the state of the current workspace."""
class WorkspaceError(Exception):
"""Indicates a problem reading the local workspace."""
@abstractmethod
def touched_files(self, parent):
"""Returns the paths modified between the parent state and the current workspace state."""
@abstractmethod
def changes_in(self, rev_or_range):
"""Returns the paths modified by some revision, revision range or other identifier."""
class ScmWorkspace(Workspace):
"""A workspace that uses an Scm to determine the touched files."""
def __init__(self, scm):
super(ScmWorkspace, self).__init__()
if scm is None:
raise self.WorkspaceError('Cannot figure out what changed without a configured '
'source-control system.')
self._scm = scm
def touched_files(self, parent):
try:
return self._scm.changed_files(from_commit=parent,
include_untracked=True,
relative_to=get_buildroot())
except Scm.ScmException as e:
raise self.WorkspaceError("Problem detecting changed files.", e)
def changes_in(self, rev_or_range):
try:
return self._scm.changes_in(rev_or_range, relative_to=get_buildroot())
except Scm.ScmException as e:
raise self.WorkspaceError("Problem detecting changes in {}.".format(rev_or_range), e)
| apache-2.0 | 470,163,342,238,584,450 | 34.433962 | 94 | 0.678914 | false |
takeshineshiro/heat | heat/engine/resources/openstack/heat/software_deployment.py | 1 | 25832 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import six
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.heat import resource_group
from heat.engine.resources.openstack.heat import software_config as sc
from heat.engine.resources import signal_responder
from heat.engine import support
from heat.rpc import api as rpc_api
cfg.CONF.import_opt('default_deployment_signal_transport',
'heat.common.config')
LOG = logging.getLogger(__name__)
class SoftwareDeployment(signal_responder.SignalResponder):
'''
This resource associates a server with some configuration which
is to be deployed to that server.
A deployment allows input values to be specified which map to the inputs
schema defined in the config resource. These input values are interpreted
by the configuration tool in a tool-specific manner.
Whenever this resource goes to an IN_PROGRESS state, it creates an
ephemeral config that includes the inputs values plus a number of extra
inputs which have names prefixed with deploy_. The extra inputs relate
to the current state of the stack, along with the information and
credentials required to signal back the deployment results.
Unless signal_transport=NO_SIGNAL, this resource will remain in an
IN_PROGRESS state until the server signals it with the output values
for that deployment. Those output values are then available as resource
attributes, along with the default attributes deploy_stdout,
deploy_stderr and deploy_status_code.
Specifying actions other than the default CREATE and UPDATE will result
in the deployment being triggered in those actions. For example this would
allow cleanup configuration to be performed during actions SUSPEND and
DELETE. A config could be designed to only work with some specific
actions, or a config can read the value of the deploy_action input to
allow conditional logic to perform different configuration for different
actions.
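
    A minimal illustrative snippet of how this resource is typically used in
    a template (resource names and input values here are examples only):

      some_deployment:
        type: OS::Heat::SoftwareDeployment
        properties:
          config: {get_resource: some_config}
          server: {get_resource: some_server}
          input_values:
            example_input: example_value
          signal_transport: HEAT_SIGNAL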
'''
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
CONFIG, SERVER, INPUT_VALUES,
DEPLOY_ACTIONS, NAME, SIGNAL_TRANSPORT
) = (
'config', 'server', 'input_values',
'actions', 'name', 'signal_transport'
)
ALLOWED_DEPLOY_ACTIONS = (
resource.Resource.CREATE,
resource.Resource.UPDATE,
resource.Resource.DELETE,
resource.Resource.SUSPEND,
resource.Resource.RESUME,
)
ATTRIBUTES = (
STDOUT, STDERR, STATUS_CODE
) = (
'deploy_stdout', 'deploy_stderr', 'deploy_status_code'
)
DERIVED_CONFIG_INPUTS = (
DEPLOY_SERVER_ID, DEPLOY_ACTION,
DEPLOY_SIGNAL_ID, DEPLOY_STACK_ID,
DEPLOY_RESOURCE_NAME, DEPLOY_AUTH_URL,
DEPLOY_USERNAME, DEPLOY_PASSWORD,
DEPLOY_PROJECT_ID, DEPLOY_USER_ID,
DEPLOY_SIGNAL_VERB, DEPLOY_SIGNAL_TRANSPORT,
DEPLOY_QUEUE_ID
) = (
'deploy_server_id', 'deploy_action',
'deploy_signal_id', 'deploy_stack_id',
'deploy_resource_name', 'deploy_auth_url',
'deploy_username', 'deploy_password',
'deploy_project_id', 'deploy_user_id',
'deploy_signal_verb', 'deploy_signal_transport',
'deploy_queue_id'
)
SIGNAL_TRANSPORTS = (
CFN_SIGNAL, TEMP_URL_SIGNAL, HEAT_SIGNAL, NO_SIGNAL,
ZAQAR_SIGNAL
) = (
'CFN_SIGNAL', 'TEMP_URL_SIGNAL', 'HEAT_SIGNAL', 'NO_SIGNAL',
'ZAQAR_SIGNAL'
)
properties_schema = {
CONFIG: properties.Schema(
properties.Schema.STRING,
_('ID of software configuration resource to execute when '
'applying to the server.'),
update_allowed=True
),
SERVER: properties.Schema(
properties.Schema.STRING,
_('ID of resource to apply configuration to. '
'Normally this should be a Nova server ID.'),
required=True,
),
INPUT_VALUES: properties.Schema(
properties.Schema.MAP,
_('Input values to apply to the software configuration on this '
'server.'),
update_allowed=True
),
DEPLOY_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('Which lifecycle actions of the deployment resource will result '
'in this deployment being triggered.'),
update_allowed=True,
default=[resource.Resource.CREATE, resource.Resource.UPDATE],
constraints=[constraints.AllowedValues(ALLOWED_DEPLOY_ACTIONS)]
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the derived config associated with this deployment. '
'This is used to apply a sort order to the list of '
'configurations currently deployed to a server.'),
),
SIGNAL_TRANSPORT: properties.Schema(
properties.Schema.STRING,
_('How the server should signal to heat with the deployment '
'output values. CFN_SIGNAL will allow an HTTP POST to a CFN '
'keypair signed URL. TEMP_URL_SIGNAL will create a '
'Swift TempURL to be signaled via HTTP PUT. HEAT_SIGNAL '
'will allow calls to the Heat API resource-signal using the '
              'provided keystone credentials. ZAQAR_SIGNAL will create a '
'dedicated zaqar queue to be signaled using the provided '
'keystone credentials. NO_SIGNAL will result in the resource '
'going to the COMPLETE state without waiting for any signal.'),
default=cfg.CONF.default_deployment_signal_transport,
constraints=[
constraints.AllowedValues(SIGNAL_TRANSPORTS),
]
),
}
attributes_schema = {
STDOUT: attributes.Schema(
_("Captured stdout from the configuration execution."),
type=attributes.Schema.STRING
),
STDERR: attributes.Schema(
_("Captured stderr from the configuration execution."),
type=attributes.Schema.STRING
),
STATUS_CODE: attributes.Schema(
_("Returned status code from the configuration execution"),
type=attributes.Schema.STRING
),
}
default_client_name = 'heat'
no_signal_actions = ()
# No need to make metadata_update() calls since deployments have a
# dedicated API for changing state on signals
signal_needs_metadata_updates = False
def _signal_transport_cfn(self):
return self.properties[
self.SIGNAL_TRANSPORT] == self.CFN_SIGNAL
def _signal_transport_heat(self):
return self.properties[
self.SIGNAL_TRANSPORT] == self.HEAT_SIGNAL
def _signal_transport_none(self):
return self.properties[
self.SIGNAL_TRANSPORT] == self.NO_SIGNAL
def _signal_transport_temp_url(self):
return self.properties[
self.SIGNAL_TRANSPORT] == self.TEMP_URL_SIGNAL
def _signal_transport_zaqar(self):
return self.properties.get(
self.SIGNAL_TRANSPORT) == self.ZAQAR_SIGNAL
def _build_properties(self, properties, config_id, action):
props = {
'config_id': config_id,
'action': action,
}
if self._signal_transport_none():
props['status'] = SoftwareDeployment.COMPLETE
props['status_reason'] = _('Not waiting for outputs signal')
else:
props['status'] = SoftwareDeployment.IN_PROGRESS
props['status_reason'] = _('Deploy data available')
return props
def _delete_derived_config(self, derived_config_id):
try:
self.rpc_client().delete_software_config(
self.context, derived_config_id)
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'NotFound')
def _get_derived_config(self, action, source_config):
derived_params = self._build_derived_config_params(
action, source_config)
derived_config = self.rpc_client().create_software_config(
self.context, **derived_params)
return derived_config[rpc_api.SOFTWARE_CONFIG_ID]
def _handle_action(self, action):
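        # For lifecycle actions this deployment reacts to, build a derived
        # config and create or update the matching software deployment; the
        # deployment dict is returned unless the NO_SIGNAL transport is used.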
if self.properties.get(self.CONFIG):
config = self.rpc_client().show_software_config(
self.context, self.properties.get(self.CONFIG))
else:
config = {}
if (action not in self.properties[self.DEPLOY_ACTIONS]
and not config.get(
rpc_api.SOFTWARE_CONFIG_GROUP) == 'component'):
return
props = self._build_properties(
self.properties,
self._get_derived_config(action, config),
action)
if self.resource_id is None:
sd = self.rpc_client().create_software_deployment(
self.context,
server_id=self.properties[SoftwareDeployment.SERVER],
stack_user_project_id=self.stack.stack_user_project_id,
**props)
self.resource_id_set(sd[rpc_api.SOFTWARE_DEPLOYMENT_ID])
else:
sd = self.rpc_client().show_software_deployment(
self.context, self.resource_id)
prev_derived_config = sd[rpc_api.SOFTWARE_DEPLOYMENT_CONFIG_ID]
sd = self.rpc_client().update_software_deployment(
self.context,
deployment_id=self.resource_id,
**props)
if prev_derived_config:
self._delete_derived_config(prev_derived_config)
if not self._signal_transport_none():
# NOTE(pshchelo): sd is a simple dict, easy to serialize,
# does not need fixing re LP bug #1393268
return sd
def _check_complete(self):
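        # Poll the deployment status: done when COMPLETE, raise when FAILED.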
sd = self.rpc_client().show_software_deployment(
self.context, self.resource_id)
status = sd[rpc_api.SOFTWARE_DEPLOYMENT_STATUS]
if status == SoftwareDeployment.COMPLETE:
return True
elif status == SoftwareDeployment.FAILED:
status_reason = sd[rpc_api.SOFTWARE_DEPLOYMENT_STATUS_REASON]
message = _("Deployment to server "
"failed: %s") % status_reason
LOG.error(message)
exc = exception.Error(message)
raise exc
def empty_config(self):
return ''
def _build_derived_config_params(self, action, source):
scl = sc.SoftwareConfig
derived_inputs = self._build_derived_inputs(action, source)
derived_options = self._build_derived_options(action, source)
derived_config = self._build_derived_config(
action, source, derived_inputs, derived_options)
derived_name = self.properties.get(self.NAME) or source.get(scl.NAME)
return {
scl.GROUP: source.get(scl.GROUP) or 'Heat::Ungrouped',
scl.CONFIG: derived_config or self.empty_config(),
scl.OPTIONS: derived_options,
scl.INPUTS: derived_inputs,
scl.OUTPUTS: source.get(scl.OUTPUTS),
scl.NAME: derived_name or self.physical_resource_name()
}
def _build_derived_config(self, action, source,
derived_inputs, derived_options):
return source.get(sc.SoftwareConfig.CONFIG)
def _build_derived_options(self, action, source):
return source.get(sc.SoftwareConfig.OPTIONS)
def _build_derived_inputs(self, action, source):
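        # Merge the config's declared inputs with the supplied input_values,
        # then append the standard deploy_* inputs that describe this
        # deployment and how the server should signal its results back.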
scl = sc.SoftwareConfig
inputs = copy.deepcopy(source.get(scl.INPUTS)) or []
input_values = dict(self.properties.get(self.INPUT_VALUES) or {})
for inp in inputs:
input_key = inp[scl.NAME]
inp['value'] = input_values.pop(input_key, inp[scl.DEFAULT])
# for any input values that do not have a declared input, add
# a derived declared input so that they can be used as config
# inputs
for inpk, inpv in input_values.items():
inputs.append({
scl.NAME: inpk,
scl.TYPE: 'String',
'value': inpv
})
inputs.extend([{
scl.NAME: self.DEPLOY_SERVER_ID,
scl.DESCRIPTION: _('ID of the server being deployed to'),
scl.TYPE: 'String',
'value': self.properties[self.SERVER]
}, {
scl.NAME: self.DEPLOY_ACTION,
scl.DESCRIPTION: _('Name of the current action being deployed'),
scl.TYPE: 'String',
'value': action
}, {
scl.NAME: self.DEPLOY_STACK_ID,
scl.DESCRIPTION: _('ID of the stack this deployment belongs to'),
scl.TYPE: 'String',
'value': self.stack.identifier().stack_path()
}, {
scl.NAME: self.DEPLOY_RESOURCE_NAME,
scl.DESCRIPTION: _('Name of this deployment resource in the '
'stack'),
scl.TYPE: 'String',
'value': self.name
}, {
scl.NAME: self.DEPLOY_SIGNAL_TRANSPORT,
scl.DESCRIPTION: _('How the server should signal to heat with '
'the deployment output values.'),
scl.TYPE: 'String',
'value': self.properties[self.SIGNAL_TRANSPORT]
}])
if self._signal_transport_cfn():
inputs.append({
scl.NAME: self.DEPLOY_SIGNAL_ID,
scl.DESCRIPTION: _('ID of signal to use for signaling '
'output values'),
scl.TYPE: 'String',
'value': self._get_ec2_signed_url()
})
inputs.append({
scl.NAME: self.DEPLOY_SIGNAL_VERB,
scl.DESCRIPTION: _('HTTP verb to use for signaling '
'output values'),
scl.TYPE: 'String',
'value': 'POST'
})
elif self._signal_transport_temp_url():
inputs.append({
scl.NAME: self.DEPLOY_SIGNAL_ID,
scl.DESCRIPTION: _('ID of signal to use for signaling '
'output values'),
scl.TYPE: 'String',
'value': self._get_swift_signal_url()
})
inputs.append({
scl.NAME: self.DEPLOY_SIGNAL_VERB,
scl.DESCRIPTION: _('HTTP verb to use for signaling '
'output values'),
scl.TYPE: 'String',
'value': 'PUT'
})
elif self._signal_transport_heat() or self._signal_transport_zaqar():
creds = self._get_heat_signal_credentials()
inputs.extend([{
scl.NAME: self.DEPLOY_AUTH_URL,
scl.DESCRIPTION: _('URL for API authentication'),
scl.TYPE: 'String',
'value': creds['auth_url']
}, {
scl.NAME: self.DEPLOY_USERNAME,
scl.DESCRIPTION: _('Username for API authentication'),
scl.TYPE: 'String',
'value': creds['username']
}, {
scl.NAME: self.DEPLOY_USER_ID,
scl.DESCRIPTION: _('User ID for API authentication'),
scl.TYPE: 'String',
'value': creds['user_id']
}, {
scl.NAME: self.DEPLOY_PASSWORD,
scl.DESCRIPTION: _('Password for API authentication'),
scl.TYPE: 'String',
'value': creds['password']
}, {
scl.NAME: self.DEPLOY_PROJECT_ID,
scl.DESCRIPTION: _('ID of project for API authentication'),
scl.TYPE: 'String',
'value': creds['project_id']
}])
if self._signal_transport_zaqar():
inputs.append({
scl.NAME: self.DEPLOY_QUEUE_ID,
scl.DESCRIPTION: _('ID of queue to use for signaling '
'output values'),
scl.TYPE: 'String',
'value': self._get_zaqar_signal_queue_id()
})
return inputs
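    # Illustrative sketch (an assumption for clarity, not part of the original
    # file): given a software config that declares an input
    # {'name': 'foo', 'default': 'bar'} and an ``input_values`` property of
    # {'foo': 'baz'}, the list built above would contain
    # {'name': 'foo', 'default': 'bar', 'value': 'baz'} followed by the
    # deploy_server_id/deploy_action/deploy_stack_id/... metadata inputs and,
    # depending on the signal transport, the signal or credential inputs.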
def handle_create(self):
return self._handle_action(self.CREATE)
def check_create_complete(self, sd):
if not sd:
return True
return self._check_complete()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.properties = json_snippet.properties(self.properties_schema,
self.context)
return self._handle_action(self.UPDATE)
def check_update_complete(self, sd):
if not sd:
return True
return self._check_complete()
def handle_delete(self):
try:
return self._handle_action(self.DELETE)
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'NotFound')
def check_delete_complete(self, sd=None):
if not sd or self._check_complete():
self._delete_resource()
return True
def _delete_resource(self):
self._delete_signals()
self._delete_user()
derived_config_id = None
if self.resource_id is not None:
try:
sd = self.rpc_client().show_software_deployment(
self.context, self.resource_id)
derived_config_id = sd[rpc_api.SOFTWARE_DEPLOYMENT_CONFIG_ID]
self.rpc_client().delete_software_deployment(
self.context, self.resource_id)
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'NotFound')
if derived_config_id:
self._delete_derived_config(derived_config_id)
def handle_suspend(self):
return self._handle_action(self.SUSPEND)
def check_suspend_complete(self, sd):
if not sd:
return True
return self._check_complete()
def handle_resume(self):
return self._handle_action(self.RESUME)
def check_resume_complete(self, sd):
if not sd:
return True
return self._check_complete()
def handle_signal(self, details):
return self.rpc_client().signal_software_deployment(
self.context, self.resource_id, details,
timeutils.utcnow().isoformat())
def FnGetAtt(self, key, *path):
        '''
        Resource attributes map to deployment output values
        '''
sd = self.rpc_client().show_software_deployment(
self.context, self.resource_id)
ov = sd[rpc_api.SOFTWARE_DEPLOYMENT_OUTPUT_VALUES] or {}
if key in ov:
attribute = ov.get(key)
return attributes.select_from_attribute(attribute, path)
# Since there is no value for this key yet, check the output schemas
# to find out if the key is valid
sc = self.rpc_client().show_software_config(
self.context, self.properties[self.CONFIG])
outputs = sc[rpc_api.SOFTWARE_CONFIG_OUTPUTS] or []
output_keys = [output['name'] for output in outputs]
if key not in output_keys and key not in self.ATTRIBUTES:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)
return None
def validate(self):
'''
Validate any of the provided params
:raises StackValidationFailed: if any property failed validation.
'''
super(SoftwareDeployment, self).validate()
server = self.properties[self.SERVER]
if server:
res = self.stack.resource_by_refid(server)
if res:
if not (res.properties.get('user_data_format') ==
'SOFTWARE_CONFIG'):
raise exception.StackValidationFailed(message=_(
"Resource %s's property user_data_format should be "
"set to SOFTWARE_CONFIG since there are software "
"deployments on it.") % server)
class SoftwareDeploymentGroup(resource_group.ResourceGroup):
'''
This resource associates a group of servers with some configuration which
is to be deployed to all servers in the group.
The properties work in a similar way to OS::Heat::SoftwareDeployment,
and in addition to the attributes documented, you may pass any
attribute supported by OS::Heat::SoftwareDeployment, including those
exposing arbitrary outputs, and return a map of deployment names to
the specified attribute.
'''
support_status = support.SupportStatus(version='5.0.0')
PROPERTIES = (
SERVERS,
CONFIG,
INPUT_VALUES,
DEPLOY_ACTIONS,
NAME,
SIGNAL_TRANSPORT,
) = (
'servers',
SoftwareDeployment.CONFIG,
SoftwareDeployment.INPUT_VALUES,
SoftwareDeployment.DEPLOY_ACTIONS,
SoftwareDeployment.NAME,
SoftwareDeployment.SIGNAL_TRANSPORT,
)
ATTRIBUTES = (
STDOUTS, STDERRS, STATUS_CODES
) = (
'deploy_stdouts', 'deploy_stderrs', 'deploy_status_codes'
)
_sd_ps = SoftwareDeployment.properties_schema
_rg_ps = resource_group.ResourceGroup.properties_schema
properties_schema = {
SERVERS: properties.Schema(
properties.Schema.MAP,
_('A map of Nova names and IDs to apply configuration to.'),
update_allowed=True
),
CONFIG: _sd_ps[CONFIG],
INPUT_VALUES: _sd_ps[INPUT_VALUES],
DEPLOY_ACTIONS: _sd_ps[DEPLOY_ACTIONS],
NAME: _sd_ps[NAME],
SIGNAL_TRANSPORT: _sd_ps[SIGNAL_TRANSPORT]
}
attributes_schema = {
STDOUTS: attributes.Schema(
_("A map of Nova names and captured stdouts from the "
"configuration execution to each server."),
type=attributes.Schema.MAP
),
STDERRS: attributes.Schema(
_("A map of Nova names and captured stderrs from the "
"configuration execution to each server."),
type=attributes.Schema.MAP
),
STATUS_CODES: attributes.Schema(
_("A map of Nova names and returned status code from the "
"configuration execution"),
type=attributes.Schema.MAP
),
}
update_policy_schema = {}
def get_size(self):
return len(self.properties.get(self.SERVERS, {}))
def _resource_names(self):
return six.iterkeys(self.properties.get(self.SERVERS, {}))
def _do_prop_replace(self, res_name, res_def_template):
res_def = copy.deepcopy(res_def_template)
props = res_def[self.RESOURCE_DEF_PROPERTIES]
servers = self.properties.get(self.SERVERS, {})
props[SoftwareDeployment.SERVER] = servers.get(res_name)
return res_def
def _build_resource_definition(self, include_all=False):
p = self.properties
return {
self.RESOURCE_DEF_TYPE: 'OS::Heat::SoftwareDeployment',
self.RESOURCE_DEF_PROPERTIES: {
self.CONFIG: p[self.CONFIG],
self.INPUT_VALUES: p[self.INPUT_VALUES],
self.DEPLOY_ACTIONS: p[self.DEPLOY_ACTIONS],
self.SIGNAL_TRANSPORT: p[self.SIGNAL_TRANSPORT],
self.NAME: p[self.NAME],
}
}
def FnGetAtt(self, key, *path):
rg = super(SoftwareDeploymentGroup, self)
if key == self.STDOUTS:
n_attr = SoftwareDeployment.STDOUT
elif key == self.STDERRS:
n_attr = SoftwareDeployment.STDERR
elif key == self.STATUS_CODES:
n_attr = SoftwareDeployment.STATUS_CODE
else:
# Allow any attribute valid for a single SoftwareDeployment
# including arbitrary outputs, so we can't validate here
n_attr = key
rg_attr = rg.FnGetAtt(rg.ATTR_ATTRIBUTES, n_attr)
return attributes.select_from_attribute(rg_attr, path)
class SoftwareDeployments(SoftwareDeploymentGroup):
deprecation_msg = _('This resource is deprecated and use is discouraged. '
                        'Please use resource OS::Heat::SoftwareDeploymentGroup '
'instead.')
support_status = support.SupportStatus(status=support.DEPRECATED,
message=deprecation_msg,
version='2014.2')
def resource_mapping():
return {
'OS::Heat::SoftwareDeployment': SoftwareDeployment,
'OS::Heat::SoftwareDeploymentGroup': SoftwareDeploymentGroup,
'OS::Heat::SoftwareDeployments': SoftwareDeployments,
}
| apache-2.0 | -1,331,330,780,396,496,400 | 37.100295 | 79 | 0.590121 | false |
spirrello/spirrello-pynet-work | applied_python/lib/python2.7/site-packages/ansible/modules/extras/packaging/os/homebrew_cask.py | 4 | 16000 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: homebrew_cask
author: Daniel Jaouen
short_description: Install/uninstall homebrew casks.
description:
- Manages Homebrew casks.
version_added: "1.6"
options:
name:
description:
- name of cask to install/remove
required: true
state:
description:
- state of the cask
    choices: [ 'present', 'installed', 'absent', 'removed', 'uninstalled' ]
required: false
default: present
'''
EXAMPLES = '''
- homebrew_cask: name=alfred state=present
- homebrew_cask: name=alfred state=absent
'''
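# Additional illustrative usage (an assumption, not part of the module's
# EXAMPLES block): because main() splits the `name` parameter on commas,
# several casks can be handled in one task, e.g.
#
# - homebrew_cask: name=alfred,iterm2 state=present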
import os.path
import re
# exceptions -------------------------------------------------------------- {{{
class HomebrewCaskException(Exception):
pass
# /exceptions ------------------------------------------------------------- }}}
# utils ------------------------------------------------------------------- {{{
def _create_regex_group(s):
lines = (line.strip() for line in s.split('\n') if line.strip())
chars = filter(None, (line.split('#')[0].strip() for line in lines))
group = r'[^' + r''.join(chars) + r']'
return re.compile(group)
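# Illustrative note (an assumption, not in the original module): for
# VALID_CASK_CHARS the helper above builds roughly the pattern r'[^\w.\+-]',
# so INVALID_CASK_REGEX.search('google-chrome') returns None while
# INVALID_CASK_REGEX.search('bad cask!') matches the space and the '!'.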
# /utils ------------------------------------------------------------------ }}}
class HomebrewCask(object):
'''A class to manage Homebrew casks.'''
# class regexes ------------------------------------------------ {{{
VALID_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
: # colons
. # dots
{sep} # the OS-specific path separator
- # dashes
'''.format(sep=os.path.sep)
VALID_BREW_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
. # dots
{sep} # the OS-specific path separator
- # dashes
'''.format(sep=os.path.sep)
VALID_CASK_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
. # dots
\+ # plusses
- # dashes
'''
INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS)
# /class regexes ----------------------------------------------- }}}
# class validations -------------------------------------------- {{{
@classmethod
def valid_path(cls, path):
'''
`path` must be one of:
- list of paths
- a string containing only:
- alphanumeric characters
- dashes
- spaces
- colons
- os.path.sep
'''
if isinstance(path, basestring):
return not cls.INVALID_PATH_REGEX.search(path)
try:
iter(path)
except TypeError:
return False
else:
paths = path
return all(cls.valid_brew_path(path_) for path_ in paths)
@classmethod
def valid_brew_path(cls, brew_path):
'''
`brew_path` must be one of:
- None
- a string containing only:
- alphanumeric characters
- dashes
- spaces
- os.path.sep
'''
if brew_path is None:
return True
return (
isinstance(brew_path, basestring)
and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
)
@classmethod
def valid_cask(cls, cask):
        '''A valid cask is either None or a string of alphanumerics, dots, plusses and dashes.'''
if cask is None:
return True
return (
isinstance(cask, basestring)
and not cls.INVALID_CASK_REGEX.search(cask)
)
@classmethod
def valid_state(cls, state):
'''
A valid state is one of:
- installed
- absent
'''
if state is None:
return True
else:
return (
isinstance(state, basestring)
and state.lower() in (
'installed',
'absent',
)
)
@classmethod
def valid_module(cls, module):
'''A valid module is an instance of AnsibleModule.'''
return isinstance(module, AnsibleModule)
# /class validations ------------------------------------------- }}}
# class properties --------------------------------------------- {{{
@property
def module(self):
return self._module
@module.setter
def module(self, module):
if not self.valid_module(module):
self._module = None
self.failed = True
self.message = 'Invalid module: {0}.'.format(module)
raise HomebrewCaskException(self.message)
else:
self._module = module
return module
@property
def path(self):
return self._path
@path.setter
def path(self, path):
if not self.valid_path(path):
self._path = []
self.failed = True
self.message = 'Invalid path: {0}.'.format(path)
raise HomebrewCaskException(self.message)
else:
if isinstance(path, basestring):
self._path = path.split(':')
else:
self._path = path
return path
@property
def brew_path(self):
return self._brew_path
@brew_path.setter
def brew_path(self, brew_path):
if not self.valid_brew_path(brew_path):
self._brew_path = None
self.failed = True
self.message = 'Invalid brew_path: {0}.'.format(brew_path)
raise HomebrewCaskException(self.message)
else:
self._brew_path = brew_path
return brew_path
@property
def params(self):
return self._params
@params.setter
def params(self, params):
self._params = self.module.params
return self._params
@property
def current_cask(self):
return self._current_cask
@current_cask.setter
def current_cask(self, cask):
if not self.valid_cask(cask):
self._current_cask = None
self.failed = True
self.message = 'Invalid cask: {0}.'.format(cask)
raise HomebrewCaskException(self.message)
else:
self._current_cask = cask
return cask
# /class properties -------------------------------------------- }}}
def __init__(self, module, path=None, casks=None, state=None):
self._setup_status_vars()
self._setup_instance_vars(module=module, path=path, casks=casks,
state=state)
self._prep()
# prep --------------------------------------------------------- {{{
def _setup_status_vars(self):
self.failed = False
self.changed = False
self.changed_count = 0
self.unchanged_count = 0
self.message = ''
def _setup_instance_vars(self, **kwargs):
for key, val in kwargs.iteritems():
setattr(self, key, val)
def _prep(self):
self._prep_path()
self._prep_brew_path()
def _prep_path(self):
if not self.path:
self.path = ['/usr/local/bin']
def _prep_brew_path(self):
if not self.module:
self.brew_path = None
self.failed = True
self.message = 'AnsibleModule not set.'
raise HomebrewCaskException(self.message)
self.brew_path = self.module.get_bin_path(
'brew',
required=True,
opt_dirs=self.path,
)
if not self.brew_path:
self.brew_path = None
self.failed = True
self.message = 'Unable to locate homebrew executable.'
raise HomebrewCaskException('Unable to locate homebrew executable.')
return self.brew_path
def _status(self):
return (self.failed, self.changed, self.message)
# /prep -------------------------------------------------------- }}}
def run(self):
try:
self._run()
except HomebrewCaskException:
pass
if not self.failed and (self.changed_count + self.unchanged_count > 1):
self.message = "Changed: %d, Unchanged: %d" % (
self.changed_count,
self.unchanged_count,
)
(failed, changed, message) = self._status()
return (failed, changed, message)
# checks ------------------------------------------------------- {{{
def _current_cask_is_installed(self):
if not self.valid_cask(self.current_cask):
self.failed = True
self.message = 'Invalid cask: {0}.'.format(self.current_cask)
raise HomebrewCaskException(self.message)
cmd = [self.brew_path, 'cask', 'list']
rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0])
if 'nothing to list' in err:
return False
elif rc == 0:
casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()]
return self.current_cask in casks
else:
self.failed = True
self.message = err.strip()
raise HomebrewCaskException(self.message)
# /checks ------------------------------------------------------ }}}
# commands ----------------------------------------------------- {{{
def _run(self):
if self.state == 'installed':
return self._install_casks()
elif self.state == 'absent':
return self._uninstall_casks()
        # Any other value of `state` is normalized to 'installed' or 'absent'
        # in main(), so there is nothing further to handle here.
# updated -------------------------------- {{{
def _update_homebrew(self):
rc, out, err = self.module.run_command([
self.brew_path,
'update',
], path_prefix=self.path[0])
if rc == 0:
if out and isinstance(out, basestring):
already_updated = any(
re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
for s in out.split('\n')
if s
)
if not already_updated:
self.changed = True
self.message = 'Homebrew updated successfully.'
else:
self.message = 'Homebrew already up-to-date.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewCaskException(self.message)
# /updated ------------------------------- }}}
# installed ------------------------------ {{{
def _install_current_cask(self):
if not self.valid_cask(self.current_cask):
self.failed = True
self.message = 'Invalid cask: {0}.'.format(self.current_cask)
raise HomebrewCaskException(self.message)
if self._current_cask_is_installed():
self.unchanged_count += 1
self.message = 'Cask already installed: {0}'.format(
self.current_cask,
)
return True
if self.module.check_mode:
self.changed = True
self.message = 'Cask would be installed: {0}'.format(
self.current_cask
)
raise HomebrewCaskException(self.message)
cmd = [opt
for opt in (self.brew_path, 'cask', 'install', self.current_cask)
if opt]
rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0])
if self._current_cask_is_installed():
self.changed_count += 1
self.changed = True
self.message = 'Cask installed: {0}'.format(self.current_cask)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewCaskException(self.message)
def _install_casks(self):
for cask in self.casks:
self.current_cask = cask
self._install_current_cask()
return True
# /installed ----------------------------- }}}
# uninstalled ---------------------------- {{{
def _uninstall_current_cask(self):
if not self.valid_cask(self.current_cask):
self.failed = True
self.message = 'Invalid cask: {0}.'.format(self.current_cask)
raise HomebrewCaskException(self.message)
if not self._current_cask_is_installed():
self.unchanged_count += 1
self.message = 'Cask already uninstalled: {0}'.format(
self.current_cask,
)
return True
if self.module.check_mode:
self.changed = True
self.message = 'Cask would be uninstalled: {0}'.format(
self.current_cask
)
raise HomebrewCaskException(self.message)
cmd = [opt
for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask)
if opt]
rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0])
if not self._current_cask_is_installed():
self.changed_count += 1
self.changed = True
self.message = 'Cask uninstalled: {0}'.format(self.current_cask)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewCaskException(self.message)
def _uninstall_casks(self):
for cask in self.casks:
self.current_cask = cask
self._uninstall_current_cask()
return True
# /uninstalled ----------------------------- }}}
# /commands ---------------------------------------------------- }}}
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=["cask"], required=False),
path=dict(required=False),
state=dict(
default="present",
choices=[
"present", "installed",
"absent", "removed", "uninstalled",
],
),
),
supports_check_mode=True,
)
p = module.params
if p['name']:
casks = p['name'].split(',')
else:
casks = None
path = p['path']
if path:
path = path.split(':')
else:
path = ['/usr/local/bin']
state = p['state']
if state in ('present', 'installed'):
state = 'installed'
if state in ('absent', 'removed', 'uninstalled'):
state = 'absent'
brew_cask = HomebrewCask(module=module, path=path, casks=casks,
state=state)
(failed, changed, message) = brew_cask.run()
if failed:
module.fail_json(msg=message)
else:
module.exit_json(changed=changed, msg=message)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
| gpl-3.0 | 8,235,355,458,988,184,000 | 29.947776 | 82 | 0.496375 | false |
promptworks/horizon | openstack_dashboard/dashboards/admin/volumes/volume_types/extras/views.py | 36 | 4685 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.volumes.volume_types.extras \
import forms as project_forms
from openstack_dashboard.dashboards.admin.volumes.volume_types.extras \
import tables as project_tables
class ExtraSpecMixin(object):
def get_context_data(self, **kwargs):
context = super(ExtraSpecMixin, self).get_context_data(**kwargs)
try:
context['vol_type'] = api.cinder.volume_type_get(
self.request, self.kwargs['type_id'])
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve volume type details."))
if 'key' in self.kwargs:
context['key'] = self.kwargs['key']
return context
class IndexView(ExtraSpecMixin, forms.ModalFormMixin, tables.DataTableView):
table_class = project_tables.ExtraSpecsTable
template_name = 'admin/volumes/volume_types/extras/index.html'
def get_data(self):
try:
type_id = self.kwargs['type_id']
extras_list = api.cinder.volume_type_extra_get(self.request,
type_id)
extras_list.sort(key=lambda es: (es.key,))
except Exception:
extras_list = []
exceptions.handle(self.request,
_('Unable to retrieve extra spec list.'))
return extras_list
class CreateView(ExtraSpecMixin, forms.ModalFormView):
form_class = project_forms.CreateExtraSpec
form_id = "extra_spec_create_form"
modal_header = _("Create Volume Type Extra Spec")
modal_id = "extra_spec_create_modal"
submit_label = _("Create")
submit_url = "horizon:admin:volumes:volume_types:extras:create"
template_name = 'admin/volumes/volume_types/extras/create.html'
success_url = 'horizon:admin:volumes:volume_types:extras:index'
def get_initial(self):
return {'type_id': self.kwargs['type_id']}
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['type_id'],))
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
args = (self.kwargs['type_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
class EditView(ExtraSpecMixin, forms.ModalFormView):
form_class = project_forms.EditExtraSpec
form_id = "extra_spec_edit_form"
modal_header = _('Edit Extra Spec Value: %s')
modal_id = "extra_spec_edit_modal"
submit_label = _("Save")
submit_url = "horizon:admin:volumes:volume_types:extras:edit"
template_name = 'admin/volumes/volume_types/extras/edit.html'
success_url = 'horizon:admin:volumes:volume_types:extras:index'
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['type_id'],))
def get_initial(self):
type_id = self.kwargs['type_id']
key = self.kwargs['key']
try:
extra_specs = api.cinder.volume_type_extra_get(self.request,
type_id,
raw=True)
except Exception:
extra_specs = {}
exceptions.handle(self.request,
_('Unable to retrieve volume type extra spec '
'details.'))
return {'type_id': type_id,
'key': key,
'value': extra_specs.get(key, '')}
def get_context_data(self, **kwargs):
context = super(EditView, self).get_context_data(**kwargs)
args = (self.kwargs['type_id'], self.kwargs['key'],)
context['submit_url'] = reverse(self.submit_url, args=args)
context['modal_header'] = self.modal_header % self.kwargs['key']
return context
| apache-2.0 | -666,318,647,534,431,900 | 38.70339 | 76 | 0.619851 | false |
jromang/retina-old | distinclude/spyderlib/widgets/shell.py | 1 | 46139 | # -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Shell widgets: base, python and terminal"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
import os
import time
import os.path as osp
import re
from spyderlib.qt.QtGui import (QMenu, QApplication, QCursor, QToolTip,
QKeySequence, QMessageBox, QMouseEvent,
QTextCursor, QTextCharFormat, QShortcut)
from spyderlib.qt.QtCore import Qt, QCoreApplication, QTimer, SIGNAL, Property
from spyderlib.qt.compat import getsavefilename
# Local import
from spyderlib.baseconfig import get_conf_path, _, STDERR
from spyderlib.config import CONF, get_icon, get_font
from spyderlib.utils import encoding
from spyderlib.utils.misc import get_error_match
from spyderlib.utils.dochelpers import getobj
from spyderlib.utils.qthelpers import (keybinding, create_action, add_actions,
restore_keyevent)
from spyderlib.widgets.sourcecode.base import ConsoleBaseWidget
HISTORY_FILENAMES = []
class ShellBaseWidget(ConsoleBaseWidget):
"""
Shell base widget
"""
INITHISTORY = None
SEPARATOR = None
def __init__(self, parent, history_filename, debug=False, profile=False):
"""
parent : specifies the parent widget
"""
ConsoleBaseWidget.__init__(self, parent)
# Prompt position: tuple (line, index)
self.current_prompt_pos = None
self.new_input_line = True
# History
self.histidx = None
self.hist_wholeline = False
assert isinstance(history_filename, (str, unicode))
self.history_filename = history_filename
self.history = self.load_history()
# Session
self.historylog_filename = CONF.get('main', 'historylog_filename',
get_conf_path('history.log'))
# Context menu
self.menu = None
self.setup_context_menu()
# Debug mode
self.debug = debug
# Simple profiling test
self.profile = profile
# Buffer to increase performance of write/flush operations
self.__buffer = []
self.__timestamp = 0.0
self.__flushtimer = QTimer(self)
self.__flushtimer.setSingleShot(True)
self.connect(self.__flushtimer, SIGNAL('timeout()'), self.flush)
# Give focus to widget
self.setFocus()
# Calltips
calltip_size = CONF.get('shell_appearance', 'calltips/size')
calltip_font = get_font('shell_appearance', 'calltips')
self.setup_calltips(calltip_size, calltip_font)
# Completion
completion_size = CONF.get('shell_appearance', 'completion/size')
completion_font = get_font('shell_appearance', 'completion')
self.completion_widget.setup_appearance(completion_size,
completion_font)
# Cursor width
self.setCursorWidth( CONF.get('shell_appearance', 'cursor/width') )
def toggle_wrap_mode(self, enable):
"""Enable/disable wrap mode"""
self.set_wrap_mode('character' if enable else None)
def set_font(self, font):
"""Set shell styles font"""
self.set_pythonshell_font(font)
cursor = self.textCursor()
cursor.select(QTextCursor.Document)
charformat = QTextCharFormat()
charformat.setFontFamily(font.family())
charformat.setFontPointSize(font.pointSize())
cursor.mergeCharFormat(charformat)
#------ Context menu
def setup_context_menu(self):
"""Setup shell context menu"""
self.menu = QMenu(self)
self.cut_action = create_action(self, _("Cut"),
shortcut=keybinding('Cut'),
icon=get_icon('editcut.png'),
triggered=self.cut)
self.copy_action = create_action(self, _("Copy"),
shortcut=keybinding('Copy'),
icon=get_icon('editcopy.png'),
triggered=self.copy)
paste_action = create_action(self, _("Paste"),
shortcut=keybinding('Paste'),
icon=get_icon('editpaste.png'),
triggered=self.paste)
save_action = create_action(self, _("Save history log..."),
icon=get_icon('filesave.png'),
tip=_("Save current history log (i.e. all "
"inputs and outputs) in a text file"),
triggered=self.save_historylog)
self.delete_action = create_action(self, _("Delete"),
shortcut=keybinding('Delete'),
icon=get_icon('editdelete.png'),
triggered=self.delete)
selectall_action = create_action(self, _("Select All"),
shortcut=keybinding('SelectAll'),
icon=get_icon('selectall.png'),
triggered=self.selectAll)
add_actions(self.menu, (self.cut_action, self.copy_action,
paste_action, self.delete_action, None,
selectall_action, None, save_action) )
def contextMenuEvent(self, event):
"""Reimplement Qt method"""
state = self.has_selected_text()
self.copy_action.setEnabled(state)
self.cut_action.setEnabled(state)
self.delete_action.setEnabled(state)
self.menu.popup(event.globalPos())
event.accept()
#------ Input buffer
def get_current_line_to_cursor(self):
return self.get_text(self.current_prompt_pos, 'cursor')
def get_current_line_from_cursor(self):
return self.get_text('cursor', 'eof')
def _select_input(self):
"""Select current line (without selecting console prompt)"""
line, index = self.get_position('eof')
if self.current_prompt_pos is None:
pline, pindex = line, index
else:
pline, pindex = self.current_prompt_pos
self.setSelection(pline, pindex, line, index)
def clear_line(self):
"""Clear current line (without clearing console prompt)"""
if self.current_prompt_pos is not None:
self.remove_text(self.current_prompt_pos, 'eof')
def clear_terminal(self):
"""
Clear terminal window
Child classes reimplement this method to write prompt
"""
self.clear()
# The buffer being edited
def _set_input_buffer(self, text):
"""Set input buffer"""
if self.current_prompt_pos is not None:
self.replace_text(self.current_prompt_pos, 'eol', text)
else:
self.insert(text)
self.set_cursor_position('eof')
def _get_input_buffer(self):
"""Return input buffer"""
input_buffer = ''
if self.current_prompt_pos is not None:
input_buffer = self.get_text(self.current_prompt_pos, 'eol')
input_buffer = input_buffer.replace(os.linesep, '\n')
return input_buffer
input_buffer = Property("QString", _get_input_buffer, _set_input_buffer)
#------ Prompt
def new_prompt(self, prompt):
"""
Print a new prompt and save its (line, index) position
"""
self.write(prompt, prompt=True)
# now we update our cursor giving end of prompt
self.current_prompt_pos = self.get_position('cursor')
self.ensureCursorVisible()
self.new_input_line = False
def check_selection(self):
"""
Check if selected text is r/w,
otherwise remove read-only parts of selection
"""
if self.current_prompt_pos is None:
self.set_cursor_position('eof')
else:
self.truncate_selection(self.current_prompt_pos)
#------ Copy / Keyboard interrupt
def copy(self):
"""Copy text to clipboard... or keyboard interrupt"""
if self.has_selected_text():
ConsoleBaseWidget.copy(self)
else:
self.emit(SIGNAL("keyboard_interrupt()"))
def cut(self):
"""Cut text"""
self.check_selection()
if self.has_selected_text():
ConsoleBaseWidget.cut(self)
def delete(self):
"""Remove selected text"""
self.check_selection()
if self.has_selected_text():
ConsoleBaseWidget.remove_selected_text(self)
def save_historylog(self):
"""Save current history log (all text in console)"""
title = _("Save history log")
self.emit(SIGNAL('redirect_stdio(bool)'), False)
filename, _selfilter = getsavefilename(self, title,
self.historylog_filename, "%s (*.log)" % _("History logs"))
self.emit(SIGNAL('redirect_stdio(bool)'), True)
if filename:
filename = osp.normpath(filename)
try:
encoding.write(unicode(self.get_text_with_eol()), filename)
self.historylog_filename = filename
CONF.set('main', 'historylog_filename', filename)
except EnvironmentError, error:
QMessageBox.critical(self, title,
_("<b>Unable to save file '%s'</b>"
"<br><br>Error message:<br>%s"
) % (osp.basename(filename),
unicode(error)))
#------ Basic keypress event handler
def on_enter(self, command):
"""on_enter"""
self.execute_command(command)
def execute_command(self, command):
self.emit(SIGNAL("execute(QString)"), command)
self.add_to_history(command)
self.new_input_line = True
def on_new_line(self):
"""On new input line"""
self.set_cursor_position('eof')
self.current_prompt_pos = self.get_position('cursor')
self.new_input_line = False
def paste(self):
"""Reimplemented slot to handle multiline paste action"""
if self.new_input_line:
self.on_new_line()
ConsoleBaseWidget.paste(self)
def keyPressEvent(self, event):
"""
Reimplement Qt Method
Basic keypress event handler
(reimplemented in InternalShell to add more sophisticated features)
"""
if self.preprocess_keyevent(event):
# Event was accepted in self.preprocess_keyevent
return
self.postprocess_keyevent(event)
def preprocess_keyevent(self, event):
"""Pre-process keypress event:
return True if event is accepted, false otherwise"""
# Copy must be done first to be able to copy read-only text parts
# (otherwise, right below, we would remove selection
# if not on current line)
ctrl = event.modifiers() & Qt.ControlModifier
if event.key() == Qt.Key_C and ctrl:
self.copy()
event.accept()
return True
if self.new_input_line and ( len(event.text()) or event.key() in \
(Qt.Key_Up, Qt.Key_Down, Qt.Key_Left, Qt.Key_Right) ):
self.on_new_line()
return False
def postprocess_keyevent(self, event):
"""Post-process keypress event:
in InternalShell, this is method is called when shell is ready"""
event, text, key, ctrl, shift = restore_keyevent(event)
# Is cursor on the last line? and after prompt?
if len(text):
#XXX: Shouldn't it be: `if len(unicode(text).strip(os.linesep))` ?
if self.has_selected_text():
self.check_selection()
self.restrict_cursor_position(self.current_prompt_pos, 'eof')
cursor_position = self.get_position('cursor')
if key in (Qt.Key_Return, Qt.Key_Enter):
if self.is_cursor_on_last_line():
self._key_enter()
# add and run selection
else:
self.insert_text(self.get_selected_text(), at_end=True)
elif key == Qt.Key_Insert and not shift and not ctrl:
self.setOverwriteMode(not self.overwriteMode())
elif key == Qt.Key_Delete:
if self.has_selected_text():
self.check_selection()
self.remove_selected_text()
elif self.is_cursor_on_last_line():
self.stdkey_clear()
elif key == Qt.Key_Backspace:
self._key_backspace(cursor_position)
elif key == Qt.Key_Tab:
self._key_tab()
elif key == Qt.Key_Space and ctrl:
self._key_ctrl_space()
elif key == Qt.Key_Left:
if self.current_prompt_pos == cursor_position:
# Avoid moving cursor on prompt
return
method = self.extend_selection_to_next if shift \
else self.move_cursor_to_next
method('word' if ctrl else 'character', direction='left')
elif key == Qt.Key_Right:
if self.is_cursor_at_end():
return
method = self.extend_selection_to_next if shift \
else self.move_cursor_to_next
method('word' if ctrl else 'character', direction='right')
elif (key == Qt.Key_Home) or ((key == Qt.Key_Up) and ctrl):
self._key_home(shift)
elif (key == Qt.Key_End) or ((key == Qt.Key_Down) and ctrl):
self._key_end(shift)
elif key == Qt.Key_Up:
if not self.is_cursor_on_last_line():
self.set_cursor_position('eof')
y_cursor = self.get_coordinates(cursor_position)[1]
y_prompt = self.get_coordinates(self.current_prompt_pos)[1]
if y_cursor > y_prompt:
self.stdkey_up(shift)
else:
self.browse_history(backward=True)
elif key == Qt.Key_Down:
if not self.is_cursor_on_last_line():
self.set_cursor_position('eof')
y_cursor = self.get_coordinates(cursor_position)[1]
y_end = self.get_coordinates('eol')[1]
if y_cursor < y_end:
self.stdkey_down(shift)
else:
self.browse_history(backward=False)
elif key in (Qt.Key_PageUp, Qt.Key_PageDown):
#XXX: Find a way to do this programmatically instead of calling
# widget keyhandler (this won't work if the *event* is coming from
# the event queue - i.e. if the busy buffer is ever implemented)
ConsoleBaseWidget.keyPressEvent(self, event)
elif key == Qt.Key_Escape and ctrl and shift:
self._key_ctrl_shift_escape()
elif key == Qt.Key_Escape and shift:
self._key_shift_escape()
elif key == Qt.Key_Escape:
self._key_escape()
elif key == Qt.Key_V and ctrl:
self.paste()
elif key == Qt.Key_X and ctrl:
self.cut()
elif key == Qt.Key_Z and ctrl:
self.undo()
elif key == Qt.Key_Y and ctrl:
self.redo()
elif key == Qt.Key_A and ctrl:
self.selectAll()
elif key == Qt.Key_Question and not self.has_selected_text():
self._key_question(text)
elif key == Qt.Key_ParenLeft and not self.has_selected_text():
self._key_parenleft(text)
elif key == Qt.Key_Period and not self.has_selected_text():
self._key_period(text)
elif len(text) and not self.isReadOnly():
self.hist_wholeline = False
self.insert_text(text)
self._key_other(text)
else:
# Let the parent widget handle the key press event
ConsoleBaseWidget.keyPressEvent(self, event)
#------ Key handlers
def _key_enter(self):
command = self.input_buffer
self.insert_text('\n', at_end=True)
self.on_enter(command)
self.flush()
def _key_other(self, text):
raise NotImplementedError
def _key_backspace(self, cursor_position):
raise NotImplementedError
def _key_tab(self):
raise NotImplementedError
def _key_ctrl_space(self):
raise NotImplementedError
def _key_home(self, shift):
raise NotImplementedError
def _key_end(self, shift):
raise NotImplementedError
def _key_pageup(self):
raise NotImplementedError
def _key_pagedown(self):
raise NotImplementedError
def _key_escape(self):
raise NotImplementedError
def _key_shift_escape(self):
raise NotImplementedError
def _key_ctrl_shift_escape(self):
raise NotImplementedError
def _key_question(self, text):
raise NotImplementedError
def _key_parenleft(self, text):
raise NotImplementedError
def _key_period(self, text):
raise NotImplementedError
#------ History Management
def load_history(self):
"""Load history from a .py file in user home directory"""
if osp.isfile(self.history_filename):
rawhistory, _ = encoding.readlines(self.history_filename)
rawhistory = [line.replace('\n', '') for line in rawhistory]
if rawhistory[1] != self.INITHISTORY[1]:
rawhistory[1] = self.INITHISTORY[1]
else:
rawhistory = self.INITHISTORY
history = [line for line in rawhistory \
if line and not line.startswith('#')]
# Truncating history to X entries:
while len(history) >= CONF.get('historylog', 'max_entries'):
del history[0]
while rawhistory[0].startswith('#'):
del rawhistory[0]
del rawhistory[0]
# Saving truncated history:
encoding.writelines(rawhistory, self.history_filename)
return history
def add_to_history(self, command):
"""Add command to history"""
command = unicode(command)
if command in ['', '\n'] or command.startswith('Traceback'):
return
if command.endswith('\n'):
command = command[:-1]
self.histidx = None
if len(self.history)>0 and self.history[-1] == command:
return
self.history.append(command)
text = os.linesep + command
# When the first entry will be written in history file,
# the separator will be append first:
if self.history_filename not in HISTORY_FILENAMES:
HISTORY_FILENAMES.append(self.history_filename)
text = self.SEPARATOR + text
encoding.write(text, self.history_filename, mode='ab')
self.emit(SIGNAL('append_to_history(QString,QString)'),
self.history_filename, text)
def browse_history(self, backward):
"""Browse history"""
if self.is_cursor_before('eol') and self.hist_wholeline:
self.hist_wholeline = False
tocursor = self.get_current_line_to_cursor()
text, self.histidx = self.__find_in_history(tocursor,
self.histidx, backward)
if text is not None:
if self.hist_wholeline:
self.clear_line()
self.insert_text(text)
else:
cursor_position = self.get_position('cursor')
# Removing text from cursor to the end of the line
self.remove_text('cursor', 'eol')
# Inserting history text
self.insert_text(text)
self.set_cursor_position(cursor_position)
def __find_in_history(self, tocursor, start_idx, backward):
"""Find text 'tocursor' in history, from index 'start_idx'"""
if start_idx is None:
start_idx = len(self.history)
# Finding text in history
step = -1 if backward else 1
idx = start_idx
if len(tocursor) == 0 or self.hist_wholeline:
idx += step
if idx >= len(self.history) or len(self.history) == 0:
return "", len(self.history)
elif idx < 0:
idx = 0
self.hist_wholeline = True
return self.history[idx], idx
else:
for index in xrange(len(self.history)):
idx = (start_idx+step*(index+1)) % len(self.history)
entry = self.history[idx]
if entry.startswith(tocursor):
return entry[len(tocursor):], idx
else:
return None, start_idx
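    # Illustrative sketch (an assumption, not part of the original widget):
    # with history == ['a = 1', 'print(a)', 'a += 2'] and 'a' already typed at
    # the prompt, pressing Up calls __find_in_history('a', None, backward=True),
    # which walks the list backwards and returns (' += 2', 2) -- only the part
    # after the common prefix is inserted at the cursor.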
#------ Simulation standards input/output
def write_error(self, text):
"""Simulate stderr"""
self.flush()
self.write(text, flush=True, error=True)
if self.debug:
STDERR.write(text)
def write(self, text, flush=False, error=False, prompt=False):
"""Simulate stdout and stderr"""
if prompt:
self.flush()
if not isinstance(text, basestring):
# This test is useful to discriminate QStrings from decoded str
text = unicode(text)
self.__buffer.append(text)
ts = time.time()
if flush or prompt:
self.flush(error=error, prompt=prompt)
elif ts - self.__timestamp > 0.05:
self.flush(error=error)
self.__timestamp = ts
# Timer to flush strings cached by last write() operation in series
self.__flushtimer.start(50)
def flush(self, error=False, prompt=False):
"""Flush buffer, write text to console"""
text = "".join(self.__buffer)
self.__buffer = []
self.insert_text(text, at_end=True, error=error, prompt=prompt)
QCoreApplication.processEvents()
self.repaint()
# Clear input buffer:
self.new_input_line = True
#------ Text Insertion
def insert_text(self, text, at_end=False, error=False, prompt=False):
"""
Insert text at the current cursor position
or at the end of the command line
"""
if at_end:
# Insert text at the end of the command line
self.append_text_to_shell(text, error, prompt)
else:
# Insert text at current cursor position
ConsoleBaseWidget.insert_text(self, text)
#------ Re-implemented Qt Methods
def focusNextPrevChild(self, next):
"""
Reimplemented to stop Tab moving to the next window
"""
if next:
return False
return ConsoleBaseWidget.focusNextPrevChild(self, next)
def mousePressEvent(self, event):
"""
Re-implemented to handle the mouse press event.
event: the mouse press event (QMouseEvent)
"""
if event.button() == Qt.MidButton:
text = self.get_selected_text()
# Simulating left mouse button:
event = QMouseEvent(QMouseEvent.MouseButtonPress, event.pos(),
Qt.LeftButton, Qt.LeftButton, Qt.NoModifier)
ConsoleBaseWidget.mousePressEvent(self, event)
if self.new_input_line:
self.on_new_line()
self.insert_text(text)
event.accept()
else:
ConsoleBaseWidget.mousePressEvent(self, event)
#------ Drag and drop
def dragEnterEvent(self, event):
"""Drag and Drop - Enter event"""
event.setAccepted(event.mimeData().hasFormat("text/plain"))
def dragMoveEvent(self, event):
"""Drag and Drop - Move event"""
if (event.mimeData().hasFormat("text/plain")):
event.setDropAction(Qt.MoveAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
"""Drag and Drop - Drop event"""
if (event.mimeData().hasFormat("text/plain")):
text = unicode(event.mimeData().text())
if self.new_input_line:
self.on_new_line()
self.insert_text(text, at_end=True)
self.setFocus()
event.setDropAction(Qt.MoveAction)
event.accept()
else:
event.ignore()
def drop_pathlist(self, pathlist):
"""Drop path list"""
raise NotImplementedError
class PythonShellWidget(ShellBaseWidget):
"""
Python shell widget
"""
INITHISTORY = ['# -*- coding: utf-8 -*-',
'# *** Spyder Python Console History Log ***',]
SEPARATOR = '%s##---(%s)---' % (os.linesep*2, time.ctime())
def __init__(self, parent, history_filename, debug=False, profile=False):
ShellBaseWidget.__init__(self, parent, history_filename, debug, profile)
self.inspector = None
self.inspector_enabled = True
# Allow raw_input support:
self.input_loop = None
self.input_mode = False
# Mouse cursor
self.__cursor_changed = False
# Local shortcuts
self.inspectsc = QShortcut(QKeySequence("Ctrl+I"), self,
self.inspect_current_object)
self.inspectsc.setContext(Qt.WidgetWithChildrenShortcut)
def get_shortcut_data(self):
"""
Returns shortcut data, a list of tuples (shortcut, text, default)
shortcut (QShortcut or QAction instance)
text (string): action/shortcut description
default (string): default key sequence
"""
return [
(self.inspectsc, "Inspect current object", "Ctrl+I"),
]
#------ Context menu
def setup_context_menu(self):
"""Reimplements ShellBaseWidget method"""
ShellBaseWidget.setup_context_menu(self)
self.copy_without_prompts_action = create_action(self,
_("Copy without prompts"),
icon=get_icon('copywop.png'),
triggered=self.copy_without_prompts)
clear_line_action = create_action(self, _("Clear line"),
QKeySequence("Shift+Escape"),
icon=get_icon('eraser.png'),
tip=_("Clear line"),
triggered=self.clear_line)
clear_action = create_action(self, _("Clear shell"),
QKeySequence("Ctrl+Shift+Escape"),
icon=get_icon('clear.png'),
tip=_("Clear shell contents "
"('cls' command)"),
triggered=self.clear_terminal)
add_actions(self.menu, (self.copy_without_prompts_action,
clear_line_action, clear_action))
def contextMenuEvent(self, event):
"""Reimplements ShellBaseWidget method"""
state = self.has_selected_text()
self.copy_without_prompts_action.setEnabled(state)
ShellBaseWidget.contextMenuEvent(self, event)
def copy_without_prompts(self):
"""Copy text to clipboard without prompts"""
text = self.get_selected_text()
lines = text.split(os.linesep)
for index, line in enumerate(lines):
if line.startswith('>>> ') or line.startswith('... '):
lines[index] = line[4:]
text = os.linesep.join(lines)
QApplication.clipboard().setText(text)
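    # Illustrative example (an assumption, not in the original source): a
    # selection such as ">>> x = 1\n... y = 2" ends up on the clipboard as
    # "x = 1\ny = 2" -- the 4-character prompt prefixes are stripped line by
    # line before the lines are re-joined with os.linesep.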
#------Mouse events
def mouseReleaseEvent(self, event):
"""Go to error"""
ConsoleBaseWidget.mouseReleaseEvent(self, event)
text = self.get_line_at(event.pos())
if get_error_match(text) and not self.has_selected_text():
self.emit(SIGNAL("go_to_error(QString)"), text)
def mouseMoveEvent(self, event):
"""Show Pointing Hand Cursor on error messages"""
text = self.get_line_at(event.pos())
if get_error_match(text):
if not self.__cursor_changed:
QApplication.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.__cursor_changed = True
event.accept()
return
if self.__cursor_changed:
QApplication.restoreOverrideCursor()
self.__cursor_changed = False
ConsoleBaseWidget.mouseMoveEvent(self, event)
def leaveEvent(self, event):
"""If cursor has not been restored yet, do it now"""
if self.__cursor_changed:
QApplication.restoreOverrideCursor()
self.__cursor_changed = False
ConsoleBaseWidget.leaveEvent(self, event)
#------ Key handlers
def postprocess_keyevent(self, event):
"""Process keypress event"""
ShellBaseWidget.postprocess_keyevent(self, event)
if QToolTip.isVisible():
_event, _text, key, _ctrl, _shift = restore_keyevent(event)
self.hide_tooltip_if_necessary(key)
def _key_other(self, text):
"""1 character key"""
if self.is_completion_widget_visible():
self.completion_text += text
def _key_backspace(self, cursor_position):
"""Action for Backspace key"""
if self.has_selected_text():
self.check_selection()
self.remove_selected_text()
elif self.current_prompt_pos == cursor_position:
# Avoid deleting prompt
return
elif self.is_cursor_on_last_line():
self.stdkey_backspace()
if self.is_completion_widget_visible():
# Removing only last character because if there was a selection
# the completion widget would have been canceled
self.completion_text = self.completion_text[:-1]
def _key_tab(self):
"""Action for TAB key"""
if self.is_cursor_on_last_line():
empty_line = not self.get_current_line_to_cursor().strip()
if empty_line:
self.stdkey_tab()
else:
self.show_code_completion(automatic=False)
def _key_ctrl_space(self):
"""Action for Ctrl+Space"""
if not self.is_completion_widget_visible():
self.show_code_completion(automatic=False)
def _key_home(self, shift):
"""Action for Home key"""
if self.is_cursor_on_last_line():
self.stdkey_home(shift, self.current_prompt_pos)
def _key_end(self, shift):
"""Action for End key"""
if self.is_cursor_on_last_line():
self.stdkey_end(shift)
def _key_pageup(self):
"""Action for PageUp key"""
pass
def _key_pagedown(self):
"""Action for PageDown key"""
pass
def _key_escape(self):
"""Action for ESCAPE key"""
if self.is_completion_widget_visible():
self.hide_completion_widget()
def _key_shift_escape(self):
self.clear_line()
def _key_ctrl_shift_escape(self):
self.clear_terminal()
def _key_question(self, text):
"""Action for '?'"""
if self.get_current_line_to_cursor():
last_obj = self.get_last_obj()
if last_obj and not last_obj.isdigit():
self.show_docstring(last_obj)
self.insert_text(text)
# In case calltip and completion are shown at the same time:
if self.is_completion_widget_visible():
self.completion_text += '?'
def _key_parenleft(self, text):
"""Action for '('"""
self.hide_completion_widget()
if self.get_current_line_to_cursor():
last_obj = self.get_last_obj()
if last_obj and not last_obj.isdigit():
self.show_docstring(last_obj, call=True)
self.insert_text(text)
def _key_period(self, text):
"""Action for '.'"""
self.insert_text(text)
if self.codecompletion_auto:
# Enable auto-completion only if last token isn't a float
last_obj = self.get_last_obj()
if last_obj and not last_obj.isdigit():
self.show_code_completion(automatic=True)
#------ Paste
def paste(self):
"""Reimplemented slot to handle multiline paste action"""
text = unicode(QApplication.clipboard().text())
if len(text.splitlines()) > 1:
# Multiline paste
if self.new_input_line:
self.on_new_line()
self.remove_selected_text() # Remove selection, eventually
end = self.get_current_line_from_cursor()
lines = self.get_current_line_to_cursor() + text + end
self.clear_line()
self.execute_lines(lines)
self.move_cursor(-len(end))
else:
# Standard paste
ShellBaseWidget.paste(self)
#------ Code Completion / Calltips
# Methods implemented in child class:
# (e.g. InternalShell)
def get_dir(self, objtxt):
"""Return dir(object)"""
raise NotImplementedError
def get_completion(self, objtxt):
"""Return completion list associated to object name"""
pass
def get_module_completion(self, objtxt):
"""Return module completion list associated to object name"""
pass
def get_globals_keys(self):
"""Return shell globals() keys"""
raise NotImplementedError
def get_cdlistdir(self):
"""Return shell current directory list dir"""
raise NotImplementedError
def iscallable(self, objtxt):
"""Is object callable?"""
raise NotImplementedError
def get_arglist(self, objtxt):
"""Get func/method argument list"""
raise NotImplementedError
def get__doc__(self, objtxt):
"""Get object __doc__"""
raise NotImplementedError
def get_doc(self, objtxt):
"""Get object documentation"""
raise NotImplementedError
def get_source(self, objtxt):
"""Get object source"""
raise NotImplementedError
def is_defined(self, objtxt, force_import=False):
"""Return True if object is defined"""
raise NotImplementedError
def show_code_completion(self, automatic):
"""Display a completion list based on the current line"""
# Note: unicode conversion is needed only for ExternalShellBase
text = unicode(self.get_current_line_to_cursor())
last_obj = self.get_last_obj()
if text.startswith('import '):
obj_list = self.get_module_completion(text)
words = text.split(' ')
if ',' in words[-1]:
words = words[-1].split(',')
self.show_completion_list(obj_list, completion_text=words[-1],
automatic=automatic)
return
elif text.startswith('from '):
obj_list = self.get_module_completion(text)
if obj_list is None:
return
words = text.split(' ')
if '(' in words[-1]:
words = words[:-2] + words[-1].split('(')
if ',' in words[-1]:
words = words[:-2] + words[-1].split(',')
self.show_completion_list(obj_list, completion_text=words[-1],
automatic=automatic)
return
#-- IPython only -------------------------------------------------------
# Using IPython code completion feature: __IP.complete
elif ' ' in text and not text.endswith(' '):
try1 = text.split(' ')[-1]
obj_list = self.get_completion(try1)
if obj_list:
self.show_completion_list(obj_list, completion_text=try1,
automatic=automatic)
return
elif text.startswith('%'):
# IPython magic commands
obj_list = self.get_completion(text)
if obj_list:
self.show_completion_list(obj_list, completion_text=text,
automatic=automatic)
# There is no point continuing the process when text starts with '%'
return
obj_list = self.get_completion(last_obj)
if not text.endswith('.') and last_obj and obj_list:
self.show_completion_list(obj_list, completion_text=last_obj,
automatic=automatic)
return
#-----------------------------------------------------------------------
obj_dir = self.get_dir(last_obj)
if last_obj and obj_dir and text.endswith('.'):
self.show_completion_list(obj_dir, automatic=automatic)
return
# Builtins and globals
import __builtin__, keyword
if not text.endswith('.') and last_obj \
and re.match(r'[a-zA-Z_0-9]*$', last_obj):
b_k_g = dir(__builtin__)+self.get_globals_keys()+keyword.kwlist
for objname in b_k_g:
if objname.startswith(last_obj) and objname != last_obj:
self.show_completion_list(b_k_g, completion_text=last_obj,
automatic=automatic)
return
else:
return
# Looking for an incomplete completion
if last_obj is None:
last_obj = text
dot_pos = last_obj.rfind('.')
if dot_pos != -1:
if dot_pos == len(last_obj)-1:
completion_text = ""
else:
completion_text = last_obj[dot_pos+1:]
last_obj = last_obj[:dot_pos]
completions = self.get_dir(last_obj)
if completions is not None:
self.show_completion_list(completions,
completion_text=completion_text,
automatic=automatic)
return
# Looking for ' or ": filename completion
q_pos = max([text.rfind("'"), text.rfind('"')])
if q_pos != -1:
self.show_completion_list(self.get_cdlistdir(),
completion_text=text[q_pos+1:],
automatic=automatic)
return
def show_docstring(self, text, call=False, force=False):
"""Show docstring or arguments"""
text = unicode(text) # Useful only for ExternalShellBase
insp_enabled = self.inspector_enabled or force
if force and self.inspector is not None:
self.inspector.dockwidget.setVisible(True)
self.inspector.dockwidget.raise_()
if insp_enabled and (self.inspector is not None) and \
(self.inspector.dockwidget.isVisible()):
# ObjectInspector widget exists and is visible
self.inspector.set_shell(self)
self.inspector.set_object_text(text, ignore_unknown=True)
self.setFocus() # if inspector was not at top level, raising it to
# top will automatically give it focus because of
# the visibility_changed signal, so we must give
# focus back to shell
if call and self.calltips:
# Display argument list if this is function call
iscallable = self.iscallable(text)
if iscallable is not None:
if iscallable:
arglist = self.get_arglist(text)
if isinstance(arglist, bool):
arglist = []
if arglist:
self.show_calltip(_("Arguments"),
arglist, '#129625')
elif self.calltips: # inspector is not visible or link is disabled
doc = self.get__doc__(text)
if doc is not None:
self.show_calltip(_("Documentation"), doc)
#------ Miscellanous
def get_last_obj(self, last=False):
"""
Return the last valid object on the current line
"""
return getobj(self.get_current_line_to_cursor(), last=last)
def set_inspector(self, inspector):
"""Set ObjectInspector DockWidget reference"""
self.inspector = inspector
self.inspector.set_shell(self)
def set_inspector_enabled(self, state):
self.inspector_enabled = state
def inspect_current_object(self):
text = ''
text1 = self.get_text('sol', 'cursor')
tl1 = re.findall(r'([a-zA-Z_]+[0-9a-zA-Z_\.]*)', text1)
if tl1 and text1.endswith(tl1[-1]):
text += tl1[-1]
text2 = self.get_text('cursor', 'eol')
tl2 = re.findall(r'([0-9a-zA-Z_\.]+[0-9a-zA-Z_\.]*)', text2)
if tl2 and text2.startswith(tl2[0]):
text += tl2[0]
if text:
self.show_docstring(text, force=True)
#------ Drag'n Drop
def drop_pathlist(self, pathlist):
"""Drop path list"""
if pathlist:
files = ["r'%s'" % path for path in pathlist]
if len(files) == 1:
text = files[0]
else:
text = "[" + ", ".join(files) + "]"
if self.new_input_line:
self.on_new_line()
self.insert_text(text)
self.setFocus()
class TerminalWidget(ShellBaseWidget):
"""
Terminal widget
"""
COM = 'rem' if os.name == 'nt' else '#'
INITHISTORY = ['%s *** Spyder Terminal History Log ***' % COM, COM,]
SEPARATOR = '%s%s ---(%s)---' % (os.linesep*2, COM, time.ctime())
def __init__(self, parent, history_filename, debug=False, profile=False):
ShellBaseWidget.__init__(self, parent, history_filename, debug, profile)
#------ Key handlers
def _key_other(self, text):
"""1 character key"""
pass
def _key_backspace(self, cursor_position):
"""Action for Backspace key"""
if self.has_selected_text():
self.check_selection()
self.remove_selected_text()
elif self.current_prompt_pos == cursor_position:
# Avoid deleting prompt
return
elif self.is_cursor_on_last_line():
self.stdkey_backspace()
def _key_tab(self):
"""Action for TAB key"""
if self.is_cursor_on_last_line():
self.stdkey_tab()
def _key_ctrl_space(self):
"""Action for Ctrl+Space"""
pass
def _key_escape(self):
"""Action for ESCAPE key"""
self.clear_line()
def _key_question(self, text):
"""Action for '?'"""
self.insert_text(text)
def _key_parenleft(self, text):
"""Action for '('"""
self.insert_text(text)
def _key_period(self, text):
"""Action for '.'"""
self.insert_text(text)
#------ Drag'n Drop
def drop_pathlist(self, pathlist):
"""Drop path list"""
if pathlist:
files = ['"%s"' % path for path in pathlist]
if len(files) == 1:
text = files[0]
else:
text = " ".join(files)
if self.new_input_line:
self.on_new_line()
self.insert_text(text)
self.setFocus()
| gpl-3.0 | -2,504,595,074,585,314,300 | 36.902192 | 80 | 0.522541 | false |
gribozavr/swift | utils/lldb/lldbToolBox.py | 2 | 4787 | """
LLDB Helpers for working with the swift compiler.
Load into LLDB with 'command script import /path/to/lldbToolBox.py'
This will also import LLVM data formatters as well, assuming that llvm is next
to the swift checkout.
"""
import argparse
import os
import shlex
import subprocess
import sys
import tempfile
import lldb
REPO_BASE = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir,
os.pardir, os.pardir))
SWIFT_REPO = os.path.join(REPO_BASE, "swift")
LLVM_REPO = os.path.join(REPO_BASE, "llvm")
LLVM_DATAFORMATTER_PATH = os.path.join(LLVM_REPO, "utils",
"lldbDataFormatters.py")
SWIFT_DATAFORMATTER_PATH = os.path.join(SWIFT_REPO, "utils",
"lldb", "lldbSwiftDataFormatters.py")
def import_llvm_dataformatters(debugger):
if not os.access(LLVM_DATAFORMATTER_PATH, os.F_OK):
print("WARNING! Could not find LLVM data formatters!")
return
cmd = 'command script import {}'.format(LLVM_DATAFORMATTER_PATH)
debugger.HandleCommand(cmd)
print("Loaded LLVM data formatters.")
def import_swift_dataformatters(debugger):
if not os.access(SWIFT_DATAFORMATTER_PATH, os.F_OK):
print("WARNING! Could not find Swift data formatters!")
return
cmd = 'command script import {}'.format(SWIFT_DATAFORMATTER_PATH)
debugger.HandleCommand(cmd)
print("Loaded Swift data formatters.")
VIEWCFG_PATH = os.path.join(SWIFT_REPO, "utils", "viewcfg")
BLOCKIFYASM_PATH = os.path.join(SWIFT_REPO, "utils", "dev-scripts",
"blockifyasm")
def disassemble_asm_cfg(debugger, command, exec_ctx, result, internal_dict):
"""
This function disassembles the current assembly frame into a temporary file
and then uses that temporary file as input to blockifyasm | viewcfg. This
will cause a pdf of the cfg to be opened on Darwin.
"""
d = exec_ctx.frame.Disassemble()
with tempfile.TemporaryFile() as f:
f.write(bytes(d, 'utf-8'))
f.flush()
f.seek(0)
p1 = subprocess.Popen([BLOCKIFYASM_PATH], stdin=f,
stdout=subprocess.PIPE)
subprocess.Popen([VIEWCFG_PATH], stdin=p1.stdout)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
def disassemble_to_file(debugger, command, exec_ctx, result, internal_dict):
"""This function disassembles the current assembly frame into a file specified
by the user.
"""
parser = argparse.ArgumentParser(prog='disassemble-to-file', description="""
Dump the disassembly of the current frame to the specified file.
""")
parser.add_argument('file', type=argparse.FileType('w'),
default=sys.stdout)
args = parser.parse_args(shlex.split(command))
args.file.write(exec_ctx.frame.disassembly)
def sequence(debugger, command, exec_ctx, result, internal_dict):
"""
Combine multiple semicolon separated lldb commands into one command.
This command is particularly useful for defining aliases and breakpoint
commands. Some examples:
# Define an alias that prints rax and also steps one instruction.
command alias xs sequence p/x $rax; stepi
# Breakpoint command to show the frame's info and arguments.
breakpoint command add -o 'seq frame info; reg read arg1 arg2 arg3'
# Override `b` to allow a condition to be specified. For example:
# b someMethod if someVar > 2
command regex b
s/(.+) if (.+)/seq _regexp-break %1; break mod -c "%2"/
s/(.*)/_regexp-break %1/
"""
interpreter = debugger.GetCommandInterpreter()
for subcommand in command.split(';'):
subcommand = subcommand.strip()
if not subcommand:
continue # skip empty commands
ret = lldb.SBCommandReturnObject()
interpreter.HandleCommand(subcommand, exec_ctx, ret)
if ret.GetOutput():
            print(ret.GetOutput().strip(), file=result)
if not ret.Succeeded():
result.SetError(ret.GetError())
result.SetStatus(ret.GetStatus())
return
def __lldb_init_module(debugger, internal_dict):
import_llvm_dataformatters(debugger)
import_swift_dataformatters(debugger)
debugger.HandleCommand('command script add disassemble-asm-cfg '
'-f lldbToolBox.disassemble_asm_cfg')
debugger.HandleCommand('command script add disassemble-to-file '
'-f lldbToolBox.disassemble_to_file')
debugger.HandleCommand('command script add sequence '
'-h "Run multiple semicolon separated commands" '
'-f lldbToolBox.sequence')
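# --- Hedged usage illustration (not part of the original script) -------------
# Once imported into lldb, the commands registered above are available at the
# (lldb) prompt.  The session below is a sketch; the paths are placeholders.
#
#   (lldb) command script import /path/to/swift/utils/lldb/lldbToolBox.py
#   (lldb) sequence frame info; register read
#   (lldb) disassemble-to-file /tmp/current_frame.asm
#   (lldb) disassemble-asm-cfg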
| apache-2.0 | 6,903,050,177,763,743,000 | 36.398438 | 82 | 0.646961 | false |
Azure/azure-sdk-for-python | sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/operations/_firewall_rules_operations.py | 1 | 21913 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FirewallRulesOperations(object):
"""FirewallRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mysql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
server_name, # type: str
firewall_rule_name, # type: str
parameters, # type: "_models.FirewallRule"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.FirewallRule"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.FirewallRule"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FirewallRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FirewallRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('FirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMySQL/servers/{serverName}/firewallRules/{firewallRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
server_name, # type: str
firewall_rule_name, # type: str
parameters, # type: "_models.FirewallRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.FirewallRule"]
"""Creates a new firewall rule or updates an existing firewall rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param firewall_rule_name: The name of the server firewall rule.
:type firewall_rule_name: str
:param parameters: The required parameters for creating or updating a firewall rule.
:type parameters: ~azure.mgmt.rdbms.mysql.models.FirewallRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either FirewallRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mysql.models.FirewallRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
firewall_rule_name=firewall_rule_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMySQL/servers/{serverName}/firewallRules/{firewallRuleName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
server_name, # type: str
firewall_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMySQL/servers/{serverName}/firewallRules/{firewallRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
server_name, # type: str
firewall_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a server firewall rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param firewall_rule_name: The name of the server firewall rule.
:type firewall_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
server_name=server_name,
firewall_rule_name=firewall_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMySQL/servers/{serverName}/firewallRules/{firewallRuleName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
server_name, # type: str
firewall_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FirewallRule"
"""Gets information about a server firewall rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param firewall_rule_name: The name of the server firewall rule.
:type firewall_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FirewallRule, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mysql.models.FirewallRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMySQL/servers/{serverName}/firewallRules/{firewallRuleName}'} # type: ignore
def list_by_server(
self,
resource_group_name, # type: str
server_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FirewallRuleListResult"]
"""List all the firewall rules in a given server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FirewallRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.mysql.models.FirewallRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_server.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FirewallRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMySQL/servers/{serverName}/firewallRules'} # type: ignore
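# --- Hedged usage sketch (not part of the generated code) --------------------
# How this operations class is typically reached through the service client.
# ``MySQLManagementClient`` and ``DefaultAzureCredential`` are assumed to come
# from azure-mgmt-rdbms and azure-identity; the subscription id, resource
# group, server and rule names below are placeholders.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.rdbms.mysql import MySQLManagementClient
#
#   client = MySQLManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.firewall_rules.begin_create_or_update(
#       "my-resource-group", "my-server", "allow-office-range",
#       {"start_ip_address": "203.0.113.0", "end_ip_address": "203.0.113.255"})
#   rule = poller.result()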
| mit | 1,473,500,773,960,985,900 | 48.915718 | 219 | 0.635513 | false |
danxshap/django-rest-surveys | rest_surveys/views.py | 1 | 2197 | from __future__ import unicode_literals
from django.conf import settings
from rest_framework import mixins, viewsets
from django_filters.rest_framework import DjangoFilterBackend
from rest_surveys.serializers import SurveySerializer
from rest_surveys.utils import get_field_names, to_class
import swapper
Survey = swapper.load_model('rest_surveys', 'Survey')
SurveyResponse = swapper.load_model('rest_surveys', 'SurveyResponse')
class SurveyResponseViewSet(mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
queryset = SurveyResponse.objects.all()
serializer_class = to_class(
getattr(
settings,
'REST_SURVEYS_SURVEYRESPONSE_SERIALIZER',
'rest_surveys.serializers.SurveyResponseSerializer'
)
)
authentication_classes = [to_class(authentication_class) for authentication_class in getattr(
settings,
'REST_SURVEYS_SURVEYRESPONSE_AUTHENTICATION_CLASSES',
['rest_framework.authentication.SessionAuthentication']
)]
permission_classes = [to_class(permission_class) for permission_class in getattr(
settings,
'REST_SURVEYS_SURVEYRESPONSE_PERMISSION_CLASSES',
['rest_framework.permissions.IsAuthenticated']
)]
filter_backends = (DjangoFilterBackend,)
filter_fields = getattr(settings,
'REST_SURVEYS_SURVEYRESPONSE_FILTER_FIELDS',
get_field_names(SurveyResponse))
class SurveyViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
queryset = Survey.objects.all()
serializer_class = SurveySerializer
authentication_classes = [to_class(authentication_class) for authentication_class in getattr(
settings,
'REST_SURVEYS_SURVEY_AUTHENTICATION_CLASSES',
['rest_framework.authentication.SessionAuthentication']
)]
permission_classes = [to_class(permission_class) for permission_class in getattr(
settings,
'REST_SURVEYS_SURVEY_PERMISSION_CLASSES',
['rest_framework.permissions.IsAuthenticated']
)]
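# --- Hedged usage sketch (not part of the original module) -------------------
# Typical wiring of these viewsets into a DRF router; the URL prefixes are
# illustrative, not taken from rest_surveys itself.
#
#   from rest_framework import routers
#
#   router = routers.DefaultRouter()
#   router.register(r'surveys', SurveyViewSet)
#   router.register(r'survey-responses', SurveyResponseViewSet)
#   urlpatterns = router.urls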
| mit | -5,330,597,751,584,385,000 | 38.945455 | 97 | 0.689577 | false |
Cue/eventlet | eventlet/hubs/__init__.py | 7 | 4048 | import sys
import os
from eventlet.support import greenlets as greenlet
from eventlet import patcher
__all__ = ["use_hub", "get_hub", "get_default_hub", "trampoline"]
threading = patcher.original('threading')
_threadlocal = threading.local()
def get_default_hub():
"""Select the default hub implementation based on what multiplexing
libraries are installed. The order that the hubs are tried is:
* twistedr
* epoll
* poll
* select
It won't automatically select the pyevent hub, because it's not
python-thread-safe.
.. include:: ../../doc/common.txt
.. note :: |internal|
"""
# pyevent hub disabled for now because it is not thread-safe
#try:
# import eventlet.hubs.pyevent
# return eventlet.hubs.pyevent
#except:
# pass
select = patcher.original('select')
try:
import eventlet.hubs.epolls
return eventlet.hubs.epolls
except ImportError:
if hasattr(select, 'poll'):
import eventlet.hubs.poll
return eventlet.hubs.poll
else:
import eventlet.hubs.selects
return eventlet.hubs.selects
def use_hub(mod=None):
"""Use the module *mod*, containing a class called Hub, as the
event hub. Usually not required; the default hub is usually fine.
Mod can be an actual module, a string, or None. If *mod* is a module,
it uses it directly. If *mod* is a string, use_hub tries to import
`eventlet.hubs.mod` and use that as the hub module. If *mod* is None,
use_hub uses the default hub. Only call use_hub during application
initialization, because it resets the hub's state and any existing
timers or listeners will never be resumed.
"""
if mod is None:
mod = os.environ.get('EVENTLET_HUB', None)
if mod is None:
mod = get_default_hub()
if hasattr(_threadlocal, 'hub'):
del _threadlocal.hub
if isinstance(mod, str):
assert mod.strip(), "Need to specify a hub"
mod = __import__('eventlet.hubs.' + mod, globals(), locals(), ['Hub'])
if hasattr(mod, 'Hub'):
_threadlocal.Hub = mod.Hub
else:
_threadlocal.Hub = mod
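# Hedged illustration (not in the original module): selecting a specific hub
# by name before any green threads are spawned, e.g. at program start-up.
#
#   import eventlet.hubs
#   eventlet.hubs.use_hub('poll')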
def get_hub():
"""Get the current event hub singleton object.
.. note :: |internal|
"""
try:
hub = _threadlocal.hub
except AttributeError:
try:
_threadlocal.Hub
except AttributeError:
use_hub()
hub = _threadlocal.hub = _threadlocal.Hub()
return hub
from eventlet import timeout
def trampoline(fd, read=None, write=None, timeout=None,
timeout_exc=timeout.Timeout):
"""Suspend the current coroutine until the given socket object or file
descriptor is ready to *read*, ready to *write*, or the specified
*timeout* elapses, depending on arguments specified.
To wait for *fd* to be ready to read, pass *read* ``=True``; ready to
write, pass *write* ``=True``. To specify a timeout, pass the *timeout*
argument in seconds.
If the specified *timeout* elapses before the socket is ready to read or
write, *timeout_exc* will be raised instead of ``trampoline()``
returning normally.
.. note :: |internal|
"""
t = None
hub = get_hub()
current = greenlet.getcurrent()
assert hub.greenlet is not current, 'do not call blocking functions from the mainloop'
assert not (read and write), 'not allowed to trampoline for reading and writing'
try:
fileno = fd.fileno()
except AttributeError:
fileno = fd
if timeout is not None:
t = hub.schedule_call_global(timeout, current.throw, timeout_exc)
try:
if read:
listener = hub.add(hub.READ, fileno, current.switch)
elif write:
listener = hub.add(hub.WRITE, fileno, current.switch)
try:
return hub.switch()
finally:
hub.remove(listener)
finally:
if t is not None:
t.cancel()
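def _example_wait_readable(sock, timeout=5):
    """Hedged illustration (not in the original module): a green thread blocks
    on trampoline() until *sock* is readable, then reads from it.  The buffer
    size and the 5 second timeout are arbitrary choices for the example."""
    trampoline(sock, read=True, timeout=timeout)
    return sock.recv(4096)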
| mit | -4,491,102,245,266,735,000 | 31.126984 | 90 | 0.623518 | false |
yeokm1/bus-number-reader | arduino/libraries/ble-sdk-arduino-master/Build/BuildBLE.py | 5 | 1287 | import os
import commands
print "Building all BLE examples"
Our_dir = os.getcwd() # get current directory
if "ARDUINO_DIR" in os.environ:
#Find out a way to use the variable ARDUINO_DIR, as it is set now
#Right now it has to be relative in order for the makefile to work
#BLE_DIR = os.environ.get('ARDUINO_DIR')+"/libraries/BLE"
BLE_DIR = "/cygdrive/c/Arduino/libraries/BLE"
#BLE_EXAMPLES_DIR = "libraries/BLE/examples/ble_A_Hello_World_Program"
BLE_EXAMPLES_DIR = BLE_DIR+"/examples"
print "BLE_DIR=%s" %BLE_DIR
print "BLE_EXAMPLES_DIR=%s" %BLE_EXAMPLES_DIR
os.chdir(BLE_EXAMPLES_DIR)
#os.system("ls")
BLE_FOLDERS = commands.getoutput('ls')
LIST_EXAMPLES=BLE_FOLDERS.split()
print "====================", "\nCompiling each example using the ARDMK_FILE"
for element in LIST_EXAMPLES:
print "\n\n\nChange directory:"
OBJ_EXAMPLE=BLE_EXAMPLES_DIR+"/"+element
print OBJ_EXAMPLE
os.chdir(OBJ_EXAMPLE)
print "make clean"
os.system("make clean")
os.system("make")
print "===================="
print "Go back to our folder %s" %Our_dir
os.chdir(Our_dir)
else:
print "Environmental variable ARDUINO_DIR not declared."
print "Python script unable to run." | mit | 683,465,716,063,125,900 | 33.810811 | 81 | 0.640249 | false |
zestrada/nova-cs498cc | nova/tests/conf_fixture.py | 8 | 3244 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo.config import cfg
from nova import config
from nova import ipv6
from nova import paths
from nova.tests import utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.manager')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('policy_file', 'nova.policy')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('api_paste_config', 'nova.wsgi')
class ConfFixture(fixtures.Fixture):
"""Fixture to manage global conf settings."""
def __init__(self, conf):
self.conf = conf
def setUp(self):
super(ConfFixture, self).setUp()
self.conf.set_default('api_paste_config',
paths.state_path_def('etc/nova/api-paste.ini'))
self.conf.set_default('host', 'fake-mini')
self.conf.set_default('compute_driver', 'nova.virt.fake.FakeDriver')
self.conf.set_default('fake_network', True)
self.conf.set_default('fake_rabbit', True)
self.conf.set_default('flat_network_bridge', 'br100')
self.conf.set_default('floating_ip_dns_manager',
'nova.tests.utils.dns_manager')
self.conf.set_default('instance_dns_manager',
'nova.tests.utils.dns_manager')
self.conf.set_default('lock_path', None)
self.conf.set_default('network_size', 8)
self.conf.set_default('num_networks', 2)
self.conf.set_default('rpc_backend',
'nova.openstack.common.rpc.impl_fake')
self.conf.set_default('rpc_cast_timeout', 5)
self.conf.set_default('rpc_response_timeout', 5)
self.conf.set_default('sql_connection', "sqlite://")
self.conf.set_default('sqlite_synchronous', False)
self.conf.set_default('use_ipv6', True)
self.conf.set_default('verbose', True)
self.conf.set_default('vlan_interface', 'eth0')
config.parse_args([], default_config_files=[])
self.addCleanup(self.conf.reset)
self.addCleanup(utils.cleanup_dns_managers)
self.addCleanup(ipv6.api.reset_backend)
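# --- Hedged usage sketch (not part of the original module) -------------------
# A test case would normally install the fixture like this; ``useFixture``
# comes from fixtures/testtools-based base classes such as nova's TestCase.
#
#   class MyUnitTest(testtools.TestCase):
#       def setUp(self):
#           super(MyUnitTest, self).setUp()
#           self.useFixture(ConfFixture(CONF))
#           # CONF now carries the safe test defaults set above.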
| apache-2.0 | 5,529,820,310,094,195,000 | 41.684211 | 78 | 0.67201 | false |
lazybios/v2ex | avatar.py | 16 | 1714 | #!/usr/bin/env python
# coding=utf-8
from google.appengine.ext import webapp
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext.webapp import util
from v2ex.babel import Avatar
from v2ex.babel.security import *
from v2ex.babel.da import *
class AvatarHandler(webapp.RequestHandler):
def get(self, member_num, size):
avatar = GetKindByName('Avatar', 'avatar_' + str(member_num) + '_' + str(size))
if avatar:
self.response.headers['Content-Type'] = "image/png"
self.response.headers['Cache-Control'] = "max-age=172800, public, must-revalidate"
self.response.headers['Expires'] = "Sun, 25 Apr 2011 20:00:00 GMT"
self.response.out.write(avatar.content)
else:
self.redirect('/static/img/avatar_' + str(size) + '.png')
class NodeAvatarHandler(webapp.RequestHandler):
def get(self, node_num, size):
avatar = GetKindByName('Avatar', 'node_' + str(node_num) + '_' + str(size))
if avatar:
self.response.headers['Content-Type'] = "image/png"
self.response.headers['Cache-Control'] = "max-age=172800, public, must-revalidate"
self.response.headers['Expires'] = "Sun, 25 Apr 2011 20:00:00 GMT"
self.response.out.write(avatar.content)
else:
self.error(404)
def main():
application = webapp.WSGIApplication([
('/avatar/([0-9]+)/(large|normal|mini)', AvatarHandler),
('/navatar/([0-9]+)/(large|normal|mini)', NodeAvatarHandler)
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| bsd-3-clause | 2,484,973,681,558,934,000 | 36.26087 | 94 | 0.615519 | false |
SmartInfrastructures/dreamer | tools/test/topos/tower.py | 39 | 1460 | #!/usr/bin/env python
from mininet.topo import Topo
from mininet.cli import CLI
from mininet.net import Mininet
from mininet.node import RemoteController, OVSKernelSwitch
from mininet.log import setLogLevel
class TowerTopo( Topo ):
"""Create a tower topology"""
def build( self, k=4, h=6 ):
spines = []
leaves = []
hosts = []
# Create the two spine switches
spines.append(self.addSwitch('s1'))
spines.append(self.addSwitch('s2'))
# Create two links between the spine switches
self.addLink(spines[0], spines[1])
#TODO add second link between spines when multi-link topos are supported
#self.addLink(spines[0], spines[1])
# Now create the leaf switches, their hosts and connect them together
i = 1
c = 0
while i <= k:
leaves.append(self.addSwitch('s1%d' % i))
for spine in spines:
self.addLink(leaves[i-1], spine)
j = 1
while j <= h:
hosts.append(self.addHost('h%d%d' % (i, j)))
self.addLink(hosts[c], leaves[i-1])
j+=1
c+=1
i+=1
topos = { 'tower': TowerTopo }
def run():
topo = TowerTopo()
net = Mininet( topo=topo, controller=RemoteController, autoSetMacs=True )
net.start()
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
run()
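# Hedged note (not part of the original file): because the ``topos`` dict is
# defined above, this topology can also be loaded straight from the Mininet
# command line; the controller address below is a placeholder.
#
#   sudo mn --custom tower.py --topo tower --controller remote,ip=127.0.0.1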
| apache-2.0 | -785,527,125,628,379,600 | 25.545455 | 80 | 0.560959 | false |
mindw/numpy | numpy/core/getlimits.py | 12 | 9474 | """Machine limits for Float32 and Float64 and (long double) if available...
"""
from __future__ import division, absolute_import, print_function
__all__ = ['finfo', 'iinfo']
from .machar import MachAr
from . import numeric
from . import numerictypes as ntypes
from .numeric import array
def _frz(a):
"""fix rank-0 --> rank-1"""
if a.ndim == 0: a.shape = (1,)
return a
_convert_to_float = {
ntypes.csingle: ntypes.single,
ntypes.complex_: ntypes.float_,
ntypes.clongfloat: ntypes.longfloat
}
class finfo(object):
"""
finfo(dtype)
Machine limits for floating point types.
Attributes
----------
eps : float
The smallest representable positive number such that
``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating
point type.
epsneg : floating point number of the appropriate type
The smallest representable positive number such that
``1.0 - epsneg != 1.0``.
iexp : int
The number of bits in the exponent portion of the floating point
representation.
machar : MachAr
The object which calculated these parameters and holds more
detailed information.
machep : int
The exponent that yields `eps`.
max : floating point number of the appropriate type
The largest representable number.
maxexp : int
The smallest positive power of the base (2) that causes overflow.
min : floating point number of the appropriate type
The smallest representable number, typically ``-max``.
minexp : int
The most negative power of the base (2) consistent with there
being no leading 0's in the mantissa.
negep : int
The exponent that yields `epsneg`.
nexp : int
The number of bits in the exponent including its sign and bias.
nmant : int
The number of bits in the mantissa.
precision : int
The approximate number of decimal digits to which this kind of
float is precise.
resolution : floating point number of the appropriate type
The approximate decimal resolution of this type, i.e.,
``10**-precision``.
tiny : float
The smallest positive usable number. Type of `tiny` is an
appropriate floating point type.
Parameters
----------
dtype : float, dtype, or instance
Kind of floating point data-type about which to get information.
See Also
--------
MachAr : The implementation of the tests that produce this information.
iinfo : The equivalent for integer data types.
Notes
-----
For developers of NumPy: do not instantiate this at the module level.
The initial calculation of these parameters is expensive and negatively
impacts import times. These objects are cached, so calling ``finfo()``
repeatedly inside your functions is not a problem.
"""
_finfo_cache = {}
def __new__(cls, dtype):
try:
dtype = numeric.dtype(dtype)
except TypeError:
# In case a float instance was given
dtype = numeric.dtype(type(dtype))
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
dtypes = [dtype]
newdtype = numeric.obj2sctype(dtype)
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
if not issubclass(dtype, numeric.inexact):
raise ValueError("data type %r not inexact" % (dtype))
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
if not issubclass(dtype, numeric.floating):
newdtype = _convert_to_float[dtype]
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
obj = object.__new__(cls)._init(dtype)
for dt in dtypes:
cls._finfo_cache[dt] = obj
return obj
def _init(self, dtype):
self.dtype = numeric.dtype(dtype)
if dtype is ntypes.double:
itype = ntypes.int64
fmt = '%24.16e'
precname = 'double'
elif dtype is ntypes.single:
itype = ntypes.int32
fmt = '%15.7e'
precname = 'single'
elif dtype is ntypes.longdouble:
itype = ntypes.longlong
fmt = '%s'
precname = 'long double'
elif dtype is ntypes.half:
itype = ntypes.int16
fmt = '%12.5e'
precname = 'half'
else:
raise ValueError(repr(dtype))
machar = MachAr(lambda v:array([v], dtype),
lambda v:_frz(v.astype(itype))[0],
lambda v:array(_frz(v)[0], dtype),
lambda v: fmt % array(_frz(v)[0], dtype),
'numpy %s precision floating point number' % precname)
for word in ['precision', 'iexp',
'maxexp', 'minexp', 'negep',
'machep']:
setattr(self, word, getattr(machar, word))
for word in ['tiny', 'resolution', 'epsneg']:
setattr(self, word, getattr(machar, word).flat[0])
self.max = machar.huge.flat[0]
self.min = -self.max
self.eps = machar.eps.flat[0]
self.nexp = machar.iexp
self.nmant = machar.it
self.machar = machar
self._str_tiny = machar._str_xmin.strip()
self._str_max = machar._str_xmax.strip()
self._str_epsneg = machar._str_epsneg.strip()
self._str_eps = machar._str_eps.strip()
self._str_resolution = machar._str_resolution.strip()
return self
def __str__(self):
return '''\
Machine parameters for %(dtype)s
---------------------------------------------------------------------
precision=%(precision)3s resolution= %(_str_resolution)s
machep=%(machep)6s eps= %(_str_eps)s
negep =%(negep)6s epsneg= %(_str_epsneg)s
minexp=%(minexp)6s tiny= %(_str_tiny)s
maxexp=%(maxexp)6s max= %(_str_max)s
nexp =%(nexp)6s min= -max
---------------------------------------------------------------------
''' % self.__dict__
def __repr__(self):
c = self.__class__.__name__
d = self.__dict__.copy()
d['klass'] = c
return ("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," \
+ " max=%(_str_max)s, dtype=%(dtype)s)") \
% d
class iinfo(object):
"""
iinfo(type)
Machine limits for integer types.
Attributes
----------
min : int
The smallest integer expressible by the type.
max : int
The largest integer expressible by the type.
Parameters
----------
int_type : integer type, dtype, or instance
The kind of integer data type to get information about.
See Also
--------
finfo : The equivalent for floating point data types.
Examples
--------
With types:
>>> ii16 = np.iinfo(np.int16)
>>> ii16.min
-32768
>>> ii16.max
32767
>>> ii32 = np.iinfo(np.int32)
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
With instances:
>>> ii32 = np.iinfo(np.int32(10))
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
"""
_min_vals = {}
_max_vals = {}
def __init__(self, int_type):
try:
self.dtype = numeric.dtype(int_type)
except TypeError:
self.dtype = numeric.dtype(type(int_type))
self.kind = self.dtype.kind
self.bits = self.dtype.itemsize * 8
self.key = "%s%d" % (self.kind, self.bits)
if not self.kind in 'iu':
raise ValueError("Invalid integer data type.")
def min(self):
"""Minimum value of given dtype."""
if self.kind == 'u':
return 0
else:
try:
val = iinfo._min_vals[self.key]
except KeyError:
val = int(-(1 << (self.bits-1)))
iinfo._min_vals[self.key] = val
return val
min = property(min)
def max(self):
"""Maximum value of given dtype."""
try:
val = iinfo._max_vals[self.key]
except KeyError:
if self.kind == 'u':
val = int((1 << self.bits) - 1)
else:
val = int((1 << (self.bits-1)) - 1)
iinfo._max_vals[self.key] = val
return val
max = property(max)
def __str__(self):
"""String representation."""
return '''\
Machine parameters for %(dtype)s
---------------------------------------------------------------------
min = %(min)s
max = %(max)s
---------------------------------------------------------------------
''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
def __repr__(self):
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
self.min, self.max, self.dtype)
if __name__ == '__main__':
f = finfo(ntypes.single)
print('single epsilon:', f.eps)
print('single tiny:', f.tiny)
f = finfo(ntypes.float)
print('float epsilon:', f.eps)
print('float tiny:', f.tiny)
f = finfo(ntypes.longfloat)
print('longfloat epsilon:', f.eps)
print('longfloat tiny:', f.tiny)
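    # Hedged addition (not in the original module): iinfo is used the same way
    # for integer types.
    ii = iinfo(ntypes.int32)
    print('int32 min:', ii.min)
    print('int32 max:', ii.max)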
| bsd-3-clause | 6,760,943,881,196,687,000 | 29.960784 | 78 | 0.540954 | false |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/join/cosine_join.py | 1 | 8912 | # cosine join
def cosine_join(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op='>=',
allow_empty=True, allow_missing=False,
l_out_attrs=None, r_out_attrs=None,
l_out_prefix='l_', r_out_prefix='r_',
out_sim_score=True, n_jobs=1, show_progress=True):
"""Join two tables using a variant of cosine similarity known as Ochiai
coefficient.
This is not the cosine measure that computes the cosine of the angle
between two given vectors. Rather, it is a variant of cosine measure known
as Ochiai coefficient (see the Wikipedia page
`Cosine Similarity <https://en.wikipedia.org/wiki/Cosine_similarity>`_).
Specifically, for two sets X and Y, this measure computes:
:math:`cosine(X, Y) = \\frac{|X \\cap Y|}{\\sqrt{|X| \\cdot |Y|}}`
In the case where one of X and Y is an empty set and the other is a
non-empty set, we define their cosine score to be 0. In the case where both
X and Y are empty sets, we define their cosine score to be 1.
Finds tuple pairs from left table and right table such that the cosine
similarity between the join attributes satisfies the condition on input
threshold. For example, if the comparison operator is '>=', finds tuple
pairs whose cosine similarity between the strings that are the values of
the join attributes is greater than or equal to the input threshold, as
specified in "threshold".
Args:
ltable (DataFrame): left input table.
rtable (DataFrame): right input table.
l_key_attr (string): key attribute in left table.
r_key_attr (string): key attribute in right table.
l_join_attr (string): join attribute in left table.
r_join_attr (string): join attribute in right table.
tokenizer (Tokenizer): tokenizer to be used to tokenize join
attributes.
threshold (float): cosine similarity threshold to be satisfied.
comp_op (string): comparison operator. Supported values are '>=', '>'
and '=' (defaults to '>=').
allow_empty (boolean): flag to indicate whether tuple pairs with empty
set of tokens in both the join attributes should be included in the
output (defaults to True).
allow_missing (boolean): flag to indicate whether tuple pairs with
missing value in at least one of the join attributes should be
included in the output (defaults to False). If this flag is set to
True, a tuple in ltable with missing value in the join attribute
will be matched with every tuple in rtable and vice versa.
l_out_attrs (list): list of attribute names from the left table to be
included in the output table (defaults to None).
r_out_attrs (list): list of attribute names from the right table to be
included in the output table (defaults to None).
l_out_prefix (string): prefix to be used for the attribute names coming
from the left table, in the output table (defaults to 'l\_').
r_out_prefix (string): prefix to be used for the attribute names coming
from the right table, in the output table (defaults to 'r\_').
out_sim_score (boolean): flag to indicate whether similarity score
should be included in the output table (defaults to True). Setting
this flag to True will add a column named '_sim_score' in the
output table. This column will contain the similarity scores for the
tuple pairs in the output.
n_jobs (int): number of parallel jobs to use for the computation
(defaults to 1). If -1 is given, all CPUs are used. If 1 is given,
no parallel computing code is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used
(where n_cpus is the total number of CPUs in the machine). Thus for
n_jobs = -2, all CPUs but one are used. If (n_cpus + 1 + n_jobs)
becomes less than 1, then no parallel computing code will be used
(i.e., equivalent to the default).
show_progress (boolean): flag to indicate whether task progress should
be displayed to the user (defaults to True).
Returns:
An output table containing tuple pairs that satisfy the join
condition (DataFrame).
"""
from py_stringsimjoin import __use_cython__
if __use_cython__:
from py_stringsimjoin.join.cosine_join_cy import cosine_join_cy
return cosine_join_cy(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op,
allow_empty, allow_missing,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, n_jobs, show_progress)
else:
from py_stringsimjoin.join.cosine_join_py import cosine_join_py
return cosine_join_py(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op,
allow_empty, allow_missing,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, n_jobs, show_progress)
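# --- Hedged usage sketch (not part of the original module) -------------------
# Worked example of the Ochiai score: for X = {a, b, c} and Y = {b, c, d},
# |X & Y| = 2 and sqrt(|X| * |Y|) = 3, so cosine(X, Y) = 2 / 3 ~= 0.667.
#
# A typical call, assuming pandas DataFrames A and B with 'id'/'name' columns
# and a set-returning tokenizer from py_stringmatching (the import path and
# column names are assumptions, not taken from this file):
#
#   from py_stringmatching import WhitespaceTokenizer
#
#   ws = WhitespaceTokenizer(return_set=True)
#   pairs = cosine_join(A, B, 'id', 'id', 'name', 'name', ws, 0.7,
#                       l_out_attrs=['name'], r_out_attrs=['name'])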
| bsd-3-clause | 5,833,514,857,801,300,000 | 73.266667 | 127 | 0.386782 | false |
cisco-openstack/tempest | tempest/api/compute/admin/test_networks.py | 2 | 2940 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
class NetworksTest(base.BaseV2ComputeAdminTest):
"""Tests Nova Networks API that usually requires admin privileges.
API docs:
https://docs.openstack.org/api-ref/compute/#networks-os-networks-deprecated
"""
max_microversion = '2.35'
@classmethod
def setup_clients(cls):
super(NetworksTest, cls).setup_clients()
cls.client = cls.os_admin.compute_networks_client
@decorators.idempotent_id('d206d211-8912-486f-86e2-a9d090d1f416')
def test_get_network(self):
"""Test getting network from nova side"""
networks = self.client.list_networks()['networks']
if CONF.compute.fixed_network_name:
configured_network = [x for x in networks if x['label'] ==
CONF.compute.fixed_network_name]
self.assertEqual(1, len(configured_network),
"{0} networks with label {1}".format(
len(configured_network),
CONF.compute.fixed_network_name))
elif CONF.network.public_network_id:
configured_network = [x for x in networks if x['id'] ==
CONF.network.public_network_id]
else:
raise self.skipException(
"Environment has no known-for-sure existing network.")
configured_network = configured_network[0]
network = (self.client.show_network(configured_network['id'])
['network'])
self.assertEqual(configured_network['label'], network['label'])
@decorators.idempotent_id('df3d1046-6fa5-4b2c-ad0c-cfa46a351cb9')
def test_list_all_networks(self):
"""Test getting all networks from nova side"""
networks = self.client.list_networks()['networks']
# Check the configured network is in the list
if CONF.compute.fixed_network_name:
configured_network = CONF.compute.fixed_network_name
self.assertIn(configured_network, [x['label'] for x in networks])
else:
network_labels = [x['label'] for x in networks]
self.assertNotEmpty(network_labels)
| apache-2.0 | 2,261,651,658,045,614,800 | 42.235294 | 79 | 0.642177 | false |
badock/nova | nova/virt/vmwareapi/constants.py | 3 | 1383 | # Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared constants across the VMware driver
"""
from nova.network import model as network_model
DISK_FORMAT_ISO = 'iso'
DISK_FORMAT_VMDK = 'vmdk'
DISK_FORMATS_ALL = [DISK_FORMAT_ISO, DISK_FORMAT_VMDK]
DISK_TYPE_SPARSE = 'sparse'
DISK_TYPE_PREALLOCATED = 'preallocated'
DEFAULT_VIF_MODEL = network_model.VIF_MODEL_E1000
DEFAULT_OS_TYPE = "otherGuest"
DEFAULT_ADAPTER_TYPE = "lsiLogic"
DEFAULT_DISK_TYPE = DISK_TYPE_PREALLOCATED
DEFAULT_DISK_FORMAT = DISK_FORMAT_VMDK
ADAPTER_TYPE_BUSLOGIC = "busLogic"
ADAPTER_TYPE_IDE = "ide"
ADAPTER_TYPE_LSILOGICSAS = "lsiLogicsas"
ADAPTER_TYPE_PARAVIRTUAL = "paraVirtual"
SUPPORTED_FLAT_VARIANTS = ["thin", "preallocated", "thick", "eagerZeroedThick"]
EXTENSION_KEY = 'org.openstack.compute'
EXTENSION_TYPE_INSTANCE = 'instance'
| apache-2.0 | 659,830,939,234,525,400 | 31.928571 | 79 | 0.741142 | false |
greeness/yahtzee-optimal-strategy | parse_optimal.py | 1 | 2451 | from scoring import Category
from array import array
log = open('./data/small_T')
idx = -1
optimal = []
def to_id(name):
return Category.CATEGORY_NAME_TO_ID[name]
kept_option = array('B')
id_to_state = {}
highest_bit = 0
nbits = 0
action_option = array("B")
n = 0
for line in log:
if line[0] == '$':
continue
if line[0] == '#' and len(line) >= 5:
state = int(line[4:-1])
id_to_state[len(id_to_state)] = state
idx = -1
roll_left = 0
n = len(id_to_state)-1
if n and n %1001 == 0:
print len(id_to_state)/ (357632.0),len(action_option), len(kept_option)/n
action_option = array("B")
if line[0] == '#' and len(line) < 5:
idx = -1
roll_left += 1
# apend best action to the end of kept_option for this state
if roll_left == 1:
kept_option.extend(action_option)
#n+=1
nbits = 0
highest_bit = 0
if idx >= 0:
mapper_func = int if roll_left >= 1 else to_id
res = map(mapper_func, line.strip().split(' '))
mask = 0
if roll_left == 0:
segment = 4
added_move = 0
for i,move in enumerate(res):
if i<= 1:
mask |= move << (segment*i)
added_move += 1
# set to all 1s for n/a options
if added_move == 1:
mask |= ((1<<segment)-1) << segment
action_option.append(mask)
else:
for i,move in enumerate(res):
kept_option.append(move & 0b11111111)
highest_bit >>= 1
# the 9th bit of each move
highest_bit |= (move & 0b100000000)>>1
# set to all 1s for n/a options
for i in range(2-len(res)):
#
kept_option.append(0b11111111)
highest_bit >>= 1
highest_bit |= 0b10000000
nbits += 2
if nbits == 8:
kept_option.append(highest_bit)
nbits = 0
highest_bit = 0
idx += 1
kept_option.tofile(open('./data/options.dat','w+'))
import json
json.dump(id_to_state, open('./data/id_to_state.json','w+'),indent=2)
print 'done'
| mit | -6,284,049,645,558,799,000 | 25.652174 | 85 | 0.453284 | false |
hughsaunders/python-openstackclient | doc/source/conf.py | 1 | 8206 | # -*- coding: utf-8 -*-
#
# OpenStack Command Line Client documentation build configuration file, created
# by sphinx-quickstart on Wed May 16 12:05:58 2012.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenStack Command Line Client'
copyright = u'2012, OpenStack'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenStackCommandLineClientdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OpenStackCommandLineClient.tex',
u'OpenStack Command Line Client Documentation',
u'OpenStack', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openstackcommandlineclient',
u'OpenStack Command Line Client Documentation',
[u'OpenStack'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OpenStackCommandLineClient',
u'OpenStack Command Line Client Documentation',
u'OpenStack', 'OpenStackCommandLineClient',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| apache-2.0 | -5,045,885,596,383,731,000 | 30.929961 | 79 | 0.704972 | false |
mhl/yournextmp-popit | mysite/settings.py | 1 | 7006 | """
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
import os
import sys
import yaml
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
configuration_file = os.path.join(
BASE_DIR, 'conf', 'general.yml'
)
with open(configuration_file) as f:
    conf = yaml.safe_load(f)  # safe_load: plain YAML only, no arbitrary object construction
ALLOWED_HOSTS = conf.get('ALLOWED_HOSTS')
# Load the credentials for the PopIt instance
POPIT_INSTANCE = conf['POPIT_INSTANCE']
POPIT_HOSTNAME = conf['POPIT_HOSTNAME']
POPIT_PORT = conf.get('POPIT_PORT', 80)
POPIT_USER = conf.get('POPIT_USER', '')
POPIT_PASSWORD = conf.get('POPIT_PASSWORD', '')
POPIT_API_KEY = conf.get('POPIT_API_KEY', '')
# Email addresses that error emails are sent to when DEBUG = False
ADMINS = conf['ADMINS']
# The From: address for all emails except error emails
DEFAULT_FROM_EMAIL = conf['DEFAULT_FROM_EMAIL']
# The From: address for error emails
SERVER_EMAIL = conf['SERVER_EMAIL']
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = conf['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(conf.get('STAGING')))
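# STAGING comes from conf/general.yml; any non-zero value turns DEBUG on.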
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'mysite', 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS += (
# Required by allauth template tags
"django.core.context_processors.request",
# allauth specific context processors
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django_nose',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.twitter',
'pipeline',
'candidates',
'debug_toolbar',
'south',
)
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
SOCIALACCOUNT_PROVIDERS = {
'google': {'SCOPE': ['https://www.googleapis.com/auth/userinfo.profile'],
'AUTH_PARAMS': {'access_type': 'online'}},
'facebook': {'SCOPE': ['email',]},
}
LOGIN_REDIRECT_URL = '/'
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = True
SOCIALACCOUNT_AUTO_SIGNUP = True
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
if conf.get('DATABASE_SYSTEM') == 'postgresql':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': conf.get('YNMP_DB_NAME'),
'USER': conf.get('YNMP_DB_USER'),
'PASSWORD': conf.get('YNMP_DB_PASS'),
'HOST': conf.get('YNMP_DB_HOST'),
'PORT': conf.get('YNMP_DB_PORT'),
}
}
else:
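    # Fall back to a file-based SQLite database when PostgreSQL is not configured.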
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
if 'test' not in sys.argv:
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
PIPELINE_CSS = {
'all': {
'source_filenames': (
'candidates/style.scss',
'select2/select2.css',
'jquery/jquery-ui.css',
'jquery/jquery-ui.structure.css',
'jquery/jquery-ui.theme.css',
),
'output_filename': 'css/all.css',
}
}
PIPELINE_JS = {
'all': {
'source_filenames': (
'jquery/jquery-1.11.1.js',
'jquery/jquery-ui.js',
'foundation/js/foundation/foundation.js',
'foundation/js/foundation/foundation.equalizer.js',
'foundation/js/foundation/foundation.dropdown.js',
'foundation/js/foundation/foundation.tooltip.js',
'foundation/js/foundation/foundation.offcanvas.js',
'foundation/js/foundation/foundation.accordion.js',
'foundation/js/foundation/foundation.joyride.js',
'foundation/js/foundation/foundation.alert.js',
'foundation/js/foundation/foundation.topbar.js',
'foundation/js/foundation/foundation.reveal.js',
'foundation/js/foundation/foundation.slider.js',
'foundation/js/foundation/foundation.magellan.js',
'foundation/js/foundation/foundation.clearing.js',
'foundation/js/foundation/foundation.orbit.js',
'foundation/js/foundation/foundation.interchange.js',
'foundation/js/foundation/foundation.abide.js',
'foundation/js/foundation/foundation.tab.js',
'select2/select2.js',
'js/mapit-areas-ni.js',
'js/constituency.js',
'js/person_form.js',
),
'output_filename': 'js/all.js'
}
}
PIPELINE_COMPILERS = (
'pipeline.compilers.sass.SASSCompiler',
)
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yui.YUICompressor'
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yui.YUICompressor'
# On some platforms this might be called "yuicompressor", so it may be
# necessary to symlink it into your PATH as "yui-compressor".
PIPELINE_YUI_BINARY = '/usr/bin/env yui-compressor'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--nocapture',
'--with-doctest',
'--with-coverage',
'--cover-package=candidates'
]
| agpl-3.0 | -7,302,497,677,890,623,000 | 28.812766 | 77 | 0.668142 | false |
yanchen036/pylearning | pylearning/linear_model/tests/test_linear_regression.py | 1 | 1504 | import unittest
import numpy as np
from pylearning.linear_model.linear_regression import LinearRegression
class LinearRegressionTestCase(unittest.TestCase):
def setUp(self):
# y = 2x-1
x = np.matrix('1;0;2')
y = np.matrix('1;-1;3')
self.model = LinearRegression(x, y, 0.1, 120)
def tearDown(self):
self.model = None
def test_cost(self):
cost = self.model._cost()
        self.assertAlmostEqual(cost, 11. / 6)
def test_calc_gradient(self):
theta = self.model._calc_gradient()
self.assertEqual(theta[0, 0], self.model.alpha)
self.assertAlmostEqual(theta[0, 1], 7./3*self.model.alpha)
def test_fit(self):
J_history = self.model.fit()
self.assertAlmostEqual(self.model.theta[0, 0], -1., delta=0.1)
self.assertAlmostEqual(self.model.theta[0, 1], 2., delta=0.1)
self.assertAlmostEqual(J_history[-1], 0., places=2)
def test_predict(self):
x = 3.0
X = np.matrix(x)
self.model.fit()
y = self.model.predict(X)
self.assertAlmostEqual(y[0, 0], 5., delta=0.1)
if __name__ == '__main__':
#suite = unittest.TestSuite()
#suite.addTest(LinearRegressionTestCase('test_cost'))
#suite.addTest(LinearRegressionTestCase('test_calc_gradient'))
#suite.addTest(LinearRegressionTestCase('test_fit'))
#suite.addTest(LinearRegressionTestCase('test_predict'))
#runner = unittest.TextTestRunner()
#runner.run(suite)
unittest.main()
| mit | 8,103,180,439,647,034,000 | 30.333333 | 70 | 0.629654 | false |