repo_name | path | copies | size | content | license
---|---|---|---|---|---
lukeiwanski/tensorflow | tensorflow/contrib/distribute/__init__.py | 11 | 1910 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prototype of a distributed computation library for TF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.distribute.python.cross_tower_ops import *
from tensorflow.contrib.distribute.python.mirrored_strategy import MirroredStrategy
from tensorflow.contrib.distribute.python.monitor import Monitor
from tensorflow.contrib.distribute.python.one_device_strategy import OneDeviceStrategy
from tensorflow.contrib.distribute.python.step_fn import *
from tensorflow.python.training.distribute import *
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'AllReduceCrossTowerOps',
'CrossTowerOps',
'DistributionStrategy',
'MirroredStrategy',
'Monitor',
'OneDeviceStrategy',
'ReductionToOneDeviceCrossTowerOps',
'Step',
'StandardInputStep',
'StandardSingleLossStep',
'TowerContext',
'get_cross_tower_context',
'get_distribution_strategy',
'get_loss_reduction',
'get_tower_context',
'has_distribution_strategy',
'require_tower_context',
]
remove_undocumented(__name__, _allowed_symbols)
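# A minimal, hypothetical usage sketch for the symbols re-exported above (not
# part of the original file; the constructor arguments are an assumption for
# the contrib-era API):
#
#   from tensorflow.contrib import distribute
#
#   strategy = distribute.MirroredStrategy(num_gpus=2)
#   with strategy.scope():
#       ...  # build the replicated model and optimizer here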
apache-2.0 |
SEMAFORInformatik/femagtools | femagtools/winding_diagram.py | 1 | 6730 |
from xml.etree import ElementTree as ET
def _winding_data(Q, p, m):
slots_per_pole_and_phase = int(Q / p / 2 / m)
keys = [phase + 1 for phase in range(m) for _ in range(slots_per_pole_and_phase)] * p * 2
direction = -1
for i in range(Q):
if i % slots_per_pole_and_phase == 0:
direction *= -1
keys[i] *= direction
return keys
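# Illustrative note (not part of the original module): for Q=12 slots, p=2
# pole pairs and m=3 phases, slots_per_pole_and_phase is 1, so the sign
# alternates every slot and _winding_data(12, 2, 3) begins
# [1, -2, 3, -1, 2, -3, ...]; the sign encodes the coil-side direction.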
def winding_diagram(Q, p, m, filename):
def color(key):
colors = {
1: "lime",
2: "magenta",
3: "gold",
}
try:
return colors[key]
except KeyError:
return "blue"
coilspan = int(Q / p / 2) * 10
coil_height = 25
top_height = 5
neck_height = 3
base_gap = 3
arrow_head_length = 2
arrow_head_width = 2
width = Q * 10 + coilspan / 4 + 10
min_x = -(coilspan / 4)
svg = ET.Element("svg", dict(xmlns="http://www.w3.org/2000/svg", viewBox=f"{min_x} -40 {width} 70"))
data = _winding_data(Q, p, m)
for i, key in enumerate(data):
coil_pos = i * 10
base = abs(key) * base_gap + top_height + neck_height
out = key > 0
coil_color = color(abs(key))
if out:
xdata = [
coil_pos + coilspan / 2 - 1,
coil_pos,
coil_pos,
coil_pos + coilspan / 2,
]
ydata = [
top_height,
0,
-coil_height,
-coil_height - top_height
]
up_or_down = -arrow_head_length
arrow_y_pos = coil_height * .88
else:
xdata = [
coil_pos - coilspan / 2,
coil_pos,
coil_pos,
coil_pos - coilspan / 2 + 1,
]
ydata = [
-coil_height - top_height,
-coil_height,
0,
top_height,
]
up_or_down = arrow_head_length
arrow_y_pos = coil_height * .12
ET.SubElement(svg, "rect", {
"x": f"{coil_pos + 2.5}",
"y": f"{-coil_height + 1}",
"width": f"5",
"height": f"{coil_height - 2}",
"fill": "lightblue",
})
ET.SubElement(svg, "path", {
"d": f"M {xdata[0]} {ydata[0]} "
+ " ".join([f"L {x} {y}" for (x, y) in zip(xdata[1:], ydata[1:])]),
"fill": "none",
"stroke": f"{coil_color}",
"stroke-width": ".25px",
"stroke-linejoin": "round",
"stroke-linecap": "round",
})
arrow_points = [
(coil_pos, -arrow_y_pos),
(coil_pos - arrow_head_width / 2, -arrow_y_pos - up_or_down),
(coil_pos + arrow_head_width / 2, -arrow_y_pos - up_or_down),
]
ET.SubElement(svg, "polygon", {
"points": " ".join([f"{x},{y}" for (x, y) in arrow_points]),
"fill": f"{coil_color}",
"stroke": "none",
})
ET.SubElement(svg, "circle", {
"cx": f"{coil_pos}",
"cy": f"{-coil_height / 2}",
"r": ".1em",
"fill": "white",
})
ET.SubElement(svg, "text", {
"x": f"{coil_pos}",
"y": f"{-coil_height / 2}",
"text-anchor": "middle",
"dominant-baseline": "middle",
"style": "font-size: .15em; font-family: sans-serif;",
}).text = str(i + 1)
if i == data.index(abs(key)) and abs(key) != 2:
if out:
x = coil_pos + coilspan / 2 - 1
name = str(abs(key))
else:
x = coil_pos - coilspan / 2 + 1
name = str(abs(key)) + "'"
ET.SubElement(svg, "path", {
"d": f"M {x} {top_height} L {x} {20}",
"stroke": f"{coil_color}",
"stroke-width": ".25px",
"stroke-linecap": "round",
})
ET.SubElement(svg, "text", {
"x": f"{x + 1}",
"y": f"22",
"fill": f"{coil_color}",
"style": "font-size: .25em; font-family: sans-serif;",
}).text = name
elif i == len(data) - 1 - data[::-1].index(-abs(key)) and abs(key) != 2:
if out:
x = coil_pos + coilspan / 2 - 1
else:
x = coil_pos - coilspan / 2 + 1
ET.SubElement(svg, "path", {
"d": f"M {x} {top_height} L {x} {20}",
"stroke": f"{coil_color}",
"stroke-width": ".25px",
"stroke-linecap": "round",
})
ET.SubElement(svg, "text", {
"x": f"{x + 1}",
"y": f"22",
"fill": f"{coil_color}",
"style": "font-size: .25em; font-family: sans-serif;",
}).text = str(abs(key)) + "'"
elif abs(key) == 2 and (i == data.index(key) or data[data.index(key):].index(key)):
if out:
x = coil_pos + coilspan / 2 - 1
name = str(abs(key))
else:
x = coil_pos - coilspan / 2 + 1
name = str(abs(key)) + "'"
ET.SubElement(svg, "path", {
"d": f"M {x} {top_height} L {x} {20}",
"stroke": f"{coil_color}",
"stroke-width": ".25px",
"stroke-linecap": "round",
})
ET.SubElement(svg, "text", {
"x": f"{x + 1}",
"y": f"22",
"fill": f"{coil_color}",
"style": "font-size: .25em; font-family: sans-serif;",
}).text = name
else:
if out:
xdata = [
coil_pos + coilspan / 2 - 1,
coil_pos + coilspan / 2 - 1,
coil_pos - coilspan / 2,
]
else:
xdata = [
coil_pos - coilspan / 2 + 1,
coil_pos - coilspan / 2 + 1,
coil_pos + coilspan / 2,
]
ydata = [
top_height,
base,
base
]
ET.SubElement(svg, "path", {
"d": f"M {xdata[0]} {ydata[0]} " + " ".join([f"L {x} {y}" for (x, y) in zip(xdata[1:], ydata[1:])]),
"fill": "none",
"stroke": f"{coil_color}",
"stroke-width": ".25px",
"stroke-linejoin": "round",
"stroke-linecap": "round",
})
ET.ElementTree(svg).write(filename)
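# Illustrative usage sketch (not part of the original module); the chosen
# machine parameters are an assumption:
if __name__ == "__main__":
    # 12 slots, 2 pole pairs, 3 phases -> writes the winding diagram as SVG
    winding_diagram(12, 2, 3, "winding.svg")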
bsd-2-clause |
yoava333/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_stream_hixie75.py | 496 | 2285 |
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for stream module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.stream import StreamHixie75
from test.test_msgutil import _create_request_hixie75
class StreamHixie75Test(unittest.TestCase):
"""A unittest for StreamHixie75 class."""
def test_payload_length(self):
for length, bytes in ((0, '\x00'), (0x7f, '\x7f'), (0x80, '\x81\x00'),
(0x1234, '\x80\xa4\x34')):
test_stream = StreamHixie75(_create_request_hixie75(bytes))
self.assertEqual(
length, test_stream._read_payload_length_hixie75())
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
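# Illustrative sketch (not part of the original test): the (length, bytes)
# pairs above follow the hixie-75 variable-length encoding, 7 bits per byte
# with the high bit set on every byte except the last. A hypothetical encoder
# for that scheme:
#
#   def encode_payload_length_hixie75(length):
#       encoded = chr(length & 0x7f)
#       length >>= 7
#       while length:
#           encoded = chr(0x80 | (length & 0x7f)) + encoded
#           length >>= 7
#       return encoded  # e.g. 0x7f -> '\x7f', 0x80 -> '\x81\x00'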
mpl-2.0 |
jeremiahmarks/sl4a | python-build/python-libs/gdata/src/gdata/base/service.py | 166 | 9638 |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GBaseService extends the GDataService to streamline Google Base operations.
GBaseService: Provides methods to query feeds and manipulate items. Extends
GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import urllib
import gdata
import atom.service
import gdata.service
import gdata.base
import atom
# URL to which all batch requests are sent.
BASE_BATCH_URL = 'http://www.google.com/base/feeds/items/batch'
class Error(Exception):
pass
class RequestError(Error):
pass
class GBaseService(gdata.service.GDataService):
"""Client for the Google Base service."""
def __init__(self, email=None, password=None, source=None,
server='base.google.com', api_key=None, additional_headers=None,
handler=None, **kwargs):
"""Creates a client for the Google Base service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'base.google.com'.
api_key: string (optional) The Google Base API key to use.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(
self, email=email, password=password, service='gbase', source=source,
server=server, additional_headers=additional_headers, handler=handler,
**kwargs)
self.api_key = api_key
def _SetAPIKey(self, api_key):
if not isinstance(self.additional_headers, dict):
self.additional_headers = {}
self.additional_headers['X-Google-Key'] = api_key
def __SetAPIKey(self, api_key):
self._SetAPIKey(api_key)
def _GetAPIKey(self):
if 'X-Google-Key' not in self.additional_headers:
return None
else:
return self.additional_headers['X-Google-Key']
def __GetAPIKey(self):
return self._GetAPIKey()
api_key = property(__GetAPIKey, __SetAPIKey,
doc="""Get or set the API key to be included in all requests.""")
def Query(self, uri, converter=None):
"""Performs a style query and returns a resulting feed or entry.
Args:
uri: string The full URI which is to be queried. Examples include
'/base/feeds/snippets?bq=digital+camera',
'http://www.google.com/base/feeds/snippets?bq=digital+camera'
'/base/feeds/items'
I recommend creating a URI using a query class.
converter: func (optional) A function which will be executed on the
server's response. Examples include GBaseItemFromString, etc.
Returns:
If converter was specified, returns the results of calling converter on
the server's response. If converter was not specified and the result
was an Atom Entry, returns a GBaseItem; otherwise, the method returns
the result of calling gdata.service's Get method.
"""
result = self.Get(uri, converter=converter)
if converter:
return result
elif isinstance(result, atom.Entry):
return gdata.base.GBaseItemFromString(result.ToString())
return result
def QuerySnippetsFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseSnippetFeedFromString)
def QueryItemsFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemFeedFromString)
def QueryAttributesFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseAttributesFeedFromString)
def QueryItemTypesFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemTypesFeedFromString)
def QueryLocalesFeed(self, uri):
return self.Get(uri, converter=gdata.base.GBaseLocalesFeedFromString)
def GetItem(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemFromString)
def GetSnippet(self, uri):
return self.Get(uri, converter=gdata.base.GBaseSnippetFromString)
def GetAttribute(self, uri):
return self.Get(uri, converter=gdata.base.GBaseAttributeEntryFromString)
def GetItemType(self, uri):
return self.Get(uri, converter=gdata.base.GBaseItemTypeEntryFromString)
def GetLocale(self, uri):
return self.Get(uri, converter=gdata.base.GDataEntryFromString)
def InsertItem(self, new_item, url_params=None, escape_params=True,
converter=None):
"""Adds an item to Google Base.
Args:
new_item: atom.Entry or subclass A new item which is to be added to
Google Base.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
GBaseItemFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a GBaseItem.
"""
response = self.Post(new_item, '/base/feeds/items', url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return gdata.base.GBaseItemFromString(response.ToString())
return response
def DeleteItem(self, item_id, url_params=None, escape_params=True):
"""Removes an item with the specified ID from Google Base.
Args:
item_id: string The ID of the item to be deleted. Example:
'http://www.google.com/base/feeds/items/13185446517496042648'
url_params: dict (optional) Additional URL parameters to be included
in the deletion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
True if the delete succeeded.
"""
return self.Delete('%s' % (item_id[len('http://www.google.com'):],),
url_params=url_params, escape_params=escape_params)
def UpdateItem(self, item_id, updated_item, url_params=None,
escape_params=True,
converter=gdata.base.GBaseItemFromString):
"""Updates an existing item.
Args:
item_id: string The ID of the item to be updated. Example:
'http://www.google.com/base/feeds/items/13185446517496042648'
updated_item: atom.Entry, subclass, or string, containing
the Atom Entry which will replace the base item which is
stored at the item_id.
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
GBaseItemFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a GBaseItem.
"""
response = self.Put(updated_item,
item_id, url_params=url_params, escape_params=escape_params,
converter=converter)
if not converter and isinstance(response, atom.Entry):
return gdata.base.GBaseItemFromString(response.ToString())
return response
def ExecuteBatch(self, batch_feed,
converter=gdata.base.GBaseItemFeedFromString):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.BatchFeed A feed containing BatchEntry elements which
contain the desired CRUD operation and any necessary entry data.
converter: Function (optional) Function to be executed on the server's
response. This function should take one string as a parameter. The
default value is GBaseItemFeedFromString which will turn the result
into a gdata.base.GBaseItem object.
Returns:
A gdata.BatchFeed containing the results.
"""
return self.Post(batch_feed, BASE_BATCH_URL, converter=converter)
class BaseQuery(gdata.service.Query):
def _GetBaseQuery(self):
return self['bq']
def _SetBaseQuery(self, base_query):
self['bq'] = base_query
bq = property(_GetBaseQuery, _SetBaseQuery,
doc="""The bq query parameter""")
apache-2.0 |
Serag8/Bachelor | google_appengine/lib/django-1.5/django/dispatch/dispatcher.py | 105 | 9373 |
import weakref
import threading
from django.dispatch import saferef
from django.utils.six.moves import xrange
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak-referencable (more
precisely saferef.safeRef() must be able to create a reference
to the receiver).
Receivers must be able to accept keyword arguments.
If receivers have a dispatch_uid attribute, the receiver will
not be added if another receiver already exists with that
dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
a specific object, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.DEBUG:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(receiver)
except TypeError:
try:
argspec = inspect.getargspec(receiver.__call__)
except (TypeError, AttributeError):
argspec = None
if argspec:
assert argspec[2] is not None, \
"Signal receivers must accept keyword arguments (**kwargs)."
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
receiver = saferef.safeRef(receiver, onDelete=self._remove_receiver)
with self.lock:
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
weak
The weakref state to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
with self.lock:
for index in xrange(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
del self.receivers[index]
break
def has_listeners(self, sender=None):
return bool(self._live_receivers(_make_id(sender)))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
receivers called if a receiver raises an error.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers:
return responses
for receiver in self._live_receivers(_make_id(sender)):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver.
"""
responses = []
if not self.receivers:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(_make_id(sender)):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _live_receivers(self, senderkey):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
none_senderkey = _make_id(None)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == none_senderkey or r_senderkey == senderkey:
if isinstance(receiver, WEAKREF_TYPES):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
receivers.append(receiver)
else:
receivers.append(receiver)
return receivers
def _remove_receiver(self, receiver):
"""
Remove dead receivers from connections.
"""
with self.lock:
to_remove = []
for key, connected_receiver in self.receivers:
if connected_receiver == receiver:
to_remove.append(key)
for key in to_remove:
last_idx = len(self.receivers) - 1
# enumerate in reverse order so that indexes are valid even
# after we delete some items
for idx, (r_key, _) in enumerate(reversed(self.receivers)):
if r_key == key:
del self.receivers[last_idx-idx]
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
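# Illustrative usage sketch (the names are hypothetical, not part of the
# module):
#
#   pizza_done = Signal(providing_args=["toppings", "size"])
#
#   @receiver(pizza_done)
#   def handle_pizza(sender, **kwargs):
#       print("pizza from %r with %r" % (sender, kwargs.get("toppings")))
#
#   pizza_done.send(sender=None, toppings=["cheese"], size="large")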
mit |
lento/cortex | test/IECoreGL/MeshPrimitiveTest.py | 4 | 7978 |
##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import os
import shutil
import IECore
import IECoreGL
IECoreGL.init( False )
class MeshPrimitiveTest( unittest.TestCase ) :
outputFileName = os.path.dirname( __file__ ) + "/output/testMesh.tif"
def testVertexAttributes( self ) :
vertexSource = """
#include "IECoreGL/VertexShader.h"
IECOREGL_VERTEXSHADER_IN vec3 vertexP;
IECOREGL_VERTEXSHADER_IN vec2 vertexst;
IECOREGL_VERTEXSHADER_OUT vec4 stColor;
void main()
{
vec4 pCam = gl_ModelViewMatrix * vec4( vertexP, 1 );
gl_Position = gl_ProjectionMatrix * pCam;
stColor = vec4(vertexst.x, vertexst.y, 0.0, 1.0);
}
"""
fragmentSource = """
varying vec4 stColor;
void main()
{
gl_FragColor = stColor;
}
"""
m = IECore.Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob").read()
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "immediate" ) )
r.camera( "main", {
"projection" : IECore.StringData( "orthographic" ),
"resolution" : IECore.V2iData( IECore.V2i( 256 ) ),
"clippingPlanes" : IECore.V2fData( IECore.V2f( 1, 1000 ) ),
"screenWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
}
)
r.display( self.outputFileName, "tif", "rgba", {} )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -15 ) ) )
r.shader( "surface", "showST",
{ "gl:fragmentSource" : IECore.StringData( fragmentSource ),
"gl:vertexSource" : IECore.StringData( vertexSource )
}
)
m.render( r )
reader = IECore.Reader.create( os.path.dirname( __file__ ) + "/expectedOutput/meshST.tif" )
reader['colorSpace'] = 'linear'
expectedImage = reader.read()
actualImage = IECore.Reader.create( self.outputFileName ).read()
self.assertEqual( IECore.ImageDiffOp()( imageA = expectedImage, imageB = actualImage, maxError = 0.05 ).value, False )
def testUniformCs( self ) :
fragmentSource = """
#include "IECoreGL/FragmentShader.h"
IECOREGL_FRAGMENTSHADER_IN vec3 fragmentCs;
void main()
{
gl_FragColor = vec4( fragmentCs, 1.0 );
}
"""
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "immediate" ) )
r.camera( "main", {
"projection" : IECore.StringData( "orthographic" ),
"resolution" : IECore.V2iData( IECore.V2i( 256 ) ),
"clippingPlanes" : IECore.V2fData( IECore.V2f( 1, 1000 ) ),
"screenWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
}
)
r.display( self.outputFileName, "tif", "rgba", {} )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -15 ) ) )
r.shader( "surface", "test", { "gl:fragmentSource" : IECore.StringData( fragmentSource ) } )
m = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ), IECore.V2i( 2 ) )
m["Cs"] = IECore.PrimitiveVariable(
IECore.PrimitiveVariable.Interpolation.Uniform,
IECore.Color3fVectorData( [
IECore.Color3f( 1, 0, 0 ),
IECore.Color3f( 0, 1, 0 ),
IECore.Color3f( 0, 0, 1 ),
IECore.Color3f( 1, 1, 1, ),
] )
)
m.render( r )
image = IECore.Reader.create( self.outputFileName ).read()
e = IECore.ImagePrimitiveEvaluator( image )
r = e.createResult()
e.pointAtUV( IECore.V2f( 0.25, 0.75 ), r )
self.assertEqual( r.floatPrimVar( image["R"] ), 1 )
self.assertEqual( r.floatPrimVar( image["G"] ), 0 )
self.assertEqual( r.floatPrimVar( image["B"] ), 0 )
e.pointAtUV( IECore.V2f( 0.75, 0.75 ), r )
self.assertEqual( r.floatPrimVar( image["R"] ), 0 )
self.assertEqual( r.floatPrimVar( image["G"] ), 1 )
self.assertEqual( r.floatPrimVar( image["B"] ), 0 )
e.pointAtUV( IECore.V2f( 0.75, 0.25 ), r )
self.assertEqual( r.floatPrimVar( image["R"] ), 1 )
self.assertEqual( r.floatPrimVar( image["G"] ), 1 )
self.assertEqual( r.floatPrimVar( image["B"] ), 1 )
e.pointAtUV( IECore.V2f( 0.25, 0.25 ), r )
self.assertEqual( r.floatPrimVar( image["R"] ), 0 )
self.assertEqual( r.floatPrimVar( image["G"] ), 0 )
self.assertEqual( r.floatPrimVar( image["B"] ), 1 )
def testBound( self ) :
m = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -0.5 ), IECore.V2f( 0.5 ) ) )
m2 = IECoreGL.ToGLMeshConverter( m ).convert()
self.assertEqual( m.bound(), m2.bound() )
def testFaceNormals( self ) :
# when a polygon mesh has no normals, we must calculate face normals so we can
# shade it in a faceted manner.
fragmentSource = """
#include "IECoreGL/FragmentShader.h"
IECOREGL_FRAGMENTSHADER_IN vec3 fragmentN;
void main()
{
gl_FragColor = vec4( fragmentN, 1.0 );
}
"""
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "immediate" ) )
r.camera( "main", {
"projection" : IECore.StringData( "orthographic" ),
"resolution" : IECore.V2iData( IECore.V2i( 256 ) ),
"clippingPlanes" : IECore.V2fData( IECore.V2f( 1, 1000 ) ),
"screenWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
}
)
r.display( self.outputFileName, "tif", "rgba", {} )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -15 ) ) )
r.shader( "surface", "test", { "gl:fragmentSource" : IECore.StringData( fragmentSource ) } )
m = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -0.5 ), IECore.V2f( 0.5 ) ) )
self.assertTrue( "N" not in m )
m.render( r )
image = IECore.Reader.create( self.outputFileName ).read()
e = IECore.ImagePrimitiveEvaluator( image )
r = e.createResult()
e.pointAtUV( IECore.V2f( 0.5, 0.5 ), r )
self.assertEqual( r.floatPrimVar( image["R"] ), 0 )
self.assertEqual( r.floatPrimVar( image["G"] ), 0 )
self.assertEqual( r.floatPrimVar( image["B"] ), 1 )
def setUp( self ) :
if not os.path.isdir( "test/IECoreGL/output" ) :
os.makedirs( "test/IECoreGL/output" )
def tearDown( self ) :
if os.path.isdir( "test/IECoreGL/output" ) :
shutil.rmtree( "test/IECoreGL/output" )
if __name__ == "__main__":
unittest.main()
bsd-3-clause |
rg3/youtube-dl | youtube_dl/YoutubeDL.py | 2 | 110536 |
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import tokenize
import traceback
import random
from string import ascii_letters
from .compat import (
compat_basestring,
compat_cookiejar,
compat_get_terminal_size,
compat_http_client,
compat_kwargs,
compat_numeric_types,
compat_os_name,
compat_str,
compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
compat_urllib_request_DataHandler,
)
from .utils import (
age_restricted,
args_to_str,
ContentTooShortError,
date_from_str,
DateRange,
DEFAULT_OUTTMPL,
determine_ext,
determine_protocol,
DownloadError,
encode_compat_str,
encodeFilename,
error_to_compat_str,
expand_path,
ExtractorError,
format_bytes,
formatSeconds,
GeoRestrictedError,
int_or_none,
ISO3166Utils,
locked_file,
make_HTTPS_handler,
MaxDownloadsReached,
orderedSet,
PagedList,
parse_filesize,
PerRequestProxyHandler,
platform_name,
PostProcessingError,
preferredencoding,
prepend_extension,
register_socks_protocols,
render_table,
replace_extension,
SameFileError,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
std_headers,
str_or_none,
subtitles_filename,
UnavailableVideoError,
url_basename,
version_tuple,
write_json_file,
write_string,
YoutubeDLCookieJar,
YoutubeDLCookieProcessor,
YoutubeDLHandler,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
from .extractor.openload import PhantomJSwrapper
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
FFmpegFixupM3u8PP,
FFmpegFixupM4aPP,
FFmpegFixupStretchedPP,
FFmpegMergerPP,
FFmpegPostProcessor,
get_postprocessor,
)
from .version import __version__
if compat_os_name == 'nt':
import ctypes
class YoutubeDL(object):
"""YoutubeDL class.
YoutubeDL objects are the ones responsible for downloading the
actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per
program. Since, given a video URL, the downloader doesn't know how to
extract all the needed information (a task that InfoExtractors do), it
has to pass the URL to one of them.
For this, YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
a URL, the YoutubeDL object hands it to the first InfoExtractor it
finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and
YoutubeDL processes the extracted information, possibly using a File
Downloader to download the video.
YoutubeDL objects accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead. These options are available through the params
attribute for the InfoExtractors to use. The YoutubeDL also
registers itself as the downloader in charge for the InfoExtractors
that are added to it, so this is a "mutual registration".
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
videopassword: Password for accessing a video.
ap_mso: Adobe Pass multiple-system operator identifier.
ap_username: Multiple-system operator account username.
ap_password: Multiple-system operator account password.
usenetrc: Use netrc for authentication instead.
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
no_warnings: Do not print out anything for warnings.
forceurl: Force printing final URL.
forcetitle: Force printing title.
forceid: Force printing ID.
forcethumbnail: Force printing thumbnail URL.
forcedescription: Force printing description.
forcefilename: Force printing final filename.
forceduration: Force printing duration.
forcejson: Force printing info_dict as JSON.
dump_single_json: Force printing the info_dict of the whole playlist
(or video) as a single JSON line.
simulate: Do not download the video files.
format: Video format code. See options.py for more information.
outtmpl: Template for output names.
restrictfilenames: Do not allow "&" and spaces in file names
ignoreerrors: Do not stop on download errors.
force_generic_extractor: Force downloader to use the generic extractor
nooverwrites: Prevent overwriting files.
playliststart: Playlist item to start at.
playlistend: Playlist item to end at.
playlist_items: Specific indices of playlist to download.
playlistreverse: Download playlist items in reverse order.
playlistrandom: Download playlist items in random order.
matchtitle: Download only matching titles.
rejecttitle: Reject downloads for matching titles.
logger: Log messages to a logging.Logger instance.
logtostderr: Log messages to stderr instead of stdout.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
writeannotations: Write the video annotations to a .annotations.xml file
writethumbnail: Write the thumbnail image to a file
write_all_thumbnails: Write all thumbnail formats to files
writesubtitles: Write the video subtitles to a file
writeautomaticsub: Write the automatically generated subtitles to a file
allsubtitles: Downloads all the subtitles of the video
(requires writesubtitles or writeautomaticsub)
listsubtitles: Lists all available subtitles for the video
subtitlesformat: The format code for subtitles
subtitleslangs: List of languages of the subtitles to download
keepvideo: Keep the video file after post-processing
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
False to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
min_views: An integer representing the minimum view count the video
must have in order to not be skipped.
Videos without view count information are always
downloaded. None for no limit.
max_views: An integer representing the maximum view count.
Videos that are more popular than that are not
downloaded.
Videos without view count information are always
downloaded. None for no limit.
download_archive: File name of a file where all downloads are recorded.
Videos already present in the file are not downloaded
again.
cookiefile: File name where cookies should be read from and dumped to.
nocheckcertificate:Do not verify SSL certificates
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
At the moment, this is only supported by YouTube.
proxy: URL of the proxy server to use
geo_verification_proxy: URL of the proxy to use for IP address verification
on geo-restricted sites.
socket_timeout: Time to wait for unresponsive hosts, in seconds
bidi_workaround: Work around buggy terminals without bidirectional text
support, using fribidi
debug_printtraffic:Print out sent and received HTTP traffic
include_ads: Download ads as well
default_search: Prepend this string if an input url is not valid.
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
extract_flat: Do not resolve URLs, return the immediate result.
Pass in 'in_playlist' to only show this behavior for
playlist items.
postprocessors: A list of dictionaries, each with an entry
* key: The name of the postprocessor. See
youtube_dl/postprocessor/__init__.py for a list.
as well as any further keyword arguments for the
postprocessor.
progress_hooks: A list of functions that get called on download
progress, with a dictionary with the entries
* status: One of "downloading", "error", or "finished".
Check this first and ignore unknown values.
If status is one of "downloading", or "finished", the
following properties may also be present:
* filename: The final filename (always present)
* tmpfilename: The filename we're currently writing to
* downloaded_bytes: Bytes on disk
* total_bytes: Size of the whole file, None if unknown
* total_bytes_estimate: Guess of the eventual file size,
None if unavailable.
* elapsed: The number of seconds since download started.
* eta: The estimated time in seconds, None if unknown
* speed: The download speed in bytes/second, None if
unknown
* fragment_index: The counter of the currently
downloaded video fragment.
* fragment_count: The number of fragments (= individual
files that will be merged)
Progress hooks are guaranteed to be called at least once
(with status "finished") if the download is successful.
merge_output_format: Extension to use when merging formats.
fixup: Automatically correct known faults of the file.
One of:
- "never": do nothing
- "warn": only emit a warning
- "detect_or_warn": check whether we can do anything
about it, warn otherwise (default)
source_address: Client-side IP address to bind to.
call_home: Boolean, true iff we are allowed to contact the
youtube-dl servers for debugging.
sleep_interval: Number of seconds to sleep before each download when
used alone or a lower bound of a range for randomized
sleep before each download (minimum possible number
of seconds to sleep) when used along with
max_sleep_interval.
max_sleep_interval:Upper bound of a range for randomized sleep before each
download (maximum possible number of seconds to sleep).
Must only be used along with sleep_interval.
Actual sleep time will be a random float from range
[sleep_interval; max_sleep_interval].
listformats: Print an overview of available video formats and exit.
list_thumbnails: Print a table of all thumbnails and exit.
match_filter: A function that gets called with the info_dict of
every video.
If it returns a message, the video is ignored.
If it returns None, the video is downloaded.
match_filter_func in utils.py is one example for this.
no_color: Do not emit color codes in output.
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
HTTP header
geo_bypass_country:
Two-letter ISO 3166-2 country code that will be used for
explicit geographic restriction bypassing via faking
X-Forwarded-For HTTP header
geo_bypass_ip_block:
IP range in CIDR notation that will be used similarly to
geo_bypass_country
The following options determine which downloader is picked:
external_downloader: Executable of the external downloader to call.
None or unset for standard (built-in) downloader.
hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
if True, otherwise use ffmpeg/avconv if False, otherwise
use downloader suggested by extractor if None.
The following parameters are not used by YoutubeDL itself, they are used by
the downloader (see youtube_dl/downloader/common.py):
nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
noresizebuffer, retries, continuedl, noprogress, consoletitle,
xattr_set_filesize, external_downloader_args, hls_use_mpegts,
http_chunk_size.
The following options are used by the post processors:
prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
otherwise prefer ffmpeg.
postprocessor_args: A list of additional command-line arguments for the
postprocessor.
The following options are used by the Youtube extractor:
youtube_include_dash_manifest: If True (default), DASH manifests and related
data will be downloaded and processed by extractor.
You can reduce network I/O by disabling it if you don't
care about DASH.
"""
_NUMERIC_FIELDS = set((
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
'timestamp', 'upload_year', 'upload_month', 'upload_day',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
'average_rating', 'comment_count', 'age_limit',
'start_time', 'end_time',
'chapter_number', 'season_number', 'episode_number',
'track_number', 'disc_number', 'release_year',
'playlist_index',
))
params = None
_ies = []
_pps = []
_download_retcode = None
_num_downloads = None
_screen_file = None
def __init__(self, params=None, auto_init=True):
"""Create a FileDownloader object with the given options."""
if params is None:
params = {}
self._ies = []
self._ies_instances = {}
self._pps = []
self._progress_hooks = []
self._download_retcode = 0
self._num_downloads = 0
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr
self.params = {
# Default parameters
'nocheckcertificate': False,
}
self.params.update(params)
self.cache = Cache(self)
def check_deprecated(param, option, suggestion):
if self.params.get(param) is not None:
self.report_warning(
'%s is deprecated. Use %s instead.' % (option, suggestion))
return True
return False
if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
if self.params.get('geo_verification_proxy') is None:
self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N is the number of digits')
check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
if params.get('bidi_workaround', False):
try:
import pty
master, slave = pty.openpty()
width = compat_get_terminal_size().columns
if width is None:
width_args = []
else:
width_args = ['-w', str(width)]
sp_kwargs = dict(
stdin=subprocess.PIPE,
stdout=slave,
stderr=self._err_file)
try:
self._output_process = subprocess.Popen(
['bidiv'] + width_args, **sp_kwargs
)
except OSError:
self._output_process = subprocess.Popen(
['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
self._output_channel = os.fdopen(master, 'rb')
except OSError as ose:
if ose.errno == errno.ENOENT:
self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
else:
raise
if (sys.platform != 'win32' and
sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
not params.get('restrictfilenames', False)):
# Unicode filesystem API will throw errors (#1474, #13027)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True
if isinstance(params.get('outtmpl'), bytes):
self.report_warning(
'Parameter outtmpl is bytes, but should be a unicode string. '
'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
self._setup_opener()
if auto_init:
self.print_debug_header()
self.add_default_info_extractors()
for pp_def_raw in self.params.get('postprocessors', []):
pp_class = get_postprocessor(pp_def_raw['key'])
pp_def = dict(pp_def_raw)
del pp_def['key']
pp = pp_class(self, **compat_kwargs(pp_def))
self.add_post_processor(pp)
for ph in self.params.get('progress_hooks', []):
self.add_progress_hook(ph)
register_socks_protocols()
def warn_if_short_id(self, argv):
# short YouTube ID starting with dash?
idxs = [
i for i, a in enumerate(argv)
if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
if idxs:
correct_argv = (
['youtube-dl'] +
[a for i, a in enumerate(argv) if i not in idxs] +
['--'] + [argv[i] for i in idxs]
)
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s\n' %
args_to_str(correct_argv))
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
if not isinstance(ie, type):
self._ies_instances[ie.ie_key()] = ie
ie.set_downloader(self)
def get_info_extractor(self, ie_key):
"""
Get an instance of an IE with name ie_key. It will try to get one from
the _ies list; if there's no instance, it will create a new one and add
it to the extractor list.
"""
ie = self._ies_instances.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)()
self.add_info_extractor(ie)
return ie
def add_default_info_extractors(self):
"""
Add the InfoExtractors returned by gen_extractors to the end of the list
"""
for ie in gen_extractor_classes():
self.add_info_extractor(ie)
def add_post_processor(self, pp):
"""Add a PostProcessor object to the end of the chain."""
self._pps.append(pp)
pp.set_downloader(self)
def add_progress_hook(self, ph):
"""Add the progress hook (currently only for the file downloader)"""
self._progress_hooks.append(ph)
def _bidi_workaround(self, message):
if not hasattr(self, '_output_channel'):
return message
assert hasattr(self, '_output_process')
assert isinstance(message, compat_str)
line_count = message.count('\n') + 1
self._output_process.stdin.write((message + '\n').encode('utf-8'))
self._output_process.stdin.flush()
res = ''.join(self._output_channel.readline().decode('utf-8')
for _ in range(line_count))
return res[:-len('\n')]
def to_screen(self, message, skip_eol=False):
"""Print message to stdout if not in quiet mode."""
return self.to_stdout(message, skip_eol, check_quiet=True)
def _write_string(self, s, out=None):
write_string(s, out=out, encoding=self.params.get('encoding'))
def to_stdout(self, message, skip_eol=False, check_quiet=False):
"""Print message to stdout if not in quiet mode."""
if self.params.get('logger'):
self.params['logger'].debug(message)
elif not check_quiet or not self.params.get('quiet', False):
message = self._bidi_workaround(message)
terminator = ['\n', ''][skip_eol]
output = message + terminator
self._write_string(output, self._screen_file)
def to_stderr(self, message):
"""Print message to stderr."""
assert isinstance(message, compat_str)
if self.params.get('logger'):
self.params['logger'].error(message)
else:
message = self._bidi_workaround(message)
output = message + '\n'
self._write_string(output, self._err_file)
def to_console_title(self, message):
if not self.params.get('consoletitle', False):
return
if compat_os_name == 'nt':
if ctypes.windll.kernel32.GetConsoleWindow():
# c_wchar_p() might not be necessary if `message` is
# already of type unicode()
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
elif 'TERM' in os.environ:
self._write_string('\033]0;%s\007' % message, self._screen_file)
def save_console_title(self):
if not self.params.get('consoletitle', False):
return
if self.params.get('simulate', False):
return
if compat_os_name != 'nt' and 'TERM' in os.environ:
# Save the title on stack
self._write_string('\033[22;0t', self._screen_file)
def restore_console_title(self):
if not self.params.get('consoletitle', False):
return
if self.params.get('simulate', False):
return
if compat_os_name != 'nt' and 'TERM' in os.environ:
# Restore the title from stack
self._write_string('\033[23;0t', self._screen_file)
def __enter__(self):
self.save_console_title()
return self
def __exit__(self, *args):
self.restore_console_title()
if self.params.get('cookiefile') is not None:
self.cookiejar.save(ignore_discard=True, ignore_expires=True)
def trouble(self, message=None, tb=None):
"""Determine action to take when a download problem appears.
Depending on whether the downloader has been configured to ignore
download errors or not, this method may throw an exception or
not when errors are found, after printing the message.
tb, if given, is additional traceback information.
"""
if message is not None:
self.to_stderr(message)
if self.params.get('verbose'):
if tb is None:
if sys.exc_info()[0]: # if .trouble has been called from an except block
tb = ''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
tb += encode_compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = ''.join(tb_data)
self.to_stderr(tb)
if not self.params.get('ignoreerrors', False):
if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
exc_info = sys.exc_info()[1].exc_info
else:
exc_info = sys.exc_info()
raise DownloadError(message, exc_info)
self._download_retcode = 1
def report_warning(self, message):
'''
Print the message to stderr; it will be prefixed with 'WARNING:'.
If stderr is a tty file, the 'WARNING:' will be colored.
'''
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
if self.params.get('no_warnings'):
return
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
warning_message = '%s %s' % (_msg_header, message)
self.to_stderr(warning_message)
def report_error(self, message, tb=None):
'''
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;31mERROR:\033[0m'
else:
_msg_header = 'ERROR:'
error_message = '%s %s' % (_msg_header, message)
self.trouble(error_message, tb)
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def prepare_filename(self, info_dict):
"""Generate the output filename."""
try:
template_dict = dict(info_dict)
template_dict['epoch'] = int(time.time())
autonumber_size = self.params.get('autonumber_size')
if autonumber_size is None:
autonumber_size = 5
template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
if template_dict.get('resolution') is None:
if template_dict.get('width') and template_dict.get('height'):
template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
elif template_dict.get('height'):
template_dict['resolution'] = '%sp' % template_dict['height']
elif template_dict.get('width'):
template_dict['resolution'] = '%dx?' % template_dict['width']
sanitize = lambda k, v: sanitize_filename(
compat_str(v),
restricted=self.params.get('restrictfilenames'),
is_id=(k == 'id' or k.endswith('_id')))
template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
for k, v in template_dict.items()
if v is not None and not isinstance(v, (list, tuple, dict)))
template_dict = collections.defaultdict(lambda: 'NA', template_dict)
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
# For fields playlist_index and autonumber convert all occurrences
# of %(field)s to %(field)0Nd for backward compatibility
field_size_compat_map = {
'playlist_index': len(str(template_dict['n_entries'])),
'autonumber': autonumber_size,
}
FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
if mobj:
outtmpl = re.sub(
FIELD_SIZE_COMPAT_RE,
r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
outtmpl)
# Missing numeric fields used together with integer presentation types
# in format specification will break the argument substitution since
# string 'NA' is returned for missing fields. We will patch output
# template for missing fields to meet string presentation type.
for numeric_field in self._NUMERIC_FIELDS:
if numeric_field not in template_dict:
# As of [1] format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
FORMAT_RE = r'''(?x)
(?<!%)
%
\({0}\) # mapping key
(?:[#0\-+ ]+)? # conversion flags (optional)
(?:\d+)? # minimum field width (optional)
(?:\.\d+)? # precision (optional)
[hlL]? # length modifier (optional)
[diouxXeEfFgGcrs%] # conversion type
'''
outtmpl = re.sub(
FORMAT_RE.format(numeric_field),
r'%({0})s'.format(numeric_field), outtmpl)
# expand_path translates '%%' into '%' and '$$' into '$'
# correspondingly that is not what we want since we need to keep
# '%%' intact for template dict substitution step. Working around
# with boundary-alike separator hack.
sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
# outtmpl should be expand_path'ed before template dict substitution
# because meta fields may contain env variables we don't want to
# be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
# title "Hello $PATH", we don't want `$PATH` to be expanded.
filename = expand_path(outtmpl).replace(sep, '') % template_dict
# Temporary fix for #4787
# 'Treat' all problem characters by passing filename through preferredencoding
            # to work around encoding issues with subprocess on python2 @ Windows
if sys.version_info < (3, 0) and sys.platform == 'win32':
filename = encodeFilename(filename, True).decode(preferredencoding())
return sanitize_path(filename)
except ValueError as err:
self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
return None
def _match_entry(self, info_dict, incomplete):
""" Returns None iff the file should be downloaded """
video_title = info_dict.get('title', info_dict.get('id', 'video'))
if 'title' in info_dict:
# This can happen when we're just evaluating the playlist
title = info_dict['title']
matchtitle = self.params.get('matchtitle', False)
if matchtitle:
if not re.search(matchtitle, title, re.IGNORECASE):
return '"' + title + '" title did not match pattern "' + matchtitle + '"'
rejecttitle = self.params.get('rejecttitle', False)
if rejecttitle:
if re.search(rejecttitle, title, re.IGNORECASE):
return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
date = info_dict.get('upload_date')
if date is not None:
dateRange = self.params.get('daterange', DateRange())
if date not in dateRange:
return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
view_count = info_dict.get('view_count')
if view_count is not None:
min_views = self.params.get('min_views')
if min_views is not None and view_count < min_views:
return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
max_views = self.params.get('max_views')
if max_views is not None and view_count > max_views:
return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
return 'Skipping "%s" because it is age restricted' % video_title
if self.in_download_archive(info_dict):
return '%s has already been recorded in archive' % video_title
if not incomplete:
match_filter = self.params.get('match_filter')
if match_filter is not None:
ret = match_filter(info_dict)
if ret is not None:
return ret
return None
@staticmethod
def add_extra_info(info_dict, extra_info):
'''Set the keys from extra_info in info dict if they are missing'''
for key, value in extra_info.items():
info_dict.setdefault(key, value)
def extract_info(self, url, download=True, ie_key=None, extra_info={},
process=True, force_generic_extractor=False):
'''
Returns a list with a dictionary for each video we find.
If 'download', also downloads the videos.
extra_info is a dict containing the extra values to add to each result
'''
if not ie_key and force_generic_extractor:
ie_key = 'Generic'
if ie_key:
ies = [self.get_info_extractor(ie_key)]
else:
ies = self._ies
for ie in ies:
if not ie.suitable(url):
continue
ie = self.get_info_extractor(ie.ie_key())
if not ie.working():
self.report_warning('The program functionality for this site has been marked as broken, '
'and will probably not work.')
try:
ie_result = ie.extract(url)
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
break
if isinstance(ie_result, list):
# Backwards compatibility: old IE result format
ie_result = {
'_type': 'compat_list',
'entries': ie_result,
}
self.add_default_extra_info(ie_result, ie, url)
if process:
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
except GeoRestrictedError as e:
msg = e.msg
if e.countries:
msg += '\nThis video is available in %s.' % ', '.join(
map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
self.report_error(msg)
break
except ExtractorError as e: # An error we somewhat expected
self.report_error(compat_str(e), e.format_traceback())
break
except MaxDownloadsReached:
raise
except Exception as e:
if self.params.get('ignoreerrors', False):
self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
break
else:
raise
else:
self.report_error('no suitable InfoExtractor for URL %s' % url)
def add_default_extra_info(self, ie_result, ie, url):
self.add_extra_info(ie_result, {
'extractor': ie.IE_NAME,
'webpage_url': url,
'webpage_url_basename': url_basename(url),
'extractor_key': ie.ie_key(),
})
def process_ie_result(self, ie_result, download=True, extra_info={}):
"""
        Take the result of the ie (may be modified) and resolve all unresolved
references (URLs, playlist items).
It will also download the videos if 'download'.
Returns the resolved ie_result.
"""
result_type = ie_result.get('_type', 'video')
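        # result_type is one of 'video', 'url', 'url_transparent', 'playlist',
        # 'multi_video' or the legacy 'compat_list', each handled by a branch below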
if result_type in ('url', 'url_transparent'):
ie_result['url'] = sanitize_url(ie_result['url'])
extract_flat = self.params.get('extract_flat', False)
if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
extract_flat is True):
if self.params.get('forcejson', False):
self.to_stdout(json.dumps(ie_result))
return ie_result
if result_type == 'video':
self.add_extra_info(ie_result, extra_info)
return self.process_video_result(ie_result, download=download)
elif result_type == 'url':
# We have to add extra_info to the results because it may be
# contained in a playlist
return self.extract_info(ie_result['url'],
download,
ie_key=ie_result.get('ie_key'),
extra_info=extra_info)
elif result_type == 'url_transparent':
# Use the information from the embedding page
info = self.extract_info(
ie_result['url'], ie_key=ie_result.get('ie_key'),
extra_info=extra_info, download=False, process=False)
# extract_info may return None when ignoreerrors is enabled and
# extraction failed with an error, don't crash and return early
# in this case
if not info:
return info
force_properties = dict(
(k, v) for k, v in ie_result.items() if v is not None)
for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
if f in force_properties:
del force_properties[f]
new_result = info.copy()
new_result.update(force_properties)
# Extracted info may not be a video result (i.e.
# info.get('_type', 'video') != video) but rather an url or
# url_transparent. In such cases outer metadata (from ie_result)
# should be propagated to inner one (info). For this to happen
# _type of info should be overridden with url_transparent. This
# fixes issue from https://github.com/rg3/youtube-dl/pull/11163.
if new_result.get('_type') == 'url':
new_result['_type'] = 'url_transparent'
return self.process_ie_result(
new_result, download=download, extra_info=extra_info)
elif result_type in ('playlist', 'multi_video'):
# We process each entry in the playlist
playlist = ie_result.get('title') or ie_result.get('id')
self.to_screen('[download] Downloading playlist: %s' % playlist)
playlist_results = []
playliststart = self.params.get('playliststart', 1) - 1
playlistend = self.params.get('playlistend')
# For backwards compatibility, interpret -1 as whole list
if playlistend == -1:
playlistend = None
playlistitems_str = self.params.get('playlist_items')
playlistitems = None
if playlistitems_str is not None:
def iter_playlistitems(format):
for string_segment in format.split(','):
if '-' in string_segment:
start, end = string_segment.split('-')
for item in range(int(start), int(end) + 1):
yield int(item)
else:
yield int(string_segment)
playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
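                # e.g. a playlist_items value of '1-3,7' yields [1, 2, 3, 7]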
ie_entries = ie_result['entries']
def make_playlistitems_entries(list_ie_entries):
num_entries = len(list_ie_entries)
return [
list_ie_entries[i - 1] for i in playlistitems
if -num_entries <= i - 1 < num_entries]
def report_download(num_entries):
self.to_screen(
'[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, num_entries))
if isinstance(ie_entries, list):
n_all_entries = len(ie_entries)
if playlistitems:
entries = make_playlistitems_entries(ie_entries)
else:
entries = ie_entries[playliststart:playlistend]
n_entries = len(entries)
self.to_screen(
'[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
(ie_result['extractor'], playlist, n_all_entries, n_entries))
elif isinstance(ie_entries, PagedList):
if playlistitems:
entries = []
for item in playlistitems:
entries.extend(ie_entries.getslice(
item - 1, item
))
else:
entries = ie_entries.getslice(
playliststart, playlistend)
n_entries = len(entries)
report_download(n_entries)
else: # iterable
if playlistitems:
entries = make_playlistitems_entries(list(itertools.islice(
ie_entries, 0, max(playlistitems))))
else:
entries = list(itertools.islice(
ie_entries, playliststart, playlistend))
n_entries = len(entries)
report_download(n_entries)
if self.params.get('playlistreverse', False):
entries = entries[::-1]
if self.params.get('playlistrandom', False):
random.shuffle(entries)
x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
for i, entry in enumerate(entries, 1):
self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
# This __x_forwarded_for_ip thing is a bit ugly but requires
# minimal changes
if x_forwarded_for:
entry['__x_forwarded_for_ip'] = x_forwarded_for
extra = {
'n_entries': n_entries,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_uploader': ie_result.get('uploader'),
'playlist_uploader_id': ie_result.get('uploader_id'),
'playlist_index': i + playliststart,
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
reason = self._match_entry(entry, incomplete=True)
if reason is not None:
self.to_screen('[download] ' + reason)
continue
entry_result = self.process_ie_result(entry,
download=download,
extra_info=extra)
playlist_results.append(entry_result)
ie_result['entries'] = playlist_results
self.to_screen('[download] Finished downloading playlist: %s' % playlist)
return ie_result
elif result_type == 'compat_list':
self.report_warning(
'Extractor %s returned a compat_list result. '
'It needs to be updated.' % ie_result.get('extractor'))
def _fixup(r):
self.add_extra_info(
r,
{
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
)
return r
ie_result['entries'] = [
self.process_ie_result(_fixup(r), download, extra_info)
for r in ie_result['entries']
]
return ie_result
else:
raise Exception('Invalid result type: %s' % result_type)
def _build_format_filter(self, filter_spec):
" Returns a function to filter the formats according to the filter_spec "
OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
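        # Illustrative filter specs accepted here:
        #   'height<=720'    - numeric comparison against a known field
        #   'height<=?720'   - the '?' also accepts formats where the field is unknown
        #   'filesize>100M'  - values may use size suffixes (parsed via parse_filesize)
        #   'ext=mp4'        - string comparison; '^=', '$=', '*=' and a leading '!' work too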
operator_rex = re.compile(r'''(?x)\s*
(?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
$
''' % '|'.join(map(re.escape, OPERATORS.keys())))
m = operator_rex.search(filter_spec)
if m:
try:
comparison_value = int(m.group('value'))
except ValueError:
comparison_value = parse_filesize(m.group('value'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('value') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid value %r in format specification %r' % (
m.group('value'), filter_spec))
op = OPERATORS[m.group('op')]
if not m:
STR_OPERATORS = {
'=': operator.eq,
'^=': lambda attr, value: attr.startswith(value),
'$=': lambda attr, value: attr.endswith(value),
'*=': lambda attr, value: value in attr,
}
str_operator_rex = re.compile(r'''(?x)
\s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
\s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
\s*(?P<value>[a-zA-Z0-9._-]+)
\s*$
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
m = str_operator_rex.search(filter_spec)
if m:
comparison_value = m.group('value')
str_op = STR_OPERATORS[m.group('op')]
if m.group('negation'):
op = lambda attr, value: not str_op(attr, value)
else:
op = str_op
if not m:
raise ValueError('Invalid filter specification %r' % filter_spec)
def _filter(f):
actual_value = f.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
return _filter
def _default_format_spec(self, info_dict, download=True):
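        # Returns 'bestvideo+bestaudio/best' by default; the order is reversed
        # (preferring the single-file 'best') when downloading to stdout, for live
        # streams, or when ffmpeg/avconv is not available to merge separate streams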
def can_merge():
merger = FFmpegMergerPP(self)
return merger.available and merger.can_merge()
def prefer_best():
if self.params.get('simulate', False):
return False
if not download:
return False
if self.params.get('outtmpl', DEFAULT_OUTTMPL) == '-':
return True
if info_dict.get('is_live'):
return True
if not can_merge():
return True
return False
req_format_list = ['bestvideo+bestaudio', 'best']
if prefer_best():
req_format_list.reverse()
return '/'.join(req_format_list)
def build_format_selector(self, format_spec):
def syntax_error(note, start):
message = (
'Invalid format specification: '
'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
return SyntaxError(message)
PICKFIRST = 'PICKFIRST'
MERGE = 'MERGE'
SINGLE = 'SINGLE'
GROUP = 'GROUP'
FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
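        # Illustrative format specs this parser accepts:
        #   'best'                         -> a SINGLE selector
        #   'bestvideo+bestaudio'          -> a MERGE of two selectors
        #   'bestvideo[height<=720]/best'  -> PICKFIRST with a filtered first choice
        #   '(mp4,webm)[filesize<50M]'     -> a GROUP with a filter applied to it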
def _parse_filter(tokens):
filter_parts = []
for type, string, start, _, _ in tokens:
if type == tokenize.OP and string == ']':
return ''.join(filter_parts)
else:
filter_parts.append(string)
def _remove_unused_ops(tokens):
# Remove operators that we don't use and join them with the surrounding strings
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ('/', '+', ',', '(', ')')
last_string, last_start, last_end, last_line = None, None, None, None
for type, string, start, end, line in tokens:
if type == tokenize.OP and string == '[':
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
# everything inside brackets will be handled by _parse_filter
for type, string, start, end, line in tokens:
yield type, string, start, end, line
if type == tokenize.OP and string == ']':
break
elif type == tokenize.OP and string in ALLOWED_OPS:
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
if not last_string:
last_string = string
last_start = start
last_end = end
else:
last_string += string
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
selectors = []
current_selector = None
for type, string, start, _, _ in tokens:
# ENCODING is only defined in python 3.x
if type == getattr(tokenize, 'ENCODING', None):
continue
elif type in [tokenize.NAME, tokenize.NUMBER]:
current_selector = FormatSelector(SINGLE, string, [])
elif type == tokenize.OP:
if string == ')':
if not inside_group:
# ')' will be handled by the parentheses group
tokens.restore_last_token()
break
elif inside_merge and string in ['/', ',']:
tokens.restore_last_token()
break
elif inside_choice and string == ',':
tokens.restore_last_token()
break
elif string == ',':
if not current_selector:
raise syntax_error('"," must follow a format selector', start)
selectors.append(current_selector)
current_selector = None
elif string == '/':
if not current_selector:
raise syntax_error('"/" must follow a format selector', start)
first_choice = current_selector
second_choice = _parse_format_selection(tokens, inside_choice=True)
current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
elif string == '[':
if not current_selector:
current_selector = FormatSelector(SINGLE, 'best', [])
format_filter = _parse_filter(tokens)
current_selector.filters.append(format_filter)
elif string == '(':
if current_selector:
raise syntax_error('Unexpected "("', start)
group = _parse_format_selection(tokens, inside_group=True)
current_selector = FormatSelector(GROUP, group, [])
elif string == '+':
video_selector = current_selector
audio_selector = _parse_format_selection(tokens, inside_merge=True)
if not video_selector or not audio_selector:
raise syntax_error('"+" must be between two format selectors', start)
current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
else:
raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
elif type == tokenize.ENDMARKER:
break
if current_selector:
selectors.append(current_selector)
return selectors
def _build_selector_function(selector):
if isinstance(selector, list):
fs = [_build_selector_function(s) for s in selector]
def selector_function(ctx):
for f in fs:
for format in f(ctx):
yield format
return selector_function
elif selector.type == GROUP:
selector_function = _build_selector_function(selector.selector)
elif selector.type == PICKFIRST:
fs = [_build_selector_function(s) for s in selector.selector]
def selector_function(ctx):
for f in fs:
picked_formats = list(f(ctx))
if picked_formats:
return picked_formats
return []
elif selector.type == SINGLE:
format_spec = selector.selector
def selector_function(ctx):
formats = list(ctx['formats'])
if not formats:
return
if format_spec == 'all':
for f in formats:
yield f
elif format_spec in ['best', 'worst', None]:
format_idx = 0 if format_spec == 'worst' else -1
audiovideo_formats = [
f for f in formats
if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
if audiovideo_formats:
yield audiovideo_formats[format_idx]
# for extractors with incomplete formats (audio only (soundcloud)
# or video only (imgur)) we will fallback to best/worst
# {video,audio}-only format
elif ctx['incomplete_formats']:
yield formats[format_idx]
elif format_spec == 'bestaudio':
audio_formats = [
f for f in formats
if f.get('vcodec') == 'none']
if audio_formats:
yield audio_formats[-1]
elif format_spec == 'worstaudio':
audio_formats = [
f for f in formats
if f.get('vcodec') == 'none']
if audio_formats:
yield audio_formats[0]
elif format_spec == 'bestvideo':
video_formats = [
f for f in formats
if f.get('acodec') == 'none']
if video_formats:
yield video_formats[-1]
elif format_spec == 'worstvideo':
video_formats = [
f for f in formats
if f.get('acodec') == 'none']
if video_formats:
yield video_formats[0]
else:
extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
if format_spec in extensions:
filter_f = lambda f: f['ext'] == format_spec
else:
filter_f = lambda f: f['format_id'] == format_spec
matches = list(filter(filter_f, formats))
if matches:
yield matches[-1]
elif selector.type == MERGE:
def _merge(formats_info):
format_1, format_2 = [f['format_id'] for f in formats_info]
# The first format must contain the video and the
# second the audio
if formats_info[0].get('vcodec') == 'none':
self.report_error('The first format must '
'contain the video, try using '
'"-f %s+%s"' % (format_2, format_1))
return
# Formats must be opposite (video+audio)
if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
self.report_error(
'Both formats %s and %s are video-only, you must specify "-f video+audio"'
% (format_1, format_2))
return
output_ext = (
formats_info[0]['ext']
if self.params.get('merge_output_format') is None
else self.params['merge_output_format'])
return {
'requested_formats': formats_info,
'format': '%s+%s' % (formats_info[0].get('format'),
formats_info[1].get('format')),
'format_id': '%s+%s' % (formats_info[0].get('format_id'),
formats_info[1].get('format_id')),
'width': formats_info[0].get('width'),
'height': formats_info[0].get('height'),
'resolution': formats_info[0].get('resolution'),
'fps': formats_info[0].get('fps'),
'vcodec': formats_info[0].get('vcodec'),
'vbr': formats_info[0].get('vbr'),
'stretched_ratio': formats_info[0].get('stretched_ratio'),
'acodec': formats_info[1].get('acodec'),
'abr': formats_info[1].get('abr'),
'ext': output_ext,
}
video_selector, audio_selector = map(_build_selector_function, selector.selector)
def selector_function(ctx):
for pair in itertools.product(
video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
yield _merge(pair)
filters = [self._build_format_filter(f) for f in selector.filters]
def final_selector(ctx):
ctx_copy = copy.deepcopy(ctx)
for _filter in filters:
ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
return selector_function(ctx_copy)
return final_selector
stream = io.BytesIO(format_spec.encode('utf-8'))
try:
tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
except tokenize.TokenError:
raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
class TokenIterator(object):
def __init__(self, tokens):
self.tokens = tokens
self.counter = 0
def __iter__(self):
return self
def __next__(self):
if self.counter >= len(self.tokens):
raise StopIteration()
value = self.tokens[self.counter]
self.counter += 1
return value
next = __next__
def restore_last_token(self):
self.counter -= 1
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
return _build_selector_function(parsed_selector)
def _calc_headers(self, info_dict):
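        # Merge the global std_headers with the format's own http_headers plus
        # cookies and the X-Forwarded-For IP, so that the headers exposed in the
        # JSON output are sufficient to replay the request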
res = std_headers.copy()
add_headers = info_dict.get('http_headers')
if add_headers:
res.update(add_headers)
cookies = self._calc_cookies(info_dict)
if cookies:
res['Cookie'] = cookies
if 'X-Forwarded-For' not in res:
x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
if x_forwarded_for_ip:
res['X-Forwarded-For'] = x_forwarded_for_ip
return res
def _calc_cookies(self, info_dict):
pr = sanitized_Request(info_dict['url'])
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
def process_video_result(self, info_dict, download=True):
assert info_dict.get('_type', 'video') == 'video'
if 'id' not in info_dict:
raise ExtractorError('Missing "id" field in extractor result')
if 'title' not in info_dict:
raise ExtractorError('Missing "title" field in extractor result')
def report_force_conversion(field, field_not, conversion):
self.report_warning(
'"%s" field is not %s - forcing %s conversion, there is an error in extractor'
% (field, field_not, conversion))
def sanitize_string_field(info, string_field):
field = info.get(string_field)
if field is None or isinstance(field, compat_str):
return
report_force_conversion(string_field, 'a string', 'string')
info[string_field] = compat_str(field)
def sanitize_numeric_fields(info):
for numeric_field in self._NUMERIC_FIELDS:
field = info.get(numeric_field)
if field is None or isinstance(field, compat_numeric_types):
continue
report_force_conversion(numeric_field, 'numeric', 'int')
info[numeric_field] = int_or_none(field)
sanitize_string_field(info_dict, 'id')
sanitize_numeric_fields(info_dict)
if 'playlist' not in info_dict:
# It isn't part of a playlist
info_dict['playlist'] = None
info_dict['playlist_index'] = None
thumbnails = info_dict.get('thumbnails')
if thumbnails is None:
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
if thumbnails:
thumbnails.sort(key=lambda t: (
t.get('preference') if t.get('preference') is not None else -1,
t.get('width') if t.get('width') is not None else -1,
t.get('height') if t.get('height') is not None else -1,
t.get('id') if t.get('id') is not None else '', t.get('url')))
for i, t in enumerate(thumbnails):
t['url'] = sanitize_url(t['url'])
if t.get('width') and t.get('height'):
t['resolution'] = '%dx%d' % (t['width'], t['height'])
if t.get('id') is None:
t['id'] = '%d' % i
if self.params.get('list_thumbnails'):
self.list_thumbnails(info_dict)
return
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnail'] = sanitize_url(thumbnail)
elif thumbnails:
info_dict['thumbnail'] = thumbnails[-1]['url']
if 'display_id' not in info_dict and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
try:
upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
except (ValueError, OverflowError, OSError):
pass
# Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
for cc_kind in ('subtitles', 'automatic_captions'):
cc = info_dict.get(cc_kind)
if cc:
for _, subtitle in cc.items():
for subtitle_format in subtitle:
if subtitle_format.get('url'):
subtitle_format['url'] = sanitize_url(subtitle_format['url'])
if subtitle_format.get('ext') is None:
subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
automatic_captions = info_dict.get('automatic_captions')
subtitles = info_dict.get('subtitles')
if self.params.get('listsubtitles', False):
if 'automatic_captions' in info_dict:
self.list_subtitles(
info_dict['id'], automatic_captions, 'automatic captions')
self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
return
info_dict['requested_subtitles'] = self.process_subtitles(
info_dict['id'], subtitles, automatic_captions)
# We now pick which formats have to be downloaded
if info_dict.get('formats') is None:
# There's only one format available
formats = [info_dict]
else:
formats = info_dict['formats']
if not formats:
raise ExtractorError('No video formats found!')
def is_wellformed(f):
url = f.get('url')
if not url:
self.report_warning(
'"url" field is missing or empty - skipping format, '
'there is an error in extractor')
return False
if isinstance(url, bytes):
sanitize_string_field(f, 'url')
return True
# Filter out malformed formats for better extraction robustness
formats = list(filter(is_wellformed, formats))
formats_dict = {}
# We check that all the formats have the format and format_id fields
for i, format in enumerate(formats):
sanitize_string_field(format, 'format_id')
sanitize_numeric_fields(format)
format['url'] = sanitize_url(format['url'])
if not format.get('format_id'):
format['format_id'] = compat_str(i)
else:
# Sanitize format_id from characters used in format selector expression
format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
format_id = format['format_id']
if format_id not in formats_dict:
formats_dict[format_id] = []
formats_dict[format_id].append(format)
# Make sure all formats have unique format_id
for format_id, ambiguous_formats in formats_dict.items():
if len(ambiguous_formats) > 1:
for i, format in enumerate(ambiguous_formats):
format['format_id'] = '%s-%d' % (format_id, i)
for i, format in enumerate(formats):
if format.get('format') is None:
format['format'] = '{id} - {res}{note}'.format(
id=format['format_id'],
res=self.format_resolution(format),
note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
)
# Automatically determine file extension if missing
if format.get('ext') is None:
format['ext'] = determine_ext(format['url']).lower()
# Automatically determine protocol if missing (useful for format
# selection purposes)
if format.get('protocol') is None:
format['protocol'] = determine_protocol(format)
# Add HTTP headers, so that external programs can use them from the
# json output
full_format_info = info_dict.copy()
full_format_info.update(format)
format['http_headers'] = self._calc_headers(full_format_info)
# Remove private housekeeping stuff
if '__x_forwarded_for_ip' in info_dict:
del info_dict['__x_forwarded_for_ip']
# TODO Central sorting goes here
if formats[0] is not info_dict:
            # only set the 'formats' field if the original info_dict lists them
            # otherwise we end up with a circular reference: the first (and unique)
# element in the 'formats' field in info_dict is info_dict itself,
# which can't be exported to json
info_dict['formats'] = formats
if self.params.get('listformats'):
self.list_formats(info_dict)
return
req_format = self.params.get('format')
if req_format is None:
req_format = self._default_format_spec(info_dict, download=download)
if self.params.get('verbose'):
self.to_stdout('[debug] Default format spec: %s' % req_format)
format_selector = self.build_format_selector(req_format)
# While in format selection we may need to have an access to the original
# format set in order to calculate some metrics or do some processing.
# For now we need to be able to guess whether original formats provided
# by extractor are incomplete or not (i.e. whether extractor provides only
# video-only or audio-only formats) for proper formats selection for
# extractors with such incomplete formats (see
# https://github.com/rg3/youtube-dl/pull/5556).
# Since formats may be filtered during format selection and may not match
# the original formats the results may be incorrect. Thus original formats
# or pre-calculated metrics should be passed to format selection routines
# as well.
# We will pass a context object containing all necessary additional data
# instead of just formats.
# This fixes incorrect format selection issue (see
# https://github.com/rg3/youtube-dl/issues/10083).
incomplete_formats = (
# All formats are video-only or
all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or
# all formats are audio-only
all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
ctx = {
'formats': formats,
'incomplete_formats': incomplete_formats,
}
formats_to_download = list(format_selector(ctx))
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
if download:
if len(formats_to_download) > 1:
self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
for format in formats_to_download:
new_info = dict(info_dict)
new_info.update(format)
self.process_info(new_info)
# We update the info dict with the best quality format (backwards compatibility)
info_dict.update(formats_to_download[-1])
return info_dict
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
"""Select the requested subtitles and their format"""
available_subs = {}
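        # e.g. with writesubtitles enabled, subtitleslangs=['en'] and
        # subtitlesformat='srt/best' this returns {'en': <srt subtitle info>},
        # falling back to the last listed format (with a warning) if no srt exists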
if normal_subtitles and self.params.get('writesubtitles'):
available_subs.update(normal_subtitles)
if automatic_captions and self.params.get('writeautomaticsub'):
for lang, cap_info in automatic_captions.items():
if lang not in available_subs:
available_subs[lang] = cap_info
if (not self.params.get('writesubtitles') and not
self.params.get('writeautomaticsub') or not
available_subs):
return None
if self.params.get('allsubtitles', False):
requested_langs = available_subs.keys()
else:
if self.params.get('subtitleslangs', False):
requested_langs = self.params.get('subtitleslangs')
elif 'en' in available_subs:
requested_langs = ['en']
else:
requested_langs = [list(available_subs.keys())[0]]
formats_query = self.params.get('subtitlesformat', 'best')
formats_preference = formats_query.split('/') if formats_query else []
subs = {}
for lang in requested_langs:
formats = available_subs.get(lang)
if formats is None:
self.report_warning('%s subtitles not available for %s' % (lang, video_id))
continue
for ext in formats_preference:
if ext == 'best':
f = formats[-1]
break
matches = list(filter(lambda f: f['ext'] == ext, formats))
if matches:
f = matches[-1]
break
else:
f = formats[-1]
self.report_warning(
'No subtitle format found matching "%s" for language %s, '
'using %s' % (formats_query, lang, f['ext']))
subs[lang] = f
return subs
def process_info(self, info_dict):
"""Process a single resolved IE result."""
assert info_dict.get('_type', 'video') == 'video'
max_downloads = self.params.get('max_downloads')
if max_downloads is not None:
if self._num_downloads >= int(max_downloads):
raise MaxDownloadsReached()
info_dict['fulltitle'] = info_dict['title']
if len(info_dict['title']) > 200:
info_dict['title'] = info_dict['title'][:197] + '...'
if 'format' not in info_dict:
info_dict['format'] = info_dict['ext']
reason = self._match_entry(info_dict, incomplete=False)
if reason is not None:
self.to_screen('[download] ' + reason)
return
self._num_downloads += 1
info_dict['_filename'] = filename = self.prepare_filename(info_dict)
# Forced printings
if self.params.get('forcetitle', False):
self.to_stdout(info_dict['fulltitle'])
if self.params.get('forceid', False):
self.to_stdout(info_dict['id'])
if self.params.get('forceurl', False):
if info_dict.get('requested_formats') is not None:
for f in info_dict['requested_formats']:
self.to_stdout(f['url'] + f.get('play_path', ''))
else:
# For RTMP URLs, also include the playpath
self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
self.to_stdout(info_dict['thumbnail'])
if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
self.to_stdout(info_dict['description'])
if self.params.get('forcefilename', False) and filename is not None:
self.to_stdout(filename)
if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
self.to_stdout(formatSeconds(info_dict['duration']))
if self.params.get('forceformat', False):
self.to_stdout(info_dict['format'])
if self.params.get('forcejson', False):
self.to_stdout(json.dumps(info_dict))
# Do nothing else if in simulate mode
if self.params.get('simulate', False):
return
if filename is None:
return
def ensure_dir_exists(path):
try:
dn = os.path.dirname(path)
if dn and not os.path.exists(dn):
os.makedirs(dn)
return True
except (OSError, IOError) as err:
self.report_error('unable to create directory ' + error_to_compat_str(err))
return False
if not ensure_dir_exists(sanitize_path(encodeFilename(filename))):
return
if self.params.get('writedescription', False):
descfn = replace_extension(filename, 'description', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
self.to_screen('[info] Video description is already present')
elif info_dict.get('description') is None:
self.report_warning('There\'s no description to write.')
else:
try:
self.to_screen('[info] Writing video description to: ' + descfn)
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(info_dict['description'])
except (OSError, IOError):
self.report_error('Cannot write description file ' + descfn)
return
if self.params.get('writeannotations', False):
annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
self.to_screen('[info] Video annotations are already present')
else:
try:
self.to_screen('[info] Writing video annotations to: ' + annofn)
with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
annofile.write(info_dict['annotations'])
except (KeyError, TypeError):
self.report_warning('There are no annotations to write.')
except (OSError, IOError):
self.report_error('Cannot write annotations file: ' + annofn)
return
subtitles_are_requested = any([self.params.get('writesubtitles', False),
self.params.get('writeautomaticsub')])
if subtitles_are_requested and info_dict.get('requested_subtitles'):
            # subtitle download errors are already handled in the relevant IE,
            # so extraction silently continues when the IE does not support them
subtitles = info_dict['requested_subtitles']
ie = self.get_info_extractor(info_dict['extractor_key'])
for sub_lang, sub_info in subtitles.items():
sub_format = sub_info['ext']
sub_filename = subtitles_filename(filename, sub_lang, sub_format)
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
else:
self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
if sub_info.get('data') is not None:
try:
# Use newline='' to prevent conversion of newline characters
# See https://github.com/rg3/youtube-dl/issues/10268
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_info['data'])
except (OSError, IOError):
self.report_error('Cannot write subtitles file ' + sub_filename)
return
else:
try:
sub_data = ie._request_webpage(
sub_info['url'], info_dict['id'], note=False).read()
with io.open(encodeFilename(sub_filename), 'wb') as subfile:
subfile.write(sub_data)
except (ExtractorError, IOError, OSError, ValueError) as err:
self.report_warning('Unable to download subtitle for "%s": %s' %
(sub_lang, error_to_compat_str(err)))
continue
if self.params.get('writeinfojson', False):
infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
self.to_screen('[info] Video description metadata is already present')
else:
self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
try:
write_json_file(self.filter_requested_info(info_dict), infofn)
except (OSError, IOError):
self.report_error('Cannot write metadata to JSON file ' + infofn)
return
self._write_thumbnails(info_dict, filename)
if not self.params.get('skip_download', False):
try:
def dl(name, info):
fd = get_suitable_downloader(info, self.params)(self, self.params)
for ph in self._progress_hooks:
fd.add_progress_hook(ph)
if self.params.get('verbose'):
self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
return fd.download(name, info)
if info_dict.get('requested_formats') is not None:
downloaded = []
success = True
merger = FFmpegMergerPP(self)
if not merger.available:
postprocessors = []
self.report_warning('You have requested multiple '
'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged.')
else:
postprocessors = [merger]
def compatible_formats(formats):
video, audio = formats
# Check extension
video_ext, audio_ext = video.get('ext'), audio.get('ext')
if video_ext and audio_ext:
COMPATIBLE_EXTS = (
('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
                                ('webm',)  # one-element tuple; a bare ('webm') is just the string 'webm'
)
for exts in COMPATIBLE_EXTS:
if video_ext in exts and audio_ext in exts:
return True
# TODO: Check acodec/vcodec
return False
filename_real_ext = os.path.splitext(filename)[1][1:]
filename_wo_ext = (
os.path.splitext(filename)[0]
if filename_real_ext == info_dict['ext']
else filename)
requested_formats = info_dict['requested_formats']
if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
info_dict['ext'] = 'mkv'
self.report_warning(
'Requested formats are incompatible for merge and will be merged into mkv.')
# Ensure filename always has a correct extension for successful merge
filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
if os.path.exists(encodeFilename(filename)):
self.to_screen(
'[download] %s has already been downloaded and '
'merged' % filename)
else:
for f in requested_formats:
new_info = dict(info_dict)
new_info.update(f)
fname = prepend_extension(
self.prepare_filename(new_info),
'f%s' % f['format_id'], new_info['ext'])
if not ensure_dir_exists(fname):
return
downloaded.append(fname)
partial_success = dl(fname, new_info)
success = success and partial_success
info_dict['__postprocessors'] = postprocessors
info_dict['__files_to_merge'] = downloaded
else:
# Just a single file
success = dl(filename, info_dict)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
return
except (OSError, IOError) as err:
raise UnavailableVideoError(err)
except (ContentTooShortError, ) as err:
self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success and filename != '-':
# Fixup content
fixup_policy = self.params.get('fixup')
if fixup_policy is None:
fixup_policy = 'detect_or_warn'
INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'
stretched_ratio = info_dict.get('stretched_ratio')
if stretched_ratio is not None and stretched_ratio != 1:
if fixup_policy == 'warn':
self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
info_dict['id'], stretched_ratio))
elif fixup_policy == 'detect_or_warn':
stretched_pp = FFmpegFixupStretchedPP(self)
if stretched_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(stretched_pp)
else:
self.report_warning(
'%s: Non-uniform pixel ratio (%s). %s'
% (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
if (info_dict.get('requested_formats') is None and
info_dict.get('container') == 'm4a_dash'):
if fixup_policy == 'warn':
self.report_warning(
'%s: writing DASH m4a. '
'Only some players support this container.'
% info_dict['id'])
elif fixup_policy == 'detect_or_warn':
fixup_pp = FFmpegFixupM4aPP(self)
if fixup_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(fixup_pp)
else:
self.report_warning(
'%s: writing DASH m4a. '
'Only some players support this container. %s'
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
if (info_dict.get('protocol') == 'm3u8_native' or
info_dict.get('protocol') == 'm3u8' and
self.params.get('hls_prefer_native')):
if fixup_policy == 'warn':
self.report_warning('%s: malformed AAC bitstream detected.' % (
info_dict['id']))
elif fixup_policy == 'detect_or_warn':
fixup_pp = FFmpegFixupM3u8PP(self)
if fixup_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(fixup_pp)
else:
self.report_warning(
'%s: malformed AAC bitstream detected. %s'
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
try:
self.post_process(filename, info_dict)
except (PostProcessingError) as err:
self.report_error('postprocessing: %s' % str(err))
return
self.record_download_archive(info_dict)
def download(self, url_list):
"""Download a given list of URLs."""
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
if (len(url_list) > 1 and
outtmpl != '-' and
'%' not in outtmpl and
self.params.get('max_downloads') != 1):
raise SameFileError(outtmpl)
for url in url_list:
try:
# It also downloads the videos
res = self.extract_info(
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
except UnavailableVideoError:
self.report_error('unable to download video')
except MaxDownloadsReached:
self.to_screen('[info] Maximum number of downloaded files reached.')
raise
else:
if self.params.get('dump_single_json', False):
self.to_stdout(json.dumps(res))
return self._download_retcode
def download_with_info_file(self, info_filename):
with contextlib.closing(fileinput.FileInput(
[info_filename], mode='r',
openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, so we can't call json.load
info = self.filter_requested_info(json.loads('\n'.join(f)))
try:
self.process_ie_result(info, download=True)
except DownloadError:
webpage_url = info.get('webpage_url')
if webpage_url is not None:
self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
return self.download([webpage_url])
else:
raise
return self._download_retcode
@staticmethod
def filter_requested_info(info_dict):
return dict(
(k, v) for k, v in info_dict.items()
if k not in ['requested_formats', 'requested_subtitles'])
def post_process(self, filename, ie_info):
"""Run all the postprocessors on the given file."""
info = dict(ie_info)
info['filepath'] = filename
pps_chain = []
if ie_info.get('__postprocessors') is not None:
pps_chain.extend(ie_info['__postprocessors'])
pps_chain.extend(self._pps)
for pp in pps_chain:
files_to_delete = []
try:
files_to_delete, info = pp.run(info)
except PostProcessingError as e:
self.report_error(e.msg)
if files_to_delete and not self.params.get('keepvideo', False):
for old_filename in files_to_delete:
self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
try:
os.remove(encodeFilename(old_filename))
except (IOError, OSError):
self.report_warning('Unable to remove downloaded original file')
def _make_archive_id(self, info_dict):
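        # e.g. an entry with id 'abc123' extracted by the 'Youtube' extractor
        # yields 'youtube abc123', the line format stored in the download archive file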
video_id = info_dict.get('id')
if not video_id:
return
# Future-proof against any change in case
# and backwards compatibility with prior versions
extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
if extractor is None:
url = str_or_none(info_dict.get('url'))
if not url:
return
# Try to find matching extractor for the URL and take its ie_key
for ie in self._ies:
if ie.suitable(url):
extractor = ie.ie_key()
break
else:
return
return extractor.lower() + ' ' + video_id
def in_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return False
vid_id = self._make_archive_id(info_dict)
if not vid_id:
return False # Incomplete video information
try:
with locked_file(fn, 'r', encoding='utf-8') as archive_file:
for line in archive_file:
if line.strip() == vid_id:
return True
except IOError as ioe:
if ioe.errno != errno.ENOENT:
raise
return False
def record_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return
vid_id = self._make_archive_id(info_dict)
assert vid_id
with locked_file(fn, 'a', encoding='utf-8') as archive_file:
archive_file.write(vid_id + '\n')
@staticmethod
def format_resolution(format, default='unknown'):
if format.get('vcodec') == 'none':
return 'audio only'
if format.get('resolution') is not None:
return format['resolution']
if format.get('height') is not None:
if format.get('width') is not None:
res = '%sx%s' % (format['width'], format['height'])
else:
res = '%sp' % format['height']
elif format.get('width') is not None:
res = '%dx?' % format['width']
else:
res = default
return res
def _format_note(self, fdict):
res = ''
if fdict.get('ext') in ['f4f', 'f4m']:
res += '(unsupported) '
if fdict.get('language'):
if res:
res += ' '
res += '[%s] ' % fdict['language']
if fdict.get('format_note') is not None:
res += fdict['format_note'] + ' '
if fdict.get('tbr') is not None:
res += '%4dk ' % fdict['tbr']
if fdict.get('container') is not None:
if res:
res += ', '
res += '%s container' % fdict['container']
if (fdict.get('vcodec') is not None and
fdict.get('vcodec') != 'none'):
if res:
res += ', '
res += fdict['vcodec']
if fdict.get('vbr') is not None:
res += '@'
elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
if fdict.get('fps') is not None:
if res:
res += ', '
res += '%sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
if fdict['acodec'] == 'none':
res += 'video only'
else:
res += '%-5s' % fdict['acodec']
elif fdict.get('abr') is not None:
if res:
res += ', '
res += 'audio'
if fdict.get('abr') is not None:
res += '@%3dk' % fdict['abr']
if fdict.get('asr') is not None:
res += ' (%5dHz)' % fdict['asr']
if fdict.get('filesize') is not None:
if res:
res += ', '
res += format_bytes(fdict['filesize'])
elif fdict.get('filesize_approx') is not None:
if res:
res += ', '
res += '~' + format_bytes(fdict['filesize_approx'])
return res
def list_formats(self, info_dict):
formats = info_dict.get('formats', [info_dict])
table = [
[f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)]
for f in formats
if f.get('preference') is None or f['preference'] >= -1000]
if len(formats) > 1:
table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'
header_line = ['format code', 'extension', 'resolution', 'note']
self.to_screen(
'[info] Available formats for %s:\n%s' %
(info_dict['id'], render_table(header_line, table)))
def list_thumbnails(self, info_dict):
thumbnails = info_dict.get('thumbnails')
if not thumbnails:
self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
return
self.to_screen(
'[info] Thumbnails for %s:' % info_dict['id'])
self.to_screen(render_table(
['ID', 'width', 'height', 'URL'],
[[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
def list_subtitles(self, video_id, subtitles, name='subtitles'):
if not subtitles:
self.to_screen('%s has no %s' % (video_id, name))
return
self.to_screen(
'Available %s for %s:' % (name, video_id))
self.to_screen(render_table(
['Language', 'formats'],
[[lang, ', '.join(f['ext'] for f in reversed(formats))]
for lang, formats in subtitles.items()]))
def urlopen(self, req):
""" Start an HTTP download """
if isinstance(req, compat_basestring):
req = sanitized_Request(req)
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
if not self.params.get('verbose'):
return
if type('') is not compat_str:
# Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
self.report_warning(
'Your Python is broken! Update to a newer and supported version')
stdout_encoding = getattr(
sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
encoding_str = (
'[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
locale.getpreferredencoding(),
sys.getfilesystemencoding(),
stdout_encoding,
self.get_encoding()))
write_string(encoding_str, encoding=None)
self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
if _LAZY_LOADER:
self._write_string('[debug] Lazy loading extractors enabled' + '\n')
try:
sp = subprocess.Popen(
['git', 'rev-parse', '--short', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.path.dirname(os.path.abspath(__file__)))
out, err = sp.communicate()
out = out.decode().strip()
if re.match('[0-9a-f]+', out):
self._write_string('[debug] Git HEAD: ' + out + '\n')
except Exception:
try:
sys.exc_clear()
except Exception:
pass
def python_implementation():
impl_name = platform.python_implementation()
if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
return impl_name
self._write_string('[debug] Python version %s (%s) - %s\n' % (
platform.python_version(), python_implementation(),
platform_name()))
exe_versions = FFmpegPostProcessor.get_versions(self)
exe_versions['rtmpdump'] = rtmpdump_version()
exe_versions['phantomjs'] = PhantomJSwrapper._version()
exe_str = ', '.join(
'%s %s' % (exe, v)
for exe, v in sorted(exe_versions.items())
if v
)
if not exe_str:
exe_str = 'none'
self._write_string('[debug] exe versions: %s\n' % exe_str)
proxy_map = {}
for handler in self._opener.handlers:
if hasattr(handler, 'proxies'):
proxy_map.update(handler.proxies)
self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
if self.params.get('call_home', False):
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
self._write_string('[debug] Public IP address: %s\n' % ipaddr)
latest_version = self.urlopen(
'https://yt-dl.org/latest/version').read().decode('utf-8')
if version_tuple(latest_version) > version_tuple(__version__):
self.report_warning(
'You are using an outdated version (newest version: %s)! '
'See https://yt-dl.org/update if you need help updating.' %
latest_version)
def _setup_opener(self):
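        # Build the shared urllib opener: cookie jar (optionally backed by the
        # 'cookiefile' param), per-request proxy handling, data: URL support and
        # a file:// handler that is deliberately disabled below for security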
timeout_val = self.params.get('socket_timeout')
self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
opts_cookiefile = self.params.get('cookiefile')
opts_proxy = self.params.get('proxy')
if opts_cookiefile is None:
self.cookiejar = compat_cookiejar.CookieJar()
else:
opts_cookiefile = expand_path(opts_cookiefile)
self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
if os.access(opts_cookiefile, os.R_OK):
self.cookiejar.load(ignore_discard=True, ignore_expires=True)
cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
if opts_proxy is not None:
if opts_proxy == '':
proxies = {}
else:
proxies = {'http': opts_proxy, 'https': opts_proxy}
else:
proxies = compat_urllib_request.getproxies()
# Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
if 'http' in proxies and 'https' not in proxies:
proxies['https'] = proxies['http']
proxy_handler = PerRequestProxyHandler(proxies)
debuglevel = 1 if self.params.get('debug_printtraffic') else 0
https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
data_handler = compat_urllib_request_DataHandler()
# When passing our own FileHandler instance, build_opener won't add the
# default FileHandler and allows us to disable the file protocol, which
# can be used for malicious purposes (see
# https://github.com/rg3/youtube-dl/issues/8227)
file_handler = compat_urllib_request.FileHandler()
def file_open(*args, **kwargs):
raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
file_handler.file_open = file_open
opener = compat_urllib_request.build_opener(
proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler)
# Delete the default user-agent header, which would otherwise apply in
# cases where our custom HTTP handler doesn't come into play
# (See https://github.com/rg3/youtube-dl/issues/1309 for details)
opener.addheaders = []
self._opener = opener
def encode(self, s):
if isinstance(s, bytes):
return s # Already encoded
try:
return s.encode(self.get_encoding())
except UnicodeEncodeError as err:
err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
raise
def get_encoding(self):
encoding = self.params.get('encoding')
if encoding is None:
encoding = preferredencoding()
return encoding
def _write_thumbnails(self, info_dict, filename):
if self.params.get('writethumbnail', False):
thumbnails = info_dict.get('thumbnails')
if thumbnails:
thumbnails = [thumbnails[-1]]
elif self.params.get('write_all_thumbnails', False):
thumbnails = info_dict.get('thumbnails')
else:
return
if not thumbnails:
# No thumbnails present, so return immediately
return
for t in thumbnails:
thumb_ext = determine_ext(t['url'], 'jpg')
suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
self.to_screen('[%s] %s: Thumbnail %sis already present' %
(info_dict['extractor'], info_dict['id'], thumb_display_id))
else:
self.to_screen('[%s] %s: Downloading thumbnail %s...' %
(info_dict['extractor'], info_dict['id'], thumb_display_id))
try:
uf = self.urlopen(t['url'])
with open(encodeFilename(thumb_filename), 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
(info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_warning('Unable to download thumbnail "%s": %s' %
(t['url'], error_to_compat_str(err)))
|
unlicense
|
deathping1994/sendmail-api
|
venv/lib/python2.7/site-packages/werkzeug/contrib/wrappers.py
|
295
|
10331
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~
Extra wrappers or mixins contributed by the community. These wrappers can
    be mixed into request objects to add extra functionality.
Example::
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin
class Request(RequestBase, JSONRequestMixin):
pass
Afterwards this request object provides the extra functionality of the
:class:`JSONRequestMixin`.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import codecs
try:
from simplejson import loads
except ImportError:
from json import loads
from werkzeug.exceptions import BadRequest
from werkzeug.utils import cached_property
from werkzeug.http import dump_options_header, parse_options_header
from werkzeug._compat import wsgi_decoding_dance
def is_known_charset(charset):
"""Checks if the given charset is known to Python."""
try:
codecs.lookup(charset)
except LookupError:
return False
return True
class JSONRequestMixin(object):
"""Add json method to a request object. This will parse the input data
through simplejson if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
is not json or if the data itself cannot be parsed as json.
"""
@cached_property
def json(self):
"""Get the result of simplejson.loads if possible."""
if 'json' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a JSON request')
try:
return loads(self.data)
except Exception:
raise BadRequest('Unable to read JSON request')
class ProtobufRequestMixin(object):
"""Add protobuf parsing method to a request object. This will parse the
input data through `protobuf`_ if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
    is not protobuf or if the data itself cannot be parsed properly.
.. _protobuf: http://code.google.com/p/protobuf/
"""
#: by default the :class:`ProtobufRequestMixin` will raise a
#: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
#: initialized. You can bypass that check by setting this
#: attribute to `False`.
protobuf_check_initialization = True
def parse_protobuf(self, proto_type):
"""Parse the data into an instance of proto_type."""
if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a Protobuf request')
obj = proto_type()
try:
obj.ParseFromString(self.data)
except Exception:
raise BadRequest("Unable to parse Protobuf request")
# Fail if not all required fields are set
if self.protobuf_check_initialization and not obj.IsInitialized():
raise BadRequest("Partial Protobuf request")
return obj
class RoutingArgsRequestMixin(object):
"""This request mixin adds support for the wsgiorg routing args
`specification`_.
.. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
"""
def _get_routing_args(self):
return self.environ.get('wsgiorg.routing_args', (()))[0]
def _set_routing_args(self, value):
if self.shallow:
raise RuntimeError('A shallow request tried to modify the WSGI '
'environment. If you really want to do that, '
'set `shallow` to False.')
self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)
routing_args = property(_get_routing_args, _set_routing_args, doc='''
The positional URL arguments as `tuple`.''')
del _get_routing_args, _set_routing_args
def _get_routing_vars(self):
rv = self.environ.get('wsgiorg.routing_args')
if rv is not None:
return rv[1]
rv = {}
if not self.shallow:
self.routing_vars = rv
return rv
def _set_routing_vars(self, value):
if self.shallow:
raise RuntimeError('A shallow request tried to modify the WSGI '
'environment. If you really want to do that, '
'set `shallow` to False.')
self.environ['wsgiorg.routing_args'] = (self.routing_args, value)
routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
The keyword URL arguments as `dict`.''')
del _get_routing_vars, _set_routing_vars
class ReverseSlashBehaviorRequestMixin(object):
"""This mixin reverses the trailing slash behavior of :attr:`script_root`
and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin`
directly on the paths.
    Because it changes the behavior of :class:`Request` this class has to be
mixed in *before* the actual request class::
class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
pass
This example shows the differences (for an application mounted on
`/application` and the request going to `/application/foo/bar`):
+---------------+-------------------+---------------------+
| | normal behavior | reverse behavior |
+===============+===================+=====================+
| `script_root` | ``/application`` | ``/application/`` |
+---------------+-------------------+---------------------+
| `path` | ``/foo/bar`` | ``foo/bar`` |
+---------------+-------------------+---------------------+
"""
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will not include a leading slash.
"""
path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
self.charset, self.encoding_errors)
return path.lstrip('/')
@cached_property
def script_root(self):
"""The root path of the script includling a trailing slash."""
path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
self.charset, self.encoding_errors)
        return path.rstrip('/') + '/'
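# A quick sketch of why the reversed slash behavior matters: with a trailing
# slash on `script_root`, :func:`~urlparse.urljoin` keeps relative joins
# inside the mount point instead of escaping to the server root:
#
#     >>> from urlparse import urljoin
#     >>> urljoin('/application/', 'foo/bar')   # reverse behavior
#     '/application/foo/bar'
#     >>> urljoin('/application', 'foo/bar')    # normal behavior
#     '/foo/bar'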
class DynamicCharsetRequestMixin(object):
""""If this mixin is mixed into a request class it will provide
a dynamic `charset` attribute. This means that if the charset is
transmitted in the content type headers it's used from there.
    Because it changes the behavior of :class:`Request` this class has
to be mixed in *before* the actual request class::
class MyRequest(DynamicCharsetRequestMixin, Request):
pass
By default the request object assumes that the URL charset is the
same as the data charset. If the charset varies on each request
based on the transmitted data it's not a good idea to let the URLs
change based on that. Most browsers assume either utf-8 or latin1
    for the URLs if they have trouble figuring it out. It's strongly
recommended to set the URL charset to utf-8::
class MyRequest(DynamicCharsetRequestMixin, Request):
url_charset = 'utf-8'
.. versionadded:: 0.6
"""
#: the default charset that is assumed if the content type header
#: is missing or does not contain a charset parameter. The default
#: is latin1 which is what HTTP specifies as default charset.
#: You may however want to set this to utf-8 to better support
#: browsers that do not transmit a charset for incoming data.
default_charset = 'latin1'
def unknown_charset(self, charset):
"""Called if a charset was provided but is not supported by
the Python codecs module. By default latin1 is assumed then
to not lose any information, you may override this method to
change the behavior.
:param charset: the charset that was not found.
:return: the replacement charset.
"""
return 'latin1'
@cached_property
def charset(self):
"""The charset from the content type."""
header = self.environ.get('CONTENT_TYPE')
if header:
ct, options = parse_options_header(header)
charset = options.get('charset')
if charset:
if is_known_charset(charset):
return charset
return self.unknown_charset(charset)
return self.default_charset
class DynamicCharsetResponseMixin(object):
"""If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute. This means that the charset is
    looked up in the `Content-Type` header, stored there, and updated
    automatically. This also means a small performance hit but
can be useful if you're working with different charsets on
responses.
    Because the charset attribute is not a property at class-level, the
default value is stored in `default_charset`.
    Because it changes the behavior of :class:`Response` this class has
to be mixed in *before* the actual response class::
class MyResponse(DynamicCharsetResponseMixin, Response):
pass
.. versionadded:: 0.6
"""
#: the default charset.
default_charset = 'utf-8'
def _get_charset(self):
header = self.headers.get('content-type')
if header:
charset = parse_options_header(header)[1].get('charset')
if charset:
return charset
return self.default_charset
def _set_charset(self, charset):
header = self.headers.get('content-type')
ct, options = parse_options_header(header)
if not ct:
raise TypeError('Cannot set charset if Content-Type '
'header is missing.')
options['charset'] = charset
self.headers['Content-Type'] = dump_options_header(ct, options)
charset = property(_get_charset, _set_charset, doc="""
The charset for the response. It's stored inside the
Content-Type header as a parameter.""")
del _get_charset, _set_charset
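# Minimal usage sketch for :class:`JSONRequestMixin` (assumes werkzeug.test
# and werkzeug.wrappers are importable; only runs when this module is
# executed directly):
if __name__ == '__main__':
    from werkzeug.test import EnvironBuilder
    from werkzeug.wrappers import Request as _BaseRequest

    class _JSONRequest(_BaseRequest, JSONRequestMixin):
        pass

    # Build a fake WSGI environ carrying a JSON body and parse it.
    _builder = EnvironBuilder(method='POST', data='{"answer": 42}',
                              content_type='application/json')
    print(_JSONRequest(_builder.get_environ()).json)  # {'answer': 42}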
|
apache-2.0
|
stefrobb/namebench
|
nb_third_party/dns/ipv6.py
|
248
|
4995
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""IPv6 helper functions."""
import re
import dns.exception
import dns.ipv4
_leading_zero = re.compile(r'0+([0-9a-f]+)')
def inet_ntoa(address):
"""Convert a network format IPv6 address into text.
@param address: the binary address
@type address: string
@rtype: string
@raises ValueError: the address isn't 16 bytes long
"""
if len(address) != 16:
raise ValueError("IPv6 addresses are 16 bytes long")
hex = address.encode('hex_codec')
chunks = []
i = 0
l = len(hex)
while i < l:
chunk = hex[i : i + 4]
# strip leading zeros. we do this with an re instead of
# with lstrip() because lstrip() didn't support chars until
# python 2.2.2
m = _leading_zero.match(chunk)
        if m is not None:
chunk = m.group(1)
chunks.append(chunk)
i += 4
#
# Compress the longest subsequence of 0-value chunks to ::
#
best_start = 0
best_len = 0
start = -1
last_was_zero = False
for i in xrange(8):
if chunks[i] != '0':
if last_was_zero:
end = i
current_len = end - start
if current_len > best_len:
best_start = start
best_len = current_len
last_was_zero = False
elif not last_was_zero:
start = i
last_was_zero = True
if last_was_zero:
end = 8
current_len = end - start
if current_len > best_len:
best_start = start
best_len = current_len
if best_len > 0:
if best_start == 0 and \
(best_len == 6 or
best_len == 5 and chunks[5] == 'ffff'):
# We have an embedded IPv4 address
if best_len == 6:
prefix = '::'
else:
prefix = '::ffff:'
hex = prefix + dns.ipv4.inet_ntoa(address[12:])
else:
hex = ':'.join(chunks[:best_start]) + '::' + \
':'.join(chunks[best_start + best_len:])
else:
hex = ':'.join(chunks)
return hex
_v4_ending = re.compile(r'(.*):(\d+)\.(\d+)\.(\d+)\.(\d+)$')
_colon_colon_start = re.compile(r'::.*')
_colon_colon_end = re.compile(r'.*::$')
def inet_aton(text):
"""Convert a text format IPv6 address into network format.
@param text: the textual address
@type text: string
@rtype: string
@raises dns.exception.SyntaxError: the text was not properly formatted
"""
#
# Our aim here is not something fast; we just want something that works.
#
if text == '::':
text = '0::'
#
# Get rid of the icky dot-quad syntax if we have it.
#
m = _v4_ending.match(text)
    if m is not None:
text = "%s:%04x:%04x" % (m.group(1),
int(m.group(2)) * 256 + int(m.group(3)),
int(m.group(4)) * 256 + int(m.group(5)))
#
# Try to turn '::<whatever>' into ':<whatever>'; if no match try to
# turn '<whatever>::' into '<whatever>:'
#
m = _colon_colon_start.match(text)
    if m is not None:
text = text[1:]
else:
m = _colon_colon_end.match(text)
        if m is not None:
text = text[:-1]
#
# Now canonicalize into 8 chunks of 4 hex digits each
#
chunks = text.split(':')
l = len(chunks)
if l > 8:
raise dns.exception.SyntaxError
seen_empty = False
canonical = []
for c in chunks:
if c == '':
if seen_empty:
raise dns.exception.SyntaxError
seen_empty = True
for i in xrange(0, 8 - l + 1):
canonical.append('0000')
else:
lc = len(c)
if lc > 4:
raise dns.exception.SyntaxError
if lc != 4:
c = ('0' * (4 - lc)) + c
canonical.append(c)
if l < 8 and not seen_empty:
raise dns.exception.SyntaxError
text = ''.join(canonical)
#
# Finally we can go to binary.
#
try:
return text.decode('hex_codec')
except TypeError:
raise dns.exception.SyntaxError
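# Minimal self-check sketch (Python 2 only, since the module relies on the
# 'hex_codec' string codec): round-trip a few textual addresses.
if __name__ == '__main__':
    for _text in ['::1', '2001:db8::1', '::ffff:1.2.3.4']:
        _packed = inet_aton(_text)
        print('%s -> %d bytes -> %s' % (_text, len(_packed), inet_ntoa(_packed)))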
|
apache-2.0
|
Achint08/open-event-orga-server
|
app/views/users/my_tickets.py
|
7
|
1355
|
from flask import Blueprint
from flask import render_template
from app.helpers.data_getter import DataGetter
from app.helpers.ticketing import TicketingManager
my_tickets = Blueprint('my_tickets', __name__, url_prefix='/mytickets')
@my_tickets.route('/')
def display_my_tickets():
page_content = {"tab_upcoming_events": "Upcoming Events",
"tab_past_events": "Past Events",
"title": "My Tickets"}
upcoming_events_orders = TicketingManager.get_orders_of_user(upcoming_events=True)
past_events_orders = TicketingManager.get_orders_of_user(upcoming_events=False)
placeholder_images = DataGetter.get_event_default_images()
custom_placeholder = DataGetter.get_custom_placeholders()
im_config = DataGetter.get_image_configs()
im_size = ''
for config in im_config:
if config.page == 'mysession':
im_size = config.size
return render_template('gentelella/users/mytickets/mytickets_list.html',
page_content=page_content,
upcoming_events_orders=upcoming_events_orders,
past_events_orders=past_events_orders,
placeholder_images=placeholder_images,
custom_placeholder=custom_placeholder,
im_size=im_size)
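# Registration sketch (illustrative; assumes the Flask app object created
# elsewhere in the project):
#
#     from app.views.users.my_tickets import my_tickets
#     app.register_blueprint(my_tickets)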
|
gpl-3.0
|
bankonmecoin/bitcoin
|
qa/rpc-tests/test_framework/netutil.py
|
328
|
4562
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
import binascii
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(items):
    return [x for x in items if x != '']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = binascii.unhexlify(host)
host_out = ''
for x in range(0, len(host)/4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
'''
Function to return a list with status of tcp connections at linux systems
To get pid of all network process running on system, you must run this script
as superuser
'''
with open('/proc/net/'+typ,'r') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', '\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split('\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return binascii.hexlify(bytearray(addr))
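# Minimal sketch of addr_to_hex() (unlike netstat()/get_bind_addrs(), it
# needs neither /proc nor root privileges):
if __name__ == '__main__':
    print(addr_to_hex('127.0.0.1'))  # '7f000001'
    print(addr_to_hex('::1'))        # 15 zero bytes then '01', as 32 hex chars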
|
mit
|
3manuek/scikit-learn
|
sklearn/linear_model/passive_aggressive.py
|
106
|
9705
|
# Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True,
n_iter=5, shuffle=True, verbose=0, loss="hinge",
n_jobs=1, random_state=None, warm_start=False):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
    coef_ : array, shape = [n_features]
        Weights assigned to the features.
    intercept_ : array, shape = [1]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, class_weight=None,
warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
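# Usage sketch (illustrative only; parameter names follow the docstrings
# above):
#
#     import numpy as np
#     from sklearn.linear_model import PassiveAggressiveClassifier
#
#     X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
#     y = np.array([0, 0, 1, 1])
#     clf = PassiveAggressiveClassifier(C=1.0, n_iter=5, random_state=0)
#     clf.fit(X, y)
#     print(clf.predict([[2.5, 2.5]]))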
|
bsd-3-clause
|
eyesnears/ardupilot
|
Tools/autotest/arducopter.py
|
8
|
44386
|
# fly ArduCopter in SITL
# Flight mode switch positions are set-up in arducopter.param to be
# switch 1 = Circle
# switch 2 = Land
# switch 3 = RTL
# switch 4 = Auto
# switch 5 = Loiter
# switch 6 = Stabilize
import util, pexpect, sys, time, math, shutil, os
from common import *
from pymavlink import mavutil, mavwp
import random
# get location of scripts
testdir=os.path.dirname(os.path.realpath(__file__))
FRAME='+'
TARGET='sitl'
HOME=mavutil.location(-35.362938,149.165085,584,270)
AVCHOME=mavutil.location(40.072842,-105.230575,1586,0)
homeloc = None
num_wp = 0
speedup_default = 5
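# RC channel conventions used by the helpers below (standard copter mapping):
# rc 1 = roll, rc 2 = pitch, rc 3 = throttle, rc 4 = yaw; 1500 is stick
# centre, ~1000 is low/left and ~2000 is high/right.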
def hover(mavproxy, mav, hover_throttle=1450):
mavproxy.send('rc 3 %u\n' % hover_throttle)
return True
def arm_motors(mavproxy, mav):
'''arm motors'''
print("Arming motors")
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1000\n')
mavproxy.send('rc 4 2000\n')
mavproxy.expect('APM: ARMING MOTORS')
mavproxy.send('rc 4 1500\n')
mav.motors_armed_wait()
print("MOTORS ARMED OK")
return True
def disarm_motors(mavproxy, mav):
'''disarm motors'''
print("Disarming motors")
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1000\n')
mavproxy.send('rc 4 1000\n')
mavproxy.expect('APM: DISARMING MOTORS')
mavproxy.send('rc 4 1500\n')
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
return True
def takeoff(mavproxy, mav, alt_min = 30, takeoff_throttle=1700):
'''takeoff get to 30m altitude'''
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 %u\n' % takeoff_throttle)
m = mav.recv_match(type='VFR_HUD', blocking=True)
if (m.alt < alt_min):
wait_altitude(mav, alt_min, (alt_min + 5))
hover(mavproxy, mav)
print("TAKEOFF COMPLETE")
return True
# loiter - fly south west, then hold loiter within 5m position and altitude
def loiter(mavproxy, mav, holdtime=10, maxaltchange=5, maxdistchange=5):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# first aim south east
print("turn south east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 170):
return False
mavproxy.send('rc 4 1500\n')
#fly south east 50m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 50):
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow moving
if not wait_groundspeed(mav, 0, 2):
return False
success = True
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = mav.location()
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Holding loiter at %u meters for %u seconds" % (start_altitude, holdtime))
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
delta = get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
print("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
print("Loiter alt shifted %u meters (> limit of %u)" % (alt_delta, maxaltchange))
success = False
if delta > maxdistchange:
print("Loiter shifted %u meters (> limit of %u)" % (delta, maxdistchange))
success = False
if success:
print("Loiter OK for %u seconds" % holdtime)
else:
print("Loiter FAILED")
return success
def change_alt(mavproxy, mav, alt_min, climb_throttle=1920, descend_throttle=1080):
'''change altitude'''
m = mav.recv_match(type='VFR_HUD', blocking=True)
if(m.alt < alt_min):
print("Rise to alt:%u from %u" % (alt_min, m.alt))
mavproxy.send('rc 3 %u\n' % climb_throttle)
wait_altitude(mav, alt_min, (alt_min + 5))
else:
print("Lower to alt:%u from %u" % (alt_min, m.alt))
mavproxy.send('rc 3 %u\n' % descend_throttle)
wait_altitude(mav, (alt_min -5), alt_min)
hover(mavproxy, mav)
return True
# fly a square in stabilize mode
def fly_square(mavproxy, mav, side=50, timeout=300):
'''fly a square, flying N then E'''
tstart = get_sim_time(mav)
success = True
# ensure all sticks in the middle
mavproxy.send('rc 1 1500\n')
mavproxy.send('rc 2 1500\n')
mavproxy.send('rc 3 1500\n')
mavproxy.send('rc 4 1500\n')
# switch to loiter mode temporarily to stop us from rising
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
# first aim north
print("turn right towards north")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 10):
print("Failed to reach heading")
success = False
mavproxy.send('rc 4 1500\n')
mav.recv_match(condition='RC_CHANNELS_RAW.chan4_raw==1500', blocking=True)
# save bottom left corner of box as waypoint
print("Save WP 1 & 2")
save_wp(mavproxy, mav)
# switch back to stabilize mode
mavproxy.send('rc 3 1430\n')
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
# pitch forward to fly north
print("Going north %u meters" % side)
mavproxy.send('rc 2 1300\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 2 1500\n')
# save top left corner of square as waypoint
print("Save WP 3")
save_wp(mavproxy, mav)
# roll right to fly east
print("Going east %u meters" % side)
mavproxy.send('rc 1 1700\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 1 1500\n')
# save top right corner of square as waypoint
print("Save WP 4")
save_wp(mavproxy, mav)
# pitch back to fly south
print("Going south %u meters" % side)
mavproxy.send('rc 2 1700\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 2 1500\n')
# save bottom right corner of square as waypoint
print("Save WP 5")
save_wp(mavproxy, mav)
# roll left to fly west
print("Going west %u meters" % side)
mavproxy.send('rc 1 1300\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 1 1500\n')
# save bottom left corner of square (should be near home) as waypoint
print("Save WP 6")
save_wp(mavproxy, mav)
# descend to 10m
print("Descend to 10m in Loiter")
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
mavproxy.send('rc 3 1300\n')
time_left = timeout - (get_sim_time(mav) - tstart)
print("timeleft = %u" % time_left)
if time_left < 20:
time_left = 20
if not wait_altitude(mav, -10, 10, time_left):
print("Failed to reach alt of 10m")
success = False
save_wp(mavproxy, mav)
return success
def fly_RTL(mavproxy, mav, side=60, timeout=250):
'''Return, land'''
print("# Enter RTL")
mavproxy.send('switch 3\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
if(m.alt <= 1 and home_distance < 10):
return True
return False
def fly_throttle_failsafe(mavproxy, mav, side=60, timeout=180):
'''Fly east, Failsafe, return, land'''
# switch to loiter mode temporarily to stop us from rising
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
# first aim east
print("turn east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 135):
return False
mavproxy.send('rc 4 1500\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
hover(mavproxy, mav)
failed = False
# fly east 60 meters
print("# Going forward %u meters" % side)
mavproxy.send('rc 2 1350\n')
if not wait_distance(mav, side, 5, 60):
failed = True
mavproxy.send('rc 2 1500\n')
# pull throttle low
print("# Enter Failsafe")
mavproxy.send('rc 3 900\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
# check if we've reached home
if m.alt <= 1 and home_distance < 10:
# reduce throttle
mavproxy.send('rc 3 1100\n')
# switch back to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Reached failsafe home OK")
return True
print("Failed to land on failsafe RTL - timed out after %u seconds" % timeout)
# reduce throttle
mavproxy.send('rc 3 1100\n')
# switch back to stabilize mode
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
return False
def fly_battery_failsafe(mavproxy, mav, timeout=30):
# assume failure
success = False
# switch to loiter mode so that we hold position
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
mavproxy.send("rc 3 1500\n")
# enable battery failsafe
mavproxy.send("param set FS_BATT_ENABLE 1\n")
# trigger low voltage
mavproxy.send('param set SIM_BATT_VOLTAGE 10\n')
# wait for LAND mode
new_mode = wait_mode(mav, 'LAND')
if new_mode == 'LAND':
success = True
# disable battery failsafe
mavproxy.send('param set FS_BATT_ENABLE 0\n')
# return status
if success:
print("Successfully entered LAND mode after battery failsafe")
else:
print("Failed to enter LAND mode after battery failsafe")
return success
# fly_stability_patch - fly south, then hold loiter within 5m position and altitude and reduce 1 motor to 60% efficiency
def fly_stability_patch(mavproxy, mav, holdtime=30, maxaltchange=5, maxdistchange=10):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# first south
print("turn south")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 180):
return False
mavproxy.send('rc 4 1500\n')
    # fly south 80m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 80):
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow moving
if not wait_groundspeed(mav, 0, 2):
return False
success = True
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = mav.location()
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Holding loiter at %u meters for %u seconds" % (start_altitude, holdtime))
# cut motor 1 to 55% efficiency
print("Cutting motor 1 to 55% efficiency")
mavproxy.send('param set SIM_ENGINE_MUL 0.55\n')
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
delta = get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
print("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
print("Loiter alt shifted %u meters (> limit of %u)" % (alt_delta, maxaltchange))
success = False
if delta > maxdistchange:
print("Loiter shifted %u meters (> limit of %u)" % (delta, maxdistchange))
success = False
# restore motor 1 to 100% efficiency
mavproxy.send('param set SIM_ENGINE_MUL 1.0\n')
if success:
print("Stability patch and Loiter OK for %u seconds" % holdtime)
else:
print("Stability Patch FAILED")
return success
# fly_fence_test - fly east until you hit the horizontal circular fence
def fly_fence_test(mavproxy, mav, timeout=180):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# enable fence
mavproxy.send('param set FENCE_ENABLE 1\n')
# first east
print("turn east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 160):
return False
mavproxy.send('rc 4 1500\n')
# fly forward (east) at least 20m
pitching_forward = True
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 20):
return False
# start timer
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
# recenter pitch sticks once we reach home so we don't fly off again
if pitching_forward and home_distance < 10 :
pitching_forward = False
mavproxy.send('rc 2 1500\n')
# disable fence
mavproxy.send('param set FENCE_ENABLE 0\n')
if m.alt <= 1 and home_distance < 10:
# reduce throttle
mavproxy.send('rc 3 1000\n')
# switch mode to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Reached home OK")
return True
# disable fence
mavproxy.send('param set FENCE_ENABLE 0\n')
# reduce throttle
mavproxy.send('rc 3 1000\n')
# switch mode to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Fence test failed to reach home - timed out after %u seconds" % timeout)
return False
def show_gps_and_sim_positions(mavproxy, on_off):
if on_off == True:
# turn on simulator display of gps and actual position
mavproxy.send('map set showgpspos 1\n')
mavproxy.send('map set showsimpos 1\n')
else:
# turn off simulator display of gps and actual position
mavproxy.send('map set showgpspos 0\n')
mavproxy.send('map set showsimpos 0\n')
# fly_gps_glitch_loiter_test - fly south east in loiter and test reaction to gps glitch
def fly_gps_glitch_loiter_test(mavproxy, mav, timeout=30, max_distance=20):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# turn on simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, True)
# set-up gps glitch array
glitch_lat = [0.0002996,0.0006958,0.0009431,0.0009991,0.0009444,0.0007716,0.0006221]
glitch_lon = [0.0000717,0.0000912,0.0002761,0.0002626,0.0002807,0.0002049,0.0001304]
glitch_num = len(glitch_lat)
print("GPS Glitches:")
for i in range(1,glitch_num):
print("glitch %d %.7f %.7f" % (i,glitch_lat[i],glitch_lon[i]))
# turn south east
print("turn south east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 150):
show_gps_and_sim_positions(mavproxy, False)
return False
mavproxy.send('rc 4 1500\n')
# fly forward (south east) at least 60m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 60):
show_gps_and_sim_positions(mavproxy, False)
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow down
if not wait_groundspeed(mav, 0, 1):
show_gps_and_sim_positions(mavproxy, False)
return False
# record time and position
tstart = get_sim_time(mav)
tnow = tstart
start_pos = sim_location(mav)
success = True
# initialise current glitch
    glitch_current = 0
print("Apply first glitch")
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# record position for 30 seconds
while tnow < tstart + timeout:
tnow = get_sim_time(mav)
desired_glitch_num = int((tnow - tstart) * 2.2)
if desired_glitch_num > glitch_current and glitch_current != -1:
glitch_current = desired_glitch_num
# turn off glitching if we've reached the end of the glitch list
if glitch_current >= glitch_num:
glitch_current = -1
print("Completed Glitches")
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
else:
print("Applying glitch %u" % glitch_current)
#move onto the next glitch
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# start displaying distance moved after all glitches applied
if (glitch_current == -1):
m = mav.recv_match(type='VFR_HUD', blocking=True)
curr_pos = sim_location(mav)
moved_distance = get_distance(curr_pos, start_pos)
print("Alt: %u Moved: %.0f" % (m.alt, moved_distance))
if moved_distance > max_distance:
print("Moved over %u meters, Failed!" % max_distance)
success = False
# disable gps glitch
if glitch_current != -1:
glitch_current = -1
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
show_gps_and_sim_positions(mavproxy, False)
if success:
print("GPS glitch test passed! stayed within %u meters for %u seconds" % (max_distance, timeout))
else:
print("GPS glitch test FAILED!")
return success
# fly_gps_glitch_auto_test - fly mission and test reaction to gps glitch
def fly_gps_glitch_auto_test(mavproxy, mav, timeout=30, max_distance=100):
# set-up gps glitch array
glitch_lat = [0.0002996,0.0006958,0.0009431,0.0009991,0.0009444,0.0007716,0.0006221]
glitch_lon = [0.0000717,0.0000912,0.0002761,0.0002626,0.0002807,0.0002049,0.0001304]
glitch_num = len(glitch_lat)
print("GPS Glitches:")
for i in range(1,glitch_num):
print("glitch %d %.7f %.7f" % (i,glitch_lat[i],glitch_lon[i]))
# Fly mission #1
print("# Load copter_glitch_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_glitch_mission.txt")):
print("load copter_glitch_mission failed")
return False
# turn on simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, True)
# load the waypoint count
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# wait until 100m from home
if not wait_distance(mav, 100, 5, 60):
show_gps_and_sim_positions(mavproxy, False)
return False
# record time and position
tstart = get_sim_time(mav)
tnow = tstart
start_pos = sim_location(mav)
# initialise current glitch
    glitch_current = 0
print("Apply first glitch")
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# record position for 30 seconds
while glitch_current < glitch_num:
tnow = get_sim_time(mav)
desired_glitch_num = int((tnow - tstart) * 2)
if desired_glitch_num > glitch_current and glitch_current != -1:
glitch_current = desired_glitch_num
# apply next glitch
if glitch_current < glitch_num:
print("Applying glitch %u" % glitch_current)
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# turn off glitching
print("Completed Glitches")
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
# continue with the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# wait for arrival back home
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
dist_to_home = get_distance(HOME, pos)
while dist_to_home > 5:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
dist_to_home = get_distance(HOME, pos)
print("Dist from home: %u" % dist_to_home)
# turn off simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, False)
print("GPS Glitch test Auto completed: passed=%s" % ret)
return ret
#fly_simple - assumes the simple bearing is initialised to be directly north
# flies a box: 'side' metres south, 8 seconds west, 'side'/2 metres north, 8 seconds east
def fly_simple(mavproxy, mav, side=50, timeout=120):
failed = False
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
#set SIMPLE mode for all flight modes
mavproxy.send('param set SIMPLE 63\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1430\n')
# fly south 50m
print("# Flying south %u meters" % side)
mavproxy.send('rc 1 1300\n')
if not wait_distance(mav, side, 5, 60):
failed = True
mavproxy.send('rc 1 1500\n')
# fly west 8 seconds
print("# Flying west for 8 seconds")
mavproxy.send('rc 2 1300\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + 8):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
#print("%u" % delta)
mavproxy.send('rc 2 1500\n')
# fly north 25 meters
print("# Flying north %u meters" % (side/2.0))
mavproxy.send('rc 1 1700\n')
if not wait_distance(mav, side/2, 5, 60):
failed = True
mavproxy.send('rc 1 1500\n')
# fly east 8 seconds
print("# Flying east for 8 seconds")
mavproxy.send('rc 2 1700\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + 8):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
#print("%u" % delta)
mavproxy.send('rc 2 1500\n')
#restore to default
mavproxy.send('param set SIMPLE 0\n')
#hover in place
hover(mavproxy, mav)
return not failed
#fly_super_simple - flies a circle around home for 45 seconds
def fly_super_simple(mavproxy, mav, timeout=45):
failed = False
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# fly forward 20m
print("# Flying forward 20 meters")
mavproxy.send('rc 2 1300\n')
if not wait_distance(mav, 20, 5, 60):
failed = True
mavproxy.send('rc 2 1500\n')
#set SUPER SIMPLE mode for all flight modes
mavproxy.send('param set SUPER_SIMPLE 63\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1430\n')
# start copter yawing slowly
mavproxy.send('rc 4 1550\n')
# roll left for timeout seconds
print("# rolling left from pilot's point of view for %u seconds" % timeout)
mavproxy.send('rc 1 1300\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + timeout):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
# stop rolling and yawing
mavproxy.send('rc 1 1500\n')
mavproxy.send('rc 4 1500\n')
#restore simple mode parameters to default
mavproxy.send('param set SUPER_SIMPLE 0\n')
#hover in place
hover(mavproxy, mav)
return not failed
#fly_circle - flies a circle with 20m radius
def fly_circle(mavproxy, mav, maxaltchange=10, holdtime=36):
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# face west
print("turn west")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 270):
return False
mavproxy.send('rc 4 1500\n')
#set CIRCLE radius
mavproxy.send('param set CIRCLE_RADIUS 3000\n')
    # fly forward (west) at least 100m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 100):
return False
# return pitch stick back to middle
mavproxy.send('rc 2 1500\n')
# set CIRCLE mode
mavproxy.send('switch 1\n') # circle mode
wait_mode(mav, 'CIRCLE')
# wait
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Circle at %u meters for %u seconds" % (start_altitude, holdtime))
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
print("heading %u" % m.heading)
print("CIRCLE OK for %u seconds" % holdtime)
return True
# fly_auto_test - fly mission which tests a significant number of commands
def fly_auto_test(mavproxy, mav):
# Fly mission #1
print("# Load copter_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_mission.txt")):
print("load copter_mission failed")
return False
# load the waypoint count
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# fly the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# set throttle to minimum
mavproxy.send('rc 3 1000\n')
# wait for disarm
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
print("Auto mission completed: passed=%s" % ret)
return ret
# fly_avc_test - fly AVC mission
def fly_avc_test(mavproxy, mav):
# upload mission from file
print("# Load copter_AVC2013_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_AVC2013_mission.txt")):
print("load copter_AVC2013_mission failed")
return False
# load the waypoint count
global homeloc
global num_wp
print("Fly AVC mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# fly the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# set throttle to minimum
mavproxy.send('rc 3 1000\n')
# wait for disarm
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
print("AVC mission completed: passed=%s" % ret)
return ret
def land(mavproxy, mav, timeout=60):
'''land the quad'''
print("STARTING LANDING")
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
print("Entered Landing Mode")
ret = wait_altitude(mav, -5, 1)
print("LANDING: ok= %s" % ret)
return ret
def fly_mission(mavproxy, mav, height_accuracy=-1, target_altitude=None):
'''fly a mission from a file'''
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
expect_msg = "Reached Command #%u" % (num_wp-1)
if (ret):
mavproxy.expect(expect_msg)
print("test: MISSION COMPLETE: passed=%s" % ret)
# wait here until ready
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
return ret
def load_mission_from_file(mavproxy, mav, filename):
'''Load a mission from a file to flight controller'''
global num_wp
mavproxy.send('wp load %s\n' % filename)
mavproxy.expect('flight plan received')
mavproxy.send('wp list\n')
mavproxy.expect('Requesting [0-9]+ waypoints')
# update num_wp
wploader = mavwp.MAVWPLoader()
wploader.load(filename)
num_wp = wploader.count()
return True
def save_mission_to_file(mavproxy, mav, filename):
global num_wp
mavproxy.send('wp save %s\n' % filename)
mavproxy.expect('Saved ([0-9]+) waypoints')
num_wp = int(mavproxy.match.group(1))
print("num_wp: %d" % num_wp)
return True
def setup_rc(mavproxy):
'''setup RC override control'''
for chan in range(1,9):
mavproxy.send('rc %u 1500\n' % chan)
# zero throttle
mavproxy.send('rc 3 1000\n')
def fly_ArduCopter(viewerip=None, map=False):
'''fly ArduCopter in SIL
you can pass viewerip as an IP address to optionally send fg and
mavproxy packets too for local viewing of the flight in real time
'''
global homeloc
if TARGET != 'sitl':
util.build_SIL('ArduCopter', target=TARGET)
home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading)
sil = util.start_SIL('ArduCopter', wipe=True, model='+', home=home, speedup=speedup_default)
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options='--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter')
mavproxy.expect('Received [0-9]+ parameters')
# setup test parameters
mavproxy.send("param load %s/copter_params.parm\n" % testdir)
mavproxy.expect('Loaded [0-9]+ parameters')
# reboot with new parameters
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
sil = util.start_SIL('ArduCopter', model='+', home=home, speedup=speedup_default)
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter --streamrate=5'
if viewerip:
options += ' --out=%s:14550' % viewerip
if map:
options += ' --map'
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options=options)
mavproxy.expect('Logging to (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
buildlog = util.reltopdir("../buildlogs/ArduCopter-test.tlog")
print("buildlog=%s" % buildlog)
copyTLog = False
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
print( "WARN: Failed to create symlink: " + logfile + " => " + buildlog + ", Will copy tlog manually to target location" )
copyTLog = True
# the received parameters can come before or after the ready to fly message
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
util.expect_setup_callback(mavproxy, expect_callback)
expect_list_clear()
expect_list_extend([sil, mavproxy])
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
failed_test_msg = "None"
try:
mav.wait_heartbeat()
setup_rc(mavproxy)
homeloc = mav.location()
# wait 10sec to allow EKF to settle
wait_seconds(mav, 10)
# Arm
print("# Arm motors")
if not arm_motors(mavproxy, mav):
failed_test_msg = "arm_motors failed"
print(failed_test_msg)
failed = True
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly a square in Stabilize mode
print("#")
print("########## Fly a square and save WPs with CH7 switch ##########")
print("#")
if not fly_square(mavproxy, mav):
failed_test_msg = "fly_square failed"
print(failed_test_msg)
failed = True
# save the stored mission to file
print("# Save out the CH7 mission to file")
if not save_mission_to_file(mavproxy, mav, os.path.join(testdir, "ch7_mission.txt")):
failed_test_msg = "save_mission_to_file failed"
print(failed_test_msg)
failed = True
# fly the stored mission
print("# Fly CH7 saved mission")
if not fly_mission(mavproxy, mav,height_accuracy = 0.5, target_altitude=10):
failed_test_msg = "fly ch7_mission failed"
print(failed_test_msg)
failed = True
# Throttle Failsafe
print("#")
print("########## Test Failsafe ##########")
print("#")
if not fly_throttle_failsafe(mavproxy, mav):
failed_test_msg = "fly_throttle_failsafe failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Battery failsafe
if not fly_battery_failsafe(mavproxy, mav):
failed_test_msg = "fly_battery_failsafe failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Stability patch
print("#")
print("########## Test Stability Patch ##########")
print("#")
if not fly_stability_patch(mavproxy, mav, 30):
failed_test_msg = "fly_stability_patch failed"
print(failed_test_msg)
failed = True
# RTL
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after stab patch failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fence test
print("#")
print("########## Test Horizontal Fence ##########")
print("#")
if not fly_fence_test(mavproxy, mav, 180):
failed_test_msg = "fly_fence_test failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly GPS Glitch Loiter test
print("# GPS Glitch Loiter Test")
if not fly_gps_glitch_loiter_test(mavproxy, mav):
failed_test_msg = "fly_gps_glitch_loiter_test failed"
print(failed_test_msg)
failed = True
# RTL after GPS Glitch Loiter test
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL failed"
print(failed_test_msg)
failed = True
# Fly GPS Glitch test in auto mode
print("# GPS Glitch Auto Test")
if not fly_gps_glitch_auto_test(mavproxy, mav):
failed_test_msg = "fly_gps_glitch_auto_test failed"
print(failed_test_msg)
failed = True
# take-off ahead of next test
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Loiter for 10 seconds
print("#")
print("########## Test Loiter for 10 seconds ##########")
print("#")
if not loiter(mavproxy, mav):
failed_test_msg = "loiter failed"
print(failed_test_msg)
failed = True
# Loiter Climb
print("#")
print("# Loiter - climb to 30m")
print("#")
if not change_alt(mavproxy, mav, 30):
failed_test_msg = "change_alt climb failed"
print(failed_test_msg)
failed = True
# Loiter Descend
print("#")
print("# Loiter - descend to 20m")
print("#")
if not change_alt(mavproxy, mav, 20):
failed_test_msg = "change_alt descend failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after Loiter climb/descend failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Simple mode
print("# Fly in SIMPLE mode")
if not fly_simple(mavproxy, mav):
failed_test_msg = "fly_simple failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after simple mode failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly a circle in super simple mode
print("# Fly a circle in SUPER SIMPLE mode")
if not fly_super_simple(mavproxy, mav):
failed_test_msg = "fly_super_simple failed"
print(failed_test_msg)
failed = True
# RTL
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after super simple mode failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Circle mode
print("# Fly CIRCLE mode")
if not fly_circle(mavproxy, mav):
failed_test_msg = "fly_circle failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after circle failed"
print(failed_test_msg)
failed = True
print("# Fly copter mission")
if not fly_auto_test(mavproxy, mav):
failed_test_msg = "fly_auto_test failed"
print(failed_test_msg)
failed = True
else:
print("Flew copter mission OK")
# wait for disarm
mav.motors_disarmed_wait()
if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/ArduCopter-log.bin")):
failed_test_msg = "log_download failed"
print(failed_test_msg)
failed = True
except pexpect.TIMEOUT, failed_test_msg:
failed_test_msg = "Timeout"
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
if os.path.exists('ArduCopter-valgrind.log'):
os.chmod('ArduCopter-valgrind.log', 0644)
shutil.copy("ArduCopter-valgrind.log", util.reltopdir("../buildlogs/ArduCopter-valgrind.log"))
# [2014/05/07] FC Because I'm doing a cross machine build (source is on host, build is on guest VM) I cannot hard link
# This flag tells me that I need to copy the data out
if copyTLog:
shutil.copy(logfile, buildlog)
if failed:
print("FAILED: %s" % failed_test_msg)
return False
return True
def fly_CopterAVC(viewerip=None, map=False):
'''fly ArduCopter in SIL for AVC2013 mission
'''
global homeloc
if TARGET != 'sitl':
util.build_SIL('ArduCopter', target=TARGET)
home = "%f,%f,%u,%u" % (AVCHOME.lat, AVCHOME.lng, AVCHOME.alt, AVCHOME.heading)
sil = util.start_SIL('ArduCopter', wipe=True, model='+', home=home, speedup=speedup_default)
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options='--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter')
mavproxy.expect('Received [0-9]+ parameters')
# setup test parameters
mavproxy.send("param load %s/copter_AVC2013_params.parm\n" % testdir)
mavproxy.expect('Loaded [0-9]+ parameters')
# reboot with new parameters
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
sil = util.start_SIL('ArduCopter', model='+', home=home, speedup=speedup_default)
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter --streamrate=5'
if viewerip:
options += ' --out=%s:14550' % viewerip
if map:
options += ' --map'
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options=options)
mavproxy.expect('Logging to (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
buildlog = util.reltopdir("../buildlogs/CopterAVC-test.tlog")
print("buildlog=%s" % buildlog)
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
pass
# the received parameters can come before or after the ready to fly message
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
util.expect_setup_callback(mavproxy, expect_callback)
expect_list_clear()
expect_list_extend([sil, mavproxy])
if map:
mavproxy.send('map icon 40.072467969730496 -105.2314389590174\n')
mavproxy.send('map icon 40.072600990533829 -105.23146100342274\n')
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
failed_test_msg = "None"
try:
mav.wait_heartbeat()
setup_rc(mavproxy)
homeloc = mav.location()
# wait 10sec to allow EKF to settle
wait_seconds(mav, 10)
# Arm
print("# Arm motors")
if not arm_motors(mavproxy, mav):
failed_test_msg = "arm_motors failed"
print(failed_test_msg)
failed = True
print("# Fly AVC mission")
if not fly_avc_test(mavproxy, mav):
failed_test_msg = "fly_avc_test failed"
print(failed_test_msg)
failed = True
else:
print("Flew AVC mission OK")
#mission includes disarm at end so should be ok to download logs now
if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/CopterAVC-log.bin")):
failed_test_msg = "log_download failed"
print(failed_test_msg)
failed = True
except pexpect.TIMEOUT, failed_test_msg:
failed_test_msg = "Timeout"
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
if failed:
print("FAILED: %s" % failed_test_msg)
return False
return True
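# Illustrative sketch only (not part of the original autotest file): how these SIL
# test entry points are typically driven from a harness. The viewer IP below is a
# hypothetical placeholder.
#
#   if not fly_CopterAVC(viewerip='127.0.0.1', map=False):
#       print("AVC mission test FAILED")
#   else:
#       print("AVC mission test PASSED")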
|
gpl-3.0
|
oihane/odoo
|
addons/l10n_in_hr_payroll/report/report_payroll_advice.py
|
374
|
3442
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import osv
from openerp.report import report_sxw
from openerp.tools import amount_to_text_en
class payroll_advice_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(payroll_advice_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'get_month': self.get_month,
'convert': self.convert,
'get_detail': self.get_detail,
'get_bysal_total': self.get_bysal_total,
})
self.context = context
def get_month(self, input_date):
payslip_pool = self.pool.get('hr.payslip')
res = {
'from_name': '', 'to_name': ''
}
slip_ids = payslip_pool.search(self.cr, self.uid, [('date_from','<=',input_date), ('date_to','>=',input_date)], context=self.context)
if slip_ids:
slip = payslip_pool.browse(self.cr, self.uid, slip_ids, context=self.context)[0]
from_date = datetime.strptime(slip.date_from, '%Y-%m-%d')
to_date = datetime.strptime(slip.date_to, '%Y-%m-%d')
res['from_name']= from_date.strftime('%d')+'-'+from_date.strftime('%B')+'-'+from_date.strftime('%Y')
res['to_name']= to_date.strftime('%d')+'-'+to_date.strftime('%B')+'-'+to_date.strftime('%Y')
return res
def convert(self, amount, cur):
return amount_to_text_en.amount_to_text(amount, 'en', cur)
def get_bysal_total(self):
return self.total_bysal
def get_detail(self, line_ids):
result = []
self.total_bysal = 0.00
for l in line_ids:
res = {}
res.update({
'name': l.employee_id.name,
'acc_no': l.name,
'ifsc_code': l.ifsc_code,
'bysal': l.bysal,
'debit_credit': l.debit_credit,
})
self.total_bysal += l.bysal
result.append(res)
return result
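# Illustrative sketch (not part of the original report parser): the shape of the data
# get_detail() hands to the report template for each advice, and how the running total
# is read back afterwards. The advice record and field values are hypothetical.
#
#   lines = self.get_detail(advice.line_ids)
#   # -> [{'name': 'John Doe', 'acc_no': 'SB-1234', 'ifsc_code': 'ABCD0001234',
#   #      'bysal': 25000.0, 'debit_credit': 'C'}, ...]
#   total = self.get_bysal_total()   # sum of the 'bysal' values accumulated above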
class wrapped_report_payroll_advice(osv.AbstractModel):
_name = 'report.l10n_in_hr_payroll.report_payrolladvice'
_inherit = 'report.abstract_report'
_template = 'l10n_in_hr_payroll.report_payrolladvice'
_wrapped_report_class = payroll_advice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/dns/dnsnaptrrec.py
|
1
|
14627
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dnsnaptrrec(base_resource) :
""" Configuration for NAPTR record resource. """
def __init__(self) :
self._domain = ""
self._order = 0
self._preference = 0
self._flags = ""
self._services = ""
self._regexp = ""
self._replacement = ""
self._ttl = 0
self._recordid = 0
self._type = ""
self._authtype = ""
self.___count = 0
@property
def domain(self) :
"""Name of the domain for the NAPTR record.<br/>Minimum length = 1.
"""
try :
return self._domain
except Exception as e:
raise e
@domain.setter
def domain(self, domain) :
"""Name of the domain for the NAPTR record.<br/>Minimum length = 1
"""
try :
self._domain = domain
except Exception as e:
raise e
@property
def order(self) :
"""An integer specifying the order in which the NAPTR records MUST be processed in order to accurately represent the ordered list of Rules. The ordering is from lowest to highest.<br/>Maximum length = 65535.
"""
try :
return self._order
except Exception as e:
raise e
@order.setter
def order(self, order) :
"""An integer specifying the order in which the NAPTR records MUST be processed in order to accurately represent the ordered list of Rules. The ordering is from lowest to highest.<br/>Maximum length = 65535
"""
try :
self._order = order
except Exception as e:
raise e
@property
def preference(self) :
"""An integer specifying the preference of this NAPTR among NAPTR records having same order. lower the number, higher the preference.<br/>Maximum length = 65535.
"""
try :
return self._preference
except Exception as e:
raise e
@preference.setter
def preference(self, preference) :
"""An integer specifying the preference of this NAPTR among NAPTR records having same order. lower the number, higher the preference.<br/>Maximum length = 65535
"""
try :
self._preference = preference
except Exception as e:
raise e
@property
def flags(self) :
"""flags for this NAPTR.<br/>Maximum length = 255.
"""
try :
return self._flags
except Exception as e:
raise e
@flags.setter
def flags(self, flags) :
"""flags for this NAPTR.<br/>Maximum length = 255
"""
try :
self._flags = flags
except Exception as e:
raise e
@property
def services(self) :
"""Service Parameters applicable to this delegation path.<br/>Maximum length = 255.
"""
try :
return self._services
except Exception as e:
raise e
@services.setter
def services(self, services) :
"""Service Parameters applicable to this delegation path.<br/>Maximum length = 255
"""
try :
self._services = services
except Exception as e:
raise e
@property
def regexp(self) :
"""The regular expression, that specifies the substitution expression for this NAPTR.<br/>Maximum length = 255.
"""
try :
return self._regexp
except Exception as e:
raise e
@regexp.setter
def regexp(self, regexp) :
"""The regular expression, that specifies the substitution expression for this NAPTR.<br/>Maximum length = 255
"""
try :
self._regexp = regexp
except Exception as e:
raise e
@property
def replacement(self) :
"""The replacement domain name for this NAPTR.<br/>Maximum length = 255.
"""
try :
return self._replacement
except Exception as e:
raise e
@replacement.setter
def replacement(self, replacement) :
"""The replacement domain name for this NAPTR.<br/>Maximum length = 255
"""
try :
self._replacement = replacement
except Exception as e:
raise e
@property
def ttl(self) :
"""Time to Live (TTL), in seconds, for the record. TTL is the time for which the record must be cached by DNS proxies. The specified TTL is applied to all the resource records that are of the same record type and belong to the specified domain name. For example, if you add an address record, with a TTL of 36000, to the domain name example.com, the TTLs of all the address records of example.com are changed to 36000. If the TTL is not specified, the NetScaler appliance uses either the DNS zone's minimum TTL or, if the SOA record is not available on the appliance, the default value of 3600.<br/>Default value: 3600<br/>Maximum length = 2147483647.
"""
try :
return self._ttl
except Exception as e:
raise e
@ttl.setter
def ttl(self, ttl) :
"""Time to Live (TTL), in seconds, for the record. TTL is the time for which the record must be cached by DNS proxies. The specified TTL is applied to all the resource records that are of the same record type and belong to the specified domain name. For example, if you add an address record, with a TTL of 36000, to the domain name example.com, the TTLs of all the address records of example.com are changed to 36000. If the TTL is not specified, the NetScaler appliance uses either the DNS zone's minimum TTL or, if the SOA record is not available on the appliance, the default value of 3600.<br/>Default value: 3600<br/>Maximum length = 2147483647
"""
try :
self._ttl = ttl
except Exception as e:
raise e
@property
def recordid(self) :
"""Unique, internally generated record ID. View the details of the naptr record to obtain its record ID. Records can be removed by either specifying the domain name and record id OR by specifying
domain name and all other naptr record attributes as was supplied during the add command.<br/>Minimum length = 1<br/>Maximum length = 65535.
"""
try :
return self._recordid
except Exception as e:
raise e
@recordid.setter
def recordid(self, recordid) :
"""Unique, internally generated record ID. View the details of the naptr record to obtain its record ID. Records can be removed by either specifying the domain name and record id OR by specifying
domain name and all other naptr record attributes as was supplied during the add command.<br/>Minimum length = 1<br/>Maximum length = 65535
"""
try :
self._recordid = recordid
except Exception as e:
raise e
@property
def type(self) :
"""Type of records to display. Available settings function as follows:
* ADNS - Display all authoritative address records.
* PROXY - Display all proxy address records.
* ALL - Display all address records.<br/>Default value: ADNS<br/>Possible values = ALL, ADNS, PROXY.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
"""Type of records to display. Available settings function as follows:
* ADNS - Display all authoritative address records.
* PROXY - Display all proxy address records.
* ALL - Display all address records.<br/>Default value: ADNS<br/>Possible values = ALL, ADNS, PROXY
"""
try :
self._type = type
except Exception as e:
raise e
@property
def authtype(self) :
"""Authentication type.<br/>Possible values = ALL, ADNS, PROXY.
"""
try :
return self._authtype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(dnsnaptrrec_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.dnsnaptrrec
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.domain) :
return str(self.domain)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add dnsnaptrrec.
"""
try :
if type(resource) is not list :
addresource = dnsnaptrrec()
addresource.domain = resource.domain
addresource.order = resource.order
addresource.preference = resource.preference
addresource.flags = resource.flags
addresource.services = resource.services
addresource.regexp = resource.regexp
addresource.replacement = resource.replacement
addresource.ttl = resource.ttl
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ dnsnaptrrec() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].domain = resource[i].domain
addresources[i].order = resource[i].order
addresources[i].preference = resource[i].preference
addresources[i].flags = resource[i].flags
addresources[i].services = resource[i].services
addresources[i].regexp = resource[i].regexp
addresources[i].replacement = resource[i].replacement
addresources[i].ttl = resource[i].ttl
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
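	# Hedged usage sketch (not part of the generated SDK file): how a NAPTR record is
	# typically created and counted through a nitro_service client. The appliance
	# address, credentials and record values are placeholders, and the exact
	# nitro_service constructor/login arguments may differ between SDK versions.
	#
	#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
	#   client = nitro_service("10.0.0.10", "http")
	#   client.login("nsroot", "nsroot")
	#   rec = dnsnaptrrec()
	#   rec.domain = "sip.example.com"
	#   rec.order = 10
	#   rec.preference = 50
	#   rec.flags = "S"
	#   rec.services = "SIP+D2U"
	#   rec.replacement = "_sip._udp.example.com"
	#   dnsnaptrrec.add(client, rec)
	#   print(dnsnaptrrec.count(client))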
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete dnsnaptrrec.
"""
try :
if type(resource) is not list :
deleteresource = dnsnaptrrec()
if type(resource) != type(deleteresource):
deleteresource.domain = resource
else :
deleteresource.domain = resource.domain
deleteresource.order = resource.order
deleteresource.recordid = resource.recordid
deleteresource.preference = resource.preference
deleteresource.flags = resource.flags
deleteresource.services = resource.services
deleteresource.regexp = resource.regexp
deleteresource.replacement = resource.replacement
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ dnsnaptrrec() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].domain = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ dnsnaptrrec() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].domain = resource[i].domain
deleteresources[i].order = resource[i].order
deleteresources[i].recordid = resource[i].recordid
deleteresources[i].preference = resource[i].preference
deleteresources[i].flags = resource[i].flags
deleteresources[i].services = resource[i].services
deleteresources[i].regexp = resource[i].regexp
deleteresources[i].replacement = resource[i].replacement
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the dnsnaptrrec resources that are configured on netscaler.
"""
try :
if not name :
obj = dnsnaptrrec()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = dnsnaptrrec()
obj.domain = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [dnsnaptrrec() for _ in range(len(name))]
obj = [dnsnaptrrec() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = dnsnaptrrec()
obj[i].domain = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
""" Use this API to fetch all the dnsnaptrrec resources that are configured on netscaler.
# This uses dnsnaptrrec_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = dnsnaptrrec()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of dnsnaptrrec resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = dnsnaptrrec()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the dnsnaptrrec resources configured on NetScaler.
"""
try :
obj = dnsnaptrrec()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of dnsnaptrrec resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = dnsnaptrrec()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Authtype:
ALL = "ALL"
ADNS = "ADNS"
PROXY = "PROXY"
class Type:
ALL = "ALL"
ADNS = "ADNS"
PROXY = "PROXY"
class dnsnaptrrec_response(base_response) :
def __init__(self, length=1) :
self.dnsnaptrrec = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.dnsnaptrrec = [dnsnaptrrec() for _ in range(length)]
|
apache-2.0
|
openatv/enigma2
|
lib/python/Plugins/SystemPlugins/SoftwareManager/ImageBackup.py
|
1
|
39463
|
#################################################################################
# FULL BACKUP UTILITY FOR ENIGMA2, SUPPORTS THE MODELS OE-A 4.4 #
# #
# MAKES A FULLBACK-UP READY FOR FLASHING. #
# MAKES A FULL BACK-UP READY FOR FLASHING. #
#################################################################################
from enigma import getEnigmaVersionString
from Screens.Screen import Screen
from Components.Sources.StaticText import StaticText
from Components.SystemInfo import SystemInfo
from Components.Label import Label
from Components.ActionMap import ActionMap
from Components.About import about
from Components import Harddisk
from Components.ChoiceList import ChoiceList, ChoiceEntryComponent
from Screens.Console import Console
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from time import time, strftime, localtime
from Tools.BoundFunction import boundFunction
from Tools.Multiboot import GetImagelist, GetCurrentImage, GetCurrentImageMode, GetCurrentKern, GetCurrentRoot, GetBoxName
import os, commands, datetime
from boxbranding import getMachineBrand, getMachineName, getDriverDate, getImageVersion, getImageBuild, getBrandOEM, getMachineBuild, getImageFolder, getMachineUBINIZE, getMachineMKUBIFS, getMachineMtdKernel, getMachineMtdRoot, getMachineKernelFile, getMachineRootFile, getImageFileSystem, getImageDistro, getImageVersion
VERSION = _("Version %s %s") %(getImageDistro(), getImageVersion())
class ImageBackup(Screen):
skin = """
<screen name="Image Backup" position="center,center" size="750,900" flags="wfNoBorder" backgroundColor="transparent">
<eLabel name="b" position="0,0" size="750,700" backgroundColor="#00ffffff" zPosition="-2" />
<eLabel name="a" position="1,1" size="748,698" backgroundColor="#00000000" zPosition="-1" />
<widget source="Title" render="Label" position="60,10" foregroundColor="#00ffffff" size="480,50" halign="left" font="Regular; 28" backgroundColor="#00000000" />
<eLabel name="line" position="1,60" size="748,1" backgroundColor="#00ffffff" zPosition="1" />
<eLabel name="line2" position="1,250" size="748,4" backgroundColor="#00ffffff" zPosition="1" />
<widget name="config" position="2,280" size="730,380" halign="center" font="Regular; 22" backgroundColor="#00000000" foregroundColor="#00e5b243" />
<widget source="description" render="Label" position="2,80" size="730,30" halign="center" font="Regular; 22" backgroundColor="#00000000" foregroundColor="#00ffffff" />
<widget source="options" render="Label" position="2,130" size="730,60" halign="center" font="Regular; 22" backgroundColor="#00000000" foregroundColor="#00ffffff" />
<widget source="key_red" render="Label" position="30,200" size="150,30" noWrap="1" zPosition="1" valign="center" font="Regular; 20" halign="left" backgroundColor="#00000000" foregroundColor="#00ffffff" />
<widget source="key_green" render="Label" position="200,200" size="150,30" noWrap="1" zPosition="1" valign="center" font="Regular; 20" halign="left" backgroundColor="#00000000" foregroundColor="#00ffffff" />
<eLabel position="20,200" size="6,40" backgroundColor="#00e61700" /> <!-- Should be a pixmap -->
<eLabel position="190,200" size="6,40" backgroundColor="#0061e500" /> <!-- Should be a pixmap -->
</screen>
"""
def __init__(self, session, *args):
Screen.__init__(self, session)
self.title = _("Image Backup")
self["key_red"] = StaticText(_("Cancel"))
self["description"] = StaticText(_("Use the cursor keys to select an installed image and then Start button."))
self["options"] = StaticText(_(" "))
self["key_green"] = StaticText(_("Start"))
self["config"] = ChoiceList(list=[ChoiceEntryComponent('',((_("Retrieving image slots - Please wait...")), "Queued"))])
imagedict = []
self.getImageList = None
self.startit()
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "KeyboardInputActions", "MenuActions"],
{
"red": boundFunction(self.close, None),
"green": self.start,
"ok": self.start,
"cancel": boundFunction(self.close, None),
"up": self.keyUp,
"down": self.keyDown,
"left": self.keyLeft,
"right": self.keyRight,
"upRepeated": self.keyUp,
"downRepeated": self.keyDown,
"leftRepeated": self.keyLeft,
"rightRepeated": self.keyRight,
"menu": boundFunction(self.close, True),
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.title)
def startit(self):
self.getImageList = GetImagelist(self.ImageList)
def ImageList(self, imagedict):
self.saveImageList = imagedict
list = []
currentimageslot = GetCurrentImage() or 1
print "[Image Backup] Current Image Slot %s, Imagelist %s"% ( currentimageslot, imagedict)
if imagedict:
for x in sorted(imagedict.keys()):
if imagedict[x]["imagename"] != _("Empty slot"):
if x == 1 and currentimageslot == 1 and SystemInfo["canRecovery"]:
list.append(ChoiceEntryComponent('',(_("slot%s - %s as USB Recovery") % (x, imagedict[x]["imagename"]), x, True)))
list.append(ChoiceEntryComponent('',((_("slot%s - %s (current image)") if x == currentimageslot else _("slot%s - %s")) % (x, imagedict[x]["imagename"]), x, False)))
else:
if SystemInfo["canRecovery"]:
list.append(ChoiceEntryComponent('',(_("internal flash: %s %s as USB Recovery") %(getImageDistro(), getImageVersion()),"x",True)))
list.append(ChoiceEntryComponent('',(_("internal flash: %s %s ") %(getImageDistro(), getImageVersion()),"x",False)))
self["config"].setList(list)
def start(self):
self.currentSelected = self["config"].l.getCurrentSelection()
title = _("Please select a backup destination")
choices = []
retval = []
if self.currentSelected[0][1] != "Queued":
for media in ['/media/%s' % x for x in os.listdir('/media')] + (['/media/net/%s' % x for x in os.listdir('/media/net')] if os.path.isdir('/media/net') else []):
if Harddisk.Freespace(media) > 300000:
choices.append((_("Backup to destination: %s") % (media),self.currentSelected[0][1], media, self.currentSelected[0][2]))
choices.append((_("No, do not backup a image"), False))
self.session.openWithCallback(self.doFullBackup, ChoiceBox,title=title,list=choices)
def selectionChanged(self):
currentSelected = self["config"].l.getCurrentSelection()
def keyLeft(self):
self["config"].instance.moveSelection(self["config"].instance.moveUp)
self.selectionChanged()
def keyRight(self):
self["config"].instance.moveSelection(self["config"].instance.moveDown)
self.selectionChanged()
def keyUp(self):
self["config"].instance.moveSelection(self["config"].instance.moveUp)
self.selectionChanged()
def keyDown(self):
self["config"].instance.moveSelection(self["config"].instance.moveDown)
self.selectionChanged()
def doFullBackup(self, answer):
if answer is not None:
if answer[1]:
self.RECOVERY = answer[3]
self.DIRECTORY = "%s/images" %answer[2]
if not os.path.exists(self.DIRECTORY):
try:
os.makedirs(self.DIRECTORY)
except:
self.session.open(MessageBox, _("Cannot create backup directory"), MessageBox.TYPE_ERROR, timeout=10)
return
self.SLOT = answer[1]
self.MODEL = GetBoxName()
self.OEM = getBrandOEM()
self.MACHINEBUILD = getMachineBuild()
self.MACHINENAME = getMachineName()
self.MACHINEBRAND = getMachineBrand()
self.IMAGEFOLDER = getImageFolder()
self.UBINIZE_ARGS = getMachineUBINIZE()
self.MKUBIFS_ARGS = getMachineMKUBIFS()
self.ROOTFSSUBDIR = "none"
self.ROOTFSBIN = getMachineRootFile()
self.KERNELBIN = getMachineKernelFile()
self.ROOTFSTYPE = getImageFileSystem().strip()
self.IMAGEDISTRO = getImageDistro()
self.DISTROVERSION = getImageVersion()
if SystemInfo["canRecovery"]:
self.EMMCIMG = SystemInfo["canRecovery"][0]
self.MTDBOOT = SystemInfo["canRecovery"][1]
else:
self.EMMCIMG = "none"
self.MTDBOOT = "none"
self.getImageList = self.saveImageList
if SystemInfo["canMultiBoot"]:
self.MTDKERNEL = SystemInfo["canMultiBoot"][self.SLOT]["kernel"].split('/')[2]
self.MTDROOTFS = SystemInfo["canMultiBoot"][self.SLOT]["device"].split('/')[2]
if SystemInfo["HasRootSubdir"]:
self.ROOTFSSUBDIR = SystemInfo["canMultiBoot"][self.SLOT]['rootsubdir']
else:
self.MTDKERNEL = getMachineMtdKernel()
self.MTDROOTFS = getMachineMtdRoot()
print "[Image Backup] BOX MACHINEBUILD = >%s<" %self.MACHINEBUILD
print "[Image Backup] BOX MACHINENAME = >%s<" %self.MACHINENAME
print "[Image Backup] BOX MACHINEBRAND = >%s<" %self.MACHINEBRAND
print "[Image Backup] BOX MODEL = >%s<" %self.MODEL
print "[Image Backup] OEM MODEL = >%s<" %self.OEM
print "[Image Backup] IMAGEFOLDER = >%s<" %self.IMAGEFOLDER
print "[Image Backup] UBINIZE = >%s<" %self.UBINIZE_ARGS
print "[Image Backup] MKUBIFS = >%s<" %self.MKUBIFS_ARGS
print "[Image Backup] MTDBOOT = >%s<" %self.MTDBOOT
print "[Image Backup] MTDKERNEL = >%s<" %self.MTDKERNEL
print "[Image Backup] MTDROOTFS = >%s<" %self.MTDROOTFS
print "[Image Backup] ROOTFSBIN = >%s<" %self.ROOTFSBIN
print "[Image Backup] KERNELBIN = >%s<" %self.KERNELBIN
print "[Image Backup] ROOTFSSUBDIR = >%s<" %self.ROOTFSSUBDIR
print "[Image Backup] ROOTFSTYPE = >%s<" %self.ROOTFSTYPE
print "[Image Backup] EMMCIMG = >%s<" %self.EMMCIMG
print "[Image Backup] IMAGEDISTRO = >%s<" %self.IMAGEDISTRO
print "[Image Backup] DISTROVERSION = >%s<" %self.DISTROVERSION
print "[Image Backup] MTDBOOT = >%s<" %self.MTDBOOT
print "[Image Backup] USB RECOVERY = >%s< " %self.RECOVERY
print "[Image Backup] DESTINATION = >%s< " %self.DIRECTORY
print "[Image Backup] SLOT = >%s< " %self.SLOT
self.TITLE = _("Full back-up on %s") % (self.DIRECTORY)
self.START = time()
self.DATE = strftime("%Y%m%d_%H%M", localtime(self.START))
self.IMAGEVERSION = self.imageInfo()
self.MKFS_UBI = "/usr/sbin/mkfs.ubifs"
self.MKFS_TAR = "/bin/tar"
self.BZIP2 = "/usr/bin/bzip2"
self.MKFS_JFFS2 = "/usr/sbin/mkfs.jffs2"
self.UBINIZE = "/usr/sbin/ubinize"
self.NANDDUMP = "/usr/sbin/nanddump"
self.FASTBOOT = "/usr/bin/ext2simg"
self.WORKDIR= "%s/bi" %self.DIRECTORY
self.SHOWNAME = "%s %s" %(self.MACHINEBRAND, self.MODEL)
self.MAINDEST = "%s/build_%s/%s" % (self.DIRECTORY, self.MODEL, self.IMAGEFOLDER)
self.MAINDESTROOT = "%s/build_%s" % (self.DIRECTORY, self.MODEL)
self.message = "echo -e '\n"
if getMachineBrand().startswith('A') or getMachineBrand().startswith('E') or getMachineBrand().startswith('I') or getMachineBrand().startswith('O') or getMachineBrand().startswith('U') or getMachineBrand().startswith('Xt'):
self.message += (_('Back-up Tool for an %s\n') % self.SHOWNAME).upper()
else:
self.message += (_('Back-up Tool for a %s\n') % self.SHOWNAME).upper()
self.message += VERSION + '\n'
self.message += "_________________________________________________\n\n"
self.message += _("Please be patient, a backup will now be made,\n")
self.message += _("because of the used filesystem the back-up\n")
self.message += _("will take about 1-15 minutes for this system\n")
self.message += "_________________________________________________\n\n"
if self.RECOVERY:
self.message += _("Backup Mode: USB Recovery\n")
else:
self.message += _("Backup Mode: Flash Online\n")
self.message += "_________________________________________________\n"
self.message += "'"
## PREPARING THE BUILDING ENVIRONMENT
os.system("rm -rf %s" %self.WORKDIR)
self.backuproot = "/tmp/bi/root"
if SystemInfo["HasRootSubdir"]:
self.backuproot = "/tmp/bi/RootSubdir/"
if not os.path.exists(self.WORKDIR):
os.makedirs(self.WORKDIR)
if not os.path.exists(self.backuproot):
os.makedirs(self.backuproot)
os.system("sync")
if SystemInfo["canMultiBoot"]:
if SystemInfo["HasRootSubdir"]:
os.system("mount /dev/%s /tmp/bi/RootSubdir" %self.MTDROOTFS)
self.backuproot = self.backuproot + self.ROOTFSSUBDIR
else:
os.system("mount /dev/%s %s" %(self.MTDROOTFS, self.backuproot))
else:
os.system("mount --bind / %s" %(self.backuproot))
if "jffs2" in self.ROOTFSTYPE.split():
cmd1 = "%s --root=%s --faketime --output=%s/root.jffs2 %s" % (self.MKFS_JFFS2, self.backuproot, self.WORKDIR, self.MKUBIFS_ARGS)
cmd2 = None
cmd3 = None
elif "ubi" in self.ROOTFSTYPE.split():
f = open("%s/ubinize.cfg" %self.WORKDIR, "w")
f.write("[ubifs]\n")
f.write("mode=ubi\n")
f.write("image=%s/root.ubi\n" %self.WORKDIR)
f.write("vol_id=0\n")
f.write("vol_type=dynamic\n")
f.write("vol_name=rootfs\n")
f.write("vol_flags=autoresize\n")
f.close()
ff = open("%s/root.ubi" %self.WORKDIR, "w")
ff.close()
cmd1 = "%s -r %s -o %s/root.ubi %s" % (self.MKFS_UBI, self.backuproot, self.WORKDIR, self.MKUBIFS_ARGS)
cmd2 = "%s -o %s/root.ubifs %s %s/ubinize.cfg" % (self.UBINIZE, self.WORKDIR, self.UBINIZE_ARGS, self.WORKDIR)
cmd3 = "mv %s/root.ubifs %s/root.%s" %(self.WORKDIR, self.WORKDIR, self.ROOTFSTYPE)
else:
if self.RECOVERY:
cmd1 = None
cmd2 = None
else:
cmd1 = "%s -cf %s/rootfs.tar -C %s --exclude ./var/nmbd --exclude ./.resizerootfs --exclude ./.resize-rootfs --exclude ./.resize-linuxrootfs --exclude ./.resize-userdata --exclude ./var/lib/samba/private/msg.sock --exclude ./var/lib/samba/msg.sock/* --exclude ./run/avahi-daemon/socket ." % (self.MKFS_TAR, self.WORKDIR, self.backuproot)
cmd2 = "%s %s/rootfs.tar" % (self.BZIP2, self.WORKDIR)
cmd3 = None
cmdlist = []
cmdlist.append(self.message)
if cmd1:
cmdlist.append('echo "' + _("Create:") + ' %s"' %self.ROOTFSBIN)
cmdlist.append(cmd1)
if cmd2:
cmdlist.append(cmd2)
if cmd3:
cmdlist.append(cmd3)
if self.MODEL in ("gbquad4k","gbue4k","gbx34k"):
cmdlist.append('echo "' + _("Create:") + " boot dump" + '"')
cmdlist.append("dd if=/dev/mmcblk0p1 of=%s/boot.bin" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " rescue dump" + '"')
cmdlist.append("dd if=/dev/mmcblk0p3 of=%s/rescue.bin" % self.WORKDIR)
if self.MACHINEBUILD in ("h9","i55plus"):
cmdlist.append('echo "' + _("Create:") + " fastboot dump" + '"')
cmdlist.append("dd if=/dev/mtd0 of=%s/fastboot.bin" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " bootargs dump" + '"')
cmdlist.append("dd if=/dev/mtd1 of=%s/bootargs.bin" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " baseparam dump" + '"')
cmdlist.append("dd if=/dev/mtd2 of=%s/baseparam.bin" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " pq_param dump" + '"')
cmdlist.append("dd if=/dev/mtd3 of=%s/pq_param.bin" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " logo dump" + '"')
cmdlist.append("dd if=/dev/mtd4 of=%s/logo.bin" % self.WORKDIR)
if self.EMMCIMG == "usb_update.bin" and self.RECOVERY:
SEEK_CONT = (Harddisk.getFolderSize(self.backuproot)/ 1024) + 100000
cmdlist.append('echo "' + _("Create:") + " fastboot dump" + '"')
cmdlist.append('cp -f /usr/share/fastboot.bin %s/fastboot.bin' %(self.WORKDIR))
#cmdlist.append("dd if=/dev/mmcblk0p1 of=%s/fastboot.bin" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " bootargs dump" + '"')
cmdlist.append('cp -f /usr/share/bootargs.bin %s/bootargs.bin' %(self.WORKDIR))
#cmdlist.append("dd if=/dev/mmcblk0p2 of=%s/bootargs.bin" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " boot dump" + '"')
cmdlist.append("dd if=/dev/mmcblk0p3 of=%s/boot.img" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " baseparam dump" + '"')
#cmdlist.append('cp -f /usr/share/bootargs.bin %s/baseparam.img' %(self.WORKDIR))
cmdlist.append("dd if=/dev/mmcblk0p4 of=%s/baseparam.img" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " pq_param dump" + '"')
#cmdlist.append('cp -f /usr/share/bootargs.bin %s/pq_param.bin' %(self.WORKDIR))
cmdlist.append("dd if=/dev/mmcblk0p5 of=%s/pq_param.bin" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " logo dump" + '"')
cmdlist.append("dd if=/dev/mmcblk0p6 of=%s/logo.img" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " deviceinfo dump" + '"')
#cmdlist.append('cp -f /usr/share/bootargs.bin %s/deviceinfo.bin' %(self.WORKDIR))
cmdlist.append("dd if=/dev/mmcblk0p7 of=%s/deviceinfo.bin" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " apploader dump" + '"')
cmdlist.append('cp -f /usr/share/apploader.bin %s/apploader.bin' %(self.WORKDIR))
#cmdlist.append("dd if=/dev/mmcblk0p10 of=%s/apploader.bin" % self.WORKDIR)
cmdlist.append('echo "' + _("Create:") + " rootfs dump" + '"')
cmdlist.append("dd if=/dev/zero of=%s/rootfs.ext4 seek=%s count=60 bs=1024" % (self.WORKDIR, SEEK_CONT))
cmdlist.append("mkfs.ext4 -F -i 4096 %s/rootfs.ext4" % (self.WORKDIR))
cmdlist.append("mkdir -p %s/userdata" % self.WORKDIR)
cmdlist.append("mount %s/rootfs.ext4 %s/userdata" %(self.WORKDIR,self.WORKDIR))
cmdlist.append("mkdir -p %s/userdata/linuxrootfs1" % self.WORKDIR)
cmdlist.append("mkdir -p %s/userdata/linuxrootfs2" % self.WORKDIR)
cmdlist.append("mkdir -p %s/userdata/linuxrootfs3" % self.WORKDIR)
cmdlist.append("mkdir -p %s/userdata/linuxrootfs4" % self.WORKDIR)
cmdlist.append("rsync -aAX %s/ %s/userdata/linuxrootfs1/" % (self.backuproot,self.WORKDIR))
cmdlist.append("umount %s/userdata" %(self.WORKDIR))
cmdlist.append('echo "' + _("Create:") + " kerneldump" + '"')
if SystemInfo["canMultiBoot"] or self.MTDKERNEL.startswith('mmcblk0'):
cmdlist.append("dd if=/dev/%s of=%s/%s" % (self.MTDKERNEL ,self.WORKDIR, self.KERNELBIN))
else:
cmdlist.append("nanddump -a -f %s/vmlinux.gz /dev/%s" % (self.WORKDIR, self.MTDKERNEL))
if self.EMMCIMG == "disk.img" and self.RECOVERY:
EMMC_IMAGE = "%s/%s"% (self.WORKDIR,self.EMMCIMG)
BLOCK_SIZE=512
BLOCK_SECTOR=2
IMAGE_ROOTFS_ALIGNMENT=1024
BOOT_PARTITION_SIZE=3072
KERNEL_PARTITION_SIZE=8192
ROOTFS_PARTITION_SIZE=1048576
EMMC_IMAGE_SIZE=3817472
KERNEL_PARTITION_OFFSET = int(IMAGE_ROOTFS_ALIGNMENT) + int(BOOT_PARTITION_SIZE)
ROOTFS_PARTITION_OFFSET = int(KERNEL_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
SECOND_KERNEL_PARTITION_OFFSET = int(ROOTFS_PARTITION_OFFSET) + int(ROOTFS_PARTITION_SIZE)
THRID_KERNEL_PARTITION_OFFSET = int(SECOND_KERNEL_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
FOURTH_KERNEL_PARTITION_OFFSET = int(THRID_KERNEL_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
MULTI_ROOTFS_PARTITION_OFFSET = int(FOURTH_KERNEL_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
EMMC_IMAGE_SEEK = int(EMMC_IMAGE_SIZE) * int(BLOCK_SECTOR)
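# Worked-out layout for the constants above (offsets in KiB, dd seek in 512-byte
# sectors); added purely as a reading aid, derived from the values in this branch:
#   KERNEL_PARTITION_OFFSET        = 1024 + 3072     = 4096
#   ROOTFS_PARTITION_OFFSET        = 4096 + 8192     = 12288
#   SECOND_KERNEL_PARTITION_OFFSET = 12288 + 1048576 = 1060864
#   THRID_KERNEL_PARTITION_OFFSET  = 1060864 + 8192  = 1069056
#   FOURTH_KERNEL_PARTITION_OFFSET = 1069056 + 8192  = 1077248
#   MULTI_ROOTFS_PARTITION_OFFSET  = 1077248 + 8192  = 1085440
#   EMMC_IMAGE_SEEK                = 3817472 * 2     = 7634944 sectors
#                                    (7634944 * 512 bytes ~ 3.6 GiB sparse disk.img)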
cmdlist.append('echo "' + _("Create: Recovery Fullbackup %s")% (self.EMMCIMG) + '"')
cmdlist.append('dd if=/dev/zero of=%s bs=%s count=0 seek=%s' % (EMMC_IMAGE, BLOCK_SIZE , EMMC_IMAGE_SEEK))
cmdlist.append('parted -s %s mklabel gpt' %EMMC_IMAGE)
PARTED_END_BOOT = int(IMAGE_ROOTFS_ALIGNMENT) + int(BOOT_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart boot fat16 %s %s' % (EMMC_IMAGE, IMAGE_ROOTFS_ALIGNMENT, PARTED_END_BOOT ))
PARTED_END_KERNEL1 = int(KERNEL_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart linuxkernel %s %s' % (EMMC_IMAGE, KERNEL_PARTITION_OFFSET, PARTED_END_KERNEL1 ))
PARTED_END_ROOTFS1 = int(ROOTFS_PARTITION_OFFSET) + int(ROOTFS_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart linuxrootfs ext4 %s %s' % (EMMC_IMAGE, ROOTFS_PARTITION_OFFSET, PARTED_END_ROOTFS1 ))
PARTED_END_KERNEL2 = int(SECOND_KERNEL_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart linuxkernel2 %s %s' % (EMMC_IMAGE, SECOND_KERNEL_PARTITION_OFFSET, PARTED_END_KERNEL2 ))
PARTED_END_KERNEL3 = int(THRID_KERNEL_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart linuxkernel3 %s %s' % (EMMC_IMAGE, THRID_KERNEL_PARTITION_OFFSET, PARTED_END_KERNEL3 ))
PARTED_END_KERNEL4 = int(FOURTH_KERNEL_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart linuxkernel4 %s %s' % (EMMC_IMAGE, FOURTH_KERNEL_PARTITION_OFFSET, PARTED_END_KERNEL4 ))
rd = open("/proc/swaps", "r").read()
if "mmcblk0p7" in rd:
SWAP_PARTITION_OFFSET = int(FOURTH_KERNEL_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
SWAP_PARTITION_SIZE = int(262144)
MULTI_ROOTFS_PARTITION_OFFSET = int(SWAP_PARTITION_OFFSET) + int(SWAP_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart swap linux-swap %s %s' % (EMMC_IMAGE, SWAP_PARTITION_OFFSET, SWAP_PARTITION_OFFSET + SWAP_PARTITION_SIZE))
cmdlist.append('parted -s %s unit KiB mkpart userdata ext4 %s 100%%' % (EMMC_IMAGE, MULTI_ROOTFS_PARTITION_OFFSET))
else:
cmdlist.append('parted -s %s unit KiB mkpart userdata ext4 %s 100%%' % (EMMC_IMAGE, MULTI_ROOTFS_PARTITION_OFFSET))
BOOT_IMAGE_SEEK = int(IMAGE_ROOTFS_ALIGNMENT) * int(BLOCK_SECTOR)
cmdlist.append('dd if=/dev/%s of=%s seek=%s' % (self.MTDBOOT, EMMC_IMAGE, BOOT_IMAGE_SEEK ))
KERNAL_IMAGE_SEEK = int(KERNEL_PARTITION_OFFSET) * int(BLOCK_SECTOR)
cmdlist.append('dd if=/dev/%s of=%s seek=%s' % (self.MTDKERNEL, EMMC_IMAGE, KERNAL_IMAGE_SEEK ))
ROOTFS_IMAGE_SEEK = int(ROOTFS_PARTITION_OFFSET) * int(BLOCK_SECTOR)
cmdlist.append('dd if=/dev/%s of=%s seek=%s ' % (self.MTDROOTFS, EMMC_IMAGE, ROOTFS_IMAGE_SEEK ))
elif self.EMMCIMG == "emmc.img" and self.RECOVERY:
EMMC_IMAGE = "%s/%s"% (self.WORKDIR,self.EMMCIMG)
BLOCK_SECTOR=2
IMAGE_ROOTFS_ALIGNMENT=1024
BOOT_PARTITION_SIZE=3072
KERNEL_PARTITION_SIZE=8192
ROOTFS_PARTITION_SIZE=1898496
EMMC_IMAGE_SIZE=7634944
BOOTDD_VOLUME_ID = "boot"
KERNEL1_PARTITION_OFFSET = int(IMAGE_ROOTFS_ALIGNMENT) + int(BOOT_PARTITION_SIZE)
ROOTFS1_PARTITION_OFFSET = int(KERNEL1_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
KERNEL2_PARTITION_OFFSET = int(ROOTFS1_PARTITION_OFFSET) + int(ROOTFS_PARTITION_SIZE)
ROOTFS2_PARTITION_OFFSET = int(KERNEL2_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
KERNEL3_PARTITION_OFFSET = int(ROOTFS2_PARTITION_OFFSET) + int(ROOTFS_PARTITION_SIZE)
ROOTFS3_PARTITION_OFFSET = int(KERNEL3_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
KERNEL4_PARTITION_OFFSET = int(ROOTFS3_PARTITION_OFFSET) + int(ROOTFS_PARTITION_SIZE)
ROOTFS4_PARTITION_OFFSET = int(KERNEL4_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
EMMC_IMAGE_SEEK = int(EMMC_IMAGE_SIZE) * int(IMAGE_ROOTFS_ALIGNMENT)
cmdlist.append('echo "' + _("Create: Recovery Fullbackup %s")% (self.EMMCIMG) + '"')
cmdlist.append('dd if=/dev/zero of=%s bs=1 count=0 seek=%s' % (EMMC_IMAGE, EMMC_IMAGE_SEEK))
cmdlist.append('parted -s %s mklabel gpt' %EMMC_IMAGE)
PARTED_END_BOOT = int(IMAGE_ROOTFS_ALIGNMENT) + int(BOOT_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart boot fat16 %s %s' % (EMMC_IMAGE, IMAGE_ROOTFS_ALIGNMENT, PARTED_END_BOOT ))
cmdlist.append('parted -s %s set 1 boot on' %EMMC_IMAGE)
PARTED_END_KERNEL1 = int(KERNEL1_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart kernel1 %s %s' % (EMMC_IMAGE, KERNEL1_PARTITION_OFFSET, PARTED_END_KERNEL1 ))
PARTED_END_ROOTFS1 = int(ROOTFS1_PARTITION_OFFSET) + int(ROOTFS_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart rootfs1 ext4 %s %s' % (EMMC_IMAGE, ROOTFS1_PARTITION_OFFSET, PARTED_END_ROOTFS1 ))
PARTED_END_KERNEL2 = int(KERNEL2_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart kernel2 %s %s' % (EMMC_IMAGE, KERNEL2_PARTITION_OFFSET, PARTED_END_KERNEL2 ))
PARTED_END_ROOTFS2 = int(ROOTFS2_PARTITION_OFFSET) + int(ROOTFS_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart rootfs2 ext4 %s %s' % (EMMC_IMAGE, ROOTFS2_PARTITION_OFFSET, PARTED_END_ROOTFS2 ))
PARTED_END_KERNEL3 = int(KERNEL3_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart kernel3 %s %s' % (EMMC_IMAGE, KERNEL3_PARTITION_OFFSET, PARTED_END_KERNEL3 ))
PARTED_END_ROOTFS3 = int(ROOTFS3_PARTITION_OFFSET) + int(ROOTFS_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart rootfs3 ext4 %s %s' % (EMMC_IMAGE, ROOTFS3_PARTITION_OFFSET, PARTED_END_ROOTFS3 ))
PARTED_END_KERNEL4 = int(KERNEL4_PARTITION_OFFSET) + int(KERNEL_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart kernel4 %s %s' % (EMMC_IMAGE, KERNEL4_PARTITION_OFFSET, PARTED_END_KERNEL4 ))
PARTED_END_ROOTFS4 = int(ROOTFS4_PARTITION_OFFSET) + int(ROOTFS_PARTITION_SIZE)
cmdlist.append('parted -s %s unit KiB mkpart rootfs4 ext4 %s %s' % (EMMC_IMAGE, ROOTFS4_PARTITION_OFFSET, PARTED_END_ROOTFS4 ))
BOOT_IMAGE_SEEK = int(IMAGE_ROOTFS_ALIGNMENT) * int(BLOCK_SECTOR)
cmdlist.append('dd if=/dev/%s of=%s seek=%s' % (self.MTDBOOT, EMMC_IMAGE, BOOT_IMAGE_SEEK ))
KERNAL_IMAGE_SEEK = int(KERNEL1_PARTITION_OFFSET) * int(BLOCK_SECTOR)
cmdlist.append('dd if=/dev/%s of=%s seek=%s' % (self.MTDKERNEL, EMMC_IMAGE, KERNAL_IMAGE_SEEK ))
ROOTFS_IMAGE_SEEK = int(ROOTFS1_PARTITION_OFFSET) * int(BLOCK_SECTOR)
cmdlist.append('dd if=/dev/%s of=%s seek=%s ' % (self.MTDROOTFS, EMMC_IMAGE, ROOTFS_IMAGE_SEEK ))
elif self.EMMCIMG == "usb_update.bin" and self.RECOVERY:
cmdlist.append('echo "' + _("Create: Recovery Fullbackup %s")% (self.EMMCIMG) + '"')
f = open("%s/emmc_partitions.xml" %self.WORKDIR, "w")
f.write('<?xml version="1.0" encoding="GB2312" ?>\n')
f.write('<Partition_Info>\n')
f.write('<Part Sel="1" PartitionName="fastboot" FlashType="emmc" FileSystem="none" Start="0" Length="1M" SelectFile="fastboot.bin"/>\n')
f.write('<Part Sel="1" PartitionName="bootargs" FlashType="emmc" FileSystem="none" Start="1M" Length="1M" SelectFile="bootargs.bin"/>\n')
f.write('<Part Sel="1" PartitionName="bootoptions" FlashType="emmc" FileSystem="none" Start="2M" Length="1M" SelectFile="boot.img"/>\n')
f.write('<Part Sel="1" PartitionName="baseparam" FlashType="emmc" FileSystem="none" Start="3M" Length="3M" SelectFile="baseparam.img"/>\n')
f.write('<Part Sel="1" PartitionName="pqparam" FlashType="emmc" FileSystem="none" Start="6M" Length="4M" SelectFile="pq_param.bin"/>\n')
f.write('<Part Sel="1" PartitionName="logo" FlashType="emmc" FileSystem="none" Start="10M" Length="4M" SelectFile="logo.img"/>\n')
f.write('<Part Sel="1" PartitionName="deviceinfo" FlashType="emmc" FileSystem="none" Start="14M" Length="4M" SelectFile="deviceinfo.bin"/>\n')
f.write('<Part Sel="1" PartitionName="loader" FlashType="emmc" FileSystem="none" Start="26M" Length="32M" SelectFile="apploader.bin"/>\n')
f.write('<Part Sel="1" PartitionName="linuxkernel1" FlashType="emmc" FileSystem="none" Start="66M" Length="16M" SelectFile="kernel.bin"/>\n')
if self.MACHINEBUILD in ("sf8008m"):
f.write('<Part Sel="1" PartitionName="userdata" FlashType="emmc" FileSystem="ext3/4" Start="130M" Length="3580M" SelectFile="rootfs.ext4"/>\n')
else:
f.write('<Part Sel="1" PartitionName="userdata" FlashType="emmc" FileSystem="ext3/4" Start="130M" Length="7000M" SelectFile="rootfs.ext4"/>\n')
f.write('</Partition_Info>\n')
f.close()
cmdlist.append('mkupdate -s 00000003-00000001-01010101 -f %s/emmc_partitions.xml -d %s/%s' % (self.WORKDIR,self.WORKDIR,self.EMMCIMG))
self.session.open(Console, title = self.TITLE, cmdlist = cmdlist, finishedCallback = self.doFullBackupCB, closeOnSuccess = True)
else:
self.close()
else:
self.close()
def doFullBackupCB(self):
cmdlist = []
cmdlist.append(self.message)
cmdlist.append('echo "' + _("Almost there... ") + '"')
cmdlist.append('echo "' + _("Now building the Backup Image") + '"')
if self.EMMCIMG == "usb_update.bin" and self.RECOVERY:
os.system('rm -rf %s' %self.MAINDESTROOT)
if not os.path.exists(self.MAINDESTROOT):
os.makedirs(self.MAINDESTROOT)
f = open("%s/imageversion" %self.MAINDESTROOT, "w")
f.write(self.IMAGEVERSION)
f.close()
else:
os.system('rm -rf %s' %self.MAINDEST)
if not os.path.exists(self.MAINDEST):
os.makedirs(self.MAINDEST)
f = open("%s/imageversion" %self.MAINDEST, "w")
f.write(self.IMAGEVERSION)
f.close()
if not self.RECOVERY:
if self.ROOTFSBIN == "rootfs.tar.bz2":
os.system('mv %s/rootfs.tar.bz2 %s/rootfs.tar.bz2' %(self.WORKDIR, self.MAINDEST))
else:
os.system('mv %s/root.%s %s/%s' %(self.WORKDIR, self.ROOTFSTYPE, self.MAINDEST, self.ROOTFSBIN))
if SystemInfo["canMultiBoot"] or self.MTDKERNEL.startswith('mmcblk0'):
os.system('mv %s/%s %s/%s' %(self.WORKDIR, self.KERNELBIN, self.MAINDEST, self.KERNELBIN))
else:
os.system('mv %s/vmlinux.gz %s/%s' %(self.WORKDIR, self.MAINDEST, self.KERNELBIN))
if self.RECOVERY:
if self.EMMCIMG == "usb_update.bin":
os.system('mv %s/%s %s/%s' %(self.WORKDIR,self.EMMCIMG, self.MAINDESTROOT,self.EMMCIMG))
cmdlist.append('cp -f /usr/share/fastboot.bin %s/fastboot.bin' %(self.MAINDESTROOT))
cmdlist.append('cp -f /usr/share/bootargs.bin %s/bootargs.bin' %(self.MAINDESTROOT))
cmdlist.append('cp -f /usr/share/apploader.bin %s/apploader.bin' %(self.MAINDESTROOT))
else:
os.system('mv %s/%s %s/%s' %(self.WORKDIR,self.EMMCIMG, self.MAINDEST,self.EMMCIMG))
if self.EMMCIMG == "emmc.img":
cmdlist.append('echo "rename this file to "force" to force an update without confirmation" > %s/noforce' %self.MAINDEST)
elif self.MODEL in ("vuultimo4k","vusolo4k", "vuduo2", "vusolo2", "vusolo", "vuduo", "vuultimo", "vuuno"):
cmdlist.append('echo "This file forces a reboot after the update." > %s/reboot.update' %self.MAINDEST)
elif self.MODEL in ("vuzero" , "vusolose", "vuuno4k", "vuzero4k"):
cmdlist.append('echo "This file forces the update." > %s/force.update' %self.MAINDEST)
elif self.MODEL in ('viperslim','evoslimse','evoslimt2c', "novaip" , "zgemmai55" , "sf98", "xpeedlxpro",'evoslim','vipert2c'):
cmdlist.append('echo "This file forces the update." > %s/force' %self.MAINDEST)
elif SystemInfo["HasRootSubdir"]:
cmdlist.append('echo "Rename the unforce_%s.txt to force_%s.txt and move it to the root of your usb-stick" > %s/force_%s_READ.ME' %(self.MACHINEBUILD, self.MACHINEBUILD, self.MAINDEST, self.MACHINEBUILD))
cmdlist.append('echo "When you enter the recovery menu then it will force to install the image in the linux1 selection" >> %s/force_%s_READ.ME' %(self.MAINDEST, self.MACHINEBUILD))
else:
cmdlist.append('echo "rename this file to "force" to force an update without confirmation" > %s/noforce' %self.MAINDEST)
if self.MODEL in ("gbquad4k","gbue4k","gbx34k"):
os.system('mv %s/boot.bin %s/boot.bin' %(self.WORKDIR, self.MAINDEST))
os.system('mv %s/rescue.bin %s/rescue.bin' %(self.WORKDIR, self.MAINDEST))
os.system('cp -f /usr/share/gpt.bin %s/gpt.bin' %(self.MAINDEST))
if self.MACHINEBUILD in ("h9","i55plus"):
os.system('mv %s/fastboot.bin %s/fastboot.bin' %(self.WORKDIR, self.MAINDEST))
os.system('mv %s/pq_param.bin %s/pq_param.bin' %(self.WORKDIR, self.MAINDEST))
os.system('mv %s/bootargs.bin %s/bootargs.bin' %(self.WORKDIR, self.MAINDEST))
os.system('mv %s/baseparam.bin %s/baseparam.bin' %(self.WORKDIR, self.MAINDEST))
os.system('mv %s/logo.bin %s/logo.bin' %(self.WORKDIR, self.MAINDEST))
if self.MODEL in ("gbquad", "gbquadplus", "gb800ue", "gb800ueplus", "gbultraue", "gbultraueh", "twinboxlcd", "twinboxlcdci", "singleboxlcd", "sf208", "sf228"):
lcdwaitkey = '/usr/share/lcdwaitkey.bin'
lcdwarning = '/usr/share/lcdwarning.bin'
if os.path.isfile(lcdwaitkey):
os.system('cp %s %s/lcdwaitkey.bin' %(lcdwaitkey, self.MAINDEST))
if os.path.isfile(lcdwarning):
os.system('cp %s %s/lcdwarning.bin' %(lcdwarning, self.MAINDEST))
if self.MODEL in ("e4hdultra","protek4k"):
lcdwarning = '/usr/share/lcdflashing.bmp'
if os.path.isfile(lcdwarning):
os.system('cp %s %s/lcdflashing.bmp' %(lcdwarning, self.MAINDEST))
if self.MODEL == "gb800solo":
f = open("%s/burn.bat" % (self.MAINDESTROOT), "w")
f.write("flash -noheader usbdisk0:gigablue/solo/kernel.bin flash0.kernel\n")
f.write("flash -noheader usbdisk0:gigablue/solo/rootfs.bin flash0.rootfs\n")
f.write('setenv -p STARTUP "boot -z -elf flash0.kernel: ')
f.write("'rootfstype=jffs2 bmem=106M@150M root=/dev/mtdblock6 rw '")
f.write('"\n')
f.close()
if self.MACHINEBUILD in ("h9","i55plus"):
cmdlist.append('cp -f /usr/share/fastboot.bin %s/fastboot.bin' %(self.MAINDESTROOT))
cmdlist.append('cp -f /usr/share/bootargs.bin %s/bootargs.bin' %(self.MAINDESTROOT))
if SystemInfo["canRecovery"] and self.RECOVERY:
cmdlist.append('7za a -r -bt -bd %s/%s-%s-%s-backup-%s_recovery_emmc.zip %s/*' %(self.DIRECTORY, self.IMAGEDISTRO, self.DISTROVERSION, self.MODEL, self.DATE, self.MAINDESTROOT))
else:
cmdlist.append('7za a -r -bt -bd %s/%s-%s-%s-backup-%s_usb.zip %s/*' %(self.DIRECTORY, self.IMAGEDISTRO, self.DISTROVERSION, self.MODEL, self.DATE, self.MAINDESTROOT))
cmdlist.append("sync")
file_found = True
if self.RECOVERY:
if self.EMMCIMG == "usb_update.bin":
if not os.path.isfile("%s/%s" % (self.MAINDESTROOT, self.EMMCIMG)):
print "[Image Backup] %s file not found" %(self.EMMCIMG)
file_found = False
else:
if not os.path.isfile("%s/%s" % (self.MAINDEST, self.EMMCIMG)):
print "[Image Backup] %s file not found" %(self.EMMCIMG)
file_found = False
else:
if not os.path.isfile("%s/%s" % (self.MAINDEST, self.ROOTFSBIN)):
print "[Image Backup] %s file not found" %(self.ROOTFSBIN)
file_found = False
if not os.path.isfile("%s/%s" % (self.MAINDEST, self.KERNELBIN)):
print "[Image Backup] %s file not found" %(self.KERNELBIN)
file_found = False
if SystemInfo["canMultiBoot"] and not self.RECOVERY and not SystemInfo["HasRootSubdir"]:
cmdlist.append('echo "_________________________________________________\n"')
cmdlist.append('echo "' + _("Multiboot Image created on: %s/%s-%s-%s-backup-%s_usb.zip") %(self.DIRECTORY, self.IMAGEDISTRO, self.DISTROVERSION, self.MODEL, self.DATE) + '"')
cmdlist.append('echo "_________________________________________________"')
cmdlist.append('echo " "')
cmdlist.append('echo "' + _("Please wait...almost ready! ") + '"')
cmdlist.append('echo " "')
cmdlist.append('echo "' + _("To restore the image:") + '"')
cmdlist.append('echo "' + _("Use OnlineFlash in SoftwareManager") + '"')
elif file_found:
cmdlist.append('echo "_________________________________________________\n"')
if SystemInfo["canRecovery"] and self.RECOVERY:
cmdlist.append('echo "' + _("Image created on: %s/%s-%s-%s-backup-%s_recovery_emmc.zip") %(self.DIRECTORY, self.IMAGEDISTRO, self.DISTROVERSION, self.MODEL, self.DATE) + '"')
else:
cmdlist.append('echo "' + _("Image created on: %s/%s-%s-%s-backup-%s_usb.zip") %(self.DIRECTORY, self.IMAGEDISTRO, self.DISTROVERSION, self.MODEL, self.DATE) + '"')
cmdlist.append('echo "_________________________________________________"')
cmdlist.append('echo " "')
cmdlist.append('echo "' + _("Please wait...almost ready! ") + '"')
cmdlist.append('echo " "')
cmdlist.append('echo "' + _("To restore the image:") + '"')
cmdlist.append('echo "' + _("Please check the manual of the receiver") + '"')
cmdlist.append('echo "' + _("on how to restore the image") + '"')
else:
cmdlist.append('echo "_________________________________________________\n"')
cmdlist.append('echo "' + _("Image creation failed - ") + '"')
cmdlist.append('echo "' + _("Probable causes could be") + ':"')
cmdlist.append('echo "' + _(" wrong back-up destination ") + '"')
cmdlist.append('echo "' + _(" no space left on back-up device") + '"')
cmdlist.append('echo "' + _(" no writing permission on back-up device") + '"')
cmdlist.append('echo " "')
cmdlist.append("rm -rf %s/build_%s" %(self.DIRECTORY, self.MODEL))
if SystemInfo["HasRootSubdir"]:
cmdlist.append("umount /tmp/bi/RootSubdir")
cmdlist.append("rmdir /tmp/bi/RootSubdir")
else:
cmdlist.append("umount /tmp/bi/root")
cmdlist.append("rmdir /tmp/bi/root")
cmdlist.append("rmdir /tmp/bi")
cmdlist.append("rm -rf %s" % self.WORKDIR)
cmdlist.append("sleep 5")
END = time()
DIFF = int(END - self.START)
TIMELAP = str(datetime.timedelta(seconds=DIFF))
cmdlist.append('echo "' + _("Time required for this process: %s") %TIMELAP + '\n"')
self.session.open(Console, title = self.TITLE, cmdlist = cmdlist, closeOnSuccess = False)
def imageInfo(self):
AboutText = _("Full Image Backup ")
AboutText += _("By openATV Image Team") + "\n"
AboutText += _("Support at") + " www.opena.tv\n\n"
AboutText += _("[Image Info]\n")
AboutText += _("Model: %s %s\n") % (getMachineBrand(), getMachineName())
AboutText += _("Backup Date: %s\n") % strftime("%Y-%m-%d", localtime(self.START))
if os.path.exists('/proc/stb/info/chipset'):
AboutText += _("Chipset: BCM%s") % about.getChipSetString().lower().replace('\n','').replace('bcm','') + "\n"
AboutText += _("CPU: %s") % about.getCPUString() + "\n"
AboutText += _("Cores: %s") % about.getCpuCoresString() + "\n"
AboutText += _("Version: %s") % getImageVersion() + "\n"
AboutText += _("Build: %s") % getImageBuild() + "\n"
AboutText += _("Kernel: %s") % about.getKernelVersionString() + "\n"
string = getDriverDate()
year = string[0:4]
month = string[4:6]
day = string[6:8]
driversdate = '-'.join((year, month, day))
AboutText += _("Drivers:\t%s") % driversdate + "\n"
AboutText += _("Last update:\t%s") % getEnigmaVersionString() + "\n\n"
AboutText += _("[Enigma2 Settings]\n")
AboutText += commands.getoutput("cat /etc/enigma2/settings")
AboutText += _("\n\n[User - bouquets (TV)]\n")
try:
f = open("/etc/enigma2/bouquets.tv","r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("#SERVICE:"):
bouqet = line.split()
if len(bouqet) > 3:
bouqet[3] = bouqet[3].replace('"','')
f = open("/etc/enigma2/" + bouqet[3],"r")
userbouqet = f.readline()
AboutText += userbouqet.replace('#NAME ','')
f.close()
except:
AboutText += _("Error reading bouquets.tv")
AboutText += _("\n[User - bouquets (RADIO)]\n")
try:
f = open("/etc/enigma2/bouquets.radio","r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("#SERVICE:"):
bouqet = line.split()
if len(bouqet) > 3:
bouqet[3] = bouqet[3].replace('"','')
f = open("/etc/enigma2/" + bouqet[3],"r")
userbouqet = f.readline()
AboutText += userbouqet.replace('#NAME ','')
f.close()
except:
AboutText += _("Error reading bouquets.radio")
AboutText += _("\n[Installed Plugins]\n")
AboutText += commands.getoutput("opkg list_installed | grep enigma2-plugin-")
return AboutText
|
gpl-2.0
|
jumpstarter-io/neutron
|
neutron/api/api_common.py
|
17
|
10729
|
# Copyright 2011 Citrix System.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo.config import cfg
from webob import exc
from neutron.common import constants
from neutron.common import exceptions
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def get_filters(request, attr_info, skips=[]):
"""Extracts the filters from the request string.
Returns a dict of lists for the filters:
check=a&check=b&name=Bob&
becomes:
{'check': [u'a', u'b'], 'name': [u'Bob']}
"""
res = {}
for key, values in request.GET.dict_of_lists().iteritems():
if key in skips:
continue
values = [v for v in values if v]
key_attr_info = attr_info.get(key, {})
if 'convert_list_to' in key_attr_info:
values = key_attr_info['convert_list_to'](values)
elif 'convert_to' in key_attr_info:
convert_to = key_attr_info['convert_to']
values = [convert_to(v) for v in values]
if values:
res[key] = values
return res
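# A minimal usage sketch for get_filters(), assuming a webob-style request
# whose query string is "name=net1&name=net2&admin_state_up=true" and an
# illustrative attr_info entry (Neutron's real converters live in the
# attribute maps):
#
#   attr_info = {'admin_state_up': {'convert_to': lambda v: v == 'true'}}
#   get_filters(request, attr_info)
#   # -> {'name': [u'net1', u'net2'], 'admin_state_up': [True]}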
def get_previous_link(request, items, id_key):
params = request.GET.copy()
params.pop('marker', None)
if items:
marker = items[0][id_key]
params['marker'] = marker
params['page_reverse'] = True
return "%s?%s" % (request.path_url, urllib.urlencode(params))
def get_next_link(request, items, id_key):
params = request.GET.copy()
params.pop('marker', None)
if items:
marker = items[-1][id_key]
params['marker'] = marker
params.pop('page_reverse', None)
return "%s?%s" % (request.path_url, urllib.urlencode(params))
def get_limit_and_marker(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
of items to return. If limit == 0, it means we needn't
pagination, then return None.
"""
max_limit = _get_pagination_max_limit()
limit = _get_limit_param(request, max_limit)
if max_limit > 0:
limit = min(max_limit, limit) or max_limit
if not limit:
return None, None
marker = request.GET.get('marker', None)
return limit, marker
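# A hedged sketch of the behaviour above: assuming no positive
# pagination_max_limit is configured, a query string of
# "limit=5&marker=abc123" yields (5, 'abc123'), while "limit=0" (or a missing
# limit) yields (None, None), i.e. no pagination is applied.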
def _get_pagination_max_limit():
max_limit = -1
if (cfg.CONF.pagination_max_limit.lower() !=
constants.PAGINATION_INFINITE):
try:
max_limit = int(cfg.CONF.pagination_max_limit)
if max_limit == 0:
raise ValueError()
except ValueError:
LOG.warn(_("Invalid value for pagination_max_limit: %s. It "
"should be an integer greater to 0"),
cfg.CONF.pagination_max_limit)
return max_limit
def _get_limit_param(request, max_limit):
"""Extract integer limit from request or fail."""
try:
limit = int(request.GET.get('limit', 0))
if limit >= 0:
return limit
except ValueError:
pass
msg = _("Limit must be an integer 0 or greater and not '%d'")
raise exceptions.BadRequest(resource='limit', msg=msg)
def list_args(request, arg):
"""Extracts the list of arg from request."""
return [v for v in request.GET.getall(arg) if v]
def get_sorts(request, attr_info):
"""Extract sort_key and sort_dir from request.
Return as: [(key1, value1), (key2, value2)]
"""
sort_keys = list_args(request, "sort_key")
sort_dirs = list_args(request, "sort_dir")
if len(sort_keys) != len(sort_dirs):
msg = _("The number of sort_keys and sort_dirs must be same")
raise exc.HTTPBadRequest(explanation=msg)
valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC]
absent_keys = [x for x in sort_keys if x not in attr_info]
if absent_keys:
msg = _("%s is invalid attribute for sort_keys") % absent_keys
raise exc.HTTPBadRequest(explanation=msg)
invalid_dirs = [x for x in sort_dirs if x not in valid_dirs]
if invalid_dirs:
msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, "
"valid value is '%(asc)s' and '%(desc)s'") %
{'invalid_dirs': invalid_dirs,
'asc': constants.SORT_DIRECTION_ASC,
'desc': constants.SORT_DIRECTION_DESC})
raise exc.HTTPBadRequest(explanation=msg)
return zip(sort_keys,
[x == constants.SORT_DIRECTION_ASC for x in sort_dirs])
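# Hedged example of the return shape: a query string of
# "sort_key=name&sort_dir=asc&sort_key=id&sort_dir=desc" (with both keys
# present in attr_info) yields [('name', True), ('id', False)], the boolean
# flagging ascending order.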
def get_page_reverse(request):
data = request.GET.get('page_reverse', 'False')
return data.lower() == "true"
def get_pagination_links(request, items, limit,
marker, page_reverse, key="id"):
key = key if key else 'id'
links = []
if not limit:
return links
if not (len(items) < limit and not page_reverse):
links.append({"rel": "next",
"href": get_next_link(request, items,
key)})
if not (len(items) < limit and page_reverse):
links.append({"rel": "previous",
"href": get_previous_link(request, items,
key)})
return links
class PaginationHelper(object):
def __init__(self, request, primary_key='id'):
self.request = request
self.primary_key = primary_key
def update_fields(self, original_fields, fields_to_add):
pass
def update_args(self, args):
pass
def paginate(self, items):
return items
def get_links(self, items):
return {}
class PaginationEmulatedHelper(PaginationHelper):
def __init__(self, request, primary_key='id'):
super(PaginationEmulatedHelper, self).__init__(request, primary_key)
self.limit, self.marker = get_limit_and_marker(request)
self.page_reverse = get_page_reverse(request)
def update_fields(self, original_fields, fields_to_add):
if not original_fields:
return
if self.primary_key not in original_fields:
original_fields.append(self.primary_key)
fields_to_add.append(self.primary_key)
def paginate(self, items):
if not self.limit:
return items
i = -1
if self.marker:
for item in items:
i = i + 1
if item[self.primary_key] == self.marker:
break
if self.page_reverse:
return items[i - self.limit:i]
return items[i + 1:i + self.limit + 1]
def get_links(self, items):
return get_pagination_links(
self.request, items, self.limit, self.marker,
self.page_reverse, self.primary_key)
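# Illustrative sketch of the emulated paginator, assuming the request carried
# limit=2, marker='b' and no page_reverse:
#
#   items = [{'id': 'a'}, {'id': 'b'}, {'id': 'c'}, {'id': 'd'}]
#   # paginate() finds the marker at index 1 and returns items[2:4],
#   # i.e. [{'id': 'c'}, {'id': 'd'}]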
class PaginationNativeHelper(PaginationEmulatedHelper):
def update_args(self, args):
if self.primary_key not in dict(args.get('sorts', [])).keys():
args.setdefault('sorts', []).append((self.primary_key, True))
args.update({'limit': self.limit, 'marker': self.marker,
'page_reverse': self.page_reverse})
def paginate(self, items):
return items
class NoPaginationHelper(PaginationHelper):
pass
class SortingHelper(object):
def __init__(self, request, attr_info):
pass
def update_args(self, args):
pass
def update_fields(self, original_fields, fields_to_add):
pass
def sort(self, items):
return items
class SortingEmulatedHelper(SortingHelper):
def __init__(self, request, attr_info):
super(SortingEmulatedHelper, self).__init__(request, attr_info)
self.sort_dict = get_sorts(request, attr_info)
def update_fields(self, original_fields, fields_to_add):
if not original_fields:
return
for key in dict(self.sort_dict).keys():
if key not in original_fields:
original_fields.append(key)
fields_to_add.append(key)
def sort(self, items):
def cmp_func(obj1, obj2):
for key, direction in self.sort_dict:
ret = cmp(obj1[key], obj2[key])
if ret:
return ret * (1 if direction else -1)
return 0
return sorted(items, cmp=cmp_func)
class SortingNativeHelper(SortingHelper):
def __init__(self, request, attr_info):
self.sort_dict = get_sorts(request, attr_info)
def update_args(self, args):
args['sorts'] = self.sort_dict
class NoSortingHelper(SortingHelper):
pass
class NeutronController(object):
"""Base controller class for Neutron API."""
# _resource_name will be redefined in concrete subclasses of this controller
_resource_name = None
def __init__(self, plugin):
self._plugin = plugin
super(NeutronController, self).__init__()
def _prepare_request_body(self, body, params):
"""Verifies required parameters are in request body.
Sets default value for missing optional parameters.
Body argument must be the deserialized body.
"""
try:
if body is None:
# Initialize empty resource for setting default value
body = {self._resource_name: {}}
data = body[self._resource_name]
except KeyError:
# raise if _resource_name is not in req body.
raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") %
self._resource_name)
for param in params:
param_name = param['param-name']
param_value = data.get(param_name)
# If the parameter wasn't found and it was required, return 400
if param_value is None and param['required']:
msg = (_("Failed to parse request. "
"Parameter '%s' not specified") % param_name)
LOG.error(msg)
raise exc.HTTPBadRequest(msg)
data[param_name] = param_value or param.get('default-value')
return body
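# Hedged sketch of the default-filling above, assuming a concrete controller
# with _resource_name = 'widget' (an illustrative resource):
#
#   params = [{'param-name': 'name', 'required': True},
#             {'param-name': 'size', 'required': False, 'default-value': 10}]
#   body = {'widget': {'name': 'w1'}}
#   # _prepare_request_body(body, params) keeps 'name' as 'w1', fills 'size'
#   # with 10, and would raise HTTPBadRequest if 'name' were missing.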
|
apache-2.0
|
HalCanary/skia-hc
|
tools/skp/page_sets/skia_tiger8svg_desktop.py
|
6
|
1271
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class SkiaBuildbotDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaBuildbotDesktopPage, self).__init__(
url=url,
name=url,
page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
self.archive_data_file = 'data/skia_tiger8svg_desktop.json'
def RunNavigateSteps(self, action_runner):
action_runner.Navigate(self.url)
action_runner.Wait(5)
class SkiaTiger8svgDesktopPageSet(story.StorySet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(SkiaTiger8svgDesktopPageSet, self).__init__(
archive_data_file='data/skia_tiger8svg_desktop.json')
urls_list = [
# Why: from skbug.com/4713
('https://storage.googleapis.com/skia-infra-testdata/images-for-skps/'
'tiger-8.svg'),
]
for url in urls_list:
self.AddStory(SkiaBuildbotDesktopPage(url, self))
|
bsd-3-clause
|
rspavel/spack
|
var/spack/repos/builtin/packages/daligner/package.py
|
5
|
1234
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Daligner(MakefilePackage):
"""Daligner: The Dazzler "Overlap" Module."""
homepage = "https://github.com/thegenemyers/DALIGNER"
url = "https://github.com/thegenemyers/DALIGNER/archive/V1.0.tar.gz"
version('1.0', sha256='2fb03616f0d60df767fbba7c8f0021ec940c8d822ab2011cf58bd56a8b9fb414')
def edit(self, spec, prefix):
makefile = FileFilter('Makefile')
kwargs = {'ignore_absent': False, 'backup': False, 'string': True}
makefile.filter('cp $(ALL) ~/bin',
'cp $(ALL) {0}'.format(prefix.bin),
**kwargs)
# He changed the Makefile in commit dae119.
# You'll need this instead if/when he cuts a new release
# or if you try to build from the tip of master.
# makefile.filter('DEST_DIR = .*',
# 'DEST_DIR = {0}'.format(prefix.bin))
# or pass DEST_DIR in to the make
@run_before('install')
def make_prefix_dot_bin(self):
mkdir(prefix.bin)
|
lgpl-2.1
|
hjanime/VisTrails
|
vistrails/tests/__init__.py
|
2
|
1913
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
|
bsd-3-clause
|
edx/edx-platform
|
pavelib/servers.py
|
3
|
10174
|
"""
Run and manage servers for local development.
"""
import argparse
import sys
from paver.easy import call_task, cmdopts, consume_args, needs, sh, task
from .assets import collect_assets
from .utils.cmd import cmd, django_cmd
from .utils.envs import Env
from .utils.process import run_multi_processes, run_process
from .utils.timer import timed
DEFAULT_PORT = {"lms": 8000, "studio": 8001}
DEFAULT_SETTINGS = Env.DEVSTACK_SETTINGS
OPTIMIZED_SETTINGS = "devstack_optimized"
OPTIMIZED_ASSETS_SETTINGS = "test_static_optimized"
ASSET_SETTINGS_HELP = (
"Settings file used for updating assets. Defaults to the value of the settings variable if not provided."
)
def run_server(
system, fast=False, settings=None, asset_settings=None, port=None
):
"""Start the server for LMS or Studio.
Args:
system (str): The system to be run (lms or studio).
fast (bool): If true, then start the server immediately without updating assets (defaults to False).
settings (str): The Django settings module to use; if not provided, use the default.
asset_settings (str): The settings to use when generating assets. If not provided, assets are not generated.
port (str): The port number to run the server on. If not provided, uses the default port for the system.
"""
if system not in ['lms', 'studio']:
print("System must be either lms or studio", file=sys.stderr)
exit(1) # lint-amnesty, pylint: disable=consider-using-sys-exit
if not settings:
settings = DEFAULT_SETTINGS
if not fast and asset_settings:
args = [system, f'--settings={asset_settings}', '--watch']
# The default settings use DEBUG mode for running the server which means that
# the optimized assets are ignored, so we skip collectstatic in that case
# to save time.
if settings == DEFAULT_SETTINGS:
args.append('--skip-collect')
call_task('pavelib.assets.update_assets', args=args)
if port is None:
port = DEFAULT_PORT[system]
args = [settings, 'runserver', '--traceback', '--pythonpath=.', f'0.0.0.0:{port}']
run_process(django_cmd(system, *args))
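# A hedged usage sketch (values are illustrative): run_server('lms', fast=True)
# starts the LMS on the default devstack settings without touching assets,
# while run_server('studio', settings=OPTIMIZED_SETTINGS,
# asset_settings=OPTIMIZED_ASSETS_SETTINGS) rebuilds the optimized assets
# first and then starts Studio on its default port.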
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("asset-settings=", "a", ASSET_SETTINGS_HELP),
("port=", "p", "Port"),
("fast", "f", "Skip updating assets"),
])
def lms(options):
"""
Run the LMS server.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
asset_settings = getattr(options, 'asset-settings', settings)
port = getattr(options, 'port', None)
fast = getattr(options, 'fast', False)
run_server(
'lms',
fast=fast,
settings=settings,
asset_settings=asset_settings,
port=port,
)
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("asset-settings=", "a", ASSET_SETTINGS_HELP),
("port=", "p", "Port"),
("fast", "f", "Skip updating assets"),
])
def studio(options):
"""
Run the Studio server.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
asset_settings = getattr(options, 'asset-settings', settings)
port = getattr(options, 'port', None)
fast = getattr(options, 'fast', False)
run_server(
'studio',
fast=fast,
settings=settings,
asset_settings=asset_settings,
port=port,
)
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
def devstack(args):
"""
Start the devstack lms or studio server
"""
parser = argparse.ArgumentParser(prog='paver devstack')
parser.add_argument('system', type=str, nargs=1, help="lms or studio")
parser.add_argument('--fast', action='store_true', default=False, help="Skip updating assets")
parser.add_argument('--optimized', action='store_true', default=False, help="Run with optimized assets")
parser.add_argument('--settings', type=str, default=DEFAULT_SETTINGS, help="Settings file")
parser.add_argument('--asset-settings', type=str, default=None, help=ASSET_SETTINGS_HELP)
args = parser.parse_args(args)
settings = args.settings
asset_settings = args.asset_settings if args.asset_settings else settings
if args.optimized:
settings = OPTIMIZED_SETTINGS
asset_settings = OPTIMIZED_ASSETS_SETTINGS
sh(django_cmd('cms', settings, 'reindex_course', '--setup'))
run_server(
args.system[0],
fast=args.fast,
settings=settings,
asset_settings=asset_settings,
)
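# Hedged command-line sketch for the task above:
#   paver devstack lms --fast
#   paver devstack studio --optimized
# (the flags map directly onto the argparse arguments defined in devstack()).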
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
])
def celery(options):
"""
Runs Celery workers.
"""
settings = getattr(options, 'settings', 'devstack_with_worker')
run_process(cmd(f'DJANGO_SETTINGS_MODULE=lms.envs.{settings}',
'celery', 'worker', '--app=lms.celery:APP',
'--beat', '--loglevel=INFO', '--pythonpath=.'))
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings for both LMS and Studio"),
("asset-settings=", "a", "Django settings for updating assets for both LMS and Studio (defaults to settings)"),
("worker-settings=", "w", "Celery worker Django settings"),
("fast", "f", "Skip updating assets"),
("optimized", "o", "Run with optimized assets"),
("settings-lms=", "l", "Set LMS only, overriding the value from --settings (if provided)"),
("asset-settings-lms=", None, "Set LMS only, overriding the value from --asset-settings (if provided)"),
("settings-cms=", "c", "Set Studio only, overriding the value from --settings (if provided)"),
("asset-settings-cms=", None, "Set Studio only, overriding the value from --asset-settings (if provided)"),
("asset_settings=", None, "deprecated in favor of asset-settings"),
("asset_settings_cms=", None, "deprecated in favor of asset-settings-cms"),
("asset_settings_lms=", None, "deprecated in favor of asset-settings-lms"),
("settings_cms=", None, "deprecated in favor of settings-cms"),
("settings_lms=", None, "deprecated in favor of settings-lms"),
("worker_settings=", None, "deprecated in favor of worker-settings"),
])
def run_all_servers(options):
"""
Runs Celery workers, Studio, and LMS.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
asset_settings = getattr(options, 'asset_settings', settings)
worker_settings = getattr(options, 'worker_settings', 'devstack_with_worker')
fast = getattr(options, 'fast', False)
optimized = getattr(options, 'optimized', False)
if optimized:
settings = OPTIMIZED_SETTINGS
asset_settings = OPTIMIZED_ASSETS_SETTINGS
settings_lms = getattr(options, 'settings_lms', settings)
settings_cms = getattr(options, 'settings_cms', settings)
asset_settings_lms = getattr(options, 'asset_settings_lms', asset_settings)
asset_settings_cms = getattr(options, 'asset_settings_cms', asset_settings)
if not fast:
# First update assets for both LMS and Studio but don't collect static yet
args = [
'lms', 'studio',
f'--settings={asset_settings}',
'--skip-collect'
]
call_task('pavelib.assets.update_assets', args=args)
# Now collect static for each system separately with the appropriate settings.
# Note that the default settings use DEBUG mode for running the server which
# means that the optimized assets are ignored, so we skip collectstatic in that
# case to save time.
if settings != DEFAULT_SETTINGS:
collect_assets(['lms'], asset_settings_lms)
collect_assets(['studio'], asset_settings_cms)
# Install an asset watcher to regenerate files that change
call_task('pavelib.assets.watch_assets', options={'background': True})
# Start up LMS, CMS and Celery
lms_port = DEFAULT_PORT['lms']
cms_port = DEFAULT_PORT['studio']
lms_runserver_args = [f"0.0.0.0:{lms_port}"]
cms_runserver_args = [f"0.0.0.0:{cms_port}"]
run_multi_processes([
django_cmd(
'lms', settings_lms, 'runserver', '--traceback', '--pythonpath=.', *lms_runserver_args
),
django_cmd(
'studio', settings_cms, 'runserver', '--traceback', '--pythonpath=.', *cms_runserver_args
),
cmd(
f'DJANGO_SETTINGS_MODULE=lms.envs.{worker_settings}',
'celery', 'worker', '--app=lms.celery:APP',
'--beat', '--loglevel=INFO', '--pythonpath=.'
)
])
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("fake-initial", None, "Fake the initial migrations"),
])
@timed
def update_db(options):
"""
Migrates the lms and cms across all databases
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
fake = "--fake-initial" if getattr(options, 'fake_initial', False) else ""
for system in ('lms', 'cms'):
# pylint: disable=line-too-long
sh("NO_EDXAPP_SUDO=1 EDX_PLATFORM_SETTINGS_OVERRIDE={settings} /edx/bin/edxapp-migrate-{system} --traceback --pythonpath=. {fake}".format(
settings=settings,
system=system,
fake=fake))
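# Hedged usage sketch: `paver update_db --fake-initial` migrates lms and cms
# with the default devstack settings; `--settings=<module>` (name illustrative)
# overrides the settings used by the edxapp migrate scripts.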
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
@timed
def check_settings(args):
"""
Checks settings files.
"""
parser = argparse.ArgumentParser(prog='paver check_settings')
parser.add_argument('system', type=str, nargs=1, help="lms or studio")
parser.add_argument('settings', type=str, nargs=1, help='Django settings')
args = parser.parse_args(args)
system = args.system[0]
settings = args.settings[0]
try:
import_cmd = f"echo 'import {system}.envs.{settings}'"
django_shell_cmd = django_cmd(system, settings, 'shell', '--plain', '--pythonpath=.')
sh(f"{import_cmd} | {django_shell_cmd}")
except: # pylint: disable=bare-except
print("Failed to import settings", file=sys.stderr)
|
agpl-3.0
|
ppanczyk/ansible
|
lib/ansible/modules/packaging/language/easy_install.py
|
41
|
6867
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: easy_install
short_description: Installs Python libraries
description:
- Installs Python libraries, optionally in a I(virtualenv)
version_added: "0.7"
options:
name:
description:
- A Python library name
required: true
default: null
aliases: []
virtualenv:
description:
- an optional I(virtualenv) directory path to install into. If the
I(virtualenv) does not exist, it is created automatically
required: false
default: null
virtualenv_site_packages:
version_added: "1.1"
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect; the environment must be deleted and newly
created.
required: false
default: "no"
choices: [ "yes", "no" ]
virtualenv_command:
version_added: "1.1"
description:
- The command to create the virtual environment with. For example
C(pyvenv), C(virtualenv), C(virtualenv2).
required: false
default: virtualenv
executable:
description:
- The explicit executable or a pathname to the executable to be used to
run easy_install for a specific version of Python installed in the
system. For example C(easy_install-3.3), if there are both Python 2.7
and 3.3 installations in the system and you want to run easy_install
for the Python 3.3 installation.
version_added: "1.3"
required: false
default: null
state:
version_added: "2.0"
description:
- The desired state of the library. C(latest) ensures that the latest version is installed.
required: false
choices: [present, latest]
default: present
notes:
- Please note that the C(easy_install) module can only install Python
libraries. Thus this module is not able to remove libraries. It is
generally recommended to use the M(pip) module which you can first install
using M(easy_install).
- Also note that I(virtualenv) must be installed on the remote host if the
C(virtualenv) parameter is specified.
requirements: [ "virtualenv" ]
author: "Matt Wright (@mattupstate)"
'''
EXAMPLES = '''
# Examples from Ansible Playbooks
- easy_install:
name: pip
state: latest
# Install Bottle into the specified virtualenv.
- easy_install:
name: bottle
virtualenv: /webapps/myapp/venv
'''
import os
import os.path
import tempfile
from ansible.module_utils.basic import AnsibleModule
def install_package(module, name, easy_install, executable_arguments):
cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
rc, out, err = module.run_command(cmd)
return rc, out, err
def _is_package_installed(module, name, easy_install, executable_arguments):
# Copy and add to the arguments
executable_arguments = executable_arguments[:]
executable_arguments.append('--dry-run')
rc, out, err = install_package(module, name, easy_install, executable_arguments)
if rc:
module.fail_json(msg=err)
return 'Downloading' not in out
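# A hedged reading of the check above: easy_install has no query command, so
# a --dry-run install is used as a probe; if the dry run reports "Downloading"
# the requirement is treated as not yet installed. For example:
#   _is_package_installed(module, 'bottle', '/usr/bin/easy_install', [])
#   # -> True only when the requirement is already satisfied locally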
def _get_easy_install(module, env=None, executable=None):
candidate_easy_inst_basenames = ['easy_install']
easy_install = None
if executable is not None:
if os.path.isabs(executable):
easy_install = executable
else:
candidate_easy_inst_basenames.insert(0, executable)
if easy_install is None:
if env is None:
opt_dirs = []
else:
# Try easy_install with the virtualenv directory first.
opt_dirs = ['%s/bin' % env]
for basename in candidate_easy_inst_basenames:
easy_install = module.get_bin_path(basename, False, opt_dirs)
if easy_install is not None:
break
# easy_install should have been found by now. The final call to
# get_bin_path will trigger fail_json.
if easy_install is None:
basename = candidate_easy_inst_basenames[0]
easy_install = module.get_bin_path(basename, True, opt_dirs)
return easy_install
def main():
arg_spec = dict(
name=dict(required=True),
state=dict(required=False,
default='present',
choices=['present', 'latest'],
type='str'),
virtualenv=dict(default=None, required=False),
virtualenv_site_packages=dict(default='no', type='bool'),
virtualenv_command=dict(default='virtualenv', required=False),
executable=dict(default='easy_install', required=False),
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
env = module.params['virtualenv']
executable = module.params['executable']
site_packages = module.params['virtualenv_site_packages']
virtualenv_command = module.params['virtualenv_command']
executable_arguments = []
if module.params['state'] == 'latest':
executable_arguments.append('--upgrade')
rc = 0
err = ''
out = ''
if env:
virtualenv = module.get_bin_path(virtualenv_command, True)
if not os.path.exists(os.path.join(env, 'bin', 'activate')):
if module.check_mode:
module.exit_json(changed=True)
command = '%s %s' % (virtualenv, env)
if site_packages:
command += ' --system-site-packages'
cwd = tempfile.gettempdir()
rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)
rc += rc_venv
out += out_venv
err += err_venv
easy_install = _get_easy_install(module, env, executable)
cmd = None
changed = False
installed = _is_package_installed(module, name, easy_install, executable_arguments)
if not installed:
if module.check_mode:
module.exit_json(changed=True)
rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments)
rc += rc_easy_inst
out += out_easy_inst
err += err_easy_inst
changed = True
if rc != 0:
module.fail_json(msg=err, cmd=cmd)
module.exit_json(changed=changed, binary=easy_install,
name=name, virtualenv=env)
if __name__ == '__main__':
main()
|
gpl-3.0
|
ClearCorp/odoo-clearcorp
|
TODO-9.0/account_invoice_journal_defaults/account_invoice_journal_defaults.py
|
3
|
8382
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import netsvc
from openerp.osv import fields, orm
from openerp import models, fields, api
from openerp.tools.translate import _
class AccountInvoice(orm.Model):
_inherit = 'account.invoice'
def onchange_partner_id(self, cr, uid, ids, type, partner_id,\
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False, journal_id = False, account_id = False):
result = super(AccountInvoice, self).onchange_partner_id(cr, uid, ids, type, partner_id, date_invoice, payment_term, partner_bank_id, company_id)
#If the partner has an account_id but the journal doesn't have one, keep the account that already appears
#in the result dictionary.
if not account_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id)
acc_id = False
if journal.type == 'sale':
acc_id = journal.default_receivable_account_id
elif journal.type == 'purchase':
acc_id = journal.default_payable_account_id
elif journal.type == 'sale_refund':
acc_id = journal.default_payable_account_id
elif journal.type == 'purchase_refund':
acc_id = journal.default_receivable_account_id
if not acc_id:
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id)
if partner.id != uid:
if journal.type == 'sale':
acc_id = partner.property_account_receivable
elif journal.type == 'purchase':
acc_id = partner.property_account_payable
elif journal.type == 'sale_refund':
acc_id = partner.property_account_payable
elif journal.type == 'purchase_refund':
acc_id = partner.property_account_receivable
else:
result['value']['account_id'] = False
result['value']['account_id'] = acc_id.id
else:
del result['value']['account_id']
return result
def onchange_journal_id(self, cr, uid, ids, journal_id=False, partner_id = False, context=None):
result = super(AccountInvoice, self).onchange_journal_id(cr, uid, ids, journal_id, context)
if journal_id is not False:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
if journal.type == 'sale':
acc_id = journal.default_receivable_account_id
elif journal.type == 'purchase':
acc_id = journal.default_payable_account_id
elif journal.type == 'sale_refund':
acc_id = journal.default_payable_account_id
elif journal.type == 'purchase_refund':
acc_id = journal.default_receivable_account_id
if not acc_id:
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context)
if partner.id != uid:
if journal.type == 'sale':
acc_id = partner.property_account_receivable
elif journal.type == 'purchase':
acc_id = partner.property_account_payable
elif journal.type == 'sale_refund':
acc_id = partner.property_account_payable
elif journal.type == 'purchase_refund':
acc_id = partner.property_account_receivable
result['value']['account_id'] = acc_id.id
else:
result['value']['account_id'] = False
else:
result['value']['account_id'] = acc_id.id
return result
def create(self, cr, uid, vals, context=None):
if 'journal_id' in vals:
journal_val_id = vals['journal_id']
journal_id = self.pool.get('account.journal').search(cr,uid,[('id','=',journal_val_id)])
journal_obj = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
for journal in journal_obj:
if journal.type == 'sale':
acc_id = journal.default_receivable_account_id.id
elif journal.type == 'purchase':
acc_id = journal.default_payable_account_id.id
elif journal.type == 'sale_refund':
acc_id = journal.default_payable_account_id.id
elif journal.type == 'purchase_refund':
acc_id = journal.default_receivable_account_id.id
if journal and journal.id:
currency_id = journal.currency and journal.currency.id or journal.company_id.currency_id.id
else:
currency_id = False
if not 'account_id' in vals:
vals['account_id'] = acc_id
if not 'currency_id' in vals:
vals['currency_id'] = currency_id
return super(AccountInvoice, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'journal_id' in vals:
journal_val_id = vals['journal_id']
journal_id = self.pool.get('account.journal').search(cr,uid,[('id','=',journal_val_id)])
journal_obj = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
for journal in journal_obj:
if journal.type == 'sale':
acc_id = journal.default_receivable_account_id.id
elif journal.type == 'purchase':
acc_id = journal.default_payable_account_id.id
elif journal.type == 'sale_refund':
acc_id = journal.default_payable_account_id.id
elif journal.type == 'purchase_refund':
acc_id = journal.default_receivable_account_id.id
if journal and journal.id:
currency_id = journal.currency and journal.currency.id or journal.company_id.currency_id.id
else:
currency_id = False
if not 'account_id' in vals:
vals['account_id'] = acc_id
if not 'currency_id' in vals:
vals['currency_id'] = currency_id
return super(AccountInvoice, self).write(cr, uid, ids, vals, context=context)
class AccountJournal(models.Model):
_inherit = 'account.journal'
default_receivable_account_id= fields.Many2one('account.account', 'Default Receivable Account', help="It acts as a default receivable account")
default_payable_account_id= fields.Many2one('account.account', 'Default Payable Account', help="It acts as a default payable account")
# # # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jean/sentry
|
tests/sentry/api/endpoints/test_organization_member_team_details.py
|
2
|
5902
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import (Organization, OrganizationAccessRequest, OrganizationMemberTeam)
from sentry.testutils import APITestCase
class CreateOrganizationMemberTeamTest(APITestCase):
def test_can_join_as_owner_without_open_membership(self):
organization = self.create_organization(
name='foo',
owner=self.user,
flags=0,
)
team = self.create_team(name='foo', organization=organization)
user = self.create_user('[email protected]')
member_om = self.create_member(
organization=organization,
user=user,
role='owner',
teams=[],
)
path = reverse(
'sentry-api-0-organization-member-team-details',
args=[
organization.slug,
member_om.id,
team.slug,
]
)
self.login_as(user)
resp = self.client.post(path)
assert resp.status_code == 201
def test_cannot_join_as_member_without_open_membership(self):
organization = self.create_organization(
name='foo',
owner=self.user,
flags=0,
)
team = self.create_team(name='foo', organization=organization)
user = self.create_user('[email protected]')
member_om = self.create_member(
organization=organization,
user=user,
role='member',
teams=[],
)
path = reverse(
'sentry-api-0-organization-member-team-details',
args=[
organization.slug,
member_om.id,
team.slug,
]
)
self.login_as(user)
resp = self.client.post(path)
assert resp.status_code == 202
assert not OrganizationMemberTeam.objects.filter(
team=team,
organizationmember=member_om,
).exists()
assert OrganizationAccessRequest.objects.filter(
team=team,
member=member_om,
).exists()
def test_can_join_as_member_with_open_membership(self):
organization = self.create_organization(
name='foo',
owner=self.user,
flags=Organization.flags.allow_joinleave,
)
team = self.create_team(name='foo', organization=organization)
user = self.create_user('[email protected]')
member_om = self.create_member(
organization=organization,
user=user,
role='member',
teams=[],
)
path = reverse(
'sentry-api-0-organization-member-team-details',
args=[
organization.slug,
member_om.id,
team.slug,
]
)
self.login_as(user)
resp = self.client.post(path)
assert resp.status_code == 201
assert OrganizationMemberTeam.objects.filter(
team=team,
organizationmember=member_om,
).exists()
class DeleteOrganizationMemberTeamTest(APITestCase):
def test_can_leave_as_member(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(name='foo', organization=organization)
user = self.create_user('[email protected]')
member_om = self.create_member(
organization=organization,
user=user,
role='member',
teams=[team],
)
path = reverse(
'sentry-api-0-organization-member-team-details',
args=[
organization.slug,
member_om.id,
team.slug,
]
)
self.login_as(user)
resp = self.client.delete(path)
assert resp.status_code == 200
assert not OrganizationMemberTeam.objects.filter(
team=team,
organizationmember=member_om,
).exists()
def test_can_leave_as_non_member(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(name='foo', organization=organization)
user = self.create_user('[email protected]', is_superuser=False)
member_om = self.create_member(
organization=organization,
user=user,
role='member',
teams=[],
)
path = reverse(
'sentry-api-0-organization-member-team-details',
args=[
organization.slug,
member_om.id,
team.slug,
]
)
self.login_as(user)
resp = self.client.delete(path)
assert resp.status_code == 200
assert not OrganizationMemberTeam.objects.filter(
team=team,
organizationmember=member_om,
).exists()
def test_can_leave_as_superuser_without_membership(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(name='foo', organization=organization)
user = self.create_user('[email protected]', is_superuser=True)
member_om = self.create_member(
organization=organization,
user=user,
role='member',
teams=[],
)
path = reverse(
'sentry-api-0-organization-member-team-details',
args=[
organization.slug,
member_om.id,
team.slug,
]
)
self.login_as(user)
resp = self.client.delete(path)
assert resp.status_code == 200
assert not OrganizationMemberTeam.objects.filter(
team=team,
organizationmember=member_om,
).exists()
|
bsd-3-clause
|
convexengineering/gplibrary
|
gpkitmodels/GP/aircraft/wing/wing_test.py
|
1
|
2433
|
" wing test "
from gpkitmodels.GP.aircraft.wing.wing import Wing
from gpkitmodels.GP.aircraft.wing.wing_skin import WingSkin
from gpkitmodels.GP.aircraft.wing.wing_core import WingCore
from gpkitmodels.GP.aircraft.wing.boxspar import BoxSpar
from gpkit import Model, parse_variables
#pylint: disable=no-member, exec-used
class FlightState(Model):
""" Flight State
Variables
---------
V 50 [m/s] airspeed
rho 1.255 [kg/m^3] air density
mu 1.5e-5 [N*s/m**2] air viscosity
qne [kg/s^2/m] never exceed dynamic pressure
"""
@parse_variables(__doc__, globals())
def setup(self):
return [qne == V**2*rho*1.2]
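# Note: the Variables table in the docstring above is consumed by gpkit's
# parse_variables decorator, so V, rho, mu and qne are available as gpkit
# Variables inside setup(); the returned constraint pins the never-exceed
# dynamic pressure to qne == 1.2*rho*V**2.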
def wing_test():
" test wing models "
W = Wing()
W.substitutions[W.W] = 50
W.substitutions[W.planform.tau] = 0.115
fs = FlightState()
perf = W.flight_model(W, fs)
loading = [W.spar.loading(W, fs)]
loading[0].substitutions["W"] = 100
loading.append(W.spar.gustloading(W, fs))
loading[1].substitutions["W"] = 100
from gpkit import settings
if settings["default_solver"] == "cvxopt":
for l in loading:
for v in ["Mtip", "Stip", "wroot", "throot"]:
l.substitutions[v] = 1e-1
m = Model(perf.Cd, [
loading[1].v == fs.V,
loading[1].cl == perf.CL,
loading[1].Ww == W.W,
loading[1].Ww <= 0.5*fs.rho*fs.V**2*perf.CL*W.planform.S,
W, fs, perf, loading])
m.solve(verbosity=0)
def box_spar():
" test wing models "
Wing.sparModel = BoxSpar
W = Wing()
W.substitutions[W.W] = 50
W.substitutions[W.planform.tau] = 0.115
fs = FlightState()
perf = W.flight_model(W, fs)
loading = [W.spar.loading(W, fs)]
loading[0].substitutions["W"] = 100
loading.append(W.spar.gustloading(W, fs))
loading[1].substitutions["W"] = 100
from gpkit import settings
if settings["default_solver"] == "cvxopt":
for l in loading:
for v in ["Mtip", "Stip", "wroot", "throot"]:
l.substitutions[v] = 1e-2
m = Model(perf.Cd, [
loading[1].v == fs.V,
loading[1].cl == perf.CL,
loading[1].Ww == W.W,
loading[1].Ww <= fs.qne*perf.CL*W.planform.S,
W, fs, perf, loading])
m.solve(verbosity=0)
def test():
" tests "
wing_test()
box_spar()
if __name__ == "__main__":
test()
|
mit
|
htzy/bigfour
|
lms/djangoapps/commerce/__init__.py
|
18
|
1176
|
""" Commerce app. """
from django.conf import settings
from ecommerce_api_client.client import EcommerceApiClient
from eventtracking import tracker
def create_tracking_context(user):
""" Assembles attributes from user and request objects to be sent along
in ecommerce api calls for tracking purposes. """
return {
'lms_user_id': user.id,
'lms_client_id': tracker.get_tracker().resolve_context().get('client_id')
}
def is_commerce_service_configured():
"""
Return a Boolean indicating whether or not configuration is present to use
the external commerce service.
"""
return bool(settings.ECOMMERCE_API_URL and settings.ECOMMERCE_API_SIGNING_KEY)
def ecommerce_api_client(user):
""" Returns an E-Commerce API client setup with authentication for the specified user. """
return EcommerceApiClient(settings.ECOMMERCE_API_URL, settings.ECOMMERCE_API_SIGNING_KEY, user.username,
user.profile.name, user.email, tracking_context=create_tracking_context(user))
# this is here to support registering the signals in signals.py
from commerce import signals # pylint: disable=unused-import
|
agpl-3.0
|
irvingpop/digital-beer-menu
|
src/lib/flaskext/wtf/recaptcha/validators.py
|
27
|
2281
|
import urllib2
from flask import request, current_app
from wtforms import ValidationError
from werkzeug import url_encode
RECAPTCHA_VERIFY_SERVER = 'http://api-verify.recaptcha.net/verify'
__all__ = ["Recaptcha"]
class Recaptcha(object):
"""Validates a ReCaptcha."""
_error_codes = {
'invalid-site-public-key': 'The public key for reCAPTCHA is invalid',
'invalid-site-private-key': 'The private key for reCAPTCHA is invalid',
'invalid-referrer': 'The public key for reCAPTCHA is not valid for '
'this domain',
'verify-params-incorrect': 'The parameters passed to reCAPTCHA '
'verification are incorrect',
}
def __init__(self, message=u'Invalid word. Please try again.'):
self.message = message
def __call__(self, form, field):
challenge = request.form.get('recaptcha_challenge_field', '')
response = request.form.get('recaptcha_response_field', '')
remote_ip = request.remote_addr
if not challenge or not response:
raise ValidationError(field.gettext('This field is required.'))
if not self._validate_recaptcha(challenge, response, remote_ip):
field.recaptcha_error = 'incorrect-captcha-sol'
raise ValidationError(field.gettext(self.message))
def _validate_recaptcha(self, challenge, response, remote_addr):
"""Performs the actual validation."""
if current_app.testing:
return True
try:
private_key = current_app.config['RECAPTCHA_PRIVATE_KEY']
except KeyError:
raise RuntimeError, "No RECAPTCHA_PRIVATE_KEY config set"
data = url_encode({
'privatekey': private_key,
'remoteip': remote_addr,
'challenge': challenge,
'response': response
})
response = urllib2.urlopen(RECAPTCHA_VERIFY_SERVER, data)
if response.code != 200:
return False
rv = [l.strip() for l in response.readlines()]
if rv and rv[0] == 'true':
return True
if len(rv) > 1:
error = rv[1]
if error in self._error_codes:
raise RuntimeError(self._error_codes[error])
return False
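# Response format assumed above (legacy reCAPTCHA verify API): the first line
# of the body is "true" or "false"; when it is "false" the second line carries
# an error code such as "incorrect-captcha-sol" or one of the _error_codes keys.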
|
gpl-2.0
|
theflofly/tensorflow
|
tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_test.py
|
22
|
11376
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
ds = distributions
class MultivariateNormalDiagTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testScalarParams(self):
mu = -1.
diag = -5.
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "at least 1 dimension"):
ds.MultivariateNormalDiag(mu, diag)
def testVectorParams(self):
mu = [-1.]
diag = [-5.]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual([3, 1], dist.sample(3).get_shape())
def testDistWithBatchShapeOneThenTransformedThroughSoftplus(self):
# This complex combination of events resulted in a loss of static shape
# information when tensor_util.constant_value(self._needs_rotation) was
# being used incorrectly (resulting in always rotating).
# Batch shape = [1], event shape = [3]
mu = array_ops.zeros((1, 3))
diag = array_ops.ones((1, 3))
with self.cached_session():
base_dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
dist = ds.TransformedDistribution(
base_dist,
validate_args=True,
bijector=bijectors.Softplus())
samps = dist.sample(5) # Shape [5, 1, 3].
self.assertAllEqual([5, 1], dist.log_prob(samps).get_shape())
def testMean(self):
mu = [-1., 1]
diag = [1., -5]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual(mu, dist.mean().eval())
def testMeanWithBroadcastLoc(self):
mu = [-1.]
diag = [1., -5]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1., -1.], dist.mean().eval())
def testEntropy(self):
mu = [-1., 1]
diag = [-1., 5]
diag_mat = np.diag(diag)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=diag_mat**2)
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllClose(scipy_mvn.entropy(), dist.entropy().eval(), atol=1e-4)
def testSample(self):
mu = [-1., 1]
diag = [1., -2]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
samps = dist.sample(int(1e3), seed=0).eval()
cov_mat = array_ops.matrix_diag(diag).eval()**2
self.assertAllClose(mu, samps.mean(axis=0),
atol=0., rtol=0.05)
self.assertAllClose(cov_mat, np.cov(samps.T),
atol=0.05, rtol=0.05)
def testSingularScaleRaises(self):
mu = [-1., 1]
diag = [1., 0]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
with self.assertRaisesOpError("Singular"):
dist.sample().eval()
def testSampleWithBroadcastScale(self):
# mu corresponds to a 2-batch of 3-variate normals
mu = np.zeros([2, 3])
# diag corresponds to no batches of 3-variate normals
diag = np.ones([3])
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
mean = dist.mean()
self.assertAllEqual([2, 3], mean.get_shape())
self.assertAllClose(mu, mean.eval())
n = int(1e3)
samps = dist.sample(n, seed=0).eval()
cov_mat = array_ops.matrix_diag(diag).eval()**2
sample_cov = np.matmul(samps.transpose([1, 2, 0]),
samps.transpose([1, 0, 2])) / n
self.assertAllClose(mu, samps.mean(axis=0),
atol=0.10, rtol=0.05)
self.assertAllClose([cov_mat, cov_mat], sample_cov,
atol=0.10, rtol=0.05)
def testCovariance(self):
with self.cached_session():
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.diag(np.ones([3], dtype=np.float32)),
mvn.covariance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllEqual([2], mvn.batch_shape)
self.assertAllEqual([3], mvn.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0],
[0, 3, 0],
[0, 0, 3]],
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]]])**2.,
mvn.covariance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllEqual([2], mvn.batch_shape)
self.assertAllEqual([3], mvn.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0],
[0, 2, 0],
[0, 0, 1]],
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]]])**2.,
mvn.covariance().eval())
def testVariance(self):
with self.cached_session():
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.ones([3], dtype=np.float32),
mvn.variance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3],
[2, 2, 2]])**2.,
mvn.variance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1],
[4, 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1],
[4, 5, 6]])**2.,
mvn.variance().eval())
def testStddev(self):
with self.cached_session():
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.ones([3], dtype=np.float32),
mvn.stddev().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3],
[2, 2, 2]]),
mvn.stddev().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1],
[4, 5, 6]]),
mvn.stddev().eval())
def testMultivariateNormalDiagWithSoftplusScale(self):
mu = [-1.0, 1.0]
diag = [-1.0, -2.0]
with self.cached_session():
dist = ds.MultivariateNormalDiagWithSoftplusScale(
mu, diag, validate_args=True)
samps = dist.sample(1000, seed=0).eval()
cov_mat = array_ops.matrix_diag(nn_ops.softplus(diag)).eval()**2
self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)
def testMultivariateNormalDiagNegLogLikelihood(self):
num_draws = 50
dims = 3
with self.cached_session() as sess:
x_pl = array_ops.placeholder(dtype=dtypes.float32,
shape=[None, dims],
name="x")
mu_var = variable_scope.get_variable(
name="mu",
shape=[dims],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(1.))
sess.run([variables.global_variables_initializer()])
mvn = ds.MultivariateNormalDiag(
loc=mu_var,
scale_diag=array_ops.ones(shape=[dims], dtype=dtypes.float32))
# Typically you'd use `mvn.log_prob(x_pl)` which is always at least as
# numerically stable as `tf.log(mvn.prob(x_pl))`. However in this test
# we're testing a bug specific to `prob` and not `log_prob`;
# http://stackoverflow.com/q/45109305. (The underlying issue was not
# related to `Distributions` but that `reduce_prod` didn't correctly
# handle negative indexes.)
neg_log_likelihood = -math_ops.reduce_sum(math_ops.log(mvn.prob(x_pl)))
grad_neg_log_likelihood = gradients_impl.gradients(
neg_log_likelihood, variables.trainable_variables())
x = np.zeros([num_draws, dims], dtype=np.float32)
grad_neg_log_likelihood_ = sess.run(
grad_neg_log_likelihood,
feed_dict={x_pl: x})
self.assertEqual(1, len(grad_neg_log_likelihood_))
self.assertAllClose(grad_neg_log_likelihood_[0],
np.tile(num_draws, dims),
rtol=1e-6, atol=0.)
def testDynamicBatchShape(self):
mvn = ds.MultivariateNormalDiag(
loc=array_ops.placeholder(dtypes.float32, shape=[None, None, 2]),
scale_diag=array_ops.placeholder(dtypes.float32, shape=[None, None, 2]))
self.assertListEqual(mvn.batch_shape.as_list(), [None, None])
self.assertListEqual(mvn.event_shape.as_list(), [2])
def testDynamicEventShape(self):
mvn = ds.MultivariateNormalDiag(
loc=array_ops.placeholder(dtypes.float32, shape=[2, 3, None]),
scale_diag=array_ops.placeholder(dtypes.float32, shape=[2, 3, None]))
self.assertListEqual(mvn.batch_shape.as_list(), [2, 3])
self.assertListEqual(mvn.event_shape.as_list(), [None])
def testKLDivIdenticalGradientDefined(self):
dims = 3
with self.cached_session() as sess:
loc = array_ops.zeros([dims], dtype=dtypes.float32)
mvn = ds.MultivariateNormalDiag(
loc=loc,
scale_diag=np.ones([dims], dtype=np.float32))
g = gradients_impl.gradients(ds.kl_divergence(mvn, mvn), loc)
g_ = sess.run(g)
self.assertAllEqual(np.ones_like(g_, dtype=np.bool),
np.isfinite(g_))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
ovidiul/XCloner-Wordpress
|
vendor/sabre/dav/bin/googlecode_upload.py
|
124
|
8913
|
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: [email protected] (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = '[email protected] (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
"""Upload a file to a Google Code project's file server.
Args:
file: The local path to the file.
project_name: The name of your project on Google Code.
user_name: Your Google account name.
password: The googlecode.com password for your account.
Note that this is NOT your global Google Account password!
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
Returns: a tuple:
http_status: 201 if the upload succeeded, something else if an
error occurred.
http_reason: The human-readable string associated with http_status
file_url: If the upload succeeded, the URL of the file on Google
Code, None otherwise.
"""
# The login is the user part of [email protected]. If the login provided
# is in the full user@domain form, strip it down.
if user_name.endswith('@gmail.com'):
user_name = user_name[:user_name.index('@gmail.com')]
form_fields = [('summary', summary)]
if labels is not None:
form_fields.extend([('label', l.strip()) for l in labels])
content_type, body = encode_upload_request(form_fields, file)
upload_host = '%s.googlecode.com' % project_name
upload_uri = '/files'
auth_token = base64.b64encode('%s:%s'% (user_name, password))
headers = {
'Authorization': 'Basic %s' % auth_token,
'User-Agent': 'Googlecode.com uploader v0.9.4',
'Content-Type': content_type,
}
server = httplib.HTTPSConnection(upload_host)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
if resp.status == 201:
location = resp.getheader('Location', None)
else:
location = None
return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
"""Encode the given fields and file into a multipart form body.
  fields is a sequence of (name, value) pairs. file_path is the path of
  the file to upload. The file will be uploaded to Google Code with
the same file name.
Returns: (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
CRLF = '\r\n'
body = []
# Add the metadata about the upload first
for key, value in fields:
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="%s"' % key,
'',
value,
])
# Now add the file itself
file_name = os.path.basename(file_path)
f = open(file_path, 'rb')
file_content = f.read()
f.close()
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="filename"; filename="%s"'
% file_name,
# The upload server determines the mime-type, no need to set it.
'Content-Type: application/octet-stream',
'',
file_content,
])
# Finalize the form body
body.extend(['--' + BOUNDARY + '--', ''])
return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
user_name=None, password=None, tries=3):
"""Find credentials and upload a file to a Google Code project's file server.
file_path, project_name, summary, and labels are passed as-is to upload.
Args:
file_path: The local path to the file.
project_name: The name of your project on Google Code.
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
    tries: How many attempts to make.
"""
while tries > 0:
if user_name is None:
# Read username if not specified or loaded from svn config, or on
# subsequent tries.
sys.stdout.write('Please enter your googlecode.com username: ')
sys.stdout.flush()
user_name = sys.stdin.readline().rstrip()
if password is None:
# Read password if not loaded from svn config, or on subsequent tries.
print 'Please enter your googlecode.com password.'
print '** Note that this is NOT your Gmail account password! **'
print 'It is the password you use to access Subversion repositories,'
print 'and can be found here: http://code.google.com/hosting/settings'
password = getpass.getpass()
status, reason, url = upload(file_path, project_name, user_name, password,
summary, labels)
# Returns 403 Forbidden instead of 401 Unauthorized for bad
# credentials as of 2007-07-17.
if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Reset for another try.
user_name = password = None
tries = tries - 1
else:
# We're done.
break
return status, reason, url
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-w', '--password', dest='password',
help='Your Google Code password')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of comma-separated labels to attach '
'to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.user, options.password)
if url:
print 'The file was uploaded successfully.'
print 'URL: %s' % url
return 0
else:
print 'An error occurred. Your file was not uploaded.'
print 'Google Code upload server said: %s (%s)' % (reason, status)
return 1
if __name__ == '__main__':
sys.exit(main())
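# A minimal sketch (not part of the original script) of calling upload()
# directly from another Python 2 module instead of going through main();
# the project name, file path, credentials and labels below are hypothetical
# placeholders, not values taken from this script.
#
#   status, reason, url = upload('dist/release-1.0.zip', 'exampleproject',
#                                'someuser', 'not-a-real-password',
#                                'Release 1.0 tarball', labels=['Type-Archive'])
#   if status == 201:
#       print 'Uploaded to %s' % url
#   else:
#       print 'Upload failed: %s (%s)' % (reason, status)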
|
gpl-2.0
|
ibm-research-ireland/sparkoscope
|
python/pyspark/ml/tests.py
|
10
|
67778
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for MLlib Python DataFrame-based APIs.
"""
import sys
if sys.version > '3':
xrange = range
basestring = str
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from shutil import rmtree
import tempfile
import array as pyarray
import numpy as np
from numpy import (
array, array_equal, zeros, inf, random, exp, dot, all, mean, abs, arange, tile, ones)
from numpy import sum as array_sum
import inspect
from pyspark import keyword_only, SparkContext
from pyspark.ml import Estimator, Model, Pipeline, PipelineModel, Transformer
from pyspark.ml.classification import *
from pyspark.ml.clustering import *
from pyspark.ml.evaluation import BinaryClassificationEvaluator, RegressionEvaluator
from pyspark.ml.feature import *
from pyspark.ml.linalg import Vector, SparseVector, DenseVector, VectorUDT,\
DenseMatrix, SparseMatrix, Vectors, Matrices, MatrixUDT, _convert_to_vector
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasMaxIter, HasInputCol, HasSeed
from pyspark.ml.recommendation import ALS
from pyspark.ml.regression import LinearRegression, DecisionTreeRegressor, \
GeneralizedLinearRegression
from pyspark.ml.tuning import *
from pyspark.ml.wrapper import JavaParams
from pyspark.ml.common import _java2py
from pyspark.serializers import PickleSerializer
from pyspark.sql import DataFrame, Row, SparkSession
from pyspark.sql.functions import rand
from pyspark.sql.utils import IllegalArgumentException
from pyspark.storagelevel import *
from pyspark.tests import ReusedPySparkTestCase as PySparkTestCase
ser = PickleSerializer()
class MLlibTestCase(unittest.TestCase):
def setUp(self):
self.sc = SparkContext('local[4]', "MLlib tests")
self.spark = SparkSession(self.sc)
def tearDown(self):
self.spark.stop()
class SparkSessionTestCase(PySparkTestCase):
@classmethod
def setUpClass(cls):
PySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
PySparkTestCase.tearDownClass()
cls.spark.stop()
class MockDataset(DataFrame):
def __init__(self):
self.index = 0
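        # MockTransformer increments this counter on every _transform call, so
        # the pipeline tests below can track how many stages touched the dataset.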
class HasFake(Params):
def __init__(self):
super(HasFake, self).__init__()
self.fake = Param(self, "fake", "fake param")
def getFake(self):
return self.getOrDefault(self.fake)
class MockTransformer(Transformer, HasFake):
def __init__(self):
super(MockTransformer, self).__init__()
self.dataset_index = None
def _transform(self, dataset):
self.dataset_index = dataset.index
dataset.index += 1
return dataset
class MockEstimator(Estimator, HasFake):
def __init__(self):
super(MockEstimator, self).__init__()
self.dataset_index = None
def _fit(self, dataset):
self.dataset_index = dataset.index
model = MockModel()
self._copyValues(model)
return model
class MockModel(MockTransformer, Model, HasFake):
pass
class ParamTypeConversionTests(PySparkTestCase):
"""
Test that param type conversion happens.
"""
def test_int(self):
lr = LogisticRegression(maxIter=5.0)
self.assertEqual(lr.getMaxIter(), 5)
self.assertTrue(type(lr.getMaxIter()) == int)
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter="notAnInt"))
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter=5.1))
def test_float(self):
lr = LogisticRegression(tol=1)
self.assertEqual(lr.getTol(), 1.0)
self.assertTrue(type(lr.getTol()) == float)
self.assertRaises(TypeError, lambda: LogisticRegression(tol="notAFloat"))
def test_vector(self):
ewp = ElementwiseProduct(scalingVec=[1, 3])
self.assertEqual(ewp.getScalingVec(), DenseVector([1.0, 3.0]))
ewp = ElementwiseProduct(scalingVec=np.array([1.2, 3.4]))
self.assertEqual(ewp.getScalingVec(), DenseVector([1.2, 3.4]))
self.assertRaises(TypeError, lambda: ElementwiseProduct(scalingVec=["a", "b"]))
def test_list(self):
l = [0, 1]
for lst_like in [l, np.array(l), DenseVector(l), SparseVector(len(l),
range(len(l)), l), pyarray.array('l', l), xrange(2), tuple(l)]:
converted = TypeConverters.toList(lst_like)
self.assertEqual(type(converted), list)
self.assertListEqual(converted, l)
def test_list_int(self):
for indices in [[1.0, 2.0], np.array([1.0, 2.0]), DenseVector([1.0, 2.0]),
SparseVector(2, {0: 1.0, 1: 2.0}), xrange(1, 3), (1.0, 2.0),
pyarray.array('d', [1.0, 2.0])]:
vs = VectorSlicer(indices=indices)
self.assertListEqual(vs.getIndices(), [1, 2])
self.assertTrue(all([type(v) == int for v in vs.getIndices()]))
self.assertRaises(TypeError, lambda: VectorSlicer(indices=["a", "b"]))
def test_list_float(self):
b = Bucketizer(splits=[1, 4])
self.assertEqual(b.getSplits(), [1.0, 4.0])
self.assertTrue(all([type(v) == float for v in b.getSplits()]))
self.assertRaises(TypeError, lambda: Bucketizer(splits=["a", 1.0]))
def test_list_string(self):
for labels in [np.array(['a', u'b']), ['a', u'b'], np.array(['a', 'b'])]:
idx_to_string = IndexToString(labels=labels)
self.assertListEqual(idx_to_string.getLabels(), ['a', 'b'])
self.assertRaises(TypeError, lambda: IndexToString(labels=['a', 2]))
def test_string(self):
lr = LogisticRegression()
for col in ['features', u'features', np.str_('features')]:
lr.setFeaturesCol(col)
self.assertEqual(lr.getFeaturesCol(), 'features')
self.assertRaises(TypeError, lambda: LogisticRegression(featuresCol=2.3))
def test_bool(self):
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept=1))
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept="false"))
class PipelineTests(PySparkTestCase):
def test_pipeline(self):
dataset = MockDataset()
estimator0 = MockEstimator()
transformer1 = MockTransformer()
estimator2 = MockEstimator()
transformer3 = MockTransformer()
pipeline = Pipeline(stages=[estimator0, transformer1, estimator2, transformer3])
pipeline_model = pipeline.fit(dataset, {estimator0.fake: 0, transformer1.fake: 1})
model0, transformer1, model2, transformer3 = pipeline_model.stages
self.assertEqual(0, model0.dataset_index)
self.assertEqual(0, model0.getFake())
self.assertEqual(1, transformer1.dataset_index)
self.assertEqual(1, transformer1.getFake())
self.assertEqual(2, dataset.index)
self.assertIsNone(model2.dataset_index, "The last model shouldn't be called in fit.")
self.assertIsNone(transformer3.dataset_index,
"The last transformer shouldn't be called in fit.")
dataset = pipeline_model.transform(dataset)
self.assertEqual(2, model0.dataset_index)
self.assertEqual(3, transformer1.dataset_index)
self.assertEqual(4, model2.dataset_index)
self.assertEqual(5, transformer3.dataset_index)
self.assertEqual(6, dataset.index)
def test_identity_pipeline(self):
dataset = MockDataset()
def doTransform(pipeline):
pipeline_model = pipeline.fit(dataset)
return pipeline_model.transform(dataset)
# check that empty pipeline did not perform any transformation
self.assertEqual(dataset.index, doTransform(Pipeline(stages=[])).index)
# check that failure to set stages param will raise KeyError for missing param
self.assertRaises(KeyError, lambda: doTransform(Pipeline()))
class TestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(TestParams, self).__init__()
self._setDefault(maxIter=10)
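        # @keyword_only captures the keyword arguments passed to __init__ and
        # stores them on the wrapped function as _input_kwargs; setParams then
        # applies them to this Params instance.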
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
class OtherTestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(OtherTestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
class HasThrowableProperty(Params):
def __init__(self):
super(HasThrowableProperty, self).__init__()
self.p = Param(self, "none", "empty param")
@property
def test_property(self):
raise RuntimeError("Test property to raise error when invoked")
class ParamTests(PySparkTestCase):
def test_copy_new_parent(self):
testParams = TestParams()
# Copying an instantiated param should fail
with self.assertRaises(ValueError):
testParams.maxIter._copy_new_parent(testParams)
# Copying a dummy param should succeed
TestParams.maxIter._copy_new_parent(testParams)
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_param(self):
testParams = TestParams()
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_hasparam(self):
testParams = TestParams()
self.assertTrue(all([testParams.hasParam(p.name) for p in testParams.params]))
self.assertFalse(testParams.hasParam("notAParameter"))
def test_params(self):
testParams = TestParams()
maxIter = testParams.maxIter
inputCol = testParams.inputCol
seed = testParams.seed
params = testParams.params
self.assertEqual(params, [inputCol, maxIter, seed])
self.assertTrue(testParams.hasParam(maxIter.name))
self.assertTrue(testParams.hasDefault(maxIter))
self.assertFalse(testParams.isSet(maxIter))
self.assertTrue(testParams.isDefined(maxIter))
self.assertEqual(testParams.getMaxIter(), 10)
testParams.setMaxIter(100)
self.assertTrue(testParams.isSet(maxIter))
self.assertEqual(testParams.getMaxIter(), 100)
self.assertTrue(testParams.hasParam(inputCol.name))
self.assertFalse(testParams.hasDefault(inputCol))
self.assertFalse(testParams.isSet(inputCol))
self.assertFalse(testParams.isDefined(inputCol))
with self.assertRaises(KeyError):
testParams.getInputCol()
# Since the default is normally random, set it to a known number for debug str
testParams._setDefault(seed=41)
testParams.setSeed(43)
self.assertEqual(
testParams.explainParams(),
"\n".join(["inputCol: input column name. (undefined)",
"maxIter: max number of iterations (>= 0). (default: 10, current: 100)",
"seed: random seed. (default: 41, current: 43)"]))
def test_kmeans_param(self):
algo = KMeans()
self.assertEqual(algo.getInitMode(), "k-means||")
algo.setK(10)
self.assertEqual(algo.getK(), 10)
algo.setInitSteps(10)
self.assertEqual(algo.getInitSteps(), 10)
def test_hasseed(self):
noSeedSpecd = TestParams()
withSeedSpecd = TestParams(seed=42)
other = OtherTestParams()
# Check that we no longer use 42 as the magic number
self.assertNotEqual(noSeedSpecd.getSeed(), 42)
origSeed = noSeedSpecd.getSeed()
# Check that we only compute the seed once
self.assertEqual(noSeedSpecd.getSeed(), origSeed)
# Check that a specified seed is honored
self.assertEqual(withSeedSpecd.getSeed(), 42)
# Check that a different class has a different seed
self.assertNotEqual(other.getSeed(), noSeedSpecd.getSeed())
def test_param_property_error(self):
param_store = HasThrowableProperty()
self.assertRaises(RuntimeError, lambda: param_store.test_property)
params = param_store.params # should not invoke the property 'test_property'
self.assertEqual(len(params), 1)
def test_word2vec_param(self):
model = Word2Vec().setWindowSize(6)
# Check windowSize is set properly
self.assertEqual(model.getWindowSize(), 6)
class EvaluatorTests(SparkSessionTestCase):
def test_java_params(self):
"""
This tests a bug fixed by SPARK-18274 which causes multiple copies
of a Params instance in Python to be linked to the same Java instance.
"""
evaluator = RegressionEvaluator(metricName="r2")
df = self.spark.createDataFrame([Row(label=1.0, prediction=1.1)])
evaluator.evaluate(df)
self.assertEqual(evaluator._java_obj.getMetricName(), "r2")
evaluatorCopy = evaluator.copy({evaluator.metricName: "mae"})
evaluator.evaluate(df)
evaluatorCopy.evaluate(df)
self.assertEqual(evaluator._java_obj.getMetricName(), "r2")
self.assertEqual(evaluatorCopy._java_obj.getMetricName(), "mae")
class FeatureTests(SparkSessionTestCase):
def test_binarizer(self):
b0 = Binarizer()
self.assertListEqual(b0.params, [b0.inputCol, b0.outputCol, b0.threshold])
self.assertTrue(all([~b0.isSet(p) for p in b0.params]))
self.assertTrue(b0.hasDefault(b0.threshold))
self.assertEqual(b0.getThreshold(), 0.0)
b0.setParams(inputCol="input", outputCol="output").setThreshold(1.0)
self.assertTrue(all([b0.isSet(p) for p in b0.params]))
self.assertEqual(b0.getThreshold(), 1.0)
self.assertEqual(b0.getInputCol(), "input")
self.assertEqual(b0.getOutputCol(), "output")
b0c = b0.copy({b0.threshold: 2.0})
self.assertEqual(b0c.uid, b0.uid)
self.assertListEqual(b0c.params, b0.params)
self.assertEqual(b0c.getThreshold(), 2.0)
b1 = Binarizer(threshold=2.0, inputCol="input", outputCol="output")
self.assertNotEqual(b1.uid, b0.uid)
self.assertEqual(b1.getThreshold(), 2.0)
self.assertEqual(b1.getInputCol(), "input")
self.assertEqual(b1.getOutputCol(), "output")
def test_idf(self):
dataset = self.spark.createDataFrame([
(DenseVector([1.0, 2.0]),),
(DenseVector([0.0, 1.0]),),
(DenseVector([3.0, 0.2]),)], ["tf"])
idf0 = IDF(inputCol="tf")
self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
self.assertEqual(idf0m.uid, idf0.uid,
"Model should inherit the UID from its parent estimator.")
output = idf0m.transform(dataset)
self.assertIsNotNone(output.head().idf)
def test_ngram(self):
dataset = self.spark.createDataFrame([
Row(input=["a", "b", "c", "d", "e"])])
ngram0 = NGram(n=4, inputCol="input", outputCol="output")
self.assertEqual(ngram0.getN(), 4)
self.assertEqual(ngram0.getInputCol(), "input")
self.assertEqual(ngram0.getOutputCol(), "output")
transformedDF = ngram0.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a b c d", "b c d e"])
def test_stopwordsremover(self):
dataset = self.spark.createDataFrame([Row(input=["a", "panda"])])
stopWordRemover = StopWordsRemover(inputCol="input", outputCol="output")
# Default
self.assertEqual(stopWordRemover.getInputCol(), "input")
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["panda"])
self.assertEqual(type(stopWordRemover.getStopWords()), list)
self.assertTrue(isinstance(stopWordRemover.getStopWords()[0], basestring))
# Custom
stopwords = ["panda"]
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getInputCol(), "input")
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a"])
# with language selection
stopwords = StopWordsRemover.loadDefaultStopWords("turkish")
dataset = self.spark.createDataFrame([Row(input=["acaba", "ama", "biri"])])
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, [])
def test_count_vectorizer_with_binary(self):
dataset = self.spark.createDataFrame([
(0, "a a a b b c".split(' '), SparseVector(3, {0: 1.0, 1: 1.0, 2: 1.0}),),
(1, "a a".split(' '), SparseVector(3, {0: 1.0}),),
(2, "a b".split(' '), SparseVector(3, {0: 1.0, 1: 1.0}),),
(3, "c".split(' '), SparseVector(3, {2: 1.0}),)], ["id", "words", "expected"])
cv = CountVectorizer(binary=True, inputCol="words", outputCol="features")
model = cv.fit(dataset)
transformedList = model.transform(dataset).select("features", "expected").collect()
for r in transformedList:
feature, expected = r
self.assertEqual(feature, expected)
def test_rformula_force_index_label(self):
df = self.spark.createDataFrame([
(1.0, 1.0, "a"),
(0.0, 2.0, "b"),
(1.0, 0.0, "a")], ["y", "x", "s"])
        # Does not index the label by default since it is of numeric type.
rf = RFormula(formula="y ~ x + s")
model = rf.fit(df)
transformedDF = model.transform(df)
self.assertEqual(transformedDF.head().label, 1.0)
# Force to index label.
rf2 = RFormula(formula="y ~ x + s").setForceIndexLabel(True)
model2 = rf2.fit(df)
transformedDF2 = model2.transform(df)
self.assertEqual(transformedDF2.head().label, 0.0)
class HasInducedError(Params):
def __init__(self):
super(HasInducedError, self).__init__()
self.inducedError = Param(self, "inducedError",
"Uniformly-distributed error added to feature")
def getInducedError(self):
return self.getOrDefault(self.inducedError)
class InducedErrorModel(Model, HasInducedError):
def __init__(self):
super(InducedErrorModel, self).__init__()
def _transform(self, dataset):
return dataset.withColumn("prediction",
dataset.feature + (rand(0) * self.getInducedError()))
class InducedErrorEstimator(Estimator, HasInducedError):
def __init__(self, inducedError=1.0):
super(InducedErrorEstimator, self).__init__()
self._set(inducedError=inducedError)
def _fit(self, dataset):
model = InducedErrorModel()
self._copyValues(model)
return model
class CrossValidatorTests(SparkSessionTestCase):
def test_copy(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvCopied = cv.copy()
self.assertEqual(cv.getEstimator().uid, cvCopied.getEstimator().uid)
cvModel = cv.fit(dataset)
cvModelCopied = cvModel.copy()
for index in range(len(cvModel.avgMetrics)):
self.assertTrue(abs(cvModel.avgMetrics[index] - cvModelCopied.avgMetrics[index])
< 0.0001)
def test_fit_minimize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
def test_fit_maximize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
def test_save_load(self):
# This tests saving and loading the trained model only.
# Save/load for CrossValidator will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
lrModel = cvModel.bestModel
cvModelPath = temp_path + "/cvModel"
lrModel.save(cvModelPath)
loadedLrModel = LogisticRegressionModel.load(cvModelPath)
self.assertEqual(loadedLrModel.uid, lrModel.uid)
self.assertEqual(loadedLrModel.intercept, lrModel.intercept)
class TrainValidationSplitTests(SparkSessionTestCase):
def test_fit_minimize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
validationMetrics = tvsModel.validationMetrics
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
self.assertEqual(len(grid), len(validationMetrics),
"validationMetrics has the same size of grid parameter")
self.assertEqual(0.0, min(validationMetrics))
def test_fit_maximize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
validationMetrics = tvsModel.validationMetrics
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
self.assertEqual(len(grid), len(validationMetrics),
"validationMetrics has the same size of grid parameter")
self.assertEqual(1.0, max(validationMetrics))
def test_save_load(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
lrModel = tvsModel.bestModel
tvsModelPath = temp_path + "/tvsModel"
lrModel.save(tvsModelPath)
loadedLrModel = LogisticRegressionModel.load(tvsModelPath)
self.assertEqual(loadedLrModel.uid, lrModel.uid)
self.assertEqual(loadedLrModel.intercept, lrModel.intercept)
def test_copy(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsCopied = tvs.copy()
tvsModelCopied = tvsModel.copy()
self.assertEqual(tvs.getEstimator().uid, tvsCopied.getEstimator().uid,
"Copied TrainValidationSplit has the same uid of Estimator")
self.assertEqual(tvsModel.bestModel.uid, tvsModelCopied.bestModel.uid)
self.assertEqual(len(tvsModel.validationMetrics),
len(tvsModelCopied.validationMetrics),
"Copied validationMetrics has the same size of the original")
for index in range(len(tvsModel.validationMetrics)):
self.assertEqual(tvsModel.validationMetrics[index],
tvsModelCopied.validationMetrics[index])
class PersistenceTest(SparkSessionTestCase):
def test_linear_regression(self):
lr = LinearRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/lr"
lr.save(lr_path)
lr2 = LinearRegression.load(lr_path)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(type(lr.uid), type(lr2.uid))
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LinearRegression instance uid (%s) did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LinearRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_logistic_regression(self):
lr = LogisticRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/logreg"
lr.save(lr_path)
lr2 = LogisticRegression.load(lr_path)
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LogisticRegression instance uid (%s) "
"did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LogisticRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def _compare_params(self, m1, m2, param):
"""
Compare 2 ML Params instances for the given param, and assert both have the same param value
and parent. The param must be a parameter of m1.
"""
        # Prevent a key-not-found error in case some param is in neither the
        # paramMap nor the defaultParamMap.
if m1.isDefined(param):
paramValue1 = m1.getOrDefault(param)
paramValue2 = m2.getOrDefault(m2.getParam(param.name))
if isinstance(paramValue1, Params):
self._compare_pipelines(paramValue1, paramValue2)
else:
self.assertEqual(paramValue1, paramValue2) # for general types param
# Assert parents are equal
self.assertEqual(param.parent, m2.getParam(param.name).parent)
else:
            # If the param is not defined in m1, then it should not be defined
            # in m2 either. See SPARK-14931.
self.assertFalse(m2.isDefined(m2.getParam(param.name)))
def _compare_pipelines(self, m1, m2):
"""
Compare 2 ML types, asserting that they are equivalent.
This currently supports:
- basic types
- Pipeline, PipelineModel
- OneVsRest, OneVsRestModel
This checks:
- uid
- type
- Param values and parents
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
if isinstance(m1, JavaParams):
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
self._compare_params(m1, m2, p)
elif isinstance(m1, Pipeline):
self.assertEqual(len(m1.getStages()), len(m2.getStages()))
for s1, s2 in zip(m1.getStages(), m2.getStages()):
self._compare_pipelines(s1, s2)
elif isinstance(m1, PipelineModel):
self.assertEqual(len(m1.stages), len(m2.stages))
for s1, s2 in zip(m1.stages, m2.stages):
self._compare_pipelines(s1, s2)
elif isinstance(m1, OneVsRest) or isinstance(m1, OneVsRestModel):
for p in m1.params:
self._compare_params(m1, m2, p)
if isinstance(m1, OneVsRestModel):
self.assertEqual(len(m1.models), len(m2.models))
for x, y in zip(m1.models, m2.models):
self._compare_pipelines(x, y)
else:
raise RuntimeError("_compare_pipelines does not yet support type: %s" % type(m1))
def test_pipeline_persistence(self):
"""
Pipeline[HashingTF, PCA]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
pl = Pipeline(stages=[tf, pca])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_nested_pipeline_persistence(self):
"""
Pipeline[HashingTF, Pipeline[PCA]]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
p0 = Pipeline(stages=[pca])
pl = Pipeline(stages=[tf, p0])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_onevsrest(self):
temp_path = tempfile.mkdtemp()
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))] * 10,
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
model = ovr.fit(df)
ovrPath = temp_path + "/ovr"
ovr.save(ovrPath)
loadedOvr = OneVsRest.load(ovrPath)
self._compare_pipelines(ovr, loadedOvr)
modelPath = temp_path + "/ovrModel"
model.save(modelPath)
loadedModel = OneVsRestModel.load(modelPath)
self._compare_pipelines(model, loadedModel)
def test_decisiontree_classifier(self):
dt = DecisionTreeClassifier(maxDepth=1)
path = tempfile.mkdtemp()
dtc_path = path + "/dtc"
dt.save(dtc_path)
dt2 = DecisionTreeClassifier.load(dtc_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeClassifier instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeClassifier instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_decisiontree_regressor(self):
dt = DecisionTreeRegressor(maxDepth=1)
path = tempfile.mkdtemp()
dtr_path = path + "/dtr"
dt.save(dtr_path)
        dt2 = DecisionTreeRegressor.load(dtr_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeRegressor instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeRegressor instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
class LDATest(SparkSessionTestCase):
def _compare(self, m1, m2):
"""
Temp method for comparing instances.
TODO: Replace with generic implementation once SPARK-14706 is merged.
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
if m1.isDefined(p):
self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))
self.assertEqual(p.parent, m2.getParam(p.name).parent)
if isinstance(m1, LDAModel):
self.assertEqual(m1.vocabSize(), m2.vocabSize())
self.assertEqual(m1.topicsMatrix(), m2.topicsMatrix())
def test_persistence(self):
# Test save/load for LDA, LocalLDAModel, DistributedLDAModel.
df = self.spark.createDataFrame([
[1, Vectors.dense([0.0, 1.0])],
[2, Vectors.sparse(2, {0: 1.0})],
], ["id", "features"])
# Fit model
lda = LDA(k=2, seed=1, optimizer="em")
distributedModel = lda.fit(df)
self.assertTrue(distributedModel.isDistributed())
localModel = distributedModel.toLocal()
self.assertFalse(localModel.isDistributed())
# Define paths
path = tempfile.mkdtemp()
lda_path = path + "/lda"
dist_model_path = path + "/distLDAModel"
local_model_path = path + "/localLDAModel"
# Test LDA
lda.save(lda_path)
lda2 = LDA.load(lda_path)
self._compare(lda, lda2)
# Test DistributedLDAModel
distributedModel.save(dist_model_path)
distributedModel2 = DistributedLDAModel.load(dist_model_path)
self._compare(distributedModel, distributedModel2)
# Test LocalLDAModel
localModel.save(local_model_path)
localModel2 = LocalLDAModel.load(local_model_path)
self._compare(localModel, localModel2)
# Clean up
try:
rmtree(path)
except OSError:
pass
class TrainingSummaryTest(SparkSessionTestCase):
def test_linear_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight",
fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertAlmostEqual(s.explainedVariance, 0.25, 2)
self.assertAlmostEqual(s.meanAbsoluteError, 0.0)
self.assertAlmostEqual(s.meanSquaredError, 0.0)
self.assertAlmostEqual(s.rootMeanSquaredError, 0.0)
self.assertAlmostEqual(s.r2, 1.0, 2)
self.assertTrue(isinstance(s.residuals, DataFrame))
self.assertEqual(s.numInstances, 2)
devResiduals = s.devianceResiduals
self.assertTrue(isinstance(devResiduals, list) and isinstance(devResiduals[0], float))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
# test evaluation (with training dataset) produces a summary with same values
        # one check is enough to verify a summary is returned; the Scala version runs the full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.explainedVariance, s.explainedVariance)
def test_glr_summary(self):
from pyspark.ml.linalg import Vectors
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
glr = GeneralizedLinearRegression(family="gaussian", link="identity", weightCol="weight",
fitIntercept=False)
model = glr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertEqual(s.numIterations, 1) # this should default to a single iteration of WLS
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.residuals(), DataFrame))
self.assertTrue(isinstance(s.residuals("pearson"), DataFrame))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
self.assertEqual(s.degreesOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedomNull, 2)
self.assertEqual(s.rank, 1)
self.assertTrue(isinstance(s.solver, basestring))
self.assertTrue(isinstance(s.aic, float))
self.assertTrue(isinstance(s.deviance, float))
self.assertTrue(isinstance(s.nullDeviance, float))
self.assertTrue(isinstance(s.dispersion, float))
# test evaluation (with training dataset) produces a summary with same values
        # one check is enough to verify a summary is returned; the Scala version runs the full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.deviance, s.deviance)
def test_logistic_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
# test evaluation (with training dataset) produces a summary with same values
        # one check is enough to verify a summary is returned; the Scala version runs the full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_gaussian_mixture_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
gmm = GaussianMixture(k=2)
model = gmm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertTrue(isinstance(s.probability, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
def test_bisecting_kmeans_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
bkm = BisectingKMeans(k=2)
model = bkm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
def test_kmeans_summary(self):
data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
(Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=2, seed=1)
model = kmeans.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
class OneVsRestTests(SparkSessionTestCase):
def test_copy(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
ovr1 = ovr.copy({lr.maxIter: 10})
self.assertEqual(ovr.getClassifier().getMaxIter(), 5)
self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)
model = ovr.fit(df)
model1 = model.copy({model.predictionCol: "indexed"})
self.assertEqual(model1.getPredictionCol(), "indexed")
def test_output_columns(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
model = ovr.fit(df)
output = model.transform(df)
self.assertEqual(output.columns, ["label", "features", "prediction"])
class HashingTFTest(SparkSessionTestCase):
def test_apply_binary_term_freqs(self):
df = self.spark.createDataFrame([(0, ["a", "a", "b", "c", "c", "c"])], ["id", "words"])
n = 10
hashingTF = HashingTF()
hashingTF.setInputCol("words").setOutputCol("features").setNumFeatures(n).setBinary(True)
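        # With binary=True every term contributes at most 1.0 to its hashed
        # bucket, so the repeated tokens "a" and "c" still yield 1.0 rather
        # than their raw counts.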
output = hashingTF.transform(df)
features = output.select("features").first().features.toArray()
expected = Vectors.dense([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).toArray()
for i in range(0, n):
self.assertAlmostEqual(features[i], expected[i], 14, "Error at " + str(i) +
": expected " + str(expected[i]) + ", got " + str(features[i]))
class ALSTest(SparkSessionTestCase):
def test_storage_levels(self):
df = self.spark.createDataFrame(
[(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
["user", "item", "rating"])
als = ALS().setMaxIter(1).setRank(1)
# test default params
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als.getFinalStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "MEMORY_AND_DISK")
# test non-default params
als.setIntermediateStorageLevel("MEMORY_ONLY_2")
als.setFinalStorageLevel("DISK_ONLY")
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als.getFinalStorageLevel(), "DISK_ONLY")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "DISK_ONLY")
class DefaultValuesTests(PySparkTestCase):
"""
Test :py:class:`JavaParams` classes to see if their default Param values match
those in their Scala counterparts.
"""
def check_params(self, py_stage):
if not hasattr(py_stage, "_to_java"):
return
java_stage = py_stage._to_java()
if java_stage is None:
return
for p in py_stage.params:
java_param = java_stage.getParam(p.name)
py_has_default = py_stage.hasDefault(p)
java_has_default = java_stage.hasDefault(java_param)
self.assertEqual(py_has_default, java_has_default,
"Default value mismatch of param %s for Params %s"
% (p.name, str(py_stage)))
if py_has_default:
if p.name == "seed":
return # Random seeds between Spark and PySpark are different
java_default =\
_java2py(self.sc, java_stage.clear(java_param).getOrDefault(java_param))
py_stage._clear(p)
py_default = py_stage.getOrDefault(p)
self.assertEqual(java_default, py_default,
"Java default %s != python default %s of param %s for Params %s"
% (str(java_default), str(py_default), p.name, str(py_stage)))
def test_java_params(self):
import pyspark.ml.feature
import pyspark.ml.classification
import pyspark.ml.clustering
import pyspark.ml.pipeline
import pyspark.ml.recommendation
import pyspark.ml.regression
modules = [pyspark.ml.feature, pyspark.ml.classification, pyspark.ml.clustering,
pyspark.ml.pipeline, pyspark.ml.recommendation, pyspark.ml.regression]
for module in modules:
for name, cls in inspect.getmembers(module, inspect.isclass):
if not name.endswith('Model') and issubclass(cls, JavaParams)\
and not inspect.isabstract(cls):
self.check_params(cls())
def _squared_distance(a, b):
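    # Dispatch so that whichever argument is actually a Vector supplies the
    # squared_distance implementation; the other side may be a plain list,
    # pyarray or numpy array.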
if isinstance(a, Vector):
return a.squared_distance(b)
else:
return b.squared_distance(a)
class VectorTests(MLlibTestCase):
def _test_serialize(self, v):
self.assertEqual(v, ser.loads(ser.dumps(v)))
jvec = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(v)))
nv = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvec)))
self.assertEqual(v, nv)
vs = [v] * 100
jvecs = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(vs)))
nvs = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvecs)))
self.assertEqual(vs, nvs)
def test_serialize(self):
self._test_serialize(DenseVector(range(10)))
self._test_serialize(DenseVector(array([1., 2., 3., 4.])))
self._test_serialize(DenseVector(pyarray.array('d', range(10))))
self._test_serialize(SparseVector(4, {1: 1, 3: 2}))
self._test_serialize(SparseVector(3, {}))
self._test_serialize(DenseMatrix(2, 3, range(6)))
sm1 = SparseMatrix(
3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
self._test_serialize(sm1)
def test_dot(self):
sv = SparseVector(4, {1: 1, 3: 2})
dv = DenseVector(array([1., 2., 3., 4.]))
lst = DenseVector([1, 2, 3, 4])
mat = array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
arr = pyarray.array('d', [0, 1, 2, 3])
self.assertEqual(10.0, sv.dot(dv))
self.assertTrue(array_equal(array([3., 6., 9., 12.]), sv.dot(mat)))
self.assertEqual(30.0, dv.dot(dv))
self.assertTrue(array_equal(array([10., 20., 30., 40.]), dv.dot(mat)))
self.assertEqual(30.0, lst.dot(dv))
self.assertTrue(array_equal(array([10., 20., 30., 40.]), lst.dot(mat)))
self.assertEqual(7.0, sv.dot(arr))
def test_squared_distance(self):
sv = SparseVector(4, {1: 1, 3: 2})
dv = DenseVector(array([1., 2., 3., 4.]))
lst = DenseVector([4, 3, 2, 1])
lst1 = [4, 3, 2, 1]
arr = pyarray.array('d', [0, 2, 1, 3])
narr = array([0, 2, 1, 3])
self.assertEqual(15.0, _squared_distance(sv, dv))
self.assertEqual(25.0, _squared_distance(sv, lst))
self.assertEqual(20.0, _squared_distance(dv, lst))
self.assertEqual(15.0, _squared_distance(dv, sv))
self.assertEqual(25.0, _squared_distance(lst, sv))
self.assertEqual(20.0, _squared_distance(lst, dv))
self.assertEqual(0.0, _squared_distance(sv, sv))
self.assertEqual(0.0, _squared_distance(dv, dv))
self.assertEqual(0.0, _squared_distance(lst, lst))
self.assertEqual(25.0, _squared_distance(sv, lst1))
self.assertEqual(3.0, _squared_distance(sv, arr))
self.assertEqual(3.0, _squared_distance(sv, narr))
def test_hash(self):
v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
v4 = SparseVector(4, [(1, 1.0), (3, 2.5)])
self.assertEqual(hash(v1), hash(v2))
self.assertEqual(hash(v1), hash(v3))
self.assertEqual(hash(v2), hash(v3))
self.assertFalse(hash(v1) == hash(v4))
self.assertFalse(hash(v2) == hash(v4))
def test_eq(self):
v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
v4 = SparseVector(6, [(1, 1.0), (3, 5.5)])
v5 = DenseVector([0.0, 1.0, 0.0, 2.5])
v6 = SparseVector(4, [(1, 1.0), (3, 2.5)])
self.assertEqual(v1, v2)
self.assertEqual(v1, v3)
self.assertFalse(v2 == v4)
self.assertFalse(v1 == v5)
self.assertFalse(v1 == v6)
def test_equals(self):
indices = [1, 2, 4]
values = [1., 3., 2.]
self.assertTrue(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 1., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 2., 2.]))
def test_conversion(self):
# numpy arrays should be automatically upcast to float64
# tests for fix of [SPARK-5089]
v = array([1, 2, 3, 4], dtype='float64')
dv = DenseVector(v)
self.assertTrue(dv.array.dtype == 'float64')
v = array([1, 2, 3, 4], dtype='float32')
dv = DenseVector(v)
self.assertTrue(dv.array.dtype == 'float64')
def test_sparse_vector_indexing(self):
sv = SparseVector(5, {1: 1, 3: 2})
self.assertEqual(sv[0], 0.)
self.assertEqual(sv[3], 2.)
self.assertEqual(sv[1], 1.)
self.assertEqual(sv[2], 0.)
self.assertEqual(sv[4], 0.)
self.assertEqual(sv[-1], 0.)
self.assertEqual(sv[-2], 2.)
self.assertEqual(sv[-3], 0.)
self.assertEqual(sv[-5], 0.)
for ind in [5, -6]:
self.assertRaises(IndexError, sv.__getitem__, ind)
for ind in [7.8, '1']:
self.assertRaises(TypeError, sv.__getitem__, ind)
zeros = SparseVector(4, {})
self.assertEqual(zeros[0], 0.0)
self.assertEqual(zeros[3], 0.0)
for ind in [4, -5]:
self.assertRaises(IndexError, zeros.__getitem__, ind)
empty = SparseVector(0, {})
for ind in [-1, 0, 1]:
self.assertRaises(IndexError, empty.__getitem__, ind)
def test_sparse_vector_iteration(self):
self.assertListEqual(list(SparseVector(3, [], [])), [0.0, 0.0, 0.0])
self.assertListEqual(list(SparseVector(5, [0, 3], [1.0, 2.0])), [1.0, 0.0, 0.0, 2.0, 0.0])
def test_matrix_indexing(self):
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
expected = [[0, 6], [1, 8], [4, 10]]
for i in range(3):
for j in range(2):
self.assertEqual(mat[i, j], expected[i][j])
for i, j in [(-1, 0), (4, 1), (3, 4)]:
self.assertRaises(IndexError, mat.__getitem__, (i, j))
def test_repr_dense_matrix(self):
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
self.assertTrue(
repr(mat),
'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10], True)
self.assertTrue(
repr(mat),
'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
mat = DenseMatrix(6, 3, zeros(18))
self.assertTrue(
repr(mat),
'DenseMatrix(6, 3, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ..., \
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], False)')
def test_repr_sparse_matrix(self):
sm1t = SparseMatrix(
3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
isTransposed=True)
self.assertTrue(
repr(sm1t),
'SparseMatrix(3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0], True)')
indices = tile(arange(6), 3)
values = ones(18)
sm = SparseMatrix(6, 3, [0, 6, 12, 18], indices, values)
self.assertTrue(
repr(sm), "SparseMatrix(6, 3, [0, 6, 12, 18], \
[0, 1, 2, 3, 4, 5, 0, 1, ..., 4, 5, 0, 1, 2, 3, 4, 5], \
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ..., \
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], False)")
self.assertTrue(
str(sm),
"6 X 3 CSCMatrix\n\
(0,0) 1.0\n(1,0) 1.0\n(2,0) 1.0\n(3,0) 1.0\n(4,0) 1.0\n(5,0) 1.0\n\
(0,1) 1.0\n(1,1) 1.0\n(2,1) 1.0\n(3,1) 1.0\n(4,1) 1.0\n(5,1) 1.0\n\
(0,2) 1.0\n(1,2) 1.0\n(2,2) 1.0\n(3,2) 1.0\n..\n..")
sm = SparseMatrix(1, 18, zeros(19), [], [])
self.assertTrue(
repr(sm),
'SparseMatrix(1, 18, \
[0, 0, 0, 0, 0, 0, 0, 0, ..., 0, 0, 0, 0, 0, 0, 0, 0], [], [], False)')
def test_sparse_matrix(self):
# Test sparse matrix creation.
sm1 = SparseMatrix(
3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
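        # CSC layout: colPtrs[j]:colPtrs[j + 1] delimit the entries of column j,
        # so column 0 holds (row 1, 1.0) and (row 2, 2.0), column 2 holds
        # (row 1, 4.0) and (row 2, 5.0), and columns 1 and 3 are empty.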
self.assertEqual(sm1.numRows, 3)
self.assertEqual(sm1.numCols, 4)
self.assertEqual(sm1.colPtrs.tolist(), [0, 2, 2, 4, 4])
self.assertEqual(sm1.rowIndices.tolist(), [1, 2, 1, 2])
self.assertEqual(sm1.values.tolist(), [1.0, 2.0, 4.0, 5.0])
self.assertTrue(
repr(sm1),
'SparseMatrix(3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0], False)')
# Test indexing
expected = [
[0, 0, 0, 0],
[1, 0, 4, 0],
[2, 0, 5, 0]]
for i in range(3):
for j in range(4):
self.assertEqual(expected[i][j], sm1[i, j])
self.assertTrue(array_equal(sm1.toArray(), expected))
for i, j in [(-1, 1), (4, 3), (3, 5)]:
self.assertRaises(IndexError, sm1.__getitem__, (i, j))
# Test conversion to dense and sparse.
smnew = sm1.toDense().toSparse()
self.assertEqual(sm1.numRows, smnew.numRows)
self.assertEqual(sm1.numCols, smnew.numCols)
self.assertTrue(array_equal(sm1.colPtrs, smnew.colPtrs))
self.assertTrue(array_equal(sm1.rowIndices, smnew.rowIndices))
self.assertTrue(array_equal(sm1.values, smnew.values))
sm1t = SparseMatrix(
3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
isTransposed=True)
self.assertEqual(sm1t.numRows, 3)
self.assertEqual(sm1t.numCols, 4)
self.assertEqual(sm1t.colPtrs.tolist(), [0, 2, 3, 5])
self.assertEqual(sm1t.rowIndices.tolist(), [0, 1, 2, 0, 2])
self.assertEqual(sm1t.values.tolist(), [3.0, 2.0, 4.0, 9.0, 8.0])
expected = [
[3, 2, 0, 0],
[0, 0, 4, 0],
[9, 0, 8, 0]]
for i in range(3):
for j in range(4):
self.assertEqual(expected[i][j], sm1t[i, j])
self.assertTrue(array_equal(sm1t.toArray(), expected))
def test_dense_matrix_is_transposed(self):
mat1 = DenseMatrix(3, 2, [0, 4, 1, 6, 3, 9], isTransposed=True)
mat = DenseMatrix(3, 2, [0, 1, 3, 4, 6, 9])
self.assertEqual(mat1, mat)
expected = [[0, 4], [1, 6], [3, 9]]
for i in range(3):
for j in range(2):
self.assertEqual(mat1[i, j], expected[i][j])
self.assertTrue(array_equal(mat1.toArray(), expected))
sm = mat1.toSparse()
self.assertTrue(array_equal(sm.rowIndices, [1, 2, 0, 1, 2]))
self.assertTrue(array_equal(sm.colPtrs, [0, 2, 5]))
self.assertTrue(array_equal(sm.values, [1, 3, 4, 6, 9]))
def test_norms(self):
a = DenseVector([0, 2, 3, -1])
self.assertAlmostEqual(a.norm(2), 3.742, 3)
        self.assertEqual(a.norm(1), 6)
        self.assertEqual(a.norm(inf), 3)
a = SparseVector(4, [0, 2], [3, -4])
self.assertAlmostEqual(a.norm(2), 5)
        self.assertEqual(a.norm(1), 7)
        self.assertEqual(a.norm(inf), 4)
tmp = SparseVector(4, [0, 2], [3, 0])
self.assertEqual(tmp.numNonzeros(), 1)
class VectorUDTTests(MLlibTestCase):
dv0 = DenseVector([])
dv1 = DenseVector([1.0, 2.0])
sv0 = SparseVector(2, [], [])
sv1 = SparseVector(2, [1], [2.0])
udt = VectorUDT()
def test_json_schema(self):
self.assertEqual(VectorUDT.fromJson(self.udt.jsonValue()), self.udt)
def test_serialization(self):
for v in [self.dv0, self.dv1, self.sv0, self.sv1]:
self.assertEqual(v, self.udt.deserialize(self.udt.serialize(v)))
def test_infer_schema(self):
rdd = self.sc.parallelize([Row(label=1.0, features=self.dv1),
Row(label=0.0, features=self.sv1)])
df = rdd.toDF()
schema = df.schema
field = [f for f in schema.fields if f.name == "features"][0]
self.assertEqual(field.dataType, self.udt)
vectors = df.rdd.map(lambda p: p.features).collect()
self.assertEqual(len(vectors), 2)
for v in vectors:
if isinstance(v, SparseVector):
self.assertEqual(v, self.sv1)
elif isinstance(v, DenseVector):
self.assertEqual(v, self.dv1)
else:
raise TypeError("expecting a vector but got %r of type %r" % (v, type(v)))
class MatrixUDTTests(MLlibTestCase):
dm1 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10])
dm2 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10], isTransposed=True)
sm1 = SparseMatrix(1, 1, [0, 1], [0], [2.0])
sm2 = SparseMatrix(2, 1, [0, 0, 1], [0], [5.0], isTransposed=True)
udt = MatrixUDT()
def test_json_schema(self):
self.assertEqual(MatrixUDT.fromJson(self.udt.jsonValue()), self.udt)
def test_serialization(self):
for m in [self.dm1, self.dm2, self.sm1, self.sm2]:
self.assertEqual(m, self.udt.deserialize(self.udt.serialize(m)))
def test_infer_schema(self):
rdd = self.sc.parallelize([("dense", self.dm1), ("sparse", self.sm1)])
df = rdd.toDF()
schema = df.schema
        self.assertEqual(schema.fields[1].dataType, self.udt)
matrices = df.rdd.map(lambda x: x._2).collect()
self.assertEqual(len(matrices), 2)
for m in matrices:
if isinstance(m, DenseMatrix):
self.assertTrue(m, self.dm1)
elif isinstance(m, SparseMatrix):
self.assertTrue(m, self.sm1)
else:
raise ValueError("Expected a matrix but got type %r" % type(m))
if __name__ == "__main__":
from pyspark.ml.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
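# A minimal sketch (not part of the original test suite) of the CSC layout that
# test_sparse_matrix above relies on: colPtrs[j]:colPtrs[j + 1] slices rowIndices
# and values for column j. Assumes SparseMatrix is importable from
# pyspark.ml.linalg (older Spark releases keep it in pyspark.mllib.linalg).
def _csc_layout_sketch():
    from pyspark.ml.linalg import SparseMatrix
    sm = SparseMatrix(3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
    # column 0 holds 1.0 at row 1 and 2.0 at row 2; column 2 holds 4.0 at row 1
    # and 5.0 at row 2; columns 1 and 3 are empty.
    return sm.toArray()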
|
apache-2.0
|
kellielu/q
|
lib/werkzeug/test.py
|
183
|
34152
|
# -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from io import BytesIO
try:
from urllib2 import Request as U2Request
except ImportError:
from urllib.request import Request as U2Request
try:
from http.cookiejar import CookieJar
except ImportError: # Py2
from cookielib import CookieJar
from werkzeug._compat import iterlists, iteritems, itervalues, to_bytes, \
string_types, text_type, reraise, wsgi_encoding_dance, \
make_literal_wrapper
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \
url_unparse, url_parse
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
from werkzeug.utils import dump_cookie
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
boundary=None, charset='utf-8'):
"""Encode a dict of values (either strings or file descriptors or
:class:`FileStorage` objects.) into a multipart encoded string stored
in a file descriptor.
"""
if boundary is None:
boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
_closure = [BytesIO(), 0, False]
if use_tempfile:
def write_binary(string):
stream, total_length, on_disk = _closure
if on_disk:
stream.write(string)
else:
length = len(string)
if length + _closure[1] <= threshold:
stream.write(string)
else:
new_stream = TemporaryFile('wb+')
new_stream.write(stream.getvalue())
new_stream.write(string)
_closure[0] = new_stream
_closure[2] = True
_closure[1] = total_length + length
else:
write_binary = _closure[0].write
def write(string):
write_binary(string.encode(charset))
if not isinstance(values, MultiDict):
values = MultiDict(values)
for key, values in iterlists(values):
for value in values:
write('--%s\r\nContent-Disposition: form-data; name="%s"' %
(boundary, key))
reader = getattr(value, 'read', None)
if reader is not None:
filename = getattr(value, 'filename',
getattr(value, 'name', None))
content_type = getattr(value, 'content_type', None)
if content_type is None:
content_type = filename and \
mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
if filename is not None:
write('; filename="%s"\r\n' % filename)
else:
write('\r\n')
write('Content-Type: %s\r\n\r\n' % content_type)
while 1:
chunk = reader(16384)
if not chunk:
break
write_binary(chunk)
else:
                if not isinstance(value, string_types):
                    value = str(value)
                value = to_bytes(value, charset)
write('\r\n\r\n')
write_binary(value)
write('\r\n')
write('--%s--\r\n' % boundary)
length = int(_closure[0].tell())
_closure[0].seek(0)
return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
"""Like `stream_encode_multipart` but returns a tuple in the form
(``boundary``, ``data``) where data is a bytestring.
"""
stream, length, boundary = stream_encode_multipart(
values, use_tempfile=False, boundary=boundary, charset=charset)
return boundary, stream.read()
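# Hypothetical usage sketch (not part of werkzeug): combining the helpers above
# to build a multipart body for a test request. The field names and the file
# name 'hello.txt' are made up for illustration.
def _encode_multipart_sketch():
    boundary, data = encode_multipart({
        'name': u'example',
        'upload': FileStorage(BytesIO(b'file contents'), filename='hello.txt'),
    })
    content_type = 'multipart/form-data; boundary="%s"' % boundary
    return content_type, data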
def File(fd, filename=None, mimetype=None):
"""Backwards compat."""
from warnings import warn
warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
'EnvironBuilder or FileStorage instead'))
return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(self, name, default=None):
rv = []
for k, v in self.headers:
if k.lower() == name.lower():
rv.append(v)
return rv or default or []
class _TestCookieResponse(object):
"""Something that looks like a httplib.HTTPResponse, but is actually just an
adapter for our test responses to make them available for cookielib.
"""
def __init__(self, headers):
self.headers = _TestCookieHeaders(headers)
def info(self):
return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ):
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = []
for cookie in self:
cvals.append('%s=%s' % (cookie.name, cookie.value))
if cvals:
environ['HTTP_COOKIE'] = '; '.join(cvals)
def extract_wsgi(self, environ, headers):
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers),
U2Request(get_current_url(environ)),
)
def _iter_data(data):
"""Iterates over a dict or multidict yielding all keys and values.
This is used to iterate over the data passed to the
:class:`EnvironBuilder`.
"""
if isinstance(data, MultiDict):
for key, values in iterlists(data):
for value in values:
yield key, value
else:
for key, values in iteritems(data):
if isinstance(values, list):
for value in values:
yield key, value
else:
yield key, values
class EnvironBuilder(object):
"""This class can be used to conveniently create a WSGI environment
for testing purposes. It can be used to quickly create WSGI environments
or request objects from arbitrary data.
The signature of this class is also used in some other places as of
Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
:meth:`Client.open`). Because of this most of the functionality is
available through the constructor alone.
Files and regular form data can be manipulated independently of each
other with the :attr:`form` and :attr:`files` attributes, but are
passed with the same argument to the constructor: `data`.
`data` can be any of these values:
- a `str`: If it's a string it is converted into a :attr:`input_stream`,
the :attr:`content_length` is set and you have to provide a
:attr:`content_type`.
- a `dict`: If it's a dict the keys have to be strings and the values
any of the following objects:
- a :class:`file`-like object. These are converted into
:class:`FileStorage` objects automatically.
- a tuple. The :meth:`~FileMultiDict.add_file` method is called
with the tuple items as positional arguments.
.. versionadded:: 0.6
`path` and `base_url` can now be unicode strings that are encoded using
the :func:`iri_to_uri` function.
:param path: the path of the request. In the WSGI environment this will
end up as `PATH_INFO`. If the `query_string` is not defined
and there is a question mark in the `path` everything after
it is used as query string.
:param base_url: the base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).
:param query_string: an optional string or dict with URL parameters.
:param method: the HTTP method to use, defaults to `GET`.
:param input_stream: an optional input stream. Do not specify this and
`data`. As soon as an input stream is set you can't
modify :attr:`args` and :attr:`files` unless you
set the :attr:`input_stream` to `None` again.
:param content_type: The content type for the request. As of 0.5 you
don't have to provide this when specifying files
and form data via `data`.
:param content_length: The content length for the request. You don't
have to specify this when providing data via
`data`.
:param errors_stream: an optional error stream that is used for
`wsgi.errors`. Defaults to :data:`stderr`.
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
:param headers: an optional list or :class:`Headers` object of headers.
:param data: a string or dict of form data. See explanation above.
:param environ_base: an optional dict of environment defaults.
:param environ_overrides: an optional dict of environment overrides.
:param charset: the charset used to encode unicode data.
"""
#: the server protocol to use. defaults to HTTP/1.1
server_protocol = 'HTTP/1.1'
#: the wsgi version to use. defaults to (1, 0)
wsgi_version = (1, 0)
#: the default request class for :meth:`get_request`
request_class = BaseRequest
def __init__(self, path='/', base_url=None, query_string=None,
method='GET', input_stream=None, content_type=None,
content_length=None, errors_stream=None, multithread=False,
multiprocess=False, run_once=False, headers=None, data=None,
environ_base=None, environ_overrides=None, charset='utf-8'):
path_s = make_literal_wrapper(path)
if query_string is None and path_s('?') in path:
path, query_string = path.split(path_s('?'), 1)
self.charset = charset
self.path = iri_to_uri(path)
if base_url is not None:
base_url = url_fix(iri_to_uri(base_url, charset), charset)
self.base_url = base_url
if isinstance(query_string, (bytes, text_type)):
self.query_string = query_string
else:
if query_string is None:
query_string = MultiDict()
elif not isinstance(query_string, MultiDict):
query_string = MultiDict(query_string)
self.args = query_string
self.method = method
if headers is None:
headers = Headers()
elif not isinstance(headers, Headers):
headers = Headers(headers)
self.headers = headers
if content_type is not None:
self.content_type = content_type
if errors_stream is None:
errors_stream = sys.stderr
self.errors_stream = errors_stream
self.multithread = multithread
self.multiprocess = multiprocess
self.run_once = run_once
self.environ_base = environ_base
self.environ_overrides = environ_overrides
self.input_stream = input_stream
self.content_length = content_length
self.closed = False
if data:
if input_stream is not None:
raise TypeError('can\'t provide input stream and data')
if isinstance(data, text_type):
data = data.encode(self.charset)
if isinstance(data, bytes):
self.input_stream = BytesIO(data)
if self.content_length is None:
self.content_length = len(data)
else:
for key, value in _iter_data(data):
if isinstance(value, (tuple, dict)) or \
hasattr(value, 'read'):
self._add_file_from_data(key, value)
else:
self.form.setlistdefault(key).append(value)
def _add_file_from_data(self, key, value):
"""Called in the EnvironBuilder to add files from the data dict."""
if isinstance(value, tuple):
self.files.add_file(key, *value)
elif isinstance(value, dict):
from warnings import warn
warn(DeprecationWarning('it\'s no longer possible to pass dicts '
'as `data`. Use tuples or FileStorage '
'objects instead'), stacklevel=2)
value = dict(value)
mimetype = value.pop('mimetype', None)
if mimetype is not None:
value['content_type'] = mimetype
self.files.add_file(key, **value)
else:
self.files.add_file(key, value)
def _get_base_url(self):
return url_unparse((self.url_scheme, self.host,
self.script_root, '', '')).rstrip('/') + '/'
def _set_base_url(self, value):
if value is None:
scheme = 'http'
netloc = 'localhost'
script_root = ''
else:
scheme, netloc, script_root, qs, anchor = url_parse(value)
if qs or anchor:
raise ValueError('base url must not contain a query string '
'or fragment')
self.script_root = script_root.rstrip('/')
self.host = netloc
self.url_scheme = scheme
base_url = property(_get_base_url, _set_base_url, doc='''
The base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).''')
del _get_base_url, _set_base_url
def _get_content_type(self):
ct = self.headers.get('Content-Type')
if ct is None and not self._input_stream:
if self._files:
return 'multipart/form-data'
elif self._form:
return 'application/x-www-form-urlencoded'
return None
return ct
def _set_content_type(self, value):
if value is None:
self.headers.pop('Content-Type', None)
else:
self.headers['Content-Type'] = value
content_type = property(_get_content_type, _set_content_type, doc='''
The content type for the request. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_type, _set_content_type
def _get_content_length(self):
return self.headers.get('Content-Length', type=int)
def _set_content_length(self, value):
if value is None:
self.headers.pop('Content-Length', None)
else:
self.headers['Content-Length'] = str(value)
content_length = property(_get_content_length, _set_content_length, doc='''
The content length as integer. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_length, _set_content_length
def form_property(name, storage, doc):
key = '_' + name
def getter(self):
if self._input_stream is not None:
raise AttributeError('an input stream is defined')
rv = getattr(self, key)
if rv is None:
rv = storage()
setattr(self, key, rv)
return rv
def setter(self, value):
self._input_stream = None
setattr(self, key, value)
return property(getter, setter, doc)
form = form_property('form', MultiDict, doc='''
A :class:`MultiDict` of form values.''')
files = form_property('files', FileMultiDict, doc='''
A :class:`FileMultiDict` of uploaded files. You can use the
:meth:`~FileMultiDict.add_file` method to add new files to the
dict.''')
del form_property
def _get_input_stream(self):
return self._input_stream
def _set_input_stream(self, value):
self._input_stream = value
self._form = self._files = None
input_stream = property(_get_input_stream, _set_input_stream, doc='''
An optional input stream. If you set this it will clear
:attr:`form` and :attr:`files`.''')
del _get_input_stream, _set_input_stream
def _get_query_string(self):
if self._query_string is None:
if self._args is not None:
return url_encode(self._args, charset=self.charset)
return ''
return self._query_string
def _set_query_string(self, value):
self._query_string = value
self._args = None
query_string = property(_get_query_string, _set_query_string, doc='''
The query string. If you set this to a string :attr:`args` will
no longer be available.''')
del _get_query_string, _set_query_string
def _get_args(self):
if self._query_string is not None:
raise AttributeError('a query string is defined')
if self._args is None:
self._args = MultiDict()
return self._args
def _set_args(self, value):
self._query_string = None
self._args = value
args = property(_get_args, _set_args, doc='''
The URL arguments as :class:`MultiDict`.''')
del _get_args, _set_args
@property
def server_name(self):
"""The server name (read-only, use :attr:`host` to set)"""
return self.host.split(':', 1)[0]
@property
def server_port(self):
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(':', 1)
if len(pieces) == 2 and pieces[1].isdigit():
return int(pieces[1])
elif self.url_scheme == 'https':
return 443
return 80
def __del__(self):
try:
self.close()
except Exception:
pass
def close(self):
"""Closes all files. If you put real :class:`file` objects into the
:attr:`files` dict you can call this method to automatically close
them all in one go.
"""
if self.closed:
return
try:
files = itervalues(self.files)
except AttributeError:
files = ()
for f in files:
try:
f.close()
except Exception:
pass
self.closed = True
def get_environ(self):
"""Return the built environ."""
input_stream = self.input_stream
content_length = self.content_length
content_type = self.content_type
if input_stream is not None:
start_pos = input_stream.tell()
input_stream.seek(0, 2)
end_pos = input_stream.tell()
input_stream.seek(start_pos)
content_length = end_pos - start_pos
elif content_type == 'multipart/form-data':
values = CombinedMultiDict([self.form, self.files])
input_stream, content_length, boundary = \
stream_encode_multipart(values, charset=self.charset)
content_type += '; boundary="%s"' % boundary
elif content_type == 'application/x-www-form-urlencoded':
# XXX: py2v3 review
values = url_encode(self.form, charset=self.charset)
values = values.encode('ascii')
content_length = len(values)
input_stream = BytesIO(values)
else:
input_stream = _empty_stream
result = {}
if self.environ_base:
result.update(self.environ_base)
def _path_encode(x):
return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
qs = wsgi_encoding_dance(self.query_string)
result.update({
'REQUEST_METHOD': self.method,
'SCRIPT_NAME': _path_encode(self.script_root),
'PATH_INFO': _path_encode(self.path),
'QUERY_STRING': qs,
'SERVER_NAME': self.server_name,
'SERVER_PORT': str(self.server_port),
'HTTP_HOST': self.host,
'SERVER_PROTOCOL': self.server_protocol,
'CONTENT_TYPE': content_type or '',
'CONTENT_LENGTH': str(content_length or '0'),
'wsgi.version': self.wsgi_version,
'wsgi.url_scheme': self.url_scheme,
'wsgi.input': input_stream,
'wsgi.errors': self.errors_stream,
'wsgi.multithread': self.multithread,
'wsgi.multiprocess': self.multiprocess,
'wsgi.run_once': self.run_once
})
for key, value in self.headers.to_wsgi_list():
result['HTTP_%s' % key.upper().replace('-', '_')] = value
if self.environ_overrides:
result.update(self.environ_overrides)
return result
def get_request(self, cls=None):
"""Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use.
"""
if cls is None:
cls = self.request_class
return cls(self.get_environ())
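# Hypothetical usage sketch (not part of werkzeug): building a WSGI environ for
# a POST request with form data and one uploaded file, as described in the
# EnvironBuilder docstring above. The field names and 'report.txt' are made up
# for illustration.
def _environ_builder_sketch():
    builder = EnvironBuilder(
        path='/upload', method='POST',
        data={'field': 'value',
              'file': (BytesIO(b'file contents'), 'report.txt')})
    try:
        return builder.get_environ()
    finally:
        builder.close()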
class ClientRedirectError(Exception):
"""
If a redirect loop is detected when using follow_redirects=True with
    the :class:`Client`, then this exception is raised.
"""
class Client(object):
"""This class allows to send requests to a wrapped application.
The response wrapper can be a class or factory function that takes
three arguments: app_iter, status and headers. The default response
wrapper just returns a tuple.
Example::
class ClientResponse(BaseResponse):
...
client = Client(MyApplication(), response_wrapper=ClientResponse)
The use_cookies parameter indicates whether cookies should be stored and
sent for subsequent requests. This is True by default, but passing False
will disable this behaviour.
    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True`; otherwise no external redirects
    are allowed.
.. versionadded:: 0.5
`use_cookies` is new in this version. Older versions did not provide
builtin cookie support.
"""
def __init__(self, application, response_wrapper=None, use_cookies=True,
allow_subdomain_redirects=False):
self.application = application
self.response_wrapper = response_wrapper
if use_cookies:
self.cookie_jar = _TestCookieJar()
else:
self.cookie_jar = None
self.allow_subdomain_redirects = allow_subdomain_redirects
def set_cookie(self, server_name, key, value='', max_age=None,
expires=None, path='/', domain=None, secure=None,
httponly=False, charset='utf-8'):
"""Sets a cookie in the client's cookie jar. The server name
is required and has to match the one that is also passed to
the open call.
"""
assert self.cookie_jar is not None, 'cookies disabled'
header = dump_cookie(key, value, max_age, expires, path, domain,
secure, httponly, charset)
environ = create_environ(path, base_url='http://' + server_name)
headers = [('Set-Cookie', header)]
self.cookie_jar.extract_wsgi(environ, headers)
def delete_cookie(self, server_name, key, path='/', domain=None):
"""Deletes a cookie in the test client."""
self.set_cookie(server_name, key, expires=0, max_age=0,
path=path, domain=domain)
def run_wsgi_app(self, environ, buffered=False):
"""Runs the wrapped WSGI app with the given environment."""
if self.cookie_jar is not None:
self.cookie_jar.inject_wsgi(environ)
rv = run_wsgi_app(self.application, environ, buffered=buffered)
if self.cookie_jar is not None:
self.cookie_jar.extract_wsgi(environ, rv[2])
return rv
def resolve_redirect(self, response, new_location, environ, buffered=False):
"""Resolves a single redirect and triggers the request again
directly on this redirect client.
"""
scheme, netloc, script_root, qs, anchor = url_parse(new_location)
base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/'
cur_server_name = netloc.split(':', 1)[0].split('.')
real_server_name = get_host(environ).rsplit(':', 1)[0].split('.')
if self.allow_subdomain_redirects:
allowed = cur_server_name[-len(real_server_name):] == real_server_name
else:
allowed = cur_server_name == real_server_name
if not allowed:
raise RuntimeError('%r does not support redirect to '
'external targets' % self.__class__)
status_code = int(response[1].split(None, 1)[0])
if status_code == 307:
method = environ['REQUEST_METHOD']
else:
method = 'GET'
# For redirect handling we temporarily disable the response
# wrapper. This is not threadsafe but not a real concern
# since the test client must not be shared anyways.
old_response_wrapper = self.response_wrapper
self.response_wrapper = None
try:
return self.open(path=script_root, base_url=base_url,
query_string=qs, as_tuple=True,
buffered=buffered, method=method)
finally:
self.response_wrapper = old_response_wrapper
def open(self, *args, **kwargs):
"""Takes the same arguments as the :class:`EnvironBuilder` class with
some additions: You can provide a :class:`EnvironBuilder` or a WSGI
environment as only argument instead of the :class:`EnvironBuilder`
arguments and two optional keyword arguments (`as_tuple`, `buffered`)
that change the type of the return value or the way the application is
executed.
.. versionchanged:: 0.5
If a dict is provided as file in the dict for the `data` parameter
the content type has to be called `content_type` now instead of
`mimetype`. This change was made for consistency with
:class:`werkzeug.FileWrapper`.
The `follow_redirects` parameter was added to :func:`open`.
Additional parameters:
:param as_tuple: Returns a tuple in the form ``(environ, result)``
:param buffered: Set this to True to buffer the application run.
This will automatically close the application for
you as well.
:param follow_redirects: Set this to True if the `Client` should
follow HTTP redirects.
"""
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
environ = None
if not kwargs and len(args) == 1:
if isinstance(args[0], EnvironBuilder):
environ = args[0].get_environ()
elif isinstance(args[0], dict):
environ = args[0]
if environ is None:
builder = EnvironBuilder(*args, **kwargs)
try:
environ = builder.get_environ()
finally:
builder.close()
response = self.run_wsgi_app(environ, buffered=buffered)
# handle redirects
redirect_chain = []
while 1:
status_code = int(response[1].split(None, 1)[0])
if status_code not in (301, 302, 303, 305, 307) \
or not follow_redirects:
break
new_location = response[2]['location']
new_redirect_entry = (new_location, status_code)
if new_redirect_entry in redirect_chain:
raise ClientRedirectError('loop detected')
redirect_chain.append(new_redirect_entry)
environ, response = self.resolve_redirect(response, new_location,
environ,
buffered=buffered)
if self.response_wrapper is not None:
response = self.response_wrapper(*response)
if as_tuple:
return environ, response
return response
def get(self, *args, **kw):
"""Like open but method is enforced to GET."""
kw['method'] = 'GET'
return self.open(*args, **kw)
def patch(self, *args, **kw):
"""Like open but method is enforced to PATCH."""
kw['method'] = 'PATCH'
return self.open(*args, **kw)
def post(self, *args, **kw):
"""Like open but method is enforced to POST."""
kw['method'] = 'POST'
return self.open(*args, **kw)
def head(self, *args, **kw):
"""Like open but method is enforced to HEAD."""
kw['method'] = 'HEAD'
return self.open(*args, **kw)
def put(self, *args, **kw):
"""Like open but method is enforced to PUT."""
kw['method'] = 'PUT'
return self.open(*args, **kw)
def delete(self, *args, **kw):
"""Like open but method is enforced to DELETE."""
kw['method'] = 'DELETE'
return self.open(*args, **kw)
def options(self, *args, **kw):
"""Like open but method is enforced to OPTIONS."""
kw['method'] = 'OPTIONS'
return self.open(*args, **kw)
def trace(self, *args, **kw):
"""Like open but method is enforced to TRACE."""
kw['method'] = 'TRACE'
return self.open(*args, **kw)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.application
)
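# Hypothetical usage sketch (not part of werkzeug): exercising a trivial WSGI
# application through the Client defined above. Without a response_wrapper the
# return value is the plain (app_iter, status, headers) tuple. The application
# is made up for illustration.
def _client_sketch():
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello World!']
    client = Client(hello_app)
    app_iter, status, headers = client.get('/')
    return status, b''.join(app_iter)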
def create_environ(*args, **kwargs):
"""Create a new WSGI environ dict based on the values passed. The first
parameter should be the path of the request which defaults to '/'. The
second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc, port and
the path to the script.
This accepts the same arguments as the :class:`EnvironBuilder`
constructor.
.. versionchanged:: 0.5
This function is now a thin wrapper over :class:`EnvironBuilder` which
was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
and `charset` parameters were added.
"""
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_environ()
finally:
builder.close()
def run_wsgi_app(app, environ, buffered=False):
"""Return a tuple in the form (app_iter, status, headers) of the
application output. This works best if you pass it an application that
returns an iterator all the time.
Sometimes applications may use the `write()` callable returned
by the `start_response` function. This tries to resolve such edge
cases automatically. But if you don't get the expected output you
should set `buffered` to `True` which enforces buffering.
If passed an invalid WSGI application the behavior of this function is
undefined. Never pass non-conforming WSGI applications to this function.
:param app: the application to execute.
:param buffered: set to `True` to enforce buffering.
:return: tuple in the form ``(app_iter, status, headers)``
"""
environ = _get_environ(environ)
response = []
buffer = []
def start_response(status, headers, exc_info=None):
if exc_info is not None:
reraise(*exc_info)
response[:] = [status, headers]
return buffer.append
app_rv = app(environ, start_response)
close_func = getattr(app_rv, 'close', None)
app_iter = iter(app_rv)
# when buffering we emit the close call early and convert the
# application iterator into a regular list
if buffered:
try:
app_iter = list(app_iter)
finally:
if close_func is not None:
close_func()
# otherwise we iterate the application iter until we have a response, chain
# the already received data with the already collected data and wrap it in
# a new `ClosingIterator` if we need to restore a `close` callable from the
# original return value.
else:
while not response:
buffer.append(next(app_iter))
if buffer:
app_iter = chain(buffer, app_iter)
if close_func is not None and app_iter is not app_rv:
app_iter = ClosingIterator(app_iter, close_func)
return app_iter, response[0], Headers(response[1])
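# Hypothetical usage sketch (not part of werkzeug): calling run_wsgi_app
# directly with buffering enabled, as described in its docstring above. The
# application is made up for illustration.
def _run_wsgi_app_sketch():
    def app(environ, start_response):
        start_response('204 No Content', [])
        return []
    app_iter, status, headers = run_wsgi_app(
        app, create_environ('/ping'), buffered=True)
    return status, list(app_iter)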
|
apache-2.0
|
dirkjot/kivy
|
kivy/core/audio/audio_gstplayer.py
|
40
|
2636
|
'''
Audio Gstplayer
===============
.. versionadded:: 1.8.0
Implementation of Sound with Kivy :class:`~kivy.lib.gstplayer.GstPlayer`.
This is the preferred player, using GStreamer 1.0, and it works on both Python
2 and 3.
'''
from kivy.lib.gstplayer import GstPlayer, get_gst_version
from kivy.core.audio import Sound, SoundLoader
from kivy.logger import Logger
from kivy.compat import PY2
from kivy.clock import Clock
from os.path import realpath
if PY2:
from urllib import pathname2url
else:
from urllib.request import pathname2url
Logger.info('AudioGstplayer: Using Gstreamer {}'.format(
'.'.join(map(str, get_gst_version()))))
def _on_gstplayer_message(mtype, message):
if mtype == 'error':
Logger.error('AudioGstplayer: {}'.format(message))
elif mtype == 'warning':
Logger.warning('AudioGstplayer: {}'.format(message))
elif mtype == 'info':
Logger.info('AudioGstplayer: {}'.format(message))
class SoundGstplayer(Sound):
@staticmethod
def extensions():
return ('wav', 'ogg', 'mp3', 'm4a')
def __init__(self, **kwargs):
self.player = None
super(SoundGstplayer, self).__init__(**kwargs)
def _on_gst_eos_sync(self):
Clock.schedule_once(self._on_gst_eos, 0)
def _on_gst_eos(self, *dt):
if self.loop:
self.player.stop()
self.player.play()
else:
self.stop()
def load(self):
self.unload()
uri = self._get_uri()
self.player = GstPlayer(uri, None, self._on_gst_eos_sync,
_on_gstplayer_message)
self.player.load()
def play(self):
        # we need to set the volume every time; it seems that stopping and
        # playing the sound resets the volume.
self.player.set_volume(self.volume)
self.player.play()
super(SoundGstplayer, self).play()
def stop(self):
self.player.stop()
super(SoundGstplayer, self).stop()
def unload(self):
if self.player:
self.player.unload()
self.player = None
def seek(self, position):
self.player.seek(position / self.length)
def get_pos(self):
return self.player.get_position()
def _get_length(self):
return self.player.get_duration()
def on_volume(self, instance, volume):
self.player.set_volume(volume)
def _get_uri(self):
uri = self.filename
if not uri:
return
        if '://' not in uri:
uri = 'file:' + pathname2url(realpath(uri))
return uri
SoundLoader.register(SoundGstplayer)
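# Hypothetical usage sketch (not part of kivy): once SoundGstplayer is
# registered above, sounds are normally obtained through SoundLoader rather
# than instantiated directly. The file name 'example.wav' is made up for
# illustration.
def _sound_sketch():
    sound = SoundLoader.load('example.wav')
    if sound:
        sound.volume = 0.5
        sound.play()
    return sound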
|
mit
|
yufish/youtube-dl
|
youtube_dl/extractor/vgtv.py
|
102
|
7096
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
)
class VGTVIE(InfoExtractor):
IE_DESC = 'VGTV and BTTV'
_VALID_URL = r'''(?x)
(?:
vgtv:|
http://(?:www\.)?
)
(?P<host>vgtv|bt)
(?:
:|
\.no/(?:tv/)?\#!/(?:video|live)/
)
(?P<id>[0-9]+)
'''
_TESTS = [
{
# streamType: vod
'url': 'http://www.vgtv.no/#!/video/84196/hevnen-er-soet-episode-10-abu',
'md5': 'b8be7a234cebb840c0d512c78013e02f',
'info_dict': {
'id': '84196',
'ext': 'mp4',
'title': 'Hevnen er søt: Episode 10 - Abu',
'description': 'md5:e25e4badb5f544b04341e14abdc72234',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 648.000,
'timestamp': 1404626400,
'upload_date': '20140706',
'view_count': int,
},
},
{
# streamType: wasLive
'url': 'http://www.vgtv.no/#!/live/100764/opptak-vgtv-foelger-em-kvalifiseringen',
'info_dict': {
'id': '100764',
'ext': 'flv',
'title': 'OPPTAK: VGTV følger EM-kvalifiseringen',
'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 9103.0,
'timestamp': 1410113864,
'upload_date': '20140907',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
# streamType: live
'url': 'http://www.vgtv.no/#!/live/113063/direkte-v75-fra-solvalla',
'info_dict': {
'id': '113063',
'ext': 'flv',
'title': 're:^DIREKTE: V75 fra Solvalla [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:b3743425765355855f88e096acc93231',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 0,
'timestamp': 1432975582,
'upload_date': '20150530',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
'url': 'http://www.bt.no/tv/#!/video/100250/norling-dette-er-forskjellen-paa-1-divisjon-og-eliteserien',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
host = mobj.group('host')
HOST_WEBSITES = {
'vgtv': 'vgtv',
'bt': 'bttv',
}
data = self._download_json(
'http://svp.vg.no/svp/api/v1/%s/assets/%s?appName=%s-website'
% (host, video_id, HOST_WEBSITES[host]),
video_id, 'Downloading media JSON')
if data.get('status') == 'inactive':
raise ExtractorError(
'Video %s is no longer available' % video_id, expected=True)
streams = data['streamUrls']
stream_type = data.get('streamType')
formats = []
hls_url = streams.get('hls')
if hls_url:
formats.extend(self._extract_m3u8_formats(
hls_url, video_id, 'mp4', m3u8_id='hls'))
hds_url = streams.get('hds')
# wasLive hds are always 404
if hds_url and stream_type != 'wasLive':
formats.extend(self._extract_f4m_formats(
hds_url + '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18',
video_id, f4m_id='hds'))
mp4_url = streams.get('mp4')
if mp4_url:
_url = hls_url or hds_url
MP4_URL_TEMPLATE = '%s/%%s.%s' % (mp4_url.rpartition('/')[0], mp4_url.rpartition('.')[-1])
for mp4_format in _url.split(','):
                m = re.search(r'(?P<width>\d+)_(?P<height>\d+)_(?P<vbr>\d+)', mp4_format)
if not m:
continue
width = int(m.group('width'))
height = int(m.group('height'))
vbr = int(m.group('vbr'))
formats.append({
'url': MP4_URL_TEMPLATE % mp4_format,
'format_id': 'mp4-%s' % vbr,
'width': width,
'height': height,
'vbr': vbr,
'preference': 1,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': self._live_title(data['title']),
'description': data['description'],
'thumbnail': data['images']['main'] + '?t[]=900x506q80',
'timestamp': data['published'],
'duration': float_or_none(data['duration'], 1000),
'view_count': data['displays'],
'formats': formats,
            'is_live': stream_type == 'live',
}
class BTArticleIE(InfoExtractor):
IE_NAME = 'bt:article'
IE_DESC = 'Bergens Tidende Articles'
    _VALID_URL = r'http://(?:www\.)?bt\.no/(?:[^/]+/)+(?P<id>[^/]+)-\d+\.html'
_TEST = {
'url': 'http://www.bt.no/nyheter/lokalt/Kjemper-for-internatet-1788214.html',
'md5': 'd055e8ee918ef2844745fcfd1a4175fb',
'info_dict': {
'id': '23199',
'ext': 'mp4',
'title': 'Alrekstad internat',
'description': 'md5:dc81a9056c874fedb62fc48a300dac58',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 191,
'timestamp': 1289991323,
'upload_date': '20101117',
'view_count': int,
},
}
def _real_extract(self, url):
webpage = self._download_webpage(url, self._match_id(url))
video_id = self._search_regex(
r'SVP\.Player\.load\(\s*(\d+)', webpage, 'video id')
return self.url_result('vgtv:bt:%s' % video_id, 'VGTV')
class BTVestlendingenIE(InfoExtractor):
IE_NAME = 'bt:vestlendingen'
IE_DESC = 'Bergens Tidende - Vestlendingen'
    _VALID_URL = r'http://(?:www\.)?bt\.no/spesial/vestlendingen/#!/(?P<id>\d+)'
_TEST = {
'url': 'http://www.bt.no/spesial/vestlendingen/#!/86588',
'md5': 'd7d17e3337dc80de6d3a540aefbe441b',
'info_dict': {
'id': '86588',
'ext': 'mov',
'title': 'Otto Wollertsen',
'description': 'Vestlendingen Otto Fredrik Wollertsen',
'timestamp': 1430473209,
'upload_date': '20150501',
},
}
def _real_extract(self, url):
return self.url_result('xstream:btno:%s' % self._match_id(url), 'Xstream')
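# Hypothetical sketch (not part of youtube-dl): how VGTVIE._real_extract above
# rebuilds progressive MP4 URLs from the comma-separated variant list embedded
# in the HLS/HDS URL. The sample URLs and variant list are made up for
# illustration.
def _mp4_template_sketch():
    mp4_url = 'http://example.com/video/base.mp4'
    variant_list = '640_360_800,1280_720_2500'
    template = '%s/%%s.%s' % (mp4_url.rpartition('/')[0], mp4_url.rpartition('.')[-1])
    urls = []
    for variant in variant_list.split(','):
        if re.search(r'(?P<width>\d+)_(?P<height>\d+)_(?P<vbr>\d+)', variant):
            urls.append(template % variant)
    return urls  # e.g. ['http://example.com/video/640_360_800.mp4', ...]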
|
unlicense
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_vpn_gateways_operations.py
|
1
|
30312
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations:
"""VpnGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
gateway_name: str,
**kwargs
) -> "_models.VpnGateway":
"""Retrieves the details of a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.VpnGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.VpnGateway",
**kwargs
) -> "_models.VpnGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.VpnGateway",
**kwargs
) -> AsyncLROPoller["_models.VpnGateway"]:
"""Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn
gateway.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2018_11_01.models.VpnGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_gateway_parameters=vpn_gateway_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
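    # Hypothetical usage sketch (not part of the generated SDK) of the
    # long-running operation documented above; the client variable and resource
    # names are made up for illustration:
    #
    #     poller = await network_client.vpn_gateways.begin_create_or_update(
    #         "my-resource-group", "my-gateway", vpn_gateway_parameters)
    #     gateway = await poller.result()  # waits for the LRO to complete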
async def _update_tags_initial(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.TagsObject",
**kwargs
) -> "_models.VpnGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.VpnGateway"]:
"""Updates virtual wan vpn gateway tags.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2018_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_gateway_parameters=vpn_gateway_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
gateway_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
gateway_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
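# Illustrative usage sketch (not part of the generated operations class): it
# assumes the async management client exposes these operations as
# ``client.vpn_gateways``; adjust the attribute name to your actual client surface.
async def _example_delete_vpn_gateway(client, resource_group_name: str, gateway_name: str) -> None:
    # begin_delete returns an AsyncLROPoller; awaiting result() blocks until the
    # long-running operation reaches a terminal state.
    poller = await client.vpn_gateways.begin_delete(resource_group_name, gateway_name)
    await poller.result()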
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.ListVpnGatewaysResult"]:
"""Lists all the VpnGateways in a resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'} # type: ignore
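# Illustrative consumption sketch (assumption: the operations group is exposed as
# ``client.vpn_gateways``): the AsyncItemPaged returned above is consumed with
# ``async for`` and fetches further pages lazily.
async def _example_list_vpn_gateways(client, resource_group_name: str) -> None:
    async for gateway in client.vpn_gateways.list_by_resource_group(resource_group_name):
        print(gateway.name)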
def list(
self,
**kwargs
) -> AsyncIterable["_models.ListVpnGatewaysResult"]:
"""Lists all the VpnGateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'} # type: ignore
|
mit
|
nitin-cherian/LifeLongLearning
|
Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/jedi/evaluate/representation.py
|
3
|
25594
|
"""
Like described in the :mod:`jedi.parser.python.tree` module,
there's a need for an ast like module to represent the states of parsed
modules.
But now there are also structures in Python that need a little bit more than
that. An ``Instance`` for example is only a ``Class`` before it is
instantiated. This class represents these cases.
So, why is there also a ``Class`` class here? Well, there are decorators and
they change classes in Python 3.
Representation modules also define "magic methods". Those methods look like
``py__foo__`` and are typically mappable to the Python equivalents ``__call__``
and others. Here's a list:
====================================== ========================================
**Method** **Description**
-------------------------------------- ----------------------------------------
py__call__(params: Array) On callable objects, returns types.
py__bool__() Returns True/False/None; None means that
there's no certainty.
py__bases__() Returns a list of base classes.
py__mro__() Returns a list of classes (the mro).
py__iter__() Returns a generator of a set of types.
py__class__() Returns the class of an instance.
py__getitem__(index: int/str) Returns a set of types of the index.
Can raise an IndexError/KeyError.
py__file__() Only on modules. Returns None if it does
not exist.
py__package__() Only on modules. For the import system.
py__path__() Only on modules. For the import system.
py__get__(call_object) Only on instances. Simulates
descriptors.
====================================== ========================================
"""
import os
import pkgutil
import imp
import re
from itertools import chain
from jedi._compatibility import use_metaclass
from jedi.parser.python import tree
from jedi import debug
from jedi import common
from jedi.evaluate.cache import memoize_default, CachedMetaClass, NO_DEFAULT
from jedi.evaluate import compiled
from jedi.evaluate import recursion
from jedi.evaluate import iterable
from jedi.evaluate import docstrings
from jedi.evaluate import pep0484
from jedi.evaluate import param
from jedi.evaluate import flow_analysis
from jedi.evaluate import imports
from jedi.evaluate import helpers
from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
GlobalNameFilter, DictFilter, ContextName, AbstractNameDefinition, \
ParamName, AnonymousInstanceParamName, TreeNameDefinition, \
ContextNameMixin
from jedi.evaluate.dynamic import search_params
from jedi.evaluate import context
from jedi.evaluate.context import ContextualizedNode
def apply_py__get__(context, base_context):
try:
method = context.py__get__
except AttributeError:
yield context
else:
for descriptor_context in method(base_context):
yield descriptor_context
class ClassName(TreeNameDefinition):
def __init__(self, parent_context, tree_name, name_context):
super(ClassName, self).__init__(parent_context, tree_name)
self._name_context = name_context
def infer(self):
# TODO this _name_to_types might get refactored and be a part of the
# parent class. Once it is, we can probably just overwrite method to
# achieve this.
from jedi.evaluate.finder import _name_to_types
inferred = _name_to_types(
self.parent_context.evaluator, self._name_context, self.tree_name)
for result_context in inferred:
for c in apply_py__get__(result_context, self.parent_context):
yield c
class ClassFilter(ParserTreeFilter):
name_class = ClassName
def _convert_names(self, names):
return [self.name_class(self.context, name, self._node_context)
for name in names]
class ClassContext(use_metaclass(CachedMetaClass, context.TreeContext)):
"""
This class is not only important to extend `tree.Class`, it is also
important for descriptors (if the descriptor methods are evaluated or not).
"""
api_type = 'class'
def __init__(self, evaluator, classdef, parent_context):
super(ClassContext, self).__init__(evaluator, parent_context=parent_context)
self.tree_node = classdef
@memoize_default(default=())
def py__mro__(self):
def add(cls):
if cls not in mro:
mro.append(cls)
mro = [self]
# TODO Do a proper mro resolution. Currently we are just listing
# classes. However, it's a complicated algorithm.
for lazy_cls in self.py__bases__():
# TODO there's multiple different mro paths possible if this yields
# multiple possibilities. Could be changed to be more correct.
for cls in lazy_cls.infer():
# TODO detect for TypeError: duplicate base class str,
# e.g. `class X(str, str): pass`
try:
mro_method = cls.py__mro__
except AttributeError:
# TODO add a TypeError like:
"""
>>> class Y(lambda: test): pass
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: function() argument 1 must be code, not str
>>> class Y(1): pass
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: int() takes at most 2 arguments (3 given)
"""
pass
else:
add(cls)
for cls_new in mro_method():
add(cls_new)
return tuple(mro)
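# For comparison (illustrative only), CPython's real C3 linearisation, which the
# simplified loop above approximates, can be inspected directly:
#     >>> class A(object): pass
#     >>> class B(A): pass
#     >>> [c.__name__ for c in B.__mro__]
#     ['B', 'A', 'object']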
@memoize_default(default=())
def py__bases__(self):
arglist = self.tree_node.get_super_arglist()
if arglist:
args = param.TreeArguments(self.evaluator, self, arglist)
return [value for key, value in args.unpack() if key is None]
else:
return [context.LazyKnownContext(compiled.create(self.evaluator, object))]
def py__call__(self, params):
from jedi.evaluate.instance import TreeInstance
return set([TreeInstance(self.evaluator, self.parent_context, self, params)])
def py__class__(self):
return compiled.create(self.evaluator, type)
def get_params(self):
from jedi.evaluate.instance import AnonymousInstance
anon = AnonymousInstance(self.evaluator, self.parent_context, self)
return [AnonymousInstanceParamName(anon, param.name) for param in self.funcdef.params]
def get_filters(self, search_global, until_position=None, origin_scope=None, is_instance=False):
if search_global:
yield ParserTreeFilter(
self.evaluator,
context=self,
until_position=until_position,
origin_scope=origin_scope
)
else:
for cls in self.py__mro__():
if isinstance(cls, compiled.CompiledObject):
for filter in cls.get_filters(is_instance=is_instance):
yield filter
else:
yield ClassFilter(
self.evaluator, self, node_context=cls,
origin_scope=origin_scope)
def is_class(self):
return True
def get_subscope_by_name(self, name):
raise DeprecationWarning
for s in self.py__mro__():
for sub in reversed(s.subscopes):
if sub.name.value == name:
return sub
raise KeyError("Couldn't find subscope.")
def get_function_slot_names(self, name):
for filter in self.get_filters(search_global=False):
names = filter.get(name)
if names:
return names
return []
def get_param_names(self):
for name in self.get_function_slot_names('__init__'):
for context_ in name.infer():
try:
method = context_.get_param_names
except AttributeError:
pass
else:
return list(method())[1:]
return []
@property
def name(self):
return ContextName(self, self.tree_node.name)
class FunctionContext(use_metaclass(CachedMetaClass, context.TreeContext)):
"""
Needed because of decorators. Decorators are evaluated here.
"""
api_type = 'function'
def __init__(self, evaluator, parent_context, funcdef):
""" This should not be called directly """
super(FunctionContext, self).__init__(evaluator, parent_context)
self.tree_node = funcdef
def get_filters(self, search_global, until_position=None, origin_scope=None):
if search_global:
yield ParserTreeFilter(
self.evaluator,
context=self,
until_position=until_position,
origin_scope=origin_scope
)
else:
scope = self.py__class__()
for filter in scope.get_filters(search_global=False, origin_scope=origin_scope):
yield filter
def infer_function_execution(self, function_execution):
"""
Created to be used by inheritance.
"""
if self.tree_node.is_generator():
return set([iterable.Generator(self.evaluator, function_execution)])
else:
return function_execution.get_return_values()
def get_function_execution(self, arguments=None):
e = self.evaluator
if arguments is None:
return AnonymousFunctionExecution(e, self.parent_context, self)
else:
return FunctionExecutionContext(e, self.parent_context, self, arguments)
def py__call__(self, arguments):
function_execution = self.get_function_execution(arguments)
return self.infer_function_execution(function_execution)
def py__class__(self):
# This differentiation is only necessary for Python2. Python3 does not
# use a different method class.
if isinstance(self.tree_node.get_parent_scope(), tree.Class):
name = 'METHOD_CLASS'
else:
name = 'FUNCTION_CLASS'
return compiled.get_special_object(self.evaluator, name)
@property
def name(self):
return ContextName(self, self.tree_node.name)
def get_param_names(self):
function_execution = self.get_function_execution()
return [ParamName(function_execution, param.name) for param in self.tree_node.params]
class FunctionExecutionContext(context.TreeContext):
"""
This class is used to evaluate functions and their returns.
This is the most complicated class, because it contains the logic to
transfer parameters. It is even more complicated, because there may be
multiple calls to functions and recursion has to be avoided. But this is
responsibility of the decorators.
"""
function_execution_filter = FunctionExecutionFilter
def __init__(self, evaluator, parent_context, function_context, var_args):
super(FunctionExecutionContext, self).__init__(evaluator, parent_context)
self.function_context = function_context
self.tree_node = function_context.tree_node
self.var_args = var_args
@memoize_default(default=set())
@recursion.execution_recursion_decorator()
def get_return_values(self, check_yields=False):
funcdef = self.tree_node
if funcdef.type == 'lambda':
return self.evaluator.eval_element(self, funcdef.children[-1])
if check_yields:
types = set()
returns = funcdef.yields
else:
returns = funcdef.returns
types = set(docstrings.find_return_types(self.get_root_context(), funcdef))
types |= set(pep0484.find_return_types(self.get_root_context(), funcdef))
for r in returns:
check = flow_analysis.reachability_check(self, funcdef, r)
if check is flow_analysis.UNREACHABLE:
debug.dbg('Return unreachable: %s', r)
else:
if check_yields:
types |= set(self._eval_yield(r))
else:
types |= self.eval_node(r.children[1])
if check is flow_analysis.REACHABLE:
debug.dbg('Return reachable: %s', r)
break
return types
def _eval_yield(self, yield_expr):
node = yield_expr.children[1]
if node.type == 'yield_arg': # It must be a yield from.
cn = ContextualizedNode(self, node.children[1])
for lazy_context in iterable.py__iter__(self.evaluator, cn.infer(), cn):
yield lazy_context
else:
yield context.LazyTreeContext(self, node)
@recursion.execution_recursion_decorator(default=iter([]))
def get_yield_values(self):
for_parents = [(y, tree.search_ancestor(y, ('for_stmt', 'funcdef',
'while_stmt', 'if_stmt')))
for y in self.tree_node.yields]
# Calculate if the yields are placed within the same for loop.
yields_order = []
last_for_stmt = None
for yield_, for_stmt in for_parents:
# For really simple for loops we can predict the order. Otherwise
# we just ignore it.
parent = for_stmt.parent
if parent.type == 'suite':
parent = parent.parent
if for_stmt.type == 'for_stmt' and parent == self.tree_node \
and for_stmt.defines_one_name(): # Simplicity for now.
if for_stmt == last_for_stmt:
yields_order[-1][1].append(yield_)
else:
yields_order.append((for_stmt, [yield_]))
elif for_stmt == self.tree_node:
yields_order.append((None, [yield_]))
else:
types = self.get_return_values(check_yields=True)
if types:
yield context.get_merged_lazy_context(list(types))
return
last_for_stmt = for_stmt
evaluator = self.evaluator
for for_stmt, yields in yields_order:
if for_stmt is None:
# No for_stmt, just normal yields.
for yield_ in yields:
for result in self._eval_yield(yield_):
yield result
else:
input_node = for_stmt.get_input_node()
cn = ContextualizedNode(self, input_node)
ordered = iterable.py__iter__(evaluator, cn.infer(), cn)
ordered = list(ordered)
for lazy_context in ordered:
dct = {str(for_stmt.children[1]): lazy_context.infer()}
with helpers.predefine_names(self, for_stmt, dct):
for yield_in_same_for_stmt in yields:
for result in self._eval_yield(yield_in_same_for_stmt):
yield result
def get_filters(self, search_global, until_position=None, origin_scope=None):
yield self.function_execution_filter(self.evaluator, self,
until_position=until_position,
origin_scope=origin_scope)
@memoize_default(default=NO_DEFAULT)
def get_params(self):
return param.get_params(self.evaluator, self.parent_context, self.tree_node, self.var_args)
class AnonymousFunctionExecution(FunctionExecutionContext):
def __init__(self, evaluator, parent_context, function_context):
super(AnonymousFunctionExecution, self).__init__(
evaluator, parent_context, function_context, var_args=None)
@memoize_default(default=NO_DEFAULT)
def get_params(self):
# We need to do a dynamic search here.
return search_params(self.evaluator, self.parent_context, self.tree_node)
class ModuleAttributeName(AbstractNameDefinition):
"""
For module attributes like __file__, __str__ and so on.
"""
api_type = 'instance'
def __init__(self, parent_module, string_name):
self.parent_context = parent_module
self.string_name = string_name
def infer(self):
return compiled.create(self.parent_context.evaluator, str).execute(
param.ValuesArguments([])
)
class ModuleName(ContextNameMixin, AbstractNameDefinition):
start_pos = 1, 0
def __init__(self, context, name):
self._context = context
self._name = name
@property
def string_name(self):
return self._name
class ModuleContext(use_metaclass(CachedMetaClass, context.TreeContext)):
api_type = 'module'
parent_context = None
def __init__(self, evaluator, module_node, path):
super(ModuleContext, self).__init__(evaluator, parent_context=None)
self.tree_node = module_node
self._path = path
def get_filters(self, search_global, until_position=None, origin_scope=None):
yield ParserTreeFilter(
self.evaluator,
context=self,
until_position=until_position,
origin_scope=origin_scope
)
yield GlobalNameFilter(self, self.tree_node)
yield DictFilter(self._sub_modules_dict())
yield DictFilter(self._module_attributes_dict())
for star_module in self.star_imports():
yield next(star_module.get_filters(search_global))
# I'm not sure if the star import cache is really that effective anymore
# with all the other really fast import caches. Recheck. Also we would need
# to push the star imports into Evaluator.modules, if we reenable this.
@memoize_default([])
def star_imports(self):
modules = []
for i in self.tree_node.imports:
if i.is_star_import():
name = i.star_import_name()
new = imports.infer_import(self, name)
for module in new:
if isinstance(module, ModuleContext):
modules += module.star_imports()
modules += new
return modules
@memoize_default()
def _module_attributes_dict(self):
names = ['__file__', '__package__', '__doc__', '__name__']
# All the additional module attributes are strings.
return dict((n, ModuleAttributeName(self, n)) for n in names)
@property
def _string_name(self):
""" This is used for the goto functions. """
if self._path is None:
return '' # no path -> empty name
else:
sep = (re.escape(os.path.sep),) * 2
r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self._path)
# Remove PEP 3149 names
return re.sub('\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1))
@property
@memoize_default()
def name(self):
return ModuleName(self, self._string_name)
def _get_init_directory(self):
"""
:return: The path to the directory of a package. None in case it's not
a package.
"""
for suffix, _, _ in imp.get_suffixes():
ending = '__init__' + suffix
py__file__ = self.py__file__()
if py__file__ is not None and py__file__.endswith(ending):
# Remove the ending, including the separator.
return self.py__file__()[:-len(ending) - 1]
return None
def py__name__(self):
for name, module in self.evaluator.modules.items():
if module == self and name != '':
return name
return '__main__'
def py__file__(self):
"""
In contrast to Python's ``__file__``, this can be None.
"""
if self._path is None:
return None
return os.path.abspath(self._path)
def py__package__(self):
if self._get_init_directory() is None:
return re.sub(r'\.?[^\.]+$', '', self.py__name__())
else:
return self.py__name__()
def _py__path__(self):
search_path = self.evaluator.sys_path
init_path = self.py__file__()
if os.path.basename(init_path) == '__init__.py':
with open(init_path, 'rb') as f:
content = common.source_to_unicode(f.read())
# these are strings that need to be used for namespace packages,
# the first one is ``pkgutil``, the second ``pkg_resources``.
options = ('declare_namespace(__name__)', 'extend_path(__path__')
if options[0] in content or options[1] in content:
# It is a namespace, now try to find the rest of the
# modules on sys_path or whatever the search_path is.
paths = set()
for s in search_path:
other = os.path.join(s, self.name.string_name)
if os.path.isdir(other):
paths.add(other)
if paths:
return list(paths)
# TODO I'm not sure if this is how nested namespace
# packages work. The tests are not really good enough to
# show that.
# Default to this.
return [self._get_init_directory()]
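# For reference, the two marker strings searched for above are the usual
# namespace-package idioms found in such an ``__init__.py`` (illustrative):
#     from pkgutil import extend_path
#     __path__ = extend_path(__path__, __name__)
# or, with setuptools:
#     __import__('pkg_resources').declare_namespace(__name__)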
@property
def py__path__(self):
"""
Not seen here, since it's a property. The callback actually uses a
variable, so use it like::
foo.py__path__(sys_path)
In case of a package, this returns Python's __path__ attribute, which
is a list of paths (strings).
Raises an AttributeError if the module is not a package.
"""
path = self._get_init_directory()
if path is None:
raise AttributeError('Only packages have __path__ attributes.')
else:
return self._py__path__
@memoize_default()
def _sub_modules_dict(self):
"""
Lists modules in the directory of this module (if this module is a
package).
"""
path = self._path
names = {}
if path is not None and path.endswith(os.path.sep + '__init__.py'):
mods = pkgutil.iter_modules([os.path.dirname(path)])
for module_loader, name, is_pkg in mods:
# It's obviously a relative import to the current module.
names[name] = imports.SubModuleName(self, name)
# TODO add something like this in the future, its cleaner than the
# import hacks.
# ``os.path`` is a hardcoded exception, because it's a
# ``sys.modules`` modification.
# if str(self.name) == 'os':
# names.append(Name('path', parent_context=self))
return names
def py__class__(self):
return compiled.get_special_object(self.evaluator, 'MODULE_CLASS')
def __repr__(self):
return "<%s: %s@%s-%s>" % (
self.__class__.__name__, self._string_name,
self.tree_node.start_pos[0], self.tree_node.end_pos[0])
class ImplicitNSName(AbstractNameDefinition):
"""
Accessing names for implicit namespace packages should infer to nothing.
This object will prevent Jedi from raising exceptions.
"""
def __init__(self, implicit_ns_context, string_name):
self.implicit_ns_context = implicit_ns_context
self.string_name = string_name
def infer(self):
return []
def get_root_context(self):
return self.implicit_ns_context
class ImplicitNamespaceContext(use_metaclass(CachedMetaClass, context.TreeContext)):
"""
Provides support for implicit namespace packages
"""
api_type = 'module'
parent_context = None
def __init__(self, evaluator, fullname):
super(ImplicitNamespaceContext, self).__init__(evaluator, parent_context=None)
self.evaluator = evaluator
self.fullname = fullname
def get_filters(self, search_global, until_position=None, origin_scope=None):
yield DictFilter(self._sub_modules_dict())
@property
@memoize_default()
def name(self):
string_name = self.py__package__().rpartition('.')[-1]
return ImplicitNSName(self, string_name)
def py__file__(self):
return None
def py__package__(self):
"""Return the fullname
"""
return self.fullname
@property
def py__path__(self):
return lambda: [self.paths]
@memoize_default()
def _sub_modules_dict(self):
names = {}
paths = self.paths
file_names = chain.from_iterable(os.listdir(path) for path in paths)
mods = [
file_name.rpartition('.')[0] if '.' in file_name else file_name
for file_name in file_names
if file_name != '__pycache__'
]
for name in mods:
names[name] = imports.SubModuleName(self, name)
return names
|
mit
|
tima/ansible
|
lib/ansible/plugins/vars/__init__.py
|
128
|
1329
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2014, Serge van Ginderachter <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.utils.path import basedir
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class BaseVarsPlugin(object):
"""
Loads variables for groups and/or hosts
"""
def __init__(self):
""" constructor """
self._display = display
def get_vars(self, loader, path, entities):
""" Gets variables. """
self._basedir = basedir(path)
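# A minimal, hypothetical subclass sketch showing the shape of a vars plugin built
# on the base class above; the returned variable is purely illustrative.
class ExampleVarsPlugin(BaseVarsPlugin):

    def get_vars(self, loader, path, entities):
        """ Return extra variables for the given hosts/groups. """
        super(ExampleVarsPlugin, self).get_vars(loader, path, entities)
        return {'example_var': 42}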
|
gpl-3.0
|
livecd-tools/livecd-tools
|
imgcreate/errors.py
|
4
|
1198
|
#
# errors.py : exception definitions
#
# Copyright 2007, Red Hat, Inc.
# Copyright 2017, Fedora Project
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
class CreatorError(Exception):
"""An exception base class for all imgcreate errors."""
def __init__(self, message):
Exception.__init__(self, message)
class KickstartError(CreatorError):
pass
class MountError(CreatorError):
pass
class SnapshotError(CreatorError):
pass
class CryptoLUKSError(CreatorError):
pass
class SquashfsError(CreatorError):
pass
class ResizeError(CreatorError):
pass
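# Illustrative usage of the hierarchy above: catching the base class covers every
# more specific imgcreate error.
if __name__ == '__main__':
    try:
        raise MountError("mount failed")
    except CreatorError as exc:
        print("imgcreate error: %s" % exc)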
|
gpl-2.0
|
todaychi/hue
|
desktop/core/ext-py/Django-1.6.10/tests/servers/test_basehttp.py
|
45
|
2207
|
import sys
from django.core.servers.basehttp import WSGIRequestHandler
from django.test import TestCase
from django.utils.six import BytesIO, StringIO
class Stub(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class WSGIRequestHandlerTestCase(TestCase):
def test_strips_underscore_headers(self):
"""WSGIRequestHandler ignores headers containing underscores.
This follows the lead of nginx and Apache 2.4, and is to avoid
ambiguity between dashes and underscores in mapping to WSGI environ,
which can have security implications.
"""
def test_app(environ, start_response):
"""A WSGI app that just reflects its HTTP environ."""
start_response('200 OK', [])
http_environ_items = sorted(
'%s:%s' % (k, v) for k, v in environ.items()
if k.startswith('HTTP_')
)
yield (','.join(http_environ_items)).encode('utf-8')
rfile = BytesIO()
rfile.write(b"GET / HTTP/1.0\r\n")
rfile.write(b"Some-Header: good\r\n")
rfile.write(b"Some_Header: bad\r\n")
rfile.write(b"Other_Header: bad\r\n")
rfile.seek(0)
# WSGIRequestHandler closes the output file; we need to make this a
# no-op so we can still read its contents.
class UnclosableBytesIO(BytesIO):
def close(self):
pass
wfile = UnclosableBytesIO()
def makefile(mode, *a, **kw):
if mode == 'rb':
return rfile
elif mode == 'wb':
return wfile
request = Stub(makefile=makefile)
server = Stub(base_environ={}, get_app=lambda: test_app)
# We don't need to check stderr, but we don't want it in test output
old_stderr = sys.stderr
sys.stderr = StringIO()
try:
# instantiating a handler runs the request as side effect
WSGIRequestHandler(request, '192.168.0.2', server)
finally:
sys.stderr = old_stderr
wfile.seek(0)
body = list(wfile.readlines())[-1]
self.assertEqual(body, b'HTTP_SOME_HEADER:good')
|
apache-2.0
|
FedoraScientific/salome-smesh
|
src/Tools/blocFissure/gmu/facesFissure.py
|
1
|
2085
|
# -*- coding: utf-8 -*-
import logging
import GEOM
from geomsmesh import geompy
# -----------------------------------------------------------------------------
# --- crack faces inside and outside the torus, and edges of the face outside the torus
def facesFissure(blocp, faceFissure, extrusionDefaut, genint):
"""
Extraction of the crack faces inside and outside the torus, and of the edges along the torus and on the wall.
@param faceFissure : the crack face, with its part inside the elliptical torus and its external part
@return (facefissintore, facefissoutore, edgeint, edgeext, reverext)
"""
logging.info('start')
[f0,f1] = geompy.ExtractShapes(faceFissure, geompy.ShapeType["FACE"], True)
ed0 = geompy.ExtractShapes(f0, geompy.ShapeType["EDGE"], True)
ed1 = geompy.ExtractShapes(f1, geompy.ShapeType["EDGE"], True)
if len(ed0) > len(ed1):
facefissintore = f0
facefissoutore = f1
else:
facefissintore = f1
facefissoutore = f0
geompy.addToStudyInFather(faceFissure, facefissintore,'facefissintore')
geompy.addToStudyInFather(faceFissure, facefissoutore,'facefissoutore')
edgeint = geompy.GetShapesOnShape(extrusionDefaut, facefissoutore, geompy.ShapeType["EDGE"], GEOM.ST_IN)
edgeext = geompy.GetShapesOnShape(extrusionDefaut, facefissoutore, geompy.ShapeType["EDGE"], GEOM.ST_ON)
for i in range(len(edgeint)):
name = "edgeint_%d"%i
geompy.addToStudyInFather(facefissoutore, edgeint[i],name)
for i in range(len(edgeext)):
name = "edgeext_%d"%i
geompy.addToStudyInFather(facefissoutore, edgeext[i],name)
reverext = []
if len(edgeext) > 1:
vertices = geompy.ExtractShapes(genint, geompy.ShapeType["VERTEX"], False)
for i in range(len(edgeext)):
vertedge = geompy.ExtractShapes(edgeext[i], geompy.ShapeType["VERTEX"], False)
if ((geompy.GetSubShapeID(blocp, vertedge[0]) == geompy.GetSubShapeID(blocp, vertices[0])) or
(geompy.GetSubShapeID(blocp, vertedge[0]) == geompy.GetSubShapeID(blocp, vertices[1]))):
reverext.append(0)
else:
reverext.append(1)
return facefissintore, facefissoutore, edgeint, edgeext, reverext
|
lgpl-2.1
|
rubyinhell/brython
|
www/src/Lib/unittest/test/testmock/testmagicmethods.py
|
737
|
12145
|
import unittest
import inspect
import sys
from unittest.mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest.TestCase):
def test_deleting_magic_methods(self):
mock = Mock()
self.assertFalse(hasattr(mock, '__getitem__'))
mock.__getitem__ = Mock()
self.assertTrue(hasattr(mock, '__getitem__'))
del mock.__getitem__
self.assertFalse(hasattr(mock, '__getitem__'))
def test_magicmock_del(self):
mock = MagicMock()
# before using getitem
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
mock = MagicMock()
# this time use it first
mock['foo']
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
def test_magic_method_wrapping(self):
mock = Mock()
def f(self, name):
return self, 'fish'
mock.__getitem__ = f
self.assertFalse(mock.__getitem__ is f)
self.assertEqual(mock['foo'], (mock, 'fish'))
self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))
mock.__getitem__ = mock
self.assertTrue(mock.__getitem__ is mock)
def test_magic_methods_isolated_between_mocks(self):
mock1 = Mock()
mock2 = Mock()
mock1.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock1), [])
self.assertRaises(TypeError, lambda: list(mock2))
def test_repr(self):
mock = Mock()
self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
mock.__repr__ = lambda s: 'foo'
self.assertEqual(repr(mock), 'foo')
def test_str(self):
mock = Mock()
self.assertEqual(str(mock), object.__str__(mock))
mock.__str__ = lambda s: 'foo'
self.assertEqual(str(mock), 'foo')
def test_dict_methods(self):
mock = Mock()
self.assertRaises(TypeError, lambda: mock['foo'])
def _del():
del mock['foo']
def _set():
mock['foo'] = 3
self.assertRaises(TypeError, _del)
self.assertRaises(TypeError, _set)
_dict = {}
def getitem(s, name):
return _dict[name]
def setitem(s, name, value):
_dict[name] = value
def delitem(s, name):
del _dict[name]
mock.__setitem__ = setitem
mock.__getitem__ = getitem
mock.__delitem__ = delitem
self.assertRaises(KeyError, lambda: mock['foo'])
mock['foo'] = 'bar'
self.assertEqual(_dict, {'foo': 'bar'})
self.assertEqual(mock['foo'], 'bar')
del mock['foo']
self.assertEqual(_dict, {})
def test_numeric(self):
original = mock = Mock()
mock.value = 0
self.assertRaises(TypeError, lambda: mock + 3)
def add(self, other):
mock.value += other
return self
mock.__add__ = add
self.assertEqual(mock + 3, mock)
self.assertEqual(mock.value, 3)
del mock.__add__
def iadd(mock):
mock += 3
self.assertRaises(TypeError, iadd, mock)
mock.__iadd__ = add
mock += 6
self.assertEqual(mock, original)
self.assertEqual(mock.value, 9)
self.assertRaises(TypeError, lambda: 3 + mock)
mock.__radd__ = add
self.assertEqual(7 + mock, mock)
self.assertEqual(mock.value, 16)
def test_hash(self):
mock = Mock()
# test delegation
self.assertEqual(hash(mock), Mock.__hash__(mock))
def _hash(s):
return 3
mock.__hash__ = _hash
self.assertEqual(hash(mock), 3)
def test_nonzero(self):
m = Mock()
self.assertTrue(bool(m))
m.__bool__ = lambda s: False
self.assertFalse(bool(m))
def test_comparison(self):
mock = Mock()
def comp(s, o):
return True
mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
self.assertTrue(mock < 3)
self.assertTrue(mock > 3)
self.assertTrue(mock <= 3)
self.assertTrue(mock >= 3)
self.assertRaises(TypeError, lambda: MagicMock() < object())
self.assertRaises(TypeError, lambda: object() < MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() > object())
self.assertRaises(TypeError, lambda: object() > MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() <= object())
self.assertRaises(TypeError, lambda: object() <= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() >= object())
self.assertRaises(TypeError, lambda: object() >= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())
def test_equality(self):
for mock in Mock(), MagicMock():
self.assertEqual(mock == mock, True)
self.assertIsInstance(mock == mock, bool)
self.assertEqual(mock != mock, False)
self.assertIsInstance(mock != mock, bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
def eq(self, other):
return other == 3
mock.__eq__ = eq
self.assertTrue(mock == 3)
self.assertFalse(mock == 4)
def ne(self, other):
return other == 3
mock.__ne__ = ne
self.assertTrue(mock != 3)
self.assertFalse(mock != 4)
mock = MagicMock()
mock.__eq__.return_value = True
self.assertIsInstance(mock == 3, bool)
self.assertEqual(mock == 3, True)
mock.__ne__.return_value = False
self.assertIsInstance(mock != 3, bool)
self.assertEqual(mock != 3, False)
def test_len_contains_iter(self):
mock = Mock()
self.assertRaises(TypeError, len, mock)
self.assertRaises(TypeError, iter, mock)
self.assertRaises(TypeError, lambda: 'foo' in mock)
mock.__len__ = lambda s: 6
self.assertEqual(len(mock), 6)
mock.__contains__ = lambda s, o: o == 3
self.assertTrue(3 in mock)
self.assertFalse(6 in mock)
mock.__iter__ = lambda s: iter('foobarbaz')
self.assertEqual(list(mock), list('foobarbaz'))
def test_magicmock(self):
mock = MagicMock()
mock.__iter__.return_value = iter([1, 2, 3])
self.assertEqual(list(mock), [1, 2, 3])
getattr(mock, '__bool__').return_value = False
self.assertFalse(hasattr(mock, '__nonzero__'))
self.assertFalse(bool(mock))
for entry in _magics:
self.assertTrue(hasattr(mock, entry))
self.assertFalse(hasattr(mock, '__imaginery__'))
def test_magic_mock_equality(self):
mock = MagicMock()
self.assertIsInstance(mock == object(), bool)
self.assertIsInstance(mock != object(), bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
self.assertEqual(mock == mock, True)
self.assertEqual(mock != mock, False)
def test_magicmock_defaults(self):
mock = MagicMock()
self.assertEqual(int(mock), 1)
self.assertEqual(complex(mock), 1j)
self.assertEqual(float(mock), 1.0)
self.assertNotIn(object(), mock)
self.assertEqual(len(mock), 0)
self.assertEqual(list(mock), [])
self.assertEqual(hash(mock), object.__hash__(mock))
self.assertEqual(str(mock), object.__str__(mock))
self.assertTrue(bool(mock))
# in Python 3 oct and hex use __index__
# so these tests are for __index__ in py3k
self.assertEqual(oct(mock), '0o1')
self.assertEqual(hex(mock), '0x1')
# how to test __sizeof__ ?
def test_magic_methods_and_spec(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def test_magic_methods_and_spec_set(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec_set=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec_set=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec_set=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def test_setting_unsupported_magic_method(self):
mock = MagicMock()
def set_setattr():
mock.__setattr__ = lambda self, name: None
self.assertRaisesRegex(AttributeError,
"Attempting to set unsupported magic method '__setattr__'.",
set_setattr
)
def test_attributes_and_return_value(self):
mock = MagicMock()
attr = mock.foo
def _get_type(obj):
# the type of every mock (or magicmock) is a custom subclass
# so the real type is the second in the mro
return type(obj).__mro__[1]
self.assertEqual(_get_type(attr), MagicMock)
returned = mock()
self.assertEqual(_get_type(returned), MagicMock)
def test_magic_methods_are_magic_mocks(self):
mock = MagicMock()
self.assertIsInstance(mock.__getitem__, MagicMock)
mock[1][2].__getitem__.return_value = 3
self.assertEqual(mock[1][2][3], 3)
def test_magic_method_reset_mock(self):
mock = MagicMock()
str(mock)
self.assertTrue(mock.__str__.called)
mock.reset_mock()
self.assertFalse(mock.__str__.called)
def test_dir(self):
# overriding the default implementation
for mock in Mock(), MagicMock():
def _dir(self):
return ['foo']
mock.__dir__ = _dir
self.assertEqual(dir(mock), ['foo'])
@unittest.skipIf('PyPy' in sys.version, "This fails differently on pypy")
def test_bound_methods(self):
m = Mock()
# XXXX should this be an expected failure instead?
# this seems like it should work, but is hard to do without introducing
# other api inconsistencies. Failure message could be better though.
m.__iter__ = [3].__iter__
self.assertRaises(TypeError, iter, m)
def test_magic_method_type(self):
class Foo(MagicMock):
pass
foo = Foo()
self.assertIsInstance(foo.__int__, Foo)
def test_descriptor_from_class(self):
m = MagicMock()
type(m).__str__.return_value = 'foo'
self.assertEqual(str(m), 'foo')
def test_iterable_as_iter_return_value(self):
m = MagicMock()
m.__iter__.return_value = [1, 2, 3]
self.assertEqual(list(m), [1, 2, 3])
self.assertEqual(list(m), [1, 2, 3])
m.__iter__.return_value = iter([4, 5, 6])
self.assertEqual(list(m), [4, 5, 6])
self.assertEqual(list(m), [])
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
vincentintime/vnpy
|
vn.trader/ctpGateway.py
|
9
|
36058
|
# encoding: UTF-8
from vnctpmd import MdApi
from vnctptd import TdApi
from gateway import *
import os
########################################################################
class CtpGateway(VtGateway):
"""CTP接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine):
"""Constructor"""
super(CtpGateway, self).__init__(eventEngine)
self.mdApi = None # market data API
self.tdApi = None # trading API
self.mdConnected = False # market data API connection status
self.tdConnected = False # trading API connection status
########################################################################
class CtpMdApi(MdApi):
"""CTP行情API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway, userID, password, brokerID, address):
"""Constructor"""
super(CtpMdApi, self).__init__()
self.gateway = gateway # gateway object
self.gatewayName = gateway.gatewayName # gateway object name
self.reqID = EMPTY_INT # request sequence number
self.connectionStatus = False # connection status
self.loginStatus = False # login status
self.userID = userID # account ID
self.password = password # password
self.brokerID = brokerID # broker ID
self.address = address # server address
self.subscribedSymbols = set() # symbols already subscribed
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接成功'
self.gateway.onLog(log)
self.login()
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.mdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
"""心跳报警"""
# 因为API的心跳报警比较常被触发,且与API工作关系不大,因此选择忽略
pass
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = True
self.gateway.mdConnected = True
log = VtLogData()
log.logContent = u'行情服务器登录完成'
self.gateway.onLog(log)
# Re-subscribe to previously subscribed contracts
for subscribeReq in self.subscribedSymbols:
self.subscribe(subscribeReq)
# Otherwise, push the error message
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.mdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登出完成'
self.gateway.onLog(log)
# Otherwise, push the error message
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspSubMarketData(self, data, error, n, last):
"""订阅合约回报"""
# 通常不在乎订阅错误,选择忽略
pass
#----------------------------------------------------------------------
def onRspUnSubMarketData(self, data, error, n, last):
"""退订合约回报"""
# 同上
pass
#----------------------------------------------------------------------
def onRtnDepthMarketData(self, data):
"""行情推送"""
tick = VtTickData()
tick.symbol = data['InstrumentID']
tick.vtSymbol = '.'.join([self.gatewayName, tick.symbol])
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.tickTime = '.'.join([data['UpdateTime'], str(data['UpdateMillisec']/100)])
# CTP provides only one level of market depth
tick.bidPrice1 = data['BidPrice1']
tick.bidVolume1 = data['BidVolume1']
tick.askPrice1 = data['AskPrice1']
tick.askVolume1 = data['AskVolume1']
self.gateway.onTick(tick)
#----------------------------------------------------------------------
def onRspSubForQuoteRsp(self, data, error, n, last):
"""订阅期权询价"""
pass
#----------------------------------------------------------------------
def onRspUnSubForQuoteRsp(self, data, error, n, last):
"""退订期权询价"""
pass
#----------------------------------------------------------------------
def onRtnForQuoteRsp(self, data):
"""期权询价推送"""
pass
#----------------------------------------------------------------------
def connect(self):
"""初始化连接"""
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
# Create the API object on the C++ side; the argument is the folder path used to store the .con files
path = os.getcwd() + '\\temp\\' + self.gatewayName + '\\'
if not os.path.exists(path):
os.makedirs(path)
self.createFtdcMdApi(path)
# Register the front server address
self.registerFront(self.address)
# Initialise the connection; on success onFrontConnected will be called
self.init()
# If already connected but not yet logged in, log in
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅合约"""
self.subscribeMarketData(subscribeReq.symbol)
self.subscribedSymbols.add(subscribeReq)
#----------------------------------------------------------------------
def login(self):
"""登录"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
self.reqID += 1
self.reqUserLogin(req, self.reqID)
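# Hypothetical wiring sketch for the market data API above (not part of the
# gateway). VtSubscribeReq is assumed to be the request class carrying a
# ``symbol`` attribute expected by ``subscribe``; credentials are placeholders.
def _example_connect_md(gateway, userID, password, brokerID, address, symbol):
    md = CtpMdApi(gateway, userID, password, brokerID, address)
    md.connect()            # triggers onFrontConnected, which then calls login()
    req = VtSubscribeReq()  # assumed request object
    req.symbol = symbol
    md.subscribe(req)
    return md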
########################################################################
class CtpTdApi(TdApi):
"""CTP交易API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway, userID, password, brokerID, address):
"""API对象的初始化函数"""
super(CtpTdApi, self).__init__()
self.gateway = gateway # gateway object
self.gatewayName = gateway.gatewayName # gateway object name
self.reqID = EMPTY_INT # request sequence number
self.orderRef = EMPTY_INT # order reference number
self.connectionStatus = False # connection status
self.loginStatus = False # login status
self.userID = userID # account ID
self.password = password # password
self.brokerID = brokerID # broker ID
self.address = address # server address
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接成功'
self.gateway.onLog(log)
self.login()
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
def onRspAuthenticate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = True
self.gateway.tdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登录完成'
self.gateway.onLog(log)
# Confirm settlement information
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqID += 1
self.reqSettlementInfoConfirm(req, self.reqID)
# Otherwise, push the error message
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登出完成'
self.gateway.onLog(log)
# Otherwise, push the error message
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspOrderInsert(self, data, error, n, last):
"""发单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspParkedOrderInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspParkedOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspOrderAction(self, data, error, n, last):
"""撤单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspQueryMaxOrderVolume(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspSettlementInfoConfirm(self, data, error, n, last):
"""确认结算信息回报"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'结算信息确认完成'
self.gateway.onLog(log)
# Query instrument (contract) codes
self.reqID += 1
self.reqQryInstrument({}, self.reqID)
#----------------------------------------------------------------------
def onRspRemoveParkedOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspRemoveParkedOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspExecOrderInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspExecOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspForQuoteInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQuoteInsert(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQuoteAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTrade(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPosition(self, data, error, n, last):
"""持仓查询回报"""
if error['ErrorID'] == 0:
event = Event(type_=EVENT_POSITION)
event.dict_['data'] = data
self.__eventEngine.put(event)
else:
event = Event(type_=EVENT_LOG)
log = u'持仓查询回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspQryTradingAccount(self, data, error, n, last):
"""资金账户查询回报"""
if error['ErrorID'] == 0:
event = Event(type_=EVENT_ACCOUNT)
event.dict_['data'] = data
self.__eventEngine.put(event)
else:
event = Event(type_=EVENT_LOG)
log = u'账户查询回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspQryInvestor(self, data, error, n, last):
"""投资者查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryTradingCode(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentMarginRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentCommissionRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExchange(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryProduct(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrument(self, data, error, n, last):
"""
合约查询回报
由于该回报的推送速度极快,因此不适合全部存入队列中处理,
选择先储存在一个本地字典中,全部收集完毕后再推送到队列中
(由于耗时过长目前使用其他进程读取)
"""
if error['ErrorID'] == 0:
event = Event(type_=EVENT_INSTRUMENT)
event.dict_['data'] = data
event.dict_['last'] = last
self.__eventEngine.put(event)
else:
event = Event(type_=EVENT_LOG)
log = u'合约投资者回报,错误代码:' + unicode(error['ErrorID']) + u',' + u'错误信息:' + error['ErrorMsg'].decode('gbk')
event.dict_['log'] = log
self.__eventEngine.put(event)
#----------------------------------------------------------------------
def onRspQryDepthMarketData(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQrySettlementInfo(self, data, error, n, last):
"""查询结算信息回报"""
pass
#----------------------------------------------------------------------
def onRspQryTransferBank(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPositionDetail(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryNotice(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQrySettlementInfoConfirm(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPositionCombineDetail(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryCFMMCTradingAccountKey(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryEWarrantOffset(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorProductGroupMargin(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExchangeMarginRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExchangeMarginRateAdjust(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExchangeRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQrySecAgentACIDMap(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryOptionInstrTradeCost(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryOptionInstrCommRate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryExecOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryForQuote(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryQuote(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTransferSerial(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryAccountregister(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRtnOrder(self, data):
"""报单回报"""
# 更新最大报单编号
newref = data['OrderRef']
self.orderRef = max(self.orderRef, int(newref))
# 创建报单数据对象
order = VtOrderData()
order.gatewayName = self.gatewayName
# 保存代码和报单号
order.symbol = data['InstrumentID']
order.vtSymbol = '.'.join([self.gatewayName, order.symbol])
order.orderID = data['OrderRef']
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
# 方向
if data['Direction'] == '0':
order.direction = DIRECTION_LONG
elif data['Direction'] == '1':
order.direction = DIRECTION_SHORT
else:
order.direction = DIRECTION_UNKNOWN
        # Open/close offset flag (hedged completion of a fragment that is cut
        # short in the source; the field and OFFSET_* constants are assumed)
        if data['CombOffsetFlag'] == '0':
            order.offset = OFFSET_OPEN
        else:
            order.offset = OFFSET_CLOSE
#----------------------------------------------------------------------
def onRtnTrade(self, data):
"""成交回报"""
# 常规成交事件
event1 = Event(type_=EVENT_TRADE)
event1.dict_['data'] = data
self.__eventEngine.put(event1)
# 特定合约成交事件
event2 = Event(type_=(EVENT_TRADE_CONTRACT+data['InstrumentID']))
event2.dict_['data'] = data
self.__eventEngine.put(event2)
#----------------------------------------------------------------------
def onErrRtnOrderInsert(self, data, error):
"""发单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRtnInstrumentStatus(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnTradingNotice(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnErrorConditionalOrder(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnExecOrder(self, data):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnExecOrderInsert(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnExecOrderAction(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnForQuoteInsert(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onRtnQuote(self, data):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnQuoteInsert(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnQuoteAction(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onRtnForQuoteRsp(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRspQryContractBank(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryParkedOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryParkedOrderAction(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTradingNotice(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryBrokerTradingParams(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryBrokerTradingAlgos(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRtnFromBankToFutureByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnFromFutureToBankByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromBankToFutureByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromFutureToBankByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnFromBankToFutureByFuture(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnFromFutureToBankByFuture(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromBankToFutureByFutureManual(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromFutureToBankByFutureManual(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnQueryBankBalanceByFuture(self, data):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnBankToFutureByFuture(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnFutureToBankByFuture(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnRepealBankToFutureByFutureManual(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnRepealFutureToBankByFutureManual(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onErrRtnQueryBankBalanceByFuture(self, data, error):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromBankToFutureByFuture(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnRepealFromFutureToBankByFuture(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRspFromBankToFutureByFuture(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspFromFutureToBankByFuture(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQueryBankAccountMoneyByFuture(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRtnOpenAccountByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnCancelAccountByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def onRtnChangeAccountByBank(self, data):
""""""
pass
#----------------------------------------------------------------------
def login(self, address, userid, password, brokerid):
"""连接服务器"""
self.__userid = userid
self.__password = password
self.__brokerid = brokerid
# 数据重传模式设为从本日开始
self.subscribePrivateTopic(0)
self.subscribePublicTopic(0)
# 注册服务器地址
self.registerFront(address)
# 初始化连接,成功会调用onFrontConnected
self.init()
#----------------------------------------------------------------------
def getInstrument(self):
"""查询合约"""
self.__reqid = self.__reqid + 1
self.reqQryInstrument({}, self.__reqid)
#----------------------------------------------------------------------
def getAccount(self):
"""查询账户"""
self.__reqid = self.__reqid + 1
self.reqQryTradingAccount({}, self.__reqid)
#----------------------------------------------------------------------
def getInvestor(self):
"""查询投资者"""
self.__reqid = self.__reqid + 1
self.reqQryInvestor({}, self.__reqid)
#----------------------------------------------------------------------
def getPosition(self):
"""查询持仓"""
self.__reqid = self.__reqid + 1
req = {}
req['BrokerID'] = self.__brokerid
req['InvestorID'] = self.__userid
self.reqQryInvestorPosition(req, self.__reqid)
#----------------------------------------------------------------------
def sendOrder(self, instrumentid, exchangeid, price, pricetype, volume, direction, offset):
"""发单"""
self.__reqid = self.__reqid + 1
req = {}
req['InstrumentID'] = instrumentid
req['OrderPriceType'] = pricetype
req['LimitPrice'] = price
req['VolumeTotalOriginal'] = volume
req['Direction'] = direction
req['CombOffsetFlag'] = offset
self.__orderref = self.__orderref + 1
req['OrderRef'] = str(self.__orderref)
req['InvestorID'] = self.__userid
req['UserID'] = self.__userid
req['BrokerID'] = self.__brokerid
        req['CombHedgeFlag'] = defineDict['THOST_FTDC_HF_Speculation'] # speculation
        req['ContingentCondition'] = defineDict['THOST_FTDC_CC_Immediately'] # send immediately
        req['ForceCloseReason'] = defineDict['THOST_FTDC_FCC_NotForceClose'] # not a forced liquidation
        req['IsAutoSuspend'] = 0 # do not auto-suspend
        req['TimeCondition'] = defineDict['THOST_FTDC_TC_GFD'] # good for day
        req['VolumeCondition'] = defineDict['THOST_FTDC_VC_AV'] # any volume
        req['MinVolume'] = 1 # minimum fill volume is 1
        self.reqOrderInsert(req, self.__reqid)
        # Return the order reference so that algorithms can manage the order dynamically
return self.__orderref
#----------------------------------------------------------------------
def cancelOrder(self, instrumentid, exchangeid, orderref, frontid, sessionid):
"""撤单"""
self.__reqid = self.__reqid + 1
req = {}
req['InstrumentID'] = instrumentid
req['ExchangeID'] = exchangeid
req['OrderRef'] = orderref
req['FrontID'] = frontid
req['SessionID'] = sessionid
req['ActionFlag'] = defineDict['THOST_FTDC_AF_Delete']
req['BrokerID'] = self.__brokerid
req['InvestorID'] = self.__userid
self.reqOrderAction(req, self.__reqid)
#----------------------------------------------------------------------
def getSettlement(self):
"""查询结算信息"""
self.__reqid = self.__reqid + 1
req = {}
req['BrokerID'] = self.__brokerid
req['InvestorID'] = self.__userid
self.reqQrySettlementInfo(req, self.__reqid)
#----------------------------------------------------------------------
def confirmSettlement(self):
"""确认结算信息"""
self.__reqid = self.__reqid + 1
req = {}
req['BrokerID'] = self.__brokerid
req['InvestorID'] = self.__userid
self.reqSettlementInfoConfirm(req, self.__reqid)
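# Minimal usage sketch (illustrative only, not part of the original file): the
# trader API class above is driven through the event engine, and a typical
# session logs in, confirms settlement, queries static data and then trades.
# The instance name `td` and the front address are hypothetical placeholders.
#
#   td.login('tcp://front.broker.example:10000', userid, password, brokerid)
#   # ...after the login callbacks succeed:
#   td.confirmSettlement()
#   td.getInstrument()
#   td.getAccount()
#   td.getPosition()
#   ref = td.sendOrder(instrumentid, exchangeid, price, pricetype, volume,
#                      direction, offset)
#   td.cancelOrder(instrumentid, exchangeid, str(ref), frontid, sessionid)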
|
mit
|
sublime1809/django
|
django/db/models/related.py
|
34
|
3413
|
from collections import namedtuple
from django.utils.encoding import smart_text
from django.db.models.fields import BLANK_CHOICE_DASH
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple('PathInfo',
'from_opts to_opts target_fields join_field '
'm2m direct')
class RelatedObject(object):
def __init__(self, parent_model, model, field):
self.parent_model = parent_model
self.model = model
self.opts = model._meta
self.field = field
self.name = '%s:%s' % (self.opts.app_label, self.opts.model_name)
self.var_name = self.opts.model_name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
limit_to_currently_related=False):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field.
Analogue of django.db.models.fields.Field.get_choices, provided
initially for utilization by RelatedFieldListFilter.
"""
first_choice = blank_choice if include_blank else []
queryset = self.model._default_manager.all()
if limit_to_currently_related:
queryset = queryset.complex_filter(
{'%s__isnull' % self.parent_model._meta.model_name: False})
lst = [(x._get_pk_val(), smart_text(x)) for x in queryset]
return first_choice + lst
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
# Defer to the actual field definition for db prep
return self.field.get_db_prep_lookup(lookup_type, value,
connection=connection, prepared=prepared)
def editable_fields(self):
"Get the fields in this class that should be edited inline."
return [f for f in self.opts.fields + self.opts.many_to_many if f.editable and f != self.field]
def __repr__(self):
return "<RelatedObject: %s related to %s>" % (self.name, self.field.name)
def get_accessor_name(self):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lower-cased object_name + "_set",
# but this can be overridden with the "related_name" option.
if self.field.rel.multiple:
# If this is a symmetrical m2m relation on self, there is no reverse accessor.
if getattr(self.field.rel, 'symmetrical', False) and self.model == self.parent_model:
return None
if self.field.rel.related_name:
return self.field.rel.related_name
if self.opts.default_related_name:
return self.opts.default_related_name % {
'model_name': self.opts.model_name.lower(),
'app_label': self.opts.app_label.lower(),
}
return self.opts.model_name + '_set'
else:
return self.field.rel.related_name or (self.opts.model_name)
def get_cache_name(self):
return "_%s_cache" % self.get_accessor_name()
def get_path_info(self):
return self.field.get_reverse_path_info()
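# Illustrative example (hypothetical models): for a ForeignKey from Comment to
# Article, get_accessor_name() yields "comment_set" (lower-cased model name +
# "_set"), so article.comment_set is the reverse manager, unless the field sets
# related_name or Comment's Meta defines default_related_name.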
|
bsd-3-clause
|
Bulochkin/tensorflow_pack
|
tensorflow/contrib/memory_stats/python/ops/memory_stats_ops.py
|
67
|
1396
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for memory statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.memory_stats.ops import gen_memory_stats_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_memory_stats_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_memory_stats_ops.so"))
def BytesLimit():
"""Generates an op that measures the total memory (in bytes) of a device."""
return gen_memory_stats_ops.bytes_limit()
def MaxBytesInUse():
"""Generates an op that computes the peak memory of a device."""
return gen_memory_stats_ops.max_bytes_in_use()
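# Minimal usage sketch (illustrative, not part of the original module): the ops
# report statistics for the device they are placed on, so they are typically
# pinned to a device and evaluated in a session; `sess` is assumed to be an
# existing tf.Session and '/gpu:0' an available device.
#
#   with tf.device('/gpu:0'):
#       limit = BytesLimit()
#       peak = MaxBytesInUse()
#   limit_bytes, peak_bytes = sess.run([limit, peak])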
|
apache-2.0
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/action/copy.py
|
21
|
26766
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2017 Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import os.path
import stat
import tempfile
import traceback
from itertools import chain
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
def _walk_dirs(topdir, base_path=None, local_follow=False, trailing_slash_detector=None):
"""
Walk a filesystem tree returning enough information to copy the files
:arg topdir: The directory that the filesystem tree is rooted at
:kwarg base_path: The initial directory structure to strip off of the
files for the destination directory. If this is None (the default),
the base_path is set to ``top_dir``.
:kwarg local_follow: Whether to follow symlinks on the source. When set
to False, no symlinks are dereferenced. When set to True (the
default), the code will dereference most symlinks. However, symlinks
can still be present if needed to break a circular link.
:kwarg trailing_slash_detector: Function to determine if a path has
a trailing directory separator. Only needed when dealing with paths on
a remote machine (in which case, pass in a function that is aware of the
directory separator conventions on the remote machine).
:returns: dictionary of tuples. All of the path elements in the structure are text strings.
This separates all the files, directories, and symlinks along with
important information about each::
{ 'files': [('/absolute/path/to/copy/from', 'relative/path/to/copy/to'), ...],
'directories': [('/absolute/path/to/copy/from', 'relative/path/to/copy/to'), ...],
'symlinks': [('/symlink/target/path', 'relative/path/to/copy/to'), ...],
}
The ``symlinks`` field is only populated if ``local_follow`` is set to False
*or* a circular symlink cannot be dereferenced.
"""
# Convert the path segments into byte strings
r_files = {'files': [], 'directories': [], 'symlinks': []}
def _recurse(topdir, rel_offset, parent_dirs, rel_base=u''):
"""
        This is a closure (function utilizing variables from its parent
function's scope) so that we only need one copy of all the containers.
Note that this function uses side effects (See the Variables used from
outer scope).
:arg topdir: The directory we are walking for files
:arg rel_offset: Integer defining how many characters to strip off of
the beginning of a path
:arg parent_dirs: Directories that we're copying that this directory is in.
:kwarg rel_base: String to prepend to the path after ``rel_offset`` is
applied to form the relative path.
Variables used from the outer scope
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:r_files: Dictionary of files in the hierarchy. See the return value
for :func:`walk` for the structure of this dictionary.
:local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
"""
for base_path, sub_folders, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
if os.path.islink(filepath):
                    # Dereference the symlink
real_file = os.path.realpath(filepath)
if local_follow and os.path.isfile(real_file):
# Add the file pointed to by the symlink
r_files['files'].append((real_file, dest_filepath))
else:
# Mark this file as a symlink to copy
r_files['symlinks'].append((os.readlink(filepath), dest_filepath))
else:
# Just a normal file
r_files['files'].append((filepath, dest_filepath))
for dirname in sub_folders:
dirpath = os.path.join(base_path, dirname)
dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
real_dir = os.path.realpath(dirpath)
dir_stats = os.stat(real_dir)
if os.path.islink(dirpath):
if local_follow:
if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
# Just insert the symlink if the target directory
# exists inside of the copy already
r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
else:
# Walk the dirpath to find all parent directories.
new_parents = set()
parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
for parent in range(len(parent_dir_list), 0, -1):
parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
# Reached the point at which the directory
# tree is already known. Don't add any
# more or we might go to an ancestor that
# isn't being copied.
break
new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
                                # This was a circular symlink. So add it as
# a symlink
r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
else:
# Walk the directory pointed to by the symlink
r_files['directories'].append((real_dir, dest_dirpath))
offset = len(real_dir) + 1
_recurse(real_dir, offset, parent_dirs.union(new_parents), rel_base=dest_dirpath)
else:
# Add the symlink to the destination
r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
else:
# Just a normal directory
r_files['directories'].append((dirpath, dest_dirpath))
# Check if the source ends with a "/" so that we know which directory
# level to work at (similar to rsync)
source_trailing_slash = False
if trailing_slash_detector:
source_trailing_slash = trailing_slash_detector(topdir)
else:
source_trailing_slash = topdir.endswith(os.path.sep)
# Calculate the offset needed to strip the base_path to make relative
# paths
if base_path is None:
base_path = topdir
if not source_trailing_slash:
base_path = os.path.dirname(base_path)
if topdir.startswith(base_path):
offset = len(base_path)
# Make sure we're making the new paths relative
if trailing_slash_detector and not trailing_slash_detector(base_path):
offset += 1
elif not base_path.endswith(os.path.sep):
offset += 1
if os.path.islink(topdir) and not local_follow:
r_files['symlinks'] = (os.readlink(topdir), os.path.basename(topdir))
return r_files
dir_stats = os.stat(topdir)
parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
# Actually walk the directory hierarchy
_recurse(topdir, offset, parents)
return r_files
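# Illustrative example (hypothetical paths) of what _walk_dirs() returns for a
# small tree when the source is given with a trailing slash (so only the
# directory's contents are copied):
#
#   _walk_dirs(u'/tmp/src/') == {
#       'files':       [(u'/tmp/src/a.conf', u'a.conf'),
#                       (u'/tmp/src/sub/b.conf', u'sub/b.conf')],
#       'directories': [(u'/tmp/src/sub', u'sub')],
#       'symlinks':    [],
#   }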
class ActionModule(ActionBase):
def _create_remote_file_args(self, module_args):
# remove action plugin only keys
return dict((k, v) for k, v in module_args.items() if k not in ('content', 'decrypt'))
def _copy_file(self, source_full, source_rel, content, content_tempfile,
dest, task_vars, tmp, delete_remote_tmp):
decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
follow = boolean(self._task.args.get('follow', False), strict=False)
force = boolean(self._task.args.get('force', 'yes'), strict=False)
raw = boolean(self._task.args.get('raw', 'no'), strict=False)
result = {}
result['diff'] = []
# If the local file does not exist, get_real_file() raises AnsibleFileNotFound
try:
source_full = self._loader.get_real_file(source_full, decrypt=decrypt)
except AnsibleFileNotFound as e:
result['failed'] = True
result['msg'] = "could not find src=%s, %s" % (source_full, to_text(e))
self._remove_tmp_path(tmp)
return result
# Get the local mode and set if user wanted it preserved
# https://github.com/ansible/ansible-modules-core/issues/1124
lmode = None
if self._task.args.get('mode', None) == 'preserve':
lmode = '0%03o' % stat.S_IMODE(os.stat(source_full).st_mode)
        # This is kind of an optimization - if the user told us the destination
        # is a dir, do the path manipulation right away, otherwise we still
        # check for dest being a dir via a remote call below.
if self._connection._shell.path_has_trailing_slash(dest):
dest_file = self._connection._shell.join_path(dest, source_rel)
else:
dest_file = self._connection._shell.join_path(dest)
# Create a tmp path if missing only if this is not recursive.
# If this is recursive we already have a tmp path.
if delete_remote_tmp:
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
# Attempt to get remote file info
dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp, checksum=force)
if dest_status['exists'] and dest_status['isdir']:
# The dest is a directory.
if content is not None:
# If source was defined as content remove the temporary file and fail out.
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._remove_tmp_path(tmp)
result['failed'] = True
result['msg'] = "can not use content with a dir as dest"
return result
else:
# Append the relative source location to the destination and get remote stats again
dest_file = self._connection._shell.join_path(dest, source_rel)
dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp, checksum=force)
if dest_status['exists'] and not force:
# remote_file exists so continue to next iteration.
return None
# Generate a hash of the local file.
local_checksum = checksum(source_full)
if local_checksum != dest_status['checksum']:
# The checksums don't match and we will change or error out.
if self._play_context.diff and not raw:
result['diff'].append(self._get_diff_data(dest_file, source_full, task_vars))
if self._play_context.check_mode:
self._remove_tempfile_if_content_defined(content, content_tempfile)
result['changed'] = True
return result
# Define a remote directory that we will copy the file to.
tmp_src = self._connection._shell.join_path(tmp, 'source')
remote_path = None
if not raw:
remote_path = self._transfer_file(source_full, tmp_src)
else:
self._transfer_file(source_full, dest_file)
# We have copied the file remotely and no longer require our content_tempfile
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._loader.cleanup_tmp_file(source_full)
# fix file permissions when the copy is done as a different user
if remote_path:
self._fixup_perms2((tmp, remote_path))
if raw:
# Continue to next iteration if raw is defined.
return None
# Run the copy module
# src and dest here come after original and override them
# we pass dest only to make sure it includes trailing slash in case of recursive copy
new_module_args = self._create_remote_file_args(self._task.args)
new_module_args.update(
dict(
src=tmp_src,
dest=dest,
original_basename=source_rel,
)
)
if lmode:
new_module_args['mode'] = lmode
module_return = self._execute_module(module_name='copy',
module_args=new_module_args, task_vars=task_vars,
tmp=tmp, delete_remote_tmp=delete_remote_tmp)
else:
# no need to transfer the file, already correct hash, but still need to call
# the file module in case we want to change attributes
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._loader.cleanup_tmp_file(source_full)
if raw:
# Continue to next iteration if raw is defined.
self._remove_tmp_path(tmp)
return None
# Fix for https://github.com/ansible/ansible-modules-core/issues/1568.
# If checksums match, and follow = True, find out if 'dest' is a link. If so,
# change it to point to the source of the link.
if follow:
dest_status_nofollow = self._execute_remote_stat(dest_file, all_vars=task_vars, tmp=tmp, follow=False)
if dest_status_nofollow['islnk'] and 'lnk_source' in dest_status_nofollow.keys():
dest = dest_status_nofollow['lnk_source']
# Build temporary module_args.
new_module_args = self._create_remote_file_args(self._task.args)
new_module_args.update(
dict(
src=source_rel,
dest=dest,
original_basename=source_rel,
state='file',
)
)
if lmode:
new_module_args['mode'] = lmode
# Execute the file module.
module_return = self._execute_module(module_name='file',
module_args=new_module_args, task_vars=task_vars,
tmp=tmp, delete_remote_tmp=delete_remote_tmp)
if not module_return.get('checksum'):
module_return['checksum'] = local_checksum
result.update(module_return)
return result
def _get_file_args(self):
new_module_args = {'recurse': False}
if 'attributes' in self._task.args:
new_module_args['attributes'] = self._task.args['attributes']
if 'follow' in self._task.args:
new_module_args['follow'] = self._task.args['follow']
if 'force' in self._task.args:
new_module_args['force'] = self._task.args['force']
if 'group' in self._task.args:
new_module_args['group'] = self._task.args['group']
if 'mode' in self._task.args:
new_module_args['mode'] = self._task.args['mode']
if 'owner' in self._task.args:
new_module_args['owner'] = self._task.args['owner']
if 'selevel' in self._task.args:
new_module_args['selevel'] = self._task.args['selevel']
if 'serole' in self._task.args:
new_module_args['serole'] = self._task.args['serole']
if 'setype' in self._task.args:
new_module_args['setype'] = self._task.args['setype']
if 'seuser' in self._task.args:
new_module_args['seuser'] = self._task.args['seuser']
if 'unsafe_writes' in self._task.args:
new_module_args['unsafe_writes'] = self._task.args['unsafe_writes']
return new_module_args
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp()
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
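    # Overview of run() below: validate src/content/dest, write 'content' to a
    # temporary file when it is given, otherwise resolve 'src' locally
    # (directories are expanded via _walk_dirs()); each file is pushed with
    # _copy_file(), which delegates to the remote 'copy' or 'file' module, and
    # any remaining leaf directories and symlinks are then created with the
    # 'file' module.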
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
local_follow = boolean(self._task.args.get('local_follow', True), strict=False)
result['failed'] = True
if not source and content is None:
result['msg'] = 'src (or content) is required'
elif not dest:
result['msg'] = 'dest is required'
elif source and content is not None:
result['msg'] = 'src and content are mutually exclusive'
elif content is not None and dest is not None and dest.endswith("/"):
result['msg'] = "can not use content with a dir as dest"
else:
del result['failed']
if result.get('failed'):
return result
# Define content_tempfile in case we set it after finding content populated.
content_tempfile = None
# If content is defined make a temp file and write the content into it.
if content is not None:
try:
# If content comes to us as a dict it should be decoded json.
# We need to encode it back into a string to write it out.
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
result['failed'] = True
result['msg'] = "could not write content temp file: %s" % to_native(err)
return result
# if we have first_available_file in our vars
# look up the files and use the first one we find as src
elif remote_src:
result.update(self._execute_module(task_vars=task_vars))
return result
else:
# find_needle returns a path that may not have a trailing slash on
# a directory so we need to determine that now (we use it just
# like rsync does to figure out whether to include the directory
            # or only the files inside the directory)
trailing_slash = source.endswith(os.path.sep)
try:
# find in expected paths
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
result['exception'] = traceback.format_exc()
return result
if trailing_slash != source.endswith(os.path.sep):
if source[-1] == os.path.sep:
source = source[:-1]
else:
source = source + os.path.sep
# A list of source file tuples (full_path, relative_path) which will try to copy to the destination
source_files = {'files': [], 'directories': [], 'symlinks': []}
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
# Get a list of the files we want to replicate on the remote side
source_files = _walk_dirs(source, local_follow=local_follow,
trailing_slash_detector=self._connection._shell.path_has_trailing_slash)
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = self._connection._shell.join_path(dest, '')
# FIXME: Can we optimize cases where there's only one file, no
# symlinks and any number of directories? In the original code,
# empty directories are not copied....
else:
source_files['files'] = [(source, os.path.basename(source))]
changed = False
module_return = dict(changed=False)
# A register for if we executed a module.
# Used to cut down on command calls when not recursive.
module_executed = False
# Optimization: Can delete remote_tmp on the first call if we're only
# copying a single file. Otherwise we keep the remote_tmp until it
# is no longer needed.
delete_remote_tmp = False
if sum(len(f) for f in chain(source_files.values())) == 1:
# Tell _execute_module to delete the file if there is one file.
delete_remote_tmp = True
# If this is a recursive action create a tmp path that we can share as the _exec_module create is too late.
if not delete_remote_tmp:
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
# expand any user home dir specifier
dest = self._remote_expand_user(dest)
implicit_directories = set()
for source_full, source_rel in source_files['files']:
# copy files over. This happens first as directories that have
# a file do not need to be created later
module_return = self._copy_file(source_full, source_rel, content, content_tempfile, dest, task_vars, tmp, delete_remote_tmp)
if module_return is None:
continue
paths = os.path.split(source_rel)
dir_path = ''
for dir_component in paths:
                    dir_path = os.path.join(dir_path, dir_component)
implicit_directories.add(dir_path)
if 'diff' in result and not result['diff']:
del result['diff']
module_executed = True
changed = changed or module_return.get('changed', False)
for src, dest_path in source_files['directories']:
# Find directories that are leaves as they might not have been
# created yet.
if dest_path in implicit_directories:
continue
# Use file module to create these
new_module_args = self._get_file_args()
new_module_args['path'] = os.path.join(dest, dest_path)
new_module_args['state'] = 'directory'
new_module_args['mode'] = self._task.args.get('directory_mode', None)
module_return = self._execute_module(module_name='file',
module_args=new_module_args, task_vars=task_vars,
tmp=tmp, delete_remote_tmp=delete_remote_tmp)
module_executed = True
changed = changed or module_return.get('changed', False)
for target_path, dest_path in source_files['symlinks']:
# Copy symlinks over
new_module_args = self._get_file_args()
new_module_args['path'] = os.path.join(dest, dest_path)
new_module_args['src'] = target_path
new_module_args['state'] = 'link'
new_module_args['force'] = True
module_return = self._execute_module(module_name='file',
module_args=new_module_args, task_vars=task_vars,
tmp=tmp, delete_remote_tmp=delete_remote_tmp)
module_executed = True
if module_return.get('failed'):
result.update(module_return)
if not delete_remote_tmp:
self._remove_tmp_path(tmp)
return result
changed = changed or module_return.get('changed', False)
# the file module returns the file path as 'path', but
# the copy module uses 'dest', so add it if it's not there
if 'path' in module_return and 'dest' not in module_return:
module_return['dest'] = module_return['path']
# Delete tmp path if we were recursive or if we did not execute a module.
if not delete_remote_tmp or (delete_remote_tmp and not module_executed):
self._remove_tmp_path(tmp)
if module_executed and len(source_files['files']) == 1:
result.update(module_return)
else:
result.update(dict(dest=dest, src=source, changed=changed))
return result
|
bsd-3-clause
|
ClusterHQ/libcloud
|
libcloud/test/compute/test_abiquo.py
|
1
|
20736
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abiquo Test Suite
"""
import unittest
import sys
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.abiquo import AbiquoNodeDriver
from libcloud.common.abiquo import ForbiddenError, get_href
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.compute.base import NodeLocation, NodeImage
from libcloud.test.compute import TestCaseMixin
from libcloud.test import MockHttpTestCase
from libcloud.test.file_fixtures import ComputeFileFixtures
class AbiquoNodeDriverTest(unittest.TestCase, TestCaseMixin):
"""
Abiquo Node Driver test suite
"""
def setUp(self):
"""
Set up the driver with the main user
"""
AbiquoNodeDriver.connectionCls.conn_classes = (AbiquoMockHttp, None)
self.driver = AbiquoNodeDriver('son', 'goku',
'http://dummy.host.com/api')
def test_unauthorized_controlled(self):
"""
Test the Unauthorized Exception is Controlled.
Test, through the 'login' method, that a '401 Unauthorized'
        raises an 'InvalidCredsError' instead of the 'MalformedUrlException'
"""
self.assertRaises(InvalidCredsError, AbiquoNodeDriver, 'son',
'goten', 'http://dummy.host.com/api')
def test_forbidden_controlled(self):
"""
Test the Forbidden Exception is Controlled.
Test, through the 'list_images' method, that a '403 Forbidden'
        raises a 'ForbiddenError' instead of the 'MalformedUrlException'
"""
AbiquoNodeDriver.connectionCls.conn_classes = (AbiquoMockHttp, None)
conn = AbiquoNodeDriver('son', 'gohan', 'http://dummy.host.com/api')
self.assertRaises(ForbiddenError, conn.list_images)
def test_handle_other_errors_such_as_not_found(self):
"""
Test common 'logical' exceptions are controlled.
        Test that common exceptions (normally 404-Not Found and 409-Conflict)
        that return an XMLResponse with an explanation of the errors are
        controlled.
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
self.assertRaises(LibcloudError, self.driver.list_images)
def test_ex_create_and_delete_empty_group(self):
"""
Test the creation and deletion of an empty group.
"""
group = self.driver.ex_create_group('libcloud_test_group')
group.destroy()
def test_create_node_no_image_raise_exception(self):
"""
Test 'create_node' without image.
Test the 'create_node' function without 'image' parameter raises
an Exception
"""
self.assertRaises(LibcloudError, self.driver.create_node)
def test_create_node_specify_location(self):
"""
Test you can create a node specifying the location.
"""
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
self.driver.create_node(image=image, location=location)
def test_create_node_specify_wrong_location(self):
"""
Test you can not create a node with wrong location.
"""
image = self.driver.list_images()[0]
location = NodeLocation(435, 'fake-location', 'Spain', self.driver)
self.assertRaises(LibcloudError, self.driver.create_node, image=image,
location=location)
def test_create_node_specify_wrong_image(self):
"""
Test image compatibility.
Some locations only can handle a group of images, not all of them.
Test you can not create a node with incompatible image-location.
"""
# Create fake NodeImage
image = NodeImage(3234, 'dummy-image', self.driver)
location = self.driver.list_locations()[0]
# With this image, it should raise an Exception
self.assertRaises(LibcloudError, self.driver.create_node, image=image,
location=location)
def test_create_node_specify_group_name(self):
"""
Test 'create_node' into a concrete group.
"""
image = self.driver.list_images()[0]
self.driver.create_node(image=image, group_name='new_group_name')
def test_create_group_location_does_not_exist(self):
"""
        Test 'ex_create_group' with a nonexistent location.
        Defines a 'fake' location and tries to create a group in it.
"""
location = NodeLocation(435, 'fake-location', 'Spain', self.driver)
# With this location, it should raise an Exception
self.assertRaises(LibcloudError, self.driver.ex_create_group,
name='new_group_name',
location=location)
def test_destroy_node_response(self):
"""
'destroy_node' basic test.
Override the destroy to return a different node available
to be undeployed. (by default it returns an already undeployed node,
for test creation).
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_destroy_node_response_failed(self):
"""
'destroy_node' asynchronous error.
Test that the driver handles correctly when, for some reason,
the 'destroy' job fails.
"""
self.driver = AbiquoNodeDriver('muten', 'roshi',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertFalse(ret)
def test_destroy_node_allocation_state(self):
"""
Test the 'destroy_node' invalid state.
Try to destroy a node when the node is not running.
"""
self.driver = AbiquoNodeDriver('ve', 'geta',
'http://dummy.host.com/api')
# Override the destroy to return a different node available to be
# undeployed
node = self.driver.list_nodes()[0]
# The mock class with the user:password 've:geta' returns a node that
# is in 'ALLOCATION' state and hence, the 'destroy_node' method should
# raise a LibcloudError
self.assertRaises(LibcloudError, self.driver.destroy_node, node)
def test_destroy_not_deployed_group(self):
"""
Test 'ex_destroy_group' when group is not deployed.
"""
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[1]
self.assertTrue(group.destroy())
def test_destroy_deployed_group(self):
"""
Test 'ex_destroy_group' when there are machines running.
"""
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[0]
self.assertTrue(group.destroy())
def test_destroy_deployed_group_failed(self):
"""
Test 'ex_destroy_group' fails.
Test driver handles correctly when, for some reason, the
asynchronous job fails.
"""
self.driver = AbiquoNodeDriver('muten', 'roshi',
'http://dummy.host.com/api')
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[0]
self.assertFalse(group.destroy())
def test_destroy_group_invalid_state(self):
"""
Test 'ex_destroy_group' invalid state.
Test the Driver raises an exception when the group is in
invalid temporal state.
"""
self.driver = AbiquoNodeDriver('ve', 'geta',
'http://dummy.host.com/api')
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[1]
self.assertRaises(LibcloudError, group.destroy)
def test_run_node(self):
"""
Test 'ex_run_node' feature.
"""
node = self.driver.list_nodes()[0]
# Node is by default in NodeState.TERMINATED and AbiquoState ==
# 'NOT_ALLOCATED'
        # so it is available to be run
self.driver.ex_run_node(node)
def test_run_node_invalid_state(self):
"""
Test 'ex_run_node' invalid state.
        Test the Driver raises an exception when trying to run a
        node that is in an invalid state to run.
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
# Node is by default in AbiquoState = 'ON' for user 'go:trunks'
        # so it is not available to be run
self.assertRaises(LibcloudError, self.driver.ex_run_node, node)
def test_run_node_failed(self):
"""
Test 'ex_run_node' fails.
Test driver handles correctly when, for some reason, the
asynchronous job fails.
"""
self.driver = AbiquoNodeDriver('ten', 'shin',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
# Node is in the correct state, but it fails because of the
# async task and it raises the error.
self.assertRaises(LibcloudError, self.driver.ex_run_node, node)
def test_get_href(self):
xml = '''
<datacenter>
<link href="http://10.60.12.7:80/api/admin/datacenters/2"
type="application/vnd.abiquo.datacenter+xml" rel="edit1"/>
<link href="http://10.60.12.7:80/ponies/bar/foo/api/admin/datacenters/3"
type="application/vnd.abiquo.datacenter+xml" rel="edit2"/>
<link href="http://vdcbridge.interoute.com:80/jclouds/apiouds/api/admin/enterprises/1234"
type="application/vnd.abiquo.datacenter+xml" rel="edit3"/>
</datacenter>
'''
elem = ET.XML(xml)
href = get_href(element=elem, rel='edit1')
self.assertEqual(href, '/admin/datacenters/2')
href = get_href(element=elem, rel='edit2')
self.assertEqual(href, '/admin/datacenters/3')
href = get_href(element=elem, rel='edit3')
self.assertEqual(href, '/admin/enterprises/1234')
class AbiquoMockHttp(MockHttpTestCase):
"""
    Mock the functionality of the remote Abiquo API.
"""
fixtures = ComputeFileFixtures('abiquo')
fixture_tag = 'default'
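    # Each _api_* method below mocks one REST path; the mock HTTP test case maps
    # the request path to a method name (slashes become underscores), so for
    # example a request to /api/cloud/virtualdatacenters is answered by
    # _api_cloud_virtualdatacenters().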
def _api_login(self, method, url, body, headers):
if headers['Authorization'] == 'Basic c29uOmdvdGVu':
expected_response = self.fixtures.load('unauthorized_user.html')
expected_status = httplib.UNAUTHORIZED
else:
expected_response = self.fixtures.load('login.xml')
expected_status = httplib.OK
return (expected_status, expected_response, {}, '')
def _api_cloud_virtualdatacenters(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('vdcs.xml'), {}, '')
def _api_cloud_virtualdatacenters_4(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('vdc_4.xml'), {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances(self, method, url, body, headers):
if method == 'POST':
vapp_name = ET.XML(body).findtext('name')
if vapp_name == 'libcloud_test_group':
# we come from 'test_ex_create_and_delete_empty_group(self):'
# method and so, we return the 'ok' return
response = self.fixtures.load('vdc_4_vapp_creation_ok.xml')
return (httplib.OK, response, {}, '')
elif vapp_name == 'new_group_name':
# we come from 'test_ex_create_and_delete_empty_group(self):'
# method and so, we return the 'ok' return
response = self.fixtures.load('vdc_4_vapp_creation_ok.xml')
return (httplib.OK, response, {}, '')
else:
# It will be a 'GET';
return (httplib.OK, self.fixtures.load('vdc_4_vapps.xml'), {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_5(self, method, url, body, headers):
if method == 'GET':
if headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
# Try to destroy a group with 'needs_sync' state
response = self.fixtures.load('vdc_4_vapp_5_needs_sync.xml')
else:
# Try to destroy a group with 'undeployed' state
response = self.fixtures.load('vdc_4_vapp_5.xml')
return (httplib.OK, response, {}, '')
else:
# it will be a 'DELETE'
return (httplib.NO_CONTENT, '', {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6(self, method, url, body, headers):
if method == 'GET':
# deployed vapp
response = self.fixtures.load('vdc_4_vapp_6.xml')
return (httplib.OK, response, {}, '')
else:
# it will be a 'DELETE'
return (httplib.NO_CONTENT, '', {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_1da8c8b6_86f6_49ef_9d29_57dcc73b875a(self, method, url, body, headers):
if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=':
# User 'muten:roshi' failed task
response = self.fixtures.load(
'vdc_4_vapp_6_undeploy_task_failed.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_undeploy_task.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_5_virtualmachines(
self, method, url, body, headers):
        # This virtual app never has virtual machines
if method == 'GET':
response = self.fixtures.load('vdc_4_vapp_5_vms.xml')
return (httplib.OK, response, {}, '')
elif method == 'POST':
# it must be a POST
response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines(
self, method, url, body, headers):
        # Default-created virtual app's virtual machines
if method == 'GET':
if headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
response = self.fixtures.load('vdc_4_vapp_6_vms_allocated.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_vms.xml')
return (httplib.OK, response, {}, '')
else:
# it must be a POST
response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3(self, method, url, body, headers):
if (headers['Authorization'] == 'Basic Z286dHJ1bmtz' or
headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk='):
# Undeploy node
response = self.fixtures.load("vdc_4_vapp_6_vm_3_deployed.xml")
elif headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
# Try to undeploy a node with 'allocation' state
response = self.fixtures.load('vdc_4_vapp_6_vm_3_allocated.xml')
else:
# Get node
response = self.fixtures.load('vdc_4_vapp_6_vm_3.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_deploy(self, method, url,
body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_b44fe278_6b0f_4dfb_be81_7c03006a93cb(self, method, url, body, headers):
if headers['Authorization'] == 'Basic dGVuOnNoaW4=':
# User 'ten:shin' failed task
response = self.fixtures.load(
'vdc_4_vapp_6_vm_3_deploy_task_failed.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy_task.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_action_undeploy(
self, method, url, body, headers):
response = self.fixtures.load('vdc_4_vapp_6_undeploy.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_reset(self, method,
url, body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_a8c9818e_f389_45b7_be2c_3db3a9689940(self, method, url, body, headers):
if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=':
# User 'muten:roshi' failed task
response = self.fixtures.load(
'vdc_4_vapp_6_undeploy_task_failed.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset_task.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_undeploy(self, method, url,
body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_undeploy.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_network_nics(self, method, url,
body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_nics.xml')
return (httplib.OK, response, {}, '')
def _api_admin_datacenters(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('dcs.xml'), {}, '')
def _api_admin_enterprises_1(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('ent_1.xml'), {}, '')
def _api_admin_enterprises_1_datacenterrepositories(self, method, url, body, headers):
        # When the user is the common one for all the tests ('son', 'goku')
# it creates this basic auth and we return the datacenters value
if headers['Authorization'] == 'Basic Z286dHJ1bmtz':
expected_response = self.fixtures.load("not_found_error.xml")
return (httplib.NOT_FOUND, expected_response, {}, '')
elif headers['Authorization'] != 'Basic c29uOmdvaGFu':
return (httplib.OK, self.fixtures.load('ent_1_dcreps.xml'), {}, '')
else:
# son:gohan user: forbidden error
expected_response = self.fixtures.load("privilege_errors.html")
return (httplib.FORBIDDEN, expected_response, {}, '')
def _api_admin_enterprises_1_datacenterrepositories_2(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('ent_1_dcrep_2.xml'), {}, '')
def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('ent_1_dcrep_2_templates.xml'),
{}, '')
def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates_11(self, method, url, body, headers):
return (
httplib.OK, self.fixtures.load('ent_1_dcrep_2_template_11.xml'),
{}, '')
if __name__ == '__main__':
sys.exit(unittest.main())
|
apache-2.0
|
littleweaver/django-daguerre
|
daguerre/adjustments.py
|
1
|
11610
|
from PIL import Image
from daguerre.utils import exif_aware_resize, exif_aware_size
class AdjustmentRegistry(object):
def __init__(self):
self._registry = {}
self._default = None
def register(self, cls):
self._registry[cls.__name__.lower()] = cls
return cls
def __getitem__(self, key):
return self._registry[key]
def get(self, key, default=None):
return self._registry.get(key, default)
def __contains__(self, item):
return item in self._registry
def items(self):
return self._registry.items()
registry = AdjustmentRegistry()
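# Adjustments register themselves with @registry.register and are looked up by
# their lower-cased class name, e.g. registry['fit'] is Fit and registry['crop']
# is Crop.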
class Adjustment(object):
"""
Base class for all adjustments which can be carried out on an image. The
adjustment itself represents a set of parameters, which can then be
applied to images (taking areas into account if applicable).
Adjustment subclasses need to define two methods: :meth:`calculate` and
:meth:`adjust`. If the method doesn't use areas, you can set the
``uses_areas`` attribute on the method to ``False`` to optimize
adjustment.
:param kwargs: The requested kwargs for the adjustment. The keys must
be in :attr:`parameters` or the adjustment is invalid.
"""
#: Accepted parameters for this adjustment - for example, ``"width"``,
#: ``"height"``, ``"color"``, ``"unicorns"``, etc.
parameters = ()
def __init__(self, **kwargs):
self.kwargs = kwargs
for key in kwargs:
if key not in self.parameters:
raise ValueError('Parameter "{0}" not accepted by {1}.'
''.format(key, self.__class__.__name__))
def calculate(self, dims, areas=None):
"""
Calculates the dimensions of the adjusted image without actually
manipulating the image. By default, just returns the given dimensions.
:param dims: ``(width, height)`` tuple of the current image
dimensions.
:param areas: iterable of :class:`.Area` instances to be considered in
calculating the adjustment.
"""
return dims
calculate.uses_areas = False
def adjust(self, image, areas=None):
"""
Manipulates and returns the image. Must be implemented by subclasses.
:param image: PIL Image which will be adjusted.
:param areas: iterable of :class:`.Area` instances to be considered in
performing the adjustment.
"""
raise NotImplementedError
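# Illustrative sketch (not part of daguerre; the class name is hypothetical and it
# is deliberately left unregistered): a minimal Adjustment subclass. Decorating it
# with @registry.register would expose it under the key 'examplegrayscale'.
class ExampleGrayscale(Adjustment):
    """Converts the image to grayscale without changing its dimensions."""
    parameters = ()
    def calculate(self, dims, areas=None):
        return dims
    calculate.uses_areas = False
    def adjust(self, image, areas=None):
        # PIL's convert('L') returns a single-band (grayscale) copy.
        return image.convert('L')
    adjust.uses_areas = False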
@registry.register
class Fit(Adjustment):
"""
Resizes an image to fit entirely within the given dimensions
without cropping and maintaining the width/height ratio.
If neither width nor height is specified, this adjustment will simply
return a copy of the image.
"""
parameters = ('width', 'height')
def calculate(self, dims, areas=None):
image_width, image_height = dims
width, height = self.kwargs.get('width'), self.kwargs.get('height')
if width is None and height is None:
return image_width, image_height
image_ratio = float(image_width) / image_height
if height is None:
            # Height not given: constrain by width; height follows the aspect ratio.
new_width = int(width)
new_height = int(round(new_width / image_ratio))
elif width is None:
            # Width not given: constrain by height; width follows the aspect ratio.
new_height = int(height)
new_width = int(round(new_height * image_ratio))
else:
# Constrain strictly by both dimensions.
width, height = int(width), int(height)
new_width = int(min(width, round(height * image_ratio)))
new_height = int(min(height, round(width / image_ratio)))
return new_width, new_height
calculate.uses_areas = False
def adjust(self, image, areas=None):
image_width, image_height = exif_aware_size(image)
new_width, new_height = self.calculate((image_width, image_height))
if (new_width, new_height) == (image_width, image_height):
return image.copy()
# Choose a resize filter based on whether
# we're upscaling or downscaling.
if new_width < image_width:
f = Image.ANTIALIAS
else:
f = Image.BICUBIC
return exif_aware_resize(image, (new_width, new_height), f)
adjust.uses_areas = False
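# Illustrative sketch (hypothetical numbers): Fit preserves the aspect ratio and
# never exceeds the given bounds.
def _example_fit_dimensions():
    assert Fit(width=100).calculate((200, 400)) == (100, 200)
    assert Fit(width=100, height=100).calculate((200, 400)) == (50, 100)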
@registry.register
class Crop(Adjustment):
"""
Crops an image to the given width and height, without scaling it.
:class:`~daguerre.models.Area` instances which are passed in will be
protected as much as possible during the crop.
"""
parameters = ('width', 'height')
def calculate(self, dims, areas=None):
image_width, image_height = dims
width, height = self.kwargs.get('width'), self.kwargs.get('height')
# image_width and image_height are known to be defined.
new_width = int(width) if width is not None else image_width
new_height = int(height) if height is not None else image_height
new_width = min(new_width, image_width)
new_height = min(new_height, image_height)
return new_width, new_height
calculate.uses_areas = False
def adjust(self, image, areas=None):
image_width, image_height = exif_aware_size(image)
new_width, new_height = self.calculate((image_width, image_height))
if (new_width, new_height) == (image_width, image_height):
return image.copy()
if not areas:
x1 = int((image_width - new_width) / 2)
y1 = int((image_height - new_height) / 2)
else:
min_penalty = None
optimal_coords = None
for x in range(image_width - new_width + 1):
for y in range(image_height - new_height + 1):
penalty = 0
for area in areas:
penalty += self._get_penalty(area, x, y,
new_width, new_height)
if min_penalty is not None and penalty > min_penalty:
break
if min_penalty is None or penalty < min_penalty:
min_penalty = penalty
optimal_coords = [(x, y)]
elif penalty == min_penalty:
optimal_coords.append((x, y))
x1, y1 = optimal_coords[0]
x2 = x1 + new_width
y2 = y1 + new_height
return image.crop((x1, y1, x2, y2))
def _get_penalty(self, area, x1, y1, new_width, new_height):
x2 = x1 + new_width
y2 = y1 + new_height
if area.x1 >= x1 and area.x2 <= x2 and area.y1 >= y1 and area.y2 <= y2:
# The area is enclosed. No penalty
penalty_area = 0
elif area.x2 < x1 or area.x1 > x2 or area.y2 < y1 or area.y1 > y2:
# The area is excluded. Penalty for the whole thing.
penalty_area = area.area
else:
# Partial penalty.
non_penalty_area = (min(area.x2 - x1, x2 - area.x1, area.width) *
min(area.y2 - y1, y2 - area.y1, area.height))
penalty_area = area.area - non_penalty_area
return penalty_area / area.priority
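# Illustrative sketch (hypothetical sizes): without any Area instances the crop box
# is centred; protected areas shift it via the penalty search above.
def _example_centered_crop_box():
    image_width, image_height = 300, 200
    new_width, new_height = Crop(width=100, height=100).calculate((image_width, image_height))
    x1 = int((image_width - new_width) / 2)
    y1 = int((image_height - new_height) / 2)
    return (x1, y1, x1 + new_width, y1 + new_height)  # (100, 50, 200, 150)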
@registry.register
class RatioCrop(Crop):
"""
Crops an image to the given aspect ratio, without scaling it.
:class:`~daguerre.models.Area` instances which are passed in will be
protected as much as possible during the crop.
"""
#: ``ratio`` should be formatted as ``"<width>:<height>"``
parameters = ('ratio',)
def calculate(self, dims, areas=None):
image_width, image_height = dims
image_ratio = float(image_width) / image_height
ratio_str = self.kwargs.get('ratio')
if ratio_str is None:
return image_width, image_height
width, height = ratio_str.split(':')
ratio = float(width) / float(height)
if ratio > image_ratio:
# New ratio is wider. Cut the height.
new_width = image_width
new_height = int(image_width / ratio)
else:
new_width = int(image_height * ratio)
new_height = image_height
return new_width, new_height
calculate.uses_areas = False
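# Illustrative sketch (hypothetical sizes): the ratio string is "<width>:<height>",
# so a 1920x1200 image cropped to "16:9" keeps its full width and loses height.
def _example_ratio_crop_dimensions():
    return RatioCrop(ratio="16:9").calculate((1920, 1200))  # (1920, 1080)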
@registry.register
class NamedCrop(Adjustment):
"""
Crops an image to the given named area, without scaling it.
:class:`~daguerre.models.Area` instances which are passed in will be
protected as much as possible during the crop.
If no area with the given name exists, this adjustment is a no-op.
"""
parameters = ('name',)
def calculate(self, dims, areas=None):
image_width, image_height = dims
if not areas:
return image_width, image_height
for area in areas:
if area.name == self.kwargs['name']:
break
else:
return image_width, image_height
return area.width, area.height
def adjust(self, image, areas=None):
image_width, image_height = exif_aware_size(image)
if not areas:
return image.copy()
for area in areas:
if area.name == self.kwargs['name']:
break
else:
return image.copy()
return image.crop((area.x1, area.y1,
area.x2, area.y2))
@registry.register
class Fill(Adjustment):
"""
Crops the image to the requested ratio (using the same logic as
:class:`.Crop` to protect :class:`~daguerre.models.Area` instances which
are passed in), then resizes it to the actual requested dimensions. If
``width`` or ``height`` is not given, then the unspecified dimension will
be allowed to expand up to ``max_width`` or ``max_height``, respectively.
"""
parameters = ('width', 'height', 'max_width', 'max_height')
def calculate(self, dims, areas=None):
image_width, image_height = dims
width, height = self.kwargs.get('width'), self.kwargs.get('height')
if width is None and height is None:
# No restrictions: return original dimensions.
return image_width, image_height
max_width = self.kwargs.get('max_width')
max_height = self.kwargs.get('max_height')
image_ratio = float(image_width) / image_height
if width is None:
new_height = int(height)
new_width = int(new_height * image_ratio)
if max_width is not None:
new_width = min(new_width, int(max_width))
elif height is None:
new_width = int(width)
new_height = int(new_width / image_ratio)
if max_height is not None:
new_height = min(new_height, int(max_height))
else:
new_width = int(width)
new_height = int(height)
return new_width, new_height
calculate.uses_areas = False
def adjust(self, image, areas=None):
image_width, image_height = exif_aware_size(image)
new_width, new_height = self.calculate((image_width, image_height))
if (new_width, new_height) == (image_width, image_height):
return image.copy()
ratiocrop = RatioCrop(ratio="{0}:{1}".format(new_width, new_height))
new_image = ratiocrop.adjust(image, areas=areas)
fit = Fit(width=new_width, height=new_height)
return fit.adjust(new_image)
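# Illustrative sketch (hypothetical sizes): Fill is "crop, then resize", so a
# 200x400 image filled to 100x100 is first ratio-cropped to a 200x200 square
# (respecting any protected areas) and then resized down to the target box.
def _example_fill_dimensions():
    return Fill(width=100, height=100).calculate((200, 400))  # (100, 100)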
|
bsd-3-clause
|
herow/planning_qgis
|
python/plugins/processing/algs/lidar/lastools/lasground.py
|
9
|
3362
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
lasground.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterSelection
class lasground(LAStoolsAlgorithm):
NO_BULGE = "NO_BULGE"
TERRAIN = "TERRAIN"
TERRAINS = ["wilderness", "nature", "town", "city", "metro"]
GRANULARITY = "GRANULARITY"
GRANULARITIES = ["coarse", "default", "fine", "extra_fine", "ultra_fine"]
def defineCharacteristics(self):
self.name = "lasground"
self.group = "LAStools"
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParametersHorizontalAndVerticalFeetGUI()
self.addParameter(ParameterBoolean(lasground.NO_BULGE,
self.tr("no triangle bulging during TIN refinement"), False))
self.addParameter(ParameterSelection(lasground.TERRAIN,
self.tr("terrain type"), lasground.TERRAINS, 1))
self.addParameter(ParameterSelection(lasground.GRANULARITY,
self.tr("preprocessing"), lasground.GRANULARITIES, 1))
self.addParametersPointOutputGUI()
self.addParametersAdditionalGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasground")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
self.addParametersHorizontalAndVerticalFeetCommands(commands)
if (self.getParameterValue(lasground.NO_BULGE)):
commands.append("-no_bulge")
method = self.getParameterValue(lasground.TERRAIN)
if (method != 1):
commands.append("-" + lasground.TERRAINS[method])
granularity = self.getParameterValue(lasground.GRANULARITY)
if (granularity != 1):
commands.append("-" + lasground.GRANULARITIES[granularity])
self.addParametersPointOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
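# Illustrative note (hypothetical selections): with "no triangle bulging" checked,
# terrain set to "city" and preprocessing set to "fine", processAlgorithm above
# appends "-no_bulge", "-city" and "-fine" to the lasground command before the
# shared point-output and additional-parameter helpers add their own arguments.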
|
gpl-2.0
|
bdrung/audacity
|
lib-src/lv2/lv2/plugins/eg-fifths.lv2/waflib/Context.py
|
177
|
8376
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,imp,sys
from waflib import Utils,Errors,Logs
import waflib.Node
HEXVERSION=0x1071000
WAFVERSION="1.7.16"
WAFREVISION="73c1705078f8c9c51a33e20f221a309d5a94b5e1"
ABI=98
DBFILE='.wafpickle-%s-%d-%d'%(sys.platform,sys.hexversion,ABI)
APPNAME='APPNAME'
VERSION='VERSION'
TOP='top'
OUT='out'
WSCRIPT_FILE='wscript'
launch_dir=''
run_dir=''
top_dir=''
out_dir=''
waf_dir=''
local_repo=''
remote_repo='http://waf.googlecode.com/git/'
remote_locs=['waflib/extras','waflib/Tools']
g_module=None
STDOUT=1
STDERR=-1
BOTH=0
classes=[]
def create_context(cmd_name,*k,**kw):
global classes
for x in classes:
if x.cmd==cmd_name:
return x(*k,**kw)
ctx=Context(*k,**kw)
ctx.fun=cmd_name
return ctx
class store_context(type):
def __init__(cls,name,bases,dict):
super(store_context,cls).__init__(name,bases,dict)
name=cls.__name__
if name=='ctx'or name=='Context':
return
try:
cls.cmd
except AttributeError:
raise Errors.WafError('Missing command for the context class %r (cmd)'%name)
if not getattr(cls,'fun',None):
cls.fun=cls.cmd
global classes
classes.insert(0,cls)
ctx=store_context('ctx',(object,),{})
class Context(ctx):
errors=Errors
tools={}
def __init__(self,**kw):
try:
rd=kw['run_dir']
except KeyError:
global run_dir
rd=run_dir
self.node_class=type("Nod3",(waflib.Node.Node,),{})
self.node_class.__module__="waflib.Node"
self.node_class.ctx=self
self.root=self.node_class('',None)
self.cur_script=None
self.path=self.root.find_dir(rd)
self.stack_path=[]
self.exec_dict={'ctx':self,'conf':self,'bld':self,'opt':self}
self.logger=None
def __hash__(self):
return id(self)
def load(self,tool_list,*k,**kw):
tools=Utils.to_list(tool_list)
path=Utils.to_list(kw.get('tooldir',''))
for t in tools:
module=load_tool(t,path)
fun=getattr(module,kw.get('name',self.fun),None)
if fun:
fun(self)
def execute(self):
global g_module
self.recurse([os.path.dirname(g_module.root_path)])
def pre_recurse(self,node):
self.stack_path.append(self.cur_script)
self.cur_script=node
self.path=node.parent
def post_recurse(self,node):
self.cur_script=self.stack_path.pop()
if self.cur_script:
self.path=self.cur_script.parent
def recurse(self,dirs,name=None,mandatory=True,once=True):
try:
cache=self.recurse_cache
except AttributeError:
cache=self.recurse_cache={}
for d in Utils.to_list(dirs):
if not os.path.isabs(d):
d=os.path.join(self.path.abspath(),d)
WSCRIPT=os.path.join(d,WSCRIPT_FILE)
WSCRIPT_FUN=WSCRIPT+'_'+(name or self.fun)
node=self.root.find_node(WSCRIPT_FUN)
if node and(not once or node not in cache):
cache[node]=True
self.pre_recurse(node)
try:
function_code=node.read('rU')
exec(compile(function_code,node.abspath(),'exec'),self.exec_dict)
finally:
self.post_recurse(node)
elif not node:
node=self.root.find_node(WSCRIPT)
tup=(node,name or self.fun)
if node and(not once or tup not in cache):
cache[tup]=True
self.pre_recurse(node)
try:
wscript_module=load_module(node.abspath())
user_function=getattr(wscript_module,(name or self.fun),None)
if not user_function:
if not mandatory:
continue
raise Errors.WafError('No function %s defined in %s'%(name or self.fun,node.abspath()))
user_function(self)
finally:
self.post_recurse(node)
elif not node:
if not mandatory:
continue
raise Errors.WafError('No wscript file in directory %s'%d)
def exec_command(self,cmd,**kw):
subprocess=Utils.subprocess
kw['shell']=isinstance(cmd,str)
Logs.debug('runner: %r'%cmd)
Logs.debug('runner_env: kw=%s'%kw)
if self.logger:
self.logger.info(cmd)
if'stdout'not in kw:
kw['stdout']=subprocess.PIPE
if'stderr'not in kw:
kw['stderr']=subprocess.PIPE
try:
if kw['stdout']or kw['stderr']:
p=subprocess.Popen(cmd,**kw)
(out,err)=p.communicate()
ret=p.returncode
else:
out,err=(None,None)
ret=subprocess.Popen(cmd,**kw).wait()
except Exception ,e:
raise Errors.WafError('Execution failure: %s'%str(e),ex=e)
if out:
if not isinstance(out,str):
out=out.decode(sys.stdout.encoding or'iso8859-1')
if self.logger:
self.logger.debug('out: %s'%out)
else:
sys.stdout.write(out)
if err:
if not isinstance(err,str):
err=err.decode(sys.stdout.encoding or'iso8859-1')
if self.logger:
self.logger.error('err: %s'%err)
else:
sys.stderr.write(err)
return ret
def cmd_and_log(self,cmd,**kw):
subprocess=Utils.subprocess
kw['shell']=isinstance(cmd,str)
Logs.debug('runner: %r'%cmd)
if'quiet'in kw:
quiet=kw['quiet']
del kw['quiet']
else:
quiet=None
if'output'in kw:
to_ret=kw['output']
del kw['output']
else:
to_ret=STDOUT
kw['stdout']=kw['stderr']=subprocess.PIPE
if quiet is None:
self.to_log(cmd)
try:
p=subprocess.Popen(cmd,**kw)
(out,err)=p.communicate()
except Exception ,e:
raise Errors.WafError('Execution failure: %s'%str(e),ex=e)
if not isinstance(out,str):
out=out.decode(sys.stdout.encoding or'iso8859-1')
if not isinstance(err,str):
err=err.decode(sys.stdout.encoding or'iso8859-1')
if out and quiet!=STDOUT and quiet!=BOTH:
self.to_log('out: %s'%out)
if err and quiet!=STDERR and quiet!=BOTH:
self.to_log('err: %s'%err)
if p.returncode:
e=Errors.WafError('Command %r returned %r'%(cmd,p.returncode))
e.returncode=p.returncode
e.stderr=err
e.stdout=out
raise e
if to_ret==BOTH:
return(out,err)
elif to_ret==STDERR:
return err
return out
def fatal(self,msg,ex=None):
if self.logger:
self.logger.info('from %s: %s'%(self.path.abspath(),msg))
try:
msg='%s\n(complete log in %s)'%(msg,self.logger.handlers[0].baseFilename)
except Exception:
pass
raise self.errors.ConfigurationError(msg,ex=ex)
def to_log(self,msg):
if not msg:
return
if self.logger:
self.logger.info(msg)
else:
sys.stderr.write(str(msg))
sys.stderr.flush()
def msg(self,msg,result,color=None):
self.start_msg(msg)
if not isinstance(color,str):
color=result and'GREEN'or'YELLOW'
self.end_msg(result,color)
def start_msg(self,msg):
try:
if self.in_msg:
self.in_msg+=1
return
except AttributeError:
self.in_msg=0
self.in_msg+=1
try:
self.line_just=max(self.line_just,len(msg))
except AttributeError:
self.line_just=max(40,len(msg))
for x in(self.line_just*'-',msg):
self.to_log(x)
Logs.pprint('NORMAL',"%s :"%msg.ljust(self.line_just),sep='')
def end_msg(self,result,color=None):
self.in_msg-=1
if self.in_msg:
return
defcolor='GREEN'
if result==True:
msg='ok'
elif result==False:
msg='not found'
defcolor='YELLOW'
else:
msg=str(result)
self.to_log(msg)
Logs.pprint(color or defcolor,msg)
def load_special_tools(self,var,ban=[]):
global waf_dir
lst=self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var)
for x in lst:
if not x.name in ban:
load_tool(x.name.replace('.py',''))
cache_modules={}
def load_module(path):
try:
return cache_modules[path]
except KeyError:
pass
module=imp.new_module(WSCRIPT_FILE)
try:
code=Utils.readf(path,m='rU')
except(IOError,OSError):
raise Errors.WafError('Could not read the file %r'%path)
module_dir=os.path.dirname(path)
sys.path.insert(0,module_dir)
exec(compile(code,path,'exec'),module.__dict__)
sys.path.remove(module_dir)
cache_modules[path]=module
return module
def load_tool(tool,tooldir=None):
if tool=='java':
tool='javaw'
elif tool=='compiler_cc':
tool='compiler_c'
else:
tool=tool.replace('++','xx')
if tooldir:
assert isinstance(tooldir,list)
sys.path=tooldir+sys.path
try:
__import__(tool)
ret=sys.modules[tool]
Context.tools[tool]=ret
return ret
finally:
for d in tooldir:
sys.path.remove(d)
else:
global waf_dir
try:
os.stat(os.path.join(waf_dir,'waflib','extras',tool+'.py'))
except OSError:
try:
os.stat(os.path.join(waf_dir,'waflib','Tools',tool+'.py'))
except OSError:
d=tool
else:
d='waflib.Tools.%s'%tool
else:
d='waflib.extras.%s'%tool
__import__(d)
ret=sys.modules[d]
Context.tools[tool]=ret
return ret
|
gpl-2.0
|
CouchPotato/CouchPotatoV1
|
library/sqlalchemy/dialects/mysql/pyodbc.py
|
18
|
2514
|
"""Support for the MySQL database via the pyodbc adapter.
pyodbc is available at:
http://pypi.python.org/pypi/pyodbc/
Connecting
----------
Connect string::
mysql+pyodbc://<username>:<password>@<dsnname>
Limitations
-----------
The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.
"""
from sqlalchemy.dialects.mysql.base import MySQLDialect, MySQLExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy.engine import base as engine_base
from sqlalchemy import util
import re
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
supports_unicode_statements = False
execution_ctx_cls = MySQLExecutionContext_pyodbc
pyodbc_driver_name = "MySQL"
def __init__(self, **kw):
# deal with http://code.google.com/p/pyodbc/issues/detail?id=25
kw.setdefault('convert_unicode', True)
super(MySQLDialect_pyodbc, self).__init__(**kw)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
for key in ('character_set_connection', 'character_set'):
if opts.get(key, None):
return opts[key]
util.warn("Could not detect the connection character set. Assuming latin1.")
return 'latin1'
def _extract_error_code(self, exception):
m = re.compile(r"\((\d+)\)").search(str(exception.args))
        c = m.group(1) if m else None
if c:
return int(c)
else:
return None
dialect = MySQLDialect_pyodbc
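# Illustrative sketch (not part of this module; the DSN name and credentials are
# placeholders): building an Engine with the connect string documented at the top.
def _example_pyodbc_engine():
    from sqlalchemy import create_engine
    return create_engine("mysql+pyodbc://scott:tiger@some_dsn")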
|
gpl-3.0
|
jinnykoo/wuyisj
|
src/oscar/apps/order/south_migrations/0017_auto__add_field_order_shipping_code.py
|
13
|
36010
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Order.shipping_code'
db.add_column(u'order_order', 'shipping_code',
self.gf('django.db.models.fields.CharField')(default='', max_length=128, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Order.shipping_code'
db.delete_column(u'order_order', 'shipping_code')
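    # Illustrative note (not generated by South): this migration is applied with
    # "python manage.py migrate order" and rolled back by migrating to the previous
    # number, e.g. "python manage.py migrate order 0016".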
models = {
u'address.country': {
'Meta': {'ordering': "('-display_order', 'name')", 'object_name': 'Country'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': u"orm['catalogue.AttributeEntityType']"})
},
u'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
u'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': u"orm['catalogue.AttributeOptionGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
u'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.ProductAttribute']", 'through': u"orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Category']", 'through': u"orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': u"orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Product']", 'symmetrical': 'False', 'through': u"orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
u'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ProductAttribute']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': u"orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': u"orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'customer.communicationeventtype': {
'Meta': {'object_name': 'CommunicationEventType'},
'category': ('django.db.models.fields.CharField', [], {'default': "u'Order related'", 'max_length': '255'}),
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'blank': 'True'})
},
u'order.billingaddress': {
'Meta': {'object_name': 'BillingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postcode': ('oscar.models.fields.UppercaseCharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'order.communicationevent': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'CommunicationEvent'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['customer.CommunicationEventType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'communication_events'", 'to': u"orm['order.Order']"})
},
u'order.line': {
'Meta': {'object_name': 'Line'},
'est_dispatch_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_price_before_discounts_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_before_discounts_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': u"orm['order.Order']"}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['partner.Partner']"}),
'partner_line_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'partner_line_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'partner_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'stockrecord': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['partner.StockRecord']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit_cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_retail_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
u'order.lineattribute': {
'Meta': {'object_name': 'LineAttribute'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': u"orm['order.Line']"}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_attributes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['catalogue.Option']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'order.lineprice': {
'Meta': {'ordering': "('id',)", 'object_name': 'LinePrice'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': u"orm['order.Line']"}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_prices'", 'to': u"orm['order.Order']"}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'})
},
u'order.order': {
'Meta': {'ordering': "['-date_placed']", 'object_name': 'Order'},
'basket_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.BillingAddress']", 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
'date_placed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'guest_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.ShippingAddress']", 'null': 'True', 'blank': 'True'}),
'shipping_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'total_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'total_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': u"orm['{0}']".format(AUTH_USER_MODEL)})
},
u'order.orderdiscount': {
'Meta': {'object_name': 'OrderDiscount'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'category': ('django.db.models.fields.CharField', [], {'default': "'Basket'", 'max_length': '64'}),
'frequency': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'offer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'offer_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discounts'", 'to': u"orm['order.Order']"}),
'voucher_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'voucher_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'order.ordernote': {
'Meta': {'object_name': 'OrderNote'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'note_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': u"orm['order.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['{0}']".format(AUTH_USER_MODEL), 'null': 'True'})
},
u'order.paymentevent': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'PaymentEvent'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.PaymentEventType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['order.Line']", 'through': u"orm['order.PaymentEventQuantity']", 'symmetrical': 'False'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'to': u"orm['order.Order']"}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'shipping_event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'null': 'True', 'to': u"orm['order.ShippingEvent']"})
},
u'order.paymenteventquantity': {
'Meta': {'object_name': 'PaymentEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': u"orm['order.PaymentEvent']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_event_quantities'", 'to': u"orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'order.paymenteventtype': {
'Meta': {'ordering': "('name',)", 'object_name': 'PaymentEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'order.shippingaddress': {
'Meta': {'object_name': 'ShippingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'postcode': ('oscar.models.fields.UppercaseCharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'order.shippingevent': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'ShippingEvent'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.ShippingEventType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shipping_events'", 'symmetrical': 'False', 'through': u"orm['order.ShippingEventQuantity']", 'to': u"orm['order.Line']"}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_events'", 'to': u"orm['order.Order']"})
},
u'order.shippingeventquantity': {
'Meta': {'object_name': 'ShippingEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': u"orm['order.ShippingEvent']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_event_quantities'", 'to': u"orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'order.shippingeventtype': {
'Meta': {'ordering': "('name',)", 'object_name': 'ShippingEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'partner.partner': {
'Meta': {'object_name': 'Partner'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['{0}']".format(AUTH_USER_MODEL)})
},
u'partner.stockrecord': {
'Meta': {'unique_together': "(('partner', 'partner_sku'),)", 'object_name': 'StockRecord'},
'cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'low_stock_threshold': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_allocated': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': u"orm['partner.Partner']"}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'price_currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'price_retail': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': u"orm['catalogue.Product']"})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['order']
|
bsd-3-clause
|
DataDog/integrations-extras
|
tidb/tests/conftest.py
|
1
|
3226
|
import os
from copy import deepcopy
import mock
import pytest
from datadog_checks.dev import get_docker_hostname, get_here
HERE = get_here()
HOST = get_docker_hostname()
TIDB_PORT = 10080
TIKV_PORT = 20180
PD_PORT = 2379
TIFLASH_PROXY_PORT = 20292
TIFLASH_PORT = 8234
TICDC_PORT = 8301
DM_MASTER_PORT = 8261
DM_WORKER_PORT = 8262
PUMP_PORT = 8250
# mock metrics
@pytest.fixture()
def mock_tidb_metrics():
with mock.patch(
'requests.get',
return_value=mock.MagicMock(
status_code=200,
iter_lines=lambda **kwargs: get_mock_metrics("mock_tidb_metrics.txt").split("\n"),
headers={'Content-Type': "text/plain"},
),
):
yield
@pytest.fixture()
def mock_pd_metrics():
with mock.patch(
'requests.get',
return_value=mock.MagicMock(
status_code=200,
iter_lines=lambda **kwargs: get_mock_metrics("mock_pd_metrics.txt").split("\n"),
headers={'Content-Type': "text/plain"},
),
):
yield
@pytest.fixture()
def mock_tikv_metrics():
with mock.patch(
'requests.get',
return_value=mock.MagicMock(
status_code=200,
iter_lines=lambda **kwargs: get_mock_metrics("mock_tikv_metrics.txt").split("\n"),
headers={'Content-Type': "text/plain"},
),
):
yield
def get_mock_metrics(filename):
f_name = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(f_name, 'r') as f:
text_data = f.read()
return text_data
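# Illustrative sketch: each mock fixture above patches requests.get, so a check run
# inside the test reads the canned exposition-format payload instead of querying a
# live cluster; the helper below returns those canned lines.
def _example_canned_tidb_lines():
    return get_mock_metrics("mock_tidb_metrics.txt").split("\n")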
# tidb check instance
required_instance = {
'tidb_metric_url': "http://{}:{}/metrics".format(HOST, TIDB_PORT),
'tikv_metric_url': "http://{}:{}/metrics".format(HOST, TIKV_PORT),
'pd_metric_url': "http://{}:{}/metrics".format(HOST, PD_PORT),
}
@pytest.fixture(scope="session")
def full_instance():
base = deepcopy(required_instance)
base.update(
{
"tiflash_metric_url": "http://{}:{}/metrics".format(HOST, TIFLASH_PORT),
"tiflash_proxy_metric_url": "http://{}:{}/metrics".format(HOST, TIFLASH_PROXY_PORT),
"ticdc_metric_url": "http://{}:{}/metrics".format(HOST, TICDC_PORT),
"dm_master_metric_url": "http://{}:{}/metrics".format(HOST, DM_MASTER_PORT),
"dm_worker_metric_url": "http://{}:{}/metrics".format(HOST, DM_WORKER_PORT),
"pump_metric_url": "http://{}:{}/metrics".format(HOST, PUMP_PORT),
}
)
return base
@pytest.fixture(scope="session")
def customized_metric_instance():
base = deepcopy(required_instance)
base.update({"tidb_customized_metrics": [{"tidb_tikvclient_rawkv_cmd_seconds": "tikvclient_rawkv_cmd_seconds"}]})
return base
# openmetrics check instances
@pytest.fixture(scope="session")
def tidb_instance():
return {'prometheus_url': "http://{}:{}/metrics".format(HOST, TIDB_PORT), 'namespace': 'tidb'}
@pytest.fixture(scope="session")
def tikv_instance():
return {'prometheus_url': "http://{}:{}/metrics".format(HOST, TIKV_PORT), 'namespace': 'tikv'}
@pytest.fixture(scope="session")
def pd_instance():
return {'prometheus_url': "http://{}:{}/metrics".format(HOST, PD_PORT), 'namespace': 'pd'}
|
bsd-3-clause
|
gzzhanghao/mitmproxy
|
mitmproxy/protocol/http_replay.py
|
3
|
4482
|
from __future__ import absolute_import, print_function, division
import traceback
import netlib.exceptions
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import models
from netlib.http import http1
from netlib import basethread
# TODO: Doesn't really belong into mitmproxy.protocol...
class RequestReplayThread(basethread.BaseThread):
name = "RequestReplayThread"
def __init__(self, config, flow, event_queue, should_exit):
"""
event_queue can be a queue or None, if no scripthooks should be
processed.
"""
self.config, self.flow = config, flow
if event_queue:
self.channel = controller.Channel(event_queue, should_exit)
else:
self.channel = None
super(RequestReplayThread, self).__init__(
"RequestReplay (%s)" % flow.request.url
)
def run(self):
r = self.flow.request
first_line_format_backup = r.first_line_format
try:
self.flow.response = None
# If we have a channel, run script hooks.
if self.channel:
request_reply = self.channel.ask("request", self.flow)
if isinstance(request_reply, models.HTTPResponse):
self.flow.response = request_reply
if not self.flow.response:
# In all modes, we directly connect to the server displayed
if self.config.options.mode == "upstream":
server_address = self.config.upstream_server.address
server = models.ServerConnection(server_address, (self.config.options.listen_host, 0))
server.connect()
if r.scheme == "https":
connect_request = models.make_connect_request((r.data.host, r.port))
server.wfile.write(http1.assemble_request(connect_request))
server.wfile.flush()
resp = http1.read_response(
server.rfile,
connect_request,
body_size_limit=self.config.options.body_size_limit
)
if resp.status_code != 200:
raise exceptions.ReplayException("Upstream server refuses CONNECT request")
server.establish_ssl(
self.config.clientcerts,
sni=self.flow.server_conn.sni
)
r.first_line_format = "relative"
else:
r.first_line_format = "absolute"
else:
server_address = (r.host, r.port)
server = models.ServerConnection(server_address, (self.config.options.listen_host, 0))
server.connect()
if r.scheme == "https":
server.establish_ssl(
self.config.clientcerts,
sni=self.flow.server_conn.sni
)
r.first_line_format = "relative"
server.wfile.write(http1.assemble_request(r))
server.wfile.flush()
self.flow.server_conn = server
self.flow.response = models.HTTPResponse.wrap(http1.read_response(
server.rfile,
r,
body_size_limit=self.config.options.body_size_limit
))
if self.channel:
response_reply = self.channel.ask("response", self.flow)
if response_reply == exceptions.Kill:
raise exceptions.Kill()
except (exceptions.ReplayException, netlib.exceptions.NetlibException) as e:
self.flow.error = models.Error(str(e))
if self.channel:
self.channel.ask("error", self.flow)
except exceptions.Kill:
# Kill should only be raised if there's a channel in the
# first place.
from ..proxy.root_context import Log
self.channel.tell("log", Log("Connection killed", "info"))
except Exception:
from ..proxy.root_context import Log
self.channel.tell("log", Log(traceback.format_exc(), "error"))
finally:
r.first_line_format = first_line_format_backup
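# Hedged usage sketch (the surrounding names such as `master` and `flow` are assumed,
# not defined in this file): callers typically construct the thread with the proxy
# config, a copy of the flow to replay, an optional event queue for script hooks and a
# should_exit event, then start it like any other thread.
#
#   rt = RequestReplayThread(master.config, flow.copy(), master.event_queue, master.should_exit)
#   rt.start()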
|
mit
|
lhl/vrdev
|
002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/dek/PILNumeric.py
|
2
|
1160
|
# This is statement is required by the build system to query build info
if __name__ == '__build__':
raise Exception
## This isn't really a PyOpenGL demo, but it's a nice
## example of how Numeric, Tkinter, and PIL can be used
## together to create all sorts of images.
try:
import numpy as Numeric
except ImportError, err:
try:
import Numeric
except ImportError, err:
print "This demo requires the numpy or Numeric extension, sorry"
import sys
sys.exit()
import FFT
import Tkinter
import Image
import ImageTk
import sys
w = 256
h = 256
def demo():
data = Numeric.arrayrange(w*h)
## fftdata = FFT.fft(data)
## fftdata2 = FFT.fft(data2)
## fftdata3 = (fftdata + fftdata2) / 2.
## invfftdata = FFT.inverse_fft(fftdata3)
## data = invfftdata.real
data = data.astype('l')
im = Image.new("RGBA", (w, h))
print len(data.tostring("raw", "RGBX", 0, -1))
print len(im.tostring("raw", "RGBX", 0, -1))
im.fromstring(data.tostring("raw", "RGBX", 0, -1),"raw", "RGBX", 0, -1)
root = Tkinter.Tk()
image = ImageTk.PhotoImage(im)
x = Tkinter.Label(root, image=image)
x.pack()
root.mainloop()
|
apache-2.0
|
joealcorn/xbox
|
xbox/vendor/requests/packages/chardet/compat.py
|
2943
|
1157
|
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
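# Minimal illustration (not part of the vendored module's public API): wrap_ord lets
# byte-wise code treat items uniformly, since iterating a byte string yields str items
# on Python 2 but int items on Python 3.
if __name__ == '__main__':
    for item in b"AB":
        print(wrap_ord(item))  # prints 65 then 66 on both Python 2 and Python 3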
|
mit
|
fernandalavalle/mlab-ns
|
environment_bootstrap.py
|
1
|
1853
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2015 Measurement Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
"""Configure Observatory based on the type of environment this is."""
def create_environment_symlink(link_name, environment_type):
target_name = '%s.%s' % (os.path.basename(link_name), environment_type)
print 'Creating symlink %s -> %s' % (link_name, target_name)
existing_link_removed = False
if os.path.islink(link_name):
os.remove(link_name)
existing_link_removed = True
os.symlink(target_name, link_name)
if existing_link_removed:
print 'Warning: Replaced existing symbolic link: %s' % link_name
def setup_environment(environment_type):
create_environment_symlink('server/config.py',
environment_type)
create_environment_symlink('server/app.yaml',
environment_type)
def main(args):
setup_environment(args.environment_type)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='mlab-ns Environment Bootstrapper',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('environment_type',
choices=['testing', 'live'],
help='The type of environment to configure.')
main(parser.parse_args())
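# Example invocation (hypothetical working directory, derived from the argparse setup above):
#
#   ./environment_bootstrap.py testing
#
# which creates the symlinks server/config.py -> config.py.testing and
# server/app.yaml -> app.yaml.testing.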
|
apache-2.0
|
R4stl1n/allianceauth
|
allianceauth/services/modules/mumble/auth_hooks.py
|
5
|
2642
|
import logging
from django.conf import settings
from django.template.loader import render_to_string
from allianceauth.notifications import notify
from allianceauth import hooks
from allianceauth.services.hooks import ServicesHook
from .tasks import MumbleTasks
from .models import MumbleUser
from .urls import urlpatterns
logger = logging.getLogger(__name__)
class MumbleService(ServicesHook):
def __init__(self):
ServicesHook.__init__(self)
self.name = 'mumble'
self.urlpatterns = urlpatterns
self.service_url = settings.MUMBLE_URL
self.access_perm = 'mumble.access_mumble'
self.service_ctrl_template = 'services/mumble/mumble_service_ctrl.html'
self.name_format = '[{corp_ticker}]{character_name}'
def delete_user(self, user, notify_user=False):
        logger.debug("Deleting user %s %s account" % (user, self.name))
try:
if user.mumble.delete():
if notify_user:
notify(user, 'Mumble Account Disabled', level='danger')
return True
return False
except MumbleUser.DoesNotExist:
            logger.debug("User does not have a mumble account")
def update_groups(self, user):
logger.debug("Updating %s groups for %s" % (self.name, user))
if MumbleTasks.has_account(user):
MumbleTasks.update_groups.delay(user.pk)
def validate_user(self, user):
if MumbleTasks.has_account(user) and not self.service_active_for_user(user):
self.delete_user(user, notify_user=True)
def update_all_groups(self):
logger.debug("Updating all %s groups" % self.name)
MumbleTasks.update_all_groups.delay()
def service_active_for_user(self, user):
return user.has_perm(self.access_perm)
def render_services_ctrl(self, request):
urls = self.Urls()
urls.auth_activate = 'mumble:activate'
urls.auth_deactivate = 'mumble:deactivate'
urls.auth_reset_password = 'mumble:reset_password'
urls.auth_set_password = 'mumble:set_password'
return render_to_string(self.service_ctrl_template, {
'service_name': self.title,
'urls': urls,
'service_url': self.service_url,
'connect_url': request.user.mumble.username + '@' + self.service_url if MumbleTasks.has_account(request.user) else self.service_url,
'username': request.user.mumble.username if MumbleTasks.has_account(request.user) else '',
}, request=request)
@hooks.register('services_hook')
def register_mumble_service():
return MumbleService()
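# Hedged note (framework behaviour, not defined in this file): AllianceAuth collects
# these registrations via hooks.get_hooks('services_hook'), so importing this module
# is enough to make the Mumble service appear alongside the other registered services.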
|
gpl-2.0
|
KyleJamesWalker/ansible
|
lib/ansible/modules/files/acl.py
|
44
|
11662
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: acl
version_added: "1.4"
short_description: Sets and retrieves file ACL information.
description:
- Sets and retrieves file ACL information.
options:
path:
required: true
default: null
description:
- The full path of the file or object.
aliases: ['name']
state:
required: false
default: query
choices: [ 'query', 'present', 'absent' ]
description:
- defines whether the ACL should be present or not. The C(query) state gets the current acl without changing it, for use in 'register' operations.
follow:
required: false
default: yes
choices: [ 'yes', 'no' ]
description:
- whether to follow symlinks on the path if a symlink is encountered.
default:
version_added: "1.5"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if
path is a file.
entity:
version_added: "1.5"
required: false
description:
- actual user or group that the ACL applies to when matching entity types user or group are selected.
etype:
version_added: "1.5"
required: false
default: null
choices: [ 'user', 'group', 'mask', 'other' ]
description:
- the entity type of the ACL to apply, see setfacl documentation for more info.
permissions:
version_added: "1.5"
required: false
default: null
description:
- Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively)
entry:
required: false
default: null
description:
- DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for
some types, but the type and perms are always required. '-' can be used as placeholder when you do not care about permissions. This is now
superseded by entity, type and permissions fields.
recursive:
version_added: "2.0"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- Recursively sets the specified ACL (added in Ansible 2.0). Incompatible with C(state=query).
author:
- "Brian Coca (@bcoca)"
- "Jérémie Astori (@astorije)"
notes:
- The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
- As of Ansible 2.0, this module only supports Linux distributions.
- As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well.
'''
EXAMPLES = '''
# Grant user Joe read access to a file
- acl:
path: /etc/foo.conf
entity: joe
etype: user
permissions: r
state: present
# Removes the acl for Joe on a specific file
- acl:
path: /etc/foo.conf
entity: joe
etype: user
state: absent
# Sets default acl for joe on foo.d
- acl:
path: /etc/foo.d
entity: joe
etype: user
permissions: rw
default: yes
state: present
# Same as previous but using entry shorthand
- acl:
path: /etc/foo.d
entry: "default:user:joe:rw-"
state: present
# Obtain the acl for a specific file
- acl:
path: /etc/foo.conf
register: acl_info
'''
RETURN = '''
acl:
description: Current acl on provided path (after changes, if any)
returned: success
type: list
sample: [ "user::rwx", "group::rwx", "other::rwx" ]
'''
import os
# import module snippets
from ansible.module_utils.basic import AnsibleModule, get_platform
def split_entry(entry):
''' splits entry and ensures normalized return'''
a = entry.split(':')
d = None
if entry.lower().startswith("d"):
d = True
a.pop(0)
if len(a) == 2:
a.append(None)
t, e, p = a
t = t.lower()
if t.startswith("u"):
t = "user"
elif t.startswith("g"):
t = "group"
elif t.startswith("m"):
t = "mask"
elif t.startswith("o"):
t = "other"
else:
t = None
return [d, t, e, p]
def build_entry(etype, entity, permissions=None, use_nfsv4_acls=False):
'''Builds and returns an entry string. Does not include the permissions bit if they are not provided.'''
if use_nfsv4_acls:
return ':'.join([etype, entity, permissions, 'allow'])
if permissions:
return etype + ':' + entity + ':' + permissions
else:
return etype + ':' + entity
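# Illustrative values only (derived from the two helpers above, not part of the module API):
#   split_entry("user:joe:rw-")            -> [None, 'user', 'joe', 'rw-']
#   split_entry("default:group:admins:r")  -> [True, 'group', 'admins', 'r']
#   build_entry('user', 'joe', 'rw')       -> 'user:joe:rw'
#   build_entry('user', 'joe')             -> 'user:joe'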
def build_command(module, mode, path, follow, default, recursive, entry=''):
'''Builds and returns a getfacl/setfacl command.'''
if mode == 'set':
cmd = [module.get_bin_path('setfacl', True)]
cmd.append('-m "%s"' % entry)
elif mode == 'rm':
cmd = [module.get_bin_path('setfacl', True)]
cmd.append('-x "%s"' % entry)
else: # mode == 'get'
cmd = [module.get_bin_path('getfacl', True)]
# prevents absolute path warnings and removes headers
if get_platform().lower() == 'linux':
cmd.append('--omit-header')
cmd.append('--absolute-names')
if recursive:
cmd.append('--recursive')
if not follow:
if get_platform().lower() == 'linux':
cmd.append('--physical')
elif get_platform().lower() == 'freebsd':
cmd.append('-h')
if default:
if(mode == 'rm'):
cmd.insert(1, '-k')
else: # mode == 'set' or mode == 'get'
cmd.insert(1, '-d')
cmd.append(path)
return cmd
def acl_changed(module, cmd):
'''Returns true if the provided command affects the existing ACLs, false otherwise.'''
# FreeBSD do not have a --test flag, so by default, it is safer to always say "true"
if get_platform().lower() == 'freebsd':
return True
cmd = cmd[:] # lists are mutables so cmd would be overwritten without this
cmd.insert(1, '--test')
lines = run_acl(module, cmd)
for line in lines:
if not line.endswith('*,*'):
return True
return False
def run_acl(module, cmd, check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
    except Exception as e:
        module.fail_json(msg=e.strerror)
lines = []
for l in out.splitlines():
if not l.startswith('#'):
lines.append(l.strip())
if lines and not lines[-1].split():
# trim last line only when it is empty
return lines[:-1]
else:
return lines
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, aliases=['name'], type='path'),
entry=dict(required=False, type='str'),
entity=dict(required=False, type='str', default=''),
etype=dict(
required=False,
choices=['other', 'user', 'group', 'mask'],
type='str'
),
permissions=dict(required=False, type='str'),
state=dict(
required=False,
default='query',
choices=['query', 'present', 'absent'],
type='str'
),
follow=dict(required=False, type='bool', default=True),
default=dict(required=False, type='bool', default=False),
recursive=dict(required=False, type='bool', default=False),
use_nfsv4_acls=dict(required=False, type='bool', default=False)
),
supports_check_mode=True,
)
if get_platform().lower() not in ['linux', 'freebsd']:
module.fail_json(msg="The acl module is not available on this system.")
path = module.params.get('path')
entry = module.params.get('entry')
entity = module.params.get('entity')
etype = module.params.get('etype')
permissions = module.params.get('permissions')
state = module.params.get('state')
follow = module.params.get('follow')
default = module.params.get('default')
recursive = module.params.get('recursive')
use_nfsv4_acls = module.params.get('use_nfsv4_acls')
if not os.path.exists(path):
module.fail_json(msg="Path not found or not accessible.")
if state == 'query' and recursive:
module.fail_json(msg="'recursive' MUST NOT be set when 'state=query'.")
if not entry:
if state == 'absent' and permissions:
module.fail_json(msg="'permissions' MUST NOT be set when 'state=absent'.")
if state == 'absent' and not entity:
module.fail_json(msg="'entity' MUST be set when 'state=absent'.")
if state in ['present', 'absent'] and not etype:
module.fail_json(msg="'etype' MUST be set when 'state=%s'." % state)
if entry:
if etype or entity or permissions:
module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.")
if state == 'present' and not entry.count(":") in [2, 3]:
module.fail_json(msg="'entry' MUST have 3 or 4 sections divided by ':' when 'state=present'.")
if state == 'absent' and not entry.count(":") in [1, 2]:
module.fail_json(msg="'entry' MUST have 2 or 3 sections divided by ':' when 'state=absent'.")
if state == 'query':
module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.")
default_flag, etype, entity, permissions = split_entry(entry)
if default_flag is not None:
default = default_flag
if get_platform().lower() == 'freebsd':
if recursive:
module.fail_json(msg="recursive is not supported on that platform.")
changed = False
msg = ""
if state == 'present':
entry = build_entry(etype, entity, permissions, use_nfsv4_acls)
command = build_command(
module, 'set', path, follow,
default, recursive, entry
)
changed = acl_changed(module, command)
if changed and not module.check_mode:
run_acl(module, command)
msg = "%s is present" % entry
elif state == 'absent':
        entry = build_entry(etype, entity, use_nfsv4_acls=use_nfsv4_acls)
command = build_command(
module, 'rm', path, follow,
default, recursive, entry
)
changed = acl_changed(module, command)
if changed and not module.check_mode:
run_acl(module, command, False)
msg = "%s is absent" % entry
elif state == 'query':
msg = "current acl"
acl = run_acl(
module,
build_command(module, 'get', path, follow, default, recursive)
)
module.exit_json(changed=changed, msg=msg, acl=acl)
if __name__ == '__main__':
main()
|
gpl-3.0
|
OsirisSPS/osiris-sps
|
client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/plat-irix6/readcd.py
|
132
|
8576
|
# Class interface to the CD module.
from warnings import warnpy3k
warnpy3k("the readcd module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import cd, CD
class Error(Exception):
pass
class _Stop(Exception):
pass
def _doatime(self, cb_type, data):
if ((data[0] * 60) + data[1]) * 75 + data[2] > self.end:
## print 'done with list entry', repr(self.listindex)
raise _Stop
func, arg = self.callbacks[cb_type]
if func:
func(arg, cb_type, data)
def _dopnum(self, cb_type, data):
if data > self.end:
## print 'done with list entry', repr(self.listindex)
raise _Stop
func, arg = self.callbacks[cb_type]
if func:
func(arg, cb_type, data)
class Readcd:
def __init__(self, *arg):
if len(arg) == 0:
self.player = cd.open()
elif len(arg) == 1:
self.player = cd.open(arg[0])
elif len(arg) == 2:
self.player = cd.open(arg[0], arg[1])
else:
raise Error, 'bad __init__ call'
self.list = []
self.callbacks = [(None, None)] * 8
self.parser = cd.createparser()
self.playing = 0
self.end = 0
self.status = None
self.trackinfo = None
def eject(self):
self.player.eject()
self.list = []
self.end = 0
self.listindex = 0
self.status = None
self.trackinfo = None
if self.playing:
## print 'stop playing from eject'
raise _Stop
def pmsf2msf(self, track, min, sec, frame):
if not self.status:
self.cachestatus()
if track < self.status[5] or track > self.status[6]:
raise Error, 'track number out of range'
if not self.trackinfo:
self.cacheinfo()
start, total = self.trackinfo[track]
start = ((start[0] * 60) + start[1]) * 75 + start[2]
total = ((total[0] * 60) + total[1]) * 75 + total[2]
block = ((min * 60) + sec) * 75 + frame
if block > total:
raise Error, 'out of range'
block = start + block
min, block = divmod(block, 75*60)
sec, frame = divmod(block, 75)
return min, sec, frame
def reset(self):
self.list = []
def appendtrack(self, track):
self.appendstretch(track, track)
def appendstretch(self, start, end):
if not self.status:
self.cachestatus()
if not start:
start = 1
if not end:
end = self.status[6]
if type(end) == type(0):
if end < self.status[5] or end > self.status[6]:
raise Error, 'range error'
else:
l = len(end)
if l == 4:
prog, min, sec, frame = end
if prog < self.status[5] or prog > self.status[6]:
raise Error, 'range error'
end = self.pmsf2msf(prog, min, sec, frame)
elif l != 3:
raise Error, 'syntax error'
if type(start) == type(0):
if start < self.status[5] or start > self.status[6]:
raise Error, 'range error'
if len(self.list) > 0:
s, e = self.list[-1]
if type(e) == type(0):
if start == e+1:
start = s
del self.list[-1]
else:
l = len(start)
if l == 4:
prog, min, sec, frame = start
if prog < self.status[5] or prog > self.status[6]:
raise Error, 'range error'
start = self.pmsf2msf(prog, min, sec, frame)
elif l != 3:
raise Error, 'syntax error'
self.list.append((start, end))
def settracks(self, list):
self.list = []
for track in list:
self.appendtrack(track)
def setcallback(self, cb_type, func, arg):
if cb_type < 0 or cb_type >= 8:
raise Error, 'type out of range'
self.callbacks[cb_type] = (func, arg)
if self.playing:
start, end = self.list[self.listindex]
if type(end) == type(0):
if cb_type != CD.PNUM:
self.parser.setcallback(cb_type, func, arg)
else:
if cb_type != CD.ATIME:
self.parser.setcallback(cb_type, func, arg)
def removecallback(self, cb_type):
if cb_type < 0 or cb_type >= 8:
raise Error, 'type out of range'
self.callbacks[cb_type] = (None, None)
if self.playing:
start, end = self.list[self.listindex]
if type(end) == type(0):
if cb_type != CD.PNUM:
self.parser.removecallback(cb_type)
else:
if cb_type != CD.ATIME:
self.parser.removecallback(cb_type)
def gettrackinfo(self, *arg):
if not self.status:
self.cachestatus()
if not self.trackinfo:
self.cacheinfo()
if len(arg) == 0:
return self.trackinfo[self.status[5]:self.status[6]+1]
result = []
for i in arg:
if i < self.status[5] or i > self.status[6]:
raise Error, 'range error'
result.append(self.trackinfo[i])
return result
def cacheinfo(self):
if not self.status:
self.cachestatus()
self.trackinfo = []
for i in range(self.status[5]):
self.trackinfo.append(None)
for i in range(self.status[5], self.status[6]+1):
self.trackinfo.append(self.player.gettrackinfo(i))
def cachestatus(self):
self.status = self.player.getstatus()
if self.status[0] == CD.NODISC:
self.status = None
raise Error, 'no disc in player'
def getstatus(self):
return self.player.getstatus()
def play(self):
if not self.status:
self.cachestatus()
size = self.player.bestreadsize()
self.listindex = 0
self.playing = 0
for i in range(8):
func, arg = self.callbacks[i]
if func:
self.parser.setcallback(i, func, arg)
else:
self.parser.removecallback(i)
if len(self.list) == 0:
for i in range(self.status[5], self.status[6]+1):
self.appendtrack(i)
try:
while 1:
if not self.playing:
if self.listindex >= len(self.list):
return
start, end = self.list[self.listindex]
if type(start) == type(0):
dummy = self.player.seektrack(
start)
else:
min, sec, frame = start
dummy = self.player.seek(
min, sec, frame)
if type(end) == type(0):
self.parser.setcallback(
CD.PNUM, _dopnum, self)
self.end = end
func, arg = \
self.callbacks[CD.ATIME]
if func:
self.parser.setcallback(CD.ATIME, func, arg)
else:
self.parser.removecallback(CD.ATIME)
else:
min, sec, frame = end
self.parser.setcallback(
CD.ATIME, _doatime,
self)
self.end = (min * 60 + sec) * \
75 + frame
func, arg = \
self.callbacks[CD.PNUM]
if func:
self.parser.setcallback(CD.PNUM, func, arg)
else:
self.parser.removecallback(CD.PNUM)
self.playing = 1
data = self.player.readda(size)
if data == '':
self.playing = 0
self.listindex = self.listindex + 1
continue
try:
self.parser.parseframe(data)
except _Stop:
self.playing = 0
self.listindex = self.listindex + 1
finally:
self.playing = 0
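# Illustrative usage only (the cd/CD modules exist solely on classic SGI IRIX builds of
# Python, and my_callback is a hypothetical function taking (arg, cb_type, data)):
#   r = Readcd()
#   r.appendtrack(1)
#   r.setcallback(CD.PNUM, my_callback, None)
#   r.play()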
|
gpl-3.0
|
mkieszek/odoo
|
addons/purchase/partner.py
|
3
|
1605
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
class res_partner(osv.osv):
_name = 'res.partner'
_inherit = 'res.partner'
def _purchase_invoice_count(self, cr, uid, ids, field_name, arg, context=None):
PurchaseOrder = self.pool['purchase.order']
Invoice = self.pool['account.invoice']
return {
partner_id: {
'purchase_order_count': PurchaseOrder.search_count(cr,uid, [('partner_id', 'child_of', partner_id)], context=context),
'supplier_invoice_count': Invoice.search_count(cr,uid, [('partner_id', 'child_of', partner_id), ('type','=','in_invoice')], context=context)
}
for partner_id in ids
}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['property_product_pricelist_purchase']
_columns = {
'property_product_pricelist_purchase': fields.property(
type='many2one',
relation='product.pricelist',
domain=[('type','=','purchase')],
string="Purchase Pricelist",
help="This pricelist will be used, instead of the default one, for purchases from the current partner"),
'purchase_order_count': fields.function(_purchase_invoice_count, string='# of Purchase Order', type='integer', multi="count"),
'supplier_invoice_count': fields.function(_purchase_invoice_count, string='# Vendor Bills', type='integer', multi="count"),
}
|
agpl-3.0
|
odin1314/security_monkey
|
security_monkey/views/__init__.py
|
6
|
4669
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from security_monkey import db
from security_monkey import app
from flask_wtf.csrf import generate_csrf
from security_monkey.decorators import crossdomain
from flask.ext.restful import fields, marshal, Resource, reqparse
from flask.ext.login import current_user
ORIGINS = [
'https://{}:{}'.format(app.config.get('FQDN'), app.config.get('WEB_PORT')),
# Adding this next one so you can also access the dart UI by prepending /static to the path.
'https://{}:{}'.format(app.config.get('FQDN'), app.config.get('API_PORT')),
'https://{}:{}'.format(app.config.get('FQDN'), app.config.get('NGINX_PORT')),
'https://{}:80'.format(app.config.get('FQDN')),
# FOR LOCAL DEV IN DART EDITOR:
'http://127.0.0.1:3030',
'http://127.0.0.1:8080',
'http://localhost:3030',
'http://localhost:8080'
]
##### Marshal Datastructures #####
# Used by RevisionGet, RevisionList, ItemList
REVISION_FIELDS = {
'id': fields.Integer,
'date_created': fields.String,
'active': fields.Boolean,
'item_id': fields.Integer
}
# Used by RevisionList, ItemGet, ItemList
ITEM_FIELDS = {
'id': fields.Integer,
'region': fields.String,
'name': fields.String
}
# Used by ItemList, Justify
AUDIT_FIELDS = {
'id': fields.Integer,
'score': fields.Integer,
'issue': fields.String,
'notes': fields.String,
'justified': fields.Boolean,
'justification': fields.String,
'justified_date': fields.String,
'item_id': fields.Integer
}
## Single Use Marshal Objects ##
# SINGLE USE - RevisionGet
REVISION_COMMENT_FIELDS = {
'id': fields.Integer,
'revision_id': fields.Integer,
'date_created': fields.String,
'text': fields.String
}
# SINGLE USE - ItemGet
ITEM_COMMENT_FIELDS = {
'id': fields.Integer,
'date_created': fields.String,
'text': fields.String,
'item_id': fields.Integer
}
# SINGLE USE - UserSettings
USER_SETTINGS_FIELDS = {
# 'id': fields.Integer,
'daily_audit_email': fields.Boolean,
'change_reports': fields.String
}
# SINGLE USE - AccountGet
ACCOUNT_FIELDS = {
'id': fields.Integer,
'name': fields.String,
's3_name': fields.String,
'number': fields.String,
'notes': fields.String,
'active': fields.Boolean,
'third_party': fields.Boolean
}
WHITELIST_FIELDS = {
'id': fields.Integer,
'name': fields.String,
'notes': fields.String,
'cidr': fields.String
}
IGNORELIST_FIELDS = {
'id': fields.Integer,
'prefix': fields.String,
'notes': fields.String,
}
AUDITORSETTING_FIELDS = {
'id': fields.Integer,
'disabled': fields.Boolean,
'issue_text': fields.String
}
class AuthenticatedService(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
super(AuthenticatedService, self).__init__()
self.auth_dict = dict()
if current_user.is_authenticated():
self.auth_dict = {
"authenticated": True,
"user": current_user.email
}
else:
if app.config.get('FRONTED_BY_NGINX'):
url = "https://{}:{}{}".format(app.config.get('FQDN'), app.config.get('NGINX_PORT'), '/login')
else:
url = "http://{}:{}{}".format(app.config.get('FQDN'), app.config.get('API_PORT'), '/login')
self.auth_dict = {
"authenticated": False,
"user": None,
"url": url
}
@app.after_request
@crossdomain(allowed_origins=ORIGINS)
def after(response):
response.set_cookie('XSRF-COOKIE', generate_csrf())
return response
# Wish I could do this with @app.before_request
def __check_auth__(auth_dict):
"""
To be called at the beginning of any GET or POST request.
    Returns (True, response) when the caller still needs to authenticate; the
    response JSON carries the login URL to redirect the user to.
    Returns None, None when no authentication action needs to occur.
"""
if not current_user.is_authenticated():
return True, ({"auth": auth_dict}, 401)
return None, None
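# Hedged usage sketch (ExampleResource is hypothetical, not part of this package):
# subclasses of AuthenticatedService typically call __check_auth__ at the top of each
# handler and short-circuit with the login payload when the user is not authenticated.
#
#   class ExampleResource(AuthenticatedService):
#       def get(self):
#           auth, retval = __check_auth__(self.auth_dict)
#           if auth:
#               return retval
#           return {'auth': self.auth_dict, 'items': []}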
|
apache-2.0
|
liberation/django-elasticsearch
|
test_project/test_app/views.py
|
5
|
2183
|
from django.core import serializers
from django.http import HttpResponse
from django.db.models import Model
from django.views.generic.detail import SingleObjectMixin
from django_elasticsearch.views import ElasticsearchListView
from django_elasticsearch.views import ElasticsearchDetailView
from test_app.models import TestModel
class JsonViewMixin(object):
def render_to_response(self, context):
content = self._get_content()
if isinstance(content, Model):
            # Note: for some reason django's serializer only eats iterables
content = [content,]
json = serializers.serialize('json', content)
if isinstance(self, SingleObjectMixin):
json = json[1:-1] # eww
return HttpResponse(json, content_type='application/json; charset=utf-8')
class TestDetailView(JsonViewMixin, ElasticsearchDetailView):
model = TestModel
def _get_content(self):
return self.object
class TestListView(JsonViewMixin, ElasticsearchListView):
model = TestModel
def _get_content(self):
return self.object_list
### contrib.restframework test viewsets
from rest_framework.viewsets import ModelViewSet
from django_elasticsearch.contrib.restframework import AutoCompletionMixin
from django_elasticsearch.contrib.restframework import IndexableModelMixin
from rest_framework import VERSION
if int(VERSION[0]) < 3:
class TestViewSet(AutoCompletionMixin, IndexableModelMixin, ModelViewSet):
model = TestModel
filter_fields = ('username',)
ordering_fields = ('id',)
search_param = 'q'
paginate_by = 10
paginate_by_param = 'page_size'
else:
from rest_framework.serializers import ModelSerializer
class TestSerializer(ModelSerializer):
class Meta:
model = TestModel
class TestViewSet(AutoCompletionMixin, IndexableModelMixin, ModelViewSet):
model = TestModel
queryset = TestModel.objects.all()
serializer_class = TestSerializer
filter_fields = ('username',)
ordering_fields = ('id',)
search_param = 'q'
paginate_by = 10
paginate_by_param = 'page_size'
|
mit
|
grandchild/autohidewibox
|
autohidewibox.py
|
1
|
6439
|
#!/usr/bin/env python3
import configparser
import os.path as path
import re
import subprocess
import sys
import threading
MODE_TRANSIENT = "transient"
MODE_TOGGLE = "toggle"
config = configparser.ConfigParser()
try:
user_awesome_conf = path.join(
path.expanduser("~"), ".config/awesome/autohidewibox.conf"
)
user_conf = path.join(path.expanduser("~"), ".config/autohidewibox.conf")
system_conf = "/etc/autohidewibox.conf"
if len(sys.argv) > 1 and path.isfile(sys.argv[1]):
config.read(sys.argv[1])
elif path.isfile(user_awesome_conf):
config.read(user_awesome_conf)
elif path.isfile(user_conf):
config.read(user_conf)
else:
config.read(system_conf)
except configparser.MissingSectionHeaderError:
pass
awesome_version = config.get("autohidewibox", "awesome_version", fallback=4)
super_keys = config.get("autohidewibox", "super_keys", fallback="133,134").split(",")
wiboxes = config.get("autohidewibox", "wiboxname", fallback="mywibox").split(",")
custom_hide = config.get("autohidewibox", "custom_hide", fallback=None)
custom_show = config.get("autohidewibox", "custom_show", fallback=None)
delay_show = config.getfloat("autohidewibox", "delay_show", fallback=0)
delay_hide = config.getfloat("autohidewibox", "delay_hide", fallback=0)
mode = config.get("autohidewibox", "mode", fallback=MODE_TRANSIENT)
debug = config.getboolean("autohidewibox", "debug", fallback=False)
# (remove the following line if your wibox variables have strange characters)
wiboxes = [w for w in wiboxes if re.match("^[a-zA-Z_][a-zA-Z0-9_]*$", w)]
### python>=3.4:
# wiboxes = [ w for w in wiboxes if re.fullmatch("[a-zA-Z_][a-zA-Z0-9_]*", w) ]
delay = {True: delay_show, False: delay_hide}
delay_thread = None
wibox_is_currently_visible = False
waiting_for = False
non_super_key_was_pressed = False
cancel = threading.Event()
sh_path = ""
sh_potential_paths = ["/usr/bin/sh", "/bin/sh"]
for p in sh_potential_paths:
if path.exists(p):
sh_path = p
break
if sh_path == "":
print("Can't find sh in any of: " + ",".join(sh_potential_paths), file=sys.stderr)
sys.exit(1)
hide_command_v3 = "for k,v in pairs({wibox}) do v.visible = {state} end"
hide_command_v4 = "for s in screen do s.{wibox}.visible = {state} end"
try:
hide_command = hide_command_v4 if int(awesome_version) >= 4 else hide_command_v3
except ValueError:
hide_command = hide_command_v4
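# For reference (derived from the templates above): with awesome >= 4 and a wibox named
# "mywibox", set_wibox_state(True) ends up running roughly
#   sh -c "echo 'for s in screen do s.mywibox.visible = true end' | awesome-client"
# while the v3 template iterates the entries of the "mywibox" table instead.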
def _debug(*args):
if debug:
print(*args)
def set_wibox_state(state=True, immediate=False):
global delay_thread, waiting_for, cancel, wibox_is_currently_visible
wibox_is_currently_visible = state
dbg_pstate = "show" if state else "hide"
if delay[not state] > 0:
_debug(dbg_pstate, "delay other")
if type(delay_thread) == threading.Thread and delay_thread.is_alive():
# two consecutive opposing events cancel out. second event should not be
# called
_debug(dbg_pstate, "delay other, thread alive -> cancel")
cancel.set()
return
if delay[state] > 0 and not immediate:
_debug(dbg_pstate + " delay same")
if not (type(delay_thread) == threading.Thread and delay_thread.is_alive()):
_debug(dbg_pstate, "delay same, thread dead -> start wait")
waiting_for = state
cancel.clear()
delay_thread = threading.Thread(
group=None, target=wait_delay, kwargs={"state": state}
)
delay_thread.daemon = True
delay_thread.start()
# a second event setting the same state is silently discarded
return
_debug("state:", dbg_pstate)
for wibox in wiboxes:
subprocess.call(
sh_path
+ " "
+ "-c \"echo '"
+ hide_command.format(wibox=wibox, state="true" if state else "false")
+ "' | awesome-client\"",
shell=True,
)
customcmd = custom_show if state else custom_hide
if customcmd:
subprocess.call(
sh_path + " " + "-c \"echo '" + customcmd + "' | awesome-client\"",
shell=True,
)
def wait_delay(state=True):
if not cancel.wait(delay[state] / 1000):
set_wibox_state(state=state, immediate=True)
try:
set_wibox_state(False)
proc = subprocess.Popen(
["xinput", "--test-xi2", "--root", "3"], stdout=subprocess.PIPE
)
field = None
key_state = None
for line in proc.stdout:
l = line.decode("utf-8").strip()
event_match = re.match("EVENT type (\\d+) \\(.+\\)", l)
detail_match = re.match("detail: (\\d+)", l)
if event_match:
_debug(event_match)
try:
field = "event"
key_state = event_match.group(1)
_debug("found event, waiting for detail...")
except IndexError:
field = None
key_state = None
if (field == "event") and detail_match:
_debug(detail_match)
try:
if detail_match.group(1) in super_keys:
_debug("is a super key")
if key_state == "13": # press
non_super_key_was_pressed = False
if mode == MODE_TRANSIENT:
_debug("showing wibox")
set_wibox_state(True)
if key_state == "14": # release
if mode == MODE_TRANSIENT:
_debug("hiding wibox")
set_wibox_state(False)
# Avoid toggling the wibox when a super key is used in
# conjunction with another key.
elif mode == MODE_TOGGLE and not non_super_key_was_pressed:
_debug("toggling wibox")
set_wibox_state(not wibox_is_currently_visible)
non_super_key_was_pressed = False
else:
non_super_key_was_pressed = True
except IndexError:
_debug("Couldn't parse key_state number.")
pass
finally:
field = None
key_state = None
except KeyboardInterrupt:
pass
finally:
set_wibox_state(True, True)
_debug("Shutting down")
|
cc0-1.0
|
zhukaixy/kbengine
|
kbe/src/lib/python/Lib/ctypes/test/test_internals.py
|
113
|
2631
|
# This tests the internal _objects attribute
import unittest
from ctypes import *
from sys import getrefcount as grc
# XXX This test must be reviewed for correctness!!!
# ctypes' types are container types.
#
# They have an internal memory block, which only consists of some bytes,
# but it has to keep references to other objects as well. This is not
# really needed for trivial C types like int or char, but it is important
# for aggregate types like strings or pointers in particular.
#
# What about pointers?
class ObjectsTestCase(unittest.TestCase):
def assertSame(self, a, b):
self.assertEqual(id(a), id(b))
def test_ints(self):
i = 42000123
refcnt = grc(i)
ci = c_int(i)
self.assertEqual(refcnt, grc(i))
self.assertEqual(ci._objects, None)
def test_c_char_p(self):
s = b"Hello, World"
refcnt = grc(s)
cs = c_char_p(s)
self.assertEqual(refcnt + 1, grc(s))
self.assertSame(cs._objects, s)
def test_simple_struct(self):
class X(Structure):
_fields_ = [("a", c_int), ("b", c_int)]
a = 421234
b = 421235
x = X()
self.assertEqual(x._objects, None)
x.a = a
x.b = b
self.assertEqual(x._objects, None)
def test_embedded_structs(self):
class X(Structure):
_fields_ = [("a", c_int), ("b", c_int)]
class Y(Structure):
_fields_ = [("x", X), ("y", X)]
y = Y()
self.assertEqual(y._objects, None)
x1, x2 = X(), X()
y.x, y.y = x1, x2
self.assertEqual(y._objects, {"0": {}, "1": {}})
x1.a, x2.b = 42, 93
self.assertEqual(y._objects, {"0": {}, "1": {}})
def test_xxx(self):
class X(Structure):
_fields_ = [("a", c_char_p), ("b", c_char_p)]
class Y(Structure):
_fields_ = [("x", X), ("y", X)]
s1 = b"Hello, World"
s2 = b"Hallo, Welt"
x = X()
x.a = s1
x.b = s2
self.assertEqual(x._objects, {"0": s1, "1": s2})
y = Y()
y.x = x
self.assertEqual(y._objects, {"0": {"0": s1, "1": s2}})
## x = y.x
## del y
## print x._b_base_._objects
def test_ptr_struct(self):
class X(Structure):
_fields_ = [("data", POINTER(c_int))]
A = c_int*4
a = A(11, 22, 33, 44)
self.assertEqual(a._objects, None)
x = X()
x.data = a
##XXX print x._objects
##XXX print x.data[0]
##XXX print x.data._objects
if __name__ == '__main__':
unittest.main()
|
lgpl-3.0
|
dendisuhubdy/tensorflow
|
tensorflow/contrib/framework/python/ops/checkpoint_ops.py
|
76
|
8864
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating and loading vocab remappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops
from tensorflow.python.training import checkpoint_ops
# pylint: disable=protected-access,line-too-long
load_and_remap_matrix_initializer = checkpoint_ops._load_and_remap_matrix_initializer
# pylint: enable=line-too-long
load_embedding_initializer = checkpoint_ops._load_embedding_initializer
# pylint: enable=protected-access
def load_linear_multiclass_bias_initializer(ckpt_path,
bias_tensor_name,
new_class_vocab_size,
old_class_vocab_file,
new_class_vocab_file,
num_class_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Loads pre-trained multi-class biases for linear models from checkpoint.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
multi-class bias and remapping according to the provided vocab files. See docs
for `load_and_remap_matrix_initializer()` for more details. In this case, the
provided row_vocab is the class vocabulary, and the expected shape is
`[new_class_vocab_size, 1]`.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
bias_tensor_name: Tensor name to load from in the checkpoints.
new_class_vocab_size: Number of entries in the new class vocab.
old_class_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old class vocabulary file.
new_class_vocab_file: A scalar `Tensor` of type `string` containing the
path to the new class vocabulary file.
num_class_oov_buckets: `int` specifying the number of out-of-vocabulary
buckets to use for the classes. Must be >= 0.
initializer: Initializer function that accepts a 1-D tensor as the arg to
specify the shape of the returned tensor. If `None`, defaults to using
`zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function.
"""
# Linear multi-class biases should be zero-initialized.
if initializer is None:
initializer = init_ops.zeros_initializer()
return load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=bias_tensor_name,
new_row_vocab_size=new_class_vocab_size,
new_col_vocab_size=1,
old_row_vocab_file=old_class_vocab_file,
new_row_vocab_file=new_class_vocab_file,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=num_class_oov_buckets,
num_col_oov_buckets=0,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
def load_variable_slot_initializer(ckpt_path,
old_tensor_name,
primary_partition_info,
new_row_vocab_size,
new_col_vocab_size,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Loads pre-trained multi-class slots for linear models from checkpoint.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
multi-class slots (such as optimizer accumulators) and remapping them
according to the provided vocab files. See docs for
`load_and_remap_matrix_initializer()` for more details. Takes in a
`variable_scope._PartitionInfo` representing the slot's primary `Variable`'s
partitioning. This is necessary since accumulator `Variable` creation ignores
primary scoping and partitioning information.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
primary_partition_info: A `variable_scope._PartitionInfo` containing this
slot's primary `Variable`'s partitioning information. This is used to
calculate the offset and override the partition_info passed to the call to
_initialize.
new_row_vocab_size: `int` specifying the number of entries in
`new_row_vocab_file`. If no row remapping is needed (no row vocab
provided), this should be equal to the number of rows to load from the old
matrix (which can theoretically be smaller than the number of rows in the
old matrix).
new_col_vocab_size: `int` specifying the number of entries in
`new_col_vocab_file`. If no column remapping is needed (no column vocab
provided), this should be equal to the number of columns in the old
matrix.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
initializer: Initializer function to initialize missing values. Accepts a
1-D tensor as the arg to specify the shape of the returned tensor. If
`None`, defaults to using `zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function that should be used to initialize a
(potentially partitioned) `Variable` whose complete shape is
`[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size +
num_col_oov_buckets]`.
Raises:
TypeError: If `initializer` is specified but not callable.
"""
initializer_fn = load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
new_row_vocab_size=new_row_vocab_size,
new_col_vocab_size=new_col_vocab_size,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
new_col_vocab_file=new_col_vocab_file,
num_row_oov_buckets=num_row_oov_buckets,
num_col_oov_buckets=num_col_oov_buckets,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
del partition_info # Unused by this override.
return initializer_fn(shape, dtype, partition_info=primary_partition_info)
return _initializer
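# Hedged usage sketch (paths, tensor names and vocab sizes below are hypothetical):
# the returned callables are ordinary variable initializers, e.g.
#
#   bias_init = load_linear_multiclass_bias_initializer(
#       ckpt_path='/tmp/old_model.ckpt',
#       bias_tensor_name='linear/bias',
#       new_class_vocab_size=1000,
#       old_class_vocab_file='/tmp/old_classes.txt',
#       new_class_vocab_file='/tmp/new_classes.txt',
#       num_class_oov_buckets=1)
#   # later: tf.get_variable('bias', shape=[1001, 1], initializer=bias_init)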
|
apache-2.0
|
ATNF/askapsdp
|
Code/Base/py-iceutils/current/askap/iceutils/icesession.py
|
1
|
8460
|
#!/usr/bin/env python
# Copyright (c) 2012 CSIRO
# Australia Telescope National Facility (ATNF)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# PO Box 76, Epping NSW 1710, Australia
# [email protected]
#
# This file is part of the ASKAP software distribution.
#
# The ASKAP software distribution is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
__all__ = ["IceSession"]
import sys
import os
import time
import signal
import subprocess
import threading
import shutil
import Ice
class IceSession(object):
"""
Deploy a list of ice application with a registry.
This is a *SIGTERM* interruptable process which will stop all applications
gracefully.
:param app_file: an optional text file with command lines
:param cleanup: optional flag to indicate cleanup of ice meta-data
*ICE_CONFIG* needs to be defined in the environment.
Applications can be added programatically via :meth:`.add_app` or via a file
given at contruction or via :meth:`from_file`. The file should contain one
line per executable with arguments e.g.
.. code-block:: sh
icebox
fcm.py --log-config=askap.pylog_cfg --config=fcm_init.parset
    The class provides a context manager, i.e. it can and should be used with a
`with` statement::
with IceSession() as iceenv:
iceenv.start()
do_stuff()
# all process will terminate here
"""
def __init__(self, app_file=None, cleanup=False):
if not "ICE_CONFIG" in os.environ:
raise OSError("Environment variable ICE_CONFIG not defined")
self.apps = []
"""The list of applications to create subprocesses for"""
self.processes = []
"""The list of subprocesses"""
self._started = False
self._clean_dirs = []
self.cleanup = cleanup
"""Clean up ice meta-data directories on terminate"""
self.communicator = None
"""An :obj:`Ice.CommunicatorI` instance given *ICE_CONFIG*"""
self.wake_up = threading.Event()
"""Wake up :meth:`.wait`"""
signal.signal(signal.SIGTERM, self.terminate)
self._init_communicator()
if self.registry_running():
raise RuntimeError("icegridregistry with same config "
"already running")
self.add_app("icegridregistry")
self.from_file(app_file)
def __enter__(self):
return self
def __exit__(self, ttype, value, traceback):
self.terminate()
def _init_communicator(self):
self.communicator = Ice.initialize(sys.argv)
props = self.communicator.getProperties()
dirs = [props.getProperty('IceGrid.Registry.Data'),
props.getProperty('Freeze.DbEnv.IceStorm.DbHome')]
for d in dirs:
if not d:
continue
if os.path.exists(d) and self.cleanup:
shutil.rmtree(d)
if not os.path.exists(d):
print "creating ice metadata directory", d
os.makedirs(d)
self._clean_dirs.append(d)
def terminate(self, signum=None, frame=None):
"""Terminate all application processes in reverse order"""
for proc in self.processes[::-1]:
try:
proc.terminate()
proc.wait()
print >>sys.stdout, "SIGTERM", proc.application
except Exception as ex:
print >>sys.stderr,ex
self.processes = []
self.wake_up.set()
self._started = False
def from_file(self, filename=None):
"""Load application list from file provided in :attr:`sys.argv`.
"""
if filename is None:
return
with open(filename, "r") as f:
for line in f:
line = line.strip()
if len(line) > 0 and not line.startswith("#"):
elem = line.split()
app = elem[0]
args = []
if len(elem) > 1:
args = elem[1:]
self.add_app(app, args)
def add_app(self, app, args=None):
"""Add an application with optional arguments. This doesn't run the
application.
:param str app: the application name
:param list args: the optional arguments for the application
"""
if args is None:
args = []
self.apps.append((app, args))
def run_app(self, application, args):
"""Run the application in a background process and add to
:attr:`.processes`"""
lfile = application+".log"
cmd = [application]+args
with open(lfile,"w") as logfile:
proc = subprocess.Popen(cmd, shell=False,
stderr=logfile, stdout=logfile)
self.processes.append(proc)
proc.application = application
proc.log = lfile
if application == "icegridregistry":
self.wait_for_registry()
if proc.poll() is not None:
print >>sys.stderr, proc.application, "failed:"
print >>sys.stderr, open(proc.log, 'r').read()
raise RuntimeError("Application '%s' failed on start"
% application)
def registry_running(self):
"""Check wether an icegridregistry is running"""
try:
self.communicator.getDefaultLocator().ice_ping()
return True
except Ice.ConnectionRefusedException as ex:
return False
def wait_for_registry(self, timeout=10):
"""Block until the ice registry is up and time out after `timeout`
seconds.
"""
n = timeout/0.1
# t0 = time.time()
i = 0
connected = False
while i < n:
try:
self.communicator.getDefaultLocator().ice_ping()
connected = True
break
except Ice.ConnectionRefusedException as ex:
pass
time.sleep(0.1)
i += 1
if not connected:
raise RuntimeError("Waiting for icegridregistry timed out")
# print >>sys.stderr,i, time.time() - t0
def wait(self):
"""Block the main process. This is interruptable through a
:class:`KeyboardInterrupt` (Ctrl-C)."""
self.wake_up.clear()
if self.processes:
# periodically check if a subprocess had died
while True:
for proc in self.processes:
if proc.poll() is not None:
raise RuntimeError("Process %s died"
% proc.application)
self.wake_up.wait(1)
if self.wake_up.is_set():
return
def start(self):
"""Start all applications, e.g call :meth:`run_app` on all
:attr:`apps`"""
if self._started:
return
try:
for application, args in self.apps:
print "Starting", application, "...",
sys.stdout.flush()
self.run_app(application, args)
print "done."
self._started = True
except KeyboardInterrupt:
# need to wait here otherwise the processes won't terminate???
time.sleep(1)
self.terminate()
if __name__ == "__main__":
fname = None
if len(sys.argv) == 2:
fname = sys.argv[-1]
with IceSession(fname, cleanup=True) as isess:
isess.start()
isess.wait()
|
gpl-2.0
|
varigit/VAR-SOM-AM33-Kernel-3-14
|
tools/perf/util/setup.py
|
989
|
1543
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPIKFS')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
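# Hedged note: this script is normally driven by the perf Makefile rather than run by
# hand; the caller exports PYTHON_EXTBUILD_LIB, PYTHON_EXTBUILD_TMP, LIBTRACEEVENT and
# LIBAPIKFS (the getenv calls above) and then invokes something like:
#   python2 util/setup.py --quiet build_ext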
|
gpl-2.0
|
eloquence/unisubs
|
apps/videos/tests/test_video_types.py
|
2
|
14077
|
# -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import datetime
from django.test import TestCase
from babelsubs.storage import SubtitleLine, SubtitleSet
from auth.models import CustomUser as User
from teams.models import Team, TeamVideo
from subtitles import pipeline
from subtitles.models import SubtitleLanguage, SubtitleVersion
from videos.models import Video, VIDEO_TYPE_BRIGHTCOVE
from videos.types import video_type_registrar, VideoTypeError
from videos.types.base import VideoType, VideoTypeRegistrar
from videos.types.bliptv import BlipTvVideoType
from videos.types.brightcove import BrightcoveVideoType
from videos.types.dailymotion import DailymotionVideoType
from videos.types.flv import FLVVideoType
from videos.types.htmlfive import HtmlFiveVideoType
from videos.types.kaltura import KalturaVideoType
from videos.types.mp3 import Mp3VideoType
from videos.types.vimeo import VimeoVideoType
from videos.types.youtube import YoutubeVideoType
from utils import test_utils
from externalsites import google
class YoutubeVideoTypeTest(TestCase):
def setUp(self):
self.vt = YoutubeVideoType
self.data = [{
'url': 'http://www.youtube.com/watch#!v=UOtJUmiUZ08&feature=featured&videos=Qf8YDn9mbGs',
'video_id': 'UOtJUmiUZ08'
},{
'url': 'http://www.youtube.com/v/6Z5msRdai-Q',
'video_id': '6Z5msRdai-Q'
},{
'url': 'http://www.youtube.com/watch?v=woobL2yAxD4',
'video_id': 'woobL2yAxD4'
},{
'url': 'http://www.youtube.com/watch?v=woobL2yAxD4&playnext=1&videos=9ikUhlPnCT0&feature=featured',
'video_id': 'woobL2yAxD4'
}]
self.shorter_url = "http://youtu.be/HaAVZ2yXDBo"
@test_utils.patch_for_test('externalsites.google.get_video_info')
def test_set_values(self, mock_get_video_info):
video_info = google.VideoInfo('test-channel-id', 'title',
'description', 100,
'http://example.com/thumb.png')
mock_get_video_info.return_value = video_info
video, created = Video.get_or_create_for_url(
'http://www.youtube.com/watch?v=_ShmidkrcY0')
vu = video.videourl_set.all()[:1].get()
self.assertEqual(vu.videoid, '_ShmidkrcY0')
self.assertEqual(video.title, video_info.title)
self.assertEqual(video.description, video_info.description)
self.assertEqual(video.duration, video_info.duration)
self.assertEqual(video.thumbnail, video_info.thumbnail_url)
@test_utils.patch_for_test('externalsites.google.get_video_info')
def test_get_video_info_exception(self, mock_get_video_info):
video_info = google.VideoInfo('test-channel-id', 'title',
'description', 100,
'http://example.com/thumb.png')
mock_get_video_info.side_effect = google.APIError()
video, created = Video.get_or_create_for_url(
'http://www.youtube.com/watch?v=_ShmidkrcY0')
vu = video.videourl_set.all()[:1].get()
self.assertEqual(vu.videoid, '_ShmidkrcY0')
self.assertEqual(video.description, '')
self.assertEqual(video.duration, None)
self.assertEqual(video.thumbnail, '')
# since get_video_info failed, we don't know the channel id of our
# video URL. We should use a dummy value to make it easier to fix the
# issue in the future
self.assertEqual(vu.owner_username, None)
def test_matches_video_url(self):
for item in self.data:
self.assertTrue(self.vt.matches_video_url(item['url']))
self.assertFalse(self.vt.matches_video_url('http://some-other-url.com'))
self.assertFalse(self.vt.matches_video_url(''))
self.assertFalse(self.vt.matches_video_url('http://youtube.com/'))
self.assertFalse(self.vt.matches_video_url('http://youtube.com/some-video/'))
self.assertTrue(self.vt.matches_video_url(self.shorter_url))
def test_get_video_id(self):
for item in self.data:
self.failUnlessEqual(item['video_id'], self.vt._get_video_id(item['url']))
def test_shorter_format(self):
vt = self.vt(self.shorter_url)
self.assertTrue(vt)
self.assertEqual(vt.video_id , self.shorter_url.split("/")[-1])
class HtmlFiveVideoTypeTest(TestCase):
def setUp(self):
self.vt = HtmlFiveVideoType
def test_type(self):
self.assertTrue(self.vt.matches_video_url(
'http://someurl.com/video.ogv'))
self.assertTrue(self.vt.matches_video_url(
'http://someurl.com/video.OGV'))
self.assertTrue(self.vt.matches_video_url('http://someurl.com/video.ogg'))
self.assertTrue(self.vt.matches_video_url('http://someurl.com/video.mp4'))
self.assertTrue(self.vt.matches_video_url('http://someurl.com/video.m4v'))
self.assertTrue(self.vt.matches_video_url('http://someurl.com/video.webm'))
self.assertFalse(self.vt.matches_video_url('http://someurl.ogv'))
self.assertFalse(self.vt.matches_video_url('http://someurl.com/ogv'))
self.assertFalse(self.vt.matches_video_url(''))
        # these URLs are handled by another video type or have no valid extension
self.assertFalse(self.vt.matches_video_url('http://someurl.com/video.flv'))
self.assertFalse(self.vt.matches_video_url('http://someurl.com/ogv.video'))
class Mp3VideoTypeTest(TestCase):
def setUp(self):
self.vt = Mp3VideoType
def test_type(self):
self.assertTrue(self.vt.matches_video_url(
'http://someurl.com/audio.mp3'))
self.assertTrue(self.vt.matches_video_url(
'http://someurl.com/audio.MP3'))
self.assertFalse(self.vt.matches_video_url(
'http://someurl.com/mp3.audio'))
class BlipTvVideoTypeTest(TestCase):
def setUp(self):
self.vt = BlipTvVideoType
def test_type(self):
url = 'http://blip.tv/day9tv/day-9-daily-438-p3-build-orders-made-easy-newbie-tuesday-6066868'
video, created = Video.get_or_create_for_url(url)
vu = video.videourl_set.all()[:1].get()
# this is the id used to embed videos
self.assertEqual(vu.videoid, 'hdljgvKmGAI')
self.assertTrue(video.title)
self.assertTrue(video.thumbnail)
self.assertTrue(vu.url)
self.assertTrue(self.vt.matches_video_url(url))
self.assertTrue(self.vt.matches_video_url('http://blip.tv/day9tv/day-9-daily-438-p3-build-orders-made-easy-newbie-tuesday-6066868'))
self.assertFalse(self.vt.matches_video_url('http://blip.tv'))
self.assertFalse(self.vt.matches_video_url(''))
def test_video_title(self):
url = 'http://blip.tv/day9tv/day-9-daily-100-my-life-of-starcraft-3505715'
video, created = Video.get_or_create_for_url(url)
        # really this should just not fail
self.assertTrue(video.get_absolute_url())
def test_creating(self):
# this test is for ticket: https://www.pivotaltracker.com/story/show/12996607
url = 'http://blip.tv/day9tv/day-9-daily-1-flash-vs-hero-3515432'
video, created = Video.get_or_create_for_url(url)
class DailymotionVideoTypeTest(TestCase):
def setUp(self):
self.vt = DailymotionVideoType
def test_type(self):
url = 'http://www.dailymotion.com/video/x7u2ww_juliette-drums_lifestyle#hp-b-l'
video, created = Video.get_or_create_for_url(url)
vu = video.videourl_set.all()[:1].get()
self.assertEqual(vu.videoid, 'x7u2ww')
self.assertTrue(video.title)
self.assertTrue(video.thumbnail)
self.assertEqual(vu.url, 'http://dailymotion.com/video/x7u2ww')
self.assertTrue(self.vt.matches_video_url(url))
self.assertFalse(self.vt.matches_video_url(''))
self.assertFalse(self.vt.matches_video_url('http://www.dailymotion.com'))
def test_type1(self):
url = u'http://www.dailymotion.com/video/edit/xjhzgb_projet-de-maison-des-services-a-fauquembergues_news'
vt = self.vt(url)
try:
vt.get_metadata(vt.videoid)
self.fail('This link should return wrong response')
except VideoTypeError:
pass
class FLVVideoTypeTest(TestCase):
def setUp(self):
self.vt = FLVVideoType
def test_type(self):
self.assertTrue(self.vt.matches_video_url(
'http://someurl.com/video.flv'))
self.assertFalse(self.vt.matches_video_url(
'http://someurl.flv'))
self.assertFalse(self.vt.matches_video_url(
''))
self.assertFalse(self.vt.matches_video_url(
'http://someurl.com/flv.video'))
def test_blip_type(self):
url = 'http://blip.tv/file/get/Coldguy-SpineBreakersLiveAWizardOfEarthsea210.FLV'
video, created = Video.get_or_create_for_url(url)
video_url = video.videourl_set.all()[0]
self.assertEqual(self.vt.abbreviation, video_url.type)
class VimeoVideoTypeTest(TestCase):
def setUp(self):
self.vt = VimeoVideoType
def test_type(self):
url = 'http://vimeo.com/15786066?some_param=111'
video, created = Video.get_or_create_for_url(url)
vu = video.videourl_set.all()[:1].get()
self.assertEqual(vu.videoid, '15786066')
self.assertTrue(self.vt.matches_video_url(url))
self.assertFalse(self.vt.matches_video_url('http://vimeo.com'))
self.assertFalse(self.vt.matches_video_url(''))
def test1(self):
        #For this video the Vimeo API returns a response with a strange error,
        #but we can still get data from it. See vidscraper.sites.vimeo.get_shortmem.
        #So if this test fails, maybe the API was just fixed and a different response is returned.
        # FIXME: re-establish when the vimeo api is back!
return
url = u'http://vimeo.com/22070806'
video, created = Video.get_or_create_for_url(url)
self.assertNotEqual(video.title, '')
self.assertNotEqual(video.description, '')
vu = video.videourl_set.all()[:1].get()
self.assertEqual(vu.videoid, '22070806')
class VideoTypeRegistrarTest(TestCase):
def test_base(self):
registrar = VideoTypeRegistrar()
class MockupVideoType(VideoType):
abbreviation = 'mockup'
name = 'MockUp'
registrar.register(MockupVideoType)
self.assertEqual(registrar[MockupVideoType.abbreviation], MockupVideoType)
self.assertEqual(registrar.choices[-1], (MockupVideoType.abbreviation, MockupVideoType.name))
def test_video_type_for_url(self):
type = video_type_registrar.video_type_for_url('some url')
self.assertEqual(type, None)
type = video_type_registrar.video_type_for_url('http://youtube.com/v=UOtJUmiUZ08')
self.assertTrue(isinstance(type, YoutubeVideoType))
return
self.assertRaises(VideoTypeError, video_type_registrar.video_type_for_url,
'http://youtube.com/v=100500')
class BrightcoveVideoTypeTest(TestCase):
player_id = '1234'
video_id = '5678'
@test_utils.patch_for_test('videos.types.brightcove.BrightcoveVideoType._resolve_url_redirects')
def setUp(self, resolve_url_redirects):
TestCase.setUp(self)
self.resolve_url_redirects = resolve_url_redirects
resolve_url_redirects.side_effect = lambda url: url
def test_type(self):
self.assertEqual(BrightcoveVideoType.abbreviation,
VIDEO_TYPE_BRIGHTCOVE)
def make_url(self, url):
return url.format(video_id=self.video_id, player_id=self.player_id)
def check_url(self, url):
self.assertTrue(BrightcoveVideoType.matches_video_url(url))
vt = BrightcoveVideoType(url)
self.assertEquals(vt.video_id, self.video_id)
def test_urls(self):
# test URLs with the video_id in the path
self.check_url(self.make_url(
'http://link.brightcove.com'
'/services/link/bcpid{player_id}/bctid{video_id}'))
self.check_url(self.make_url(
'http://bcove.me'
'/services/link/bcpid{player_id}/bctid{video_id}'))
# test URLs with the video_id in the query
self.check_url(self.make_url(
'http://link.brightcove.com'
'/services/link/bcpid{player_id}'
'?bckey=foo&bctid={video_id}'))
def test_redirection(self):
# test URLs in bcove.me that redirect to another brightcove URL
self.resolve_url_redirects.side_effect = lambda url: self.make_url(
'http://link.brightcove.com/'
'services/link/bcpid{player_id}/bctid{video_id}')
self.check_url('http://bcove.me/shortpath')
class KalturaVideoTypeTest(TestCase):
def test_type(self):
url = 'http://cdnbakmi.kaltura.com/p/1492321/sp/149232100/serveFlavor/entryId/1_zr7niumr/flavorId/1_djpnqf7y/name/a.mp4'
video, created = Video.get_or_create_for_url(url)
vu = video.videourl_set.get()
self.assertEquals(vu.type, 'K')
self.assertEquals(vu.kaltura_id(), '1_zr7niumr')
|
agpl-3.0
|
pgmillon/ansible
|
lib/ansible/modules/network/fortios/fortios_address.py
|
24
|
10246
|
#!/usr/bin/python
#
# Ansible module to manage IP addresses on fortios devices
# (c) 2016, Benjamin Jolivot <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: fortios_address
version_added: "2.4"
author: "Benjamin Jolivot (@bjolivot)"
short_description: Manage fortios firewall address objects
description:
  - This module provides management of firewall addresses on FortiOS devices.
extends_documentation_fragment: fortios
options:
state:
description:
      - Specifies if the address needs to be added or deleted.
required: true
choices: ['present', 'absent']
name:
description:
- Name of the address to add or delete.
required: true
type:
description:
- Type of the address.
choices: ['iprange', 'fqdn', 'ipmask', 'geography']
value:
description:
- Address value, based on type.
        If type=fqdn, something like www.google.com.
If type=ipmask, you can use simple ip (192.168.0.1), ip+mask (192.168.0.1 255.255.255.0) or CIDR (192.168.0.1/32).
start_ip:
description:
- First ip in range (used only with type=iprange).
end_ip:
description:
- Last ip in range (used only with type=iprange).
country:
description:
- 2 letter country code (like FR).
interface:
description:
      - Interface name the address applies to.
default: any
comment:
description:
- free text to describe address.
notes:
  - This module requires the netaddr python library.
"""
EXAMPLES = """
- name: Register french addresses
fortios_address:
host: 192.168.0.254
username: admin
password: p4ssw0rd
state: present
name: "fromfrance"
type: geography
country: FR
comment: "French geoip address"
- name: Register some fqdn
fortios_address:
host: 192.168.0.254
username: admin
password: p4ssw0rd
state: present
name: "Ansible"
type: fqdn
value: www.ansible.com
comment: "Ansible website"
- name: Register google DNS
fortios_address:
host: 192.168.0.254
username: admin
password: p4ssw0rd
state: present
name: "google_dns"
type: ipmask
value: 8.8.8.8
"""
RETURN = """
firewall_address_config:
description: full firewall addresses config string.
returned: always
type: str
change_string:
description: The commands executed by the module.
returned: only if config changed
type: str
"""
from ansible.module_utils.network.fortios.fortios import fortios_argument_spec, fortios_required_if
from ansible.module_utils.network.fortios.fortios import backup, AnsibleFortios
from ansible.module_utils.basic import AnsibleModule
# check for netaddr lib
try:
from netaddr import IPNetwork
HAS_NETADDR = True
except Exception:
HAS_NETADDR = False
# define valid country list for GEOIP address type
FG_COUNTRY_LIST = (
'ZZ', 'A1', 'A2', 'O1', 'AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AN', 'AO',
'AP', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BB', 'BD', 'BE',
'BF', 'BG', 'BH', 'BI', 'BJ', 'BL', 'BM', 'BN', 'BO', 'BQ', 'BR', 'BS', 'BT',
'BV', 'BW', 'BY', 'BZ', 'CA', 'CC', 'CD', 'CF', 'CG', 'CH', 'CI', 'CK', 'CL',
'CM', 'CN', 'CO', 'CR', 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DE', 'DJ', 'DK',
'DM', 'DO', 'DZ', 'EC', 'EE', 'EG', 'EH', 'ER', 'ES', 'ET', 'EU', 'FI', 'FJ',
'FK', 'FM', 'FO', 'FR', 'GA', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GL',
'GM', 'GN', 'GP', 'GQ', 'GR', 'GS', 'GT', 'GU', 'GW', 'GY', 'HK', 'HM', 'HN',
'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IM', 'IN', 'IO', 'IQ', 'IR', 'IS', 'IT',
'JE', 'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KI', 'KM', 'KN', 'KP', 'KR', 'KW',
'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', 'LR', 'LS', 'LT', 'LU', 'LV', 'LY',
'MA', 'MC', 'MD', 'ME', 'MF', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN', 'MO', 'MP',
'MQ', 'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA', 'NC', 'NE',
'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', 'NZ', 'OM', 'PA', 'PE', 'PF',
'PG', 'PH', 'PK', 'PL', 'PM', 'PN', 'PR', 'PS', 'PT', 'PW', 'PY', 'QA', 'RE',
'RO', 'RS', 'RU', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SG', 'SH', 'SI', 'SJ',
'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'SS', 'ST', 'SV', 'SX', 'SY', 'SZ', 'TC',
'TD', 'TF', 'TG', 'TH', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO', 'TR', 'TT', 'TV',
'TW', 'TZ', 'UA', 'UG', 'UM', 'US', 'UY', 'UZ', 'VA', 'VC', 'VE', 'VG', 'VI',
'VN', 'VU', 'WF', 'WS', 'YE', 'YT', 'ZA', 'ZM', 'ZW'
)
def get_formated_ipaddr(input_ip):
"""
Format given ip address string to fortigate format (ip netmask)
Args:
        * **input_ip** (string) : string representing ip address
accepted format:
- ip netmask (ex: 192.168.0.10 255.255.255.0)
- ip (ex: 192.168.0.10)
- CIDR (ex: 192.168.0.10/24)
Returns:
        formatted ip if ip is valid (ex: "192.168.0.10 255.255.255.0")
False if ip is not valid
"""
try:
if " " in input_ip:
# ip netmask format
str_ip, str_netmask = input_ip.split(" ")
ip = IPNetwork(str_ip)
mask = IPNetwork(str_netmask)
return "%s %s" % (str_ip, str_netmask)
else:
ip = IPNetwork(input_ip)
return "%s %s" % (str(ip.ip), str(ip.netmask))
except Exception:
return False
return False
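# Illustrative conversions the function above is expected to produce (a sketch,
# not part of the original module):
#   get_formated_ipaddr('192.168.0.10')               -> '192.168.0.10 255.255.255.255'
#   get_formated_ipaddr('192.168.0.10/24')            -> '192.168.0.10 255.255.255.0'
#   get_formated_ipaddr('192.168.0.10 255.255.255.0') -> '192.168.0.10 255.255.255.0'
#   get_formated_ipaddr('not-an-ip')                  -> False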
def main():
argument_spec = dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True),
type=dict(choices=['iprange', 'fqdn', 'ipmask', 'geography'], default='ipmask'),
value=dict(),
start_ip=dict(),
end_ip=dict(),
country=dict(),
interface=dict(default='any'),
comment=dict(),
)
# merge argument_spec from module_utils/fortios.py
argument_spec.update(fortios_argument_spec)
# Load module
module = AnsibleModule(
argument_spec=argument_spec,
required_if=fortios_required_if,
supports_check_mode=True,
)
result = dict(changed=False)
if not HAS_NETADDR:
module.fail_json(msg='Could not import the python library netaddr required by this module')
# check params
if module.params['state'] == 'absent':
if module.params['type'] != "ipmask":
module.fail_json(msg='Invalid argument type=%s when state=absent' % module.params['type'])
if module.params['value'] is not None:
module.fail_json(msg='Invalid argument `value` when state=absent')
if module.params['start_ip'] is not None:
module.fail_json(msg='Invalid argument `start_ip` when state=absent')
if module.params['end_ip'] is not None:
module.fail_json(msg='Invalid argument `end_ip` when state=absent')
if module.params['country'] is not None:
module.fail_json(msg='Invalid argument `country` when state=absent')
if module.params['interface'] != "any":
module.fail_json(msg='Invalid argument `interface` when state=absent')
if module.params['comment'] is not None:
module.fail_json(msg='Invalid argument `comment` when state=absent')
else:
# state=present
# validate IP
if module.params['type'] == "ipmask":
formated_ip = get_formated_ipaddr(module.params['value'])
if formated_ip is not False:
module.params['value'] = get_formated_ipaddr(module.params['value'])
else:
module.fail_json(msg="Bad ip address format")
# validate country
if module.params['type'] == "geography":
if module.params['country'] not in FG_COUNTRY_LIST:
module.fail_json(msg="Invalid country argument, need to be in `diagnose firewall ipgeo country-list`")
# validate iprange
if module.params['type'] == "iprange":
if module.params['start_ip'] is None:
module.fail_json(msg="Missing argument 'start_ip' when type is iprange")
if module.params['end_ip'] is None:
module.fail_json(msg="Missing argument 'end_ip' when type is iprange")
# init forti object
fortigate = AnsibleFortios(module)
# Config path
config_path = 'firewall address'
# load config
fortigate.load_config(config_path)
# Absent State
if module.params['state'] == 'absent':
fortigate.candidate_config[config_path].del_block(module.params['name'])
# Present state
if module.params['state'] == 'present':
# define address params
new_addr = fortigate.get_empty_configuration_block(module.params['name'], 'edit')
if module.params['comment'] is not None:
new_addr.set_param('comment', '"%s"' % (module.params['comment']))
if module.params['type'] == 'iprange':
new_addr.set_param('type', 'iprange')
new_addr.set_param('start-ip', module.params['start_ip'])
new_addr.set_param('end-ip', module.params['end_ip'])
if module.params['type'] == 'geography':
new_addr.set_param('type', 'geography')
new_addr.set_param('country', '"%s"' % (module.params['country']))
if module.params['interface'] != 'any':
new_addr.set_param('associated-interface', '"%s"' % (module.params['interface']))
if module.params['value'] is not None:
if module.params['type'] == 'fqdn':
new_addr.set_param('type', 'fqdn')
new_addr.set_param('fqdn', '"%s"' % (module.params['value']))
if module.params['type'] == 'ipmask':
new_addr.set_param('subnet', module.params['value'])
# add the new address object to the device
fortigate.add_block(module.params['name'], new_addr)
# Apply changes (check mode is managed directly by the fortigate object)
fortigate.apply_changes()
if __name__ == '__main__':
main()
|
gpl-3.0
|
sim0629/linux-openwrt
|
tools/perf/scripts/python/check-perf-trace.py
|
11214
|
2503
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
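# A typical invocation is sketched below (the event names are only examples
# that happen to match the handlers defined in this script):
#   perf record -e irq:softirq_entry -e kmem:kmalloc -a sleep 1
#   perf script -s check-perf-trace.py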
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
hashems/Mobile-Cloud-Development-Projects
|
appengine/standard/taskqueue/counter/worker.py
|
9
|
1335
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START all]
from google.appengine.ext import ndb
import webapp2
COUNTER_KEY = 'default counter'
class Counter(ndb.Model):
count = ndb.IntegerProperty(indexed=False)
class UpdateCounterHandler(webapp2.RequestHandler):
def post(self):
amount = int(self.request.get('amount'))
# This task should run at most once per second because of the datastore
# transaction write throughput.
@ndb.transactional
def update_counter():
counter = Counter.get_or_insert(COUNTER_KEY, count=0)
counter.count += amount
counter.put()
update_counter()
app = webapp2.WSGIApplication([
('/update_counter', UpdateCounterHandler)
], debug=True)
# [END all]
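# Enqueueing sketch (not part of the sample above): another handler would hand
# work to this worker through the App Engine task queue, along the lines of
#   from google.appengine.api import taskqueue
#   taskqueue.add(url='/update_counter', params={'amount': '1'})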
|
apache-2.0
|
miguelgrinberg/Flask-Moment
|
docs/conf.py
|
1
|
2232
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../src'))
# -- Project information -----------------------------------------------------
project = 'Flask-Moment'
copyright = '2021, Miguel Grinberg'
author = 'Miguel Grinberg'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/custom.css',
]
html_theme_options = {
'github_user': 'miguelgrinberg',
'github_repo': 'flask-moment',
'github_banner': True,
'github_button': True,
'github_type': 'star',
'fixed_sidebar': True,
}
autodoc_default_options = {
'member-order': 'bysource',
}
add_module_names = False
|
mit
|
gabrielhuang/OhSnap
|
segment.py
|
1
|
3771
|
import pyaudio
import sys
import wave
import numpy as np
import scipy.ndimage
import scipy.signal
import matplotlib.pyplot as plt
from sklearn.externals import joblib
def decode(in_data, channels):
"""
Convert a byte stream into a 2D numpy array with
shape (chunk_size, channels)
    Samples are interleaved, so for a stereo stream with left channel
    of [L0, L1, L2, ...] and right channel of [R0, R1, R2, ...], the input
    byte stream is ordered as [L0, R0, L1, R1, ...]
"""
result = np.fromstring(in_data, dtype=np.int16)
    chunk_length = len(result) // channels
    assert len(result) == chunk_length * channels
result = np.reshape(result, (chunk_length, channels))
return result
def encode(signal):
"""
Convert a 2D numpy array into a byte stream for PyAudio
Signal should be a numpy array with shape (chunk_size, channels)
"""
interleaved = signal.flatten()
# TODO: handle data type as parameter, convert between pyaudio/numpy types
out_data = interleaved.astype(np.int16).tostring()
return out_data
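# Round-trip sketch (illustrative): for a stereo chunk of two frames,
#   raw = np.array([[0, 1], [2, 3]], dtype=np.int16)   # shape (chunk_size=2, channels=2)
#   decode(encode(raw), 2)
# returns the same 2x2 array, because encode() interleaves the frames as
# [0, 1, 2, 3] and decode() splits them back per channel.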
def wav_to_np(filename, sample_width=np.int16, chunk_size=1024):
wf = wave.open(filename, 'rb')
print 'Open {}, BitsPerSample {}-bits, Channels {}, Rate {}'.format(
filename, 8*wf.getsampwidth(), wf.getnchannels(), wf.getframerate())
buf = wf.readframes(chunk_size)
frames = []
while buf:
frames.append(buf)
buf = wf.readframes(chunk_size)
buf = b''.join(frames)
data = np.frombuffer(buf, dtype=sample_width)
data = data.reshape((-1, wf.getnchannels()))
return data
def smooth(inp, sigma):
return scipy.ndimage.filters.gaussian_filter(np.abs(inp), sigma=sigma)
def get_clicks(inp, threshold, min_samples, last_click=None):
out = inp*0.
for i,x in enumerate(inp):
if x>threshold:
if last_click is None or i-last_click>min_samples:
last_click = i
out[i] = 1
return out, last_click
def chop(inp, clicks, afterlength=150, prelength=0):
chunks = []
for i,(x,clicked) in enumerate(zip(inp,clicks)):
if clicked:
overflow = i+afterlength-len(inp)
underflow = i-prelength
if underflow>=0:
if overflow>0:
chunk = np.hstack((inp[underflow:], np.zeros(overflow)))
else:
chunk = inp[underflow:i+afterlength]
else:
if overflow>0:
chunk = np.hstack((np.zeros(-underflow),inp[:], np.zeros(overflow)))
else:
print [x.shape for x in (np.zeros(-underflow), inp[:i+afterlength])]
chunk = np.hstack((np.zeros(-underflow), inp[:i+afterlength]))
chunks.append(chunk)
return np.array(chunks)
def chop_all(inp, threshold, click_inhibit=6666, afterlength=150, prelength=50):
smoothed = smooth(inp, 4)
clicks = get_clicks(smoothed, threshold, click_inhibit)[0]
chunks = chop(inp, clicks, afterlength, prelength)
return chunks
if __name__=='__main__':
wav = wav_to_np('snaps/san4.wav')[:,0]/32768. # scale to [-1,+1]
smoothed = smooth(wav, 1)
threshold = 0.3
clicks = get_clicks(smoothed, threshold, 6666)[0]
num_clicks = sum(clicks)
chunks = chop(wav, clicks, afterlength=300, prelength=0)
#chunks = chop_all(wav, 0.02)
print 'NumChunks: {}'.format(len(chunks))
chunks=chunks
for chunk in chunks:
plt.plot(chunk)
plt.figure('figure2')
plt.plot(np.arange(len(smoothed)),smoothed,np.arange(len(smoothed)),np.ones(len(smoothed))*threshold)
#plt.plot(smoothed)
plt.plot(np.arange(len(smoothed)), clicks)
|
mit
|
golismero/golismero-devel
|
plugins/testing/scan/zone_transfer.py
|
8
|
3586
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: http://golismero-project.com
Golismero project mail: [email protected]
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from golismero.api.config import Config
from golismero.api.data.resource.domain import Domain
from golismero.api.data.resource.ip import IP
from golismero.api.data.vulnerability.information_disclosure.dns_zone_transfer\
import DNSZoneTransfer
from golismero.api.logger import Logger
from golismero.api.net.dns import DNS
from golismero.api.plugin import TestingPlugin
#------------------------------------------------------------------------------
class DNSZoneTransferPlugin(TestingPlugin):
#--------------------------------------------------------------------------
def get_accepted_types(self):
return [Domain]
#--------------------------------------------------------------------------
def run(self, info):
# Get the root domain only.
root = info.root
# Skip localhost.
if root == "localhost":
return
# Skip if the root domain is out of scope.
if root not in Config.audit_scope:
return
# Skip root domains we've already processed.
if self.state.put(root, True):
return
# Attempt a DNS zone transfer.
ns_servers, resolv = DNS.zone_transfer(
root, ns_allowed_zone_transfer = True)
# On failure, skip.
if not resolv:
Logger.log_verbose(
"DNS zone transfer failed, server %r not vulnerable"
% root)
return
# Create a Domain object for the root domain.
domain = Domain(root)
# Associate all the results with the root domain.
for r in resolv:
map(domain.add_information, r)
# Add the root domain to the results.
results = []
results.append(domain)
# We have a vulnerability on each of the nameservers involved.
msg = "DNS zone transfer successful, "
if len(ns_servers) > 1:
msg += "%d nameservers for %r are vulnerable!"
msg %= (len(ns_servers), root)
else:
msg += "nameserver for %r is vulnerable!" % root
Logger.log(msg)
# If we don't have the name servers...
if not ns_servers:
# Assume the root domain also points to the nameserver.
vulnerability = DNSZoneTransfer(domain, root)
results.append(vulnerability)
# If we have the name servers...
else:
# Create a vulnerability for each nameserver in scope.
for ns in ns_servers:
vulnerability = DNSZoneTransfer(domain, ns)
results.append(vulnerability)
# Return the results.
return results
|
gpl-2.0
|
aspuru-guzik-group/mission_control
|
setup.py
|
1
|
1103
|
import os
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
dependencies = [
*[
{'install_requires_value': simple_dependency}
for simple_dependency in [
'dill',
'jinja2',
'sqlalchemy',
]
],
]
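# A hypothetical dependency that is not on PyPI would carry both keys, so that it
# ends up in install_requires as well as dependency_links below, e.g.
#   {'install_requires_value': 'somepkg',
#    'dependency_links_value': 'https://example.com/somepkg/tarball/master#egg=somepkg'}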
setup(
name='mission_control',
version='0.0.1',
packages=find_packages(),
include_package_data=True,
license='Apache 2.0',
description='A workflow toolkit',
author='A. Dorsk',
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
install_requires=(
[dependency['install_requires_value'] for dependency in dependencies
if 'install_requires_value' in dependency]
),
dependency_links=(
[dependency['dependency_links_value'] for dependency in dependencies
if 'dependency_links_value' in dependency]
)
)
|
apache-2.0
|
sbellem/django
|
tests/auth_tests/test_templates.py
|
328
|
2785
|
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.auth.views import (
password_change, password_change_done, password_reset,
password_reset_complete, password_reset_confirm, password_reset_done,
)
from django.test import RequestFactory, TestCase, override_settings
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls',
)
class AuthTemplateTests(TestCase):
def test_titles(self):
rf = RequestFactory()
user = User.objects.create_user('jsmith', '[email protected]', 'pass')
user = authenticate(username=user.username, password='pass')
request = rf.get('/somepath/')
request.user = user
response = password_reset(request, post_reset_redirect='dummy/')
self.assertContains(response, '<title>Password reset</title>')
self.assertContains(response, '<h1>Password reset</h1>')
response = password_reset_done(request)
self.assertContains(response, '<title>Password reset sent</title>')
self.assertContains(response, '<h1>Password reset sent</h1>')
# password_reset_confirm invalid token
response = password_reset_confirm(request, uidb64='Bad', token='Bad', post_reset_redirect='dummy/')
self.assertContains(response, '<title>Password reset unsuccessful</title>')
self.assertContains(response, '<h1>Password reset unsuccessful</h1>')
# password_reset_confirm valid token
default_token_generator = PasswordResetTokenGenerator()
token = default_token_generator.make_token(user)
uidb64 = force_text(urlsafe_base64_encode(force_bytes(user.pk)))
response = password_reset_confirm(request, uidb64, token, post_reset_redirect='dummy/')
self.assertContains(response, '<title>Enter new password</title>')
self.assertContains(response, '<h1>Enter new password</h1>')
response = password_reset_complete(request)
self.assertContains(response, '<title>Password reset complete</title>')
self.assertContains(response, '<h1>Password reset complete</h1>')
response = password_change(request, post_change_redirect='dummy/')
self.assertContains(response, '<title>Password change</title>')
self.assertContains(response, '<h1>Password change</h1>')
response = password_change_done(request)
self.assertContains(response, '<title>Password change successful</title>')
self.assertContains(response, '<h1>Password change successful</h1>')
|
bsd-3-clause
|
jorik041/robotframework
|
utest/result/test_visitor.py
|
36
|
1590
|
import unittest
from os.path import dirname, join
from robot.result import ExecutionResult
from robot.result.visitor import SuiteVisitor
RESULT = ExecutionResult(join(dirname(__file__), 'golden.xml'))
class TestVisitingSuite(unittest.TestCase):
def test_abstract_visitor(self):
RESULT.suite.visit(SuiteVisitor())
RESULT.suite.visit(SuiteVisitor())
def test_start_suite_can_stop_visiting(self):
RESULT.suite.visit(StartSuiteStopping())
def test_start_test_can_stop_visiting(self):
RESULT.suite.visit(StartTestStopping())
def test_start_keyword_can_stop_visiting(self):
RESULT.suite.visit(StartKeywordStopping())
class StartSuiteStopping(SuiteVisitor):
def start_suite(self, suite):
return False
def end_suite(self, suite):
raise AssertionError
def start_test(self, test):
raise AssertionError
def start_keyword(self, keyword):
raise AssertionError
class StartTestStopping(SuiteVisitor):
def __init__(self):
self.test_started = False
def start_test(self, test):
self.test_started = True
return False
def end_test(self, test):
raise AssertionError
def start_keyword(self, keyword):
if self.test_started:
raise AssertionError
class StartKeywordStopping(SuiteVisitor):
def start_keyword(self, test):
return False
def end_keyword(self, test):
raise AssertionError
def log_message(self, msg):
raise AssertionError
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
idrogeno/FusionOE
|
lib/python/Components/TuneTest.py
|
28
|
10716
|
from enigma import eDVBFrontendParametersSatellite, eDVBFrontendParametersTerrestrial, eDVBFrontendParametersCable, eDVBFrontendParameters, eDVBResourceManager, eTimer
class Tuner:
def __init__(self, frontend, ignore_rotor=False):
self.frontend = frontend
self.ignore_rotor = ignore_rotor
# transponder = (frequency, symbolrate, polarisation, fec, inversion, orbpos, system, modulation, rolloff, pilot, tsid, onid)
# 0 1 2 3 4 5 6 7 8 9 10 11
def tune(self, transponder):
if self.frontend:
print "[TuneTest] tuning to transponder with data", transponder
parm = eDVBFrontendParametersSatellite()
parm.frequency = transponder[0] * 1000
parm.symbol_rate = transponder[1] * 1000
parm.polarisation = transponder[2]
parm.fec = transponder[3]
parm.inversion = transponder[4]
parm.orbital_position = transponder[5]
parm.system = transponder[6]
parm.modulation = transponder[7]
parm.rolloff = transponder[8]
parm.pilot = transponder[9]
self.tuneSatObj(parm)
def tuneSatObj(self, transponderObj):
if self.frontend:
feparm = eDVBFrontendParameters()
feparm.setDVBS(transponderObj, self.ignore_rotor)
self.lastparm = feparm
self.frontend.tune(feparm)
def tuneTerr(self, frequency,
inversion=2, bandwidth = 7000000, fechigh = 6, feclow = 6,
modulation = 2, transmission = 2, guard = 4,
hierarchy = 4, system = 0, plpid = 0):
if self.frontend:
print "[TuneTest] tuning to transponder with data", [frequency, inversion, bandwidth, fechigh, feclow, modulation, transmission, guard, hierarchy, system, plpid]
parm = eDVBFrontendParametersTerrestrial()
parm.frequency = frequency
parm.inversion = inversion
parm.bandwidth = bandwidth
parm.code_rate_HP = fechigh
parm.code_rate_LP = feclow
parm.modulation = modulation
parm.transmission_mode = transmission
parm.guard_interval = guard
parm.hierarchy = hierarchy
parm.system = system
parm.plpid = plpid
self.tuneTerrObj(parm)
def tuneTerrObj(self, transponderObj):
if self.frontend:
feparm = eDVBFrontendParameters()
feparm.setDVBT(transponderObj)
self.lastparm = feparm
self.frontend.tune(feparm)
def tuneCab(self, transponder):
if self.frontend:
print "[TuneTest] tuning to transponder with data", transponder
parm = eDVBFrontendParametersCable()
parm.frequency = transponder[0]
parm.symbol_rate = transponder[1]
parm.modulation = transponder[2]
parm.fec_inner = transponder[3]
parm.inversion = transponder[4]
#parm.system = transponder[5]
self.tuneCabObj(parm)
def tuneCabObj(self, transponderObj):
if self.frontend:
feparm = eDVBFrontendParameters()
feparm.setDVBC(transponderObj)
self.lastparm = feparm
self.frontend.tune(feparm)
def retune(self):
if self.frontend:
self.frontend.tune(self.lastparm)
def getTransponderData(self):
ret = { }
if self.frontend:
self.frontend.getTransponderData(ret, True)
return ret
# tunes a list of transponders, checks whether they lock and optionally checks the onid/tsid combination
# 1) add transponders with addTransponder()
# 2) call run(<checkPIDs = True>)
# 3) finishedChecking() is called when the run is finished
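# A minimal usage sketch (illustrative only; the feid and transponder values are
# made up, and ProgressPrinter is a hypothetical subclass overriding the hooks):
#
#   class ProgressPrinter(TuneTest):
#       def progressCallback(self, progress):
#           print progress
#
#   test = ProgressPrinter(feid=0, stopOnSuccess=1)
#   test.addTransponder((11778, 27500, 1, 3, 2, 192, 0, 1, 0, 0, -1, -1))
#   test.run(checkPIDs=False)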
class TuneTest:
def __init__(self, feid, stopOnSuccess = -1, stopOnError = -1):
self.stopOnSuccess = stopOnSuccess
self.stopOnError = stopOnError
self.feid = feid
self.transponderlist = []
self.currTuned = None
print "TuneTest for feid %d" % self.feid
if not self.openFrontend():
self.oldref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.session.nav.stopService() # try to disable foreground service
if not self.openFrontend():
if self.session.pipshown: # try to disable pip
if hasattr(self.session, 'infobar'):
if self.session.infobar.servicelist.dopipzap:
self.session.infobar.servicelist.togglePipzap()
if hasattr(self.session, 'pip'):
del self.session.pip
self.session.pipshown = False
if not self.openFrontend():
self.frontend = None # in normal case this should not happen
self.tuner = Tuner(self.frontend)
self.timer = eTimer()
self.timer.callback.append(self.updateStatus)
def gotTsidOnid(self, tsid, onid):
print "******** got tsid, onid:", tsid, onid
        if tsid != -1 and onid != -1:
self.pidStatus = self.INTERNAL_PID_STATUS_SUCCESSFUL
self.tsid = tsid
self.onid = onid
else:
self.pidStatus = self.INTERNAL_PID_STATUS_FAILED
self.tsid = -1
self.onid = -1
self.timer.start(100, True)
def updateStatus(self):
dict = {}
self.frontend.getFrontendStatus(dict)
stop = False
print "status:", dict
if dict["tuner_state"] == "TUNING":
print "TUNING"
self.timer.start(100, True)
self.progressCallback((self.getProgressLength(), self.tuningtransponder, self.STATUS_TUNING, self.currTuned))
elif self.checkPIDs and self.pidStatus == self.INTERNAL_PID_STATUS_NOOP:
print "2nd choice"
if dict["tuner_state"] == "LOCKED":
print "acquiring TSID/ONID"
self.raw_channel.receivedTsidOnid.get().append(self.gotTsidOnid)
self.raw_channel.requestTsidOnid()
self.pidStatus = self.INTERNAL_PID_STATUS_WAITING
else:
self.pidStatus = self.INTERNAL_PID_STATUS_FAILED
elif self.checkPIDs and self.pidStatus == self.INTERNAL_PID_STATUS_WAITING:
print "waiting for pids"
else:
if dict["tuner_state"] == "LOSTLOCK" or dict["tuner_state"] == "FAILED":
self.tuningtransponder = self.nextTransponder()
self.failedTune.append([self.currTuned, self.oldTuned, "tune_failed", dict]) # last parameter is the frontend status)
if self.stopOnError != -1 and self.stopOnError <= len(self.failedTune):
stop = True
elif dict["tuner_state"] == "LOCKED":
pidsFailed = False
if self.checkPIDs:
if self.currTuned is not None:
if self.tsid != self.currTuned[10] or self.onid != self.currTuned[11]:
self.failedTune.append([self.currTuned, self.oldTuned, "pids_failed", {"real": (self.tsid, self.onid), "expected": (self.currTuned[10], self.currTuned[11])}, dict]) # last parameter is the frontend status
pidsFailed = True
else:
self.successfullyTune.append([self.currTuned, self.oldTuned, dict]) # 3rd parameter is the frontend status
if self.stopOnSuccess != -1 and self.stopOnSuccess <= len(self.successfullyTune):
stop = True
                elif not self.checkPIDs or (self.checkPIDs and not pidsFailed):
self.successfullyTune.append([self.currTuned, self.oldTuned, dict]) # 3rd parameter is the frontend status
if self.stopOnSuccess != -1 and self.stopOnSuccess <= len(self.successfullyTune):
stop = True
self.tuningtransponder = self.nextTransponder()
else:
print "************* tuner_state:", dict["tuner_state"]
self.progressCallback((self.getProgressLength(), self.tuningtransponder, self.STATUS_NOOP, self.currTuned))
if not stop:
self.tune()
if self.tuningtransponder < len(self.transponderlist) and not stop:
if self.pidStatus != self.INTERNAL_PID_STATUS_WAITING:
self.timer.start(100, True)
print "restart timer"
else:
print "not restarting timers (waiting for pids)"
else:
self.progressCallback((self.getProgressLength(), len(self.transponderlist), self.STATUS_DONE, self.currTuned))
print "finishedChecking"
self.finishedChecking()
def firstTransponder(self):
print "firstTransponder:"
index = 0
if self.checkPIDs:
print "checkPIDs-loop"
# check for tsid != -1 and onid != -1
print "index:", index
print "len(self.transponderlist):", len(self.transponderlist)
while index < len(self.transponderlist) and (self.transponderlist[index][10] == -1 or self.transponderlist[index][11] == -1):
index += 1
print "FirstTransponder final index:", index
return index
def nextTransponder(self):
print "getting next transponder", self.tuningtransponder
index = self.tuningtransponder + 1
if self.checkPIDs:
print "checkPIDs-loop"
# check for tsid != -1 and onid != -1
print "index:", index
print "len(self.transponderlist):", len(self.transponderlist)
while index < len(self.transponderlist) and (self.transponderlist[index][10] == -1 or self.transponderlist[index][11] == -1):
index += 1
print "next transponder index:", index
return index
def finishedChecking(self):
print "finished testing"
print "successfull:", self.successfullyTune
print "failed:", self.failedTune
def openFrontend(self):
res_mgr = eDVBResourceManager.getInstance()
if res_mgr:
self.raw_channel = res_mgr.allocateRawChannel(self.feid)
if self.raw_channel:
self.frontend = self.raw_channel.getFrontend()
if self.frontend:
return True
else:
print "getFrontend failed"
else:
print "getRawChannel failed"
else:
print "getResourceManager instance failed"
return False
def tune(self):
print "tuning to", self.tuningtransponder
if self.tuningtransponder < len(self.transponderlist):
self.pidStatus = self.INTERNAL_PID_STATUS_NOOP
self.oldTuned = self.currTuned
self.currTuned = self.transponderlist[self.tuningtransponder]
self.tuner.tune(self.transponderlist[self.tuningtransponder])
INTERNAL_PID_STATUS_NOOP = 0
INTERNAL_PID_STATUS_WAITING = 1
INTERNAL_PID_STATUS_SUCCESSFUL = 2
INTERNAL_PID_STATUS_FAILED = 3
def run(self, checkPIDs = False):
self.checkPIDs = checkPIDs
self.pidStatus = self.INTERNAL_PID_STATUS_NOOP
self.failedTune = []
self.successfullyTune = []
self.tuningtransponder = self.firstTransponder()
self.tune()
self.progressCallback((self.getProgressLength(), self.tuningtransponder, self.STATUS_START, self.currTuned))
self.timer.start(100, True)
# transponder = (frequency, symbolrate, polarisation, fec, inversion, orbpos, <system>, <modulation>, <rolloff>, <pilot>, <tsid>, <onid>)
# 0 1 2 3 4 5 6 7 8 9 10 11
def addTransponder(self, transponder):
self.transponderlist.append(transponder)
def clearTransponder(self):
self.transponderlist = []
def getProgressLength(self):
count = 0
if self.stopOnError == -1:
count = len(self.transponderlist)
else:
if count < self.stopOnError:
count = self.stopOnError
if self.stopOnSuccess == -1:
count = len(self.transponderlist)
else:
if count < self.stopOnSuccess:
count = self.stopOnSuccess
return count
STATUS_START = 0
STATUS_TUNING = 1
STATUS_DONE = 2
STATUS_NOOP = 3
# can be overwritten
# progress = (range, value, status, transponder)
def progressCallback(self, progress):
pass
|
gpl-2.0
|
druuu/django
|
tests/csrf_tests/tests.py
|
152
|
19350
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import (
CSRF_KEY_LENGTH, CsrfViewMiddleware, get_token,
)
from django.template import RequestContext, Template
from django.template.context_processors import csrf
from django.test import SimpleTestCase, override_settings
from django.views.decorators.csrf import (
csrf_exempt, ensure_csrf_cookie, requires_csrf_token,
)
# Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests
def post_form_response():
resp = HttpResponse(content="""
<html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html>
""", mimetype="text/html")
return resp
def post_form_view(request):
"""A view that returns a POST form (without a token)"""
return post_form_response()
# Response/views used for template tag tests
def token_view(request):
"""A view that uses {% csrf_token %}"""
context = RequestContext(request, processors=[csrf])
template = Template("{% csrf_token %}")
return HttpResponse(template.render(context))
def non_token_view_using_request_processor(request):
"""
A view that doesn't use the token, but does use the csrf view processor.
"""
context = RequestContext(request, processors=[csrf])
template = Template("")
return HttpResponse(template.render(context))
class TestingHttpRequest(HttpRequest):
"""
A version of HttpRequest that allows us to change some things
more easily
"""
def is_secure(self):
return getattr(self, '_is_secure_override', False)
class CsrfViewMiddlewareTest(SimpleTestCase):
# The csrf token is potentially from an untrusted source, so could have
# characters that need dealing with.
_csrf_id_cookie = b"<1>\xc2\xa1"
_csrf_id = "1"
def _get_GET_no_csrf_cookie_request(self):
return TestingHttpRequest()
def _get_GET_csrf_cookie_request(self):
req = TestingHttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie
return req
def _get_POST_csrf_cookie_request(self):
req = self._get_GET_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_no_csrf_cookie_request(self):
req = self._get_GET_no_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_request_with_token(self):
req = self._get_POST_csrf_cookie_request()
req.POST['csrfmiddlewaretoken'] = self._csrf_id
return req
def _check_token_present(self, response, csrf_id=None):
self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id))
def test_process_view_token_too_long(self):
"""
Check that if the token is longer than expected, it is ignored and
a new token is created.
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 10000000
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(len(csrf_cookie.value), CSRF_KEY_LENGTH)
def test_process_response_get_token_used(self):
"""
When get_token is used, check that the cookie is created and headers
patched.
"""
req = self._get_GET_no_csrf_cookie_request()
# Put tests for CSRF_COOKIE_* settings here
with self.settings(CSRF_COOKIE_NAME='myname',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get('myname', False)
self.assertNotEqual(csrf_cookie, False)
self.assertEqual(csrf_cookie['domain'], '.example.com')
self.assertEqual(csrf_cookie['secure'], True)
self.assertEqual(csrf_cookie['httponly'], True)
self.assertEqual(csrf_cookie['path'], '/test/')
self.assertIn('Cookie', resp2.get('Vary', ''))
def test_process_response_get_token_not_used(self):
"""
Check that if get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages which do call
# get_token(), assuming they use the token, are not cacheable because
# the token is specific to the user
req = self._get_GET_no_csrf_cookie_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {})
resp = non_token_view_using_request_processor(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(csrf_cookie, False)
# Check the request processing
def test_process_request_no_csrf_cookie(self):
"""
Check that if no CSRF cookies is present, the middleware rejects the
incoming request. This will stop login CSRF.
"""
req = self._get_POST_no_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_no_token(self):
"""
Check that if a CSRF cookie is present but no token, the middleware
rejects the incoming request.
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_and_token(self):
"""
Check that if both a cookie and a token is present, the middleware lets it through.
"""
req = self._get_POST_request_with_token()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
Check that if a CSRF cookie is present and no token, but the csrf_exempt
decorator has been applied to the view, the middleware lets it through
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {})
self.assertIsNone(req2)
def test_csrf_token_in_header(self):
"""
Check that we can pass in the token in a header instead of in the form
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED')
def test_csrf_token_in_header_with_customized_name(self):
"""
settings.CSRF_HEADER_NAME can be used to customize the CSRF header name
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN_CUSTOMIZED'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_put_and_delete_rejected(self):
"""
Tests that HTTP PUT and DELETE methods have protection
"""
req = TestingHttpRequest()
req.method = 'PUT'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
req = TestingHttpRequest()
req.method = 'DELETE'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_put_and_delete_allowed(self):
"""
Tests that HTTP PUT and DELETE methods can get through with
X-CSRFToken and a cookie
"""
req = self._get_GET_csrf_cookie_request()
req.method = 'PUT'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
req = self._get_GET_csrf_cookie_request()
req.method = 'DELETE'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
# Tests for the template tag method
def test_token_node_no_csrf_cookie(self):
"""
Check that CsrfTokenNode works when no CSRF cookie is set
"""
req = self._get_GET_no_csrf_cookie_request()
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_empty_csrf_cookie(self):
"""
Check that we get a new token if the csrf_cookie is the empty string
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = b""
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_with_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is set
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_exempt_view(self):
"""
Check that get_token still works for a view decorated with 'csrf_exempt'.
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_requires_csrf_token_view(self):
"""
Check that get_token works for a view decorated solely with requires_csrf_token
"""
req = self._get_GET_csrf_cookie_request()
resp = requires_csrf_token(token_view)(req)
self._check_token_present(resp)
def test_token_node_with_new_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is created by
the middleware (when one was not already present)
"""
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME]
self._check_token_present(resp, csrf_id=csrf_cookie.value)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_bad_referer(self):
"""
Test that a POST HTTPS request with a bad referer is rejected
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNotNone(req2)
self.assertEqual(403, req2.status_code)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_malformed_referer(self):
"""
Test that a POST HTTPS request with a bad referer is rejected
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'http://http://www.example.com/'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNotNone(req2)
self.assertEqual(403, req2.status_code)
# Non-ASCII
req.META['HTTP_REFERER'] = b'\xd8B\xf6I\xdf'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNotNone(req2)
self.assertEqual(403, req2.status_code)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer(self):
"""
Test that a POST HTTPS request with a good referer is accepted
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer_2(self):
"""
Test that a POST HTTPS request with a good referer is accepted
where the referer contains no trailing slash
"""
# See ticket #15617
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_ensures_csrf_cookie_no_middleware(self):
"""
        Tests that the ensure_csrf_cookie decorator fulfils its promise
with no middleware
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
resp = view(req)
self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertIn('Cookie', resp.get('Vary', ''))
def test_ensures_csrf_cookie_with_middleware(self):
"""
        Tests that the ensure_csrf_cookie decorator fulfils its promise
with the middleware enabled.
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, view, (), {})
resp = view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
self.assertTrue(resp2.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertIn('Cookie', resp2.get('Vary', ''))
def test_ensures_csrf_cookie_no_logging(self):
"""
Tests that ensure_csrf_cookie doesn't log warnings. See #19436.
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
class TestHandler(logging.Handler):
def emit(self, record):
raise Exception("This shouldn't have happened!")
logger = logging.getLogger('django.request')
test_handler = TestHandler()
old_log_level = logger.level
try:
logger.addHandler(test_handler)
logger.setLevel(logging.WARNING)
req = self._get_GET_no_csrf_cookie_request()
view(req)
finally:
logger.removeHandler(test_handler)
logger.setLevel(old_log_level)
def test_csrf_cookie_age(self):
"""
Test to verify CSRF cookie age can be set using
settings.CSRF_COOKIE_AGE.
"""
req = self._get_GET_no_csrf_cookie_request()
MAX_AGE = 123
with self.settings(CSRF_COOKIE_NAME='csrfcookie',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
max_age = resp2.cookies.get('csrfcookie').get('max-age')
self.assertEqual(max_age, MAX_AGE)
def test_csrf_cookie_age_none(self):
"""
        Test to verify that the CSRF cookie has no max-age set when
        CSRF_COOKIE_AGE is None, and therefore uses session-based cookies.
"""
req = self._get_GET_no_csrf_cookie_request()
MAX_AGE = None
with self.settings(CSRF_COOKIE_NAME='csrfcookie',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
max_age = resp2.cookies.get('csrfcookie').get('max-age')
self.assertEqual(max_age, '')
def test_post_data_read_failure(self):
"""
#20128 -- IOErrors during POST data reading should be caught and
treated as if the POST data wasn't there.
"""
class CsrfPostRequest(HttpRequest):
"""
HttpRequest that can raise an IOError when accessing POST data
"""
def __init__(self, token, raise_error):
super(CsrfPostRequest, self).__init__()
self.method = 'POST'
self.raise_error = False
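                # Keep raise_error False while POST is populated below, since
                # assigning to self.POST[...] goes through the property getter.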
self.COOKIES[settings.CSRF_COOKIE_NAME] = token
self.POST['csrfmiddlewaretoken'] = token
self.raise_error = raise_error
def _load_post_and_files(self):
raise IOError('error reading input data')
def _get_post(self):
if self.raise_error:
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
POST = property(_get_post, _set_post)
token = 'ABC'
req = CsrfPostRequest(token, raise_error=False)
resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
req = CsrfPostRequest(token, raise_error=True)
resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(resp.status_code, 403)
|
bsd-3-clause
|
krousey/test-infra
|
triage/summarize.py
|
5
|
18589
|
#!/usr/bin/env python2
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
summarize.py groups failed tests together by finding edit distances between their
failure strings, and emits JSON for rendering in a browser.
'''
# pylint: disable=invalid-name,missing-docstring
import argparse
import functools
import hashlib
import json
import os
import re
import sys
import time
import zlib
import berghelroach
editdist = berghelroach.dist
flakeReasonDateRE = re.compile(
r'[A-Z][a-z]{2}, \d+ \w+ 2\d{3} [\d.-: ]*([-+]\d+)?|'
r'\w{3}\s+\d{1,2} \d+:\d+:\d+(\.\d+)?|(\d{4}-\d\d-\d\d.|.\d{4} )\d\d:\d\d:\d\d(.\d+)?')
# Find random noisy strings that should be replaced with renumbered strings, for more similarity.
flakeReasonOrdinalRE = re.compile(
r'0x[0-9a-fA-F]+' # hex constants
r'|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(:\d+)?' # IPs + optional port
r'|[0-9a-fA-F]{8}-\S{4}-\S{4}-\S{4}-\S{12}(-\d+)?' # UUIDs + trailing digits
r'|[0-9a-f]{12,32}' # hex garbage
r'|(?<=minion-group-|default-pool-)[-0-9a-z]{4,}' # node names
)
def normalize(s):
"""
Given a traceback or error message from a text, reduce excess entropy to make
clustering easier.
This includes:
- blanking dates and timestamps
- renumbering unique information like
- pointer addresses
- UUIDs
- IP addresses
- sorting randomly ordered map[] strings.
"""
# blank out dates
s = flakeReasonDateRE.sub('TIME', s)
# do alpha conversion-- rename random garbage strings (hex pointer values, node names, etc)
# into 'UNIQ1', 'UNIQ2', etc.
matches = {}
def repl(m):
s = m.group(0)
if s not in matches:
matches[s] = 'UNIQ%d' % (len(matches) + 1)
return matches[s]
if 'map[' in s:
# Go's maps are in a random order. Try to sort them to reduce diffs.
s = re.sub(r'map\[([^][]*)\]',
lambda m: 'map[%s]' % ' '.join(sorted(m.group(1).split())),
s)
s = flakeReasonOrdinalRE.sub(repl, s)
if len(s) > 10000:
# for long strings, remove repeated lines!
s = re.sub(r'(?m)^(.*\n)\1+', r'\1', s)
if len(s) > 200000: # ridiculously long test output
s = s[:100000] + '\n...[truncated]...\n' + s[-100000:]
return s
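# Illustrative sketch of the intended effect (hypothetical failure text):
#   normalize("error at 0x1f3a on 10.240.0.5:443, Mon, 06 Mar 2017 11:06:12")
# should yield roughly
#   "error at UNIQ1 on UNIQ2, TIME"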
def normalize_name(name):
"""
Given a test name, remove [...]/{...}.
Matches code in testgrid and kubernetes/hack/update_owners.py.
"""
name = re.sub(r'\[.*?\]|\{.*?\}', '', name)
name = re.sub(r'\s+', ' ', name)
return name.strip()
def make_ngram_counts(s, ngram_counts={}):
"""
Convert a string into a histogram of frequencies for different byte combinations.
This can be used as a heuristic to estimate edit distance between two strings in
constant time.
Instead of counting each ngram individually, they are hashed into buckets.
This makes the output count size constant.
"""
# Yes, I'm intentionally memoizing here.
# pylint: disable=dangerous-default-value
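    # The mutable default argument persists for the lifetime of the process,
    # so ngram_counts doubles as a cache keyed by the input string.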
size = 64
if s not in ngram_counts:
counts = [0] * size
for x in xrange(len(s)-3):
counts[zlib.crc32(s[x:x+4].encode('utf8')) & (size - 1)] += 1
ngram_counts[s] = counts # memoize
return ngram_counts[s]
def ngram_editdist(a, b):
"""
Compute a heuristic lower-bound edit distance using ngram counts.
An insert/deletion/substitution can cause up to 4 ngrams to differ:
abcdefg => abcefg
(abcd, bcde, cdef, defg) => (abce, bcef, cefg)
This will underestimate the edit distance in many cases:
- ngrams hashing into the same bucket will get confused
- a large-scale transposition will barely disturb ngram frequencies,
but will have a very large effect on edit distance.
It is useful to avoid more expensive precise computations when they are
guaranteed to exceed some limit (being a lower bound), or as a proxy when
the exact edit distance computation is too expensive (for long inputs).
"""
counts_a = make_ngram_counts(a)
counts_b = make_ngram_counts(b)
return sum(abs(x-y) for x, y in zip(counts_a, counts_b))/4
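# Illustrative use as a cheap prefilter (see find_match below): because the
# value is a lower bound, any pair whose ngram distance already exceeds the
# allowed edit-distance limit can be skipped without running the exact, more
# expensive editdist() computation.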
def make_ngram_counts_digest(s):
"""
Returns a hashed version of the ngram counts.
"""
return hashlib.sha1(str(make_ngram_counts(s))).hexdigest()[:20]
def file_memoize(description, name):
"""
Decorator to save a function's results to a file.
"""
def inner(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if os.path.exists(name):
data = json.load(open(name))
print 'done (cached)', description
return data
data = func(*args, **kwargs)
json.dump(data, open(name, 'w'))
print 'done', description
return data
wrapper.__wrapped__ = func
return wrapper
return inner
@file_memoize('loading failed tests', 'failed.json')
def load_failures(builds_file, tests_file):
builds = {}
for build in json.load(open(builds_file)):
if not build['started'] or not build['number']:
continue
for attr in ('started', 'tests_failed', 'number', 'tests_run'):
build[attr] = int(build[attr])
build['elapsed'] = int(float(build['elapsed']))
if 'pr-logs' in build['path']:
build['pr'] = build['path'].split('/')[-3]
builds[build['path']] = build
failed_tests = {}
for test in json.load(open(tests_file)):
failed_tests.setdefault(test['name'], []).append(test)
for tests in failed_tests.itervalues():
tests.sort(key=lambda t: t['build'])
return builds, failed_tests
def find_match(fnorm, clusters):
for ngram_dist, other in sorted((ngram_editdist(fnorm, x), x) for x in clusters):
# allow up to 10% differences
limit = int((len(fnorm)+len(other))/2.0 * 0.10)
if ngram_dist > limit:
continue
if limit <= 1 and other != fnorm: # no chance
continue
dist = editdist(fnorm, other, limit)
if dist < limit:
return other
def cluster_test(tests):
"""
Compute failure clusters given a list of failures for one test.
Args:
tests: list of failed test dictionaries, with 'failure_text' keys
Returns:
{failure_text: [failure_in_cluster_1, failure_in_cluster_2, ...]}
"""
clusters = {}
start = time.time()
for test in tests:
ftext = test['failure_text']
fnorm = normalize(ftext)
if fnorm in clusters:
clusters[fnorm].append(test)
else:
other = find_match(fnorm, clusters)
if other:
clusters[other].append(test)
else:
clusters[fnorm] = [test]
if time.time() > start + 60:
print 'bailing early, taking too long!'
break
return clusters
@file_memoize('clustering inside each test', 'failed_clusters_local.json')
def cluster_local(failed_tests):
"""Cluster together the failures for each test. """
clustered = {}
for test_name, tests in sorted(failed_tests.iteritems(), key=lambda x: len(x[1]), reverse=True):
print len(tests), test_name,
sys.stdout.flush()
clustered[test_name] = cluster_test(tests)
print len(clustered[test_name])
return clustered
@file_memoize('clustering across tests', 'failed_clusters_global.json')
def cluster_global(clustered, previous_clustered):
"""Combine together clustered failures for each test.
This is done hierarchically for efficiency-- each test's failures are likely to be similar,
reducing the number of clusters that need to be paired up at this stage.
Args:
{test_name: {failure_text: [failure_1, failure_2, ...], ...}, ...}
Returns:
{failure_text: [(test_name, [failure_1, failure_2, ...]), ...], ...}
"""
clusters = {}
if previous_clustered:
# seed clusters using output from the previous run
n = 0
for cluster in previous_clustered:
key = cluster['key']
if key != normalize(key):
print key
print normalize(key)
n += 1
continue
clusters[cluster['key']] = {}
print 'Seeding with %d previous clusters' % len(clusters)
if n:
print '!!! %d clusters lost from different normalization! !!!' % n
for n, (test_name, cluster) in enumerate(
sorted(clustered.iteritems(),
key=lambda (k, v): sum(len(x) for x in v.itervalues()),
reverse=True),
1):
print '%d/%d %d %s' % (n, len(clustered), len(cluster), test_name)
for key, tests in sorted(cluster.iteritems(), key=lambda x: len(x[1]), reverse=True):
if key in clusters:
clusters[key].setdefault(test_name, []).extend(tests)
else:
other = find_match(key, clusters)
if other:
clusters[other].setdefault(test_name, []).extend(tests)
else:
clusters[key] = {test_name: list(tests)}
# If we seeded clusters using the previous run's keys, some of those
# clusters may have disappeared. Remove the resulting empty entries.
for k in {k for k, v in clusters.iteritems() if not v}:
clusters.pop(k)
return clusters
def tests_group_by_job(tests, builds):
"""Turn a list of test failures into {job: [buildnumber, ...], ...}"""
groups = {}
for test in tests:
try:
build = builds[test['build']]
except KeyError:
continue
if 'number' in build:
groups.setdefault(build['job'], set()).add(build['number'])
return sorted(((key, sorted(value, reverse=True)) for key, value in groups.iteritems()),
key=lambda (k, v): (-len(v), k))
SPAN_RE = re.compile(r'\w+|\W+')
def common_spans(xs):
"""
Finds something similar to the longest common subsequence of xs, but much faster.
Returns a list of [matchlen_1, mismatchlen_2, matchlen_2, mismatchlen_2, ...], representing
sequences of the first element of the list that are present in all members.
"""
common = None
for x in xs:
x_split = SPAN_RE.findall(x)
if common is None: # first iteration
common = set(x_split)
else:
common.intersection_update(x_split)
spans = []
match = True
span_len = 0
for x in SPAN_RE.findall(xs[0]):
if x in common:
if not match:
match = True
spans.append(span_len)
span_len = 0
span_len += len(x)
else:
if match:
match = False
spans.append(span_len)
span_len = 0
span_len += len(x)
if span_len:
spans.append(span_len)
return spans
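# Illustrative example (hypothetical inputs):
#   common_spans(["foo bar baz", "foo quux baz"]) -> [4, 3, 4]
# i.e. the first 4 characters ("foo ") are shared, the next 3 ("bar") are not,
# and the final 4 (" baz") are shared again.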
def clusters_to_display(clustered, builds):
"""Transpose and sort the output of cluster_global."""
return [{
"key": key,
"id": key_id,
"spans": common_spans([f['failure_text'] for _, fs in clusters for f in fs]),
"text": clusters[0][1][0]['failure_text'],
"tests": [{
"name": test_name,
"jobs": [{"name": n, "builds": b}
for n, b in tests_group_by_job(tests, builds)]
}
for test_name, tests in sorted(clusters, key=lambda (n, t): (-len(t), n))
]
}
for key, key_id, clusters in clustered if sum(len(x[1]) for x in clusters) > 1
]
def builds_to_columns(builds):
"""Convert a list of build dictionaries into a columnar form.
This compresses much better with gzip."""
jobs = {}
cols = {v: [] for v in 'started tests_failed elapsed tests_run result executor pr'.split()}
out = {'jobs': jobs, 'cols': cols, 'job_paths': {}}
for build in sorted(builds.itervalues(), key=lambda b: (b['job'], b['number'])):
if 'number' not in build:
continue
index = len(cols['started'])
for key, entries in cols.iteritems():
entries.append(build.get(key))
job = jobs.setdefault(build['job'], {})
if not job:
out['job_paths'][build['job']] = build['path'][:build['path'].rindex('/')]
job[build['number']] = index
for k, indexes in jobs.items():
numbers = sorted(indexes)
base = indexes[numbers[0]]
count = len(numbers)
# optimization: if we have a dense sequential mapping of builds=>indexes,
# store only the first build number, the run length, and the first index number.
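        # For example (hypothetical values): builds 500..509 occupying column
        # indexes 1200..1209 collapse to jobs[k] = [500, 10, 1200].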
if numbers[-1] == numbers[0] + count - 1 and \
all(indexes[k] == n + base for n, k in enumerate(numbers)):
jobs[k] = [numbers[0], count, base]
for n in numbers:
assert n <= numbers[0] + len(numbers), (k, n, jobs[k], len(numbers), numbers)
return out
def render(builds, clustered):
clustered_sorted = sorted(
clustered.iteritems(),
key=lambda (k, v): (-sum(len(ts) for ts in v.itervalues()), k))
clustered_tuples = [(k,
make_ngram_counts_digest(k),
sorted(clusters.items(), key=lambda (n, t): (-len(t), n)))
for k, clusters in clustered_sorted]
return {'clustered': clusters_to_display(clustered_tuples, builds),
'builds': builds_to_columns(builds)}
SIG_LABEL_RE = re.compile(r'\[sig-([^]]*)\]')
def annotate_owners(data, builds, owners):
"""
Assign ownership to a cluster based on the share of hits in the last day.
"""
owner_re = re.compile(r'(?:%s)' % '|'.join(
'(?P<%s>%s)' % (
sig.replace('-', '_'), # regex group names can't have -
'|'.join(re.escape(p) for p in prefixes)
)
for sig, prefixes in owners.iteritems()
))
job_paths = data['builds']['job_paths']
yesterday = max(data['builds']['cols']['started']) - (60 * 60 * 24)
for cluster in data['clustered']:
owner_counts = {}
for test in cluster['tests']:
m = SIG_LABEL_RE.search(test['name'])
if m:
owner = m.group(1)
else:
m = owner_re.match(normalize_name(test['name']))
if not m or not m.groupdict():
continue
owner = next(k for k, v in m.groupdict().iteritems() if v)
owner = owner.replace('_', '-')
counts = owner_counts.setdefault(owner, [0, 0])
for job in test['jobs']:
if ':' in job['name']: # non-standard CI
continue
job_path = job_paths[job['name']]
for build in job['builds']:
if builds['%s/%d' % (job_path, build)]['started'] > yesterday:
counts[0] += 1
else:
counts[1] += 1
if owner_counts:
owner = max(owner_counts.items(), key=lambda (o, c): (c, o))[0]
cluster['owner'] = owner
else:
cluster['owner'] = 'testing'
def render_slice(data, builds, prefix='', owner=''):
clustered = []
builds_out = {}
jobs = set()
for cluster in data['clustered']:
# print [cluster['id'], prefix]
if owner and cluster.get('owner') == owner:
clustered.append(cluster)
elif prefix and cluster['id'].startswith(prefix):
clustered.append(cluster)
else:
continue
for test in cluster['tests']:
for job in test['jobs']:
jobs.add(job['name'])
for path, build in builds.iteritems():
if build['job'] in jobs:
builds_out[path] = build
return {'clustered': clustered, 'builds': builds_to_columns(builds_out)}
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('builds', help='builds.json file from BigQuery')
parser.add_argument('tests', help='tests.json file from BigQuery')
parser.add_argument('--previous', help='previous output', type=argparse.FileType('r'))
parser.add_argument('--owners', help='test owner SIGs', type=argparse.FileType('r'))
parser.add_argument('--output', default='failure_data.json')
parser.add_argument('--output_slices',
help='Output slices to this path (must include PREFIX in template)')
return parser.parse_args(args)
def main(args):
builds, failed_tests = load_failures(args.builds, args.tests)
previous_clustered = None
if args.previous:
print 'loading previous'
previous_clustered = json.load(args.previous)['clustered']
clustered_local = cluster_local(failed_tests)
clustered = cluster_global(clustered_local, previous_clustered)
print '%d clusters' % len(clustered)
data = render(builds, clustered)
if args.owners:
owners = json.load(args.owners)
annotate_owners(data, builds, owners)
json.dump(data, open(args.output, 'w'),
sort_keys=True)
if args.output_slices:
assert 'PREFIX' in args.output_slices
for subset in range(256):
id_prefix = '%02x' % subset
json.dump(render_slice(data, builds, id_prefix),
open(args.output_slices.replace('PREFIX', id_prefix), 'w'),
sort_keys=True)
if args.owners:
owners.setdefault('testing', []) # for output
for owner in owners:
json.dump(render_slice(data, builds, prefix='', owner=owner),
open(args.output_slices.replace('PREFIX', 'sig-' + owner), 'w'),
sort_keys=True)
if __name__ == '__main__':
main(parse_args(sys.argv[1:]))
|
apache-2.0
|
InstaMineNuggets/InstaMineNuggets
|
contrib/testgen/base58.py
|
2139
|
2818
|
'''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
h3 = checksum(result[:-4])
    if result[-4:] == h3:
return result[:-4]
else:
return None
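# Sketch of the intended round trip (leading zero bytes aside):
#   b58decode_chk(b58encode_chk(v)) == v
# since b58encode_chk appends checksum(v) and b58decode_chk verifies and strips it.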
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/bitcoin/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
|
mit
|
annarev/tensorflow
|
tensorflow/python/training/training_ops_test.py
|
9
|
19664
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.learning.training_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework.test_util import TensorFlowTestCase
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import training_ops
class TrainingOpsTest(TensorFlowTestCase):
def _toType(self, dtype):
if dtype == np.float16:
return dtypes.float16
elif dtype == np.float32:
return dtypes.float32
elif dtype == np.float64:
return dtypes.float64
elif dtype == np.int32:
return dtypes.int32
elif dtype == np.int64:
return dtypes.int64
else:
assert False, (dtype)
def _testTypes(self, x, alpha, delta, use_gpu=None):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_sgd = training_ops.apply_gradient_descent(var, alpha, delta)
out = self.evaluate(apply_sgd)
self.assertShapeEqual(out, apply_sgd)
self.assertAllCloseAccordingToType(x - alpha * delta, out)
@test_util.run_v1_only("ApplyGradientDescent op returns a ref, so it is not "
"supported in eager mode.")
def testApplyGradientDescent(self):
for (dtype, use_gpu) in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
x = np.arange(100).astype(dtype)
alpha = np.array(2.0).astype(dtype)
delta = np.arange(100).astype(dtype)
self._testTypes(x, alpha, delta, use_gpu)
def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
out = self.evaluate(apply_adagrad)
self.assertShapeEqual(out, apply_adagrad)
self.assertAllCloseAccordingToType(x - lr * grad * (y + grad * grad)**
(-0.5), out)
self.assertAllCloseAccordingToType(y + grad * grad, self.evaluate(accum))
def _testTypesForFtrl(self,
x,
y,
z,
lr,
grad,
use_gpu=None,
l1=0.0,
l2=0.0,
lr_power=-0.5):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
linear = variables.VariableV1(z)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_ftrl = training_ops.apply_ftrl(var, accum, linear, grad, lr, l1, l2,
lr_power)
out = self.evaluate(apply_ftrl)
self.assertShapeEqual(out, apply_ftrl)
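      # Reference FTRL-Proximal style update computed in NumPy below: accumulate
      # the squared gradient, update the linear term, then apply the per-coordinate
      # closed-form proximal step to obtain the expected variable values.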
accum_update = y + grad * grad
linear_update = z + grad - (accum_update**(-lr_power) - y**
(-lr_power)) / lr * x
quadratic = 1.0 / (accum_update**(lr_power) * lr) + 2 * l2
expected_out = np.array([(
np.sign(linear_update[i]) * l1 - linear_update[i]) / (quadratic[i]) if
np.abs(linear_update[i]) > l1 else 0.0
for i in range(linear_update.size)])
self.assertAllCloseAccordingToType(accum_update, self.evaluate(accum))
if x.dtype == np.float16:
# The calculations here really are not very precise in float16.
self.assertAllClose(
linear_update, self.evaluate(linear), rtol=2e-2, atol=2e-2)
self.assertAllClose(expected_out, out, rtol=2e-2, atol=2e-2)
elif x.dtype == np.float32:
        # The calculations here are not sufficiently precise in float32.
self.assertAllClose(
linear_update, self.evaluate(linear), rtol=1e-5, atol=1e-5)
self.assertAllClose(expected_out, out, rtol=1e-5, atol=1e-5)
else:
self.assertAllClose(linear_update, self.evaluate(linear))
self.assertAllClose(expected_out, out)
def _testTypesForFtrlMultiplyLinearByLr(self,
x,
y,
z,
lr,
grad,
use_gpu=None,
l1=0.0,
l2=0.0,
lr_power=-0.5):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
linear = variables.VariableV1(z)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_ftrl = (
training_ops.apply_ftrl(
var,
accum,
linear,
grad,
lr,
l1,
l2,
lr_power,
multiply_linear_by_lr=True))
out = self.evaluate(apply_ftrl)
self.assertShapeEqual(out, apply_ftrl)
accum_update = y + grad * grad
linear_update = z + grad * lr - (accum_update**(-lr_power) - y**
(-lr_power)) * x
quadratic = accum_update**(-lr_power) + 2 * l2 * lr
expected_out = np.array([
(np.sign(linear_update[i]) * l1 * lr - linear_update[i]) /
(quadratic[i]) if np.abs(linear_update[i]) > l1 * lr else 0.0
for i in range(linear_update.size)
])
self.assertAllCloseAccordingToType(accum_update, self.evaluate(accum))
if x.dtype == np.float16:
# The calculations here really are not very precise in float16.
self.assertAllClose(
linear_update, self.evaluate(linear), rtol=2e-2, atol=2e-2)
self.assertAllClose(expected_out, out, rtol=2e-2, atol=2e-2)
elif x.dtype == np.float32:
        # The calculations here are not sufficiently precise in float32.
self.assertAllClose(
linear_update, self.evaluate(linear), rtol=1e-5, atol=1e-5)
self.assertAllClose(expected_out, out, rtol=1e-5, atol=1e-5)
else:
self.assertAllClose(linear_update, self.evaluate(linear))
self.assertAllClose(expected_out, out)
@test_util.run_v1_only("ApplyAdagrad op returns a ref, so it is not "
"supported in eager mode.")
def testApplyAdagrad(self):
for (dtype, use_gpu) in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
x = np.arange(100).astype(dtype)
y = np.arange(1, 101).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForAdagrad(x, y, lr, grad, use_gpu)
@test_util.run_v1_only("ApplyFtrl op returns a ref, so it is not "
"supported in eager mode.")
def testApplyFtrl(self):
for dtype in [np.float16, np.float32, np.float64]:
x = np.arange(100).astype(dtype)
y = np.arange(1, 101).astype(dtype)
z = np.arange(102, 202).astype(dtype)
lr = np.array(2.0).astype(dtype)
l1 = np.array(3.0).astype(dtype)
l2 = np.array(4.0).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForFtrl(x, y, z, lr, grad, use_gpu=False, l1=l1, l2=l2)
@test_util.run_v1_only("ApplyFtrlMultiplyLinearByLr op returns a ref, so it "
"is not supported in eager mode.")
def testApplyFtrlMultiplyLinearByLr(self):
for dtype in [np.float16, np.float32, np.float64]:
x = np.arange(100).astype(dtype)
y = np.arange(1, 101).astype(dtype)
z = np.arange(102, 202).astype(dtype)
lr = np.array(2.0).astype(dtype)
l1 = np.array(3.0).astype(dtype)
l2 = np.array(4.0).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForFtrlMultiplyLinearByLr(
x, y, z, lr, grad, use_gpu=False, l1=l1, l2=l2)
def _testTypesForSparseAdagrad(self, x, y, lr, grad, indices, use_gpu):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
sparse_apply_adagrad = training_ops.sparse_apply_adagrad(
var, accum, lr, grad,
constant_op.constant(indices, self._toType(indices.dtype)))
out = self.evaluate(sparse_apply_adagrad)
self.assertShapeEqual(out, sparse_apply_adagrad)
for (i, index) in enumerate(indices):
self.assertAllCloseAccordingToType(
x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i])**(-0.5),
self.evaluate(var)[index])
self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
self.evaluate(accum)[index])
def _testTypesForSparseFtrl(self,
x,
y,
z,
lr,
grad,
indices,
use_gpu,
l1=0.0,
l2=0.0,
lr_power=-0.5):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
linear = variables.VariableV1(z)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
sparse_apply_ftrl = training_ops.sparse_apply_ftrl(
var,
accum,
linear,
grad,
constant_op.constant(indices, self._toType(indices.dtype)),
lr,
l1,
l2,
lr_power=lr_power)
out = self.evaluate(sparse_apply_ftrl)
self.assertShapeEqual(out, sparse_apply_ftrl)
for (i, index) in enumerate(indices):
self.assertAllCloseAccordingToType(
x[index] - lr * grad[i] *
(y[index] + grad[i] * grad[i])**(lr_power),
self.evaluate(var)[index])
self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
self.evaluate(accum)[index])
def _testTypesForSparseFtrlMultiplyLinearByLr(self,
x,
y,
z,
lr,
grad,
indices,
l1=0.0,
l2=0.0,
lr_power=-0.5):
self.setUp()
with self.session(use_gpu=False):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
linear = variables.VariableV1(z)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
sparse_apply_ftrl = (
training_ops.sparse_apply_ftrl(
var,
accum,
linear,
grad,
constant_op.constant(indices, self._toType(indices.dtype)),
lr,
l1,
l2,
lr_power=lr_power,
multiply_linear_by_lr=True))
out = self.evaluate(sparse_apply_ftrl)
self.assertShapeEqual(out, sparse_apply_ftrl)
for (i, index) in enumerate(indices):
self.assertAllCloseAccordingToType(
x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i])**
(lr_power),
self.evaluate(var)[index])
self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
self.evaluate(accum)[index])
@test_util.run_v1_only("SparseApplyAdagrad op returns a ref, so it is not "
"supported in eager mode.")
def testSparseApplyAdagrad(self):
for (dtype, index_type,
use_gpu) in itertools.product([np.float16, np.float32, np.float64],
[np.int32, np.int64], [False, True]):
x_val = [np.arange(10), np.arange(10, 20), np.arange(20, 30)]
y_val = [np.arange(1, 11), np.arange(11, 21), np.arange(21, 31)]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [np.arange(10), np.arange(10)]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseAdagrad(x, y, lr, grad, indices, use_gpu)
# Empty sparse gradients.
empty_grad = np.zeros([0, 10], dtype=dtype)
empty_indices = np.zeros([0], dtype=index_type)
self._testTypesForSparseAdagrad(x, y, lr, empty_grad, empty_indices,
use_gpu)
@test_util.run_v1_only("SparseApplyAdagrad op returns a ref, so it is not "
"supported in eager mode.")
def testSparseApplyAdagradDim1(self):
for (dtype, index_type,
use_gpu) in itertools.product([np.float16, np.float32, np.float64],
[np.int32, np.int64], [False, True]):
x_val = [[1.0], [2.0], [3.0]]
y_val = [[4.0], [5.0], [6.0]]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [[1.5], [2.5]]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseAdagrad(x, y, lr, grad, indices, use_gpu)
@test_util.run_v1_only("SparseApplyFtrl op returns a ref, so it is not "
"supported in eager mode.")
def testSparseApplyFtrlDim1(self):
for (dtype, index_type,
use_gpu) in itertools.product([np.float16, np.float32, np.float64],
[np.int32, np.int64], [False, True]):
x_val = [[0.0], [0.0], [0.0]]
y_val = [[4.0], [5.0], [6.0]]
z_val = [[0.0], [0.0], [0.0]]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
z = np.array(z_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [[1.5], [2.5]]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseFtrl(x, y, z, lr, grad, indices, use_gpu)
# Empty sparse gradients.
empty_grad = np.zeros([0, 1], dtype=dtype)
empty_indices = np.zeros([0], dtype=index_type)
self._testTypesForSparseFtrl(x, y, z, lr, empty_grad, empty_indices,
use_gpu)
@test_util.run_v1_only("SparseApplyFtrlMultiplyLinearByLr op returns a ref, "
"so it is not supported in eager mode.")
def testSparseApplyFtrlMultiplyLinearByLrDim1(self):
for (dtype,
index_type) in itertools.product([np.float16, np.float32, np.float64],
[np.int32, np.int64]):
x_val = [[0.0], [0.0], [0.0]]
y_val = [[4.0], [5.0], [6.0]]
z_val = [[0.0], [0.0], [0.0]]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
z = np.array(z_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [[1.5], [2.5]]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseFtrlMultiplyLinearByLr(x, y, z, lr, grad, indices)
@test_util.run_v1_only("ApplyAdam op returns a ref, so it is not "
"supported in eager mode.")
def testApplyAdam(self):
for dtype, use_gpu in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
var = np.arange(100).astype(dtype)
m = np.arange(1, 101).astype(dtype)
v = np.arange(101, 201).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForAdam(var, m, v, grad, use_gpu)
def _testTypesForAdam(self, var, m, v, grad, use_gpu):
self.setUp()
with self.session(use_gpu=use_gpu):
var_t = variables.VariableV1(var)
m_t = variables.VariableV1(m)
v_t = variables.VariableV1(v)
t = 1
beta1 = np.array(0.9, dtype=var.dtype)
beta2 = np.array(0.999, dtype=var.dtype)
beta1_power = beta1**t
beta2_power = beta2**t
lr = np.array(0.001, dtype=var.dtype)
epsilon = np.array(1e-8, dtype=var.dtype)
beta1_t = constant_op.constant(beta1, self._toType(var.dtype), [])
beta2_t = constant_op.constant(beta2, self._toType(var.dtype), [])
beta1_power_t = variables.VariableV1(beta1_power)
beta2_power_t = variables.VariableV1(beta2_power)
lr_t = constant_op.constant(lr, self._toType(var.dtype), [])
epsilon_t = constant_op.constant(epsilon, self._toType(var.dtype), [])
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(var, self.evaluate(var_t))
new_var, _, _ = self._adamUpdateNumpy(var, grad, t, m, v, lr, beta1,
beta2, epsilon)
apply_adam = training_ops.apply_adam(var_t, m_t, v_t, beta1_power_t,
beta2_power_t, lr_t, beta1_t,
beta2_t, epsilon_t, grad)
out = self.evaluate(apply_adam)
self.assertShapeEqual(out, apply_adam)
self.assertAllCloseAccordingToType(new_var, out)
def _adamUpdateNumpy(self, param, g_t, t, m, v, alpha, beta1, beta2, epsilon):
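    # Reference NumPy implementation of a single Adam step; the bias correction
    # of the first and second moments is folded into the step size alpha_t.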
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
if __name__ == '__main__':
googletest.main()
|
apache-2.0
|
JGarcia-Panach/odoo
|
addons/note/note.py
|
223
|
8893
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import html2plaintext
class note_stage(osv.osv):
""" Category of Note """
_name = "note.stage"
_description = "Note Stage"
_columns = {
'name': fields.char('Stage Name', translate=True, required=True),
'sequence': fields.integer('Sequence', help="Used to order the note stages"),
'user_id': fields.many2one('res.users', 'Owner', help="Owner of the note stage.", required=True, ondelete='cascade'),
'fold': fields.boolean('Folded by Default'),
}
_order = 'sequence asc'
_defaults = {
'fold': 0,
'user_id': lambda self, cr, uid, ctx: uid,
'sequence' : 1,
}
class note_tag(osv.osv):
_name = "note.tag"
_description = "Note Tag"
_columns = {
'name' : fields.char('Tag Name', required=True),
}
class note_note(osv.osv):
""" Note """
_name = 'note.note'
_inherit = ['mail.thread']
_description = "Note"
#writing method (no modification of values)
def name_create(self, cr, uid, name, context=None):
rec_id = self.create(cr, uid, {'memo': name}, context=context)
return self.name_get(cr, uid, [rec_id], context)[0]
    #read the first line (convert html into text)
def _get_note_first_line(self, cr, uid, ids, name="", args={}, context=None):
res = {}
for note in self.browse(cr, uid, ids, context=context):
res[note.id] = (note.memo and html2plaintext(note.memo) or "").strip().replace('*','').split("\n")[0]
return res
def onclick_note_is_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'open': False, 'date_done': fields.date.today()}, context=context)
def onclick_note_not_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'open': True}, context=context)
#return the default stage for the uid user
def _get_default_stage_id(self,cr,uid,context=None):
ids = self.pool.get('note.stage').search(cr,uid,[('user_id','=',uid)], context=context)
return ids and ids[0] or False
def _set_stage_per_user(self, cr, uid, id, name, value, args=None, context=None):
note = self.browse(cr, uid, id, context=context)
if not value: return False
stage_ids = [value] + [stage.id for stage in note.stage_ids if stage.user_id.id != uid ]
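        # The (6, 0, ids) tuple is the ORM many2many command that replaces the
        # whole set of linked stages with the given ids in one write.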
return self.write(cr, uid, [id], {'stage_ids': [(6, 0, set(stage_ids))]}, context=context)
def _get_stage_per_user(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for record in self.browse(cr, uid, ids, context=context):
for stage in record.stage_ids:
if stage.user_id.id == uid:
result[record.id] = stage.id
return result
_columns = {
'name': fields.function(_get_note_first_line,
string='Note Summary',
type='text', store=True),
'user_id': fields.many2one('res.users', 'Owner'),
'memo': fields.html('Note Content'),
'sequence': fields.integer('Sequence'),
'stage_id': fields.function(_get_stage_per_user,
fnct_inv=_set_stage_per_user,
string='Stage',
type='many2one',
relation='note.stage'),
'stage_ids': fields.many2many('note.stage','note_stage_rel','note_id','stage_id','Stages of Users'),
'open': fields.boolean('Active', track_visibility='onchange'),
'date_done': fields.date('Date done'),
'color': fields.integer('Color Index'),
'tag_ids' : fields.many2many('note.tag','note_tags_rel','note_id','tag_id','Tags'),
}
_defaults = {
'user_id': lambda self, cr, uid, ctx=None: uid,
'open' : 1,
'stage_id' : _get_default_stage_id,
}
_order = 'sequence'
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
if groupby and groupby[0]=="stage_id":
#search all stages
current_stage_ids = self.pool.get('note.stage').search(cr,uid,[('user_id','=',uid)], context=context)
if current_stage_ids: #if the user have some stages
stages = self.pool['note.stage'].browse(cr, uid, current_stage_ids, context=context)
result = [{ #notes by stage for stages user
'__context': {'group_by': groupby[1:]},
'__domain': domain + [('stage_ids.id', '=', stage.id)],
'stage_id': (stage.id, stage.name),
'stage_id_count': self.search(cr,uid, domain+[('stage_ids', '=', stage.id)], context=context, count=True),
'__fold': stage.fold,
} for stage in stages]
#note without user's stage
nb_notes_ws = self.search(cr,uid, domain+[('stage_ids', 'not in', current_stage_ids)], context=context, count=True)
if nb_notes_ws:
# add note to the first column if it's the first stage
dom_not_in = ('stage_ids', 'not in', current_stage_ids)
if result and result[0]['stage_id'][0] == current_stage_ids[0]:
dom_in = result[0]['__domain'].pop()
result[0]['__domain'] = domain + ['|', dom_in, dom_not_in]
result[0]['stage_id_count'] += nb_notes_ws
else:
# add the first stage column
result = [{
'__context': {'group_by': groupby[1:]},
'__domain': domain + [dom_not_in],
'stage_id': (stages[0].id, stages[0].name),
'stage_id_count':nb_notes_ws,
                            '__fold': stages[0].fold,
}] + result
else: # if stage_ids is empty
#note without user's stage
nb_notes_ws = self.search(cr,uid, domain, context=context, count=True)
if nb_notes_ws:
result = [{ #notes for unknown stage
'__context': {'group_by': groupby[1:]},
'__domain': domain,
'stage_id': False,
'stage_id_count':nb_notes_ws
}]
else:
result = []
return result
else:
return super(note_note, self).read_group(cr, uid, domain, fields, groupby,
offset=offset, limit=limit, context=context, orderby=orderby,lazy=lazy)
#upgrade config setting page to configure pad, fancy and tags mode
class note_base_config_settings(osv.osv_memory):
_inherit = 'base.config.settings'
_columns = {
'module_note_pad': fields.boolean('Use collaborative pads (etherpad)'),
'group_note_fancy': fields.boolean('Use fancy layouts for notes', implied_group='note.group_note_fancy'),
}
class res_users(osv.Model):
_name = 'res.users'
_inherit = ['res.users']
def create(self, cr, uid, data, context=None):
user_id = super(res_users, self).create(cr, uid, data, context=context)
note_obj = self.pool['note.stage']
data_obj = self.pool['ir.model.data']
is_employee = self.has_group(cr, user_id, 'base.group_user')
if is_employee:
for n in range(5):
xmlid = 'note_stage_%02d' % (n,)
try:
_model, stage_id = data_obj.get_object_reference(cr, SUPERUSER_ID, 'note', xmlid)
except ValueError:
continue
note_obj.copy(cr, SUPERUSER_ID, stage_id, default={'user_id': user_id}, context=context)
return user_id
|
agpl-3.0
|
Pajinek/spacewalk
|
client/tools/rhncfg/config_management/rhncfg-manager.py
|
7
|
1282
|
#!/usr/bin/python
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
from config_common.rhn_main import BaseMain
class Main(BaseMain):
modes = [
'add',
'create-channel',
'diff',
'diff-revisions',
'download-channel',
'get',
'list',
'list-channels',
'remove',
'remove-channel',
'revisions',
'update',
'upload-channel',
]
plugins_dir = 'config_management'
config_section = 'rhncfg-manager'
mode_prefix = 'rhncfg'
if __name__ == '__main__':
try:
sys.exit(Main().main() or 0)
except KeyboardInterrupt:
sys.stderr.write("user interrupted\n")
sys.exit(0)
|
gpl-2.0
|
SafeStack/ava
|
ava_core/gather/gather_ldap/views.py
|
2
|
10561
|
import json
import logging
from rest_framework import status, viewsets, permissions
from rest_framework.response import Response
from ava_core.abstract.permissions import IsRetrieveOnly
from ava_core.organize.models import Group, Person, Identifier
from ava_core.organize.tasks import task_run_intro_email
from ava_core.organize.utils import add_identifier
from ava_core.gather.gather_abstract.views import GatherImportAPI
from ava_core.gather.gather_abstract.models import GatherHistory
from .interface import ActiveDirectoryHelper
from .serializers import LDAPGatherHistorySerializer
from .models import LDAPGatherHistory
from .utils import clean_hex, convert_date_time, ldap_field_to_group_model, ldap_field_to_user_model
from ava_core.integration.integration_abstract.utils import retrieve_integration_from_database
log = logging.getLogger(__name__)
class LDAPImportAPI(GatherImportAPI):
MODEL_NAME = 'integration_ldap.LDAPIntegrationAdapter'
DIRECTORY_INTERFACE = ActiveDirectoryHelper()
def get(self, request, **kwargs):
super(LDAPImportAPI, self).get(request, **kwargs)
pk = self.kwargs.get('pk')
integration = retrieve_integration_from_database(self.MODEL_NAME, pk)
return_message = "Imported completed"
LDAPGatherHistory.objects.create(integration=integration,message=return_message, import_status=GatherHistory.COMPLETED)
return Response({'message': "Import complete"}, status=status.HTTP_200_OK)
def import_users_from_json(self, users):
super(LDAPImportAPI, self).import_users_from_json(users)
ldap_json = json.loads(users)
entries = ldap_json['entries']
for person in entries:
log.debug("Handling groups '%s'", person.get('objectGUID'))
attributes = person['attributes']
model_attributes = {}
groups = []
gen_groups = []
email_addresses = []
for key, value in attributes.items():
# log.debug("Handling attributes for person key = %s, value = %s", key, value)
if len(value) > 0:
if key == 'memberOf':
for cn in value:
qs = Group.objects.filter(name=cn)
for q in qs:
groups.append(q)
if q.groups:
gen_groups.append(q.groups)
elif key == 'proxyAddresses':
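                        # AD proxyAddresses values typically look like
                        # "SMTP:user@example.com"; slicing off the first five
                        # characters drops that prefix.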
for address in value:
email_addresses.append(address[5:])
else:
value_string = ""
try:
if isinstance(value, str):
value_string = value
value_string = value_string.decode('utf-8')
else:
for e in value:
if isinstance(e, str):
value_string = ''.join(e)
else:
value_string = e['encoded']
if key in ('accountExpires', 'badPasswordTime', 'lastLogoff', 'lastLogon',
'lastLogonTimestamp', 'pwdLastSet', 'uSNChanged', 'uSNCreated',
'whenChanged', 'whenCreated'):
date = convert_date_time(self, value_string)
if date:
value_string = date.isoformat()
if key in ('adminCount', 'badPwdCount', 'logonCount'):
# print("WTF IS HAPPENING HERE")
# print(value_string)
if value_string is None or value_string is "":
value_string = 0
else:
value_string = int(value_string)
log.debug("Adding to mode;_attributes for person key = %s, value = %s",
ldap_field_to_user_model(self, key), value_string)
model_attributes[ldap_field_to_user_model(self, key)] = value_string
except UnicodeDecodeError:
log.debug("Adding to mode;_attributes for person key = %s, value = %s",
ldap_field_to_user_model(self, key), clean_hex(self, value_string))
model_attributes[ldap_field_to_user_model(self, key)] = clean_hex(self, value_string)
attributes.pop('memberOf', None)
attributes.pop('proxyAddresses', None)
            name = model_attributes['cn']
firstname = ''
surname = name
if " " in name:
name_parts = name.split(" ")
if len(name_parts) > 1:
firstname = name_parts[0]
surname = name_parts[1]
curr_person, p_created = Person.objects.get_or_create(first_name=firstname,
surname=surname,
ldap_identity_data=json.dumps(model_attributes))
if 'object_guid' in model_attributes:
add_identifier(self.PERSON_MODEL, curr_person, Identifier.GUID, model_attributes['object_guid'])
if 'object_sid' in model_attributes:
add_identifier(self.PERSON_MODEL, curr_person, Identifier.SID, model_attributes['object_sid'])
if 'distinguished_name' in model_attributes:
add_identifier(self.PERSON_MODEL, curr_person, Identifier.NAME, model_attributes['distinguished_name'])
if 'sam_account_name' in attributes:
add_identifier(self.PERSON_MODEL, curr_person, Identifier.NAME, model_attributes['sam_account_name'])
if 'cn' in model_attributes:
add_identifier(self.PERSON_MODEL, curr_person, Identifier.NAME, model_attributes['cn'])
# Import the email addresses.
for email_address in email_addresses:
add_identifier(self.PERSON_MODEL, curr_person, Identifier.EMAIL, email_address)
for group in groups:
if curr_person.groups.filter(id=group.id).count() == 0:
curr_person.groups.add(group)
# Invite user to system
task_run_intro_email.apply_async((curr_person.id,), countdown=1)
def import_groups_from_json(self, groups):
super(LDAPImportAPI, self).import_groups_from_json(groups)
ldap_json = json.loads(groups)
entries = ldap_json['entries']
for group in entries:
log.debug("Handling groups '%s'", group.get('dn'))
attributes = group['attributes']
model_attributes = {}
log.debug("Cleaning and processing attributes for group")
for key, value in attributes.items():
if len(value) > 0:
value_string = ""
try:
if isinstance(value, str):
value_string = value
value_string = value_string.decode('utf-8')
else:
for e in value:
if isinstance(e, str):
value_string = ''.join(e)
# value_string = value_string.decode('utf-8')
else:
value_string = e['encoded']
model_attributes[ldap_field_to_group_model(self, key)] = value_string
except UnicodeDecodeError:
model_attributes[ldap_field_to_group_model(self, key)] = clean_hex(self, value_string)
            # If no matching group currently exists then create one, otherwise
            # update the existing group.
groups = Group.objects.filter(name=model_attributes['cn'])
if groups.count() == 0:
# log.debug("Attempting to create new Group object")
# for k, v in model_attributes.items():
# log.debug("Model Attributes : %s = %s", k, v)
gen_group, group_created = Group.objects.get_or_create(name=model_attributes['cn'], group_type=Group.AD,
description="Imported group from LDAP")
if 'object_guid' in model_attributes:
log.debug("Adding group identifier (%s) as type guid to %s", model_attributes['object_guid'],
gen_group.name)
add_identifier(self.GROUP_MODEL, gen_group, Identifier.GUID, model_attributes['object_guid'])
if 'object_sid' in model_attributes:
log.debug("Adding group identifier (%s) as type sid to %s", model_attributes['object_sid'],
gen_group.name)
add_identifier(self.GROUP_MODEL, gen_group, Identifier.SID, model_attributes['object_sid'])
if 'distinguished_name' in model_attributes:
log.debug("Adding group identifier (%s) as type dist name to %s",
model_attributes['distinguished_name'], gen_group.name)
add_identifier(self.GROUP_MODEL, gen_group, Identifier.NAME, model_attributes['distinguished_name'])
if 'sam_account_name' in model_attributes:
log.debug("Adding group identifier (%s) as type sam account name to %s",
model_attributes['sam_account_name'], gen_group.name)
add_identifier(self.GROUP_MODEL, gen_group, Identifier.NAME, model_attributes['sam_account_name'])
class LDAPGatherHistoryAPI(viewsets.ModelViewSet):
queryset = LDAPGatherHistory.objects.none() # Required for DjangoModelPermissions
serializer_class = LDAPGatherHistorySerializer
permission_classes = (permissions.IsAuthenticated,
permissions.IsAdminUser,
IsRetrieveOnly)
def get_queryset(self):
return LDAPGatherHistory.objects.all()
|
gpl-3.0
|
moto-timo/robotframework
|
src/robot/reporting/resultwriter.py
|
4
|
5932
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.conf import RebotSettings
from robot.errors import DataError
from robot.model import ModelModifier
from robot.output import LOGGER
from robot.result import ExecutionResult, Result
from robot.utils import unic
from .jsmodelbuilders import JsModelBuilder
from .logreportwriters import LogWriter, ReportWriter
from .xunitwriter import XUnitWriter
class ResultWriter(object):
"""A class to create log, report, output XML and xUnit files.
:param sources: Either one :class:`~robot.result.executionresult.Result`
object, or one or more paths to existing output XML files.
By default writes ``report.html`` and ``log.html``, but no output XML
or xUnit files. Custom file names can be given and results disabled
or enabled using ``settings`` or ``options`` passed to the
:meth:`write_results` method. The latter is typically more convenient::
writer = ResultWriter(result)
writer.write_results(report='custom.html', log=None, xunit='xunit.xml')
"""
def __init__(self, *sources):
self._sources = sources
def write_results(self, settings=None, **options):
"""Writes results based on the given ``settings`` or ``options``.
:param settings: :class:`~robot.conf.settings.RebotSettings` object
to configure result writing.
:param options: Used to construct new
:class:`~robot.conf.settings.RebotSettings` object if ``settings``
are not given.
"""
settings = settings or RebotSettings(options)
results = Results(settings, *self._sources)
if settings.output:
self._write_output(results.result, settings.output)
if settings.xunit:
self._write_xunit(results.result, settings.xunit,
settings.xunit_skip_noncritical)
if settings.log:
config = dict(settings.log_config,
minLevel=results.js_result.min_level)
self._write_log(results.js_result, settings.log, config)
if settings.report:
results.js_result.remove_data_not_needed_in_report()
self._write_report(results.js_result, settings.report,
settings.report_config)
return results.return_code
def _write_output(self, result, path):
self._write('Output', result.save, path)
def _write_xunit(self, result, path, skip_noncritical):
self._write('XUnit', XUnitWriter(result, skip_noncritical).write, path)
def _write_log(self, js_result, path, config):
self._write('Log', LogWriter(js_result).write, path, config)
def _write_report(self, js_result, path, config):
self._write('Report', ReportWriter(js_result).write, path, config)
def _write(self, name, writer, path, *args):
try:
writer(path, *args)
except DataError as err:
LOGGER.error(err.message)
except EnvironmentError as err:
# `err.filename` can be different than `path` at least if reading
# log/report templates or writing split log fails.
# `unic` is needed due to http://bugs.jython.org/issue1825.
LOGGER.error("Writing %s file '%s' failed: %s: %s" %
(name.lower(), path, err.strerror, unic(err.filename)))
else:
LOGGER.output_file(name, path)
class Results(object):
def __init__(self, settings, *sources):
self._settings = settings
self._sources = sources
if len(sources) == 1 and isinstance(sources[0], Result):
self._result = sources[0]
self._prune = False
self.return_code = self._result.return_code
else:
self._result = None
self._prune = True
self.return_code = -1
self._js_result = None
@property
def result(self):
if self._result is None:
include_keywords = bool(self._settings.log or self._settings.output)
flattened = self._settings.flatten_keywords
self._result = ExecutionResult(include_keywords=include_keywords,
flattened_keywords=flattened,
merge=self._settings.merge,
*self._sources)
self._result.configure(self._settings.status_rc,
self._settings.suite_config,
self._settings.statistics_config)
modifier = ModelModifier(self._settings.pre_rebot_modifiers,
self._settings.process_empty_suite,
LOGGER)
self._result.suite.visit(modifier)
self.return_code = self._result.return_code
return self._result
@property
def js_result(self):
if self._js_result is None:
builder = JsModelBuilder(log_path=self._settings.log,
split_log=self._settings.split_log,
prune_input_to_save_memory=self._prune)
self._js_result = builder.build_from(self.result)
if self._prune:
self._result = None
return self._js_result
|
apache-2.0
|
Pinecast/pinecast
|
notifications/migrations/0003_notificationevent.py
|
3
|
1085
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-01 01:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('podcasts', '0048_auto_20180501_0154'),
('notifications', '0002_auto_20161127_1935'),
]
operations = [
migrations.CreateModel(
name='NotificationEvent',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('episode', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='podcasts.PodcastEpisode')),
('hook', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='notifications.NotificationHook')),
('podcast', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='podcasts.Podcast')),
],
),
]
|
apache-2.0
|
marqh/cartopy
|
lib/cartopy/io/shapereader.py
|
1
|
8691
|
# (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
"""
Combines the shapefile access of pyshp with the
geometry representation of shapely.
>>> import os.path
>>> import cartopy.io.shapereader as shapereader
>>> filename = os.path.join(os.path.dirname(shapereader.__file__), 'data', 'Devon')
>>> reader = shapereader.Reader(filename)
>>> len(reader)
1
>>> list(reader.records()) #doctest: +ELLIPSIS
[<Record: <shapely.geometry.multipolygon.MultiPolygon object at ...>, {'PMS_REGION': 14, 'SHAPE_AREA': 6597719517.55, 'OBJECTID': 15, 'COUNTRY': 'ENGLAND', 'SNAC_GOR': 'South West', 'COUNTY_STR': 'Devon', 'SHAPE_LEN': 570341.652865}, <fields>>]
>>> list(reader.geometries()) #doctest: +ELLIPSIS
[<shapely.geometry.multipolygon.MultiPolygon object at ...>]
"""
from shapely.geometry import MultiLineString, MultiPolygon, Point, Polygon
import shapefile
import os
__all__ = ['Reader', 'Record']
def _create_point(shape):
return Point(shape.points[0])
def _create_polyline(shape):
parts = list(shape.parts) + [None]
bounds = zip(parts[:-1], parts[1:])
lines = [shape.points[slice(lower, upper)] for lower, upper in bounds]
return MultiLineString(lines)
def _create_polygon(shape):
# Partition the shapefile rings into outer rings/polygons (clockwise) and
# inner rings/holes (anti-clockwise).
parts = list(shape.parts) + [None]
bounds = zip(parts[:-1], parts[1:])
outer_polygons_and_holes = []
inner_polygons = []
for lower, upper in bounds:
polygon = Polygon(shape.points[slice(lower, upper)])
if polygon.exterior.is_ccw:
inner_polygons.append(polygon)
else:
outer_polygons_and_holes.append((polygon, []))
# Find the appropriate outer ring for each inner ring.
# aka. Group the holes with their containing polygons.
for inner_polygon in inner_polygons:
for outer_polygon, holes in outer_polygons_and_holes:
if outer_polygon.contains(inner_polygon):
holes.append(inner_polygon.exterior.coords)
break
polygon_defns = [(outer_polygon.exterior.coords, holes) for outer_polygon, holes in outer_polygons_and_holes]
return MultiPolygon(polygon_defns)
def _make_geometry(geometry_factory, shape):
geometry = None
if shape.shapeType != shapefile.NULL:
geometry = geometry_factory(shape)
return geometry
# The mapping from shapefile shapeType values to geometry creation functions.
GEOMETRY_FACTORIES = {
shapefile.POINT: _create_point,
shapefile.POLYLINE: _create_polyline,
shapefile.POLYGON: _create_polygon,
}
class Record(object):
"""
A single logical entry from a shapefile, combining the attributes with
their associated geometry.
attributes - A dictionary mapping attribute names to attribute values.
bounds - A tuple of (minx, miny, maxx, maxy).
fields - A list of field definitions, as per the Python Shapefile Library.
geometry - A shapely.geometry instance or None if it was a null shape.
"""
def __init__(self, shape, geometry_factory, attributes, fields):
self._shape = shape
self._geometry_factory = geometry_factory
if hasattr(shape, 'bbox'):
self.bounds = tuple(shape.bbox)
self.attributes = attributes
self.fields = fields
def __repr__(self):
return '<Record: %r, %r, <fields>>' % (self.geometry, self.attributes)
def __str__(self):
return 'Record(%s, %s, <fields>)' % (self.geometry, self.attributes)
def __getattr__(self, name):
if name == 'bounds':
value = self.bounds = self.geometry().bounds
elif name == 'geometry':
value = self.geometry = _make_geometry(self._geometry_factory, self._shape)
else:
value = object.__getattribute__(self, name)
return value
class Reader(object):
"""
Provides iterator based access to the contents of a shapefile.
The shapefile geometry is expressed as ``shapely.geometry`` instances.
"""
def __init__(self, filename):
# Validate the filename/shapefile
self._reader = reader = shapefile.Reader(filename)
if reader.shp is None or reader.shx is None or reader.dbf is None:
raise ValueError("Incomplete shapefile definition in '%s'." % filename)
# Figure out how to make appropriate shapely geometry instances
shapeType = reader.shapeType
self._geometry_factory = GEOMETRY_FACTORIES.get(shapeType)
if self._geometry_factory is None:
raise ValueError('Unsupported shape type: %s' % shapeType)
self.fields = self._reader.fields
def __len__(self):
return self._reader.numRecords
def geometries(self):
"""Returns an iterator of shapely geometries."""
geometry_factory = self._geometry_factory
for i in xrange(self._reader.numRecords):
shape = self._reader.shape(i)
yield _make_geometry(geometry_factory, shape)
def records(self):
"""Returns an iterator of Record instances."""
geometry_factory = self._geometry_factory
# Ignore the "DeletionFlag" field which always comes first
fields = self._reader.fields[1:]
field_names = [field[0] for field in fields]
for i in xrange(self._reader.numRecords):
shape_record = self._reader.shapeRecord(i)
attributes = dict(zip(field_names, shape_record.record))
yield Record(shape_record.shape, geometry_factory, attributes, fields)
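# Illustrative usage sketch (not part of the original module; the file name and
# the 'NAME' attribute below are assumptions): records() yields Record objects
# whose 'attributes' dict and lazily-built 'geometry' can be read directly.
#
#     reader = Reader('some_shapefile')
#     for record in reader.records():
#         print record.attributes.get('NAME'), record.geometry is not None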
def natural_earth(resolution='110m', category='physical', name='coastline', data_dir=None):
"""
Returns the path to the requested Natural Earth shapefile, downloading and unzipping it if necessary.
"""
import glob
if data_dir is None:
dname = os.path.dirname
# XXX be more clever in the data directory so that users can define a setting.
data_dir = os.path.join(dname(dname(__file__)), 'data', 'shapefiles', 'natural_earth')
if not os.path.exists(data_dir):
os.makedirs(data_dir)
full_name = '%s-%s' % (resolution, name)
shape_dir = os.path.join(data_dir, full_name)
if not os.path.exists(shape_dir):
os.makedirs(shape_dir)
# find the only shapefile in the directory. This is because NE have inconsistent zip file naming conventions.
glob_pattern = os.path.join(data_dir, full_name, '*.shp')
shapefiles = glob.glob(glob_pattern)
if not shapefiles:
# download the zip file
import urllib2
import cStringIO as StringIO
from zipfile import ZipFile
# note the repeated http. That is intentional
file_url = ('http://www.naturalearthdata.com/http//www.naturalearthdata.com/'
'download/%s/%s/%s.zip' % (resolution, category, full_name))
shapefile_online = urllib2.urlopen(file_url)
zfh = ZipFile(StringIO.StringIO(shapefile_online.read()), 'r')
zfh.extractall(shape_dir)
shapefiles = glob.glob(glob_pattern)
if len(shapefiles) != 1:
raise ValueError('%s shapefiles were found, expecting just one to match %s' % (len(shapefiles), glob_pattern))
return shapefiles[0]
if __name__ == '__main__':
coastlines = natural_earth(resolution='110m', category='physical', name='coastline')
for record in Reader(coastlines).records():
print record.attributes
# XXX TODO: Turn into a tutorial
coastlines = natural_earth(resolution='110m', category='cultural', name='admin-0-countries')
cntry_size = [(record.attributes['NAME'], int(record.attributes['POP_EST'])) for record in Reader(coastlines).records()]
# return the countries, grouped alphabetically, sorted by size.
import itertools
cntry_size.sort(key=lambda (name, population): (name[0], population))
for k, g in itertools.groupby(cntry_size, key=lambda item: item[0][0]):
print k, list(g)
|
gpl-3.0
|
yudingding6197/fin_script
|
debug/sina_guben.py
|
1
|
1796
|
#!/usr/bin/env python
# -*- coding:gbk -*-
import sys
import re
import os
import string
import datetime
import urllib
import urllib2
from openpyxl import Workbook
from openpyxl.reader.excel import load_workbook
sys.path.append(".")
sys.path.append("..")
from internal.ts_common import *
# Fetch all share-capital change info; this needs the full list of stock codes, so run debug/instant_data.py first to refresh the data
url_liut = "http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_StockStructureHistory/stockid/%s/stocktype/LiuTongA.phtml"
url_totl = "http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_StockStructureHistory/stockid/%s/stocktype/TotalStock.phtml"
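# Hypothetical invocation examples, derived from the sys.argv handling below:
#
#     python sina_guben.py          # walk every code listed in ../data/entry/trade/trade_last.txt
#     python sina_guben.py 600000   # fetch float/total share history for one 6-digit code (600000 is illustrative)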
pindex = len(sys.argv)
if pindex==1:
data_path = "../data/entry/trade/trade_last.txt"
file = open(data_path, 'r')
while 1:
lines = file.readlines(9000)
if not lines:
break
for line in lines:
code = line.strip()
if len(code)!=6:
print code, "Len should be 6"
continue
if code.isdigit() is False:
print code, "Invalid code"
continue
ltgb_list = []
gb_str = get_guben_line(url_liut, code)
parse_guben(gb_str, ltgb_list)
zgb_list = []
gb_str = get_guben_line(url_totl, code)
parse_guben(gb_str, zgb_list)
#print ltgb_list,zgb_list
file.close()
elif pindex==2:
code = sys.argv[1]
if len(code)!=6:
print code, "Len should be 6"
exit(1)
if code.isdigit() is False:
print code, "Invalid code"
exit(1)
ltgb_list = []
gb_str = get_guben_line(url_liut, code)
parse_guben(gb_str, ltgb_list)
zgb_list = []
gb_str = get_guben_line(url_totl, code)
parse_guben(gb_str, zgb_list)
for i in range(0, len(ltgb_list)):
ltobj = ltgb_list[i]
zobj = zgb_list[i]
if ltobj[0] != zobj[0]:
print code, "Not match", ltobj[0], zobj[0]
break
out = "%s %10.4f %10.4f" % (ltobj[0], ltobj[1], zobj[1])
print out
|
gpl-2.0
|
akhmadMizkat/odoo
|
addons/report_webkit/convert.py
|
47
|
1389
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# Author : Nicolas Bessi (Camptocamp)
from openerp.tools import convert
original_xml_import = convert.xml_import
class WebkitXMLImport(original_xml_import):
# Override of xml import in order to add webkit_header tag in report tag.
# As discussed with the R&D Team, the current XML processing API does
# not offer enough flexibility to do it in a cleaner way.
# The solution is not meant to be a long-term solution, but at least
# allows chaining of several overrides of the _tag_report method,
# and does not require a copy/paste of the original code.
def _tag_report(self, cr, rec, data_node=None, mode=None):
report_id = super(WebkitXMLImport, self)._tag_report(cr, rec, data_node)
if rec.get('report_type') == 'webkit':
header = rec.get('webkit_header')
if header:
if header in ('False', '0', 'None'):
webkit_header_id = False
else:
webkit_header_id = self.id_get(cr, header)
self.pool.get('ir.actions.report.xml').write(cr, self.uid,
report_id, {'webkit_header': webkit_header_id})
return report_id
convert.xml_import = WebkitXMLImport
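# Purely illustrative sketch of the chaining mentioned above (MyXMLImport is a
# hypothetical add-on, not part of Odoo): because this module only subclasses
# whatever convert.xml_import currently points to, another module can layer its
# own _tag_report handling the same way.
#
#     previous_xml_import = convert.xml_import
#
#     class MyXMLImport(previous_xml_import):
#         def _tag_report(self, cr, rec, data_node=None, mode=None):
#             report_id = super(MyXMLImport, self)._tag_report(cr, rec, data_node)
#             # ... inspect further custom attributes on rec here ...
#             return report_id
#
#     convert.xml_import = MyXMLImport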
|
gpl-3.0
|
corygiltner/CSB
|
bp_includes/external/babel/messages/tests/extract.py
|
36
|
19857
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import codecs
import doctest
from StringIO import StringIO
import sys
import unittest
from babel.messages import extract
class ExtractPythonTestCase(unittest.TestCase):
def test_nested_calls(self):
buf = StringIO("""\
msg1 = _(i18n_arg.replace(r'\"', '"'))
msg2 = ungettext(i18n_arg.replace(r'\"', '"'), multi_arg.replace(r'\"', '"'), 2)
msg3 = ungettext("Babel", multi_arg.replace(r'\"', '"'), 2)
msg4 = ungettext(i18n_arg.replace(r'\"', '"'), "Babels", 2)
msg5 = ungettext('bunny', 'bunnies', random.randint(1, 2))
msg6 = ungettext(arg0, 'bunnies', random.randint(1, 2))
msg7 = _(hello.there)
msg8 = gettext('Rabbit')
msg9 = dgettext('wiki', model.addPage())
msg10 = dngettext(getDomain(), 'Page', 'Pages', 3)
""")
messages = list(extract.extract_python(buf,
extract.DEFAULT_KEYWORDS.keys(),
[], {}))
self.assertEqual([
(1, '_', None, []),
(2, 'ungettext', (None, None, None), []),
(3, 'ungettext', (u'Babel', None, None), []),
(4, 'ungettext', (None, u'Babels', None), []),
(5, 'ungettext', (u'bunny', u'bunnies', None), []),
(6, 'ungettext', (None, u'bunnies', None), []),
(7, '_', None, []),
(8, 'gettext', u'Rabbit', []),
(9, 'dgettext', (u'wiki', None), []),
(10, 'dngettext', (None, u'Page', u'Pages', None), [])],
messages)
def test_nested_comments(self):
buf = StringIO("""\
msg = ngettext('pylon', # TRANSLATORS: shouldn't be
'pylons', # TRANSLATORS: seeing this
count)
""")
messages = list(extract.extract_python(buf, ('ngettext',),
['TRANSLATORS:'], {}))
self.assertEqual([(1, 'ngettext', (u'pylon', u'pylons', None), [])],
messages)
def test_comments_with_calls_that_spawn_multiple_lines(self):
buf = StringIO("""\
# NOTE: This Comment SHOULD Be Extracted
add_notice(req, ngettext("Catalog deleted.",
"Catalogs deleted.", len(selected)))
# NOTE: This Comment SHOULD Be Extracted
add_notice(req, _("Locale deleted."))
# NOTE: This Comment SHOULD Be Extracted
add_notice(req, ngettext("Foo deleted.", "Foos deleted.", len(selected)))
# NOTE: This Comment SHOULD Be Extracted
# NOTE: And This One Too
add_notice(req, ngettext("Bar deleted.",
"Bars deleted.", len(selected)))
""")
messages = list(extract.extract_python(buf, ('ngettext','_'), ['NOTE:'],
{'strip_comment_tags':False}))
self.assertEqual((6, '_', 'Locale deleted.',
[u'NOTE: This Comment SHOULD Be Extracted']),
messages[1])
self.assertEqual((10, 'ngettext', (u'Foo deleted.', u'Foos deleted.',
None),
[u'NOTE: This Comment SHOULD Be Extracted']),
messages[2])
self.assertEqual((3, 'ngettext',
(u'Catalog deleted.',
u'Catalogs deleted.', None),
[u'NOTE: This Comment SHOULD Be Extracted']),
messages[0])
self.assertEqual((15, 'ngettext', (u'Bar deleted.', u'Bars deleted.',
None),
[u'NOTE: This Comment SHOULD Be Extracted',
u'NOTE: And This One Too']),
messages[3])
def test_declarations(self):
buf = StringIO("""\
class gettext(object):
pass
def render_body(context,x,y=_('Page arg 1'),z=_('Page arg 2'),**pageargs):
pass
def ngettext(y='arg 1',z='arg 2',**pageargs):
pass
class Meta:
verbose_name = _('log entry')
""")
messages = list(extract.extract_python(buf,
extract.DEFAULT_KEYWORDS.keys(),
[], {}))
self.assertEqual([(3, '_', u'Page arg 1', []),
(3, '_', u'Page arg 2', []),
(8, '_', u'log entry', [])],
messages)
def test_multiline(self):
buf = StringIO("""\
msg1 = ngettext('pylon',
'pylons', count)
msg2 = ngettext('elvis',
'elvises',
count)
""")
messages = list(extract.extract_python(buf, ('ngettext',), [], {}))
self.assertEqual([(1, 'ngettext', (u'pylon', u'pylons', None), []),
(3, 'ngettext', (u'elvis', u'elvises', None), [])],
messages)
def test_triple_quoted_strings(self):
buf = StringIO("""\
msg1 = _('''pylons''')
msg2 = ngettext(r'''elvis''', \"\"\"elvises\"\"\", count)
msg2 = ngettext(\"\"\"elvis\"\"\", 'elvises', count)
""")
messages = list(extract.extract_python(buf,
extract.DEFAULT_KEYWORDS.keys(),
[], {}))
self.assertEqual([(1, '_', (u'pylons'), []),
(2, 'ngettext', (u'elvis', u'elvises', None), []),
(3, 'ngettext', (u'elvis', u'elvises', None), [])],
messages)
def test_multiline_strings(self):
buf = StringIO("""\
_('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.''')
""")
messages = list(extract.extract_python(buf,
extract.DEFAULT_KEYWORDS.keys(),
[], {}))
self.assertEqual(
[(1, '_',
u'This module provides internationalization and localization\n'
'support for your Python programs by providing an interface to '
'the GNU\ngettext message catalog library.', [])],
messages)
def test_concatenated_strings(self):
buf = StringIO("""\
foobar = _('foo' 'bar')
""")
messages = list(extract.extract_python(buf,
extract.DEFAULT_KEYWORDS.keys(),
[], {}))
self.assertEqual(u'foobar', messages[0][2])
def test_unicode_string_arg(self):
buf = StringIO("msg = _(u'Foo Bar')")
messages = list(extract.extract_python(buf, ('_',), [], {}))
self.assertEqual(u'Foo Bar', messages[0][2])
def test_comment_tag(self):
buf = StringIO("""
# NOTE: A translation comment
msg = _(u'Foo Bar')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Foo Bar', messages[0][2])
self.assertEqual([u'NOTE: A translation comment'], messages[0][3])
def test_comment_tag_multiline(self):
buf = StringIO("""
# NOTE: A translation comment
# with a second line
msg = _(u'Foo Bar')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Foo Bar', messages[0][2])
self.assertEqual([u'NOTE: A translation comment', u'with a second line'],
messages[0][3])
def test_translator_comments_with_previous_non_translator_comments(self):
buf = StringIO("""
# This shouldn't be in the output
# because it didn't start with a comment tag
# NOTE: A translation comment
# with a second line
msg = _(u'Foo Bar')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Foo Bar', messages[0][2])
self.assertEqual([u'NOTE: A translation comment', u'with a second line'],
messages[0][3])
def test_comment_tags_not_on_start_of_comment(self):
buf = StringIO("""
# This shouldn't be in the output
# because it didn't start with a comment tag
# do NOTE: this will not be a translation comment
# NOTE: This one will be
msg = _(u'Foo Bar')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Foo Bar', messages[0][2])
self.assertEqual([u'NOTE: This one will be'], messages[0][3])
def test_multiple_comment_tags(self):
buf = StringIO("""
# NOTE1: A translation comment for tag1
# with a second line
msg = _(u'Foo Bar1')
# NOTE2: A translation comment for tag2
msg = _(u'Foo Bar2')
""")
messages = list(extract.extract_python(buf, ('_',),
['NOTE1:', 'NOTE2:'], {}))
self.assertEqual(u'Foo Bar1', messages[0][2])
self.assertEqual([u'NOTE1: A translation comment for tag1',
u'with a second line'], messages[0][3])
self.assertEqual(u'Foo Bar2', messages[1][2])
self.assertEqual([u'NOTE2: A translation comment for tag2'], messages[1][3])
def test_two_succeeding_comments(self):
buf = StringIO("""
# NOTE: one
# NOTE: two
msg = _(u'Foo Bar')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Foo Bar', messages[0][2])
self.assertEqual([u'NOTE: one', u'NOTE: two'], messages[0][3])
def test_invalid_translator_comments(self):
buf = StringIO("""
# NOTE: this shouldn't apply to any messages
hello = 'there'
msg = _(u'Foo Bar')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Foo Bar', messages[0][2])
self.assertEqual([], messages[0][3])
def test_invalid_translator_comments2(self):
buf = StringIO("""
# NOTE: Hi!
hithere = _('Hi there!')
# NOTE: you should not be seeing this in the .po
rows = [[v for v in range(0,10)] for row in range(0,10)]
# this (NOTE:) should not show up either
hello = _('Hello')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Hi there!', messages[0][2])
self.assertEqual([u'NOTE: Hi!'], messages[0][3])
self.assertEqual(u'Hello', messages[1][2])
self.assertEqual([], messages[1][3])
def test_invalid_translator_comments3(self):
buf = StringIO("""
# NOTE: Hi,
# there!
hithere = _('Hi there!')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Hi there!', messages[0][2])
self.assertEqual([], messages[0][3])
def test_comment_tag_with_leading_space(self):
buf = StringIO("""
#: A translation comment
#: with leading spaces
msg = _(u'Foo Bar')
""")
messages = list(extract.extract_python(buf, ('_',), [':'], {}))
self.assertEqual(u'Foo Bar', messages[0][2])
self.assertEqual([u': A translation comment', u': with leading spaces'],
messages[0][3])
def test_different_signatures(self):
buf = StringIO("""
foo = _('foo', 'bar')
n = ngettext('hello', 'there', n=3)
n = ngettext(n=3, 'hello', 'there')
n = ngettext(n=3, *messages)
n = ngettext()
n = ngettext('foo')
""")
messages = list(extract.extract_python(buf, ('_', 'ngettext'), [], {}))
self.assertEqual((u'foo', u'bar'), messages[0][2])
self.assertEqual((u'hello', u'there', None), messages[1][2])
self.assertEqual((None, u'hello', u'there'), messages[2][2])
self.assertEqual((None, None), messages[3][2])
self.assertEqual(None, messages[4][2])
self.assertEqual(('foo'), messages[5][2])
def test_utf8_message(self):
buf = StringIO("""
# NOTE: hello
msg = _('Bonjour à tous')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'],
{'encoding': 'utf-8'}))
self.assertEqual(u'Bonjour à tous', messages[0][2])
self.assertEqual([u'NOTE: hello'], messages[0][3])
def test_utf8_message_with_magic_comment(self):
buf = StringIO("""# -*- coding: utf-8 -*-
# NOTE: hello
msg = _('Bonjour à tous')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Bonjour à tous', messages[0][2])
self.assertEqual([u'NOTE: hello'], messages[0][3])
def test_utf8_message_with_utf8_bom(self):
buf = StringIO(codecs.BOM_UTF8 + """
# NOTE: hello
msg = _('Bonjour à tous')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Bonjour à tous', messages[0][2])
self.assertEqual([u'NOTE: hello'], messages[0][3])
def test_utf8_raw_strings_match_unicode_strings(self):
buf = StringIO(codecs.BOM_UTF8 + """
msg = _('Bonjour à tous')
msgu = _(u'Bonjour à tous')
""")
messages = list(extract.extract_python(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Bonjour à tous', messages[0][2])
self.assertEqual(messages[0][2], messages[1][2])
def test_extract_strip_comment_tags(self):
buf = StringIO("""\
#: This is a comment with a very simple
#: prefix specified
_('Servus')
# NOTE: This is a multiline comment with
# a prefix too
_('Babatschi')""")
messages = list(extract.extract('python', buf, comment_tags=['NOTE:', ':'],
strip_comment_tags=True))
self.assertEqual(u'Servus', messages[0][1])
self.assertEqual([u'This is a comment with a very simple',
u'prefix specified'], messages[0][2])
self.assertEqual(u'Babatschi', messages[1][1])
self.assertEqual([u'This is a multiline comment with',
u'a prefix too'], messages[1][2])
class ExtractJavaScriptTestCase(unittest.TestCase):
def test_simple_extract(self):
buf = StringIO("""\
msg1 = _('simple')
msg2 = gettext('simple')
msg3 = ngettext('s', 'p', 42)
""")
messages = \
list(extract.extract('javascript', buf, extract.DEFAULT_KEYWORDS,
[], {}))
self.assertEqual([(1, 'simple', []),
(2, 'simple', []),
(3, ('s', 'p'), [])], messages)
def test_various_calls(self):
buf = StringIO("""\
msg1 = _(i18n_arg.replace(/"/, '"'))
msg2 = ungettext(i18n_arg.replace(/"/, '"'), multi_arg.replace(/"/, '"'), 2)
msg3 = ungettext("Babel", multi_arg.replace(/"/, '"'), 2)
msg4 = ungettext(i18n_arg.replace(/"/, '"'), "Babels", 2)
msg5 = ungettext('bunny', 'bunnies', parseInt(Math.random() * 2 + 1))
msg6 = ungettext(arg0, 'bunnies', rparseInt(Math.random() * 2 + 1))
msg7 = _(hello.there)
msg8 = gettext('Rabbit')
msg9 = dgettext('wiki', model.addPage())
msg10 = dngettext(domain, 'Page', 'Pages', 3)
""")
messages = \
list(extract.extract('javascript', buf, extract.DEFAULT_KEYWORDS, [],
{}))
self.assertEqual([(5, (u'bunny', u'bunnies'), []),
(8, u'Rabbit', []),
(10, (u'Page', u'Pages'), [])], messages)
def test_message_with_line_comment(self):
buf = StringIO("""\
// NOTE: hello
msg = _('Bonjour à tous')
""")
messages = list(extract.extract_javascript(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Bonjour à tous', messages[0][2])
self.assertEqual([u'NOTE: hello'], messages[0][3])
def test_message_with_multiline_comment(self):
buf = StringIO("""\
/* NOTE: hello
and bonjour
and servus */
msg = _('Bonjour à tous')
""")
messages = list(extract.extract_javascript(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Bonjour à tous', messages[0][2])
self.assertEqual([u'NOTE: hello', 'and bonjour', ' and servus'], messages[0][3])
def test_ignore_function_definitions(self):
buf = StringIO("""\
function gettext(value) {
return translations[language][value] || value;
}""")
messages = list(extract.extract_javascript(buf, ('gettext',), [], {}))
self.assertEqual(messages, [])
def test_misplaced_comments(self):
buf = StringIO("""\
/* NOTE: this won't show up */
foo()
/* NOTE: this will */
msg = _('Something')
// NOTE: this will show up
// too.
msg = _('Something else')
// NOTE: but this won't
bar()
_('no comment here')
""")
messages = list(extract.extract_javascript(buf, ('_',), ['NOTE:'], {}))
self.assertEqual(u'Something', messages[0][2])
self.assertEqual([u'NOTE: this will'], messages[0][3])
self.assertEqual(u'Something else', messages[1][2])
self.assertEqual([u'NOTE: this will show up', 'too.'], messages[1][3])
self.assertEqual(u'no comment here', messages[2][2])
self.assertEqual([], messages[2][3])
class ExtractTestCase(unittest.TestCase):
def test_invalid_filter(self):
buf = StringIO("""\
msg1 = _(i18n_arg.replace(r'\"', '"'))
msg2 = ungettext(i18n_arg.replace(r'\"', '"'), multi_arg.replace(r'\"', '"'), 2)
msg3 = ungettext("Babel", multi_arg.replace(r'\"', '"'), 2)
msg4 = ungettext(i18n_arg.replace(r'\"', '"'), "Babels", 2)
msg5 = ungettext('bunny', 'bunnies', random.randint(1, 2))
msg6 = ungettext(arg0, 'bunnies', random.randint(1, 2))
msg7 = _(hello.there)
msg8 = gettext('Rabbit')
msg9 = dgettext('wiki', model.addPage())
msg10 = dngettext(domain, 'Page', 'Pages', 3)
""")
messages = \
list(extract.extract('python', buf, extract.DEFAULT_KEYWORDS, [],
{}))
self.assertEqual([(5, (u'bunny', u'bunnies'), []),
(8, u'Rabbit', []),
(10, (u'Page', u'Pages'), [])], messages)
def test_invalid_extract_method(self):
buf = StringIO('')
self.assertRaises(ValueError, list, extract.extract('spam', buf))
def test_different_signatures(self):
buf = StringIO("""
foo = _('foo', 'bar')
n = ngettext('hello', 'there', n=3)
n = ngettext(n=3, 'hello', 'there')
n = ngettext(n=3, *messages)
n = ngettext()
n = ngettext('foo')
""")
messages = \
list(extract.extract('python', buf, extract.DEFAULT_KEYWORDS, [],
{}))
self.assertEqual(len(messages), 2)
self.assertEqual(u'foo', messages[0][1])
self.assertEqual((u'hello', u'there'), messages[1][1])
def test_empty_string_msgid(self):
buf = StringIO("""\
msg = _('')
""")
stderr = sys.stderr
sys.stderr = StringIO()
try:
messages = \
list(extract.extract('python', buf, extract.DEFAULT_KEYWORDS,
[], {}))
self.assertEqual([], messages)
assert 'warning: Empty msgid.' in sys.stderr.getvalue()
finally:
sys.stderr = stderr
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(extract))
suite.addTest(unittest.makeSuite(ExtractPythonTestCase))
suite.addTest(unittest.makeSuite(ExtractJavaScriptTestCase))
suite.addTest(unittest.makeSuite(ExtractTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
lgpl-3.0
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/pandas/tests/plotting/test_series.py
|
6
|
30296
|
# coding: utf-8
""" Test cases for Series.plot """
import itertools
import pytest
from datetime import datetime
import pandas as pd
from pandas import Series, DataFrame, date_range
from pandas.compat import range, lrange
import pandas.util.testing as tm
from pandas.util.testing import slow
import numpy as np
from numpy.random import randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
tm._skip_module_if_no_mpl()
class TestSeriesPlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
axes = _check_plot_works(self.ts.plot, rot=0)
self._check_ticks_props(axes, xrot=0)
ax = _check_plot_works(self.ts.plot, style='.', logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
_check_plot_works(self.ts[:10].plot.bar)
_check_plot_works(self.ts.plot.area, stacked=False)
_check_plot_works(self.iseries.plot)
for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot.barh)
ax = _check_plot_works(Series(randn(10)).plot.bar, color='black')
self._check_colors([ax.patches[0]], facecolors=['black'])
# GH 6951
ax = _check_plot_works(self.ts.plot, subplots=True)
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
@slow
def test_plot_figsize_and_title(self):
# figsize and title
ax = self.series.plot(title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
# GH 8242
if self.mpl_ge_1_5_0:
key = 'axes.prop_cycle'
else:
key = 'axes.color_cycle'
colors = self.plt.rcParams[key]
Series([1, 2, 3]).plot()
assert colors == self.plt.rcParams[key]
def test_ts_line_lim(self):
ax = self.ts.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin == lines[0].get_data(orig=False)[0][0]
assert xmax == lines[0].get_data(orig=False)[0][-1]
tm.close()
ax = self.ts.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin == lines[0].get_data(orig=False)[0][0]
assert xmax == lines[0].get_data(orig=False)[0][-1]
def test_ts_area_lim(self):
ax = self.ts.plot.area(stacked=False)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
tm.close()
# GH 7471
ax = self.ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
tm.close()
tz_ts = self.ts.copy()
tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
ax = tz_ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
tm.close()
ax = tz_ts.plot.area(stacked=False, secondary_y=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
def test_label(self):
s = Series([1, 2])
ax = s.plot(label='LABEL', legend=True)
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
ax = s.plot(legend=True)
self._check_legend_labels(ax, labels=['None'])
self.plt.close()
# get name from index
s.name = 'NAME'
ax = s.plot(legend=True)
self._check_legend_labels(ax, labels=['NAME'])
self.plt.close()
# override the default
ax = s.plot(legend=True, label='LABEL')
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
# Add label info, but don't draw
ax = s.plot(legend=False, label='LABEL')
assert ax.get_legend() is None # Hasn't been drawn
ax.legend() # draw it
self._check_legend_labels(ax, labels=['LABEL'])
def test_line_area_nan_series(self):
values = [1, 2, np.nan, 3]
s = Series(values)
ts = Series(values, index=tm.makeDateIndex(k=4))
for d in [s, ts]:
ax = _check_plot_works(d.plot)
masked = ax.lines[0].get_ydata()
# remove nan for comparison purpose
exp = np.array([1, 2, 3], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
tm.assert_numpy_array_equal(
masked.mask, np.array([False, False, True, False]))
expected = np.array([1, 2, 0, 3], dtype=np.float64)
ax = _check_plot_works(d.plot, stacked=True)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area, stacked=False)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
s.index.name = 'The Index'
ax = s.plot(use_index=False)
label = ax.get_xlabel()
assert label == ''
ax2 = s.plot.bar(use_index=False)
label2 = ax2.get_xlabel()
assert label2 == ''
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot.bar(log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([200, 500]).plot.barh(log=True)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
tm.close()
# GH 9905
expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00])
if not self.mpl_le_1_2_1:
expected = np.hstack((1.0e-04, expected, 1.0e+01))
if self.mpl_ge_2_0_0:
expected = np.hstack((1.0e-05, expected))
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar')
ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001
ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001
res = ax.get_ylim()
tm.assert_almost_equal(res[0], ymin)
tm.assert_almost_equal(res[1], ymax)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh')
res = ax.get_xlim()
tm.assert_almost_equal(res[0], ymin)
tm.assert_almost_equal(res[1], ymax)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@slow
def test_bar_ignore_index(self):
df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
ax = df.plot.bar(use_index=False)
self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
def test_rotation(self):
df = DataFrame(randn(5, 5))
# Default rot 0
axes = df.plot()
self._check_ticks_props(axes, xrot=0)
axes = df.plot(rot=30)
self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
assert xp == ax.get_xlim()[0]
@slow
def test_pie_series(self):
# if the sum of values is less than 1.0, pie handles them as rates and draws
# a semicircle.
series = Series(np.random.randint(1, 5),
index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, series.index)
assert ax.get_ylabel() == 'YLABEL'
# without wedge labels
ax = _check_plot_works(series.plot.pie, labels=None)
self._check_text_labels(ax.texts, [''] * 5)
# with less colors than elements
color_args = ['r', 'g', 'b']
ax = _check_plot_works(series.plot.pie, colors=color_args)
color_expected = ['r', 'g', 'b', 'r', 'g']
self._check_colors(ax.patches, facecolors=color_expected)
# with labels and colors
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
ax = _check_plot_works(series.plot.pie, labels=labels,
colors=color_args)
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
# with autopct and fontsize
ax = _check_plot_works(series.plot.pie, colors=color_args,
autopct='%.2f', fontsize=7)
pcts = ['{0:.2f}'.format(s * 100)
for s in series.values / float(series.sum())]
iters = [iter(series.index), iter(pcts)]
expected_texts = list(next(it) for it in itertools.cycle(iters))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
assert t.get_fontsize() == 7
# includes negative value
with pytest.raises(ValueError):
series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
series.plot.pie()
# includes nan
series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'],
name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
def test_pie_nan(self):
s = Series([1, np.nan, 1, 1])
ax = s.plot.pie(legend=True)
expected = ['0', '', '2', '3']
result = [x.get_text() for x in ax.texts]
assert result == expected
@slow
def test_hist_df_kwargs(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.plot.hist(bins=5)
assert len(ax.patches) == 10
@slow
def test_hist_df_with_nonnumerics(self):
# GH 9853
with tm.RNGContext(1):
df = DataFrame(
np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
df['E'] = ['x', 'y'] * 5
ax = df.plot.hist(bins=5)
assert len(ax.patches) == 20
ax = df.plot.hist() # bins=10
assert len(ax.patches) == 40
@slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist,
by=self.ts.index.month)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist,
by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with pytest.raises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
assert len(ax.patches) == 2
@slow
def test_hist_layout(self):
df = self.hist_df
with pytest.raises(ValueError):
df.height.hist(layout=(1, 1))
with pytest.raises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
df = self.hist_df
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.gender, layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2),
figsize=(12, 7))
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes()
assert len(axes) == 2
@slow
def test_hist_secondary_legend(self):
# GH 9610
df = DataFrame(np.random.randn(30, 4), columns=list('abcd'))
# primary -> secondary
ax = df['a'].plot.hist(legend=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b (right)'])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left axis must be invisible, right axis must be visible
self._check_legend_labels(ax.left_ax,
labels=['a (right)', 'b (right)'])
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> primary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
# right axes is returned
df['b'].plot.hist(ax=ax, legend=True)
# both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
assert ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
@slow
def test_df_series_secondary_legend(self):
# GH 9779
df = DataFrame(np.random.randn(30, 3), columns=list('abc'))
s = Series(np.random.randn(30), name='x')
# primary -> secondary (without passing ax)
ax = df.plot()
s.plot(legend=True, secondary_y=True)
# both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# primary -> secondary (with passing ax)
ax = df.plot()
s.plot(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (without passing ax)
ax = df.plot(secondary_y=True)
s.plot(legend=True, secondary_y=True)
# both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, labels=expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (with passing ax)
ax = df.plot(secondary_y=True)
s.plot(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (with passing ax)
ax = df.plot(secondary_y=True, mark_right=False)
s.plot(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a', 'b', 'c', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with pytest.raises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_kde(self):
ax = self.ts.plot.hist(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
# ticks are values, thus ticklabels are blank
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
_check_plot_works(self.ts.plot.kde)
_check_plot_works(self.ts.plot.density)
ax = self.ts.plot.kde(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kde_kwargs(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from numpy import linspace
_check_plot_works(self.ts.plot.kde, bw_method=.5,
ind=linspace(-100, 100, 20))
_check_plot_works(self.ts.plot.density, bw_method=.5,
ind=linspace(-100, 100, 20))
ax = self.ts.plot.kde(logy=True, bw_method=.5,
ind=linspace(-100, 100, 20))
self._check_ax_scales(ax, yaxis='log')
self._check_text_labels(ax.yaxis.get_label(), 'Density')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
s = Series(np.random.uniform(size=50))
s[0] = np.nan
axes = _check_plot_works(s.plot.kde)
# gh-14821: check if the values have any missing values
assert any(~np.isnan(axes.lines[0].get_xdata()))
@slow
def test_hist_kwargs(self):
ax = self.ts.plot.hist(bins=5)
assert len(ax.patches) == 5
self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
tm.close()
if self.mpl_ge_1_3_1:
ax = self.ts.plot.hist(orientation='horizontal')
self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
tm.close()
ax = self.ts.plot.hist(align='left', stacked=True)
tm.close()
@slow
def test_hist_kde_color(self):
ax = self.ts.plot.hist(logy=True, bins=10, color='b')
self._check_ax_scales(ax, yaxis='log')
assert len(ax.patches) == 10
self._check_colors(ax.patches, facecolors=['b'] * 10)
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
ax = self.ts.plot.kde(logy=True, color='r')
self._check_ax_scales(ax, yaxis='log')
lines = ax.get_lines()
assert len(lines) == 1
self._check_colors(lines, ['r'])
@slow
def test_boxplot_series(self):
ax = self.ts.plot.box(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [self.ts.name])
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kind_both_ways(self):
s = Series(range(3))
kinds = (plotting._core._common_kinds +
plotting._core._series_kinds)
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
s.plot(kind=kind)
getattr(s.plot, kind)()
@slow
def test_invalid_plot_data(self):
s = Series(list('abcd'))
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with pytest.raises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@slow
def test_errorbar_plot(self):
s = Series(np.arange(10), name='x')
s_err = np.random.randn(10)
d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
# test line and bar plots
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=1, yerr=1)
ax = _check_plot_works(s.plot, xerr=s_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
# test time series plotting
ix = date_range('1/1/2000', '1/1/2001', freq='M')
ts = Series(np.arange(12), index=ix, name='x')
ts_err = Series(np.random.randn(12), index=ix)
td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
ax = _check_plot_works(ts.plot, yerr=ts_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(ts.plot, yerr=td_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
# check incorrect lengths and types
with pytest.raises(ValueError):
s.plot(yerr=np.arange(11))
s_err = ['zzz'] * 10
# in mpl 1.5+ this is a TypeError
with pytest.raises((ValueError, TypeError)):
s.plot(yerr=s_err)
def test_table(self):
_check_plot_works(self.series.plot, table=True)
_check_plot_works(self.series.plot, table=self.series)
@slow
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(Series([1, 2, 3]),
plotting._core._series_kinds +
plotting._core._common_kinds)
@slow
def test_standard_colors(self):
from pandas.plotting._style import _get_standard_colors
for c in ['r', 'red', 'green', '#FF0000']:
result = _get_standard_colors(1, color=c)
assert result == [c]
result = _get_standard_colors(1, color=[c])
assert result == [c]
result = _get_standard_colors(3, color=c)
assert result == [c] * 3
result = _get_standard_colors(3, color=[c])
assert result == [c] * 3
@slow
def test_standard_colors_all(self):
import matplotlib.colors as colors
from pandas.plotting._style import _get_standard_colors
# multiple colors like mediumaquamarine
for c in colors.cnames:
result = _get_standard_colors(num_colors=1, color=c)
assert result == [c]
result = _get_standard_colors(num_colors=1, color=[c])
assert result == [c]
result = _get_standard_colors(num_colors=3, color=c)
assert result == [c] * 3
result = _get_standard_colors(num_colors=3, color=[c])
assert result == [c] * 3
# single letter colors like k
for c in colors.ColorConverter.colors:
result = _get_standard_colors(num_colors=1, color=c)
assert result == [c]
result = _get_standard_colors(num_colors=1, color=[c])
assert result == [c]
result = _get_standard_colors(num_colors=3, color=c)
assert result == [c] * 3
result = _get_standard_colors(num_colors=3, color=[c])
assert result == [c] * 3
def test_series_plot_color_kwargs(self):
# GH1890
ax = Series(np.arange(12) + 1).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_kwargs(self):
# #1890
ax = Series(np.arange(12) + 1, index=date_range(
'1/1/2000', periods=12)).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_with_empty_kwargs(self):
import matplotlib as mpl
if self.mpl_ge_1_5_0:
def_colors = self._maybe_unpack_cycler(mpl.rcParams)
else:
def_colors = mpl.rcParams['axes.color_cycle']
index = date_range('1/1/2000', periods=12)
s = Series(np.arange(1, 13), index=index)
ncolors = 3
for i in range(ncolors):
ax = s.plot()
self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
def test_xticklabels(self):
# GH11529
s = Series(np.arange(10), index=['P%02d' % i for i in range(10)])
ax = s.plot(xticks=[0, 3, 5, 9])
exp = ['P%02d' % i for i in [0, 3, 5, 9]]
self._check_text_labels(ax.get_xticklabels(), exp)
def test_custom_business_day_freq(self):
# GH7222
from pandas.tseries.offsets import CustomBusinessDay
s = Series(range(100, 121), index=pd.bdate_range(
start='2014-05-01', end='2014-06-01',
freq=CustomBusinessDay(holidays=['2014-05-26'])))
_check_plot_works(s.plot)
|
mit
|
joshmoore/openmicroscopy
|
components/tools/OmeroFS/test/drivers.py
|
1
|
13464
|
#!/usr/bin/env python
"""
Utility classes for generating file-system-like events
for testing.
Copyright 2009 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import exceptions
import logging
import os
import threading
import time
import unittest
import omero_ext.uuid as uuid # see ticket:3774
import omero.grid.monitors as monitors
import IceGrid
from path import path
from omero.util import ServerContext
from omero_ext.mox import Mox
from omero_ext.functional import wraps
from omero.util.temp_files import create_path
from fsDropBoxMonitorClient import *
LOGFORMAT = """%(asctime)s %(levelname)-5s [%(name)40s] (%(threadName)-10s) %(message)s"""
logging.basicConfig(level=0,format=LOGFORMAT)
class AbstractEvent(object):
"""
Event which is configured in a Driver instance
to be executed at a specified time.
"""
def __init__(self, waitMillis):
"""
waitMillis is the time, in milliseconds, that should elapse
between the previous event (or startup) and this event.
"""
self.log = logging.getLogger("Event")
self.client = None
self.waitMillis = waitMillis
def setClient(self, client):
"""
Sets the client which will receive the event on run.
"""
if self.client:
self.log.error("Reusing event: old=%s new=%s" % (self.client, client))
self.client = client
def run(self):
"""
By default, nothing.
"""
self.log.info("Sleeping %s" % self.waitMillis)
time.sleep(self.waitMillis/1000)
if not self.client:
self.log.error("No client")
self.doRun()
class CallbackEvent(AbstractEvent):
"""
Not really an Event, but allows some action
to be executed in the driver thread.
"""
def __init__(self, waitMillis, delegate):
AbstractEvent.__init__(self, waitMillis)
self.delegate = delegate
def doRun(self):
"""
Calls the delegate.
"""
m = self.delegate
m(self.client)
class InfoEvent(AbstractEvent):
"""
Event with an info to pass to the client
"""
def __init__(self, waitMillis, info):
AbstractEvent.__init__(self, waitMillis)
self.info = info
def doRun(self):
"""
Passes the info to the client via fsEventHappened.
"""
self.client.fsEventHappened("", [self.info], None) # Ice.Current
class DirInfoEvent(InfoEvent):
"""
Adds a test-specific "dir" attribute to EventInfo
instance. Used by the Simulator and perhaps other
test monitor clients
"""
def __init__(self, waitMillis, info):
InfoEvent.__init__(self, waitMillis, info)
self.info.dir = True
class Driver(threading.Thread):
"""
Class which generates fsEvents at a pre-defined time.
"""
def __init__(self, client):
assert client.fsEventHappened
threading.Thread.__init__(self)
self.log = logging.getLogger("Driver")
self.client = client
self.events = []
self.errors = []
def add(self, event):
# Force attribute error
assert event.setClient
assert event.run
self.events.append(event)
self.log.debug("Added: %s" % event)
def run(self):
self.log.debug("Running %s event(s)" % len(self.events))
for event in self.events:
try:
event.setClient(self.client)
event.run()
except exceptions.Exception, e:
self.errors.append((event, e))
self.log.exception("Error in Driver.run()")
def with_driver(func, errors = 0):
""" Decorator for running a test with a Driver """
def handler(*args, **kwargs):
self = args[0]
self.dir = create_path(folder=True) / "DropBox"
self.simulator = Simulator(self.dir)
self.client = MockMonitor(self.dir, pre=[self.simulator], post=[])
try:
self.driver = Driver(self.client)
rv = func(*args, **kwargs)
self.assertEquals(errors, len(self.driver.errors))
for i in range(errors):
self.driver.errors.pop()
return rv
finally:
self.client.stop()
return wraps(func)(handler)
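# A minimal sketch (hypothetical test case, not part of the original suite) of
# how the pieces above cooperate: the decorator provides self.dir,
# self.simulator, self.client and self.driver, so a test body only schedules
# events and runs the driver.
class ExampleDriverUsage(unittest.TestCase):
    @with_driver
    def xtest_create_one_file(self):  # "xtest" keeps the sketch out of test runs
        info = monitors.EventInfo(str(self.dir / "user_1" / "example.txt"),
                                  monitors.EventType.Create)
        self.driver.add(InfoEvent(0, info))
        self.driver.run()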
class Replay(object):
"""
Utility to read EVENT_RECORD logs and make the proper
calls on the given target.
"""
def __init__(self, dir, source, target):
"""
Uses the dir as the location where files should *appear*
to be created, regardless of what the EVENT_RECORD suggests.
"""
self.log = logging.getLogger("Replay")
self.dir_out = dir
self.dir_in = None
self.batch = None
self.bsize = None
        self.timestamp = None
self.filesets = None
self.source = source
self.target = target
def run(self):
for line in self.source.lines():
            if "EVENT_RECORD" in line:
parts = line.split("::")
cookie = parts[1]
timestamp = float(parts[2])
category = parts[3]
data = parts[4].strip()
if category == "Directory":
self.directory(timestamp, data)
elif category == "Batch":
self.batchStart(timestamp, data)
elif category == "Filesets":
self.fileset(timestamp, data)
elif category == "Create":
self.event(timestamp, data, monitors.EventType.Create)
elif category == "Modify":
self.event(timestamp, data, monitors.EventType.Modify)
def directory(self, timestamp, data):
self.dir_in = data
self.timestamp = float(timestamp)
self.log.info("Replaying from %s at %s", self.dir_in, self.timestamp)
def batchStart(self, timestamp, data):
if self.batch:
assert len(self.batch) == ( self.bsize * 2 ) # Double due to callbacks
self.process()
self.batch = []
self.bsize = int(data)
def fileset(self, timestamp, data):
filesets = eval(data, {"__builtins__":None}, {})
self.filesets = dict()
        for k, iv in filesets.items():
k = self.rewrite(k)
ov = []
for i in iv:
ov.append(self.rewrite(i))
self.filesets[k] = ov
return self.filesets
def event(self, timestamp, data, type):
data = self.rewrite(data)
def cb(client):
client.files = dict(self.filesets)
self.batch.append(CallbackEvent(0, cb))
offset = timestamp - self.timestamp
self.timestamp = timestamp
info = monitors.EventInfo(data, type)
event = InfoEvent(offset, info)
self.batch.append(event)
return event
def process(self):
if self.target:
for event in self.batch:
self.target.add(event)
def rewrite(self, data):
if not data.startswith(self.dir_in):
raise exceptions.Exception("%s doesn't start with %s" % (data, self.dir_in))
data = data[len(self.dir_in):]
data = self.dir_out + data
return data
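# A rough sketch of the record format Replay.run() expects (values are
# illustrative only); each matching line splits on "::" into
# EVENT_RECORD::<cookie>::<timestamp>::<category>::<data>, e.g.
#   EVENT_RECORD::abc123::1000.0::Directory::/recorded/DropBox
#   EVENT_RECORD::abc123::1000.5::Batch::1
#   EVENT_RECORD::abc123::1000.5::Filesets::{'/recorded/DropBox/a.dv': ['/recorded/DropBox/a.dv']}
#   EVENT_RECORD::abc123::1001.0::Create::/recorded/DropBox/a.dv
# Paths under the recorded directory are rewritten to live under dir_out.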
class Simulator(monitors.MonitorClient):
"""
Adapter object which takes mocked Events from
a Driver (for example, can be any event source)
and creates files to simulate that those events
really happened.
"""
def __init__(self, dir):
self.dir = path(dir)
self.log = logging.getLogger("Simulator")
def fsEventHappened(self, monitorid, eventList, current = None):
# enum EventType { Create, Modify, Delete, MoveIn, MoveOut, All, System };
for event in eventList:
fileid = event.fileId
file = path(fileid)
if not file.parpath(self.dir):
raise exceptions.Exception("%s is not in %s" % (file, self.dir))
if monitors.EventType.Create == event.type:
if file.exists():
raise exceptions.Exception("%s already exists" % file)
if hasattr(event, "dir"):
self.log.info("Creating dir: %s", file)
file.makedirs()
else:
#
                    # For the moment, we assume directory events are being filtered
# and therefore we will do the creation anyway.
#
if not file.parent.exists():
file.parent.makedirs()
self.log.info("Creating file: %s", file)
file.write_lines(["Created by event: %s" % event])
elif monitors.EventType.Modify == event.type:
if not file.exists():
raise exceptions.Exception("%s doesn't exist" % file)
if hasattr(event, "dir"):
if not file.isdir():
raise exceptions.Exception("%s is not a directory" % file)
self.log.info("Creating file in dir %s", file)
new_file = file / str(uuid.uuid4())
new_file.write_lines(["Writing new file to modify this directory on event: %s" % event])
else:
self.log.info("Modifying file %s", file)
file.write_lines(["Modified by event: %s" % event])
elif monitors.EventType.Delete == event.type:
if not file.exists():
raise exceptions.Exception("%s doesn't exist" % file)
if hasattr(event, "dir"):
if not file.isdir():
raise exceptions.Exception("%s is not a directory" % file)
self.log.info("Deleting dir %s", file)
file.rmtree()
else:
self.log.info("Deleting file %s", file)
file.remove()
elif monitors.EventType.MoveIn == event.type:
raise exceptions.Exception("TO BE REMOVED")
elif monitors.EventType.MoveOut == event.type:
raise exceptions.Exception("TO BE REMOVED")
elif monitors.EventType.System == event.type:
pass # file id here is simply an informational string
else:
self.fail("UNKNOWN EVENT TYPE: %s" % event.eventType)
class mock_communicator(object):
def findObjectFactory(self, *args):
return None
def addObjectFactory(self, *args):
pass
class MockServerContext(ServerContext):
def __init__(self, ic, get_root):
self.mox = Mox()
self.communicator = ic
self.getSession = get_root
self.stop_event = threading.Event()
def newSession(self, *args):
sess = self.mox.CreateMock(omero.api.ServiceFactoryPrx.__class__)
return sess
class MockMonitor(MonitorClientI):
"""
Mock Monitor Client which can also delegate to other clients.
"""
INSTANCES = []
def static_stop():
for i in MockMonitor.INSTANCES:
i.stop()
static_stop = staticmethod(static_stop)
def __init__(self, dir=None, pre = None, post = None):
if pre is None: pre = []
if post is None: post = []
self.root = None
ic = mock_communicator()
MonitorClientI.__init__(self, dir, ic, getUsedFiles = self.used_files, ctx = MockServerContext(ic, self.get_root), worker_wait = 0.1)
self.log = logging.getLogger("MockMonitor")
self.events = []
self.files = {}
self.pre = list(pre)
self.post = list(post)
MockMonitor.INSTANCES.append(self)
def fake_meth(self, name, rv, *args, **kwargs):
self.log.info("%s(%s, %s)=>%s", name, args, kwargs, rv)
if isinstance(rv, exceptions.Exception):
raise rv
else:
return rv
def used_files(self, *args, **kwargs):
return self.fake_meth("getUsedFiles", self.files, *args, **kwargs)
def get_root(self, *args, **kwargs):
return self.fake_meth("getRoot", self.root, *args, **kwargs)
def fsEventHappened(self, monitorid, eventList, current = None):
"""
Dispatches the event first to pre, then to the true implementation
and finally to the post monitor clients. This allows for Simulator
or similar to set the event up properly.
"""
self.events.extend(eventList)
for client in self.pre:
client.fsEventHappened(monitorid, eventList, current)
MonitorClientI.fsEventHappened(self, monitorid, eventList, current)
for client in self.post:
client.fsEventHappened(monitorid, eventList, current)
def with_errors(func, count = 1):
""" Decorator for catching any ERROR logging messages """
def exc_handler(*args, **kwargs):
handler = DetectError()
logging.root.addHandler(handler)
try:
rv = func(*args, **kwargs)
return rv
finally:
logging.root.removeHandler(handler)
exc_handler = wraps(func)(exc_handler)
return exc_handler
class DetectError(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.errors = []
def handle(self, record):
self.errors.append(record)
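# Sketch of intended use for with_errors/DetectError (hypothetical test
# method): the decorator installs a DetectError handler on the root logger
# for the duration of the call so ERROR records emitted by the monitor are
# collected while the test runs.
#   @with_errors
#   def testHandlesBadFile(self):
#       ...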
|
gpl-2.0
|
tsgit/invenio
|
modules/bibrank/lib/bibrankadmin_regression_tests.py
|
17
|
2863
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibRank Admin Regression Test Suite."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
from invenio.config import CFG_SITE_URL
from invenio.testutils import make_test_suite, run_test_suite, \
test_web_page_content, merge_error_messages
class BibRankAdminWebPagesAvailabilityTest(InvenioTestCase):
"""Check BibRank Admin web pages whether they are up or not."""
def test_bibrank_admin_interface_pages_availability(self):
"""bibrankadmin - availability of BibRank Admin interface pages"""
baseurl = CFG_SITE_URL + '/admin/bibrank/bibrankadmin.py/'
_exports = ['', 'addrankarea', 'modifytranslations',
'modifycollection', 'showrankdetails', 'modifyrank',
'deleterank']
error_messages = []
for url in [baseurl + page for page in _exports]:
# first try as guest:
error_messages.extend(test_web_page_content(url,
username='guest',
expected_text=
'Authorization failure'))
# then try as admin:
error_messages.extend(test_web_page_content(url,
username='admin'))
if error_messages:
self.fail(merge_error_messages(error_messages))
return
def test_bibrank_admin_guide_availability(self):
"""bibrankadmin - availability of BibRank Admin guide pages"""
url = CFG_SITE_URL + '/help/admin/bibrank-admin-guide'
error_messages = test_web_page_content(url,
expected_text="BibRank Admin Guide")
if error_messages:
self.fail(merge_error_messages(error_messages))
return
TEST_SUITE = make_test_suite(BibRankAdminWebPagesAvailabilityTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
|
gpl-2.0
|
ChinaMassClouds/copenstack-server
|
openstack/src/horizon-2014.2/openstack_dashboard/dashboards/admin/volumes/volume_types/extras/tables.py
|
14
|
2569
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard import api
class ExtraSpecDelete(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Extra Spec",
u"Delete Extra Specs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Extra Spec",
u"Deleted Extra Specs",
count
)
def delete(self, request, obj_ids):
api.cinder.volume_type_extra_delete(request,
self.table.kwargs['type_id'],
obj_ids)
class ExtraSpecCreate(tables.LinkAction):
name = "create"
verbose_name = _("Create")
url = "horizon:admin:volumes:volume_types:extras:create"
classes = ("ajax-modal",)
icon = "plus"
def get_link_url(self, extra_spec=None):
return reverse(self.url, args=[self.table.kwargs['type_id']])
class ExtraSpecEdit(tables.LinkAction):
name = "edit"
verbose_name = _("Edit")
url = "horizon:admin:volumes:volume_types:extras:edit"
classes = ("btn-edit", "ajax-modal")
def get_link_url(self, extra_spec):
return reverse(self.url, args=[self.table.kwargs['type_id'],
extra_spec.key])
class ExtraSpecsTable(tables.DataTable):
key = tables.Column('key', verbose_name=_('Key'))
value = tables.Column('value', verbose_name=_('Value'))
class Meta:
name = "extras"
verbose_name = _("Extra Specs")
table_actions = (ExtraSpecCreate, ExtraSpecDelete)
row_actions = (ExtraSpecEdit, ExtraSpecDelete)
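    # Extra specs have no separate id field, so the spec key serves as both
    # the row identifier and the display value.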
def get_object_id(self, datum):
return datum.key
def get_object_display(self, datum):
return datum.key
|
gpl-2.0
|
dannyboi104/SickRage
|
lib/guessit/test/test_hashes.py
|
33
|
1966
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.test.guessittest import *
class TestHashes(TestGuessit):
def test_hashes(self):
hashes = (
('hash_mpc', '1MB', u'8542ad406c15c8bd'), # TODO: Check if this value is valid
('hash_ed2k', '1MB', u'ed2k://|file|1MB|1048576|AA3CC5552A9931A76B61A41D306735F7|/'), # TODO: Check if this value is valid
('hash_md5', '1MB', u'5d8dcbca8d8ac21766f28797d6c3954c'),
('hash_sha1', '1MB', u'51d2b8f3248d7ee495b7750c8da5aa3b3819de9d'),
('hash_md5', 'dummy.srt', u'64de6b5893cac24456c46a935ef9c359'),
('hash_sha1', 'dummy.srt', u'a703fc0fa4518080505809bf562c6fc6f7b3c98c')
)
for hash_type, filename, expected_value in hashes:
guess = guess_file_info(file_in_same_dir(__file__, filename), hash_type)
computed_value = guess.get(hash_type)
            assert expected_value == computed_value, \
"Invalid %s for %s: %s != %s" % (hash_type, filename, computed_value, expected_value)
|
gpl-3.0
|
kustodian/ansible
|
lib/ansible/module_utils/network/netvisor/pn_nvos.py
|
38
|
1832
|
# Copyright: (c) 2018, Pluribus Networks
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.network.netvisor.netvisor import run_commands
def pn_cli(module, switch=None, username=None, password=None, switch_local=None):
"""
Method to generate the cli portion to launch the Netvisor cli.
:param module: The Ansible module to fetch username and password.
:return: The cli string for further processing.
"""
cli = ''
if username and password:
cli += '--user "%s":"%s" ' % (username, password)
if switch:
cli += ' switch ' + switch
if switch_local:
cli += ' switch-local '
return cli
def booleanArgs(arg, trueString, falseString):
if arg is True:
return " %s " % trueString
elif arg is False:
return " %s " % falseString
else:
return ""
def run_cli(module, cli, state_map):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param state_map: Provides state of the command.
:param module: The Ansible module to fetch command
"""
state = module.params['state']
command = state_map[state]
result, out, err = run_commands(module, cli)
results = dict(
command=cli,
msg="%s operation completed" % cli,
changed=True
)
# Response in JSON format
if result != 0:
module.exit_json(
command=cli,
msg="%s operation failed" % cli,
changed=False
)
module.exit_json(**results)
|
gpl-3.0
|
bigswitch/nova
|
nova/conductor/rpcapi.py
|
3
|
15993
|
# Copyright 2013 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client side of the conductor RPC API."""
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_versionedobjects import base as ovo_base
import nova.conf
from nova.objects import base as objects_base
from nova import rpc
CONF = nova.conf.CONF
class ConductorAPI(object):
"""Client side of the conductor RPC API
API version history:
* 1.0 - Initial version.
* 1.1 - Added migration_update
* 1.2 - Added instance_get_by_uuid and instance_get_all_by_host
* 1.3 - Added aggregate_host_add and aggregate_host_delete
* 1.4 - Added migration_get
* 1.5 - Added bw_usage_update
* 1.6 - Added get_backdoor_port()
* 1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
and aggregate_metadata_delete
* 1.8 - Added security_group_get_by_instance and
security_group_rule_get_by_security_group
* 1.9 - Added provider_fw_rule_get_all
* 1.10 - Added agent_build_get_by_triple
* 1.11 - Added aggregate_get
* 1.12 - Added block_device_mapping_update_or_create
* 1.13 - Added block_device_mapping_get_all_by_instance
* 1.14 - Added block_device_mapping_destroy
* 1.15 - Added instance_get_all_by_filters and
instance_get_all_hung_in_rebooting and
instance_get_active_by_window
Deprecated instance_get_all_by_host
* 1.16 - Added instance_destroy
* 1.17 - Added instance_info_cache_delete
* 1.18 - Added instance_type_get
* 1.19 - Added vol_get_usage_by_time and vol_usage_update
* 1.20 - Added migration_get_unconfirmed_by_dest_compute
* 1.21 - Added service_get_all_by
* 1.22 - Added ping
* 1.23 - Added instance_get_all
Un-Deprecate instance_get_all_by_host
* 1.24 - Added instance_get
* 1.25 - Added action_event_start and action_event_finish
* 1.26 - Added instance_info_cache_update
* 1.27 - Added service_create
* 1.28 - Added binary arg to service_get_all_by
* 1.29 - Added service_destroy
* 1.30 - Added migration_create
* 1.31 - Added migration_get_in_progress_by_host_and_node
* 1.32 - Added optional node to instance_get_all_by_host
* 1.33 - Added compute_node_create and compute_node_update
* 1.34 - Added service_update
* 1.35 - Added instance_get_active_by_window_joined
* 1.36 - Added instance_fault_create
* 1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
* 1.38 - Added service name to instance_update
* 1.39 - Added notify_usage_exists
* 1.40 - Added security_groups_trigger_handler and
security_groups_trigger_members_refresh
Remove instance_get_active_by_window
* 1.41 - Added fixed_ip_get_by_instance, network_get,
instance_floating_address_get_all, quota_commit,
quota_rollback
* 1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
* 1.43 - Added compute_stop
* 1.44 - Added compute_node_delete
* 1.45 - Added project_id to quota_commit and quota_rollback
* 1.46 - Added compute_confirm_resize
* 1.47 - Added columns_to_join to instance_get_all_by_host and
instance_get_all_by_filters
* 1.48 - Added compute_unrescue
... Grizzly supports message version 1.48. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 1.48.
* 1.49 - Added columns_to_join to instance_get_by_uuid
* 1.50 - Added object_action() and object_class_action()
* 1.51 - Added the 'legacy' argument to
block_device_mapping_get_all_by_instance
* 1.52 - Pass instance objects for compute_confirm_resize
* 1.53 - Added compute_reboot
* 1.54 - Added 'update_cells' argument to bw_usage_update
* 1.55 - Pass instance objects for compute_stop
* 1.56 - Remove compute_confirm_resize and
migration_get_unconfirmed_by_dest_compute
* 1.57 - Remove migration_create()
* 1.58 - Remove migration_get()
... Havana supports message version 1.58. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.58.
* 1.59 - Remove instance_info_cache_update()
* 1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete()
* ... - Remove security_group_get_by_instance() and
security_group_rule_get_by_security_group()
* 1.61 - Return deleted instance from instance_destroy()
* 1.62 - Added object_backport()
* 1.63 - Changed the format of values['stats'] from a dict to a JSON string
in compute_node_update()
* 1.64 - Added use_slave to instance_get_all_filters()
- Remove instance_type_get()
- Remove aggregate_get()
- Remove aggregate_get_by_host()
- Remove instance_get()
- Remove migration_update()
- Remove block_device_mapping_destroy()
* 2.0 - Drop backwards compatibility
- Remove quota_rollback() and quota_commit()
- Remove aggregate_host_add() and aggregate_host_delete()
- Remove network_migrate_instance_start() and
network_migrate_instance_finish()
- Remove vol_get_usage_by_time
... Icehouse supports message version 2.0. So, any changes to
existing methods in 2.x after that point should be done such
that they can handle the version_cap being set to 2.0.
* Remove instance_destroy()
* Remove compute_unrescue()
* Remove instance_get_all_by_filters()
* Remove instance_get_active_by_window_joined()
* Remove instance_fault_create()
* Remove action_event_start() and action_event_finish()
* Remove instance_get_by_uuid()
* Remove agent_build_get_by_triple()
... Juno supports message version 2.0. So, any changes to
existing methods in 2.x after that point should be done such
that they can handle the version_cap being set to 2.0.
* 2.1 - Make notify_usage_exists() take an instance object
* Remove bw_usage_update()
* Remove notify_usage_exists()
... Kilo supports message version 2.1. So, any changes to
existing methods in 2.x after that point should be done such
that they can handle the version_cap being set to 2.1.
* Remove get_ec2_ids()
* Remove service_get_all_by()
* Remove service_create()
* Remove service_destroy()
* Remove service_update()
* Remove migration_get_in_progress_by_host_and_node()
* Remove aggregate_metadata_get_by_host()
* Remove block_device_mapping_update_or_create()
* Remove block_device_mapping_get_all_by_instance()
* Remove instance_get_all_by_host()
* Remove compute_node_update()
* Remove compute_node_delete()
* Remove security_groups_trigger_handler()
* Remove task_log_get()
* Remove task_log_begin_task()
* Remove task_log_end_task()
* Remove security_groups_trigger_members_refresh()
* Remove vol_usage_update()
* Remove instance_update()
* 2.2 - Add object_backport_versions()
* 2.3 - Add object_class_action_versions()
* Remove compute_node_create()
* Remove object_backport()
* 3.0 - Drop backwards compatibility
... Liberty and Mitaka support message version 3.0. So, any changes to
existing methods in 3.x after that point should be done such
that they can handle the version_cap being set to 3.0.
* Remove provider_fw_rule_get_all()
"""
VERSION_ALIASES = {
'grizzly': '1.48',
'havana': '1.58',
'icehouse': '2.0',
'juno': '2.0',
'kilo': '2.1',
'liberty': '3.0',
'mitaka': '3.0',
}
def __init__(self):
super(ConductorAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic, version='3.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.conductor,
CONF.upgrade_levels.conductor)
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
# TODO(hanlind): This method can be removed once oslo.versionedobjects
# has been converted to use version_manifests in remotable_classmethod
# operations, which will use the new class action handler.
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
versions = ovo_base.obj_tree_get_versions(objname)
return self.object_class_action_versions(context,
objname,
objmethod,
versions,
args, kwargs)
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_class_action_versions',
objname=objname, objmethod=objmethod,
object_versions=object_versions,
args=args, kwargs=kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
def object_backport_versions(self, context, objinst, object_versions):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_backport_versions', objinst=objinst,
object_versions=object_versions)
class ComputeTaskAPI(object):
"""Client side of the conductor 'compute' namespaced RPC API
API version history:
1.0 - Initial version (empty).
1.1 - Added unified migrate_server call.
1.2 - Added build_instances
1.3 - Added unshelve_instance
1.4 - Added reservations to migrate_server.
    1.5 - Added the legacy_bdm parameter to build_instances
1.6 - Made migrate_server use instance objects
1.7 - Do not send block_device_mapping and legacy_bdm to build_instances
1.8 - Add rebuild_instance
1.9 - Converted requested_networks to NetworkRequestList object
1.10 - Made migrate_server() and build_instances() send flavor objects
1.11 - Added clean_shutdown to migrate_server()
1.12 - Added request_spec to rebuild_instance()
1.13 - Added request_spec to migrate_server()
1.14 - Added request_spec to unshelve_instance()
"""
def __init__(self):
super(ComputeTaskAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic,
namespace='compute_task',
version='1.0')
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target, serializer=serializer)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit,
reservations=None, clean_shutdown=True, request_spec=None):
kw = {'instance': instance, 'scheduler_hint': scheduler_hint,
'live': live, 'rebuild': rebuild, 'flavor': flavor,
'block_migration': block_migration,
'disk_over_commit': disk_over_commit,
'reservations': reservations,
'clean_shutdown': clean_shutdown,
'request_spec': request_spec,
}
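        # Negotiate downward: start from the newest message version and keep
        # dropping arguments (or converting objects to primitives) until the
        # RPC client reports a version it can actually send.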
version = '1.13'
if not self.client.can_send_version(version):
del kw['request_spec']
version = '1.11'
if not self.client.can_send_version(version):
del kw['clean_shutdown']
version = '1.10'
if not self.client.can_send_version(version):
kw['flavor'] = objects_base.obj_to_primitive(flavor)
version = '1.6'
if not self.client.can_send_version(version):
kw['instance'] = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '1.4'
cctxt = self.client.prepare(version=version)
return cctxt.call(context, 'migrate_server', **kw)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
image_p = jsonutils.to_primitive(image)
version = '1.10'
if not self.client.can_send_version(version):
version = '1.9'
if 'instance_type' in filter_properties:
flavor = filter_properties['instance_type']
flavor_p = objects_base.obj_to_primitive(flavor)
filter_properties = dict(filter_properties,
instance_type=flavor_p)
kw = {'instances': instances, 'image': image_p,
'filter_properties': filter_properties,
'admin_password': admin_password,
'injected_files': injected_files,
'requested_networks': requested_networks,
'security_groups': security_groups}
if not self.client.can_send_version(version):
version = '1.8'
kw['requested_networks'] = kw['requested_networks'].as_tuples()
if not self.client.can_send_version('1.7'):
version = '1.5'
bdm_p = objects_base.obj_to_primitive(block_device_mapping)
kw.update({'block_device_mapping': bdm_p,
'legacy_bdm': legacy_bdm})
cctxt = self.client.prepare(version=version)
cctxt.cast(context, 'build_instances', **kw)
def unshelve_instance(self, context, instance, request_spec=None):
version = '1.14'
kw = {'instance': instance,
'request_spec': request_spec
}
if not self.client.can_send_version(version):
version = '1.3'
del kw['request_spec']
cctxt = self.client.prepare(version=version)
cctxt.cast(context, 'unshelve_instance', **kw)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
preserve_ephemeral=False, request_spec=None, kwargs=None):
version = '1.12'
kw = {'instance': instance,
'new_pass': new_pass,
'injected_files': injected_files,
'image_ref': image_ref,
'orig_image_ref': orig_image_ref,
'orig_sys_metadata': orig_sys_metadata,
'bdms': bdms,
'recreate': recreate,
'on_shared_storage': on_shared_storage,
'preserve_ephemeral': preserve_ephemeral,
'host': host,
'request_spec': request_spec,
}
if not self.client.can_send_version(version):
version = '1.8'
del kw['request_spec']
cctxt = self.client.prepare(version=version)
cctxt.cast(ctxt, 'rebuild_instance', **kw)
|
apache-2.0
|
yarshure/v2ex
|
mapreduce/util.py
|
19
|
4805
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from v2ex.babel import Member
from v2ex.babel import Counter
from v2ex.babel import Section
from v2ex.babel import Node
from v2ex.babel import Topic
from v2ex.babel import Reply
"""Utility functions for use with the mapreduce library."""
__all__ = ["for_name", "is_generator_function", "get_short_name", "parse_bool"]
import inspect
import logging
def for_name(fq_name, recursive=False):
"""Find class/function/method specified by its fully qualified name.
  Fully qualified name can be specified as:
* <module_name>.<class_name>
* <module_name>.<function_name>
* <module_name>.<class_name>.<method_name> (an unbound method will be
returned in this case).
for_name works by doing __import__ for <module_name>, and looks for
<class_name>/<function_name> in module's __dict__/attrs. If fully qualified
name doesn't contain '.', the current module will be used.
Args:
fq_name: fully qualified name of something to find
Returns:
class object.
Raises:
ImportError: when specified module could not be loaded or the class
was not found in the module.
"""
# if "." not in fq_name:
# raise ImportError("'%s' is not a full-qualified name" % fq_name)
fq_name = str(fq_name)
module_name = __name__
short_name = fq_name
if fq_name.rfind(".") >= 0:
(module_name, short_name) = (fq_name[:fq_name.rfind(".")],
fq_name[fq_name.rfind(".") + 1:])
try:
result = __import__(module_name, None, None, [short_name])
return result.__dict__[short_name]
except KeyError:
# If we're recursively inside a for_name() chain, then we want to raise
# this error as a key error so we can report the actual source of the
# problem. If we're *not* recursively being called, that means the
# module was found and the specific item could not be loaded, and thus
# we want to raise an ImportError directly.
if recursive:
raise
else:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError, e:
    # module_name is not actually a module. Try for_name on it to figure
    # out what it is.
try:
module = for_name(module_name, recursive=True)
if hasattr(module, short_name):
return getattr(module, short_name)
else:
# The module was found, but the function component is missing.
raise KeyError()
except KeyError:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError:
# This means recursive import attempts failed, thus we will raise the
# first ImportError we encountered, since it's likely the most accurate.
pass
# Raise the original import error that caused all of this, since it is
# likely the real cause of the overall problem.
raise
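# A minimal usage sketch (illustrative only): for_name() resolves a dotted
# path to the attribute it names, so a standard-library class can be looked
# up the same way a user-supplied handler would be.
#   handler_cls = for_name("logging.handlers.RotatingFileHandler")
#   assert handler_cls.__name__ == "RotatingFileHandler"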
def is_generator_function(obj):
"""Return true if the object is a user-defined generator function.
  Generator function objects provide the same attributes as functions.
See isfunction.__doc__ for attributes listing.
Adapted from Python 2.6.
Args:
obj: an object to test.
Returns:
true if the object is generator function.
"""
CO_GENERATOR = 0x20
return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
obj.func_code.co_flags & CO_GENERATOR))
def get_short_name(fq_name):
"""Returns the last component of the name."""
return fq_name.split(".")[-1:][0]
def parse_bool(obj):
"""Return true if the object represents a truth value, false otherwise.
For bool and numeric objects, uses Python's built-in bool function. For
str objects, checks string against a list of possible truth values.
Args:
obj: object to determine boolean value of; expected
Returns:
Boolean value according to 5.1 of Python docs if object is not a str
object. For str objects, return True if str is in TRUTH_VALUE_SET
and False otherwise.
http://docs.python.org/library/stdtypes.html
"""
if type(obj) is str:
TRUTH_VALUE_SET = ["true", "1", "yes", "t", "on"]
return obj.lower() in TRUTH_VALUE_SET
else:
return bool(obj)
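# Illustrative behaviour of parse_bool(): "Yes", "t" and "1" are truthy via
# TRUTH_VALUE_SET, the string "0" is falsy because it is not in the set, and
# non-string values such as 0 or 1 simply fall back to bool().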
|
bsd-3-clause
|
lildadou/Flexget
|
flexget/api/schema.py
|
1
|
1296
|
from flask import jsonify
from flexget.api import api, APIResource
from flexget.config_schema import schema_paths, resolve_ref
schema_api = api.namespace('schema', description='Config and plugin schemas')
_plugins_cache = None
schema_api_list = api.schema('schema.list', {
'type': 'object',
'properties': {
'schemas': {
'type': 'array',
'items': {'type': 'object'}
}
}
})
@schema_api.route('/')
class SchemaAllAPI(APIResource):
@api.response(200, model=schema_api_list)
def get(self, session=None):
""" List all schema definitions """
schemas = {}
for path in schema_paths:
schemas[path] = resolve_ref(path)
return jsonify({'schemas': schemas})
@schema_api.route('/<path:path>/')
@api.doc(params={'path': 'Path of schema'})
@api.response(404, 'invalid schema path')
class SchemaAPI(APIResource):
@api.response(200, model=schema_api_list)
def get(self, path, session=None):
""" Get schema definition """
path = '/schema/%s' % path
if path in schema_paths:
schema = resolve_ref(path)
if hasattr(schema, '__call__'):
schema = schema()
return schema
return {'error': 'invalid schema path'}, 404
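# Example (illustrative path): GET /schema/plugin/regexp/ is looked up as
# '/schema/plugin/regexp' in schema_paths; unknown paths fall through to the
# 404 payload above.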
|
mit
|
keen99/SickRage
|
lib/hachoir_core/field/__init__.py
|
94
|
2280
|
# Field classes
from hachoir_core.field.field import Field, FieldError, MissingField, joinPath
from hachoir_core.field.bit_field import Bit, Bits, RawBits
from hachoir_core.field.byte_field import Bytes, RawBytes
from hachoir_core.field.sub_file import SubFile, CompressedField
from hachoir_core.field.character import Character
from hachoir_core.field.integer import (
Int8, Int16, Int24, Int32, Int64,
UInt8, UInt16, UInt24, UInt32, UInt64,
GenericInteger)
from hachoir_core.field.enum import Enum
from hachoir_core.field.string_field import (GenericString,
String, CString, UnixLine,
PascalString8, PascalString16, PascalString32)
from hachoir_core.field.padding import (PaddingBits, PaddingBytes,
NullBits, NullBytes)
# Functions
from hachoir_core.field.helper import (isString, isInteger,
createPaddingField, createNullField, createRawField,
writeIntoFile, createOrphanField)
# FieldSet classes
from hachoir_core.field.fake_array import FakeArray
from hachoir_core.field.basic_field_set import (BasicFieldSet,
ParserError, MatchError)
from hachoir_core.field.generic_field_set import GenericFieldSet
from hachoir_core.field.seekable_field_set import SeekableFieldSet, RootSeekableFieldSet
from hachoir_core.field.field_set import FieldSet
from hachoir_core.field.static_field_set import StaticFieldSet
from hachoir_core.field.parser import Parser
from hachoir_core.field.vector import GenericVector, UserVector
# Complex types
from hachoir_core.field.float import Float32, Float64, Float80
from hachoir_core.field.timestamp import (GenericTimestamp,
TimestampUnix32, TimestampUnix64, TimestampMac32, TimestampUUID60, TimestampWin64,
DateTimeMSDOS32, TimeDateMSDOS32, TimedeltaWin64)
# Special Field classes
from hachoir_core.field.link import Link, Fragment
available_types = (
Bit, Bits, RawBits,
Bytes, RawBytes,
SubFile,
Character,
Int8, Int16, Int24, Int32, Int64,
UInt8, UInt16, UInt24, UInt32, UInt64,
String, CString, UnixLine,
PascalString8, PascalString16, PascalString32,
Float32, Float64,
PaddingBits, PaddingBytes,
NullBits, NullBytes,
TimestampUnix32, TimestampMac32, TimestampWin64,
DateTimeMSDOS32, TimeDateMSDOS32,
# GenericInteger, GenericString,
)
|
gpl-3.0
|