| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
# -*- coding: utf-8 -*-
###############################################################################
#
# RandomWord
# Retrieves a random word.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RandomWord(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the RandomWord Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(RandomWord, self).__init__(temboo_session, '/Library/Wordnik/Words/RandomWord')
def new_input_set(self):
return RandomWordInputSet()
def _make_result_set(self, result, path):
return RandomWordResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RandomWordChoreographyExecution(session, exec_id, path)
class RandomWordInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the RandomWord
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key from Wordnik.)
"""
super(RandomWordInputSet, self)._set_input('APIKey', value)
def set_ExcludePartOfSpeech(self, value):
"""
Set the value of the ExcludePartOfSpeech input for this Choreo. ((optional, string) Excludes the specified comma-delimited parts of speech from the results returned. Acceptable values include: adjective, noun, etc. See docs for full list.)
"""
super(RandomWordInputSet, self)._set_input('ExcludePartOfSpeech', value)
def set_HasDefinition(self, value):
"""
Set the value of the HasDefinition input for this Choreo. ((optional, string) Only returns words that have dictionary definitions when true. Otherwise false. Defaults to true.)
"""
super(RandomWordInputSet, self)._set_input('HasDefinition', value)
def set_IncludePartOfSpeech(self, value):
"""
Set the value of the IncludePartOfSpeech input for this Choreo. ((optional, string) Only includes the specified comma-delimited parts of speech. Acceptable values include: adjective, noun, etc. See docs for full list.)
"""
super(RandomWordInputSet, self)._set_input('IncludePartOfSpeech', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) Maximum number of results to return. Defaults to 10.)
"""
super(RandomWordInputSet, self)._set_input('Limit', value)
def set_MaxCorpus(self, value):
"""
Set the value of the MaxCorpus input for this Choreo. ((optional, integer) Results include a corpus frequency count for each word returned. When this input is specified, results are limited to words with a corpus frequency count below the given number.)
"""
super(RandomWordInputSet, self)._set_input('MaxCorpus', value)
def set_MaxDictionaries(self, value):
"""
Set the value of the MaxDictionaries input for this Choreo. ((optional, integer) Maximum number of dictionaries in which the words appear.)
"""
super(RandomWordInputSet, self)._set_input('MaxDictionaries', value)
def set_MaxLength(self, value):
"""
Set the value of the MaxLength input for this Choreo. ((optional, integer) Maximum word length.)
"""
super(RandomWordInputSet, self)._set_input('MaxLength', value)
def set_MinCorpus(self, value):
"""
Set the value of the MinCorpus input for this Choreo. ((optional, integer) Results include a corpus frequency count for each word returned. When this input is specified, results are limited to words with a corpus frequency count above the given number.)
"""
super(RandomWordInputSet, self)._set_input('MinCorpus', value)
def set_MinDictionaries(self, value):
"""
Set the value of the MinDictionaries input for this Choreo. ((optional, integer) Minimum number of dictionaries in which the words appear.)
"""
super(RandomWordInputSet, self)._set_input('MinDictionaries', value)
def set_MinLength(self, value):
"""
Set the value of the MinLength input for this Choreo. ((optional, integer) Minimum word length.)
"""
super(RandomWordInputSet, self)._set_input('MinLength', value)
def set_ResponseType(self, value):
"""
Set the value of the ResponseType input for this Choreo. ((optional, string) Response can be either JSON or XML. Defaults to JSON.)
"""
super(RandomWordInputSet, self)._set_input('ResponseType', value)
class RandomWordResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the RandomWord Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Wordnik.)
"""
return self._output.get('Response', None)
class RandomWordChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RandomWordResultSet(response, path)
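# --- Usage sketch (editor's addition, not part of the generated Temboo file): a minimal
# --- example of driving this Choreo. It assumes the standard Temboo SDK session class and
# --- execute_with_results() helper; all credentials below are placeholders.
if __name__ == '__main__':
    from temboo.core.session import TembooSession

    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')  # placeholders
    choreo = RandomWord(session)
    inputs = choreo.new_input_set()
    inputs.set_APIKey('YOUR_WORDNIK_API_KEY')  # placeholder Wordnik key
    inputs.set_HasDefinition('true')
    results = choreo.execute_with_results(inputs)  # assumed Temboo SDK execution helper
    print(results.get_Response())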
| jordanemedlock/psychtruths | temboo/core/Library/Wordnik/Words/RandomWord.py | Python | apache-2.0 | 6,299 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import codecs
import datetime
import functools
import inspect
import itertools
import sys
if sys.version_info < (2, 7):
# On Python <= 2.6, json module is not C boosted, so try to use
# simplejson module if available
try:
import simplejson as json
except ImportError:
import json
else:
import json
import six
import six.moves.xmlrpc_client as xmlrpclib
from nova.openstack.common import gettextutils
from nova.openstack.common import importutils
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may have something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def dump(obj, fp, *args, **kwargs):
return json.dump(obj, fp, *args, **kwargs)
def loads(s, encoding='utf-8', **kwargs):
return json.loads(strutils.safe_decode(s, encoding), **kwargs)
def load(fp, encoding='utf-8', **kwargs):
return json.load(codecs.getreader(encoding)(fp), **kwargs)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
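# --- Illustrative usage (editor's addition, not part of the upstream module): shows the
# --- behaviour described in to_primitive()'s docstring: datetimes are converted through
# --- timeutils.strtime(), and object instances are only walked (depth-bounded) when
# --- convert_instances=True. The sample class below is made up.
if __name__ == '__main__':
    class _Sample(object):
        def __init__(self):
            self.when = datetime.datetime(2014, 1, 1, 12, 0, 0)
            self.child = None

    outer = _Sample()
    outer.child = _Sample()
    # instances are reduced to their __dict__, one extra depth level per instance
    print(to_primitive(outer, convert_instances=True))
    # dumps() routes values the plain json encoder rejects through to_primitive()
    print(dumps({'when': datetime.datetime(2014, 1, 1, 12, 0, 0)}))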
| srajag/nova | nova/openstack/common/jsonutils.py | Python | apache-2.0 | 6,824 |
import logging
import requests
from flask import redirect, url_for, Blueprint, flash, request, session
from flask_login import login_user
from flask_oauthlib.client import OAuth
from redash import models, settings
from redash.authentication.org_resolving import current_org
logger = logging.getLogger('google_oauth')
oauth = OAuth()
blueprint = Blueprint('google_oauth', __name__)
def google_remote_app():
if 'google' not in oauth.remote_apps:
oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={
'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',
},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
consumer_key=settings.GOOGLE_CLIENT_ID,
consumer_secret=settings.GOOGLE_CLIENT_SECRET)
return oauth.google
def get_user_profile(access_token):
headers = {'Authorization': 'OAuth {}'.format(access_token)}
response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers)
if response.status_code == 401:
logger.warning("Failed getting user profile (response code 401).")
return None
return response.json()
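# A profile passes verification when the org is public, when the e-mail's domain is one
# of the org's allowed Google Apps domains, or when the user already belongs to the org.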
def verify_profile(org, profile):
if org.is_public:
return True
email = profile['email']
domain = email.split('@')[-1]
if domain in org.google_apps_domains:
return True
if org.has_user(email) == 1:
return True
return False
def create_and_login_user(org, name, email):
try:
user_object = models.User.get_by_email_and_org(email, org)
if user_object.name != name:
logger.debug("Updating user name (%r -> %r)", user_object.name, name)
user_object.name = name
user_object.save()
except models.User.DoesNotExist:
logger.debug("Creating user object (%r)", name)
user_object = models.User.create(org=org, name=name, email=email, groups=[org.default_group.id])
login_user(user_object, remember=True)
return user_object
@blueprint.route('/<org_slug>/oauth/google', endpoint="authorize_org")
def org_login(org_slug):
session['org_slug'] = current_org.slug
return redirect(url_for(".authorize", next=request.args.get('next', None)))
@blueprint.route('/oauth/google', endpoint="authorize")
def login():
callback = url_for('.callback', _external=True)
next = request.args.get('next', url_for("redash.index", org_slug=session.get('org_slug')))
logger.debug("Callback url: %s", callback)
logger.debug("Next is: %s", next)
return google_remote_app().authorize(callback=callback, state=next)
@blueprint.route('/oauth/google_callback', endpoint="callback")
def authorized():
resp = google_remote_app().authorized_response()
access_token = resp['access_token']
if access_token is None:
logger.warning("Access token missing in call back request.")
flash("Validation error. Please retry.")
return redirect(url_for('redash.login'))
profile = get_user_profile(access_token)
if profile is None:
flash("Validation error. Please retry.")
return redirect(url_for('redash.login'))
if 'org_slug' in session:
org = models.Organization.get_by_slug(session.pop('org_slug'))
else:
org = current_org
if not verify_profile(org, profile):
logger.warning("User tried to login with unauthorized domain name: %s (org: %s)", profile['email'], org)
flash("Your Google Apps account ({}) isn't allowed.".format(profile['email']))
return redirect(url_for('redash.login', org_slug=org.slug))
create_and_login_user(org, profile['name'], profile['email'])
next = request.args.get('state') or url_for("redash.index", org_slug=org.slug)
return redirect(next)
| pubnative/redash | redash/authentication/google_oauth.py | Python | bsd-2-clause | 4,179 |
'''
Created on Nov 16, 2011
@author: clarkmatthew
Simple Example script using the eutester web ui classes.
The intention of this script is to download and unzip credentials to a given file by
interacting with the webui.
-First a Eucaweb object is created which gives us a selenium interface to work with.
In this case we do not need a eutester connection to interact with the cloud outside the webui.
-A euwebuser is created with an account name, user name, and password.
There are webmethods to create new users, but in this script we expect this user
already exists and has previously logged into the webui.
-To simplify access to predefined xpath variables, a euwebglobals object is created.
-The script navigates to the webui login page, the user logs in, and downloads credentials to a given filename.
'''
from eucaweb import Eucaweb, euwebuser, euwebaccount,euwebglobals, webtestcase
import time
def testrun():
# New user to be added; the username will be randomly generated and the account is derived from acctadmin above
user = euwebuser.Euwebuser(passwd="password", account="eucalyptus", user="admin")
user.printUser()
wg = euwebglobals.Webui_globals()
#gui = eucawebdriver.Euguid(host="localhost",browser="FIREFOX",clc="192.168.51.9",needEutester=False)
gui.goLoginPage()
gui.login(user.account, user.user, user.passwd)
time.sleep(5)
gui.downloadCredentials(user.user, user.account, timeout=10, callBackMethod = testMethod, force = False)
time.sleep(30)
gui.tearDown(0)
def testMethod(filename):
creddir= '/tmp/credentialtests'
print "this is our test method we got filename("+filename+")"
gui.unzipCredentialsToDir(filename, creddir)
gui.sourceEucarcFromDir(creddir)
if __name__ == '__main__':
gui = Eucaweb(host="localhost",browser="FIREFOX",clc="192.168.51.9",needEutester=False)
testrun()
print "this test is done"
| nagyistoce/eutester | eucaweb/example_get_creds_via_webui.py | Python | bsd-2-clause | 2,033 |
# -*- coding: utf-8 -*-
"""
flaskbb.utils.views
~~~~~~~~~~~~~~~~~~~
This module contains some helpers for creating views.
:copyright: (c) 2016 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from flaskbb.utils.helpers import render_template
from flask.views import View
class RenderableView(View):
def __init__(self, template, view):
self.template = template
self.view = view
def dispatch_request(self, *args, **kwargs):
view_model = self.view(*args, **kwargs)
return render_template(self.template, **view_model)
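# --- Usage sketch (editor's addition, not part of flaskbb): RenderableView pairs a
# --- template with a callable that returns the template context and is registered via
# --- Flask's pluggable-view machinery. The endpoint, URL, template name and view
# --- callable below are hypothetical placeholders.
if __name__ == "__main__":
    from flask import Flask

    def about_view():
        # the returned dict becomes the template context
        return {"title": "About"}

    app = Flask(__name__)
    app.add_url_rule(
        "/about",
        view_func=RenderableView.as_view("about", "about.html", about_view),
    )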
| realityone/flaskbb | flaskbb/utils/views.py | Python | bsd-3-clause | 600 |
__author__ = 'nearl'
| nmearl/ifupy | tests/__init__.py | Python | bsd-3-clause | 21 |
# pylint: disable-msg=E1101,W0613,W0603
import os
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslib import iNaT
from pandas.compat import StringIO, long, u
from pandas import compat, isnull
from pandas import Series, DataFrame, to_datetime, MultiIndex
from pandas.io.common import get_filepath_or_buffer, _get_handle
from pandas.core.common import AbstractMethodError
from pandas.io.formats.printing import pprint_thing
from .normalize import _convert_to_line_delimits
from .table_schema import build_table_schema
from pandas.core.dtypes.common import is_period_dtype
loads = json.loads
dumps = json.dumps
TABLE_SCHEMA_VERSION = '0.20.0'
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False):
if lines and orient != 'records':
raise ValueError(
"'lines' keyword only valid when 'orient' is records")
if orient == 'table' and isinstance(obj, Series):
obj = obj.to_frame(name=obj.name or 'values')
if orient == 'table' and isinstance(obj, DataFrame):
writer = JSONTableWriter
elif isinstance(obj, Series):
writer = SeriesWriter
elif isinstance(obj, DataFrame):
writer = FrameWriter
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
s = writer(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
if lines:
s = _convert_to_line_delimits(s)
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, 'w') as fh:
fh.write(s)
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise AbstractMethodError(self)
def write(self):
return dumps(
self.obj,
orient=self.orient,
double_precision=self.double_precision,
ensure_ascii=self.ensure_ascii,
date_unit=self.date_unit,
iso_dates=self.date_format == 'iso',
default_handler=self.default_handler
)
class SeriesWriter(Writer):
_default_orient = 'index'
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'%s'" % self.orient)
class FrameWriter(Writer):
_default_orient = 'columns'
def _format_axes(self):
""" try to format axes if they are datelike """
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'%s'." % self.orient)
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'%s'." % self.orient)
class JSONTableWriter(FrameWriter):
_default_orient = 'records'
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
"""
Adds a `schema` attribute with the Table Schema, resets
the index (this can't be done in the caller, because the schema
inference needs to know what the index is), forces orient to
'records', and forces date_format to 'iso'.
"""
super(JSONTableWriter, self).__init__(
obj, orient, date_format, double_precision, ensure_ascii,
date_unit, default_handler=default_handler)
if date_format != 'iso':
msg = ("Trying to write with `orient='table'` and "
"`date_format='%s'`. Table Schema requires dates "
"to be formatted with `date_format='iso'`" % date_format)
raise ValueError(msg)
self.schema = build_table_schema(obj)
# NotImplemented on a column MultiIndex
if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
raise NotImplementedError(
"orient='table' is not supported for MultiIndex")
# TODO: Do this timedelta properly in objToJSON.c See GH #15137
if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or
len(obj.columns & obj.index.names)):
msg = "Overlapping names between the index and columns"
raise ValueError(msg)
obj = obj.copy()
timedeltas = obj.select_dtypes(include=['timedelta']).columns
if len(timedeltas):
obj[timedeltas] = obj[timedeltas].applymap(
lambda x: x.isoformat())
# Convert PeriodIndex to datetimes before serializing
if is_period_dtype(obj.index):
obj.index = obj.index.to_timestamp()
self.obj = obj.reset_index()
self.date_format = 'iso'
self.orient = 'records'
def write(self):
data = super(JSONTableWriter, self).write()
serialized = '{{"schema": {}, "data": {}}}'.format(
dumps(self.schema), data)
return serialized
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
convert_axes=True, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None, encoding=None,
lines=False):
"""
Convert a JSON string to pandas object
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file
could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
If True, infer dtypes, if a dict of column to dtype, then use those,
if False, then don't infer dtypes at all, applies only to the data.
convert_axes : boolean, default True
Try to convert the axes to the proper dtypes.
convert_dates : boolean, default True
List of columns to parse for dates; If True, then try to parse
datelike columns default is True; a column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
lines : boolean, default False
Read the file as a json object per line.
.. versionadded:: 0.19.0
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
.. versionadded:: 0.19.0
Returns
-------
result : Series or DataFrame, depending on the value of `typ`.
See Also
--------
DataFrame.to_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf,
encoding=encoding)
if isinstance(filepath_or_buffer, compat.string_types):
try:
exists = os.path.exists(filepath_or_buffer)
# if the filepath is too long will raise here
# 5874
except (TypeError, ValueError):
exists = False
if exists:
fh, handles = _get_handle(filepath_or_buffer, 'r',
encoding=encoding)
json = fh.read()
fh.close()
else:
json = filepath_or_buffer
elif hasattr(filepath_or_buffer, 'read'):
json = filepath_or_buffer.read()
else:
json = filepath_or_buffer
if lines:
# If given a json lines file, we break the string into lines, add
# commas and put it in a json list to make a valid json object.
lines = list(StringIO(json.strip()))
json = '[' + ','.join(lines) + ']'
obj = None
if typ == 'frame':
obj = FrameParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
dtype = dict(data=dtype)
obj = SeriesParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
return obj
class Parser(object):
_STAMP_UNITS = ('s', 'ms', 'us', 'ns')
_MIN_STAMPS = {
's': long(31536000),
'ms': long(31536000000),
'us': long(31536000000000),
'ns': long(31536000000000000)}
def __init__(self, json, orient, dtype=True, convert_axes=True,
convert_dates=True, keep_default_dates=False, numpy=False,
precise_float=False, date_unit=None):
self.json = json
if orient is None:
orient = self._default_orient
self.orient = orient
self.dtype = dtype
if orient == "split":
numpy = False
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError('date_unit must be one of %s' %
(self._STAMP_UNITS,))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
self.numpy = numpy
self.precise_float = precise_float
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.date_unit = date_unit
self.keep_default_dates = keep_default_dates
self.obj = None
def check_keys_split(self, decoded):
"checks that dict has only the appropriate keys for orient='split'"
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError(u("JSON data had unexpected key(s): %s") %
pprint_thing(bad_keys))
def parse(self):
# try numpy
numpy = self.numpy
if numpy:
self._parse_numpy()
else:
self._parse_no_numpy()
if self.obj is None:
return None
if self.convert_axes:
self._convert_axes()
self._try_convert_types()
return self.obj
def _convert_axes(self):
""" try to convert axes """
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
raise AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
""" try to parse a ndarray like into a column by inferring dtype """
# don't try to coerce, unless a force conversion
if use_dtypes:
if self.dtype is False:
return data, False
elif self.dtype is True:
pass
else:
# dtype to force
dtype = (self.dtype.get(name)
if isinstance(self.dtype, dict) else self.dtype)
if dtype is not None:
try:
dtype = np.dtype(dtype)
return data.astype(dtype), True
except:
return data, False
if convert_dates:
new_data, result = self._try_convert_to_date(data)
if result:
return new_data, True
result = False
if data.dtype == 'object':
# try float
try:
data = data.astype('float64')
result = True
except:
pass
if data.dtype.kind == 'f':
if data.dtype != 'float64':
# coerce floats to 64
try:
data = data.astype('float64')
result = True
except:
pass
# don't coerce 0-len data
if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
# coerce ints if we can
try:
new_data = data.astype('int64')
if (new_data == data).all():
data = new_data
result = True
except:
pass
# coerce ints to 64
if data.dtype == 'int':
# coerce floats to 64
try:
data = data.astype('int64')
result = True
except:
pass
return data, result
def _try_convert_to_date(self, data):
""" try to parse a ndarray like into a date column
try to coerce object in epoch/iso formats and
integer/float in epoch formats, return a boolean if parsing
was successful """
# no conversion on empty
if not len(data):
return data, False
new_data = data
if new_data.dtype == 'object':
try:
new_data = data.astype('int64')
except:
pass
# ignore numbers that are out of range
if issubclass(new_data.dtype.type, np.number):
in_range = (isnull(new_data.values) | (new_data > self.min_stamp) |
(new_data.values == iNaT))
if not in_range.all():
return data, False
date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors='raise',
unit=date_unit)
except ValueError:
continue
except:
break
return new_data, True
return data, False
def _try_convert_dates(self):
raise AbstractMethodError(self)
class SeriesParser(Parser):
_default_orient = 'index'
_split_keys = ('name', 'index', 'data')
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
self.obj = Series(
loads(json, precise_float=self.precise_float), dtype=None)
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
self.obj = Series(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
else:
self.obj = Series(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
def _try_convert_types(self):
if self.obj is None:
return
obj, result = self._try_convert_data(
'data', self.obj, convert_dates=self.convert_dates)
if result:
self.obj = obj
class FrameParser(Parser):
_default_orient = 'columns'
_split_keys = ('columns', 'index', 'data')
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
args = loads(json, dtype=None, numpy=True, labelled=True,
precise_float=self.precise_float)
if args:
args = (args[0].T, args[2], args[1])
self.obj = DataFrame(*args)
elif orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
self.obj = DataFrame(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
else:
self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
elif orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None).T
else:
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
def _process_converter(self, f, filt=None):
""" take a conversion function and possibly recreate the frame """
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.iteritems()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
def _try_convert_types(self):
if self.obj is None:
return
if self.convert_dates:
self._try_convert_dates()
self._process_converter(
lambda col, c: self._try_convert_data(col, c, convert_dates=False))
def _try_convert_dates(self):
if self.obj is None:
return
# our columns to parse
convert_dates = self.convert_dates
if convert_dates is True:
convert_dates = []
convert_dates = set(convert_dates)
def is_ok(col):
""" return if this col is ok to try for a date parse """
if not isinstance(col, compat.string_types):
return False
col_lower = col.lower()
if (col_lower.endswith('_at') or
col_lower.endswith('_time') or
col_lower == 'modified' or
col_lower == 'date' or
col_lower == 'datetime' or
col_lower.startswith('timestamp')):
return True
return False
self._process_converter(
lambda col, c: self._try_convert_to_date(c),
lambda col, c: ((self.keep_default_dates and is_ok(col)) or
col in convert_dates))
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/io/json/json.py | Python | mit | 24,553 |
#!/usr/bin/env python
from __future__ import print_function
import chainer
import chainer.functions as F
from chainer import Variable
class DCGANUpdater(chainer.training.StandardUpdater):
def __init__(self, *args, **kwargs):
self.gen, self.dis = kwargs.pop('models')
super(DCGANUpdater, self).__init__(*args, **kwargs)
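    # The losses below are the standard non-saturating GAN objectives written with
    # softplus: softplus(-y) == -log(sigmoid(y)) and softplus(y) == -log(1 - sigmoid(y)),
    # so loss_dis pushes y_real up and y_fake down, while loss_gen pushes y_fake up.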
def loss_dis(self, dis, y_fake, y_real):
batchsize = len(y_fake)
L1 = F.sum(F.softplus(-y_real)) / batchsize
L2 = F.sum(F.softplus(y_fake)) / batchsize
loss = L1 + L2
chainer.report({'loss': loss}, dis)
return loss
def loss_gen(self, gen, y_fake):
batchsize = len(y_fake)
loss = F.sum(F.softplus(-y_fake)) / batchsize
chainer.report({'loss': loss}, gen)
return loss
def update_core(self):
gen_optimizer = self.get_optimizer('gen')
dis_optimizer = self.get_optimizer('dis')
batch = self.get_iterator('main').next()
x_real = Variable(self.converter(batch, self.device)) / 255.
xp = chainer.cuda.get_array_module(x_real.data)
gen, dis = self.gen, self.dis
batchsize = len(batch)
y_real = dis(x_real)
z = Variable(xp.asarray(gen.make_hidden(batchsize)))
x_fake = gen(z)
y_fake = dis(x_fake)
dis_optimizer.update(self.loss_dis, dis, y_fake, y_real)
gen_optimizer.update(self.loss_gen, gen, y_fake)
| kiyukuta/chainer | examples/dcgan/updater.py | Python | mit | 1,441 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from os.path import join, normpath
from .base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': normpath(join(DJANGO_ROOT, 'default.db')),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
########## END TOOLBAR CONFIGURATION | python-spain/PyConES-2015 | pycones/pycones/settings/local.py | Python | mit | 1,632 |
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Bill Bumgarner and Bram Cohen
# Dave's comments:
# makeHelp has no less than 4 elif's based on uiname. Doh.
#
# I like the function parseargs. It makes no UI-specific assumptions.
from types import *
from cStringIO import StringIO
from BTL.translation import _
from BTL.obsoletepythonsupport import *
from BitTorrent import BTFailure
from BTL.bencode import bdecode
from BitTorrent.platform import is_frozen_exe
from BTL.exceptions import str_exc
class UsageException(BTFailure):
pass
def makeHelp(uiname, defaults):
ret = u''
ret += (_("Usage: %s ") % uiname)
if uiname.startswith('launchmany'):
ret += _("[OPTIONS] [TORRENTDIRECTORY]\n\n")
ret += _("If a non-option argument is present it's taken as the value\n"
"of the torrent_dir option.\n")
elif uiname == 'bittorrent-tracker' or uiname == 'test-client':
ret += _("OPTIONS")
elif uiname == 'bittorrent':
ret += _("[OPTIONS] [TORRENTFILES]\n")
elif uiname.startswith('bittorrent'):
ret += _("[OPTIONS] [TORRENTFILE]\n")
elif uiname.startswith('maketorrent'):
ret += _("[OPTION] TRACKER_URL FILE [FILE]\n")
ret += '\n'
ret += _("arguments are -\n") + formatDefinitions(defaults, 80)
return ret
def printHelp(uiname, defaults):
print makeHelp(uiname, defaults)
def formatDefinitions(options, COLS):
s = u''
indent = u" " * 10
width = COLS - 11
if width < 15:
width = COLS - 2
indent = " "
for option in options:
(longname, default, doc) = option
if doc == '':
continue
s += u'--' + longname
is_boolean = type(default) is bool
if is_boolean:
s += u', --no_' + longname
else:
s += u' <arg>'
s += u'\n'
if default is not None:
doc += _(u" (defaults to ") + repr(default) + u')'
i = 0
for word in doc.split():
if i == 0:
s += indent + word
i = len(word)
elif i + len(word) >= width:
s += u'\n' + indent + word
i = len(word)
else:
s += u' ' + word
i += len(word) + 1
s += u'\n\n'
return s
def usage(str):
raise UsageException(str)
def format_key(key):
if len(key) == 1:
return '-%s'%key
else:
return '--%s'%key
def parseargs(argv, defaults, minargs=None, maxargs=None, presets=None ):
"""This function parses command-line arguments and uses them to override
the presets which in turn override the defaults (see defaultargs.py).
As currently used, the presets come from a config file (see
configfile.py).
Options have the form:
--option value
where the word option is replaced with the option name, etc.
If a string or number appears on the line without being preceeded
by a --option, then the string or number is an argument.
@param argv: command-line arguments. Command-line options override
defaults and presets.
@param defaults: list of (optionname,value,documentation) 3-tuples.
@param minargs: minimum number of arguments in argv.
@param maxargs: maximum number of arguments in argv.
@param presets: a dict containing option-value pairs. Presets
typically come from a config file. Presets override defaults.
@return: the pair (config,args) where config is a dict containing
option-value pairs, and args is a list of the arguments in the
order they appeared in argv.
"""
assert type(argv)==list
assert type(defaults)==list
assert minargs is None or type(minargs) in (int,long) and minargs>=0
assert maxargs is None or type(maxargs) in (int,long) and maxargs>=minargs
assert presets is None or type(presets)==dict
config = {}
for option in defaults:
longname, default, doc = option
config[longname] = default
args = []
pos = 0
if presets is None:
presets = {}
else:
presets = presets.copy()
while pos < len(argv):
if argv[pos][:1] != '-': # not a cmdline option
args.append(argv[pos])
pos += 1
else:
key, value = None, None
if argv[pos].startswith('--'): # --aaa 1
if argv[pos].startswith('--no_'):
key = argv[pos][5:]
boolval = False
else:
key = argv[pos][2:]
boolval = True
if key not in config:
raise UsageException(_("unknown option ") + format_key(key))
if type(config[key]) is bool: # boolean cmd line switch, no value
value = boolval
pos += 1
else: # --argument value
if pos == len(argv) - 1:
usage(_("parameter passed in at end with no value"))
key, value = argv[pos][2:], argv[pos+1]
pos += 2
elif argv[pos][:1] == '-':
key = argv[pos][1:2]
if len(argv[pos]) > 2: # -a1
value = argv[pos][2:]
pos += 1
else: # -a 1
if pos == len(argv) - 1:
usage(_("parameter passed in at end with no value"))
value = argv[pos+1]
pos += 2
else:
raise UsageException(_("command line parsing failed at ")+argv[pos])
presets[key] = value
parse_options(config, presets, None)
config.update(presets)
# if a key appears in the config with a None value then this is because
# the key appears in the defaults with a None value and the value was
# not provided by the user. keys appearing in defaults with a none
# value are REQUIRED arguments.
for key, value in config.items():
if value is None:
usage(_("Option %s is required.") % format_key(key))
if minargs is not None and len(args) < minargs:
usage(_("Must supply at least %d arguments.") % minargs)
if maxargs is not None and len(args) > maxargs:
usage(_("Too many arguments - %d maximum.") % maxargs)
return (config, args)
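# --- Usage sketch (editor's addition, not part of the upstream module): how parseargs()
# --- folds command-line options over presets (e.g. from a config file) and defaults.
# --- The option names and values below are made up for illustration.
#
#   defaults = [('max_upload_rate', 20, 'maximum upload rate in kB/s'),
#               ('super_seeder', False, 'use super-seed mode')]
#   presets = {'max_upload_rate': '40'}   # e.g. read from a config file
#   config, args = parseargs(['--super_seeder', 'file.torrent'], defaults, presets=presets)
#   # config == {'max_upload_rate': 40, 'super_seeder': True}; args == ['file.torrent']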
def parse_options(defaults, newvalues, encoding):
"""Given the type provided by the default value, this tries to cast/convert
the corresponding newvalue to the type of the default value.
By calling eval() on it, in some cases!
Entertainingly, newvalue sometimes holds strings and, apparently,
sometimes holds values which have already been cast appropriately.
This function is like a boat made of shit, floating on a river of shit.
@param defaults: dict of key-value pairs where value is the default.
@param newvalues: dict of key-value pairs which override the default.
"""
assert type(defaults) == dict
assert type(newvalues) == dict
for key, value in newvalues.iteritems():
if not defaults.has_key(key):
raise UsageException(_("unknown option ") + format_key(key))
try:
t = type(defaults[key])
if t is bool:
if value in ('True', '1', True):
value = True
else:
value = False
newvalues[key] = value
elif t in (StringType, NoneType):
# force ASCII
newvalues[key] = value.decode('ascii').encode('ascii')
elif t in (IntType, LongType):
if value == 'False':
newvalues[key] = 0
elif value == 'True':
newvalues[key] = 1
else:
newvalues[key] = int(value)
elif t is FloatType:
newvalues[key] = float(value)
elif t in (ListType, TupleType, DictType):
if type(value) == StringType:
try:
n = eval(value)
assert type(n) == t
newvalues[key] = n
except:
newvalues[key] = t()
elif t is UnicodeType:
if type(value) == StringType:
try:
newvalues[key] = value.decode(encoding)
except:
newvalues[key] = value.decode('ascii')
else:
raise TypeError, str(t)
except ValueError, e:
raise UsageException(_("wrong format of %s - %s") % (format_key(key), str_exc(e)))
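# --- Usage sketch (editor's addition): parse_options() coerces the string values in
# --- `newvalues` to the types of the matching defaults, in place. Values below are
# --- illustrative.
#
#   defaults = {'port': 6881, 'ip': '', 'upnp': True}
#   newvalues = {'port': '6969', 'upnp': 'False'}
#   parse_options(defaults, newvalues, 'utf-8')
#   # newvalues is now {'port': 6969, 'upnp': False}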
| epsylon3/torrentflux | html/bin/clients/mainline/BitTorrent/parseargs.py | Python | gpl-2.0 | 9,389 |
from __future__ import print_function
# Note: this code was initially copied from the 'pyutools' package by its
# original author, and re-licensed under Theano's license.
import numpy
import theano
from theano.compile.mode import Mode
class MonitorMode(Mode):
"""
`MonitorMode` is a debug mode to easily step through function execution.
Its default behavior is to behave like the 'FAST_RUN' mode. By providing
either a `pre_func` (called before a node is executed) or a `post_func`
(called after a node is executed) monitoring function, the user can inspect
node behavior.
A typical use case is to detect the introduction of NaN values in a graph.
For an example of such a use case, see doc/tutorial/debug_faq.txt.
Parameters
----------
pre_func
A function to call before executing a thunk, with arguments:
- the thunk index
- the Apply node
- the thunk to be called
post_func
A function to call after executing a thunk, with the same three
arguments as `pre_func`.
optimizer
The optimizer to use. One may use for instance 'fast_compile' to skip
optimizations.
linker
DO NOT USE. This mode uses its own linker. The parameter is needed to
allow selecting optimizers to use.
"""
def __init__(self, pre_func=None, post_func=None,
optimizer='default', linker=None):
self.pre_func = pre_func
self.post_func = post_func
wrap_linker = theano.gof.WrapLinkerMany([theano.gof.OpWiseCLinker()],
[self.eval])
if optimizer == 'default':
optimizer = theano.config.optimizer
if (linker is not None and
not isinstance(linker.mode, MonitorMode)):
raise Exception("MonitorMode can only use its own linker! You "
"should not provide one.", linker)
super(MonitorMode, self).__init__(wrap_linker, optimizer=optimizer)
def __getstate__(self):
lnk, opt = super(MonitorMode, self).__getstate__()
return (lnk, opt, self.pre_func, self.post_func)
def __setstate__(self, state):
lnk, opt, pre_func, post_func = state
self.pre_func = pre_func
self.post_func = post_func
super(MonitorMode, self).__setstate__((lnk, opt))
def eval(self, i, node, fn):
"""
The method that calls the thunk `fn`.
"""
if self.pre_func is not None:
self.pre_func(i, node, fn)
fn()
if self.post_func is not None:
self.post_func(i, node, fn)
def clone(self, link_kwargs=None, optimizer="", **kwargs):
"""
Create a new instance of this Mode.
Keyword arguments can be provided for the linker, but they will be
ignored, because MonitorMode needs to use its own linker.
"""
if optimizer == "":
optimizer = self.provided_optimizer
new_mode = type(self)(pre_func=self.pre_func,
post_func=self.post_func,
linker=None,
optimizer=optimizer)
return new_mode
def detect_nan(i, node, fn):
for output in fn.outputs:
if (not isinstance(output[0], numpy.random.RandomState) and
numpy.isnan(output[0]).any()):
print('*** NaN detected ***')
theano.printing.debugprint(node)
print('Inputs : %s' % [input[0] for input in fn.inputs])
print('Outputs: %s' % [output[0] for output in fn.outputs])
break
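# --- Usage sketch (editor's addition): the typical NaN-hunting setup described in the
# --- MonitorMode docstring: compile a function with this mode and the detect_nan hook so
# --- every thunk's outputs are inspected right after execution.
if __name__ == '__main__':
    x = theano.tensor.dscalar('x')
    f = theano.function([x], [theano.tensor.log(x) * x],
                        mode=MonitorMode(post_func=detect_nan))
    f(0.0)  # log(0) * 0 produces a NaN, so detect_nan prints the offending Apply node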
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/Theano-0.7.0-py3.4.egg/theano/compile/monitormode.py | Python | gpl-2.0 | 3,659 |
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email [email protected]
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: Garrison.py
Age: Garrison
Date: October 2002
event manager hooks for the Garrison
"""
from Plasma import *
from PlasmaTypes import *
IsPublic = 0
boolWellBlocker = 0
class Garrison(ptResponder):
def __init__(self):
ptResponder.__init__(self)
self.id = 5024
self.version = 2
def OnFirstUpdate(self):
global IsPublic
thisComponent = self.key.getName()
if thisComponent != "VeryVerySpecialPythonFileMod":
print "Garrison.OnFirstUpdate(): this isn't the right script instance, ignoring rest of script"
return
parentname = None
try:
agevault = ptAgeVault()
ageinfo = agevault.getAgeInfo()
parent = ageinfo.getParentAgeLink()
parentinfo = parent.getAgeInfo()
parentname = parentinfo.getAgeFilename()
except:
pass
if parentname == "Neighborhood":
IsPublic = 1
print "Garrison.OnFirstUpdate(): this Garrison is the public instance, as its parent = ",parentname
else:
print "Garrison.OnFirstUpdate(): this Garrison is the regular aka Yeesha version, as its parent = ",parentname
def OnServerInitComplete(self):
thisComponent = self.key.getName()
if thisComponent != "VeryVerySpecialPythonFileMod":
print "Garrison.OnFirstUpdate(): this isn't the right script instance, ignoring rest of script"
return
global boolWellBlocker
ageSDL = PtGetAgeSDL()
ageSDL.setFlags("grsnWellFirstFloorBlocker",1,1)
ageSDL.sendToClients("grsnWellFirstFloorBlocker")
ageSDL.setNotify(self.key,"grsnWellFirstFloorBlocker",0.0)
boolWellBlocker = ageSDL["grsnWellFirstFloorBlocker"][0]
if IsPublic and not boolWellBlocker:
ageSDL["grsnWellFirstFloorBlocker"] = (1,)
elif not IsPublic and boolWellBlocker:
ageSDL["grsnWellFirstFloorBlocker"] = (0,)
def Load(self):
pass
def OnSDLNotify(self,VARname,SDLname,playerID,tag):
global boolWellBlocker
# if VARname == "grsnWellFirstFloorBlocker":
# ageSDL = PtGetAgeSDL()
# boolWellBlocker = ageSDL["grsnWellFirstFloorBlocker"][0]
# if IsPublic and not boolWellBlocker:
# ageSDL["grsnWellFirstFloorBlocker"] = (1,)
# elif not IsPublic and boolWellBlocker:
# ageSDL["grsnWellFirstFloorBlocker"] = (0,)
def OnNotify(self,state,id,events):
pass
| TOC-Shard/moul-scripts | Python/Garrison.py | Python | gpl-3.0 | 4,371 |
# Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""This is used to create/delete/drop the account database schema."""
from backends.db.tools.schema import Schema
__all__ = ["create_schema"]
def create_schema():
"""Return a Schema"""
from backends.db.schemas import account as patch_package
return Schema(CREATE, DROP, DELETE, patch_package, 'account2.patch')
CREATE = [
"""
CREATE SCHEMA account2
""",
"""
GRANT USAGE on SCHEMA account2 TO webapp
""",
"""
CREATE TABLE account2.user_profile (
id INTEGER NOT NULL PRIMARY KEY,
accepted_tos_on TIMESTAMP WITHOUT TIME ZONE,
email_notification boolean default false
)
""",
"""
GRANT SELECT, UPDATE, INSERT, DELETE
ON TABLE account2.user_profile TO webapp;
""",
"""
CREATE TABLE account2.plan (
id SERIAL PRIMARY KEY,
name TEXT,
description TEXT,
is_base_plan boolean default false,
available_from TIMESTAMP WITHOUT TIME ZONE,
available_until TIMESTAMP WITHOUT TIME ZONE,
price_table BYTEA,
promotional_days INTEGER
);
""",
"""
GRANT USAGE, SELECT, UPDATE ON SEQUENCE account2.plan_id_seq TO webapp;
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE account2.plan TO webapp
""",
"""
CREATE TABLE account2.capability (
id SERIAL PRIMARY KEY,
description TEXT NOT NULL,
code TEXT NOT NULL UNIQUE,
allow_amount boolean,
unit_amount BIGINT,
unit_price_table BYTEA
);
""",
"""
GRANT USAGE, SELECT, UPDATE
ON SEQUENCE account2.capability_id_seq TO webapp;
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE account2.capability TO webapp
""",
"""
CREATE TABLE account2.plan_capability (
id SERIAL PRIMARY KEY,
plan_id INTEGER NOT NULL REFERENCES account2.plan(id)
ON DELETE CASCADE,
capability_id INTEGER NOT NULL REFERENCES account2.capability(id)
ON DELETE CASCADE,
base_amount BIGINT,
UNIQUE (plan_id, capability_id)
);
""",
"""
GRANT USAGE, SELECT, UPDATE
ON SEQUENCE account2.plan_capability_id_seq TO webapp;
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE account2.plan_capability TO webapp
""",
"""
CREATE TABLE account2.user_plan (
id SERIAL PRIMARY KEY,
user_id INTEGER NOT NULL,
plan_id INTEGER NOT NULL REFERENCES account2.plan(id)
ON DELETE CASCADE,
active_from TIMESTAMP WITHOUT TIME ZONE NOT NULL,
active_until TIMESTAMP WITHOUT TIME ZONE,
cancel_date TIMESTAMP WITHOUT TIME ZONE,
sub_id INTEGER
)
""",
"""
GRANT USAGE, SELECT, UPDATE
ON SEQUENCE account2.user_plan_id_seq TO webapp;
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE account2.user_plan TO webapp
""",
"""
CREATE INDEX user_plan_user__plan_idx ON
account2.user_plan (user_id, plan_id)
""",
"""
CREATE TABLE account2.user_capability (
id SERIAL PRIMARY KEY,
user_id INTEGER NOT NULL,
capability_id INTEGER NOT NULL REFERENCES account2.capability(id)
ON DELETE CASCADE,
units BIGINT NOT NULL,
active_from TIMESTAMP WITHOUT TIME ZONE NOT NULL,
active_until TIMESTAMP WITHOUT TIME ZONE,
sub_id INTEGER
)
""",
"""
GRANT USAGE, SELECT, UPDATE
ON SEQUENCE account2.user_capability_id_seq TO webapp;
""",
"""
CREATE INDEX user_capability_user_idx ON
account2.user_capability (user_id)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE account2.user_capability TO webapp
""",
"""
CREATE VIEW account2.user_capability_summary as
select up.user_id, up.active_from, up.active_until,
c.code, pc.base_amount as amount
from account2.user_plan up,
account2.plan p,
account2.plan_capability pc,
account2.capability c
where up.plan_id = p.id and p.id = pc.plan_id
and pc.capability_id = c.id and p.is_base_plan is false
UNION ALL
select uc.user_id, uc.active_from, uc.active_until,
c.code, uc.units * c.unit_amount as amount
from account2.user_capability uc,
account2.capability c
where uc.capability_id=c.id
UNION ALL
select u.id as user_id, u.accepted_tos_on as active_from, null
as active_until, c.code, pc.base_amount as amount
from account2.user_profile u,
account2.plan p,
account2.plan_capability pc,
account2.capability c
where u.accepted_tos_on is not null and p.id = pc.plan_id and
pc.capability_id = c.id and p.is_base_plan is true
""",
"""
GRANT SELECT ON TABLE account2.user_capability_summary TO webapp
""",
]
DROP = []
DELETE = [
"DELETE FROM account2.user_plan",
"DELETE FROM account2.plan_capability",
"DELETE FROM account2.user_capability",
"DELETE FROM account2.capability",
"DELETE FROM account2.plan",
"DELETE FROM account2.user_profile",
"ALTER SEQUENCE account2.plan_id_seq RESTART WITH 1",
"ALTER SEQUENCE account2.capability_id_seq RESTART WITH 1",
"ALTER SEQUENCE account2.plan_capability_id_seq RESTART WITH 1",
"ALTER SEQUENCE account2.user_plan_id_seq RESTART WITH 1",
"ALTER SEQUENCE account2.user_capability_id_seq RESTART WITH 1",
]
| indradhanush/filesync-server | src/backends/db/schemas/account/__init__.py | Python | agpl-3.0 | 6,330 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from copy import copy
from weboob.core import CallErrors
from weboob.tools.application.repl import ReplApplication
from weboob.applications.boobmsg import Boobmsg
from weboob.capabilities.dating import CapDating, OptimizationNotFound
from weboob.tools.application.formatters.iformatter import PrettyFormatter
__all__ = ['HaveDate']
class EventListFormatter(PrettyFormatter):
MANDATORY_FIELDS = ('date', 'type')
def get_title(self, event):
s = u'(%s) %s' % (event.date, event.type)
if hasattr(event, 'contact') and event.contact:
s += u' — %s (%s)' % (event.contact.name, event.contact.id)
return s
def get_description(self, event):
if hasattr(event, 'message'):
return event.message
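# Illustrative sketch (added for documentation, not part of weboob): a minimal
# stand-in event with hypothetical values.  The formatter above would title it
# u'(2015-06-01) visit' and use its message attribute as the description.
class _ExampleEvent(object):
    date = '2015-06-01'
    type = 'visit'
    contact = None
    message = u'Hello'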
class HaveDate(Boobmsg):
APPNAME = 'havedate'
VERSION = '1.1'
COPYRIGHT = 'Copyright(C) 2010-YEAR Romain Bignon'
DESCRIPTION = "Console application allowing to interact with various dating websites " \
"and to optimize seduction algorithmically."
SHORT_DESCRIPTION = "interact with dating websites"
STORAGE_FILENAME = 'dating.storage'
STORAGE = {'optims': {}}
CAPS = CapDating
EXTRA_FORMATTERS = copy(Boobmsg.EXTRA_FORMATTERS)
EXTRA_FORMATTERS['events'] = EventListFormatter
COMMANDS_FORMATTERS = copy(Boobmsg.COMMANDS_FORMATTERS)
COMMANDS_FORMATTERS['optim'] = 'table'
COMMANDS_FORMATTERS['events'] = 'events'
def load_default_backends(self):
self.load_backends(CapDating, storage=self.create_storage(self.STORAGE_FILENAME))
def main(self, argv):
self.load_config()
try:
self.do('init_optimizations').wait()
except CallErrors as e:
self.bcall_errors_handler(e)
optimizations = self.storage.get('optims')
for optim, backends in optimizations.iteritems():
self.optims('start', backends, optim, store=False)
return ReplApplication.main(self, argv)
def do_query(self, id):
"""
query ID
Send a query to someone.
"""
_id, backend_name = self.parse_id(id, unique_backend=True)
for query in self.do('send_query', _id, backends=backend_name):
print('%s' % query.message)
def edit_optims(self, backend_names, optims_names, stop=False):
if optims_names is None:
print('Error: missing parameters.', file=self.stderr)
return 2
for optim_name in optims_names.split():
backends_optims = {}
for optim in self.do('get_optimization', optim_name, backends=backend_names):
if optim:
backends_optims[optim.backend] = optim
for backend_name, optim in backends_optims.iteritems():
if len(optim.CONFIG) == 0:
print('%s.%s does not require configuration.' % (backend_name, optim_name))
continue
was_running = optim.is_running()
if stop and was_running:
print('Stopping %s: %s' % (optim_name, backend_name))
optim.stop()
params = optim.get_config()
if params is None:
params = {}
print('Configuration of %s.%s' % (backend_name, optim_name))
print('-----------------%s-%s' % ('-' * len(backend_name), '-' * len(optim_name)))
for key, value in optim.CONFIG.iteritems():
params[key] = self.ask(value, default=params[key] if (key in params) else value.default)
optim.set_config(params)
if stop and was_running:
print('Starting %s: %s' % (optim_name, backend_name))
optim.start()
def optims(self, function, backend_names, optims, store=True):
if optims is None:
print('Error: missing parameters.', file=self.stderr)
return 2
for optim_name in optims.split():
try:
if store:
storage_optim = set(self.storage.get('optims', optim_name, default=[]))
self.stdout.write('%sing %s:' % (function.capitalize(), optim_name))
for optim in self.do('get_optimization', optim_name, backends=backend_names):
if optim:
# It's useless to start a started optim, or to stop a stopped one.
if (function == 'start' and optim.is_running()) or \
(function == 'stop' and not optim.is_running()):
continue
# Optim is not configured and would be, ask user to do it.
if function == 'start' and len(optim.CONFIG) > 0 and optim.get_config() is None:
self.edit_optims(optim.backend, optim_name)
ret = getattr(optim, function)()
self.stdout.write(' ' + optim.backend)
if not ret:
self.stdout.write('(failed)')
self.stdout.flush()
if store:
if function == 'start' and ret:
storage_optim.add(optim.backend)
elif function == 'stop':
try:
storage_optim.remove(optim.backend)
except KeyError:
pass
self.stdout.write('.\n')
except CallErrors as errors:
for backend, error, backtrace in errors:
if isinstance(error, OptimizationNotFound):
self.logger.error(u'Error(%s): Optimization "%s" not found' % (backend.name, optim_name))
else:
self.bcall_error_handler(backend, error, backtrace)
if store:
if len(storage_optim) > 0:
self.storage.set('optims', optim_name, list(storage_optim))
else:
self.storage.delete('optims', optim_name)
if store:
self.storage.save()
def complete_optim(self, text, line, *ignored):
args = line.split(' ')
if len(args) == 2:
return ['list', 'start', 'stop', 'edit']
elif len(args) == 3:
return [backend.name for backend in self.enabled_backends]
elif len(args) >= 4:
if args[2] == '*':
backend = None
else:
backend = args[2]
optims = set()
for optim in self.do('iter_optimizations', backends=backend):
optims.add(optim.id)
return sorted(optims - set(args[3:]))
def do_optim(self, line):
"""
optim [list | start | edit | stop] BACKEND [OPTIM1 [OPTIM2 ...]]
        All dating backends offer optimization services. This command can be
        used to manage them.
        Use * as the BACKEND value to apply the command to all backends.
Commands:
* list list all available optimizations of a backend
* start start optimization services on a backend
* edit configure an optimization service for a backend
* stop stop optimization services on a backend
"""
cmd, backend_name, optims_names = self.parse_command_args(line, 3)
if backend_name == '*':
backend_name = None
elif backend_name is not None and backend_name not in [b.name for b in self.enabled_backends]:
print('Error: No such backend "%s"' % backend_name, file=self.stderr)
return 1
if cmd == 'start':
return self.optims('start', backend_name, optims_names)
if cmd == 'stop':
return self.optims('stop', backend_name, optims_names)
if cmd == 'edit':
self.edit_optims(backend_name, optims_names, stop=True)
return
if cmd == 'list' or cmd is None:
if optims_names is not None:
optims_names = optims_names.split()
optims = {}
backends = set()
for optim in self.do('iter_optimizations', backends=backend_name):
if optims_names is not None and optim.id not in optims_names:
continue
if optim.is_running():
status = 'RUNNING'
else:
status = '-------'
if optim.id not in optims:
optims[optim.id] = {optim.backend: status}
else:
optims[optim.id][optim.backend] = status
backends.add(optim.backend)
backends = sorted(backends)
for name, backends_status in optims.iteritems():
line = [('name', name)]
for b in backends:
try:
status = backends_status[b]
except KeyError:
status = ''
line.append((b, status))
self.format(tuple(line))
return
print("No such command '%s'" % cmd, file=self.stderr)
return 1
def do_events(self, line):
"""
events
Display dating events.
"""
self.change_path([u'events'])
self.start_format()
for event in self.do('iter_events'):
self.cached_format(event)
| sputnick-dev/weboob | weboob/applications/havedate/havedate.py | Python | agpl-3.0 | 10,370 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyliblzma(PythonPackage):
"""Python bindings for liblzma"""
homepage = "https://launchpad.net/pyliblzma"
pypi = "pyliblzma/pyliblzma-0.5.3.tar.bz2"
version('0.5.3', sha256='08d762f36d5e59fb9bb0e22e000c300b21f97e35b713321ee504cfb442667957')
depends_on('py-setuptools', type='build')
depends_on('lzma')
| LLNL/spack | var/spack/repos/builtin/packages/py-pyliblzma/package.py | Python | lgpl-2.1 | 556 |
from PySide.QtCore import Qt, QPersistentModelIndex
from PySide.QtGui import QStringListModel
if __name__ == '__main__':
stringListModel = QStringListModel(['one', 'two'])
idx = stringListModel.index(1, 0)
persistentModelIndex = QPersistentModelIndex(idx)
stringListModel.data(persistentModelIndex, Qt.DisplayRole)
| M4rtinK/pyside-android | tests/QtGui/bug_716.py | Python | lgpl-2.1 | 333 |
#!/usr/bin/env python
################################################################################
# Copyright (C) 2015 The Qt Company Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of The Qt Company Ltd, nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import os
import sys
import datetime
import getopt
import subprocess
import fnmatch
import tempfile
import shutil
import inspect
def usage():
print 'Usage: %s [-v|--version-string=versionstring] [-i|--installer-path=/path/to/installerfw] [-a|--archive=archive.7z] <outputname>' % os.path.basename(sys.argv[0])
def substitute_file(infile, outfile, substitutions):
with open(infile, 'r') as f:
template = f.read()
with open(outfile, 'w') as f:
f.write(template.format(**substitutions))
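# Illustrative sketch (not part of the original script): substitute_file() uses
# str.format() placeholders, so a template line "Version: {version}" is written
# out as "Version: 4.0.0".  File names and values below are hypothetical.
def _substitute_file_example():
    substitute_file('config.xml.in', 'config.xml',
                    {'version': '4.0.0', 'date': '2015-01-01'})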
def ifw_template_dir():
script_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
source_dir = os.path.normpath(os.path.join(script_dir, '..'));
return os.path.normpath(os.path.join(source_dir, 'dist', 'installer', 'ifw'))
def main():
try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], 'hv:i:a:', ['help', 'version-string=', 'installer-path=', 'archive='])
    except getopt.GetoptError:
usage()
sys.exit(2)
if len(args) < 1:
usage()
sys.exit(2)
version = ''
ifw_location = ''
archive = ''
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-v', '--version-string'):
version = a
if o in ('-i', '--installer-path'):
ifw_location = a
if o in ('-a', '--archive'):
archive = a
if (version == ''):
raise Exception('Version not specified (--version-string)!')
if (ifw_location == ''):
raise Exception('Installer framework location not specified (--installer-path)!')
if (archive == ''):
raise Exception('Archive not specified (--archive)!')
installer_name = args[0]
config_postfix = ''
if sys.platform == 'darwin':
config_postfix = '-mac'
if sys.platform.startswith('win'):
config_postfix = '-windows'
if sys.platform.startswith('linux'):
config_postfix = '-linux'
installer_name = installer_name + '.run'
config_name = 'config' + config_postfix + '.xml'
try:
temp_dir = tempfile.mkdtemp()
except:
raise Exception('Failed to create a temporary directory!')
try:
substs = {}
substs['version'] = version
substs['date'] = datetime.date.today().isoformat()
template_dir = ifw_template_dir()
out_config_dir = os.path.join(temp_dir,'config')
out_packages_dir = os.path.join(temp_dir, 'packages')
shutil.copytree(os.path.join(template_dir, 'packages'), os.path.join(temp_dir, 'packages'))
shutil.copytree(os.path.join(template_dir, 'config'), os.path.join(temp_dir, 'config'))
for root, dirnames, filenames in os.walk(out_packages_dir):
for template in fnmatch.filter(filenames, '*.in'):
substitute_file(os.path.join(root, template), os.path.join(root, template[:-3]), substs)
os.remove(os.path.join(root, template))
for root, dirnames, filenames in os.walk(out_config_dir):
for template in fnmatch.filter(filenames, '*.in'):
substitute_file(os.path.join(root, template), os.path.join(root, template[:-3]), substs)
os.remove(os.path.join(root, template))
data_path = os.path.join(out_packages_dir, 'org.qtproject.qtcreator.application', 'data')
if not os.path.exists(data_path):
os.makedirs(data_path)
shutil.copy(archive, data_path)
ifw_call = [os.path.join(ifw_location, 'bin', 'binarycreator'), '-c', os.path.join(out_config_dir, config_name), '-p', out_packages_dir, installer_name, '--offline-only' ]
subprocess.check_call(ifw_call, stderr=subprocess.STDOUT)
finally:
print 'Cleaning up...'
shutil.rmtree(temp_dir)
print 'Done.'
if __name__ == '__main__':
main()
| danimo/qt-creator | scripts/packageIfw.py | Python | lgpl-2.1 | 5,571 |
#!/usr/bin/env python
"""Required credentials configuration."""
# See instructions in the README.md file for how to fill these in.
# The service account email address authorized by your Google contact.
EE_ACCOUNT = '<your-service-account>@developer.gserviceaccount.com'
# Copy your credentials from the console.
OAUTH_CLIENT_ID = '<your-client-id>'
OAUTH_CLIENT_SECRET = '<your-client-secret>'
# The private key associated with your service account in Privacy Enhanced
# Email format (.pem suffix).
EE_PRIVATE_KEY_FILE = 'privatekey.pem'
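# Illustrative sketch (not part of the original config): how these settings are
# typically consumed to authenticate the Earth Engine client.  The import is
# deferred so this module still loads without the ee package installed.
def _example_initialize():
    import ee
    credentials = ee.ServiceAccountCredentials(EE_ACCOUNT, EE_PRIVATE_KEY_FILE)
    ee.Initialize(credentials)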
| tylere/earthengine-api | demos/export-to-drive/config.py | Python | apache-2.0 | 542 |
from __future__ import absolute_import
import inspect
import os
import platform
import sys
import lit.Test
import lit.formats
import lit.TestingConfig
import lit.util
# LitConfig must be a new style class for properties to work
class LitConfig(object):
"""LitConfig - Configuration data for a 'lit' test runner instance, shared
across all tests.
The LitConfig object is also used to communicate with client configuration
    files; it is always passed in as the global variable 'lit' so that
configuration files can access common functionality and internal components
easily.
"""
def __init__(self, progname, path, quiet,
useValgrind, valgrindLeakCheck, valgrindArgs,
noExecute, debug, isWindows,
params, config_prefix = None,
maxIndividualTestTime = 0,
maxFailures = None,
parallelism_groups = {},
echo_all_commands = False):
# The name of the test runner.
self.progname = progname
# The items to add to the PATH environment variable.
self.path = [str(p) for p in path]
self.quiet = bool(quiet)
self.useValgrind = bool(useValgrind)
self.valgrindLeakCheck = bool(valgrindLeakCheck)
self.valgrindUserArgs = list(valgrindArgs)
self.noExecute = noExecute
self.debug = debug
self.isWindows = bool(isWindows)
self.params = dict(params)
self.bashPath = None
# Configuration files to look for when discovering test suites.
self.config_prefix = config_prefix or 'lit'
self.suffixes = ['cfg.py', 'cfg']
self.config_names = ['%s.%s' % (self.config_prefix,x) for x in self.suffixes]
self.site_config_names = ['%s.site.%s' % (self.config_prefix,x) for x in self.suffixes]
self.local_config_names = ['%s.local.%s' % (self.config_prefix,x) for x in self.suffixes]
self.numErrors = 0
self.numWarnings = 0
self.valgrindArgs = []
if self.useValgrind:
self.valgrindArgs = ['valgrind', '-q', '--run-libc-freeres=no',
'--tool=memcheck', '--trace-children=yes',
'--error-exitcode=123']
if self.valgrindLeakCheck:
self.valgrindArgs.append('--leak-check=full')
else:
# The default is 'summary'.
self.valgrindArgs.append('--leak-check=no')
self.valgrindArgs.extend(self.valgrindUserArgs)
self.maxIndividualTestTime = maxIndividualTestTime
self.maxFailures = maxFailures
self.parallelism_groups = parallelism_groups
self.echo_all_commands = echo_all_commands
@property
def maxIndividualTestTime(self):
"""
Interface for getting maximum time to spend executing
a single test
"""
return self._maxIndividualTestTime
@property
def maxIndividualTestTimeIsSupported(self):
"""
Returns a tuple (<supported> , <error message>)
where
`<supported>` is True if setting maxIndividualTestTime is supported
on the current host, returns False otherwise.
`<error message>` is an empty string if `<supported>` is True,
        otherwise it contains a string describing why setting
maxIndividualTestTime is not supported.
"""
return lit.util.killProcessAndChildrenIsSupported()
@maxIndividualTestTime.setter
def maxIndividualTestTime(self, value):
"""
Interface for setting maximum time to spend executing
a single test
"""
if not isinstance(value, int):
self.fatal('maxIndividualTestTime must set to a value of type int.')
self._maxIndividualTestTime = value
if self.maxIndividualTestTime > 0:
# The current implementation needs psutil on some platforms to set
# a timeout per test. Check it's available.
# See lit.util.killProcessAndChildren()
supported, errormsg = self.maxIndividualTestTimeIsSupported
if not supported:
self.fatal('Setting a timeout per test not supported. ' +
errormsg)
elif self.maxIndividualTestTime < 0:
self.fatal('The timeout per test must be >= 0 seconds')
def load_config(self, config, path):
"""load_config(config, path) - Load a config object from an alternate
path."""
if self.debug:
self.note('load_config from %r' % path)
config.load_from_path(path, self)
return config
def getBashPath(self):
"""getBashPath - Get the path to 'bash'"""
if self.bashPath is not None:
return self.bashPath
self.bashPath = lit.util.which('bash', os.pathsep.join(self.path))
if self.bashPath is None:
self.bashPath = lit.util.which('bash')
if self.bashPath is None:
self.bashPath = ''
# Check whether the found version of bash is able to cope with paths in
# the host path format. If not, don't return it as it can't be used to
# run scripts. For example, WSL's bash.exe requires '/mnt/c/foo' rather
# than 'C:\\foo' or 'C:/foo'.
if self.isWindows and self.bashPath:
command = [self.bashPath, '-c',
'[[ -f "%s" ]]' % self.bashPath.replace('\\', '\\\\')]
_, _, exitCode = lit.util.executeCommand(command)
if exitCode:
self.note('bash command failed: %s' % (
' '.join('"%s"' % c for c in command)))
self.bashPath = ''
if not self.bashPath:
self.warning('Unable to find a usable version of bash.')
return self.bashPath
def getToolsPath(self, dir, paths, tools):
if dir is not None and os.path.isabs(dir) and os.path.isdir(dir):
if not lit.util.checkToolsPath(dir, tools):
return None
else:
dir = lit.util.whichTools(tools, paths)
# bash
self.bashPath = lit.util.which('bash', dir)
if self.bashPath is None:
self.bashPath = ''
return dir
def _write_message(self, kind, message):
# Get the file/line where this message was generated.
f = inspect.currentframe()
# Step out of _write_message, and then out of wrapper.
f = f.f_back.f_back
file,line,_,_,_ = inspect.getframeinfo(f)
location = '%s:%d' % (file, line)
sys.stderr.write('%s: %s: %s: %s\n' % (self.progname, location,
kind, message))
def note(self, message):
if not self.quiet:
self._write_message('note', message)
def warning(self, message):
if not self.quiet:
self._write_message('warning', message)
self.numWarnings += 1
def error(self, message):
self._write_message('error', message)
self.numErrors += 1
def fatal(self, message):
self._write_message('fatal', message)
sys.exit(2)
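# Illustrative sketch (added for documentation, not part of lit): constructing a
# LitConfig by hand with minimal, hypothetical values.  In normal use lit's
# command-line driver builds this object from the parsed options.
def _example_lit_config():
    return LitConfig(progname='lit', path=[], quiet=False, useValgrind=False,
                     valgrindLeakCheck=False, valgrindArgs=[], noExecute=False,
                     debug=False, isWindows=(platform.system() == 'Windows'),
                     params={})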
| llvm-mirror/llvm | utils/lit/lit/LitConfig.py | Python | apache-2.0 | 7,264 |
#!/usr/bin/env python
"""
This module exports the tec (Tecplot) format to stl, vtk and vtp
Changes in version 1.1 (OpenWarp - Add Logging Functionality)
Added support for logging.
"""
import settings
import utility
import os
import sys
import subprocess
__author__ = "yedtoss"
__copyright__ = "Copyright (C) 2014-2016 TopCoder Inc. All rights reserved."
__version__ = "1.1"
def tec_to_vtp(inp, output):
"""
Convert tec to vtp format
Args:
inp: string, the path to the input file
output: string. the path to the output file
Returns:
Return the status of the conversion
"""
return subprocess.call(["vmtk", "vmtksurfacereader", "-ifile", str(inp), "-f", "tecplot", "-ofile", str(output)])
def vtp_to_other(inp, output, format='stl'):
"""
Convert vtp format to stl, vtk, tecplot, ply, pointdata
Args:
inp: string, the path to the input file
output: string. the path to the output file
Returns:
Return the status of the conversion
"""
return subprocess.call(["vmtk", "vmtksurfacewriter", "-ifile", str(inp),
"-f", str(format), "-ofile", str(output), "-mode", "ascii"])
def export_tec(files):
"""
Args:
files: array, a list of files to process. If empty or None the default files are processed
This function export the tec files generated by the nemoh module to other format
"""
tec_files = [settings.MESH_TEC_FILE, settings.FK_FORCE_TEC_FILE, settings.DIFFRACTION_FORCE_TEC_FILE,
settings.EXCITATION_FORCE_TEC_FILE, settings.IRF_TEC_FILE, settings.RADIATION_COEFFICIENTS_TEC_FILE,
settings.WAVE_FIELD_TEC_FILE]
if files and len(files) > 0:
tec_files = files
extensions = ['stl', 'vtk', 'ply', 'pointdata']
for tec_file in tec_files:
vtp_out = os.path.splitext(tec_file)[0] + ".vtp"
status = tec_to_vtp(tec_file, vtp_out)
if status == 0:
for ext in extensions:
out = os.path.splitext(vtp_out)[0] + "." + ext
vtp_to_other(vtp_out, out, ext)
if __name__ == '__main__':
utility.setup_logging(default_conf_path=settings.LOGGING_CONFIGURATION_FILE, logging_path=settings.LOG_FILE)
export_tec(sys.argv[1:])
| NREL/OpenWARP | source/openwarpgui/nemoh/export_tec.py | Python | apache-2.0 | 2,293 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flattened tree resource printer."""
from googlecloudsdk.core.resource import resource_printer_base
from googlecloudsdk.core.resource import resource_transform
def _Flatten(obj):
"""Flattens a JSON-serializable object into a list of tuples.
The first element of each tuple will be a key and the second element
will be a simple value.
For example, _Flatten({'a': ['hello', 'world'], 'b': {'x': 'bye'}})
will produce:
[
('a[0]', 'hello'),
('a[1]', 'world'),
('b.x', 'bye'),
]
Args:
obj: A JSON-serializable object.
Returns:
A list of tuples.
"""
def Flatten(obj, name, res):
"""Recursively appends keys in path from obj into res.
Args:
obj: The object to flatten.
name: The key name of the current obj.
res: The ordered result value list.
"""
if isinstance(obj, list):
if obj:
for i, item in enumerate(obj):
Flatten(item, '{name}[{index}]'.format(name=name, index=i), res)
else:
res.append((name, []))
elif isinstance(obj, dict):
if obj:
for k, v in sorted(obj.iteritems()):
Flatten(v, '{name}{dot}{key}'.format(
name=name, dot='.' if name else '', key=k), res)
else:
res.append((name, {}))
elif isinstance(obj, float):
res.append((name, resource_transform.TransformFloat(obj)))
else:
res.append((name, obj))
res = []
Flatten(obj, '', res)
return res
def _StringQuote(s, quote='"', escape='\\'):
"""Returns <quote>s<quote> with <escape> and <quote> in s escaped.
s.encode('string-escape') does not work with type(s) == unicode.
Args:
s: The string to quote.
quote: The outer quote character.
escape: The enclosed escape character.
Returns:
<quote>s<quote> with <escape> and <quote> in s escaped.
"""
entity = {'\f': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t'}
chars = []
if quote:
chars.append(quote)
for c in s:
if c in (escape, quote):
chars.append(escape)
elif c in entity:
c = entity[c]
chars.append(c)
if quote:
chars.append(quote)
return ''.join(chars)
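# Illustrative sketch (added for documentation, not part of the SDK): what the
# helpers above produce for a small, hypothetical record.
def _ExampleFlatten():
  # Returns [('a[0]', 'hello'), ('a[1]', 'world'), ('b.x', 'bye')].
  flattened = _Flatten({'a': ['hello', 'world'], 'b': {'x': 'bye'}})
  # Values with leading/trailing space or embedded newlines are escaped before
  # printing, e.g. _StringQuote(' bye') returns '" bye"'.
  return flattened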
class FlattenedPrinter(resource_printer_base.ResourcePrinter):
"""Prints a flattened tree representation of JSON-serializable objects.
A flattened tree. Each output line contains one *key*:*value* pair.
Printer attributes:
no-pad: Don't print space after the separator. The default adjusts the
space to align the values into the same output column. Use *no-pad*
for comparing resource outputs.
separator=_SEPARATOR_: Print _SEPARATOR_ between the *key* and *value*.
The default is ": ".
For example:
printer = resource_printer.Printer('flattened', out=sys.stdout)
printer.AddRecord({'a': ['hello', 'world'], 'b': {'x': 'bye'}})
produces:
---
a[0]: hello
a[1]: world
b.x: bye
"""
def __init__(self, *args, **kwargs):
super(FlattenedPrinter, self).__init__(*args, retain_none_values=False,
**kwargs)
def _AddRecord(self, record, delimit=True):
"""Immediately prints the record as flattened a flattened tree.
Args:
record: A JSON-serializable object.
delimit: Prints resource delimiters if True.
"""
if delimit:
self._out.write('---\n')
flattened_record = _Flatten(record)
if flattened_record:
pad = 'no-pad' not in self.attributes
separator = self.attributes.get('separator', ': ')
if pad:
max_key_len = max(len(key) for key, _ in flattened_record)
for key, value in flattened_record:
self._out.write(key)
self._out.write(separator)
if pad:
self._out.write(' ' * (max_key_len - len(key)))
val = unicode(value)
# Value must be one text line with leading/trailing space quoted.
if '\n' in val or val[0:1].isspace() or val[-1:].isspace():
val = _StringQuote(val)
self._out.write(val + '\n')
| KaranToor/MA450 | google-cloud-sdk/lib/googlecloudsdk/core/resource/flattened_printer.py | Python | apache-2.0 | 4,616 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Full coverage testing of the MimeDict object
#
# (C) Cerenity Contributors, All Rights Reserved.
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from Kamaelia.Data.MimeDict import MimeDict
#from MimeDict import MimeDict
#class MimeDict_InitTests(object):
class MimeDict_InitTests(unittest.TestCase):
def test_SmokeTest_NoArguments(self):
"__init__ - Creating an empty mime dict does not raise any exception"
x = MimeDict()
def test_SmokeTest_SubClassOfDict(self):
"__init__ - MimeDict items are also dictionaries"
x = MimeDict()
self.assert_(isinstance(x,MimeDict))
self.assert_(isinstance(x,dict))
def test__init__emptyDict_hasBody(self):
"__init__ - An empty MimeDict always has a __BODY__ attribute"
x = MimeDict()
self.assert_("__BODY__" in x)
def test__init__New__BODY__NotClobbered(self):
"__init__ - Passing over a __BODY__ argument should be stored and not clobbered"
x = MimeDict(__BODY__ = "Hello World")
self.assert_("__BODY__" in x)
self.assertEqual(x["__BODY__"],"Hello World")
#class MimeDict_StrTests(unittest.TestCase):
def test__str__emptyDict(self):
"__str__ - The string representation of an empty MimeDict should be precisely 1 empty line"
x = MimeDict()
self.assertEqual(str(x), '\r\n')
def test__str__emptyDictNonEmptyBody(self):
"__str__ - String representation of an empty MimeDict with a non-empty body should be that non-empty body with an empty line prepended"
someString = """This is
some random text
so there"""
x = MimeDict(__BODY__=someString)
self.assertEqual(str(x), '\r\n'+someString)
def test__str__NonEmptyDictEmptyBody(self):
"__str__ - String representation a non empty dict with no body should finish with an empty line - last 4 chars should be network EOL"
x = MimeDict(Hello="World", Fate="Success")
self.assertEqual(str(x)[-4:], "\r\n\r\n")
def test__str__NonEmptyDict(self):
"__str__ - String representation a non empty dict, non-empty body should finish with that body and leading blank line"
someString = """This is
some random text
so there"""
x = MimeDict(Hello="World", Fate="Success",__BODY__=someString)
self.assertEqual(str(x)[-(4+len(someString)):], "\r\n\r\n"+someString)
def test__str__simplestNonEmptyDict(self):
"__str__ - Dict with one header key, 1 associated string value should result in a single leading Key: Value line"
x= MimeDict(Hello="World")
lines = str(x).splitlines(True)
self.assertEqual("Hello: World\r\n", lines[0])
x= MimeDict(Sheep="Dolly")
lines = str(x).splitlines(True)
self.assertEqual("Sheep: Dolly\r\n", lines[0])
def test__str__SampleNonEmptyDict(self):
"__str__ - Dict with multiple headers each with 1 associated simple string value should result in leading Key: Value lines"
x = MimeDict(Hello="World", Sheep="Dolly", Marvin="Android", Cat="Garfield")
lines = str(x).splitlines(True)
self.assertEqual(lines[4], '\r\n')
header = lines[:4]
header.sort()
keys = x.keys()
keys.sort()
h=0
for k in keys:
if k == "__BODY__":
continue
self.assertEqual(header[h], "%s: %s\r\n"%(k,x[k]))
h += 1
def test__str__EmptyListValue(self):
"__str__ - Dict with one header key, List of no values results in a header with just a null value"
x= MimeDict(Hello=[])
lines = str(x).splitlines(True)
self.assertEqual("Hello: \r\n", lines[0])
def test__str__ListLengthOneValue(self):
"__str__ - Dict with one header key, List of 1 value results in a header with 1 headers, one with that value"
x= MimeDict(Hello=["World"])
lines = str(x).splitlines(True)
self.assertEqual("Hello: World\r\n", lines[0])
def test__str__ListLengthTwoValues(self):
"__str__ - Dict with one header key, List of 2 values results in a header with 2 headers, with each value, in the same order"
x= MimeDict(Greeting=["Hello","World"])
lines = str(x).splitlines(True)
self.assertEqual("Greeting: Hello\r\n", lines[0])
self.assertEqual("Greeting: World\r\n", lines[1])
def test__str__TwoListsLengthTwoValues(self):
"__str__ - Dict with 2 keys, each with lists of multiple values, both get inserted, possibly mixed up"
x= MimeDict(Greeting=["Hello","Bonjour"],Parting=["Farewell","Goodbye"])
lines = str(x).splitlines(True)
header = lines[:4]
header.sort()
self.assertEqual("Greeting: Bonjour\r\n", header[0])
self.assertEqual("Greeting: Hello\r\n", header[1])
self.assertEqual("Parting: Farewell\r\n", header[2])
self.assertEqual("Parting: Goodbye\r\n", header[3])
def test__str__FourListsDifferingLengths(self):
"__str__ - Dict with 4 keys, each with lists of multiple values, all get inserted, possibly mixed up"
x= MimeDict(Greeting=["Hello","Bonjour","Hi","Greetings"],
Parting=["Farewell","Goodbye","Ciao"],
Numbers=["One", "Two", "Three", "Four", "Five", "Six", "Seven"],
Empty=[]
)
lines = str(x).splitlines(True)
header = lines[:15]
header.sort()
self.assertEqual("Empty: \r\n", header[0])
self.assertEqual("Greeting: Bonjour\r\n", header[1])
self.assertEqual("Greeting: Greetings\r\n", header[2])
self.assertEqual("Greeting: Hello\r\n", header[3])
self.assertEqual("Greeting: Hi\r\n", header[4])
self.assertEqual("Numbers: Five\r\n", header[5])
self.assertEqual("Numbers: Four\r\n", header[6])
self.assertEqual("Numbers: One\r\n", header[7])
self.assertEqual("Numbers: Seven\r\n", header[8])
self.assertEqual("Numbers: Six\r\n", header[9])
self.assertEqual("Numbers: Three\r\n", header[10])
self.assertEqual("Numbers: Two\r\n", header[11])
self.assertEqual("Parting: Ciao\r\n", header[12])
self.assertEqual("Parting: Farewell\r\n", header[13])
self.assertEqual("Parting: Goodbye\r\n", header[14])
#class MimeDict_FromStringTests(unittest.TestCase):
def test_fromString_static(self):
"fromString - Is a static method in MimeDict that returns a MimeDict object"
x = MimeDict.fromString("")
self.assert_(isinstance(x,MimeDict))
def test_fromString_emptyString(self):
"fromString - Empty String results in an 'empty' dict)"
x = MimeDict.fromString("")
self.assertEqual(x, MimeDict())
def test_fromString_emptyLine(self):
"fromString - Empty line is an empty dict"
x = MimeDict.fromString("\r\n")
self.assertEqual(x, MimeDict())
def test_fromString_emptyLineAndBody(self):
"fromString - Random text preceded by a new line forms a body attribute"
randomText = "If the implementation is hard to explain, it's a bad idea."
x = MimeDict.fromString("\r\n"+randomText)
self.assertEqual(x, MimeDict(__BODY__=randomText))
def test_fromString_NoEmptyLineAndBody(self):
"fromString - Random text not preceded by a new line forms a body attribute"
randomText = "If the implementation is hard to explain, it's a bad idea."
x = MimeDict.fromString(randomText)
self.assertEqual(x, MimeDict(__BODY__=randomText))
def test_fromString_HeaderLineEmptyBody(self):
"fromString - A header line followed by an empty line is valid, has the form 'Header: Key'"
header = """Header: line\r\n"""
x = MimeDict.fromString(header+"\r\n")
self.assertEqual(x, MimeDict(Header="line"))
def test_fromString_ManyHeaderLineEmptyBody(self):
"fromString - Many header lines followed by an empty line is valid"
header = "Header: line\r\nHeeder: line\r\nHooder: line\r\n"
x = MimeDict.fromString(header+"\r\n")
self.assertEqual(x, MimeDict(Header="line",Heeder="line", Hooder="line"))
def test_fromString_HeaderLineNonEmptyBody(self):
"fromString - A header line followed by an empty line and a body is valid"
header = "Header: line\r\n"
body = "This is a random body\r\nWibbleWibble\r\n"
x = MimeDict.fromString(header+"\r\n"+body)
self.assertEqual(x, MimeDict(Header="line",__BODY__=body))
def test_fromString_ManyHeaderLineNonEmptyBody(self):
"fromString - Many header lines followed by an empty line and a body is valid"
header = "Header: line\r\nHeeder: line\r\nHooder: line\r\n"
body = "This is a random body\r\nWibbleWibble\r\n"
x = MimeDict.fromString(header+"\r\n" + body)
self.assertEqual(x, MimeDict(Header="line",Heeder="line", Hooder="line", __BODY__=body))
def test_fromString_HeaderMustBeFollowedByEmptyLine(self):
"""fromString - Header not followed by an empty line and empty body
results is invalid. The result is an empty header and the header as the
body."""
header = "Header: Twinkle Twinkle Little Star\r\n"
body = ""
x = MimeDict.fromString (header + "" + body) # empty "divider"
self.assertEqual(x["__BODY__"], header, "Invalid header results in header being treated as an unstructured body" )
def test_fromString_HeaderMustBeFollowedByEmptyLine_NonEmptyBody(self):
"""fromString - Header not followed by an empty line and non-empty body results is invalid. The result is an empty header and the original message"""
header = "Header: Twinkle Twinkle Little Star\r\n"
body = "How I wonder what you are"
message = header + "" + body # empty "divider"
x = MimeDict.fromString (message)
self.assertEqual(x["__BODY__"], message, "Invalid header results in entire being treated as an unstructured body" )
def test_fromString_HeaderMustBeFollowedByEmptyLine2(self):
"""fromString - Invalid header which was partially successfully parsed results in 'empty' dict - only valid key is __BODY__"""
header = "Header: Twinkle Twinkle Little Star\r\n"
body = ""
x = MimeDict.fromString (header + "" + body) # empty "divider"
self.assertEqual(x.keys(), ["__BODY__"])
def test_fromString_HeaderWithContinuationLines_EmptyBody(self):
"fromString - Header lines may continue over multiple lines, and are considered part of the header, meaning if the body is empty, the __BODY__ should be too"
header = "Header: Twinkle Twinkle Little Start\r\n How I wonder what you are\r\n"
body = ""
x = MimeDict.fromString(header+"\r\n" + body)
self.assertEqual(x["__BODY__"], "")
def test_fromString_HeaderWithContinuationLines_NonEmptyBody(self):
"fromString - Headers with continuations should not be extended by a body that looks like a continuation"
header = "Header: Twinkle Twinkle Little Start\r\n How I wonder what you are\r\n"
body = " This leading body looks like a continuation"
x = MimeDict.fromString(header+"\r\n" + body)
self.assertEqual(x["__BODY__"], body)
def test_fromString_HeaderWithContinuationLines_AllCaptured_One(self):
"fromString - A header with a continuation line is captured as a single string"
value = "Twinkle Twinkle Little Start\r\n How I wonder what you are"
header = "Header: " + value + "\r\n"
body = ""
x = MimeDict.fromString(header+"\r\n" + body)
self.assertEqual(x["Header"], value)
def test_fromString_HeaderWithContinuationLines_AllCaptured_ManyContinuations(self):
"fromString - A header with many continuation lines is captured as a single string"
value = "Twinkle\r\n Twi nkle\r\n Li ttle Start\r\n How I wonder what y\r\n u are\r\n No Really"
header = "Header: " + value + "\r\n"
body = ""
x = MimeDict.fromString(header+"\r\n" + body)
self.assertEqual(x["Header"], value)
def test_fromString_HeaderWithContinuationLines_AllCaptured_ManyHeaders(self):
"fromString - Multiple headers with continuation lines captured as a string for each header"
value1 = "Twinkle \r\n Twinkle"
value2 = "Little\r\n Star"
value3 = "How I \r\n wonder what you are"
header1 = "Header: " + value1 + "\r\n"
header2 = "Heeder: " + value2 + "\r\n"
header3 = "Hooder: " + value3 + "\r\n"
body = ""
message = header1+header2+header3+"\r\n" + body
x = MimeDict.fromString(message)
self.assertEqual(x["Header"], value1)
self.assertEqual(x["Heeder"], value2)
self.assertEqual(x["Hooder"], value3)
#class MimeDict_FromStringTests_RepeatedHeaders(unittest.TestCase):
def test_fromString_RepeatedHeaderResultsInList(self):
"fromString - Repeated header with same key results in a list of values associated'"
header = """Header: value 1\r\nHeader: value 2\r\nHeader: value 3\r\n"""
x = MimeDict.fromString(header+"\r\n")
self.assert_(isinstance(x["Header"],list),"Should be a list associated with 'Header'")
def test_fromString_RepeatedHeaderResultsInListInOriginalOrder(self):
"fromString - Repeated header with same key results in list with values in original order'"
header = """Header: value 1\r\nHeader: value 2\r\nHeader: value 3\r\n"""
x = MimeDict.fromString(header+"\r\n")
self.assertEqual(x["Header"], ["value 1","value 2","value 3"])
def test_fromString_RepeatedHeaders_SameOrder(self):
"fromString - Repeated headers after each other retain order in dictionary values"
headerset1 = """HeaderA: value 1\r\nHeaderA: value 2\r\nHeaderA: value 3\r\n"""
headerset2 = """HeaderB: value 4\r\nHeaderB: value 5\r\nHeaderB: value 6\r\n"""
x = MimeDict.fromString(headerset1 + headerset2 + "\r\n")
self.assertEqual(x["HeaderA"], ["value 1","value 2","value 3"])
self.assertEqual(x["HeaderB"], ["value 4","value 5","value 6"])
def test_fromString_RepeatedInterleaved_Headers_SameOrder(self):
"fromString - Repeated interleaved headers after each other retain order in dictionary values"
headers = "HeaderA: value 1\r\n"
headers += "HeaderB: value 4\r\n"
headers += "HeaderA: value 2\r\n"
headers += "HeaderB: value 5\r\n"
headers += "HeaderA: value 3\r\n"
headers += "HeaderB: value 6\r\n"
x = MimeDict.fromString(headers + "\r\n")
self.assertEqual(x["HeaderA"], ["value 1","value 2","value 3"])
self.assertEqual(x["HeaderB"], ["value 4","value 5","value 6"])
#class MimeDict___str__fromString_Roundtrips(unittest.TestCase):
def test___str__fromString_emptyDict(self):
"performing __str__ on an empty dict and then fromString should result in empty dict"
x = MimeDict()
y = MimeDict.fromString(str(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_OnlyBody(self):
"performing __str__ on a dict with just a __BODY__ and then fromString should result in the same dict"
x = MimeDict(__BODY__="Hello World")
y = MimeDict.fromString(str(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_OneHeader_NoBody(self):
"performing __str__/fromString halftrip on a dict with just one header, no body should result in identity"
x = MimeDict(Header="Hello World")
y = MimeDict.fromString(str(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_ManyDifferentHeaders_NoBody(self):
"performing __str__/fromString halftrip on a dict with just multiple different simple headers, no body should result in identity"
x = MimeDict(Header="Hello World",Heeder="Goodbye Smalltown", Hooder="Bingle")
y = MimeDict.fromString(str(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_ManySameHeaders_NoBody(self):
"performing __str__/fromString halftrip on a dict with single header type multiple times, no body should result in identity"
x = MimeDict(Header=[ "Hello World", "Goodbye Smalltown", "Bingle" ])
y = MimeDict.fromString(str(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_MultipleDifferentHeaders_NoBody(self):
"performing __str__/fromString halftrip on a dict with multiple header types multiple times, no body should result in identity"
x = MimeDict(HeaderA=[ "value 1", "value 2", "value 3" ],
HeaderB=[ "value 4", "value 5", "value 6" ])
y = MimeDict.fromString(str(x))
self.assertEqual(y, x)
self.assertEqual(y["HeaderA"],[ "value 1", "value 2", "value 3" ])
self.assertEqual(y["HeaderB"],[ "value 4", "value 5", "value 6" ])
self.assert_(x is not y)
#class MimeDict_fromString__str___Roundtrips(unittest.TestCase):
def test___str__fromString_emptyMessage(self):
"Performing a fromString, __str__ roundtrip results in identity forthe empty message"
x = "\r\n"
y = str(MimeDict.fromString(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_emptyHeaderNonEmptyBody(self):
"Identity check for fromString, __str__ roundtrip for the empty header, non empty body"
x = "\r\nHello World"
y = str(MimeDict.fromString(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_BasicNonEmptyHeader_EmptyBody(self):
"Identity check for fromString, __str__ roundtrip for single basic single header, empty body"
x = "Header: Hello\r\n\r\n"
y = str(MimeDict.fromString(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_BasicNonEmptyHeader_EmptyBody_OrderPreservation(self):
"Identity check for fromString, __str__ roundtrip for multiple basic single header, empty body, requires order preservation"
x = "Hooder: Bingle\r\nHeader: Hello\r\nHeeder: Wind\r\n\r\n"
y = str(MimeDict.fromString(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_BasicRepeatedNonEmptyHeader_EmptyBody(self):
"Identity check for fromString, __str__ roundtrip for multiple repeated basic single header, empty body, requires order preservation"
x = "Hooder: Bingle\r\nHeader: Hello\r\nHeeder: Wind\r\nHooder: Bingle\r\nHeader: Hello\r\nHeeder: Wind\r\n\r\n"
y = str(MimeDict.fromString(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_BasicRepeatedNonEmptyHeader_EmptyBodyDifferentContinuations(self):
"Identity check for fromString, __str__ roundtrip for header with multiple different continuations"
x = "Hooder: Bingle\r\nHeader: Hello\r\nHeeder: Wind\r\nHooder: Bingle\r\nHeader: Hello\r\nHeeder: Wind\r\n\r\n"
y = str(MimeDict.fromString(x))
self.assertEqual(x,y)
self.assert_(x is not y)
def test___str__fromString_MultipleHeadersWithContinuations(self):
"Identity check for fromString, __str__ roundtrip for header with multiple headers with continuations"
value1 = "Twinkle \r\n Twinkle"
value2 = "Little\r\n Star"
value3 = "How I \r\n wonder what you are"
header = "Header: " + value1 + "\r\n"
header += "Heeder: " + value2 + "\r\n"
header += "Hooder: " + value3 + "\r\n"
body = ""
message = header+"\r\n" + body
x = MimeDict.fromString(message)
y = MimeDict.fromString(str(x))
self.assertEqual(x,y)
self.assertEqual(str(x),str(y))
self.assertEqual(str(y),message)
def test___str__fromString_RepeatedSingleHeaderWithContinuations(self):
"Identity check for fromString, __str__ roundtrip for header with repeated headers with continuations"
value1 = "Twinkle \r\n Twinkle"
value2 = "Little\r\n Star"
header = "Header: " + value1 + "\r\n"
header += "Header: " + value2 + "\r\n"
body = ""
message = header+"\r\n" + body
x = MimeDict.fromString(message)
y = MimeDict.fromString(str(x))
self.assertEqual(x,y)
self.assertEqual(str(x),str(y))
self.assertEqual(str(x),message)
def test___str__CheckedAgainstOriginalRepeatedSingleHeaderWithContinuations(self):
"__str__ of fromString(message) checked against message for equality"
value1 = "Twinkle \r\n Twinkle"
value2 = "Little\r\n Star"
header = "Header: " + value1 + "\r\n"
header += "Header: " + value2 + "\r\n"
body = ""
message = header+"\r\n" + body
x = MimeDict.fromString(message)
self.assertEqual(str(x),message)
def test___str__fromString_RepeatedMultipleHeadersWithContinuations(self):
"Identity check for fromString, __str__ roundtrip for header with repeated multiple headers with continuations"
value1 = "Twinkle \r\n Twinkle"
value2 = "Little\r\n Star"
value3 = "How I \r\n wonder what you are"
header = "Header: " + value1 + "\r\n"
header += "Hooder: " + value2 + "\r\n"
header += "Hooder: " + value3 + "\r\n"
header += "Heeder: " + value3 + "\r\n"
header += "Header: " + value3 + "\r\n"
header += "Heeder: " + value2 + "\r\n"
header += "Heeder: " + value1 + "\r\n"
header += "Hooder: " + value1 + "\r\n"
header += "Header: " + value2 + "\r\n"
body = ""
message = header+"\r\n" + body
x = MimeDict.fromString(message)
y = MimeDict.fromString(str(x))
self.assertEqual(x,y)
self.assertEqual(str(x),str(y))
self.assertEqual(str(y),message)
def test___str__fromString_RepeatedMultipleHeadersWithDifferingContinuationSizes(self):
"Identity check for fromString, __str__ roundtrip for header with repeated multiple headers with continuations"
value1a = "Twinkle \r\n Twinkle"
value2a = "Little\r\n Star"
value3a = "How I \r\n wonder what you are"
value1b = "Twinkle \r\n Twinkle\r\n Twinkle"
value2b = "Little\r\n Star\r\n Star"
value3b = "How I \r\n wonder what you are\r\n Star"
value1c = "Twinkle \r\n Twinkle\r\n Star"
value2c = "Little\r\n Star\r\n Star"
value3c = "How I \r\n wonder what you are\r\n Star\r\n are!"
header = "Header: " + value1a + "\r\n"
header += "Hooder: " + value2a + "\r\n"
header += "Hooder: " + value3a + "\r\n"
header += "Heeder: " + value3b + "\r\n"
header += "Header: " + value3c + "\r\n"
header += "Heeder: " + value2b + "\r\n"
header += "Heeder: " + value1b + "\r\n"
header += "Hooder: " + value1c + "\r\n"
header += "Header: " + value2c + "\r\n"
body = ""
message = header+"\r\n" + body
x = MimeDict.fromString(message)
y = MimeDict.fromString(str(x))
self.assertEqual(x,y)
self.assertEqual(str(x),str(y))
self.assertEqual(str(y),message)
#class RoundtripHandlingForInvalids(unittest.TestCase):
def test_Roundtrip_InvalidSourceMessageEmptyBody(self):
"Roundtrip handling (fromString->__str__) for invalid messages with an empty body should NOT result in equality"
header = "Header: Twinkle Twinkle Little Star\r\n"
body = ""
message = header + "" + body # empty "divider"
x = MimeDict.fromString(message)
self.assertEqual(str(x), message)
def test_Roundtrip_InvalidSourceMessageNonEmptyBody(self):
"Roundtrip handling (fromString->__str__) for invalid messages with an non-empty body should NOT result in equality"
header = "Header: Twinkle Twinkle Little Star\r\n"
body = "How I wonder what you are"
message = header + "" + body # empty "divider"
x = MimeDict.fromString(message)
self.assertEqual(str(x), message)
#class DirectUpdateTests(unittest.TestCase):
def test_basicInsertion(self):
"Insertion into a dictionary succeeds"
x = MimeDict()
x["hello"] = "hello"
self.assertEqual("hello", x["hello"] )
def test_secondaryInsertion(self):
"Insertion of multiple values sequentially into a dictionary results in it remembering the last thing added"
x = MimeDict()
x["hello"] = "1hello1"
x["hello"] = "2hello2"
x["hello"] = "3hello3"
x["hello"] = "4hello4"
self.assertEqual("4hello4", x["hello"] )
def test_basicInsertion_Roundtrip(self):
"Insertion into a dictionary, then roundtripped -- fromString(str(x)) results in original value"
x = MimeDict()
x["hello"] =["2hello1", "2hello2"]
x["__BODY__"] = "Hello\nWorld\n"
stringified = str(x)
y = MimeDict.fromString(stringified)
self.assertEqual(x,y)
def test_InformationLossRoundtrip(self):
"If you put a list with a single string into a MimeDict, and try to send that across the network by itself, it will not be reconstituted as a list. This is because we have no real way of determining that the single value should or should not be a list"
x = MimeDict()
x["hello"] =["hello"]
x["__BODY__"] = "Hello\nWorld\n"
stringified = str(x)
y = MimeDict.fromString(stringified)
self.assertNotEqual(x,y)
def test_BasicDeletion(self):
"Deleting a key value succeeds correctly"
x = MimeDict()
x["hello"] ="hello"
x["__BODY__"] = "Hello\nWorld\n"
x["world"] ="world"
x["dolly"] ="dolly"
del x["world"]
y = MimeDict()
y["hello"] ="hello"
y["__BODY__"] = "Hello\nWorld\n"
y["dolly"] ="dolly"
str_x = str(x)
str_y = str(y)
self.assertEqual(x,y)
self.assertEqual(str_x, str_y)
class BugFixes(unittest.TestCase):
def test_EmbeddedNewlineInHeaderRoundtrip_fromInsertion(self):
"A header which contains a single carriage return keeps the carriage return embedded since it *isn't* a carriage return/line feed"
x = MimeDict()
x["header"] = "Hello\nWorld"
y = MimeDict.fromString(str(x))
self.assertEqual(y["__BODY__"], "")
self.assertEqual(y["header"], x["header"])
self.assertEqual(x["header"], "Hello\nWorld")
def test_EmptyFieldRoundTrip(self):
"Empty header remains empty"
x = MimeDict()
x["header"] = ""
y = MimeDict.fromString(str(x))
self.assertEqual(x["header"],y["header"])
def test_SingleSpaceFieldRoundTrip(self):
"Header with a single space remains a header with a single space"
x = MimeDict()
x["header"] = " "
y = MimeDict.fromString(str(x))
self.assertEqual(x["header"],y["header"])
if __name__=="__main__":
unittest.main()
# FIXME: Need to merge in the most uptodate version of this code & tests
# RELEASE: MH, MPS
| sparkslabs/kamaelia_ | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Support/Data/tests/test_MimeDict.py | Python | apache-2.0 | 27,574 |
import os
import base64
from collections import defaultdict
from django.db.models import F, Q
from xos.config import Config
from synchronizers.base.openstacksyncstep import OpenStackSyncStep
from synchronizers.base.syncstep import *
from core.models.site import Controller, SitePrivilege
from core.models.user import User
from core.models.controlleruser import ControllerUser, ControllerSitePrivilege
from xos.logger import observer_logger as logger
from synchronizers.base.ansible import *
import json
class SyncControllerSitePrivileges(OpenStackSyncStep):
provides=[SitePrivilege]
requested_interval=0
observes=ControllerSitePrivilege
playbook='sync_controller_users.yaml'
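    # map_sync_inputs() builds the field dict handed to the Ansible playbook
    # named above; map_sync_outputs() stores the resulting keystone role id
    # back on the ControllerSitePrivilege object.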
def map_sync_inputs(self, controller_site_privilege):
controller_register = json.loads(controller_site_privilege.controller.backend_register)
if not controller_site_privilege.controller.admin_user:
logger.info("controller %r has no admin_user, skipping" % controller_site_privilege.controller)
return
roles = [controller_site_privilege.site_privilege.role.role]
# setup user home site roles at controller
if not controller_site_privilege.site_privilege.user.site:
raise Exception('Siteless user %s'%controller_site_privilege.site_privilege.user.email)
else:
# look up tenant id for the user's site at the controller
#ctrl_site_deployments = SiteDeployment.objects.filter(
# site_deployment__site=controller_site_privilege.user.site,
# controller=controller_site_privilege.controller)
#if ctrl_site_deployments:
# # need the correct tenant id for site at the controller
# tenant_id = ctrl_site_deployments[0].tenant_id
# tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
user_fields = {
'endpoint':controller_site_privilege.controller.auth_url,
'endpoint_v3': controller_site_privilege.controller.auth_url_v3,
'domain': controller_site_privilege.controller.domain,
'name': controller_site_privilege.site_privilege.user.email,
'email': controller_site_privilege.site_privilege.user.email,
'password': controller_site_privilege.site_privilege.user.remote_password,
'admin_user': controller_site_privilege.controller.admin_user,
'admin_password': controller_site_privilege.controller.admin_password,
'ansible_tag':'%s@%s'%(controller_site_privilege.site_privilege.user.email.replace('@','-at-'),controller_site_privilege.controller.name),
'admin_tenant': controller_site_privilege.controller.admin_tenant,
'roles':roles,
'tenant':controller_site_privilege.site_privilege.site.login_base}
return user_fields
def map_sync_outputs(self, controller_site_privilege, res):
# results is an array in which each element corresponds to an
# "ok" string received per operation. If we get as many oks as
# the number of operations we issued, that means a grand success.
# Otherwise, the number of oks tell us which operation failed.
controller_site_privilege.role_id = res[0]['id']
controller_site_privilege.save()
def delete_record(self, controller_site_privilege):
controller_register = json.loads(controller_site_privilege.controller.backend_register)
if (controller_register.get('disabled',False)):
raise InnocuousException('Controller %s is disabled'%controller_site_privilege.controller.name)
if controller_site_privilege.role_id:
driver = self.driver.admin_driver(controller=controller_site_privilege.controller)
user = ControllerUser.objects.get(
controller=controller_site_privilege.controller,
user=controller_site_privilege.site_privilege.user
)
site = ControllerSite.objects.get(
controller=controller_site_privilege.controller,
                site=controller_site_privilege.site_privilege.site
)
driver.delete_user_role(
user.kuser_id,
site.tenant_id,
                controller_site_privilege.site_privilege.role.role
)
| xmaruto/mcord | xos/synchronizers/openstack/steps/sync_controller_site_privileges.py | Python | apache-2.0 | 4,346 |
from .assembler import KamiAssembler
| johnbachman/indra | indra/assemblers/kami/__init__.py | Python | bsd-2-clause | 37 |
# -*- encoding: utf-8 -*-
#
# :licence: see LICENSE
from twisted.python import usage
from scapy.all import IP, ICMP
from ooni.templates import scapyt
class UsageOptions(usage.Options):
optParameters = [['target', 't', '8.8.8.8', "Specify the target to ping"]]
class ExampleICMPPingScapy(scapyt.BaseScapyTest):
name = "Example ICMP Ping Test"
usageOptions = UsageOptions
def test_icmp_ping(self):
def finished(packets):
print packets
answered, unanswered = packets
for snd, rcv in answered:
rcv.show()
packets = IP(dst=self.localOptions['target'])/ICMP()
d = self.sr(packets)
d.addCallback(finished)
return d
| Karthikeyan-kkk/ooni-probe | ooni/nettests/examples/example_scapyt.py | Python | bsd-2-clause | 728 |
class RestServiceInterface(object):
def send(self, url, data=None):
raise NotImplementedError
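# Illustrative only (the subclass name and use of `requests` are assumptions,
# not part of this app): a concrete service subclasses the interface and
# implements send(), e.g.
#
#     class JsonPostService(RestServiceInterface):
#         def send(self, url, data=None):
#             requests.post(url, json=data)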
| spatialdev/onadata | onadata/apps/restservice/RestServiceInterface.py | Python | bsd-2-clause | 106 |
from .main import Test
class Inheritance(Test):
def TEMPLATE_CONTEXT_PROCESSORS(self):
return super(Inheritance, self).TEMPLATE_CONTEXT_PROCESSORS() + (
'tests.settings.base.test_callback',)
| ramaseshan/django-configurations | tests/settings/multiple_inheritance.py | Python | bsd-3-clause | 218 |
r"""
Synthetic seismograms using the convolutional model
---------------------------------------------------
The simplest way to get a seismogram (in time x offset) is through the
convolutional model
.. math::
trace(t) = wavelet(t) \ast reflectivity(t)
Module :mod:`fatiando.seismic.conv` defines functions for doing this
convolution, calculating the required reflectivity, and converting a model
from depth to time.
"""
import numpy as np
import matplotlib.pyplot as plt
from fatiando.seismic import conv
from fatiando.vis import mpl
# Define the parameters of our depth model
n_samples, n_traces = [600, 100]
velocity = 1500*np.ones((n_samples, n_traces))
# We'll put two interfaces in depth
velocity[150:, :] = 2000
velocity[400:, :] = 3500
dt = 2e-3
# We need to convert the depth model we made above into time
vel_l = conv.depth_2_time(velocity, velocity, dt=dt, dz=1)
# and we'll assume the density is homogeneous
rho_l = 2200*np.ones(np.shape(vel_l))
# With that, we can calculate the reflectivity model in time
rc = conv.reflectivity(vel_l, rho_l)
# and finally perform our convolution
synt = conv.convolutional_model(rc, 30, conv.rickerwave, dt=dt)
# We can use the utility function in fatiando.vis.mpl to plot the seismogram
fig, axes = plt.subplots(1, 2, figsize=(8, 5))
ax = axes[0]
ax.set_title("Velocity model (in depth)")
tmp = ax.imshow(velocity, extent=[0, n_traces, n_samples, 0],
cmap="copper", aspect='auto', origin='upper')
fig.colorbar(tmp, ax=ax, pad=0, aspect=50)
ax.set_xlabel('Trace')
ax.set_ylabel('Depth (m)')
ax = axes[1]
ax.set_title("Synthetic seismogram")
mpl.seismic_wiggle(synt[:, ::20], dt, scale=1)
mpl.seismic_image(synt, dt, cmap="RdBu_r", aspect='auto')
ax.set_xlabel('Trace')
ax.set_ylabel('Time (s)')
plt.tight_layout()
plt.show()
| rafaelmds/fatiando | gallery/seismic/convolutional_model.py | Python | bsd-3-clause | 1,804 |
"""
This example shows to embed a Mayavi view in a wx frame.
The trick is to create a `HasTraits` object, as in the
mlab_traits_ui.py, mayavi_traits_ui.py, or the modifying_mlab_source.py
examples (:ref:`example_mlab_traits_ui`, :ref:`example_mayavi_traits_ui`,
:ref:`example_mlab_interactive_dialog`).
Calling the `edit_traits` method returns a `ui` object whose
`control` attribute is the wx widget. It can thus be embedded in a
standard wx application.
In this example, the wx part is very simple. See
:ref:`example_wx_mayavi_embed_in_notebook` for an example of more complex
embedding of Mayavi scenes in Wx applications.
"""
from numpy import ogrid, sin
from traits.api import HasTraits, Instance
from traitsui.api import View, Item
from mayavi.sources.api import ArraySource
from mayavi.modules.api import IsoSurface
from mayavi.core.ui.api import SceneEditor, MlabSceneModel
class MayaviView(HasTraits):
scene = Instance(MlabSceneModel, ())
# The layout of the panel created by Traits
view = View(Item('scene', editor=SceneEditor(), resizable=True,
show_label=False),
resizable=True)
def __init__(self):
HasTraits.__init__(self)
# Create some data, and plot it using the embedded scene's engine
x, y, z = ogrid[-10:10:100j, -10:10:100j, -10:10:100j]
scalars = sin(x*y*z)/(x*y*z)
src = ArraySource(scalar_data=scalars)
self.scene.engine.add_source(src)
src.add_module(IsoSurface())
#-----------------------------------------------------------------------------
# Wx Code
import wx
class MainWindow(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'Mayavi in Wx')
self.mayavi_view = MayaviView()
# Use traits to create a panel, and use it as the content of this
# wx frame.
self.control = self.mayavi_view.edit_traits(
parent=self,
kind='subpanel').control
self.Show(True)
app = wx.PySimpleApp()
frame = MainWindow(None, wx.ID_ANY)
app.MainLoop()
| dmsurti/mayavi | examples/mayavi/interactive/wx_embedding.py | Python | bsd-3-clause | 2,111 |
#!/usr/bin/env kross
# -*- coding: utf-8 -*-
import os, datetime, sys, traceback, pickle
import Kross, Plan
T = Kross.module("kdetranslation")
#TODO some ui
Plan.beginCommand( T.i18nc( "(qtundoformat)", "Clear all external appointments" ) )
Plan.project().clearExternalAppointments()
Plan.endCommand()
| wyuka/calligra | plan/plugins/scripting/scripts/busyinfoclear.py | Python | gpl-2.0 | 305 |
from django.core.urlresolvers import reverse
from django.views.generic import View
from pulp.server import exceptions as pulp_exceptions
from pulp.server.auth import authorization
from pulp.server.controllers import user as user_controller
from pulp.server.managers import factory
from pulp.server.webservices.views.decorators import auth_required
from pulp.server.webservices.views.util import (generate_json_response,
generate_json_response_with_pulp_encoder,
generate_redirect_response,
parse_json_body)
class RolesView(View):
"""
Views for roles.
"""
@auth_required(authorization.READ)
def get(self, request):
"""
List all roles.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:return: Response containing a list of roles
:rtype: django.http.HttpResponse
"""
role_query_manager = factory.role_query_manager()
permissions_manager = factory.permission_manager()
roles = role_query_manager.find_all()
for role in roles:
users = [u.login for u in user_controller.find_users_belonging_to_role(role['id'])]
role['users'] = users
resource_permission = {}
# isolate schema change
if role['permissions']:
for item in role['permissions']:
resource = item['resource']
operations = item.get('permission', [])
resource_permission[resource] = [permissions_manager.operation_value_to_name(o)
for o in operations]
role['permissions'] = resource_permission
link = {'_href': reverse('role_resource',
kwargs={'role_id': role['id']})}
role.update(link)
return generate_json_response_with_pulp_encoder(roles)
@auth_required(authorization.CREATE)
@parse_json_body(json_type=dict)
def post(self, request):
"""
Create a new role.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:return: Response containing the role
:rtype: django.http.HttpResponse
"""
role_data = request.body_as_json
role_id = role_data.get('role_id', None)
display_name = role_data.get('display_name', None)
description = role_data.get('description', None)
manager = factory.role_manager()
role = manager.create_role(role_id, display_name, description)
link = {'_href': reverse('role_resource',
kwargs={'role_id': role['id']})}
role.update(link)
response = generate_json_response_with_pulp_encoder(role)
redirect_response = generate_redirect_response(response, link['_href'])
return redirect_response
class RoleResourceView(View):
"""
Views for a single role.
"""
@auth_required(authorization.READ)
def get(self, request, role_id):
"""
Retrieve a specific role.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param role_id: id for the requested role
:type role_id: str
:return: Response containing the role
:rtype: django.http.HttpResponse
:raises: MissingResource if role ID does not exist
"""
role = factory.role_query_manager().find_by_id(role_id)
if role is None:
raise pulp_exceptions.MissingResource(role_id)
role['users'] = [u.login for u in user_controller.find_users_belonging_to_role(role['id'])]
permissions_manager = factory.permission_manager()
# isolate schema change
resource_permission = {}
for item in role['permissions']:
resource = item['resource']
operations = item.get('permission', [])
resource_permission[resource] = [permissions_manager.operation_value_to_name(o)
for o in operations]
role['permissions'] = resource_permission
link = {'_href': reverse('role_resource',
kwargs={'role_id': role['id']})}
role.update(link)
return generate_json_response_with_pulp_encoder(role)
@auth_required(authorization.DELETE)
def delete(self, request, role_id):
"""
Delete a role.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param role_id: id for the requested role
:type role_id: str
:return: An empty response
:rtype: django.http.HttpResponse
"""
manager = factory.role_manager()
result = manager.delete_role(role_id)
return generate_json_response(result)
@auth_required(authorization.UPDATE)
@parse_json_body(json_type=dict)
def put(self, request, role_id):
"""
Update a specific role.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param role_id: id for the requested role
:type role_id: str
:return: Response containing the role
:rtype: django.http.HttpResponse
"""
role_data = request.body_as_json
delta = role_data.get('delta', None)
manager = factory.role_manager()
role = manager.update_role(role_id, delta)
link = {'_href': reverse('role_resource',
kwargs={'role_id': role['id']})}
role.update(link)
return generate_json_response_with_pulp_encoder(role)
class RoleUsersView(View):
"""
Views for user membership within a role
"""
@auth_required(authorization.READ)
def get(self, request, role_id):
"""
List Users belonging to a role.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param role_id: id for the requested role
:type role_id: str
:return: Response containing the users
:rtype: django.http.HttpResponse
"""
role_users = user_controller.find_users_belonging_to_role(role_id)
return generate_json_response_with_pulp_encoder(role_users)
@auth_required(authorization.UPDATE)
@parse_json_body(json_type=dict)
def post(self, request, role_id):
"""
Add user to a role.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param role_id: id for the requested role
:type role_id: str
:return: An empty response
:rtype: django.http.HttpResponse
:raises: InvalidValue some parameters are invalid
"""
params = request.body_as_json
login = params.get('login', None)
if login is None:
raise pulp_exceptions.InvalidValue(login)
role_manager = factory.role_manager()
add_user = role_manager.add_user_to_role(role_id, login)
return generate_json_response(add_user)
class RoleUserView(View):
"""
View for specific user membership within a role.
"""
@auth_required(authorization.DELETE)
def delete(self, request, role_id, login):
"""
Remove user from a role.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param role_id: id for the requested role
:type role_id: str
:param login: id for the requested user
:type login: str
:return: An empty response
:rtype: django.http.HttpResponse
"""
role_manager = factory.role_manager()
remove_user = role_manager.remove_user_from_role(role_id, login)
return generate_json_response(remove_user)
| ulif/pulp | server/pulp/server/webservices/views/roles.py | Python | gpl-2.0 | 7,995 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import formatdate
from erpnext.controllers.website_list_for_contact import get_customers_suppliers
def get_context(context):
context.no_cache = 1
context.show_sidebar = True
context.doc = frappe.get_doc(frappe.form_dict.doctype, frappe.form_dict.name)
context.parents = frappe.form_dict.parents
context.doc.supplier = get_supplier()
context.doc.rfq_links = get_link_quotation(context.doc.supplier, context.doc.name)
unauthorized_user(context.doc.supplier)
update_supplier_details(context)
context["title"] = frappe.form_dict.name
def get_supplier():
doctype = frappe.form_dict.doctype
parties_doctype = 'Request for Quotation Supplier' if doctype == 'Request for Quotation' else doctype
customers, suppliers = get_customers_suppliers(parties_doctype, frappe.session.user)
return suppliers[0] if suppliers else ''
def check_supplier_has_docname_access(supplier):
status = True
if frappe.form_dict.name not in frappe.db.sql_list("""select parent from `tabRequest for Quotation Supplier`
where supplier = %s""", (supplier,)):
status = False
return status
def unauthorized_user(supplier):
status = check_supplier_has_docname_access(supplier) or False
if status == False:
frappe.throw(_("Not Permitted"), frappe.PermissionError)
def update_supplier_details(context):
supplier_doc = frappe.get_doc("Supplier", context.doc.supplier)
context.doc.currency = supplier_doc.default_currency or frappe.get_cached_value('Company', context.doc.company, "default_currency")
context.doc.currency_symbol = frappe.db.get_value("Currency", context.doc.currency, "symbol", cache=True)
context.doc.number_format = frappe.db.get_value("Currency", context.doc.currency, "number_format", cache=True)
context.doc.buying_price_list = supplier_doc.default_price_list or ''
def get_link_quotation(supplier, rfq):
quotation = frappe.db.sql(""" select distinct `tabSupplier Quotation Item`.parent as name,
`tabSupplier Quotation`.status, `tabSupplier Quotation`.transaction_date from
`tabSupplier Quotation Item`, `tabSupplier Quotation` where `tabSupplier Quotation`.docstatus < 2 and
`tabSupplier Quotation Item`.request_for_quotation =%(name)s and
`tabSupplier Quotation Item`.parent = `tabSupplier Quotation`.name and
`tabSupplier Quotation`.supplier = %(supplier)s order by `tabSupplier Quotation`.creation desc""",
{'name': rfq, 'supplier': supplier}, as_dict=1)
for data in quotation:
data.transaction_date = formatdate(data.transaction_date)
return quotation or None
| ebukoz/thrive | erpnext/templates/pages/rfq.py | Python | gpl-3.0 | 2,722 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import exceptions, SUPERUSER_ID
from odoo.addons.sale.controllers.portal import CustomerPortal
from odoo.http import request, route
from odoo.tools import consteq
class SaleStockPortal(CustomerPortal):
def _stock_picking_check_access(self, picking_id, access_token=None):
picking = request.env['stock.picking'].browse([picking_id])
picking_sudo = picking.sudo()
try:
picking.check_access_rights('read')
picking.check_access_rule('read')
except exceptions.AccessError:
if not access_token or not consteq(picking_sudo.sale_id.access_token, access_token):
raise
return picking_sudo
@route(['/my/picking/pdf/<int:picking_id>'], type='http', auth="public", website=True)
def portal_my_picking_report(self, picking_id, access_token=None, **kw):
""" Print delivery slip for customer, using either access rights or access token
to be sure customer has access """
try:
picking_sudo = self._stock_picking_check_access(picking_id, access_token=access_token)
except exceptions.AccessError:
return request.redirect('/my')
# print report as SUPERUSER, since it require access to product, taxes, payment term etc.. and portal does not have those access rights.
pdf = request.env.ref('stock.action_report_delivery').with_user(SUPERUSER_ID)._render_qweb_pdf([picking_sudo.id])[0]
pdfhttpheaders = [
('Content-Type', 'application/pdf'),
('Content-Length', len(pdf)),
]
return request.make_response(pdf, headers=pdfhttpheaders)
| jeremiahyan/odoo | addons/sale_stock/controllers/portal.py | Python | gpl-3.0 | 1,748 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import test_work_entry
from . import test_work_intervals
| jeremiahyan/odoo | addons/hr_work_entry_contract/tests/__init__.py | Python | gpl-3.0 | 164 |
#!/usr/bin/env python
# Test whether a client sends a correct SUBSCRIBE to a topic with QoS 1.
# The client should connect to port 1888 with keepalive=60, clean session set,
# and client id subscribe-qos1-test
# The test will send a CONNACK message to the client with rc=0. Upon receiving
# the CONNACK and verifying that rc=0, the client should send a SUBSCRIBE
# message to subscribe to topic "qos1/test" with QoS=1. If rc!=0, the client
# should exit with an error.
# Upon receiving the correct SUBSCRIBE message, the test will reply with a
# SUBACK message with the accepted QoS set to 1. On receiving the SUBACK
# message, the client should send a DISCONNECT message.
import inspect
import os
import subprocess
import socket
import sys
import time
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("subscribe-qos1-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
mid = 1
subscribe_packet = mosq_test.gen_subscribe(mid, "qos1/test", 1)
suback_packet = mosq_test.gen_suback(mid, 1)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', 1888))
sock.listen(5)
client_args = sys.argv[1:]
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
client = mosq_test.start_client(filename=sys.argv[1].replace('/', '-'), cmd=client_args, env=env)
try:
(conn, address) = sock.accept()
conn.settimeout(10)
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "subscribe", subscribe_packet):
conn.send(suback_packet)
if mosq_test.expect_packet(conn, "disconnect", disconnect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| telefonicaid/fiware-IoTAgent-Cplusplus | third_party/mosquitto-1.4.4/test/lib/02-subscribe-qos1.py | Python | agpl-3.0 | 2,349 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""Base container class for all neural network models."""
import copy
from .. import symbol, ndarray, initializer
from ..symbol import Symbol
from ..ndarray import NDArray
from .. import name as _name
from .parameter import Parameter, ParameterDict, DeferredInitializationError
from .utils import _indent
class _BlockScope(object):
"""Scope for collecting child `Block`s."""
_current = None
def __init__(self, block):
self._block = block
self._counter = {}
self._old_scope = None
self._name_scope = None
@staticmethod
def create(prefix, params, hint):
"""Creates prefix and params for new `Block`."""
current = _BlockScope._current
if current is None:
if prefix is None:
prefix = _name.NameManager.current.get(None, hint) + '_'
if params is None:
params = ParameterDict(prefix)
else:
params = ParameterDict(params.prefix, params)
return prefix, params
if prefix is None:
count = current._counter.get(hint, 0)
prefix = '%s%d_'%(hint, count)
current._counter[hint] = count + 1
if params is None:
parent = current._block.params
params = ParameterDict(parent.prefix+prefix, parent._shared)
else:
params = ParameterDict(params.prefix, params)
return current._block.prefix+prefix, params
def __enter__(self):
self._old_scope = _BlockScope._current
_BlockScope._current = self
self._name_scope = _name.Prefix(self._block.prefix)
self._name_scope.__enter__()
return self
def __exit__(self, ptype, value, trace):
self._name_scope.__exit__(ptype, value, trace)
self._name_scope = None
_BlockScope._current = self._old_scope
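# _flatten/_regroup convert between arbitrarily nested lists of NDArray/Symbol
# and a flat list plus a format descriptor, so a HybridBlock can feed a flat
# argument list to its cached graph and then restore the caller's nesting on
# the way out.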
def _flatten(args):
if isinstance(args, NDArray):
return [args], int(0)
if isinstance(args, Symbol):
length = len(args.list_outputs())
length = length if length > 1 else 0
return [args], int(length)
assert isinstance(args, (list, tuple)), \
"HybridBlock input must be (nested) list of Symbol or NDArray, " \
"but got %s of type %s"%(str(args), str(type(args)))
flat = []
fmts = []
for i in args:
arg, fmt = _flatten(i)
flat.extend(arg)
fmts.append(fmt)
return flat, fmts
def _regroup(args, fmt):
if isinstance(fmt, int):
if fmt == 0:
return args[0], args[1:]
return args[:fmt], args[fmt:]
assert isinstance(args, (list, tuple)), \
"HybridBlock output must be (nested) list of Symbol or NDArray, " \
"but got %s of type %s"%(str(args), str(type(args)))
ret = []
for i in fmt:
res, args = _regroup(args, i)
ret.append(res)
return ret, args
class Block(object):
"""Base class for all neural network layers and models. Your models should
subclass this class.
`Block` can be nested recursively in a tree structure. You can create and
assign child `Block` as regular attributes::
from mxnet.gluon import Block, nn
from mxnet import ndarray as F
class Model(Block):
def __init__(self, **kwargs):
super(Model, self).__init__(**kwargs)
# use name_scope to give child Blocks appropriate names.
# It also allows sharing Parameters between Blocks recursively.
with self.name_scope():
self.dense0 = nn.Dense(20)
self.dense1 = nn.Dense(20)
def forward(self, x):
x = F.relu(self.dense0(x))
return F.relu(self.dense1(x))
model = Model()
model.initialize(ctx=mx.cpu(0))
model(F.zeros((10, 10), ctx=mx.cpu(0)))
    Child `Block`s assigned this way will be registered and `collect_params`
    will collect their Parameters recursively.
Parameters
----------
prefix : str
Prefix acts like a name space. It will be prepended to the names of all
Parameters and child `Block`s in this `Block`'s `name_scope`. Prefix
should be unique within one model to prevent name collisions.
params : ParameterDict or None
`ParameterDict` for sharing weights with the new `Block`. For example,
if you want `dense1` to share `dense0`'s weights, you can do::
dense0 = nn.Dense(20)
dense1 = nn.Dense(20, params=dense0.collect_params())
"""
def __init__(self, prefix=None, params=None):
self._prefix, self._params = _BlockScope.create(prefix, params, self._alias())
self._name = self._prefix[:-1] if self._prefix.endswith('_') else self._prefix
self._scope = _BlockScope(self)
self._children = []
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in self.__dict__.items() if isinstance(block, Block)])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __setattr__(self, name, value):
"""Registers parameters."""
super(Block, self).__setattr__(name, value)
if isinstance(value, Block):
self.register_child(value)
def _alias(self):
return self.__class__.__name__.lower()
@property
def prefix(self):
"""Prefix of this `Block`."""
return self._prefix
@property
def name(self):
"""Name of this `Block`, without '_' in the end."""
return self._name
def name_scope(self):
"""Returns a name space object managing a child `Block` and parameter
names. Should be used within a `with` statement::
with self.name_scope():
self.dense = nn.Dense(20)
"""
return self._scope
@property
def params(self):
"""Returns this `Block`'s parameter dictionary (does not include its
children's parameters)."""
return self._params
def collect_params(self):
"""Returns a `ParameterDict` containing this `Block` and all of its
children's Parameters."""
ret = ParameterDict(self._params.prefix)
ret.update(self.params)
for cld in self._children:
ret.update(cld.collect_params())
return ret
def save_params(self, filename):
"""Save parameters to file.
filename : str
Path to file.
"""
self.collect_params().save(filename, strip_prefix=self.prefix)
def load_params(self, filename, ctx, allow_missing=False,
ignore_extra=False):
"""Load parameters from file.
filename : str
Path to parameter file.
ctx : Context or list of Context
Context(s) initialize loaded parameters on.
allow_missing : bool, default False
            Whether to silently skip loading parameters not represented in the file.
ignore_extra : bool, default False
Whether to silently ignore parameters from the file that are not
present in this Block.
"""
self.collect_params().load(filename, ctx, allow_missing, ignore_extra,
self.prefix)
def register_child(self, block):
"""Registers block as a child of self. `Block`s assigned to self as
attributes will be registered automatically."""
self._children.append(block)
def initialize(self, init=initializer.Uniform(), ctx=None, verbose=False):
"""Initializes `Parameter`s of this `Block` and its children.
Equivalent to `block.collect_params().initialize(...)`
"""
self.collect_params().initialize(init, ctx, verbose)
def hybridize(self, active=True):
"""Activates or deactivates `HybridBlock`s recursively. Has no effect on
non-hybrid children.
Parameters
----------
active : bool, default True
Whether to turn hybrid on or off.
"""
for cld in self._children:
cld.hybridize(active)
def __call__(self, *args):
"""Calls forward. Only accepts positional arguments."""
return self.forward(*args)
def forward(self, *args):
"""Overrides to implement forward computation using `NDArray`. Only
accepts positional arguments.
Parameters
----------
*args : list of NDArray
Input tensors.
"""
# pylint: disable= invalid-name
raise NotImplementedError
class HybridBlock(Block):
"""`HybridBlock` supports forwarding with both Symbol and NDArray.
Forward computation in `HybridBlock` must be static to work with `Symbol`s,
i.e. you cannot call `.asnumpy()`, `.shape`, `.dtype`, etc on tensors.
    Also, you cannot use branching or loop logic that depends on non-constant
expressions like random numbers or intermediate results, since they change
the graph structure for each iteration.
Before activating with `hybridize()`, `HybridBlock` works just like normal
`Block`. After activation, `HybridBlock` will create a symbolic graph
representing the forward computation and cache it. On subsequent forwards,
the cached graph will be used instead of `hybrid_forward`.
Refer `Hybrid tutorial <http://mxnet.io/tutorials/gluon/hybrid.html>`_ to see
the end-to-end usage.
"""
def __init__(self, prefix=None, params=None):
super(HybridBlock, self).__init__(prefix=prefix, params=params)
self._reg_params = {}
self._cached_graph = ()
self._cached_op = None
self._cached_params = None
self._out_format = None
self._in_format = None
self._active = False
def __setattr__(self, name, value):
"""Registers parameters."""
super(HybridBlock, self).__setattr__(name, value)
if isinstance(value, Parameter):
assert name not in self._reg_params or \
not isinstance(self._reg_params[name], Parameter), \
"Overriding Parameter attribute %s is not allowed. " \
"Please pass in Parameters by specifying `params` at " \
"Block construction instead."
self._reg_params[name] = value
def register_child(self, block):
if not isinstance(block, HybridBlock):
raise ValueError(
"Children of HybridBlock must also be HybridBlock, " \
"but %s has type %s. If you are using Sequential, " \
"please try HybridSequential instead"%(
str(block), str(type(block))))
super(HybridBlock, self).register_child(block)
def hybridize(self, active=True):
self._active = active
super(HybridBlock, self).hybridize(active)
def _get_graph(self, *args):
if not self._cached_graph:
args, self._in_format = _flatten(args)
inputs = [symbol.var('input_%d'%i) for i in range(len(args))]
grouped_inputs = _regroup(inputs, self._in_format)[0]
params = {i: j.var() for i, j in self._reg_params.items()}
with self.name_scope():
out = self.hybrid_forward(symbol, *grouped_inputs, **params) # pylint: disable=no-value-for-parameter
out, self._out_format = _flatten(out)
self._cached_graph = inputs, symbol.Group(out)
return self._cached_graph
def infer_shape(self, *args):
"""Infers shape of Parameters from inputs."""
inputs, out = self._get_graph(*args)
args, _ = _flatten(args)
arg_shapes, _, aux_shapes = out.infer_shape(
**{i.name: j.shape for i, j in zip(inputs, args)})
sdict = {i: j for i, j in zip(out.list_arguments(), arg_shapes)}
sdict.update({name : shape for name, shape in \
zip(out.list_auxiliary_states(), aux_shapes)})
for i in self.collect_params().values():
i.shape = sdict[i.name]
def _build_cache(self, *args):
inputs, out = self._get_graph(*args)
self._cached_op = ndarray.CachedOp(out)
params = dict(self.collect_params().items())
self._cached_params = [params.get(name, None) for name in out.list_inputs()]
assert len(params) + len(self._cached_graph[0]) == len(out.list_inputs()), \
"Wrong number of inputs."
name2pos = {var.name: i for i, var in enumerate(inputs)}
self._in_idx = [(i, name2pos[name]) for i, name in enumerate(out.list_inputs())
if name not in params]
def _call_cached_op(self, *args):
if self._cached_op is None:
self._build_cache(*args)
try:
cargs = [i.data() if i else None for i in self._cached_params]
except DeferredInitializationError:
self.infer_shape(*args)
for i in self._cached_params:
if i is not None:
i._finish_deferred_init()
cargs = [i.data() if i else None for i in self._cached_params]
args, fmt = _flatten(args)
assert fmt == self._in_format, "Invalid input format"
for i, j in self._in_idx:
cargs[i] = args[j]
out = self._cached_op(*cargs)
if isinstance(out, NDArray):
out = [out]
return _regroup(out, self._out_format)[0]
def forward(self, x, *args):
"""Defines the forward computation. Arguments can be either
`NDArray` or `Symbol`."""
if isinstance(x, NDArray):
with x.context as ctx:
if self._active:
return self._call_cached_op(x, *args)
try:
params = {i: j.data(ctx) for i, j in self._reg_params.items()}
except DeferredInitializationError:
self.infer_shape(x, *args)
for i in self.collect_params().values():
i._finish_deferred_init()
params = {i: j.data(ctx) for i, j in self._reg_params.items()}
return self.hybrid_forward(ndarray, x, *args, **params)
assert isinstance(x, Symbol), \
"HybridBlock requires the first argument to forward be either " \
"Symbol or NDArray, but got %s"%type(x)
params = {i: j.var() for i, j in self._reg_params.items()}
with self.name_scope():
return self.hybrid_forward(symbol, x, *args, **params)
def hybrid_forward(self, F, x, *args, **kwargs):
"""Overrides to construct symbolic graph for this `Block`.
Parameters
----------
x : Symbol or NDArray
The first input tensor.
*args : list of Symbol or list of NDArray
Additional input tensors.
"""
# pylint: disable= invalid-name
raise NotImplementedError
class SymbolBlock(HybridBlock):
"""Construct block from symbol. This is useful for using pre-trained models
    as feature extractors. For example, you may want to extract the output
    from the fc2 layer in AlexNet.
Parameters
----------
outputs : Symbol or list of Symbol
The desired output for SymbolBlock.
inputs : Symbol or list of Symbol
The Variables in output's argument that should be used as inputs.
params : ParameterDict
        Parameter dictionary for arguments and auxiliary states of outputs
that are not inputs.
Examples
--------
>>> # To extract the feature from fc1 and fc2 layers of AlexNet:
>>> alexnet = gluon.model_zoo.vision.alexnet(pretrained=True, ctx=mx.cpu(),
prefix='model_')
>>> inputs = mx.sym.var('data')
>>> out = alexnet(inputs)
>>> internals = out.get_internals()
>>> print(internals.list_outputs())
['data', ..., 'model_dense0_relu_fwd_output', ..., 'model_dense1_relu_fwd_output', ...]
>>> outputs = [internals['model_dense0_relu_fwd_output'],
internals['model_dense1_relu_fwd_output']]
>>> # Create SymbolBlock that shares parameters with alexnet
>>> feat_model = gluon.SymbolBlock(outputs, inputs, params=alexnet.collect_params())
>>> x = mx.nd.random_normal(shape=(16, 3, 224, 224))
>>> print(feat_model(x))
"""
def __init__(self, outputs, inputs, params=None):
super(SymbolBlock, self).__init__(prefix=None, params=None)
self._prefix = ''
self._params = ParameterDict('', params)
if isinstance(inputs, symbol.Symbol) and len(inputs.list_outputs()) == 1:
inputs = [inputs]
if isinstance(outputs, symbol.Symbol) and len(outputs.list_outputs()) == 1:
outputs = [outputs]
syms, self._in_format = _flatten(inputs)
out, self._out_format = _flatten(outputs)
out = symbol.Group(out)
input_names = set()
for i in syms:
assert len(i.get_internals().list_outputs()) == 1, \
"Input symbols must be variable, but %s is an output of operators"%str(i)
input_names.add(i.name)
for i in out.list_arguments():
if i not in input_names:
self.params.get(i, allow_deferred_init=True)
for i in out.list_auxiliary_states():
if i not in input_names:
self.params.get(i, grad_req='null', allow_deferred_init=True)
self._cached_graph = syms, out
self._build_cache()
def forward(self, x, *args):
if isinstance(x, NDArray):
with x.context:
return self._call_cached_op(x, *args)
assert isinstance(x, Symbol), \
"HybridBlock requires the first argument to forward be either " \
"Symbol or NDArray, but got %s"%type(x)
args, in_fmt = _flatten([x] + list(args))
assert in_fmt == self._in_format, "Invalid input format"
ret = copy.copy(self._cached_graph[1])
ret._compose(**{k.name: v for k, v in zip(self._cached_graph[0], args)})
return _regroup(ret, self._out_format)[0]
def hybrid_forward(self, F, x, *args, **kwargs):
raise NotImplementedError
| stefanhenneking/mxnet | python/mxnet/gluon/block.py | Python | apache-2.0 | 19,291 |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unittest runner for quantum Cisco plugin
export PLUGIN_DIR=quantum/plugins/cisco
./run_tests.sh -N
"""
import os
import sys
from nose import config
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(__file__))
from quantum.common.test_lib import run_tests, test_config
def main():
test_config['plugin_name'] = "l2network_plugin.L2Network"
cwd = os.getcwd()
os.chdir(cwd)
working_dir = os.path.abspath("quantum/plugins/cisco")
c = config.Config(stream=sys.stdout,
env=os.environ,
verbosity=3,
workingDir=working_dir)
sys.exit(run_tests(c))
if __name__ == '__main__':
main()
| ykaneko/quantum | quantum/plugins/cisco/run_tests.py | Python | apache-2.0 | 1,393 |
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the labqueue configuration file. It contains various settings
which allow the customization of the system's behavior.
"""
import os
import datetime
# This is the worker GUID used to signify topics, tasks, etc. which
# were created internally by this system (as opposed to, say, a
# sketchbot connecting over the REST interface).
#
API_WORKER_GUID = 'api.openweblab'
# This setting specifies the maximum size of a file which can be POSTed
# directly to the binary storage section of the queue. Any content which
# exceeds this amount must be uploaded to a special dynamic URL generated by
# the server. Such URLs must be requested from the server just prior to
# uploading the content.
MAX_DIRECT_UPLOAD_FILE_SIZE_BYTES = 800000
# If ALLOW_UNAUTHENTICATED_USE_WITH_WARNING is True, the server will
# allow connection from sketchbots, etc. without any kind of security
# or authorization. In that case, the server will complain with a
# warning, but allow such requests to proceed.
#
# If it is False, then the server will require an authorization header
# with pre-shared security key to be included with all requests.
#
ALLOW_UNAUTHENTICATED_USE_WITH_WARNING = True
# The HELP_TEMPLATES_PATH and ADMIN_TEMPLATES_PATH settings should point
# to the location of the template source files used to render online API
# help and browser-based admin UI.
#
HELP_TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), 'templates', 'help' )
ADMIN_TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), 'templates', 'admin' )
# If LDCS_ONLY_EDITABLE_BY_ORIGINAL_CREATOR is True, then an LDC can only
# be modified by the same worker that created it. If False, then any worker
# can edit any LDC.
#
LDCS_ONLY_EDITABLE_BY_ORIGINAL_CREATOR = False
# The number of seconds a Task reservation will be held before being
# automatically released by the system.
#
TASK_RESERVATION_MAX_HOLD_TIME_SEC = 500
# If True, then Topics with task policies using a 'max_num_tasks'
# rule will get max_num_tasks new 'slots' each hour for new Tasks,
# even if the Tasks from the previous hour have not been completed.
# If False, then the topic will have an absolute cap at 'max_num_tasks'
# so that Tasks must be completed for new ones to get in."
#
TASK_POLICY_MAX_NUM_TASKS_USES_SLOT_MODE = False
# Records are kept to check the last time the system was contacted
# by a particular worker. Set DISABLE_WORKER_STATUS_WRITES to silently
# disable updates to these records. This can be useful for debugging
# or reducing the number of datastore writes.
#
DISABLE_WORKER_STATUS_WRITES = False
# The minimum time between allowed worker status updates, in seconds.
# If a worker tries to update its own status less than MIN_WORKER_STATUS_UPDATE_PERIOD_SEC
# since its last update the server will return an error. This is used
# to prevent over-active workers from gobbling up app engine quota.
# To reduce quota use, set this to a higher number (or better yet, make
# your robots check in less frequently).
#
MIN_WORKER_STATUS_UPDATE_PERIOD_SEC = 5
# When listing the recent status of all workers that have contacted
# the system, ANCIENT_WORKER_STATUS_CUTOFF_DAYS can be used to automatically
# filter out old entries. If non-None, then this should indicate the
# maximum age of a workrer before they are dropped from status lists.
# Even ancient workers can have their status queried via directly
# requesting that single worker's status.
#
ANCIENT_WORKER_STATUS_CUTOFF_DAYS = None # 10
# This is the canonical list of valid touchpoint names. A touchpoint
# is a grouping of closely-related interactive exhibit pieces.
#
VALID_TOUCHPOINT_NAMES = [
'rob',
]
# This is the canonical list of valid activity space names. An activity
# space is a logical grouping of touchpoints.
#
VALID_ACTIVITY_SPACES = [
'www',
]
# If HTTP_RAISE_EXCEPTIONS_IN_REQUESTS is True, the RESTful HTTP interface
# will allow any Exceptions encountered in the labqueue code to bubble up
# as true Python Exceptions. This will cause any Exception-generating request
# to respond as HTTP status 500. That will potentially obscure any bugs from
# users connecting via HTTP, in exchange for allowing the system to be debugged
# via a live debugger. If it is False, however, the Exceptions will be caught
# and (hopefully) userful error responses will be returned over HTTP, with
# appropriate status codes.
#
HTTP_RAISE_EXCEPTIONS_IN_REQUESTS = False
# If HTTP_HELP is True, then a built-in human interface to the RESTful API
# will be accessible by appending ?HELP=GET (to try out GET requests) or
# ?HELP=POST (for POST requests) to any API URL.
#
HTTP_HELP = True
LDC_THUMBNAIL_PARAMS = {
'small': {
'all': {
'max_source_height': None,
'min_source_height': None,
'max_source_width': None,
'min_source_width': None,
'width': 140,
'height': 110,
'overlay_path': None,
'valign': 'middle',
# these are good for robot portraits:
'top_crop_pct': None,
'bottom_crop_pct': None,
'left_crop_pct': None,
'right_crop_pct': None,
'crop_x': None,
'crop_y': None,
'post_crop_uniform_scale_pct': None,
},
},
}
# The number of seconds to allow edge cache to hold LDC public media content
PUBLIC_MEDIA_CACHE_MAX_AGE_SEC = 61
| r8o8s1e0/ChromeWebLab | Sketchbots/sw/labqueue/config.py | Python | apache-2.0 | 6,010 |
"""Dummy package initialisation."""
| emijrp/pywikibot-core | tests/pwb/__init__.py | Python | mit | 36 |
"""
Defines classes for path effects. The path effects are supported in
:class:`~matplotlib.text.Text`, :class:`~matplotlib.lines.Line2D`
and :class:`~matplotlib.patches.Patch`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.backend_bases import RendererBase
import matplotlib.transforms as mtransforms
from matplotlib.colors import colorConverter
import matplotlib.patches as mpatches
class AbstractPathEffect(object):
"""
A base class for path effects.
Subclasses should override the ``draw_path`` method to add effect
functionality.
"""
def __init__(self, offset=(0., 0.)):
"""
Parameters
----------
offset : pair of floats
The offset to apply to the path, measured in points.
"""
self._offset = offset
self._offset_trans = mtransforms.Affine2D()
def _offset_transform(self, renderer, transform):
"""Apply the offset to the given transform."""
offset_x = renderer.points_to_pixels(self._offset[0])
offset_y = renderer.points_to_pixels(self._offset[1])
return transform + self._offset_trans.clear().translate(offset_x,
offset_y)
def _update_gc(self, gc, new_gc_dict):
"""
Update the given GraphicsCollection with the given
dictionary of properties. The keys in the dictionary are used to
identify the appropriate set_ method on the gc.
"""
new_gc_dict = new_gc_dict.copy()
dashes = new_gc_dict.pop("dashes", None)
if dashes:
gc.set_dashes(**dashes)
for k, v in six.iteritems(new_gc_dict):
set_method = getattr(gc, 'set_' + k, None)
if set_method is None or not six.callable(set_method):
raise AttributeError('Unknown property {0}'.format(k))
set_method(v)
return gc
def draw_path(self, renderer, gc, tpath, affine, rgbFace=None):
"""
        Derived classes should override this method. The arguments are the same
as :meth:`matplotlib.backend_bases.RendererBase.draw_path`
except the first argument is a renderer.
"""
# Get the real renderer, not a PathEffectRenderer.
if isinstance(renderer, PathEffectRenderer):
renderer = renderer._renderer
return renderer.draw_path(gc, tpath, affine, rgbFace)
class PathEffectRenderer(RendererBase):
"""
Implements a Renderer which contains another renderer.
This proxy then intercepts draw calls, calling the appropriate
:class:`AbstractPathEffect` draw method.
.. note::
Not all methods have been overridden on this RendererBase subclass.
It may be necessary to add further methods to extend the PathEffects
capabilities further.
"""
def __init__(self, path_effects, renderer):
"""
Parameters
----------
path_effects : iterable of :class:`AbstractPathEffect`
The path effects which this renderer represents.
renderer : :class:`matplotlib.backend_bases.RendererBase` instance
"""
self._path_effects = path_effects
self._renderer = renderer
def new_gc(self):
return self._renderer.new_gc()
def copy_with_path_effect(self, path_effects):
return self.__class__(path_effects, self._renderer)
def draw_path(self, gc, tpath, affine, rgbFace=None):
for path_effect in self._path_effects:
path_effect.draw_path(self._renderer, gc, tpath, affine,
rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, *args,
**kwargs):
# We do a little shimmy so that all markers are drawn for each path
# effect in turn. Essentially, we induce recursion (depth 1) which is
# terminated once we have just a single path effect to work with.
if len(self._path_effects) == 1:
# Call the base path effect function - this uses the unoptimised
# approach of calling "draw_path" multiple times.
return RendererBase.draw_markers(self, gc, marker_path,
marker_trans, path, *args,
**kwargs)
for path_effect in self._path_effects:
renderer = self.copy_with_path_effect([path_effect])
# Recursively call this method, only next time we will only have
# one path effect.
renderer.draw_markers(gc, marker_path, marker_trans, path,
*args, **kwargs)
def draw_path_collection(self, gc, master_transform, paths, *args,
**kwargs):
# We do a little shimmy so that all paths are drawn for each path
# effect in turn. Essentially, we induce recursion (depth 1) which is
# terminated once we have just a single path effect to work with.
if len(self._path_effects) == 1:
# Call the base path effect function - this uses the unoptimised
# approach of calling "draw_path" multiple times.
return RendererBase.draw_path_collection(self, gc,
master_transform, paths,
*args, **kwargs)
for path_effect in self._path_effects:
renderer = self.copy_with_path_effect([path_effect])
# Recursively call this method, only next time we will only have
# one path effect.
renderer.draw_path_collection(gc, master_transform, paths,
*args, **kwargs)
def points_to_pixels(self, points):
return self._renderer.points_to_pixels(points)
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
# Implements the naive text drawing as is found in RendererBase.
path, transform = self._get_text_path_transform(x, y, s, prop,
angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def __getattribute__(self, name):
if name in ['_text2path', 'flipy', 'height', 'width']:
return getattr(self._renderer, name)
else:
return object.__getattribute__(self, name)
class Normal(AbstractPathEffect):
"""
The "identity" PathEffect.
The Normal PathEffect's sole purpose is to draw the original artist with
no special path effect.
"""
pass
class Stroke(AbstractPathEffect):
"""A line based PathEffect which re-draws a stroke."""
def __init__(self, offset=(0, 0), **kwargs):
"""
The path will be stroked with its gc updated with the given
keyword arguments, i.e., the keyword arguments should be valid
gc parameter values.
"""
super(Stroke, self).__init__(offset)
self._gc = kwargs
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
"""
draw the path with updated gc.
"""
# Do not modify the input! Use copy instead.
gc0 = renderer.new_gc()
gc0.copy_properties(gc)
gc0 = self._update_gc(gc0, self._gc)
trans = self._offset_transform(renderer, affine)
renderer.draw_path(gc0, tpath, trans, rgbFace)
gc0.restore()
class withStroke(Stroke):
"""
Adds a simple :class:`Stroke` and then draws the
original Artist to avoid needing to call :class:`Normal`.
"""
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
Stroke.draw_path(self, renderer, gc, tpath, affine, rgbFace)
renderer.draw_path(gc, tpath, affine, rgbFace)
class SimplePatchShadow(AbstractPathEffect):
"""A simple shadow via a filled patch."""
def __init__(self, offset=(2, -2),
shadow_rgbFace=None, alpha=None,
rho=0.3, **kwargs):
"""
Parameters
----------
offset : pair of floats
The offset of the shadow in points.
shadow_rgbFace : color
The shadow color.
alpha : float
The alpha transparency of the created shadow patch.
Default is 0.3.
http://matplotlib.1069221.n5.nabble.com/path-effects-question-td27630.html
rho : float
A scale factor to apply to the rgbFace color if `shadow_rgbFace`
is not specified. Default is 0.3.
**kwargs
Extra keywords are stored and passed through to
:meth:`AbstractPathEffect._update_gc`.
"""
super(SimplePatchShadow, self).__init__(offset)
if shadow_rgbFace is None:
self._shadow_rgbFace = shadow_rgbFace
else:
self._shadow_rgbFace = colorConverter.to_rgba(shadow_rgbFace)
if alpha is None:
alpha = 0.3
self._alpha = alpha
self._rho = rho
#: The dictionary of keywords to update the graphics collection with.
self._gc = kwargs
#: The offset transform object. The offset isn't calculated yet
#: as we don't know how big the figure will be in pixels.
self._offset_tran = mtransforms.Affine2D()
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
"""
Overrides the standard draw_path to add the shadow offset and
necessary color changes for the shadow.
"""
# IMPORTANT: Do not modify the input - we copy everything instead.
affine0 = self._offset_transform(renderer, affine)
gc0 = renderer.new_gc()
gc0.copy_properties(gc)
if self._shadow_rgbFace is None:
r,g,b = (rgbFace or (1., 1., 1.))[:3]
# Scale the colors by a factor to improve the shadow effect.
shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)
else:
shadow_rgbFace = self._shadow_rgbFace
gc0.set_foreground("none")
gc0.set_alpha(self._alpha)
gc0.set_linewidth(0)
gc0 = self._update_gc(gc0, self._gc)
renderer.draw_path(gc0, tpath, affine0, shadow_rgbFace)
gc0.restore()
class withSimplePatchShadow(SimplePatchShadow):
"""
Adds a simple :class:`SimplePatchShadow` and then draws the
original Artist to avoid needing to call :class:`Normal`.
"""
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
SimplePatchShadow.draw_path(self, renderer, gc, tpath, affine, rgbFace)
renderer.draw_path(gc, tpath, affine, rgbFace)
class SimpleLineShadow(AbstractPathEffect):
"""A simple shadow via a line."""
def __init__(self, offset=(2,-2),
shadow_color='k', alpha=0.3, rho=0.3, **kwargs):
"""
Parameters
----------
offset : pair of floats
The offset to apply to the path, in points.
shadow_color : color
The shadow color. Default is black.
A value of ``None`` takes the original artist's color
with a scale factor of `rho`.
alpha : float
The alpha transparency of the created shadow patch.
Default is 0.3.
rho : float
            A scale factor to apply to the foreground color if `shadow_color`
            is ``None``. Default is 0.3.
**kwargs
Extra keywords are stored and passed through to
:meth:`AbstractPathEffect._update_gc`.
"""
super(SimpleLineShadow, self).__init__(offset)
if shadow_color is None:
self._shadow_color = shadow_color
else:
self._shadow_color = colorConverter.to_rgba(shadow_color)
self._alpha = alpha
self._rho = rho
#: The dictionary of keywords to update the graphics collection with.
self._gc = kwargs
#: The offset transform object. The offset isn't calculated yet
#: as we don't know how big the figure will be in pixels.
self._offset_tran = mtransforms.Affine2D()
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
"""
Overrides the standard draw_path to add the shadow offset and
necessary color changes for the shadow.
"""
# IMPORTANT: Do not modify the input - we copy everything instead.
affine0 = self._offset_transform(renderer, affine)
gc0 = renderer.new_gc()
gc0.copy_properties(gc)
if self._shadow_color is None:
r,g,b = (gc0.get_foreground() or (1., 1., 1.))[:3]
# Scale the colors by a factor to improve the shadow effect.
shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)
else:
shadow_rgbFace = self._shadow_color
fill_color = None
gc0.set_foreground(shadow_rgbFace)
gc0.set_alpha(self._alpha)
gc0.set_linestyle("solid")
gc0 = self._update_gc(gc0, self._gc)
renderer.draw_path(gc0, tpath, affine0, fill_color)
gc0.restore()
class PathPatchEffect(AbstractPathEffect):
"""
Draws a :class:`~matplotlib.patches.PathPatch` instance whose Path
comes from the original PathEffect artist.
"""
def __init__(self, offset=(0, 0), **kwargs):
"""
Parameters
----------
offset : pair of floats
The offset to apply to the path, in points.
**kwargs :
All keyword arguments are passed through to the
:class:`~matplotlib.patches.PathPatch` constructor. The
properties which cannot be overridden are "path", "clip_box"
"transform" and "clip_path".
"""
super(PathPatchEffect, self).__init__(offset=offset)
self.patch = mpatches.PathPatch([], **kwargs)
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
affine = self._offset_transform(renderer, affine)
self.patch._path = tpath
self.patch.set_transform(affine)
self.patch.set_clip_box(gc._cliprect)
self.patch.set_clip_path(gc._clippath)
self.patch.draw(renderer)
| yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/patheffects.py | Python | mit | 14,367 |
import ntpath
from test.test_support import verbose, TestFailed
import os
errors = 0
def tester(fn, wantResult):
global errors
fn = fn.replace("\\", "\\\\")
gotResult = eval(fn)
if wantResult != gotResult:
print "error!"
print "evaluated: " + str(fn)
print "should be: " + str(wantResult)
print " returned: " + str(gotResult)
print ""
errors = errors + 1
tester('ntpath.splitext("foo.ext")', ('foo', '.ext'))
tester('ntpath.splitext("/foo/foo.ext")', ('/foo/foo', '.ext'))
tester('ntpath.splitext(".ext")', ('', '.ext'))
tester('ntpath.splitext("\\foo.ext\\foo")', ('\\foo.ext\\foo', ''))
tester('ntpath.splitext("foo.ext\\")', ('foo.ext\\', ''))
tester('ntpath.splitext("")', ('', ''))
tester('ntpath.splitext("foo.bar.ext")', ('foo.bar', '.ext'))
tester('ntpath.splitext("xx/foo.bar.ext")', ('xx/foo.bar', '.ext'))
tester('ntpath.splitext("xx\\foo.bar.ext")', ('xx\\foo.bar', '.ext'))
tester('ntpath.splitdrive("c:\\foo\\bar")',
('c:', '\\foo\\bar'))
tester('ntpath.splitunc("\\\\conky\\mountpoint\\foo\\bar")',
('\\\\conky\\mountpoint', '\\foo\\bar'))
tester('ntpath.splitdrive("c:/foo/bar")',
('c:', '/foo/bar'))
tester('ntpath.splitunc("//conky/mountpoint/foo/bar")',
('//conky/mountpoint', '/foo/bar'))
tester('ntpath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
tester('ntpath.split("\\\\conky\\mountpoint\\foo\\bar")',
('\\\\conky\\mountpoint\\foo', 'bar'))
tester('ntpath.split("c:\\")', ('c:\\', ''))
tester('ntpath.split("\\\\conky\\mountpoint\\")',
('\\\\conky\\mountpoint', ''))
tester('ntpath.split("c:/")', ('c:/', ''))
tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint', ''))
tester('ntpath.isabs("c:\\")', 1)
tester('ntpath.isabs("\\\\conky\\mountpoint\\")', 1)
tester('ntpath.isabs("\\foo")', 1)
tester('ntpath.isabs("\\foo\\bar")', 1)
tester('ntpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"])',
"/home/swen")
tester('ntpath.commonprefix(["\\home\\swen\\spam", "\\home\\swen\\eggs"])',
"\\home\\swen\\")
tester('ntpath.commonprefix(["/home/swen/spam", "/home/swen/spam"])',
"/home/swen/spam")
tester('ntpath.join("")', '')
tester('ntpath.join("", "", "")', '')
tester('ntpath.join("a")', 'a')
tester('ntpath.join("/a")', '/a')
tester('ntpath.join("\\a")', '\\a')
tester('ntpath.join("a:")', 'a:')
tester('ntpath.join("a:", "b")', 'a:b')
tester('ntpath.join("a:", "/b")', 'a:/b')
tester('ntpath.join("a:", "\\b")', 'a:\\b')
tester('ntpath.join("a", "/b")', '/b')
tester('ntpath.join("a", "\\b")', '\\b')
tester('ntpath.join("a", "b", "c")', 'a\\b\\c')
tester('ntpath.join("a\\", "b", "c")', 'a\\b\\c')
tester('ntpath.join("a", "b\\", "c")', 'a\\b\\c')
tester('ntpath.join("a", "b", "\\c")', '\\c')
tester('ntpath.join("d:\\", "\\pleep")', 'd:\\pleep')
tester('ntpath.join("d:\\", "a", "b")', 'd:\\a\\b')
tester("ntpath.join('c:', '/a')", 'c:/a')
tester("ntpath.join('c:/', '/a')", 'c:/a')
tester("ntpath.join('c:/a', '/b')", '/b')
tester("ntpath.join('c:', 'd:/')", 'd:/')
tester("ntpath.join('c:/', 'd:/')", 'd:/')
tester("ntpath.join('c:/', 'd:/a/b')", 'd:/a/b')
tester("ntpath.join('')", '')
tester("ntpath.join('', '', '', '', '')", '')
tester("ntpath.join('a')", 'a')
tester("ntpath.join('', 'a')", 'a')
tester("ntpath.join('', '', '', '', 'a')", 'a')
tester("ntpath.join('a', '')", 'a\\')
tester("ntpath.join('a', '', '', '', '')", 'a\\')
tester("ntpath.join('a\\', '')", 'a\\')
tester("ntpath.join('a\\', '', '', '', '')", 'a\\')
tester("ntpath.normpath('A//////././//.//B')", r'A\B')
tester("ntpath.normpath('A/./B')", r'A\B')
tester("ntpath.normpath('A/foo/../B')", r'A\B')
tester("ntpath.normpath('C:A//B')", r'C:A\B')
tester("ntpath.normpath('D:A/./B')", r'D:A\B')
tester("ntpath.normpath('e:A/foo/../B')", r'e:A\B')
# Next 3 seem dubious, and especially the 3rd, but normpath is possibly
# trying to leave UNC paths alone without actually knowing anything about
# them.
tester("ntpath.normpath('C:///A//B')", r'C:\\\A\B')
tester("ntpath.normpath('D:///A/./B')", r'D:\\\A\B')
tester("ntpath.normpath('e:///A/foo/../B')", r'e:\\\A\B')
tester("ntpath.normpath('..')", r'..')
tester("ntpath.normpath('.')", r'.')
tester("ntpath.normpath('')", r'.')
tester("ntpath.normpath('/')", '\\')
tester("ntpath.normpath('c:/')", 'c:\\')
tester("ntpath.normpath('/../.././..')", '\\')
tester("ntpath.normpath('c:/../../..')", 'c:\\')
tester("ntpath.normpath('../.././..')", r'..\..\..')
tester("ntpath.normpath('K:../.././..')", r'K:..\..\..')
# ntpath.abspath() can only be used on a system with the "nt" module
# (reasonably), so we protect this test with "import nt". This allows
# the rest of the tests for the ntpath module to be run to completion
# on any platform, since most of the module is intended to be usable
# from any platform.
try:
import nt
except ImportError:
pass
else:
tester('ntpath.abspath("C:\\")', "C:\\")
if errors:
raise TestFailed(str(errors) + " errors.")
elif verbose:
print "No errors. Thank your lucky stars."
| trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/test/test_ntpath.py | Python | gpl-2.0 | 5,049 |
#!/usr/bin/env python
# Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2012 Brett Smith <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ts3
import gnome15.g15logging as g15logging
import logging
if __name__ == "__main__":
logger = g15logging.get_root_logger()
logger.setLevel(logging.INFO)
t = ts3.TS3()
t.start()
logger.info("schandlerid : %d", t.schandlerid)
logger.info("channel: %s", t.send_command(ts3.Command('channelconnectinfo')).args['path'])
| achilleas-k/gnome15 | src/plugins/voip-teamspeak3/test.py | Python | gpl-3.0 | 1,183 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import shuup.notify.enums
import enumfields.fields
import shuup.core.fields
from django.conf import settings
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('recipient_type', enumfields.fields.EnumIntegerField(verbose_name='recipient type', default=1, enum=shuup.notify.enums.RecipientType)),
('created_on', models.DateTimeField(verbose_name='created on', auto_now_add=True)),
('message', models.CharField(verbose_name='message', editable=False, max_length=140, default='')),
('identifier', shuup.core.fields.InternalIdentifierField(max_length=64, null=True, unique=False, editable=False, blank=True)),
('priority', enumfields.fields.EnumIntegerField(verbose_name='priority', db_index=True, default=2, enum=shuup.notify.enums.Priority)),
('_data', jsonfield.fields.JSONField(db_column='data', blank=True, null=True, editable=False)),
('marked_read', models.BooleanField(verbose_name='marked read', editable=False, default=False, db_index=True)),
('marked_read_on', models.DateTimeField(blank=True, verbose_name='marked read on', null=True)),
('marked_read_by', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, null=True, editable=False, verbose_name='marked read by')),
('recipient', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, null=True, verbose_name='recipient')),
],
),
migrations.CreateModel(
name='Script',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('event_identifier', models.CharField(db_index=True, max_length=64, verbose_name='event identifier')),
('identifier', shuup.core.fields.InternalIdentifierField(max_length=64, null=True, unique=True, editable=False, blank=True)),
('created_on', models.DateTimeField(verbose_name='created on', auto_now_add=True)),
('name', models.CharField(max_length=64, verbose_name='name')),
('enabled', models.BooleanField(verbose_name='enabled', db_index=True, default=False)),
('_step_data', jsonfield.fields.JSONField(db_column='step_data', default=[])),
],
),
]
| suutari-ai/shoop | shuup/notify/migrations/0001_initial.py | Python | agpl-3.0 | 2,946 |
# -*- coding: utf-8 -*-
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Copyright (C)
# 2014: Agile Business Group (<http://www.agilebg.com>)
# 2015: Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from openerp import api, fields, models
from . import exceptions
_logger = logging.getLogger(__name__)
class ResPartner(models.Model):
"""Adds last name and first name; name becomes a stored function field."""
_inherit = 'res.partner'
firstname = fields.Char("First name")
lastname = fields.Char("Last name")
name = fields.Char(
compute="_compute_name",
inverse="_inverse_name_after_cleaning_whitespace",
required=False,
store=True)
@api.model
def _get_computed_name(self, lastname, firstname):
"""Compute the 'name' field according to splitted data.
You can override this method to change the order of lastname and
firstname the computed name"""
return u" ".join((p for p in (lastname, firstname) if p))
@api.one
@api.depends("firstname", "lastname")
def _compute_name(self):
"""Write the 'name' field according to splitted data."""
self.name = self._get_computed_name(self.lastname, self.firstname)
@api.one
def _inverse_name_after_cleaning_whitespace(self):
"""Clean whitespace in :attr:`~.name` and split it.
Removes leading, trailing and duplicated whitespace.
The splitting logic is stored separately in :meth:`~._inverse_name`, so
submodules can extend that method and get whitespace cleaning for free.
"""
# Remove unneeded whitespace
clean = u" ".join(self.name.split(None)) if self.name else self.name
# Clean name avoiding infinite recursion
if self.name != clean:
self.name = clean
# Save name in the real fields
else:
self._inverse_name()
@api.model
def _get_inverse_name(self, name, is_company=False):
"""Try to revert the effect of :meth:`._compute_name`.
- If the partner is a company, save it in the lastname.
- Otherwise, make a guess.
        This method can easily be overridden by other submodules.
        You can also override this method to change the order of the name's
        attributes.
When this method is called, :attr:`~.name` already has unified and
trimmed whitespace.
"""
# Company name goes to the lastname
if is_company or not name:
parts = [name or False, False]
# Guess name splitting
else:
parts = name.split(" ", 1)
while len(parts) < 2:
parts.append(False)
return parts
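    # For example, with the default implementation above:
    #   _get_inverse_name(u"Kennedy John")               -> [u"Kennedy", u"John"]
    #   _get_inverse_name(u"Acme Inc.", is_company=True) -> [u"Acme Inc.", False]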
@api.one
def _inverse_name(self):
parts = self._get_inverse_name(self.name, self.is_company)
self.lastname, self.firstname = parts
@api.one
@api.constrains("firstname", "lastname")
def _check_name(self):
"""Ensure at least one name is set."""
if not (self.firstname or self.lastname):
raise exceptions.EmptyNamesError(self)
@api.one
@api.onchange("firstname", "lastname")
def _onchange_subnames(self):
"""Avoid recursion when the user changes one of these fields.
        This forces the :attr:`~.name` inversion to be skipped when the user
        is setting it in a non-inverted way.
"""
# Modify self's context without creating a new Environment.
# See https://github.com/odoo/odoo/issues/7472#issuecomment-119503916.
self.env.context = self.with_context(skip_onchange=True).env.context
@api.one
@api.onchange("name")
def _onchange_name(self):
"""Ensure :attr:`~.name` is inverted in the UI."""
if self.env.context.get("skip_onchange"):
# Do not skip next onchange
self.env.context = (
self.with_context(skip_onchange=False).env.context)
else:
self._inverse_name_after_cleaning_whitespace()
@api.model
def _install_partner_firstname(self):
"""Save names correctly in the database.
Before installing the module, field ``name`` contains all full names.
When installing it, this method parses those names and saves them
correctly into the database. This can be called later too if needed.
"""
# Find records with empty firstname and lastname
records = self.search([("firstname", "=", False),
("lastname", "=", False)])
# Force calculations there
records._inverse_name()
_logger.info("%d partners updated installing module.", len(records))
| BT-ojossen/partner-contact | partner_firstname/models.py | Python | agpl-3.0 | 5,391 |
#!/usr/bin/env python
"""
This is an Ansible dynamic inventory for OpenStack.
It requires your OpenStack credentials to be set in clouds.yaml or your shell
environment.
"""
import resources
def build_inventory():
"""Build the Ansible inventory for the current environment."""
inventory = resources.build_inventory()
inventory['nodes'] = inventory['openstack_nodes']
inventory['masters'] = inventory['openstack_master_nodes']
inventory['etcd'] = inventory['openstack_etcd_nodes']
inventory['glusterfs'] = inventory['openstack_cns_nodes']
return inventory
if __name__ == '__main__':
resources.main(build_inventory)
| markllama/openshift-ansible | playbooks/openstack/inventory.py | Python | apache-2.0 | 652 |
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from oslo_log import log
from oslo_utils import uuidutils
from neutron.agent.common import config as agent_config
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import config as l3_config
from neutron.agent.l3 import dvr_local_router as dvr_router
from neutron.agent.l3 import ha
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import router_info
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.common import utils as common_utils
from neutron.tests import base
from neutron.tests.common import l3_test_common
_uuid = uuidutils.generate_uuid
FIP_PRI = 32768
HOSTNAME = 'myhost'
class TestDvrRouterOperations(base.BaseTestCase):
def setUp(self):
super(TestDvrRouterOperations, self).setUp()
mock.patch('eventlet.spawn').start()
self.conf = agent_config.setup_conf()
self.conf.register_opts(base_config.core_opts)
log.register_options(self.conf)
self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
self.conf.register_opts(l3_config.OPTS)
self.conf.register_opts(ha.OPTS)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_use_namespaces_opts_helper(self.conf)
agent_config.register_process_monitor_opts(self.conf)
self.conf.register_opts(interface.OPTS)
self.conf.register_opts(external_process.OPTS)
self.conf.set_override('router_id', 'fake_id')
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.set_override('send_arp_for_ha', 1)
self.conf.set_override('state_path', '')
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.ensure_dir = mock.patch('neutron.common.utils.ensure_dir').start()
mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
'.get_full_config_file_path').start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.utils_replace_file_p = mock.patch(
'neutron.agent.linux.utils.replace_file')
self.utils_replace_file = self.utils_replace_file_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.process_monitor = mock.patch(
'neutron.agent.linux.external_process.ProcessMonitor').start()
self.send_adv_notif_p = mock.patch(
'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
self.send_adv_notif = self.send_adv_notif_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start()
self.mock_rule = mock.MagicMock()
ip_rule.return_value = self.mock_rule
ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
self.mock_ip_dev = mock.MagicMock()
ip_dev.return_value = self.mock_ip_dev
self.l3pluginApi_cls_p = mock.patch(
'neutron.agent.l3.agent.L3PluginApi')
l3pluginApi_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.MagicMock()
l3pluginApi_cls.return_value = self.plugin_api
self.looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
subnet_id_1 = _uuid()
subnet_id_2 = _uuid()
self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
'gateway_ip': '152.2.0.1',
'id': subnet_id_1}],
'network_id': _uuid(),
'device_owner': 'network:router_centralized_snat',
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_1,
'ip_address': '152.2.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()},
{'subnets': [{'cidr': '152.10.0.0/16',
'gateway_ip': '152.10.0.1',
'id': subnet_id_2}],
'network_id': _uuid(),
'device_owner': 'network:router_centralized_snat',
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_2,
'ip_address': '152.10.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()}]
self.ri_kwargs = {'agent_conf': self.conf,
'interface_driver': self.mock_driver}
def _create_router(self, router=None, **kwargs):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.router_id = _uuid()
if not router:
router = mock.MagicMock()
return dvr_router.DvrLocalRouter(agent,
HOSTNAME,
self.router_id,
router,
self.conf,
mock.Mock(),
**kwargs)
def test_get_floating_ips_dvr(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
fips = ri.get_floating_ips()
self.assertEqual([{'host': HOSTNAME}], fips)
@mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
@mock.patch.object(ip_lib, 'IPDevice')
@mock.patch.object(ip_lib, 'IPRule')
def test_floating_ip_added_dist(self, mIPRule, mIPDevice, mock_adv_notif):
router = mock.MagicMock()
ri = self._create_router(router)
ext_net_id = _uuid()
subnet_id = _uuid()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': ext_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
fip = {'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': ext_net_id,
'port_id': _uuid()}
ri.fip_ns = mock.Mock()
ri.fip_ns.agent_gateway_port = agent_gw_port
ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI
ri.rtr_fip_subnet = lla.LinkLocalAddressPair('169.254.30.42/31')
ri.dist_fip_count = 0
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
ri.floating_ip_added_dist(fip, ip_cidr)
mIPRule().rule.add.assert_called_with(ip='192.168.0.1',
table=16,
priority=FIP_PRI)
self.assertEqual(1, ri.dist_fip_count)
# TODO(mrsmith): add more asserts
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch.object(ip_lib, 'IPDevice')
@mock.patch.object(ip_lib, 'IPRule')
def test_floating_ip_removed_dist(self, mIPRule, mIPDevice, mIPWrapper):
router = mock.MagicMock()
ri = self._create_router(router)
subnet_id = _uuid()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
fip_cidr = '11.22.33.44/24'
ri.dist_fip_count = 2
ri.fip_ns = mock.Mock()
ri.fip_ns.get_name.return_value = 'fip_ns_name'
ri.floating_ips_dict['11.22.33.44'] = FIP_PRI
ri.fip_2_rtr = '11.22.33.42'
ri.rtr_2_fip = '11.22.33.40'
ri.fip_ns.agent_gateway_port = agent_gw_port
s = lla.LinkLocalAddressPair('169.254.30.42/31')
ri.rtr_fip_subnet = s
ri.floating_ip_removed_dist(fip_cidr)
mIPRule().rule.delete.assert_called_with(
ip=str(netaddr.IPNetwork(fip_cidr).ip), table=16, priority=FIP_PRI)
mIPDevice().route.delete_route.assert_called_with(fip_cidr, str(s.ip))
self.assertFalse(ri.fip_ns.unsubscribe.called)
ri.dist_fip_count = 1
ri.rtr_fip_subnet = lla.LinkLocalAddressPair('15.1.2.3/32')
_, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
fip_ns = ri.fip_ns
with mock.patch.object(self.plugin_api,
'delete_agent_gateway_port') as del_fip_gw:
ri.floating_ip_removed_dist(fip_cidr)
self.assertTrue(del_fip_gw.called)
self.assertTrue(fip_ns.destroyed)
mIPWrapper().del_veth.assert_called_once_with(
fip_ns.get_int_device_name(router['id']))
mIPDevice().route.delete_gateway.assert_called_once_with(
str(fip_to_rtr.ip), table=16)
fip_ns.unsubscribe.assert_called_once_with(ri.router_id)
def _test_add_floating_ip(self, ri, fip, is_failure):
ri._add_fip_addr_to_device = mock.Mock(return_value=is_failure)
ri.floating_ip_added_dist = mock.Mock()
result = ri.add_floating_ip(fip,
mock.sentinel.interface_name,
mock.sentinel.device)
ri._add_fip_addr_to_device.assert_called_once_with(
fip, mock.sentinel.device)
return result
def test_add_floating_ip(self):
ri = self._create_router(mock.MagicMock())
ip = '15.1.2.3'
fip = {'floating_ip_address': ip}
result = self._test_add_floating_ip(ri, fip, True)
ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32')
self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result)
def test_add_floating_ip_error(self):
ri = self._create_router(mock.MagicMock())
result = self._test_add_floating_ip(
ri, {'floating_ip_address': '15.1.2.3'}, False)
self.assertFalse(ri.floating_ip_added_dist.called)
self.assertEqual(l3_constants.FLOATINGIP_STATUS_ERROR, result)
@mock.patch.object(router_info.RouterInfo, 'remove_floating_ip')
def test_remove_floating_ip(self, super_remove_floating_ip):
ri = self._create_router(mock.MagicMock())
ri.floating_ip_removed_dist = mock.Mock()
ri.remove_floating_ip(mock.sentinel.device, mock.sentinel.ip_cidr)
super_remove_floating_ip.assert_called_once_with(
mock.sentinel.device, mock.sentinel.ip_cidr)
ri.floating_ip_removed_dist.assert_called_once_with(
mock.sentinel.ip_cidr)
def test__get_internal_port(self):
ri = self._create_router()
port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}
router_ports = [port]
ri.router.get.return_value = router_ports
self.assertEqual(port, ri._get_internal_port(mock.sentinel.subnet_id))
def test__get_internal_port_not_found(self):
ri = self._create_router()
port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}
router_ports = [port]
ri.router.get.return_value = router_ports
self.assertEqual(None, ri._get_internal_port(mock.sentinel.subnet_id2))
def test__get_snat_idx_ipv4(self):
ip_cidr = '101.12.13.00/24'
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x650C0D00 is numerical value of 101.12.13.00
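        # (101 -> 0x65, 12 -> 0x0C, 13 -> 0x0D, 0 -> 0x00, concatenated into a
        # single 32-bit value)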
self.assertEqual(0x650C0D00, snat_idx)
def test__get_snat_idx_ipv6(self):
ip_cidr = '2620:0:a03:e100::/64'
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x3D345705 is 30 bit xor folded crc32 of the ip_cidr
self.assertEqual(0x3D345705, snat_idx)
def test__get_snat_idx_ipv6_below_32768(self):
ip_cidr = 'd488::/30'
# crc32 of this ip_cidr is 0x1BD7
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x1BD7 + 0x3FFFFFFF = 0x40001BD6
self.assertEqual(0x40001BD6, snat_idx)
def test__set_subnet_arp_info(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
ri = dvr_router.DvrLocalRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
subnet_id = l3_test_common.get_subnet_id(ports[0])
test_ports = [{'mac_address': '00:11:22:33:44:55',
'device_owner': 'network:dhcp',
'fixed_ips': [{'ip_address': '1.2.3.4',
'prefixlen': 24,
'subnet_id': subnet_id}]}]
self.plugin_api.get_ports_by_subnet.return_value = test_ports
# Test basic case
ports[0]['subnets'] = [{'id': subnet_id,
'cidr': '1.2.3.0/24'}]
ri._set_subnet_arp_info(subnet_id)
self.mock_ip_dev.neigh.add.assert_called_once_with(
'1.2.3.4', '00:11:22:33:44:55')
# Test negative case
router['distributed'] = False
ri._set_subnet_arp_info(subnet_id)
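        # NOTE: Mock provides no never_called() assertion, so the check below
        # only creates a child mock and can never fail; the intent is that no
        # additional ARP entry is added in the non-distributed case.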
self.mock_ip_dev.neigh.add.never_called()
def test_add_arp_entry(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
subnet_id = l3_test_common.get_subnet_id(
router[l3_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.7.23.11',
'mac_address': '00:11:22:33:44:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._router_added(router['id'], router)
agent.add_arp_entry(None, payload)
agent.router_deleted(None, router['id'])
self.mock_ip_dev.neigh.add.assert_called_once_with(
'1.7.23.11', '00:11:22:33:44:55')
def test_add_arp_entry_no_routerinfo(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
subnet_id = l3_test_common.get_subnet_id(
router[l3_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.7.23.11',
'mac_address': '00:11:22:33:44:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent.add_arp_entry(None, payload)
def test__update_arp_entry_with_no_subnet(self):
ri = dvr_router.DvrLocalRouter(
mock.sentinel.agent,
HOSTNAME,
'foo_router_id',
{'distributed': True, 'gw_port_host': HOSTNAME},
**self.ri_kwargs)
with mock.patch.object(l3_agent.ip_lib, 'IPDevice') as f:
ri._update_arp_entry(mock.ANY, mock.ANY, 'foo_subnet_id', 'add')
self.assertFalse(f.call_count)
def test_del_arp_entry(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
subnet_id = l3_test_common.get_subnet_id(
router[l3_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.5.25.15',
'mac_address': '00:44:33:22:11:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._router_added(router['id'], router)
# first add the entry
agent.add_arp_entry(None, payload)
# now delete it
agent.del_arp_entry(None, payload)
self.mock_ip_dev.neigh.delete.assert_called_once_with(
'1.5.25.15', '00:44:33:22:11:55')
agent.router_deleted(None, router['id'])
def test_get_floating_agent_gw_interfaces(self):
fake_network_id = _uuid()
subnet_id = _uuid()
agent_gateway_port = (
[{'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'binding:host_id': 'myhost',
'device_owner': 'network:floatingip_agent_gateway',
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrLocalRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
self.assertEqual(
agent_gateway_port[0],
ri.get_floating_agent_gw_interface(fake_network_id))
def test_process_router_dist_floating_ip_add(self):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': mock.sentinel.ext_net_id,
'port_id': _uuid()},
{'id': _uuid(),
'host': 'some-other-host',
'floating_ip_address': '15.1.2.4',
'fixed_ip_address': '192.168.0.10',
'floating_network_id': mock.sentinel.ext_net_id,
'port_id': _uuid()}]}
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrLocalRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
ri.dist_fip_count = 0
fip_ns = agent.get_fip_ns(mock.sentinel.ext_net_id)
subnet_id = _uuid()
fip_ns.agent_gateway_port = (
{'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
)
def _test_ext_gw_updated_dvr_agent_mode(self, host,
agent_mode, expected_call_count):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrLocalRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._external_gateway_added = mock.Mock()
# test agent mode = dvr (compute node)
router['gw_port_host'] = host
agent.conf.agent_mode = agent_mode
ri.external_gateway_updated(ex_gw_port, interface_name)
# no gateway should be added on dvr node
self.assertEqual(expected_call_count,
ri._external_gateway_added.call_count)
def test_ext_gw_updated_dvr_agent_mode(self):
# no gateway should be added on dvr node
self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0)
def test_ext_gw_updated_dvr_agent_mode_host(self):
# no gateway should be added on dvr node
self._test_ext_gw_updated_dvr_agent_mode(HOSTNAME,
'dvr', 0)
def test_external_gateway_removed_ext_gw_port_and_fip(self):
self.conf.set_override('state_path', '/tmp')
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = 'dvr'
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
external_net_id = router['gw_port']['network_id']
ri = dvr_router.DvrLocalRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ri.remove_floating_ip = mock.Mock()
agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
ri.ex_gw_port = ri.router['gw_port']
del ri.router['gw_port']
ri.fip_ns = None
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag = mock.Mock()
nat.add_rule = mock.Mock()
ri.fip_ns = agent.get_fip_ns(external_net_id)
subnet_id = _uuid()
ri.fip_ns.agent_gateway_port = {
'fixed_ips': [{
'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id
}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': external_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
vm_floating_ip = '19.4.4.2'
ri.floating_ips_dict[vm_floating_ip] = FIP_PRI
ri.dist_fip_count = 1
ri.rtr_fip_subnet = ri.fip_ns.local_subnets.allocate(ri.router_id)
_, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
self.mock_ip.get_devices.return_value = [
l3_test_common.FakeDev(ri.fip_ns.get_ext_device_name(_uuid()))]
self.mock_ip_dev.addr.list.return_value = [
{'cidr': vm_floating_ip + '/32'},
{'cidr': '19.4.4.1/24'}]
self.device_exists.return_value = True
ri.external_gateway_removed(
ri.ex_gw_port,
ri.get_external_device_name(ri.ex_gw_port['id']))
ri.remove_floating_ip.assert_called_once_with(self.mock_ip_dev,
'19.4.4.2/32')
| vivekanand1101/neutron | neutron/tests/unit/agent/l3/test_dvr_local_router.py | Python | apache-2.0 | 25,046 |
"""Tests for the Home Assistant Websocket API."""
import asyncio
from unittest.mock import patch, Mock
from aiohttp import WSMsgType
import pytest
import voluptuous as vol
from homeassistant.components.websocket_api import const, messages
@pytest.fixture
def mock_low_queue():
"""Mock a low queue."""
with patch('homeassistant.components.websocket_api.http.MAX_PENDING_MSG',
5):
yield
@asyncio.coroutine
def test_invalid_message_format(websocket_client):
"""Test sending invalid JSON."""
yield from websocket_client.send_json({'type': 5})
msg = yield from websocket_client.receive_json()
assert msg['type'] == const.TYPE_RESULT
error = msg['error']
assert error['code'] == const.ERR_INVALID_FORMAT
assert error['message'].startswith('Message incorrectly formatted')
@asyncio.coroutine
def test_invalid_json(websocket_client):
"""Test sending invalid JSON."""
yield from websocket_client.send_str('this is not JSON')
msg = yield from websocket_client.receive()
assert msg.type == WSMsgType.close
@asyncio.coroutine
def test_quiting_hass(hass, websocket_client):
"""Test sending invalid JSON."""
with patch.object(hass.loop, 'stop'):
yield from hass.async_stop()
msg = yield from websocket_client.receive()
assert msg.type == WSMsgType.CLOSE
@asyncio.coroutine
def test_pending_msg_overflow(hass, mock_low_queue, websocket_client):
"""Test get_panels command."""
for idx in range(10):
yield from websocket_client.send_json({
'id': idx + 1,
'type': 'ping',
})
msg = yield from websocket_client.receive()
assert msg.type == WSMsgType.close
@asyncio.coroutine
def test_unknown_command(websocket_client):
"""Test get_panels command."""
yield from websocket_client.send_json({
'id': 5,
'type': 'unknown_command',
})
msg = yield from websocket_client.receive_json()
assert not msg['success']
assert msg['error']['code'] == const.ERR_UNKNOWN_COMMAND
async def test_handler_failing(hass, websocket_client):
"""Test a command that raises."""
hass.components.websocket_api.async_register_command(
'bla', Mock(side_effect=TypeError),
messages.BASE_COMMAND_MESSAGE_SCHEMA.extend({'type': 'bla'}))
await websocket_client.send_json({
'id': 5,
'type': 'bla',
})
msg = await websocket_client.receive_json()
assert msg['id'] == 5
assert msg['type'] == const.TYPE_RESULT
assert not msg['success']
assert msg['error']['code'] == const.ERR_UNKNOWN_ERROR
async def test_invalid_vol(hass, websocket_client):
"""Test a command that raises invalid vol error."""
hass.components.websocket_api.async_register_command(
'bla', Mock(side_effect=TypeError),
messages.BASE_COMMAND_MESSAGE_SCHEMA.extend({
'type': 'bla',
vol.Required('test_config'): str
}))
await websocket_client.send_json({
'id': 5,
'type': 'bla',
'test_config': 5
})
msg = await websocket_client.receive_json()
assert msg['id'] == 5
assert msg['type'] == const.TYPE_RESULT
assert not msg['success']
assert msg['error']['code'] == const.ERR_INVALID_FORMAT
assert 'expected str for dictionary value' in msg['error']['message']
| MartinHjelmare/home-assistant | tests/components/websocket_api/test_init.py | Python | apache-2.0 | 3,364 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import os
import h5py
import numpy as np
from synthetic_data_utils import generate_data, generate_rnn
from synthetic_data_utils import get_train_n_valid_inds
from synthetic_data_utils import nparray_and_transpose
from synthetic_data_utils import spikify_data, split_list_by_inds
import tensorflow as tf
from utils import write_datasets
DATA_DIR = "rnn_synth_data_v1.0"
flags = tf.app.flags
flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/",
"Directory for saving data.")
flags.DEFINE_string("datafile_name", "conditioned_rnn_data",
"Name of data file for input case.")
flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.")
flags.DEFINE_float("T", 1.0, "Time in seconds to generate.")
flags.DEFINE_integer("C", 400, "Number of conditions")
flags.DEFINE_integer("N", 50, "Number of units for the RNN")
flags.DEFINE_float("train_percentage", 4.0/5.0,
"Percentage of train vs validation trials")
flags.DEFINE_integer("nspikifications", 10,
"Number of spikifications of the same underlying rates.")
flags.DEFINE_float("g", 1.5, "Complexity of dynamics")
flags.DEFINE_float("x0_std", 1.0,
"Volume from which to pull initial conditions (affects diversity of dynamics.")
flags.DEFINE_float("tau", 0.025, "Time constant of RNN")
flags.DEFINE_float("dt", 0.010, "Time bin")
flags.DEFINE_float("max_firing_rate", 30.0, "Map 1.0 of RNN to a spikes per second")
FLAGS = flags.FLAGS
rng = np.random.RandomState(seed=FLAGS.synth_data_seed)
rnn_rngs = [np.random.RandomState(seed=FLAGS.synth_data_seed+1),
np.random.RandomState(seed=FLAGS.synth_data_seed+2)]
T = FLAGS.T
C = FLAGS.C
N = FLAGS.N
nspikifications = FLAGS.nspikifications
E = nspikifications * C
train_percentage = FLAGS.train_percentage
ntimesteps = int(T / FLAGS.dt)
rnn_a = generate_rnn(rnn_rngs[0], N, FLAGS.g, FLAGS.tau, FLAGS.dt,
FLAGS.max_firing_rate)
rnn_b = generate_rnn(rnn_rngs[1], N, FLAGS.g, FLAGS.tau, FLAGS.dt,
FLAGS.max_firing_rate)
rnns = [rnn_a, rnn_b]
# pick which RNN is used on each trial
rnn_to_use = rng.randint(2, size=E)
ext_input = np.repeat(np.expand_dims(rnn_to_use, axis=1), ntimesteps, axis=1)
ext_input = np.expand_dims(ext_input, axis=2) # these are "a's" in the paper
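# The resulting conditioning input has shape (E, ntimesteps, 1): one scalar
# per trial and time step identifying which of the two RNNs generated it.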
x0s = []
condition_labels = []
condition_number = 0
for c in range(C):
x0 = FLAGS.x0_std * rng.randn(N, 1)
x0s.append(np.tile(x0, nspikifications))
for ns in range(nspikifications):
condition_labels.append(condition_number)
condition_number += 1
x0s = np.concatenate(x0s, axis=1)
P_nxn = rng.randn(N, N) / np.sqrt(N)
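# Random projection passed as P_sxn below; the 1/sqrt(N) scaling keeps the
# variance of the projected activity roughly independent of N.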
# generate trials for both RNNs
rates_a, x0s_a, _ = generate_data(rnn_a, T=T, E=E, x0s=x0s, P_sxn=P_nxn,
input_magnitude=0.0, input_times=None)
spikes_a = spikify_data(rates_a, rng, rnn_a['dt'], rnn_a['max_firing_rate'])
rates_b, x0s_b, _ = generate_data(rnn_b, T=T, E=E, x0s=x0s, P_sxn=P_nxn,
input_magnitude=0.0, input_times=None)
spikes_b = spikify_data(rates_b, rng, rnn_b['dt'], rnn_b['max_firing_rate'])
# not the best way to do this but E is small enough
rates = []
spikes = []
for trial in xrange(E):
if rnn_to_use[trial] == 0:
rates.append(rates_a[trial])
spikes.append(spikes_a[trial])
else:
rates.append(rates_b[trial])
spikes.append(spikes_b[trial])
# split into train and validation sets
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
nspikifications)
rates_train, rates_valid = split_list_by_inds(rates, train_inds, valid_inds)
spikes_train, spikes_valid = split_list_by_inds(spikes, train_inds, valid_inds)
condition_labels_train, condition_labels_valid = split_list_by_inds(
condition_labels, train_inds, valid_inds)
ext_input_train, ext_input_valid = split_list_by_inds(
ext_input, train_inds, valid_inds)
rates_train = nparray_and_transpose(rates_train)
rates_valid = nparray_and_transpose(rates_valid)
spikes_train = nparray_and_transpose(spikes_train)
spikes_valid = nparray_and_transpose(spikes_valid)
# add train_ext_input and valid_ext_input
data = {'train_truth': rates_train,
'valid_truth': rates_valid,
'train_data' : spikes_train,
'valid_data' : spikes_valid,
'train_ext_input' : np.array(ext_input_train),
'valid_ext_input': np.array(ext_input_valid),
'train_percentage' : train_percentage,
'nspikifications' : nspikifications,
'dt' : FLAGS.dt,
'P_sxn' : P_nxn,
'condition_labels_train' : condition_labels_train,
'condition_labels_valid' : condition_labels_valid,
'conversion_factor': 1.0 / rnn_a['conversion_factor']}
# just one dataset here
datasets = {}
dataset_name = 'dataset_N' + str(N)
datasets[dataset_name] = data
# write out the dataset
write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets)
print ('Saved to ', os.path.join(FLAGS.save_dir,
FLAGS.datafile_name + '_' + dataset_name))
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/lfads/synth_data/generate_labeled_rnn_data.py | Python | bsd-2-clause | 5,821 |
##########################################################################
#
# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import pipes
import fnmatch
import imath
import IECore
import Gaffer
import GafferUI
import GafferCortex
import GafferCortexUI
__nodeTypes = (
GafferCortex.ParameterisedHolderNode,
GafferCortex.ParameterisedHolderComputeNode,
GafferCortex.ParameterisedHolderDependencyNode,
GafferCortex.ParameterisedHolderTaskNode,
)
##########################################################################
# NodeUI
##########################################################################
# Supported userData entries :
#
# ["UI"]["headerVisible"]
class _ParameterisedHolderNodeUI( GafferUI.NodeUI ) :
def __init__( self, node, readOnly=False, **kw ) :
column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 )
GafferUI.NodeUI.__init__( self, node, column, **kw )
headerVisible = True
parameterised = self.node().getParameterised()[0]
with IECore.IgnoredExceptions( KeyError ) :
headerVisible = parameterised.userData()["UI"]["headerVisible"].value
with column :
if headerVisible :
with GafferUI.ListContainer( orientation = GafferUI.ListContainer.Orientation.Horizontal ) :
GafferUI.Spacer( imath.V2i( 10 ), parenting = { "expand" : True } )
toolButton = GafferCortexUI.ToolParameterValueWidget( self.node().parameterHandler() )
toolButton.plugValueWidget().setReadOnly( readOnly )
_InfoButton( node )
with GafferUI.ScrolledContainer( horizontalMode=GafferUI.ScrollMode.Never, borderWidth=4 ) :
self.__parameterValueWidget = GafferCortexUI.CompoundParameterValueWidget( self.node().parameterHandler(), collapsible = False )
self.setReadOnly( readOnly )
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
GafferUI.NodeUI.setReadOnly( self, readOnly )
self.__parameterValueWidget.plugValueWidget().setReadOnly( readOnly )
for nodeType in __nodeTypes :
GafferUI.NodeUI.registerNodeUI( nodeType, _ParameterisedHolderNodeUI )
##########################################################################
# Info button
##########################################################################
## \todo We might want to think about using this for all NodeUIs, since it
# relies only on Metadata which should be available for all node types.
class _InfoButton( GafferUI.Button ) :
def __init__( self, node ) :
GafferUI.Button.__init__( self, image="info.png", hasFrame=False )
self.__node = node
self.__window = None
self.clickedSignal().connect( Gaffer.WeakMethod( self.__clicked ), scoped = False )
def getToolTip( self ) :
result = GafferUI.Button.getToolTip( self )
if result :
return result
result = IECore.StringUtil.wrap( self.__infoText(), 75 )
return result
def __infoText( self ) :
## \todo: NodeUI should provide setContext()/getContext() methods
## and we should use those to get the proper context here.
context = self.__node.scriptNode().context() if self.__node.scriptNode() else Gaffer.Context.current()
with context :
result = Gaffer.Metadata.value( self.__node, "description" ) or ""
summary = Gaffer.Metadata.value( self.__node, "summary" )
if summary :
if result :
result += "\n\n"
result += summary
return result
def __clicked( self, button ) :
if self.__window is None :
with GafferUI.Window( "Info", borderWidth=8 ) as self.__window :
GafferUI.MultiLineTextWidget( editable = False )
self.ancestor( GafferUI.Window ).addChildWindow( self.__window )
self.__window.getChild().setText( self.__infoText() )
self.__window.reveal()
##########################################################################
# Metadata
##########################################################################
def __nodeDescription( node ) :
parameterised = node.getParameterised()[0]
if parameterised is None :
return "Hosts Cortex Parameterised classes"
return parameterised.description
def __nodeSummary( node ) :
parameterised = node.getParameterised()[0]
if not isinstance( parameterised, IECore.Op ) :
return ""
node.parameterHandler().setParameterValue()
parameterValues = IECore.ParameterParser().serialise( parameterised.parameters() )
# pipes.quote() has a bug in some python versions where it doesn't quote empty strings.
parameterValues = " ".join( [ pipes.quote( x ) if x else "''" for x in parameterValues ] )
return "Command line equivalent : \n\ngaffer op %s -version %d -arguments %s" % (
parameterised.path,
parameterised.version,
parameterValues,
)
## \todo There should really be a method to map from plug to parameter.
# The logic exists in ParameterisedHolder.plugSet() but isn't public.
def __parameter( plug ) :
parameter = plug.node().parameterHandler().parameter()
for name in plug.relativeName( plug.node() ).split( "." )[1:] :
if not isinstance( parameter, IECore.CompoundParameter ) :
return None
else :
parameter = parameter[name]
return parameter
def __plugDescription( plug ) :
parameter = __parameter( plug )
return parameter.description if parameter else None
def __plugPresetNames( plug ) :
parameter = __parameter( plug )
if not parameter :
return None
presetNames = parameter.presetNames()
if presetNames and isinstance( plug, (
Gaffer.StringPlug,
Gaffer.BoolPlug,
Gaffer.IntPlug,
Gaffer.FloatPlug,
Gaffer.Color3fPlug,
Gaffer.V3fPlug,
) ) :
return IECore.StringVectorData( presetNames )
return None
def __plugPresetValues( plug ) :
parameter = __parameter( plug )
if not parameter :
return None
# make sure to get the values in the same
# order that the names were given.
values = [ parameter.getPresets()[x] for x in parameter.presetNames() ]
if isinstance( plug, Gaffer.StringPlug ) :
return IECore.StringVectorData( [ v.value for v in values ] )
elif isinstance( plug, Gaffer.BoolPlug ) :
return IECore.BoolVectorData( [ v.value for v in values ] )
elif isinstance( plug, Gaffer.IntPlug ) :
return IECore.IntVectorData( [ v.value for v in values ] )
elif isinstance( plug, Gaffer.FloatPlug ) :
return IECore.FloatVectorData( [ v.value for v in values ] )
elif isinstance( plug, Gaffer.Color3fPlug ) :
return IECore.Color3fVectorData( [ v.value for v in values ] )
elif isinstance( plug, Gaffer.V3fPlug ) :
return IECore.V3fVectorData( [ v.value for v in values ] )
return None
def __plugWidgetType( plug ) :
parameter = __parameter( plug )
if parameter and parameter.presetsOnly and __plugPresetNames( plug ) :
return "GafferUI.PresetsPlugValueWidget"
return None
def __plugNoduleType( plug ) :
return "GafferUI::StandardNodule" if isinstance( plug, Gaffer.ObjectPlug ) else ""
for nodeType in __nodeTypes :
Gaffer.Metadata.registerNode(
nodeType,
"description", __nodeDescription,
"summary", __nodeSummary,
plugs = {
"parameters" : [
"nodule:type", "GafferUI::CompoundNodule",
],
"parameters.*..." : [
"description", __plugDescription,
"presetNames", __plugPresetNames,
"presetValues", __plugPresetValues,
"plugValueWidget:type", __plugWidgetType,
"nodule:type", __plugNoduleType,
],
},
)
##########################################################################
# Node menu
##########################################################################
## Appends menu items for the creation of all Parameterised classes found on some searchpaths.
def appendParameterisedHolders( menuDefinition, prefix, searchPathEnvVar, nodeCreator, matchExpression = re.compile( ".*" ) ) :
if isinstance( matchExpression, str ) :
matchExpression = re.compile( fnmatch.translate( matchExpression ) )
menuDefinition.append( prefix, { "subMenu" : IECore.curry( __parameterisedHolderMenu, nodeCreator, searchPathEnvVar, matchExpression ) } )
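# Typical usage (the menu path and search path environment variable below are
# illustrative only) :
#
# appendParameterisedHolders( menuDefinition, "/Cortex/Ops", "IECORE_OP_PATHS", GafferCortex.ParameterisedHolderNode )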
def __parameterisedHolderCreator( nodeCreator, className, classVersion, searchPathEnvVar ) :
nodeName = className.rpartition( "/" )[-1]
node = nodeCreator( nodeName )
node.setParameterised( className, classVersion, searchPathEnvVar )
return node
def __parameterisedHolderMenu( nodeCreator, searchPathEnvVar, matchExpression ) :
c = IECore.ClassLoader.defaultLoader( searchPathEnvVar )
d = IECore.MenuDefinition()
for n in c.classNames() :
if matchExpression.match( n ) :
nc = "/".join( [ IECore.CamelCase.toSpaced( x ) for x in n.split( "/" ) ] )
v = c.getDefaultVersion( n )
d.append( "/" + nc, { "command" : GafferUI.NodeMenu.nodeCreatorWrapper( IECore.curry( __parameterisedHolderCreator, nodeCreator, n, v, searchPathEnvVar ) ) } )
return d
| lucienfostier/gaffer | python/GafferCortexUI/ParameterisedHolderUI.py | Python | bsd-3-clause | 10,409 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class QueryError(Exception):
"""Raised when a BigQuery query fails."""
class TimeoutError(Exception):
"""Raised when an operation takes longer than its specified timeout."""
| endlessm/chromium-browser | third_party/catapult/firefighter/base/exceptions.py | Python | bsd-3-clause | 344 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
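# Fall back to the stock auth.User model when no custom AUTH_USER_MODEL is
# configured.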
user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'DashboardPreferences', fields ['dashboard_id', 'user']
db.create_unique('admin_tools_dashboard_preferences', ['dashboard_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'DashboardPreferences', fields ['dashboard_id', 'user']
db.delete_unique('admin_tools_dashboard_preferences', ['dashboard_id', 'user_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model: {
'Meta': {'object_name': user_model.split('.')[1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dashboard.dashboardpreferences': {
'Meta': {'ordering': "('user',)", 'unique_together': "(('user', 'dashboard_id'),)", 'object_name': 'DashboardPreferences', 'db_table': "'admin_tools_dashboard_preferences'"},
'dashboard_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'data': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model})
}
}
complete_apps = ['dashboard']
| WhySoGeeky/DroidPot | venv/lib/python2.7/site-packages/admin_tools/dashboard/migrations/0003_auto__add_unique_dashboardpreferences_dashboard_id_user.py | Python | mit | 4,467 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Pekka Jääskeläinen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Generate a static HTML from the given template.
#
import sys
try:
from mako.template import Template
from mako.lookup import TemplateLookup
except:
print "Install Mako templates (e.g. easy_install \"Mako>=0.7.4\")"
sys.exit(1)
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print "Usage: generate_html.py source_template.mak destination.html"
        sys.exit(1)
mylookup = TemplateLookup(directories=['.'])
template = Template(filename=sys.argv[1], lookup=mylookup, strict_undefined=False)
output = template.render()
f = open(sys.argv[2], "w+")
f.write(output)
f.close()
| NatTuck/pocl | doc/www/generate_html.py | Python | mit | 1,768 |
"""ACME JSON fields."""
import logging
import pyrfc3339
from acme import jose
logger = logging.getLogger(__name__)
class Fixed(jose.Field):
"""Fixed field."""
def __init__(self, json_name, value):
self.value = value
super(Fixed, self).__init__(
json_name=json_name, default=value, omitempty=False)
def decode(self, value):
if value != self.value:
raise jose.DeserializationError('Expected {0!r}'.format(self.value))
return self.value
def encode(self, value):
if value != self.value:
logger.warning(
'Overriding fixed field (%s) with %r', self.json_name, value)
return value
class RFC3339Field(jose.Field):
"""RFC3339 field encoder/decoder.
Handles decoding/encoding between RFC3339 strings and aware (not
naive) `datetime.datetime` objects
(e.g. ``datetime.datetime.now(pytz.utc)``).
"""
@classmethod
def default_encoder(cls, value):
return pyrfc3339.generate(value)
@classmethod
def default_decoder(cls, value):
try:
return pyrfc3339.parse(value)
except ValueError as error:
raise jose.DeserializationError(error)
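# For example (illustrative only), a field declared as
# ``expires = RFC3339Field(json_name='expires')`` round-trips between the JSON
# string "2015-03-27T00:00:00Z" and an aware ``datetime.datetime`` via
# pyrfc3339.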
class Resource(jose.Field):
"""Resource MITM field."""
def __init__(self, resource_type, *args, **kwargs):
self.resource_type = resource_type
super(Resource, self).__init__(
'resource', default=resource_type, *args, **kwargs)
def decode(self, value):
if value != self.resource_type:
raise jose.DeserializationError(
'Wrong resource type: {0} instead of {1}'.format(
value, self.resource_type))
return value
| nohona/cron-crm | usr/local/certbot/acme/acme/fields.py | Python | gpl-3.0 | 1,745 |
from queue import Queue
import sys
import unittest
from tests.test_bears.TestBear import TestBear
from tests.test_bears.TestBearDep import (TestDepBearBDependsA,
TestDepBearCDependsB,
TestDepBearDependsAAndAA)
from coalib.bearlib.abstractions.Linter import linter
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
from coalib.testing.LocalBearTestHelper import verify_local_bear, execute_bear
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper as Helper
files = ('Everything is invalid/valid/raises error',)
invalidTest = verify_local_bear(TestBear,
valid_files=(),
invalid_files=files,
settings={'result': True})
validTest = verify_local_bear(TestBear,
valid_files=files,
invalid_files=())
class LocalBearCheckResultsTest(Helper):
def setUp(self):
section = Section('')
section.append(Setting('result', 'a, b'))
self.uut = TestBear(section, Queue())
def test_order_ignored(self):
self.check_results(self.uut, ['a', 'b'], ['b', 'a'],
check_order=False)
def test_require_order(self):
with self.assertRaises(AssertionError):
self.check_results(self.uut, ['a', 'b'], ['b', 'a'],
check_order=True)
class LocalBearTestCheckLineResultCountTest(Helper):
def setUp(self):
section = Section('')
section.append(Setting('result', True))
self.uut = TestBear(section, Queue())
def test_check_line_result_count(self):
self.check_line_result_count(self.uut,
['a', '', 'b', ' ', '# abc', '1'],
[1, 1, 1])
class LocalBearTestDependency(Helper):
def setUp(self):
self.section = Section('')
def test_check_results_bear_with_dependency(self):
bear = TestDepBearBDependsA(self.section, Queue())
self.check_results(bear, [], [['settings1',
'settings2',
'settings3',
'settings4']],
settings={'settings1': 'settings1',
'settings2': 'settings2',
'settings3': 'settings3',
'settings4': 'settings4'})
def test_check_results_bear_with_2_deep_dependency(self):
bear = TestDepBearCDependsB(self.section, Queue())
self.check_results(bear, [], [['settings1',
'settings2',
'settings3',
'settings4',
'settings5',
'settings6']],
settings={'settings1': 'settings1',
'settings2': 'settings2',
'settings3': 'settings3',
'settings4': 'settings4',
'settings5': 'settings5',
'settings6': 'settings6'})
def test_check_results_bear_with_two_dependencies(self):
bear = TestDepBearDependsAAndAA(self.section, Queue())
self.check_results(bear, [], [['settings1',
'settings2',
'settings3',
'settings4']],
settings={'settings1': 'settings1',
'settings2': 'settings2',
'settings3': 'settings3',
'settings4': 'settings4'})
class LocalBearTestHelper(unittest.TestCase):
def setUp(self):
section = Section('')
section.append(Setting('exception', True))
self.uut = TestBear(section, Queue())
def test_stdout_stderr_on_linter_test_fail(self):
class TestLinter:
@staticmethod
def process_output(output, filename, file):
pass
@staticmethod
def create_arguments(filename, file, config_file):
code = '\n'.join(['import sys',
"print('hello stdout')",
"print('hello stderr', file=sys.stderr)"])
return '-c', code
# Testing with both stdout and stderr enabled
uut = (linter(sys.executable, use_stdout=True, use_stderr=True)
(TestLinter)
(Section('TEST_SECTION'), Queue()))
try:
with execute_bear(uut, 'filename', ['file']) as result:
raise AssertionError
except AssertionError as ex:
self.assertIn('The program yielded the following output:', str(ex))
self.assertIn('Stdout:', str(ex))
self.assertIn('hello stdout', str(ex))
self.assertIn('Stderr:', str(ex))
self.assertIn('hello stderr', str(ex))
# Testing with only stdout enabled
uut = (linter(sys.executable, use_stdout=True)
(TestLinter)
(Section('TEST_SECTION'), Queue()))
try:
with execute_bear(uut, 'filename', ['file']) as result:
raise AssertionError
except AssertionError as ex:
self.assertIn('The program yielded the following output:', str(ex))
self.assertIn('hello stdout', str(ex))
def test_exception(self):
with self.assertRaises(AssertionError), execute_bear(
self.uut, 'Luke', files[0]) as result:
pass
| CruiseDevice/coala | tests/testing/LocalBearTestHelperTest.py | Python | agpl-3.0 | 5,968 |
# -*- encoding: utf-8 -*-
# Copyright P. Christeas <[email protected]> 2008-2010
# Copyright 2010 OpenERP SA. http://www.openerp.com
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
###############################################################################
def dict_merge(*dicts):
""" Return a dict with all values of dicts
"""
res = {}
for d in dicts:
res.update(d)
return res
def dict_merge2(*dicts):
""" Return a dict with all values of dicts.
If some key appears twice and contains iterable objects, the values
are merged (instead of overwritten).
"""
res = {}
for d in dicts:
for k in d.keys():
if k in res and isinstance(res[k], (list, tuple)):
res[k] = res[k] + d[k]
elif k in res and isinstance(res[k], dict):
res[k].update(d[k])
else:
res[k] = d[k]
return res
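# Worked example (sketch of the behaviour above): when a key appears in more
# than one dict, list/tuple values are concatenated and nested dicts merged:
#   dict_merge2({'a': [1], 'b': {'x': 1}}, {'a': [2], 'b': {'y': 2}, 'c': 3})
#   == {'a': [1, 2], 'b': {'x': 1, 'y': 2}, 'c': 3}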
def dict_filter(srcdic, keys, res=None):
''' Return a copy of srcdic that has only keys set.
If any of keys are missing from srcdic, the result won't have them,
either.
@param res If given, result will be updated there, instead of a new dict.
'''
if res is None:
res = {}
for k in keys:
if k in srcdic:
res[k] = srcdic[k]
return res
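# Worked example (sketch): only the requested keys that actually exist in the
# source dict are copied:
#   dict_filter({'a': 1, 'b': 2}, ('a', 'c')) == {'a': 1}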
#eof
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| crmccreary/openerp_server | openerp/addons/document/dict_tools.py | Python | agpl-3.0 | 2,020 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file contains plotting tools for NLP experiment results.
"""
import math
import numpy
import os
import pandas as pd
import plotly.plotly as py
import plotly.tools as tls
from plotly.graph_objs import (
Data,
ErrorY,
Figure,
Font,
Heatmap,
Layout,
Margin,
Scatter,
XAxis,
YAxis)
class PlotNLP():
"""Class to plot evaluation metrics for NLP experiments."""
def __init__(self,
apiKey=None,
username=None,
experimentName="experiment"):
# Instantiate API credentials.
try:
self.apiKey = apiKey if apiKey else os.environ["PLOTLY_API_KEY"]
    except KeyError:
print ("Missing PLOTLY_API_KEY environment variable. If you have a "
"key, set it with $ export PLOTLY_API_KEY=api_key\n"
"You can retrieve a key by registering for the Plotly API at "
"http://www.plot.ly")
raise OSError("Missing API key.")
try:
self.username = username if username else os.environ["PLOTLY_USERNAME"]
    except KeyError:
print ("Missing PLOTLY_USERNAME environment variable. If you have a "
"username, set it with $ export PLOTLY_USERNAME=username\n"
"You can sign up for the Plotly API at http://www.plot.ly")
raise OSError("Missing username.")
py.sign_in(self.username, self.apiKey)
self.experimentName = experimentName
@staticmethod
def getDataFrame(dataPath):
"""Get pandas dataframe of the results CSV."""
    try:
      return pd.read_csv(dataPath)
    except IOError:
      print "Invalid data path to file"
      return
@staticmethod
def interpretConfusionMatrixData(dataFrame, normalize):
"""Parse pandas dataframe into confusion matrix format."""
labels = dataFrame.columns.values.tolist()[:-1]
values = map(list, dataFrame.values)
for i, row in enumerate(values):
values[i] = [v/row[-1] for v in row[:-1]] if normalize else row[:-1]
cm = {"x":labels,
"y":labels[:-1],
"z":values[:-1]
}
return cm
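  # Illustrative note on the layout this parser assumes: each row of the CSV
  # carries the per-label counts followed by a trailing row total (row[-1]);
  # normalize=True divides the counts by that total, so a row "8, 2, 10"
  # becomes "0.8, 0.2". The final column name and the final row of the frame
  # are treated as totals and left out of the heatmap (labels[:-1], values[:-1]).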
def plotCategoryConfusionMatrix(self, data, normalize=True):
"""
Plots the confusion matrix of the input classifications dataframe.
@param data (pandas DF) The confusion matrix.
@param normalize (bool) True will normalize the confusion matrix
values for the total number of actual classifications per label. Thus
the cm values are 0.0 to 1.0.
"""
xyzData = self.interpretConfusionMatrixData(data, normalize)
data = Data([Heatmap(z=xyzData["z"],
x=xyzData["x"],
y=xyzData["y"],
colorscale='YIGnBu')])
layout = Layout(
title='Confusion matrix for ' + self.experimentName,
xaxis=XAxis(
title='Predicted label',
side='top',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=YAxis(
title='True label',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
),
autorange='reversed'
),
barmode='overlay',
autosize=True,
width=1000,
height=1000,
margin=Margin(
l=200,
r=80,
b=80,
t=450
)
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig)
print "Confusion matrix URL: ", plot_url
def plotConfusionMatrix(self, dataFrame, name):
"""
@param data (pandas DF) The confusion matrix.
@param name (str)
"""
labels = dataFrame.columns.values.tolist()
values = map(list, dataFrame.values)
data = Data([Heatmap(z=values,
x=labels,
y=labels,
colorscale='YIGnBu')])
layout = Layout(
title='Confusion Matrix for ' + name,
xaxis=XAxis(
title='',
side='top',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=YAxis(
title='',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
),
autorange='reversed'
),
barmode='overlay',
autosize=True,
width=1000,
height=1000,
margin=Margin(
l=80,
r=80,
b=120,
t=140
)
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig)
print "Confusion matrix URL: ", plot_url
def plotRegression(self, xData, yData, title, axisTitles=("x","y"), zData=None, line=None):
"""
Plot a regression of the input data vectors; these must be the same length
lists, where the items are scalar values.
Note: line defaults to None b/c there are better line fitting tools online
(https://plot.ly/how-to-create-a-line-of-best-fits/)
"""
# TODO: use zData for 3D plots
assert(len(xData) == len(yData))
if zData: assert(len(xData) == len(zData))
if line:
assert line in ("linear", "spline")
trace = Scatter(
x=xData,
y=yData,
mode='lines+markers',
marker=dict(
color='rgb(128, 0, 128)',
size=8,
),
line=dict(
shape=line,
color='rgb(128, 0, 128)',
width=1
)
)
else:
trace = Scatter(
x=xData,
y=yData,
mode='markers',
marker=dict(
color='rgb(128, 0, 128)',
size=8,
)
)
layout = Layout(
title=title,
xaxis=dict(
title=axisTitles[0],
),
yaxis=dict(
title=axisTitles[1],
)
)
data = [trace]
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig)
print "Regression plot URL: ", plot_url
def plotCategoryAccuracies(self, trialAccuracies, trainSizes):
"""
Shows the accuracy for the categories at a certain training size
@param trialAccuracies (dict) A dictionary of dictionaries. For each
train size, there is a dictionary that maps a category to a list of
accuracies for that category.
@param trainSizes (list) Size of training set for each trial.
"""
sizes = sorted(set(trainSizes))
size_sqrt = math.sqrt(len(sizes))
subplotDimension = int(math.ceil(size_sqrt))
rows = subplotDimension
cols = subplotDimension
if len(sizes) <= subplotDimension * (subplotDimension - 1):
rows -= 1
fig = tls.make_subplots(rows=rows, cols=cols,
shared_xaxes=True, shared_yaxes=True, print_grid=False)
num_categories = 0
for i, s in enumerate(sizes):
# 1-indexed
col = i % cols + 1
row = (i - col + 1) / cols + 1
classificationAccuracies = trialAccuracies[s]
num_categories = max(num_categories,len(classificationAccuracies.keys()))
x = []
y = []
std = []
for label, acc in classificationAccuracies.iteritems():
x.append(label)
y.append(numpy.mean(acc))
std.append(numpy.std(acc))
trace = Scatter(
x=x,
y=y,
name=s,
mode='markers',
error_y=ErrorY(
type='data',
symmetric=False,
array=std,
arrayminus=std,
visible=True
)
)
fig.append_trace(trace, row, col)
fig["layout"]["title"] = "Accuracies for category by training size"
half_way_cols = int(math.ceil(cols / 2.0))
half_way_rows = int(math.ceil(rows / 2.0))
fig["layout"]["xaxis{}".format(half_way_cols)]["title"] = "Category Label"
fig["layout"]["yaxis{}".format(half_way_rows)]["title"] = "Accuracy"
for i in xrange(1, cols + 1):
fig["layout"]["xaxis{}".format(i)]["tickangle"] = -45
fig["layout"]["xaxis{}".format(i)]["nticks"] = num_categories * 2
if i <= rows:
fig["layout"]["yaxis{}".format(i)]["range"] = [-.1, 1.1]
fig["layout"]["margin"] = {"b" : 120}
plot_url = py.plot(fig)
print "Category Accuracies URL: ", plot_url
def plotCumulativeAccuracies(self, classificationAccuracies, trainSizes):
"""
Creates scatter plots that show the accuracy for each category at a
certain training size
@param classificationAccuracies (dict) Maps a category label to a list of
lists of accuracies. Each item in the key is a list of accuracies for
a specific training size, ordered by increasing training size.
@param trainSizes (list) Sizes of training sets for trials.
"""
# Convert list of list of accuracies to list of means
classificationSummaries = [(label, map(numpy.mean, acc))
for label, acc in classificationAccuracies.iteritems()]
data = []
sizes = sorted(set(trainSizes))
for label, summary in classificationSummaries:
data.append(Scatter(x=sizes, y=summary, name=label))
data = Data(data)
layout = Layout(
title='Cumulative Accuracies for ' + self.experimentName,
xaxis=XAxis(
title='Training size',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=YAxis(
title='Accuracy',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig)
print "Cumulative Accuracies URL: ", plot_url
def plotBucketsMetrics(
self, metricsDict, comboMethod, numIterations, modelName):
"""
@param metricsDicts (dict) Arrays for the min, mean, and max of each
metric.
@param comboMethod (str) Concatenation method from the experiment.
@param numIterations (str) Number of inference steps run.
@param modelName (str) Name of tested model.
"""
xData = range(1, numIterations+1)
for metricName, results in metricsDict.iteritems():
if metricName == "totalRanked": continue
minTrace = Scatter(
x = xData,
y = results[0],
mode = "lines+markers",
name = "min"
)
meanTrace = Scatter(
x = xData,
y = results[1],
mode = "lines+markers",
name = "mean"
)
maxTrace = Scatter(
x = xData,
y = results[2],
mode = "lines+markers",
name = "max"
)
data = [minTrace, meanTrace, maxTrace]
layout = Layout(
title="Buckets Experiment for {} ('{}' concatenation) ".format(
modelName, comboMethod),
xaxis=XAxis(
title="Number of samples queried",
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
),
dtick=1
),
yaxis=YAxis(
title=metricName,
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = Figure(data=data, layout=layout)
plotUrl = py.plot(fig)
print "Plot URL for {}: {}".format(metricName, plotUrl)
| ywcui1990/nupic.research | htmresearch/support/nlp_classification_plotting.py | Python | agpl-3.0 | 12,290 |
import pos_multi_session_models
import controllers
| bmya/pos-addons | pos_multi_session/__init__.py | Python | lgpl-3.0 | 51 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import Mock, patch
import jenkins
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.jenkins.hooks.jenkins import JenkinsHook
from airflow.providers.jenkins.operators.jenkins_job_trigger import JenkinsJobTriggerOperator
class TestJenkinsOperator(unittest.TestCase):
@parameterized.expand(
[
(
"dict params",
{'a_param': 'blip', 'another_param': '42'},
),
(
"string params",
'{"second_param": "beep", "third_param": "153"}',
),
(
"list params",
['final_one', 'bop', 'real_final', 'eggs'],
),
]
)
def test_execute(self, _, parameters):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_build_info.return_value = {
'result': 'SUCCESS',
'url': 'http://aaa.fake-url.com/congratulation/its-a-job',
}
jenkins_mock.build_job_url.return_value = 'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
with patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked, patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
mock_make_request.side_effect = [
{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}},
]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
jenkins_connection_id="fake_jenkins_connection",
# The hook is mocked, this connection won't be used
task_id="operator_test",
job_name="a_job_on_jenkins",
parameters=parameters,
sleep_time=1,
)
operator.execute(None)
assert jenkins_mock.get_build_info.call_count == 1
jenkins_mock.get_build_info.assert_called_once_with(name='a_job_on_jenkins', number='1')
@parameterized.expand(
[
(
"dict params",
{'a_param': 'blip', 'another_param': '42'},
),
(
"string params",
'{"second_param": "beep", "third_param": "153"}',
),
(
"list params",
['final_one', 'bop', 'real_final', 'eggs'],
),
]
)
def test_execute_job_polling_loop(self, _, parameters):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_job_info.return_value = {'nextBuildNumber': '1'}
jenkins_mock.get_build_info.side_effect = [
{'result': None},
{'result': 'SUCCESS', 'url': 'http://aaa.fake-url.com/congratulation/its-a-job'},
]
jenkins_mock.build_job_url.return_value = 'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
with patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked, patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
mock_make_request.side_effect = [
{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}},
]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="operator_test",
job_name="a_job_on_jenkins",
jenkins_connection_id="fake_jenkins_connection",
# The hook is mocked, this connection won't be used
parameters=parameters,
sleep_time=1,
)
operator.execute(None)
assert jenkins_mock.get_build_info.call_count == 2
@parameterized.expand(
[
(
"dict params",
{'a_param': 'blip', 'another_param': '42'},
),
(
"string params",
'{"second_param": "beep", "third_param": "153"}',
),
(
"list params",
['final_one', 'bop', 'real_final', 'eggs'],
),
]
)
def test_execute_job_failure(self, _, parameters):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_job_info.return_value = {'nextBuildNumber': '1'}
jenkins_mock.get_build_info.return_value = {
'result': 'FAILURE',
'url': 'http://aaa.fake-url.com/congratulation/its-a-job',
}
jenkins_mock.build_job_url.return_value = 'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
with patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked, patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
mock_make_request.side_effect = [
{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}},
]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="operator_test",
job_name="a_job_on_jenkins",
parameters=parameters,
jenkins_connection_id="fake_jenkins_connection",
# The hook is mocked, this connection won't be used
sleep_time=1,
)
with pytest.raises(AirflowException):
operator.execute(None)
@parameterized.expand(
[
(
'SUCCESS',
['SUCCESS', 'UNSTABLE'],
),
(
'UNSTABLE',
['SUCCESS', 'UNSTABLE'],
),
(
'UNSTABLE',
['UNSTABLE'],
),
(
'SUCCESS',
None,
),
]
)
def test_allowed_jenkins_states(self, state, allowed_jenkins_states):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_job_info.return_value = {'nextBuildNumber': '1'}
jenkins_mock.get_build_info.return_value = {
'result': state,
'url': 'http://aaa.fake-url.com/congratulation/its-a-job',
}
jenkins_mock.build_job_url.return_value = 'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
with patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked, patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
mock_make_request.side_effect = [
{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}},
]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="operator_test",
job_name="a_job_on_jenkins",
jenkins_connection_id="fake_jenkins_connection",
allowed_jenkins_states=allowed_jenkins_states,
# The hook is mocked, this connection won't be used
sleep_time=1,
)
try:
operator.execute(None)
except AirflowException:
pytest.fail(f'Job failed with state={state} while allowed states={allowed_jenkins_states}')
@parameterized.expand(
[
(
'FAILURE',
['SUCCESS', 'UNSTABLE'],
),
(
'UNSTABLE',
['SUCCESS'],
),
(
'SUCCESS',
['UNSTABLE'],
),
(
'FAILURE',
None,
),
(
'UNSTABLE',
None,
),
]
)
def test_allowed_jenkins_states_failure(self, state, allowed_jenkins_states):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_job_info.return_value = {'nextBuildNumber': '1'}
jenkins_mock.get_build_info.return_value = {
'result': state,
'url': 'http://aaa.fake-url.com/congratulation/its-a-job',
}
jenkins_mock.build_job_url.return_value = 'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
with patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked, patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
mock_make_request.side_effect = [
{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}},
]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="operator_test",
job_name="a_job_on_jenkins",
jenkins_connection_id="fake_jenkins_connection",
allowed_jenkins_states=allowed_jenkins_states,
# The hook is mocked, this connection won't be used
sleep_time=1,
)
with pytest.raises(AirflowException):
operator.execute(None)
def test_build_job_request_settings(self):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret', timeout=2)
jenkins_mock.build_job_url.return_value = 'http://apache.org'
with patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="build_job_test",
job_name="a_job_on_jenkins",
jenkins_connection_id="fake_jenkins_connection",
)
operator.build_job(jenkins_mock)
mock_request = mock_make_request.call_args_list[0][0][1]
assert mock_request.method == 'POST'
assert mock_request.url == 'http://apache.org'
| apache/incubator-airflow | tests/providers/jenkins/operators/test_jenkins_job_trigger.py | Python | apache-2.0 | 12,070 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import logging
import sys
import traceback
class PageTestResults(object):
def __init__(self, output_stream=None):
super(PageTestResults, self).__init__()
self._output_stream = output_stream
self.pages_that_had_errors = set()
self.pages_that_had_failures = set()
self.successes = []
self.errors = []
self.failures = []
self.skipped = []
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
if isinstance(v, collections.Container):
v = copy.copy(v)
setattr(result, k, v)
return result
@property
def pages_that_had_errors_or_failures(self):
return self.pages_that_had_errors.union(
self.pages_that_had_failures)
def _GetStringFromExcInfo(self, err):
return ''.join(traceback.format_exception(*err))
def StartTest(self, page):
pass
def StopTest(self, page):
pass
def AddError(self, page, err):
self.pages_that_had_errors.add(page)
self.errors.append((page, self._GetStringFromExcInfo(err)))
def AddFailure(self, page, err):
self.pages_that_had_failures.add(page)
self.failures.append((page, self._GetStringFromExcInfo(err)))
def AddSkip(self, page, reason):
self.skipped.append((page, reason))
def AddSuccess(self, page):
self.successes.append(page)
def AddFailureMessage(self, page, message):
try:
raise Exception(message)
except Exception:
self.AddFailure(page, sys.exc_info())
def AddErrorMessage(self, page, message):
try:
raise Exception(message)
except Exception:
self.AddError(page, sys.exc_info())
def PrintSummary(self):
if self.failures:
logging.error('Failed pages:\n%s', '\n'.join(
p.display_name for p in zip(*self.failures)[0]))
if self.errors:
logging.error('Errored pages:\n%s', '\n'.join(
p.display_name for p in zip(*self.errors)[0]))
if self.skipped:
logging.warning('Skipped pages:\n%s', '\n'.join(
p.display_name for p in zip(*self.skipped)[0]))
| boundarydevices/android_external_chromium_org | tools/telemetry/telemetry/results/page_test_results.py | Python | bsd-3-clause | 2,264 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.data import kb
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.NORMAL
def dependencies():
pass
def tamper(payload, **kwargs):
"""
    Replaces the space character after SQL statements with a valid random blank character.
    Afterwards replaces the '=' character with the LIKE operator
Requirement:
* Blue Coat SGOS with WAF activated as documented in
https://kb.bluecoat.com/index?page=content&id=FAQ2147
Tested against:
* MySQL 5.1, SGOS
Notes:
* Useful to bypass Blue Coat's recommended WAF rule configuration
>>> tamper('SELECT id FROM users WHERE id = 1')
'SELECT%09id FROM%09users WHERE%09id LIKE 1'
"""
def process(match):
word = match.group('word')
if word.upper() in kb.keywords:
return match.group().replace(word, "%s%%09" % word)
else:
return match.group()
retVal = payload
if payload:
retVal = re.sub(r"\b(?P<word>[A-Z_]+)(?=[^\w(]|\Z)", lambda match: process(match), retVal)
retVal = re.sub(r"\s*=\s*", " LIKE ", retVal)
retVal = retVal.replace("%09 ", "%09")
return retVal
| V11/volcano | server/sqlmap/tamper/bluecoat.py | Python | mit | 1,316 |
from django.conf.urls import patterns, url
from django.contrib.admin.views.decorators import staff_member_required
from oscar.core.application import Application
from . import views
class PayFlowDashboardApplication(Application):
name = None
list_view = views.TransactionListView
detail_view = views.TransactionDetailView
def get_urls(self):
urlpatterns = patterns('',
url(r'^transactions/$', self.list_view.as_view(),
name='paypal-payflow-list'),
url(r'^transactions/(?P<pk>\d+)/$', self.detail_view.as_view(),
name='paypal-payflow-detail'),
)
return self.post_process_urls(urlpatterns)
def get_url_decorator(self, url_name):
return staff_member_required
application = PayFlowDashboardApplication()
| aarticianpc/greenpointtrees | src/paypal/payflow/dashboard/app.py | Python | mit | 816 |
import sys,os
from blur.build import *
path = os.path.dirname(os.path.abspath(__file__))
smoke_path = os.path.join(path,'smoke/qt')
perlqt_path = os.path.join(path,'PerlQt')
win = sys.platform == 'win32'
if not win or os.path.exists('c:/perl/bin/perl.exe'):
# Install the libraries
#if sys.platform != 'win32':
# All_Targets.append(LibInstallTarget("stoneinstall","lib/stone","stone","/usr/lib/"))
# Python module targets
gen_cmd = './generate.pl'
if win:
gen_cmd = 'c:/perl/bin/perl generate.pl'
smoke_generate = StaticTarget( "smoke_generate", smoke_path, gen_cmd, ["classmaker","classes"] )
smoke = QMakeTarget( "smoke", smoke_path, "smoke.pro", [smoke_generate] )
perlqt_lib = QMakeTarget( "perlqt_lib", perlqt_path, "perlqt.pro", [smoke] )
perlqt = Target( "perlqt", path, [perlqt_lib], [] )
RPMTarget("perlqtrpm",'blur-perlqt',path,'../../../rpm/spec/perlqt.spec.template','1.0')
if __name__ == "__main__":
build()
| cypod/arsenalsuite | cpp/lib/perlqt/build.py | Python | gpl-2.0 | 943 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.model.document import Document
from frappe.utils import comma_and
class ProgramEnrollment(Document):
def validate(self):
self.validate_duplication()
if not self.student_name:
self.student_name = frappe.db.get_value("Student", self.student, "title")
def on_submit(self):
self.update_student_joining_date()
self.make_fee_records()
def validate_duplication(self):
enrollment = frappe.db.sql("""select name from `tabProgram Enrollment` where student= %s and program= %s
and academic_year= %s and docstatus<2 and name != %s""", (self.student, self.program, self.academic_year, self.name))
if enrollment:
frappe.throw(_("Student is already enrolled."))
def update_student_joining_date(self):
date = frappe.db.sql("select min(enrollment_date) from `tabProgram Enrollment` where student= %s", self.student)
frappe.db.set_value("Student", self.student, "joining_date", date)
def make_fee_records(self):
from erpnext.schools.api import get_fee_components
fee_list = []
for d in self.fees:
fee_components = get_fee_components(d.fee_structure)
if fee_components:
fees = frappe.new_doc("Fees")
fees.update({
"student": self.student,
"academic_year": self.academic_year,
"academic_term": d.academic_term,
"fee_structure": d.fee_structure,
"program": self.program,
"due_date": d.due_date,
"student_name": self.student_name,
"program_enrollment": self.name,
"components": fee_components
})
fees.save()
fees.submit()
fee_list.append(fees.name)
if fee_list:
fee_list = ["""<a href="#Form/Fees/%s" target="_blank">%s</a>""" % \
(fee, fee) for fee in fee_list]
msgprint(_("Fee Records Created - {0}").format(comma_and(fee_list))) | bhupennewalkar1337/erpnext | erpnext/schools/doctype/program_enrollment/program_enrollment.py | Python | gpl-3.0 | 1,966 |
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.db.models import signals
from django.utils.translation import ugettext_noop as _
import logging
logger = logging.getLogger(__name__)
if "notification" in settings.INSTALLED_APPS:
import notification
def create_notice_types(app, created_models, verbosity, **kwargs):
notification.models.NoticeType.create("layer_uploaded", _("Layer Uploaded"), _("A layer was uploaded"))
notification.models.NoticeType.create("layer_comment", _("Comment on Layer"), _("A layer was commented on"))
notification.models.NoticeType.create("layer_rated", _("Rating for Layer"), _("A rating was given to a layer"))
signals.post_syncdb.connect(create_notice_types, sender=notification)
logger.info("Notifications Configured for geonode.layers.managment.commands")
else:
logger.info("Skipping creation of NoticeTypes for geonode.layers.management.commands, since notification app was not found.")
| GISPPU/GrenadaLandInformation | geonode/layers/management/__init__.py | Python | gpl-3.0 | 1,781 |
"""Config flow for Kostal Plenticore Solar Inverter integration."""
import asyncio
import logging
from aiohttp.client_exceptions import ClientError
from kostal.plenticore import PlenticoreApiClient, PlenticoreAuthenticationException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_BASE, CONF_HOST, CONF_PASSWORD
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Required(CONF_PASSWORD): str,
}
)
async def test_connection(hass: HomeAssistant, data) -> str:
"""Test the connection to the inverter.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
session = async_get_clientsession(hass)
async with PlenticoreApiClient(session, data["host"]) as client:
await client.login(data["password"])
values = await client.get_setting_values("scb:network", "Hostname")
return values["scb:network"]["Hostname"]
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Kostal Plenticore Solar Inverter."""
VERSION = 1
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
hostname = None
if user_input is not None:
self._async_abort_entries_match({CONF_HOST: user_input[CONF_HOST]})
try:
hostname = await test_connection(self.hass, user_input)
except PlenticoreAuthenticationException as ex:
errors[CONF_PASSWORD] = "invalid_auth"
_LOGGER.error("Error response: %s", ex)
except (ClientError, asyncio.TimeoutError):
errors[CONF_HOST] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors[CONF_BASE] = "unknown"
if not errors:
return self.async_create_entry(title=hostname, data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
| aronsky/home-assistant | homeassistant/components/kostal_plenticore/config_flow.py | Python | apache-2.0 | 2,305 |
"""
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params, cv=5)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| vortex-ape/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | Python | bsd-3-clause | 2,013 |
import unittest
from node.openbazaar import create_argument_parser
from node.openbazaar_daemon import OpenBazaarContext
class TestLauncher(unittest.TestCase):
def setUp(self):
self.default_ctx = OpenBazaarContext.create_default_instance()
def test_argument_parser(self):
parser = create_argument_parser()
# base case
arguments = parser.parse_args(['start'])
self.assertEqual(arguments.command, 'start')
self.assertEqual(arguments.server_ip, self.default_ctx.server_ip)
self.assertEqual(arguments.server_port, self.default_ctx.server_port)
self.assertEqual(arguments.http_ip, self.default_ctx.http_ip)
self.assertEqual(arguments.http_port, self.default_ctx.http_port)
self.assertEqual(arguments.log, self.default_ctx.log_path)
self.assertEqual(arguments.log_level, self.default_ctx.log_level)
self.assertEqual(arguments.dev_mode, self.default_ctx.dev_mode)
self.assertEqual(arguments.dev_nodes, self.default_ctx.dev_nodes)
self.assertEqual(arguments.db_path, self.default_ctx.db_path)
self.assertEqual(arguments.disable_sqlite_crypt, self.default_ctx.disable_sqlite_crypt)
self.assertEqual(arguments.bm_user, self.default_ctx.bm_user)
self.assertEqual(arguments.bm_pass, self.default_ctx.bm_pass)
self.assertEqual(arguments.bm_port, self.default_ctx.bm_port)
self.assertEqual(arguments.market_id, self.default_ctx.market_id)
self.assertEqual(arguments.disable_upnp, self.default_ctx.disable_upnp)
self.assertEqual(arguments.disable_stun_check, self.default_ctx.disable_stun_check)
self.assertEqual(arguments.seed_mode, self.default_ctx.seed_mode)
self.assertEqual(arguments.disable_open_browser, self.default_ctx.disable_open_browser)
self.assertEqual(arguments.config_file, None)
self.assertEqual(arguments.enable_ip_checker, self.default_ctx.enable_ip_checker)
# todo: add more cases to make sure arguments are being parsed correctly.
if __name__ == "__main__":
unittest.main()
| Renelvon/OpenBazaar | tests/test_launcher.py | Python | mit | 2,106 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Mehmet Atakan Gürkan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (probably in a file named COPYING).
# If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function
from math import floor, fmod, fabs, atan2, atan, asin, sqrt, sin, cos
import datetime, calendar, ephem, pytz, pyx
from datetime import timedelta as TD
from pyx import path, canvas, color, style, text, graph
from scipy import optimize
PI = atan(1)*4.0
class PyEph_body() :
def __init__(self, pyephem_name, clr=color.cmyk.Gray,
symbol='~', tsize='small') :
self.body = pyephem_name
self.color = clr
self.symbol = symbol
self.tsize = tsize
self.rising = []
self.rising_text = []
self.transit = []
self.transit_text = []
self.setting = []
self.setting_text = []
def update_rising(self, obs) :
self.rising.append(obs.next_rising(self.body))
def update_transit(self, obs) :
self.transit.append(obs.next_transit(self.body))
def update_setting(self, obs) :
self.setting.append(obs.next_setting(self.body))
def to_chart_coord(event_time, chart) :
diff = event_time - chart.ULcorn
X = fmod(diff,1)
if X<0.0 : X+= 1.0
Y = floor(diff)
X *= chart.width / (chart.URcorn - chart.ULcorn)
Y *= chart.height / (chart.ULcorn - chart.LLcorn)
Y += chart.height
return [X, Y]
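# Worked example for to_chart_coord (sketch; assumes the chart object exposes
# ULcorn/URcorn/LLcorn as ephem-style dates measured in days, plus width and
# height in chart units): the fractional part of (event_time - ULcorn) sets the
# X position within the one-day-wide column and the whole days elapsed set the
# Y row, measured down from chart.height. With ULcorn=0.0, URcorn=1.0,
# LLcorn=30.0, width=10, height=30, an event at time 2.25 maps to
# X = 0.25 * 10 / 1.0 = 2.5 and Y = 30 + 2 * 30 / (0.0 - 30.0) = 28.0.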
def event_to_path(event, chart, do_check=True, xoffset=0.0, yoffset=0.0) :
'''accepts an array of points representing an event, converts this
event to a path'''
x, y = to_chart_coord(event[0], chart)
p = path.path(path.moveto(x,y))
for e in event[1:] :
old_x = x
old_y = y
x, y = to_chart_coord(e, chart)
if (do_check == False or
(fabs(old_x - x) < chart.width/2.0 and
fabs(old_y - y) < chart.height/2.0)) :
p.append(path.lineto(x+xoffset, y+yoffset))
else :
p.append(path.moveto(x+xoffset, y+yoffset))
return p
#def event_to_path_no_check(event, chart) :
# '''accepts an array of points representing an event, converts this
# event to a path. this version does not check for big jumps in x
# coordinate'''
# x, y = to_chart_coord(event[0], chart)
# p = path.path(path.moveto(x,y))
# for e in event[1:] :
# old_x = x
# x, y = to_chart_coord(e, chart)
# p.append(path.lineto(x, y))
# return p
#
#def event_to_path_no_check_with_offset(event, chart, xoffset=0.0, yoffset=0.0) :
# '''accepts an array of points representing an event, converts this
# event to a path. this version does not check for big jumps in x
# coordinate'''
# x, y = to_chart_coord(event[0], chart)
# p = path.path(path.moveto(x+xoffset,y+yoffset))
# for e in event[1:] :
# old_x = x
# x, y = to_chart_coord(e, chart)
# p.append(path.lineto(x+xoffset, y+yoffset))
# return p
| digitalvapor/PySkyAlmanac | sabanci/almanac_utils.py | Python | gpl-3.0 | 3,563 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
*** Description ***
A pygame drum computer with recording and playback functionality.
The drum computer is completely controlled by the keyboard, no MIDI
hardware is required. You only have to specify an sf2 file.
*** Keys ***
r Enter record mode
p Exit record mode and play
Escape Quit
On the keypad:
0 Snare
1 Base
2 Low tom
3 Middle tom
4 Crash
5 Hihat closed
6 Hihat opened
9 Ride
Enter High tom
"""
import pygame
from pygame.locals import *
from mingus.containers import *
from mingus.midi import fluidsynth
import sys
SF2 = 'soundfont.sf2'
PAD_PLACEMENT = [ # high, mid, low, snare, bass, crash, ride, open, close
(190, 20),
(330, 20),
(470, 20),
(330, 160),
(190, 300),
(20, 20),
(470, 160),
(20, 160),
(20, 300),
]
FADEOUT = 0.125 # coloration fadout time (1 tick = 0.001)
def load_img(name):
"""Load image and return an image object"""
fullname = name
try:
image = pygame.image.load(fullname)
if image.get_alpha() is None:
image = image.convert()
else:
image = image.convert_alpha()
except pygame.error, message:
print "Error: couldn't load image: ", fullname
raise SystemExit, message
return (image, image.get_rect())
if not fluidsynth.init(SF2):
print "Couldn't load soundfont", SF2
sys.exit(1)
pygame.init()
screen = pygame.display.set_mode((610, 500))
(pad, pad_rect) = load_img('pad.png')
hit = pygame.Surface(pad_rect.size) # Used to display which pad was hit
track = pygame.Surface((610, 45))
track.fill((0, 0, 0))
pygame.draw.rect(track, (255, 0, 0), track.get_rect(), 1)
for y in range(1, 9):
pygame.draw.line(track, (255, 0, 0), (0, y * 5), (610, y * 5), 1)
pygame.display.set_caption('mingus drum')
def play_note(note):
"""play_note determines which pad was 'hit' and send the
play request to fluidsynth"""
index = None
if note == Note('B', 2):
index = 0
elif note == Note('A', 2):
index = 1
elif note == Note('G', 2):
index = 2
elif note == Note('E', 2):
index = 3
elif note == Note('C', 2):
index = 4
elif note == Note('A', 3):
index = 5
elif note == Note('B', 3):
index = 6
elif note == Note('A#', 2):
index = 7
elif note == Note('G#', 2):
index = 8
    if index != None:
        playing.append([index, tick])
        if status == 'record':
            recorded.append([index, tick, note])
            recorded_buffer.append([index, tick])
fluidsynth.play_Note(note, 9, 100)
tick = 0.0
quit = False
# The left and right sides of the track representation. Used as a window onto
# the recording
low_barrier = 0.0
high_barrier = 0.50
playing = [] # Notes playing right now
recorded = [] # Recorded notes. A list of all the notes entered.
recorded_buffer = [] # Recorded notes that are in the display window (ie. their
# tick is between low and high barrier)
played = 0 # Used to keep track of the place in the recording, when status is
# 'play'
buffered = 0 # Used to keep track of the buffer, when status is 'play'
need_buffer = True # This is only False when status is 'play' and there are no
# more notes to buffer
status = 'stopped'
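# Sketch of the window arithmetic used in the main loop below: a note's tick is
# mapped onto the 610-pixel-wide track with
#   x = ((tick - low_barrier) / (high_barrier - low_barrier)) * 610
# so with the initial window (0.0, 0.50) a note at tick 0.25 is drawn at x = 305;
# once tick passes high_barrier, the window slides forward by roughly its own
# length (low_barrier jumps to the current tick).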
while not quit:
screen.fill((0, 0, 0))
# Blit drum pads
for (x, y) in PAD_PLACEMENT:
screen.blit(pad, (x, y))
# Check each playing note
for note in playing:
diff = max(0, tick - note[1])
# If the note should be removed, remove it. Otherwise blit a fading
# 'hit' surface.
if diff > FADEOUT:
playing.remove(note)
else:
hit.fill((0, ((FADEOUT - diff) / FADEOUT) * 155, 0))
screen.blit(hit, PAD_PLACEMENT[note[0]], None, BLEND_SUB)
# Check if the view window onto the track has to be changed
if tick > high_barrier:
high_barrier += high_barrier - low_barrier
low_barrier = tick
track_c = track.copy()
# Draw a line representing the current place on the track surface
current = tick - low_barrier
x = (current / (high_barrier - low_barrier)) * 610
pygame.draw.line(track_c, (0, 255, 0), (x, 0), (x, 50), 2)
# Blit all the notes in recorded_buffer onto the track surface as little
# squeares or remove the note if it's outside the viewing window
for r in recorded_buffer:
if r[1] < low_barrier:
recorded_buffer.remove(r)
else:
y = r[0] * 5
x = ((r[1] - low_barrier) / (high_barrier - low_barrier)) * 610
pygame.draw.rect(track_c, (255, 0, 0), (x, y, 5, 5))
# Blit the track
screen.blit(track_c, (0, 440))
for event in pygame.event.get():
if event.type == QUIT:
quit = True
if event.type == KEYDOWN:
if event.key == K_KP0:
play_note(Note('E', 2)) # snare
elif event.key == K_KP1 or event.key == K_SPACE:
play_note(Note('C', 2)) # bass
elif event.key == K_KP_ENTER:
play_note(Note('B', 2)) # high tom
elif event.key == K_KP2:
play_note(Note('A', 2)) # middle tom
elif event.key == K_KP3:
play_note(Note('G', 2)) # low tom
elif event.key == K_KP4:
play_note(Note('A', 3)) # crash
elif event.key == K_KP5:
play_note(Note('G#', 2)) # hihat closed
elif event.key == K_KP6:
play_note(Note('A#', 2)) # hihat opened
elif event.key == K_KP9:
play_note(Note('B', 3)) # ride
if status == 'record':
if event.key == K_p:
# Starts playing mode, which a lot of variables have to be
# adjusted for
status = 'play'
tick = 0.0
low_barrier = 0.0
high_barrier = 0.50
played = 0
# A new recorded buffer has to be loaded
recorded_buffer = []
buffered = 0
need_buffer = True
for r in recorded:
if r[1] <= 0.50:
recorded_buffer.append([r[0], r[1]])
buffered += 1
else:
break
elif status == 'stopped':
if event.key == K_r:
status = 'record'
if event.key == K_ESCAPE:
quit = True
if status == 'play':
try:
while recorded[played][1] <= tick:
playing.append([recorded[played][0], recorded[played][1]])
fluidsynth.play_Note(recorded[played][2], 9, 100)
played += 1
if played == len(recorded) - 1:
status = 'stopped'
except:
pass
# Update the recorded_buffer
try:
while need_buffer and recorded[buffered][1] <= high_barrier:
recorded_buffer.append([recorded[buffered][0],
recorded[buffered][1]])
buffered += 1
if buffered >= len(recorded) - 1:
buffered = len(recorded) - 1
need_buffer = False
except:
pass
pygame.display.update()
if status != 'stopped':
tick += 0.001
pygame.quit()
| yardex/python-mingus | mingus_examples/pygame-drum/pygame-drum.py | Python | gpl-3.0 | 7,732 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: profitbricks_volume_attachments
short_description: Attach or detach a volume.
description:
- Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
version_added: "2.0"
options:
datacenter:
description:
- The datacenter in which to operate.
required: true
server:
description:
- The name of the server you wish to detach or attach the volume.
required: true
volume:
description:
- The volume name or ID.
required: true
subscription_user:
description:
      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
- wait for the operation to complete before returning
required: false
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
state:
description:
- Indicate desired state of the resource
required: false
default: 'present'
choices: ["present", "absent"]
requirements: [ "profitbricks" ]
author: Matt Baldwin ([email protected])
'''
EXAMPLES = '''
# Attach a Volume
- profitbricks_volume_attachments:
datacenter: Tardis One
server: node002
volume: vol01
wait_timeout: 500
state: present
# Detach a Volume
- profitbricks_volume_attachments:
datacenter: Tardis One
server: node002
volume: vol01
wait_timeout: 500
state: absent
'''
import re
import uuid
import time
HAS_PB_SDK = True
try:
from profitbricks.client import ProfitBricksService, Volume
except ImportError:
HAS_PB_SDK = False
uuid_match = re.compile(
'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
if not promise: return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = profitbricks.get_request(
request_id=promise['requestId'],
status=True)
if operation_result['metadata']['status'] == "DONE":
return
elif operation_result['metadata']['status'] == "FAILED":
raise Exception(
'Request failed to complete ' + msg + ' "' + str(
promise['requestId']) + '" to complete.')
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
def attach_volume(module, profitbricks):
"""
Attaches a volume.
This will attach a volume to the server.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the volume was attached, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
volume = module.params.get('volume')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server= s['id']
break
# Locate UUID for Volume
if not (uuid_match.match(volume)):
volume_list = profitbricks.list_volumes(datacenter)
for v in volume_list['items']:
if volume == v['properties']['name']:
volume = v['id']
break
return profitbricks.attach_volume(datacenter, server, volume)
def detach_volume(module, profitbricks):
"""
Detaches a volume.
This will remove a volume from the server.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the volume was detached, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
volume = module.params.get('volume')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server= s['id']
break
# Locate UUID for Volume
if not (uuid_match.match(volume)):
volume_list = profitbricks.list_volumes(datacenter)
for v in volume_list['items']:
if volume == v['properties']['name']:
volume = v['id']
break
return profitbricks.detach_volume(datacenter, server, volume)
def main():
module = AnsibleModule(
argument_spec=dict(
datacenter=dict(),
server=dict(),
volume=dict(),
subscription_user=dict(),
subscription_password=dict(),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=600),
state=dict(default='present'),
)
)
if not HAS_PB_SDK:
module.fail_json(msg='profitbricks required for this module')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is required')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is required')
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required')
if not module.params.get('server'):
module.fail_json(msg='server parameter is required')
if not module.params.get('volume'):
module.fail_json(msg='volume parameter is required')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
profitbricks = ProfitBricksService(
username=subscription_user,
password=subscription_password)
state = module.params.get('state')
if state == 'absent':
try:
(changed) = detach_volume(module, profitbricks)
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
elif state == 'present':
try:
attach_volume(module, profitbricks)
module.exit_json()
except Exception as e:
module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| wshallum/ansible | lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py | Python | gpl-3.0 | 8,217 |
"""Provides factories for User API models."""
from factory.django import DjangoModelFactory
from factory import SubFactory
from student.tests.factories import UserFactory
from opaque_keys.edx.locator import CourseLocator
from ..models import UserCourseTag, UserOrgTag, UserPreference
# Factories are self documenting
# pylint: disable=missing-docstring
class UserPreferenceFactory(DjangoModelFactory):
class Meta(object):
model = UserPreference
user = None
key = None
value = "default test value"
class UserCourseTagFactory(DjangoModelFactory):
class Meta(object):
model = UserCourseTag
user = SubFactory(UserFactory)
course_id = CourseLocator('org', 'course', 'run')
key = None
value = None
class UserOrgTagFactory(DjangoModelFactory):
""" Simple factory class for generating UserOrgTags """
class Meta(object):
model = UserOrgTag
user = SubFactory(UserFactory)
org = 'org'
key = None
value = None
| ahmedaljazzar/edx-platform | openedx/core/djangoapps/user_api/tests/factories.py | Python | agpl-3.0 | 993 |
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2010 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
from test_views import *
from test_store import *
from test_auth import *
def suite():
suite = unittest.TestSuite()
for name in ['test_auth', 'test_store', 'test_views']:
mod = __import__('%s.%s' % (__name__, name), {}, {}, ['suite'])
suite.addTest(mod.suite())
return suite
| jaredjennings/snowy | wsgi/snowy/snowy/lib/django_openid_auth/tests/__init__.py | Python | agpl-3.0 | 1,710 |
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from nose.tools import eq_
from ...scenarios import qualifier
import navitiacommon.response_pb2 as response_pb2
from jormungandr.utils import str_to_time_stamp
#journey.arrival_date_time
#journey.duration
#journey.nb_transfers
#journeys.sections[i].type
#journeys.sections[i].duration
#journey.sections
def qualifier_one_direct_test():
journeys = []
journey_direct = response_pb2.Journey()
journey_direct.arrival_date_time = str_to_time_stamp("20131107T151200")
journey_direct.duration = 25 * 60
journey_direct.nb_transfers = 0
journey_direct.sections.add()
journey_direct.sections.add()
journey_direct.sections[0].type = response_pb2.STREET_NETWORK
journey_direct.sections[0].duration = 2 * 60
journey_direct.sections[1].type = response_pb2.STREET_NETWORK
journey_direct.sections[1].duration = 4 * 60
journeys.append(journey_direct)
qualifier.qualifier_one(journeys, "departure")
assert("non_pt" in journey_direct.type)
# Test with 4 journeys:
# a standard journey with 3 transfers and a 60 mn trip
# the fastest with 2 transfers and a 62 mn trip
# the healthiest with 1 transfer, a 70 mn trip and more walking
# the most comfortable with 1 transfer, an 80 mn trip and less walking
def qualifier_two_test():
journeys = []
journey_standard = response_pb2.Journey()
journey_standard.type = "none"
journey_standard.arrival_date_time = str_to_time_stamp("20131107T150000")
journey_standard.duration = 60 * 60
journey_standard.nb_transfers = 3
journey_standard.sections.add()
journey_standard.sections.add()
journey_standard.sections.add()
journey_standard.sections.add()
journey_standard.sections[0].type = response_pb2.STREET_NETWORK
journey_standard.sections[0].duration = 10 * 60
journey_standard.sections[-1].type = response_pb2.STREET_NETWORK
journey_standard.sections[-1].duration = 10 * 60
journeys.append(journey_standard)
journey_rapid = response_pb2.Journey()
journey_rapid.arrival_date_time = str_to_time_stamp("20131107T150500")
journey_rapid.duration = 62 * 60
journey_rapid.nb_transfers = 2
journey_rapid.sections.add()
journey_rapid.sections.add()
journey_rapid.sections.add()
journey_rapid.sections[0].type = response_pb2.STREET_NETWORK
journey_rapid.sections[0].duration = 10 * 60
journey_rapid.sections[-1].type = response_pb2.STREET_NETWORK
journey_rapid.sections[-1].duration = 10 * 60
journeys.append(journey_rapid)
journey_health = response_pb2.Journey()
journey_health.arrival_date_time = str_to_time_stamp("20131107T151000")
journey_health.duration = 70 * 60
journey_health.nb_transfers = 1
journey_health.sections.add()
journey_health.sections.add()
journey_health.sections.add()
journey_health.sections[0].type = response_pb2.STREET_NETWORK
journey_health.sections[0].duration = 15 * 60
journey_health.sections[1].type = response_pb2.TRANSFER
journey_health.sections[1].duration = 10 * 60
journey_health.sections[-1].type = response_pb2.STREET_NETWORK
journey_health.sections[-1].duration = 10 * 60
journeys.append(journey_health)
journey_confort = response_pb2.Journey()
journey_confort.arrival_date_time = str_to_time_stamp("20131107T152000")
journey_confort.duration = 80 * 60
journey_confort.nb_transfers = 1
journey_confort.sections.add()
journey_confort.sections.add()
journey_confort.sections.add()
journey_confort.sections[0].type = response_pb2.STREET_NETWORK
journey_confort.sections[0].duration = 5 * 60
journey_confort.sections[-1].type = response_pb2.STREET_NETWORK
journey_confort.sections[-1].duration = 5 * 60
journeys.append(journey_confort)
qualifier.qualifier_one(journeys, "departure")
eq_(journey_standard.type, "fastest") # the standard should be the fastest
eq_(journey_rapid.type, "rapid")
    # TODO: refactor this test with custom rules so it does not depend on changing business rules
# eq_(journey_confort.type, "comfort")
# eq_(journey_health.type, "healthy")
def has_car_test():
journey = response_pb2.Journey()
journey.sections.add()
section = journey.sections[0]
section.type = response_pb2.STREET_NETWORK
section.street_network.mode = response_pb2.Car
assert(qualifier.has_car(journey))
foot_journey = response_pb2.Journey()
foot_journey.sections.add()
foot_journey.sections.add()
foot_journey.sections.add()
foot_journey.sections[0].street_network.mode = response_pb2.Walking
foot_journey.sections[1].street_network.mode = response_pb2.Bike
foot_journey.sections[2].street_network.mode = response_pb2.Bss
assert(not qualifier.has_car(foot_journey))
foot_journey.sections.add()
foot_journey.sections[3].type = response_pb2.STREET_NETWORK
foot_journey.sections[3].street_network.mode = response_pb2.Car
assert(qualifier.has_car(foot_journey))
def standard_choice_test():
journeys = []
#the first is the worst one
journey_worst = response_pb2.Journey()
journey_worst.arrival_date_time = str_to_time_stamp("20131107T161200")
journey_worst.sections.add()
journey_worst.sections[0].type = response_pb2.STREET_NETWORK
journey_worst.sections[0].street_network.mode = response_pb2.Car
journeys.append(journey_worst)
# arrive later but no car
journey_not_good = response_pb2.Journey()
journey_not_good.arrival_date_time = str_to_time_stamp("20131107T171200")
journey_not_good.sections.add()
journey_not_good.sections[0].type = response_pb2.STREET_NETWORK
journey_not_good.sections[0].street_network.mode = response_pb2.Bike
journeys.append(journey_not_good)
#this is the standard
journey_1 = response_pb2.Journey()
journey_1.arrival_date_time = str_to_time_stamp("20131107T151200")
journey_1.sections.add()
journey_1.sections[0].type = response_pb2.STREET_NETWORK
journey_1.sections[0].street_network.mode = response_pb2.Bike
journeys.append(journey_1)
# a better journey, but using car
journey_2 = response_pb2.Journey()
journey_2.arrival_date_time = str_to_time_stamp("20131107T151000")
journey_2.sections.add()
journey_2.sections[0].type = response_pb2.STREET_NETWORK
journey_2.sections[0].street_network.mode = response_pb2.Car
journeys.append(journey_2)
standard = qualifier.choose_standard(journeys, qualifier.arrival_crit)
print qualifier.has_car(standard)
print "standard ", standard.arrival_date_time
eq_(standard, journey_1)
def standard_choice_with_pt_test():
journeys = []
#the first is the worst one
journey_worst = response_pb2.Journey()
journey_worst.arrival_date_time = str_to_time_stamp("20131107T161200")
journey_worst.sections.add()
journey_worst.sections[0].type = response_pb2.STREET_NETWORK
journey_worst.sections[0].street_network.mode = response_pb2.Car
journeys.append(journey_worst)
# arrive later but no car
journey_not_good = response_pb2.Journey()
journey_not_good.arrival_date_time = str_to_time_stamp("20131107T171200")
journey_not_good.sections.add()
journey_not_good.sections[0].type = response_pb2.STREET_NETWORK
journey_not_good.sections[0].street_network.mode = response_pb2.Bike
journeys.append(journey_not_good)
#this is the standard
journey_1 = response_pb2.Journey()
journey_1.arrival_date_time = str_to_time_stamp("20131107T201200")
journey_1.sections.add()
journey_1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journeys.append(journey_1)
# a better journey, but using car
journey_2 = response_pb2.Journey()
journey_2.arrival_date_time = str_to_time_stamp("20131107T151000")
journey_2.sections.add()
journey_2.sections[0].type = response_pb2.STREET_NETWORK
journey_2.sections[0].street_network.mode = response_pb2.Car
journeys.append(journey_2)
standard = qualifier.choose_standard(journeys, qualifier.arrival_crit)
print qualifier.has_car(standard)
print "standard ", standard.arrival_date_time
eq_(standard, journey_1)
def choose_standard_pt_car():
journeys = []
journey1 = response_pb2.Journey()
journey1.arrival_date_time = str_to_time_stamp("20141120T170000")
journey1.sections.add()
journey1.sections[0].type = response_pb2.STREET_NETWORK
    journey1.sections[0].street_network.mode = response_pb2.Car
def tranfers_cri_test():
journeys = []
dates = ["20131107T100000", "20131107T150000", "20131107T050000",
"20131107T100000", "20131107T150000", "20131107T050000"]
transfers = [4, 3, 8, 1, 1, 2]
for i in range(6):
journey = response_pb2.Journey()
journey.nb_transfers = transfers[i]
journey.arrival_date_time = str_to_time_stamp(dates[i])
journeys.append(journey)
best = qualifier.min_from_criteria(journeys, [qualifier.transfers_crit,
qualifier.arrival_crit])
    # the transfer criterion comes first; if 2 journeys have
    # the same nb_transfers, we then compare the arrival dates
eq_(best.nb_transfers, 1)
eq_(best.arrival_date_time, str_to_time_stamp("20131107T100000"))
def qualifier_crowfly_test():
journeys = []
journey_standard = response_pb2.Journey()
journey_standard.arrival_date_time = str_to_time_stamp('20140825T113224')
journey_standard.duration = 2620
journey_standard.nb_transfers = 1
journey_standard.sections.add()
journey_standard.sections.add()
journey_standard.sections.add()
journey_standard.sections[0].type = response_pb2.CROW_FLY
journey_standard.sections[0].duration = 796
journey_standard.sections[-1].type = response_pb2.CROW_FLY
journey_standard.sections[-1].duration = 864
journeys.append(journey_standard)
journey_health = response_pb2.Journey()
journey_health.arrival_date_time = str_to_time_stamp('20140825T114000')
journey_health.duration = 3076
journey_health.nb_transfers = 1
journey_health.sections.add()
journey_health.sections.add()
journey_health.sections.add()
journey_health.sections[0].type = response_pb2.CROW_FLY
journey_health.sections[0].duration = 796
journey_health.sections[-1].type = response_pb2.CROW_FLY
journey_health.sections[-1].duration = 0
journeys.append(journey_health)
qualifier.qualifier_one(journeys, "departure")
eq_(journey_standard.type, "rapid") # the standard should be the fastest
eq_(journey_health.type, "less_fallback_walk")
def qualifier_nontransport_duration_only_walk_test():
journey = response_pb2.Journey()
journey.arrival_date_time = str_to_time_stamp('20140825T113224')
journey.duration = 2620
journey.nb_transfers = 1
journey.sections.add()
journey.sections.add()
journey.sections.add()
journey.sections[0].type = response_pb2.CROW_FLY
journey.sections[0].duration = 796
journey.sections[1].type = response_pb2.TRANSFER
journey.sections[1].duration = 328
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].duration = 864
eq_(qualifier.get_nontransport_duration(journey), 1988)
def qualifier_nontransport_duration_with_tc_test():
journey = response_pb2.Journey()
journey.arrival_date_time = str_to_time_stamp('20140825T113224')
journey.duration = 2620
journey.nb_transfers = 2
journey.sections.add()
journey.sections.add()
journey.sections.add()
journey.sections.add()
journey.sections.add()
journey.sections.add()
journey.sections[0].type = response_pb2.CROW_FLY
journey.sections[0].duration = 796
journey.sections[1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[1].duration = 328
journey.sections[2].type = response_pb2.TRANSFER
journey.sections[2].duration = 418
journey.sections[3].type = response_pb2.WAITING
journey.sections[3].duration = 719
journey.sections[4].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[4].duration = 175
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].duration = 864
eq_(qualifier.get_nontransport_duration(journey), 2797)
| datanel/navitia | source/jormungandr/jormungandr/scenarios/tests/qualifier_tests.py | Python | agpl-3.0 | 13,445 |
import sys, os, os.path
from distutils.core import Extension
from distutils.errors import DistutilsOptionError
from versioninfo import get_base_dir, split_version
try:
from Cython.Distutils import build_ext as build_pyx
import Cython.Compiler.Version
CYTHON_INSTALLED = True
except ImportError:
CYTHON_INSTALLED = False
EXT_MODULES = ["lxml.etree", "lxml.objectify"]
PACKAGE_PATH = "src%slxml%s" % (os.path.sep, os.path.sep)
INCLUDE_PACKAGE_PATH = PACKAGE_PATH + 'includes'
if sys.version_info[0] >= 3:
_system_encoding = sys.getdefaultencoding()
if _system_encoding is None:
_system_encoding = "iso-8859-1" # :-)
def decode_input(data):
if isinstance(data, str):
return data
return data.decode(_system_encoding)
else:
def decode_input(data):
return data
def env_var(name):
value = os.getenv(name)
if value:
value = decode_input(value)
if sys.platform == 'win32' and ';' in value:
return value.split(';')
else:
return value.split()
else:
return []
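# Illustrative behaviour of env_var() (values assumed, not from the original source):
#   CFLAGS="-O2 -g"                    -> env_var('CFLAGS')  == ['-O2', '-g']
#   INCLUDE="C:\libs\include;C:\other" -> env_var('INCLUDE') == ['C:\libs\include', 'C:\other']  (win32 only)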
def ext_modules(static_include_dirs, static_library_dirs,
static_cflags, static_binaries):
global XML2_CONFIG, XSLT_CONFIG
if OPTION_BUILD_LIBXML2XSLT:
from buildlibxml import build_libxml2xslt, get_prebuilt_libxml2xslt
if sys.platform.startswith('win'):
get_prebuilt_libxml2xslt(
OPTION_DOWNLOAD_DIR, static_include_dirs, static_library_dirs)
else:
XML2_CONFIG, XSLT_CONFIG = build_libxml2xslt(
OPTION_DOWNLOAD_DIR, 'build/tmp',
static_include_dirs, static_library_dirs,
static_cflags, static_binaries,
libiconv_version=OPTION_LIBICONV_VERSION,
libxml2_version=OPTION_LIBXML2_VERSION,
libxslt_version=OPTION_LIBXSLT_VERSION,
multicore=OPTION_MULTICORE)
if OPTION_WITHOUT_OBJECTIFY:
modules = [ entry for entry in EXT_MODULES
if 'objectify' not in entry ]
else:
modules = EXT_MODULES
c_files_exist = [ os.path.exists('%s%s.c' % (PACKAGE_PATH, module)) for module in modules ]
if CYTHON_INSTALLED and (OPTION_WITH_CYTHON or False in c_files_exist):
source_extension = ".pyx"
print("Building with Cython %s." % Cython.Compiler.Version.version)
# generate module cleanup code
from Cython.Compiler import Options
Options.generate_cleanup_code = 3
Options.clear_to_none = False
elif not OPTION_WITHOUT_CYTHON and False in c_files_exist:
for exists, module in zip(c_files_exist, modules):
if not exists:
raise RuntimeError(
"ERROR: Trying to build without Cython, but pre-generated '%s%s.c' "
"is not available (pass --without-cython to ignore this error)." % (
PACKAGE_PATH, module))
else:
if False in c_files_exist:
for exists, module in zip(c_files_exist, modules):
if not exists:
print("WARNING: Trying to build without Cython, but pre-generated "
"'%s%s.c' is not available." % (PACKAGE_PATH, module))
source_extension = ".c"
print("Building without Cython.")
lib_versions = get_library_versions()
if lib_versions[0]:
print("Using build configuration of libxml2 %s and libxslt %s" %
lib_versions)
else:
print("Using build configuration of libxslt %s" %
lib_versions[1])
_include_dirs = include_dirs(static_include_dirs)
_library_dirs = library_dirs(static_library_dirs)
_cflags = cflags(static_cflags)
_define_macros = define_macros()
_libraries = libraries()
_include_dirs.append(os.path.join(get_base_dir(), INCLUDE_PACKAGE_PATH))
if _library_dirs:
message = "Building against libxml2/libxslt in "
if len(_library_dirs) > 1:
print(message + "one of the following directories:")
for dir in _library_dirs:
print(" " + dir)
else:
print(message + "the following directory: " +
_library_dirs[0])
if OPTION_AUTO_RPATH:
runtime_library_dirs = _library_dirs
else:
runtime_library_dirs = []
if CYTHON_INSTALLED and OPTION_SHOW_WARNINGS:
from Cython.Compiler import Errors
Errors.LEVEL = 0
result = []
for module in modules:
main_module_source = PACKAGE_PATH + module + source_extension
result.append(
Extension(
module,
sources = [main_module_source],
depends = find_dependencies(module),
extra_compile_args = _cflags,
extra_objects = static_binaries,
define_macros = _define_macros,
include_dirs = _include_dirs,
library_dirs = _library_dirs,
runtime_library_dirs = runtime_library_dirs,
libraries = _libraries,
))
if CYTHON_INSTALLED and OPTION_WITH_CYTHON_GDB:
for ext in result:
ext.cython_gdb = True
if CYTHON_INSTALLED and source_extension == '.pyx':
# build .c files right now and convert Extension() objects
from Cython.Build import cythonize
result = cythonize(result)
return result
def find_dependencies(module):
if not CYTHON_INSTALLED:
return []
base_dir = get_base_dir()
package_dir = os.path.join(base_dir, PACKAGE_PATH)
includes_dir = os.path.join(base_dir, INCLUDE_PACKAGE_PATH)
pxd_files = [ os.path.join(includes_dir, filename)
for filename in os.listdir(includes_dir)
if filename.endswith('.pxd') ]
if 'etree' in module:
pxi_files = [ os.path.join(PACKAGE_PATH, filename)
for filename in os.listdir(package_dir)
if filename.endswith('.pxi')
and 'objectpath' not in filename ]
pxd_files = [ filename for filename in pxd_files
if 'etreepublic' not in filename ]
elif 'objectify' in module:
pxi_files = [ os.path.join(PACKAGE_PATH, 'objectpath.pxi') ]
else:
pxi_files = []
return pxd_files + pxi_files
def extra_setup_args():
result = {}
if CYTHON_INSTALLED:
result['cmdclass'] = {'build_ext': build_pyx}
return result
def libraries():
if sys.platform in ('win32',):
libs = ['libxslt', 'libexslt', 'libxml2', 'iconv']
if OPTION_STATIC:
libs = ['%s_a' % lib for lib in libs]
libs.extend(['zlib', 'WS2_32'])
elif OPTION_STATIC:
libs = ['z', 'm']
else:
libs = ['xslt', 'exslt', 'xml2', 'z', 'm']
return libs
def library_dirs(static_library_dirs):
if OPTION_STATIC:
if not static_library_dirs:
static_library_dirs = env_var('LIBRARY')
assert static_library_dirs, "Static build not configured, see doc/build.txt"
return static_library_dirs
# filter them from xslt-config --libs
result = []
possible_library_dirs = flags('libs')
for possible_library_dir in possible_library_dirs:
if possible_library_dir.startswith('-L'):
result.append(possible_library_dir[2:])
return result
def include_dirs(static_include_dirs):
if OPTION_STATIC:
if not static_include_dirs:
static_include_dirs = env_var('INCLUDE')
return static_include_dirs
# filter them from xslt-config --cflags
result = []
possible_include_dirs = flags('cflags')
for possible_include_dir in possible_include_dirs:
if possible_include_dir.startswith('-I'):
result.append(possible_include_dir[2:])
return result
def cflags(static_cflags):
result = []
if not OPTION_SHOW_WARNINGS:
result.append('-w')
if OPTION_DEBUG_GCC:
result.append('-g2')
if OPTION_STATIC:
if not static_cflags:
static_cflags = env_var('CFLAGS')
result.extend(static_cflags)
else:
# anything from xslt-config --cflags that doesn't start with -I
possible_cflags = flags('cflags')
for possible_cflag in possible_cflags:
if not possible_cflag.startswith('-I'):
result.append(possible_cflag)
if sys.platform in ('darwin',):
for opt in result:
if 'flat_namespace' in opt:
break
else:
result.append('-flat_namespace')
return result
def define_macros():
macros = []
if OPTION_WITHOUT_ASSERT:
macros.append(('PYREX_WITHOUT_ASSERTIONS', None))
if OPTION_WITHOUT_THREADING:
macros.append(('WITHOUT_THREADING', None))
if OPTION_WITH_REFNANNY:
macros.append(('CYTHON_REFNANNY', None))
if OPTION_WITH_UNICODE_STRINGS:
macros.append(('LXML_UNICODE_STRINGS', '1'))
return macros
_ERROR_PRINTED = False
def run_command(cmd, *args):
if not cmd:
return ''
if args:
cmd = ' '.join((cmd,) + args)
try:
import subprocess
except ImportError:
# Python 2.3
sf, rf, ef = os.popen3(cmd)
sf.close()
errors = ef.read()
stdout_data = rf.read()
else:
# Python 2.4+
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_data, errors = p.communicate()
global _ERROR_PRINTED
if errors and not _ERROR_PRINTED:
_ERROR_PRINTED = True
print("ERROR: %s" % errors)
print("** make sure the development packages of libxml2 and libxslt are installed **\n")
return decode_input(stdout_data).strip()
def get_library_versions():
xml2_version = run_command(find_xml2_config(), "--version")
xslt_version = run_command(find_xslt_config(), "--version")
return xml2_version, xslt_version
def flags(option):
xml2_flags = run_command(find_xml2_config(), "--%s" % option)
xslt_flags = run_command(find_xslt_config(), "--%s" % option)
flag_list = xml2_flags.split()
for flag in xslt_flags.split():
if flag not in flag_list:
flag_list.append(flag)
return flag_list
XSLT_CONFIG = None
XML2_CONFIG = None
def find_xml2_config():
global XML2_CONFIG
if XML2_CONFIG:
return XML2_CONFIG
option = '--with-xml2-config='
for arg in sys.argv:
if arg.startswith(option):
sys.argv.remove(arg)
XML2_CONFIG = arg[len(option):]
return XML2_CONFIG
else:
# default: do nothing, rely only on xslt-config
XML2_CONFIG = os.getenv('XML2_CONFIG', '')
return XML2_CONFIG
def find_xslt_config():
global XSLT_CONFIG
if XSLT_CONFIG:
return XSLT_CONFIG
option = '--with-xslt-config='
for arg in sys.argv:
if arg.startswith(option):
sys.argv.remove(arg)
XSLT_CONFIG = arg[len(option):]
return XSLT_CONFIG
else:
XSLT_CONFIG = os.getenv('XSLT_CONFIG', 'xslt-config')
return XSLT_CONFIG
## Option handling:
def has_option(name):
try:
sys.argv.remove('--%s' % name)
return True
except ValueError:
pass
# allow passing all cmd line options also as environment variables
env_val = os.getenv(name.upper().replace('-', '_'), 'false').lower()
if env_val == "true":
return True
return False
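# Example (illustrative): passing `--without-assert` on the command line or setting the
# environment variable WITHOUT_ASSERT=true both make has_option('without-assert') return True.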
def option_value(name):
for index, option in enumerate(sys.argv):
if option == '--' + name:
if index+1 >= len(sys.argv):
raise DistutilsOptionError(
'The option %s requires a value' % option)
value = sys.argv[index+1]
sys.argv[index:index+2] = []
return value
if option.startswith('--' + name + '='):
value = option[len(name)+3:]
sys.argv[index:index+1] = []
return value
env_val = os.getenv(name.upper().replace('-', '_'))
return env_val
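# Example (illustrative, version number assumed): `--libxml2-version 2.9.1` and
# `--libxml2-version=2.9.1` are both accepted on the command line; otherwise the
# LIBXML2_VERSION environment variable is consulted.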
staticbuild = bool(os.environ.get('STATICBUILD', ''))
# pick up any commandline options and/or env variables
OPTION_WITHOUT_OBJECTIFY = has_option('without-objectify')
OPTION_WITH_UNICODE_STRINGS = has_option('with-unicode-strings')
OPTION_WITHOUT_ASSERT = has_option('without-assert')
OPTION_WITHOUT_THREADING = has_option('without-threading')
OPTION_WITHOUT_CYTHON = has_option('without-cython')
OPTION_WITH_CYTHON = has_option('with-cython')
OPTION_WITH_CYTHON_GDB = has_option('cython-gdb')
OPTION_WITH_REFNANNY = has_option('with-refnanny')
if OPTION_WITHOUT_CYTHON:
CYTHON_INSTALLED = False
OPTION_STATIC = staticbuild or has_option('static')
OPTION_DEBUG_GCC = has_option('debug-gcc')
OPTION_SHOW_WARNINGS = has_option('warnings')
OPTION_AUTO_RPATH = has_option('auto-rpath')
OPTION_BUILD_LIBXML2XSLT = staticbuild or has_option('static-deps')
if OPTION_BUILD_LIBXML2XSLT:
OPTION_STATIC = True
OPTION_LIBXML2_VERSION = option_value('libxml2-version')
OPTION_LIBXSLT_VERSION = option_value('libxslt-version')
OPTION_LIBICONV_VERSION = option_value('libiconv-version')
OPTION_MULTICORE = option_value('multicore')
OPTION_DOWNLOAD_DIR = option_value('download-dir')
if OPTION_DOWNLOAD_DIR is None:
OPTION_DOWNLOAD_DIR = 'libs'
| cloudera/hue | desktop/core/ext-py/lxml-3.3.6/setupinfo.py | Python | apache-2.0 | 13,458 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
# NOTE(cyeoh): We cannot use the metadata serializers from
# nova.api.openstack.common because the JSON format is different from
# the V2 API. The metadata serializer for V2 is in common because it
# is shared between image and server metadata, but since the image api
# is not ported to the V3 API there is no point creating a common
# version for the V3 API
class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
dom = xmlutil.safe_minidom_parse_string(text)
metadata_node = self.find_first_child_named(dom, "metadata")
key = metadata_node.getAttribute("key")
metadata = {}
metadata[key] = self.extract_text(metadata_node)
return {'body': {'metadata': metadata}}
class MetaItemTemplate(xmlutil.TemplateBuilder):
def construct(self):
sel = xmlutil.Selector('metadata', xmlutil.get_items, 0)
root = xmlutil.TemplateElement('metadata', selector=sel)
root.set('key', 0)
root.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=common.metadata_nsmap)
class ServerMetadataController(wsgi.Controller):
"""The server metadata API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
super(ServerMetadataController, self).__init__()
def _get_metadata(self, context, server_id):
try:
server = self.compute_api.get(context, server_id)
meta = self.compute_api.get_instance_metadata(context, server)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
meta_dict = {}
for key, value in meta.iteritems():
meta_dict[key] = value
return meta_dict
@extensions.expected_errors(404)
@wsgi.serializers(xml=common.MetadataTemplate)
def index(self, req, server_id):
"""Returns the list of metadata for a given instance."""
context = req.environ['nova.context']
return {'metadata': self._get_metadata(context, server_id)}
@extensions.expected_errors((400, 404, 409, 413))
@wsgi.serializers(xml=common.MetadataTemplate)
@wsgi.deserializers(xml=common.MetadataDeserializer)
@wsgi.response(201)
def create(self, req, server_id, body):
if not self.is_valid_body(body, 'metadata'):
msg = _("Malformed request body")
raise exc.HTTPBadRequest(explanation=msg)
metadata = body['metadata']
context = req.environ['nova.context']
new_metadata = self._update_instance_metadata(context,
server_id,
metadata,
delete=False)
return {'metadata': new_metadata}
@extensions.expected_errors((400, 404, 409, 413))
@wsgi.serializers(xml=MetaItemTemplate)
@wsgi.deserializers(xml=MetaItemDeserializer)
def update(self, req, server_id, id, body):
if not self.is_valid_body(body, 'metadata'):
msg = _("Malformed request body")
raise exc.HTTPBadRequest(explanation=msg)
meta_item = body['metadata']
if id not in meta_item:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
if len(meta_item) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
context = req.environ['nova.context']
self._update_instance_metadata(context,
server_id,
meta_item,
delete=False)
return {'metadata': meta_item}
@extensions.expected_errors((400, 404, 409, 413))
@wsgi.serializers(xml=common.MetadataTemplate)
@wsgi.deserializers(xml=common.MetadataDeserializer)
def update_all(self, req, server_id, body):
if not self.is_valid_body(body, 'metadata'):
msg = _("Malformed request body")
raise exc.HTTPBadRequest(explanation=msg)
metadata = body['metadata']
context = req.environ['nova.context']
new_metadata = self._update_instance_metadata(context,
server_id,
metadata,
delete=True)
return {'metadata': new_metadata}
def _update_instance_metadata(self, context, server_id, metadata,
delete=False):
try:
server = self.compute_api.get(context, server_id)
return self.compute_api.update_instance_metadata(context,
server,
metadata,
delete)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidMetadata as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.QuotaError as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'update metadata')
@extensions.expected_errors(404)
@wsgi.serializers(xml=MetaItemTemplate)
def show(self, req, server_id, id):
"""Return a single metadata item."""
context = req.environ['nova.context']
data = self._get_metadata(context, server_id)
try:
return {'metadata': {id: data[id]}}
except KeyError:
msg = _("Metadata item was not found")
raise exc.HTTPNotFound(explanation=msg)
@extensions.expected_errors((404, 409))
@wsgi.response(204)
def delete(self, req, server_id, id):
"""Deletes an existing metadata."""
context = req.environ['nova.context']
metadata = self._get_metadata(context, server_id)
if id not in metadata:
msg = _("Metadata item was not found")
raise exc.HTTPNotFound(explanation=msg)
try:
server = self.compute_api.get(context, server_id)
self.compute_api.delete_instance_metadata(context, server, id)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete metadata')
class ServerMetadata(extensions.V3APIExtensionBase):
"""Server Metadata API."""
name = "Server Metadata"
alias = "server-metadata"
namespace = "http://docs.openstack.org/compute/core/server_metadata/v3"
version = 1
def get_resources(self):
parent = {'member_name': 'server',
'collection_name': 'servers'}
resources = [extensions.ResourceExtension('metadata',
ServerMetadataController(),
member_name='server_meta',
parent=parent,
custom_routes_fn=
self.server_metadata_map
)]
return resources
def get_controller_extensions(self):
return []
def server_metadata_map(self, mapper, wsgi_resource):
mapper.connect("metadata", "/servers/{server_id}/metadata",
controller=wsgi_resource,
action='update_all', conditions={"method": ['PUT']})
| ntt-sic/nova | nova/api/openstack/compute/plugins/v3/server_metadata.py | Python | apache-2.0 | 9,307 |
# -*- coding: utf-8 -*-
import json
from django.contrib.auth import get_permission_codename
from django.contrib.sites.models import Site
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.encoding import smart_str
from cms.constants import PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DIRTY
from cms.models import Page, GlobalPagePermission
from cms.utils import get_language_from_request
from cms.utils import get_language_list
from cms.utils import get_cms_setting
NOT_FOUND_RESPONSE = "NotFound"
def jsonify_request(response):
""" Turn any response in a 200 response to let jQuery code handle it nicely.
Response contains a json object with the following attributes:
* status: original response status code
* content: original response content
"""
content = {'status': response.status_code, 'content': smart_str(response.content, response._charset)}
return HttpResponse(json.dumps(content), content_type="application/json")
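# Sketch of the wrapped payload (status code and body are assumed examples): an original
# 403 response with body "Forbidden" is delivered to jQuery as HTTP 200 with the JSON body
#   {"status": 403, "content": "Forbidden"}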
publisher_classes = {
PUBLISHER_STATE_DIRTY: "publisher_dirty",
PUBLISHER_STATE_PENDING: "publisher_pending",
}
def get_admin_menu_item_context(request, page, filtered=False, language=None):
"""
    Used for rendering the page tree; inserts into the context everything
    we need for a single item
"""
has_add_page_permission = page.has_add_permission(request)
has_move_page_permission = page.has_move_page_permission(request)
site = Site.objects.get_current()
lang = get_language_from_request(request)
#slug = page.get_slug(language=lang, fallback=True) # why was this here ??
metadata = ""
if get_cms_setting('PERMISSION'):
# jstree metadata generator
md = []
#if not has_add_page_permission:
if not has_move_page_permission:
md.append(('valid_children', False))
md.append(('draggable', False))
if md:
# just turn it into simple javascript object
metadata = "{" + ", ".join(map(lambda e: "%s: %s" % (e[0],
isinstance(e[1], bool) and str(e[1]) or e[1].lower() ), md)) + "}"
has_add_on_same_level_permission = False
opts = Page._meta
if get_cms_setting('PERMISSION'):
if hasattr(request.user, '_global_add_perm_cache'):
global_add_perm = request.user._global_add_perm_cache
else:
global_add_perm = GlobalPagePermission.objects.user_has_add_permission(
request.user, page.site_id).exists()
request.user._global_add_perm_cache = global_add_perm
if request.user.has_perm(opts.app_label + '.' + get_permission_codename('add', opts)) and global_add_perm:
has_add_on_same_level_permission = True
from cms.utils import permissions
if not has_add_on_same_level_permission and page.parent_id:
has_add_on_same_level_permission = permissions.has_generic_permission(page.parent_id, request.user, "add",
page.site_id)
#has_add_on_same_level_permission = has_add_page_on_same_level_permission(request, page)
context = {
'page': page,
'site': site,
'lang': lang,
'filtered': filtered,
'metadata': metadata,
'preview_language': language,
'has_change_permission': page.has_change_permission(request),
'has_publish_permission': page.has_publish_permission(request),
'has_delete_permission': page.has_delete_permission(request),
'has_move_page_permission': has_move_page_permission,
'has_add_page_permission': has_add_page_permission,
'has_add_on_same_level_permission': has_add_on_same_level_permission,
'CMS_PERMISSION': get_cms_setting('PERMISSION'),
}
return context
def render_admin_menu_item(request, page, template=None, language=None):
"""
    Renders the requested page item for the tree. This is used when the item
    must be reloaded over ajax.
"""
if not template:
template = "admin/cms/page/tree/menu_fragment.html"
if not page.pk:
return HttpResponse(NOT_FOUND_RESPONSE) # Not found - tree will remove item
# languages
from cms.utils import permissions
languages = get_language_list(page.site_id)
context = RequestContext(request, {
'has_add_permission': permissions.has_page_add_permission(request),
'site_languages': languages,
})
filtered = 'filtered' in request.REQUEST
context.update(get_admin_menu_item_context(request, page, filtered, language))
# add mimetype to help out IE
return render_to_response(template, context, content_type="text/html; charset=utf-8")
| DylannCordel/django-cms | cms/utils/admin.py | Python | bsd-3-clause | 4,796 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_taboo_contract
short_description: Manage taboo contracts (vz:Taboo)
description:
- Manage taboo contracts on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information about the internal APIC class B(vz:Taboo) from
  L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
taboo_contract:
description:
- The name of the Taboo Contract.
required: yes
aliases: [ name ]
description:
description:
- The description for the Taboo Contract.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
scope:
description:
- The scope of a service contract.
- The APIC defaults to C(context) when unset during creation.
choices: [ application-profile, context, global, tenant ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add taboo contract
aci_taboo_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: ansible_test
taboo_contract: taboo_contract_test
state: present
- name: Remove taboo contract
aci_taboo_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: ansible_test
taboo_contract: taboo_contract_test
state: absent
- name: Query all taboo contracts
aci_taboo_contract:
host: apic
username: admin
password: SomeSecretPassword
state: query
- name: Query a specific taboo contract
aci_taboo_contract:
host: apic
username: admin
password: SomeSecretPassword
tenant: ansible_test
taboo_contract: taboo_contract_test
state: query
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
taboo_contract=dict(type='str', required=False, aliases=['name']), # Not required for querying all contracts
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all contracts
scope=dict(type='str', choices=['application-profile', 'context', 'global', 'tenant']),
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['tenant', 'taboo_contract']],
['state', 'present', ['tenant', 'taboo_contract']],
],
)
taboo_contract = module.params['taboo_contract']
description = module.params['description']
scope = module.params['scope']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='vzTaboo',
aci_rn='taboo-{0}'.format(taboo_contract),
filter_target='eq(vzTaboo.name, "{0}")'.format(taboo_contract),
module_object=taboo_contract,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='vzTaboo',
class_config=dict(
name=taboo_contract,
descr=description,
scope=scope,
),
)
aci.get_diff(aci_class='vzTaboo')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| drmrd/ansible | lib/ansible/modules/network/aci/aci_taboo_contract.py | Python | gpl-3.0 | 7,355 |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shutil,sys,platform
from waflib import TaskGen,Task,Build,Options,Utils,Errors
from waflib.TaskGen import taskgen_method,feature,after_method,before_method
app_info='''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>%s</string>
</dict>
</plist>
'''
def set_macosx_deployment_target(self):
if self.env['MACOSX_DEPLOYMENT_TARGET']:
os.environ['MACOSX_DEPLOYMENT_TARGET']=self.env['MACOSX_DEPLOYMENT_TARGET']
elif'MACOSX_DEPLOYMENT_TARGET'not in os.environ:
if Utils.unversioned_sys_platform()=='darwin':
os.environ['MACOSX_DEPLOYMENT_TARGET']='.'.join(platform.mac_ver()[0].split('.')[:2])
def create_bundle_dirs(self,name,out):
bld=self.bld
dir=out.parent.find_or_declare(name)
dir.mkdir()
macos=dir.find_or_declare(['Contents','MacOS'])
macos.mkdir()
return dir
def bundle_name_for_output(out):
name=out.name
k=name.rfind('.')
if k>=0:
name=name[:k]+'.app'
else:
name=name+'.app'
return name
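# Illustrative mapping derived from the code above: 'myprog' -> 'myprog.app',
# 'myprog.bin' -> 'myprog.app'; only the part after the last dot, if any, is replaced.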
def create_task_macapp(self):
if self.env['MACAPP']or getattr(self,'mac_app',False):
out=self.link_task.outputs[0]
name=bundle_name_for_output(out)
dir=self.create_bundle_dirs(name,out)
n1=dir.find_or_declare(['Contents','MacOS',out.name])
self.apptask=self.create_task('macapp',self.link_task.outputs,n1)
inst_to=getattr(self,'install_path','/Applications')+'/%s/Contents/MacOS/'%name
self.bld.install_files(inst_to,n1,chmod=Utils.O755)
if getattr(self,'mac_resources',None):
res_dir=n1.parent.parent.make_node('Resources')
inst_to=getattr(self,'install_path','/Applications')+'/%s/Resources'%name
for x in self.to_list(self.mac_resources):
node=self.path.find_node(x)
if not node:
raise Errors.WafError('Missing mac_resource %r in %r'%(x,self))
parent=node.parent
if os.path.isdir(node.abspath()):
nodes=node.ant_glob('**')
else:
nodes=[node]
for node in nodes:
rel=node.path_from(parent)
tsk=self.create_task('macapp',node,res_dir.make_node(rel))
self.bld.install_as(inst_to+'/%s'%rel,node)
if getattr(self.bld,'is_install',None):
self.install_task.hasrun=Task.SKIP_ME
def create_task_macplist(self):
if self.env['MACAPP']or getattr(self,'mac_app',False):
out=self.link_task.outputs[0]
name=bundle_name_for_output(out)
dir=self.create_bundle_dirs(name,out)
n1=dir.find_or_declare(['Contents','Info.plist'])
self.plisttask=plisttask=self.create_task('macplist',[],n1)
if getattr(self,'mac_plist',False):
node=self.path.find_resource(self.mac_plist)
if node:
plisttask.inputs.append(node)
else:
plisttask.code=self.mac_plist
else:
plisttask.code=app_info%self.link_task.outputs[0].name
inst_to=getattr(self,'install_path','/Applications')+'/%s/Contents/'%name
self.bld.install_files(inst_to,n1)
def apply_bundle(self):
if self.env['MACBUNDLE']or getattr(self,'mac_bundle',False):
self.env['LINKFLAGS_cshlib']=self.env['LINKFLAGS_cxxshlib']=[]
self.env['cshlib_PATTERN']=self.env['cxxshlib_PATTERN']=self.env['macbundle_PATTERN']
use=self.use=self.to_list(getattr(self,'use',[]))
if not'MACBUNDLE'in use:
use.append('MACBUNDLE')
app_dirs=['Contents','Contents/MacOS','Contents/Resources']
class macapp(Task.Task):
color='PINK'
def run(self):
self.outputs[0].parent.mkdir()
shutil.copy2(self.inputs[0].srcpath(),self.outputs[0].abspath())
class macplist(Task.Task):
color='PINK'
ext_in=['.bin']
def run(self):
if getattr(self,'code',None):
txt=self.code
else:
txt=self.inputs[0].read()
self.outputs[0].write(txt)
feature('c','cxx')(set_macosx_deployment_target)
taskgen_method(create_bundle_dirs)
feature('cprogram','cxxprogram')(create_task_macapp)
after_method('apply_link')(create_task_macapp)
feature('cprogram','cxxprogram')(create_task_macplist)
after_method('apply_link')(create_task_macplist)
feature('cshlib','cxxshlib')(apply_bundle)
before_method('apply_link','propagate_uselib_vars')(apply_bundle) | rafalmiel/glmark2-wl | waflib/Tools/c_osx.py | Python | gpl-3.0 | 4,428 |
def func():
value = "not-none"
<caret>if value is None: # pylint: disable=unused-argument
print("None")
else:
print("Not none") | siosio/intellij-community | python/testData/intentions/PyInvertIfConditionIntentionTest/commentsPylintInlineIf.py | Python | apache-2.0 | 157 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal widget - Display Indico seminars
"""
from invenio.config import CFG_CACHEDIR, CFG_SITE_LANG
from xml.dom import minidom
from invenio.utils.url import create_Indico_request_url, make_invenio_opener
import time
import base64
import socket
from invenio.legacy.webjournal.utils import \
parse_url_string, WEBJOURNAL_OPENER
from invenio.base.i18n import gettext_set_language
update_frequency = 3600 # in seconds
def format_element(bfo, indico_baseurl="https://indico.cern.ch", indico_what='categ', indico_loc="", indico_id="1l7", indico_key="", indico_sig="", indico_onlypublic='yes', indico_from="today", indico_to='today', indico_credential_path=""):
"""
Display the list of seminar from the given Indico instance
See Indico HTTP Export APIs:
http://indico.cern.ch/ihelp/html/ExportAPI/index.html
@param indico_baseurl: Indico base URL from which to retrieve information
@param indico_what: element to export
@type indico_what: one of the strings: C{categ}, C{event}, C{room}, C{reservation}
@param indico_loc: location of the element(s) specified by ID (only used for some elements)
@param indico_id: ID of the element to be exported
@type indico_id: a string or a list/tuple of strings
    @param indico_onlypublic: only export public events when set to C(yes)
    @param indico_from: start of the period to export events for (e.g. C(today))
    @param indico_to: end of the period to export events for (e.g. C(today)).
                      See U{http://indico.cern.ch/ihelp/html/ExportAPI/common.html}
@param indico_key: API key provided for the given Indico instance
@param indico_sig: API secret key (signature) provided for the given Indico instance
@param indico_credential_path: if provided, load 'indico_key' and 'indico_sig' from this path
"""
args = parse_url_string(bfo.user_info['uri'])
journal_name = args["journal_name"]
cached_filename = "webjournal_widget_seminars_%s.xml" % journal_name
out = get_widget_html(bfo, indico_baseurl, indico_what, indico_loc, indico_id,
indico_onlypublic, indico_from, indico_to,
indico_key, indico_sig, indico_credential_path,
cached_filename, bfo.lang)
return out
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
def get_widget_html(bfo, indico_baseurl, indico_what, indico_loc, indico_id,
indico_onlypublic, indico_from, indico_to, indico_key,
indico_sig, indico_credential_path,
cached_filename, ln=CFG_SITE_LANG):
"""
Indico seminars of the day service
Gets seminars of the day from CERN Indico every 60 minutes and displays
them in a widget.
"""
_ = gettext_set_language(ln)
try:
seminar_xml = minidom.parse('%s/%s' % (CFG_CACHEDIR, cached_filename))
except:
try:
_update_seminars(indico_baseurl, indico_what, indico_loc, indico_id,
indico_onlypublic, indico_from, indico_to, indico_key,
indico_sig, indico_credential_path, cached_filename)
seminar_xml = minidom.parse('%s/%s' % (CFG_CACHEDIR, cached_filename))
except:
return "<ul><li><i>" + _("No information available") + "</i></li></ul>"
try:
timestamp = seminar_xml.firstChild.getAttribute("time")
except:
timestamp = time.struct_time()
last_update = time.mktime(time.strptime(timestamp,
"%a, %d %b %Y %H:%M:%S %Z"))
now = time.mktime(time.gmtime())
if last_update + update_frequency < now:
try:
_update_seminars(indico_baseurl, indico_what, indico_loc, indico_id,
indico_onlypublic, indico_from, indico_to, indico_key,
indico_sig, indico_credential_path, cached_filename)
seminar_xml = minidom.parse('%s/%s' % (CFG_CACHEDIR, cached_filename))
except:
return "<ul><li><i>" + _("No information available") + "</i></li></ul>"
seminars = seminar_xml.getElementsByTagName("seminar")
if not seminars:
return "<ul><li><i>" + _("No seminars today") + "</i></li></ul>"
html = ""
for seminar in seminars:
html += "<li>"
try:
seminar_time = seminar.getElementsByTagName("start_time")[0].firstChild.toxml(encoding="utf-8")
except:
seminar_time = ""
try:
category = seminar.getElementsByTagName("category")[0].firstChild.toxml(encoding="utf-8")
except:
category = "Seminar"
html += '%s %s<br/>' % (seminar_time, category)
try:
title = seminar.getElementsByTagName("title")[0].firstChild.toxml(encoding="utf-8")
except:
title = ""
try:
url = seminar.getElementsByTagName("url")[0].firstChild.toxml(encoding="utf-8")
except:
url = "#"
try:
speaker = seminar.getElementsByTagName("chair")[0].firstChild.toxml(encoding="utf-8")
except:
try:
speaker = seminar.getElementsByTagName("creator")[0].firstChild.toxml(encoding="utf-8")
except:
speaker = ""
if title:
html += '<strong><a href="%s">%s</a></strong>, %s<br />' % (url, title, speaker)
try:
location = seminar.getElementsByTagName("location")[0].firstChild.toxml(encoding="utf-8") + " "
except:
location = ""
html += location
try:
room = seminar.getElementsByTagName("room")[0].firstChild.toxml(encoding="utf-8")
except:
room = ""
html += room
html += "</li>"
html = "<ul>" + html + "</ul>"
return html
def _update_seminars(indico_baseurl, indico_what, indico_loc, indico_id,
indico_onlypublic, indico_from, indico_to,
indico_key, indico_sig, indico_credential_path,
cached_filename):
"""
helper function that gets the xml data source from CERN Indico and creates
a dedicated xml file in the cache for easy use in the widget.
"""
if indico_credential_path:
indico_key, indico_sig = get_indico_credentials(indico_credential_path)
url = create_Indico_request_url(indico_baseurl,
indico_what,
indico_loc,
indico_id,
'xml',
{'onlypublic': indico_onlypublic,
'from': indico_from,
'to': indico_to},
indico_key, indico_sig)
default_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(2.0)
try:
try:
indico_xml = WEBJOURNAL_OPENER.open(url)
except:
return
finally:
socket.setdefaulttimeout(default_timeout)
xml_file_handler = minidom.parseString(indico_xml.read())
seminar_xml = ['<Indico_Seminars time="%s">' % time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()), ]
agenda_items = xml_file_handler.getElementsByTagName("conference")
for item in agenda_items:
seminar_xml.extend(["<seminar>", ])
for childNode in item.childNodes:
if childNode.tagName == "startDate":
key = "start_time"
value = childNode.firstChild.toxml(encoding="utf-8")
value = value and value[11:16] or ""
seminar_xml.extend(["<%s>%s</%s>" % (key, value, key), ])
continue
#if childNode.tagName == "endDate":
# continue
if childNode.tagName == "creator":
for extraChildNode in childNode.getElementsByTagName("fullName"):
key = "creator"
value = extraChildNode.firstChild.toxml(encoding="utf-8")
seminar_xml.extend(["<%s>%s</%s>" % (key, value, key), ])
# Only get the first childNode
break
continue
#if childNode.tagName == "hasAnyProtection":
# continue
#if childNode.tagName == "roomFullname":
# continue
#if childNode.tagName == "modificationDate":
# continue
#if childNode.tagName == "timezone":
# continue
if childNode.tagName == "category":
key = "category"
value = childNode.firstChild.toxml(encoding="utf-8")
value = value.split("/")[-1].replace("&", "").replace("nbsp;", "").replace(" ", "")
seminar_xml.extend(["<%s>%s</%s>" % (key, value, key), ])
continue
if childNode.tagName == "title":
key = "title"
value = childNode.firstChild.toxml(encoding="utf-8")
seminar_xml.extend(["<%s>%s</%s>" % (key, value, key), ])
continue
if childNode.tagName == "location":
key = "location"
value = childNode.firstChild.toxml(encoding="utf-8")
seminar_xml.extend(["<%s>%s</%s>" % (key, value, key), ])
continue
#if childNode.tagName == "type":
# continue
#if childNode.tagName == "categoryId":
# continue
#if childNode.tagName == "description":
# continue
#if childNode.tagName == "roomMapURL":
# continue
#if childNode.tagName == "material":
# continue
#if childNode.tagName == "visibility":
# continue
#if childNode.tagName == "address":
# continue
#if childNode.tagName == "creationDate":
# continue
if childNode.tagName == "room":
key = "room"
value = childNode.firstChild.toxml(encoding="utf-8")
seminar_xml.extend(["<%s>%s</%s>" % (key, value, key), ])
continue
if childNode.tagName == "chairs":
for extraChildNode in childNode.getElementsByTagName("fullName"):
key = "chair"
value = extraChildNode.firstChild.toxml(encoding="utf-8")
seminar_xml.extend(["<%s>%s</%s>" % (key, value, key), ])
# Only get the first childNode
break
continue
if childNode.tagName == "url":
key = "url"
value = childNode.firstChild.toxml(encoding="utf-8")
seminar_xml.extend(["<%s>%s</%s>" % (key, value, key), ])
continue
seminar_xml.extend(["</seminar>", ])
seminar_xml.extend(["</Indico_Seminars>", ])
# write the created file to cache
fptr = open("%s/%s" % (CFG_CACHEDIR, cached_filename), "w")
fptr.write("\n".join(seminar_xml))
fptr.close()
def get_indico_credentials(path):
"""
Returns the Indico API key and (secret) signature as a tuple
(public_key, private_key).
"""
try:
fd = open(path, "r")
_indico_credentials = fd.read()
fd.close()
except IOError as e:
return ('', '')
    return tuple(base64.decodestring(_indico_credentials).split('\n', 1))
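# Illustrative credential file layout (inferred from the code above, not an
# official format): the file is expected to hold the base64 encoding of the two
# values separated by a newline, e.g. base64.encodestring("MY_KEY\nMY_SIGNATURE"),
# so that decoding and splitting yields the (key, signature) pair.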
_ = gettext_set_language('en')
dummy = _("What's on today")
dummy = _("Seminars of the week")
| zenodo/invenio | invenio/modules/bulletin/format_elements/bfe_webjournal_widget_seminars.py | Python | gpl-2.0 | 12,516 |
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: yaml
type: stdout
short_description: yaml-ized Ansible screen output
version_added: 2.5
description:
- Ansible output that can be quite a bit easier to read than the
default JSON formatting.
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
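# Illustrative configuration (an assumption for context, not taken from this file):
# this callback is typically selected as the stdout callback, e.g. in ansible.cfg
#
#   [defaults]
#   stdout_callback = yaml
#
# or by exporting ANSIBLE_STDOUT_CALLBACK=yaml before running ansible-playbook.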
import yaml
import json
import re
import string
import sys
from ansible.plugins.callback import CallbackBase, strip_internal_keys
from ansible.plugins.callback.default import CallbackModule as Default
from ansible.parsing.yaml.dumper import AnsibleDumper
# from http://stackoverflow.com/a/15423007/115478
def should_use_block(value):
"""Returns true if string should be in block format"""
for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
if c in value:
return True
return False
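# For illustration (hypothetical values): should_use_block(u"single line") is False,
# while should_use_block(u"first\nsecond") is True because the value contains a newline.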
def my_represent_scalar(self, tag, value, style=None):
"""Uses block style for multi-line strings"""
if style is None:
if should_use_block(value):
style = '|'
            # we care more about readability than accuracy, so...
# ...no trailing space
value = value.rstrip()
# ...and non-printable characters
value = ''.join(x for x in value if x in string.printable)
# ...tabs prevent blocks from expanding
value = value.expandtabs()
# ...and odd bits of whitespace
value = re.sub(r'[\x0b\x0c\r]', '', value)
# ...as does trailing space
value = re.sub(r' +\n', '\n', value)
else:
style = self.default_style
node = yaml.representer.ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
class CallbackModule(Default):
"""
Variation of the Default output which uses nicely readable YAML instead
of JSON for printing results.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'yaml'
def __init__(self):
super(CallbackModule, self).__init__()
yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
if result.get('_ansible_no_log', False):
return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result"))
        # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
abridged_result = strip_internal_keys(result)
# remove invocation unless specifically wanting it
if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
del abridged_result['invocation']
# remove diff information from screen output
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
# remove exception from screen output
if 'exception' in abridged_result:
del abridged_result['exception']
dumped = ''
# put changed and skipped into a header line
if 'changed' in abridged_result:
dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
del abridged_result['changed']
if 'skipped' in abridged_result:
dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
del abridged_result['skipped']
# if we already have stdout, we don't need stdout_lines
if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
abridged_result['stdout_lines'] = '<omitted>'
if abridged_result:
dumped += '\n'
dumped += yaml.dump(abridged_result, width=1000, Dumper=AnsibleDumper, default_flow_style=False)
# indent by a couple of spaces
dumped = '\n '.join(dumped.split('\n')).rstrip()
return dumped
| Russell-IO/ansible | lib/ansible/plugins/callback/yaml.py | Python | gpl-3.0 | 4,321 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for handling events."""
__author__ = 'Sean Lip'
import inspect
from core import jobs_registry
from core.domain import exp_domain
from core.platform import models
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
taskqueue_services = models.Registry.import_taskqueue_services()
import feconf
class BaseEventHandler(object):
"""Base class for event dispatchers."""
# A string denoting the type of the event. Should be specified by
# subclasses and considered immutable.
EVENT_TYPE = None
@classmethod
def _notify_continuous_computation_listeners_async(cls, *args, **kwargs):
"""Dispatch events asynchronously to continuous computation realtime
layers that are listening for them.
"""
taskqueue_services.defer_to_events_queue(
jobs_registry.ContinuousComputationEventDispatcher.dispatch_event,
cls.EVENT_TYPE, *args, **kwargs)
@classmethod
def _handle_event(cls, *args, **kwargs):
"""Perform in-request processing of an incoming event."""
raise NotImplementedError(
'Subclasses of BaseEventHandler should implement the '
'_handle_event() method, using explicit arguments '
'(no *args or **kwargs).')
@classmethod
def record(cls, *args, **kwargs):
"""Process incoming events.
Callers of event handlers should call this method, not _handle_event().
"""
cls._notify_continuous_computation_listeners_async(*args, **kwargs)
cls._handle_event(*args, **kwargs)
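# Illustrative sketch (hypothetical handler, not part of this module): a new event
# type plugs into this framework by subclassing BaseEventHandler, for example
#
#   class ExampleEventHandler(BaseEventHandler):
#       EVENT_TYPE = 'example_event'
#
#       @classmethod
#       def _handle_event(cls, exploration_id):
#           pass
#
# after which callers invoke ExampleEventHandler.record(exploration_id); record()
# fans the event out to the realtime listeners and then to _handle_event().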
class AnswerSubmissionEventHandler(BaseEventHandler):
"""Event handler for recording answer submissions."""
EVENT_TYPE = feconf.EVENT_TYPE_ANSWER_SUBMITTED
@classmethod
def _notify_continuous_computation_listeners_async(cls, *args, **kwargs):
# Disable this method until we can deal with large answers, otherwise
# the data that is being placed on the task queue is too large.
pass
@classmethod
def _handle_event(cls, exploration_id, exploration_version, state_name,
handler_name, rule, answer):
"""Records an event when an answer triggers a rule."""
# TODO(sll): Escape these args?
stats_models.process_submitted_answer(
exploration_id, exploration_version, state_name,
handler_name, rule, answer)
class DefaultRuleAnswerResolutionEventHandler(BaseEventHandler):
"""Event handler for recording resolving of answers triggering the default
rule."""
EVENT_TYPE = feconf.EVENT_TYPE_DEFAULT_ANSWER_RESOLVED
@classmethod
def _handle_event(cls, exploration_id, state_name, handler_name, answers):
"""Resolves a list of answers for the default rule of this state."""
# TODO(sll): Escape these args?
stats_models.resolve_answers(
exploration_id, state_name, handler_name,
exp_domain.DEFAULT_RULESPEC_STR, answers)
class StartExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration start events."""
EVENT_TYPE = feconf.EVENT_TYPE_START_EXPLORATION
@classmethod
def _handle_event(cls, exp_id, exp_version, state_name, session_id,
params, play_type):
stats_models.StartExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, params,
play_type)
class MaybeLeaveExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration leave events."""
EVENT_TYPE = feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id, time_spent,
params, play_type):
stats_models.MaybeLeaveExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, time_spent,
params, play_type)
class StateHitEventHandler(BaseEventHandler):
"""Event handler for recording state hit events."""
EVENT_TYPE = feconf.EVENT_TYPE_STATE_HIT
# TODO(sll): remove params before sending this event to the jobs taskqueue
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id,
params, play_type):
stats_models.StateHitEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id,
params, play_type)
class ExplorationContentChangeEventHandler(BaseEventHandler):
"""Event handler for receiving exploration change events. This event is
triggered whenever changes to an exploration's contents or metadata (title,
    blurb etc.) are persisted. This includes when a new exploration is
    created.
"""
EVENT_TYPE = feconf.EVENT_TYPE_EXPLORATION_CHANGE
@classmethod
def _handle_event(cls, *args, **kwargs):
pass
class ExplorationStatusChangeEventHandler(BaseEventHandler):
"""Event handler for receiving exploration status change events.
These events are triggered whenever an exploration is published,
publicized, unpublished or unpublicized.
"""
EVENT_TYPE = feconf.EVENT_TYPE_EXPLORATION_STATUS_CHANGE
@classmethod
def _handle_event(cls, *args, **kwargs):
pass
class Registry(object):
"""Registry of event handlers."""
# Dict mapping event types to their classes.
_event_types_to_classes = {}
@classmethod
def _refresh_registry(cls):
"""Regenerates the event handler registry."""
cls._event_types_to_classes.clear()
# Find all subclasses of BaseEventHandler in the current module.
for obj_name, obj in globals().iteritems():
if inspect.isclass(obj) and issubclass(obj, BaseEventHandler):
if obj_name == 'BaseEventHandler':
continue
if not obj.EVENT_TYPE:
raise Exception(
'Event handler class %s does not specify an event '
'type' % obj_name)
elif obj.EVENT_TYPE in cls._event_types_to_classes:
raise Exception('Duplicate event type %s' % obj.EVENT_TYPE)
cls._event_types_to_classes[obj.EVENT_TYPE] = obj
@classmethod
def get_event_class_by_type(cls, event_type):
"""Gets an event handler class by its type.
Refreshes once if the event type is not found; subsequently, throws an
error.
"""
if event_type not in cls._event_types_to_classes:
cls._refresh_registry()
return cls._event_types_to_classes[event_type]
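# Illustrative dispatch (a sketch, not part of the original module): looking up a
# handler class by event type and recording an event through it.
#
#   handler_cls = Registry.get_event_class_by_type(feconf.EVENT_TYPE_START_EXPLORATION)
#   handler_cls.record(exp_id, exp_version, state_name, session_id, params, play_type)
#
# (the argument list mirrors StartExplorationEventHandler._handle_event above)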
| nagyistoce/oppia | core/domain/event_services.py | Python | apache-2.0 | 7,275 |
# Copyright (c) 2013 Gergely Nagy <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
from hy.models import HyObject
from hy._compat import str_type
KEYWORD_PREFIX = "\uFDD0"
class HyKeyword(HyObject, str_type):
"""Generic Hy Keyword object. It's either a ``str`` or a ``unicode``,
depending on the Python version.
"""
def __new__(cls, value):
if not value.startswith(KEYWORD_PREFIX):
value = KEYWORD_PREFIX + value
obj = str_type.__new__(cls, value)
return obj
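# Illustrative behaviour (a sketch, not part of the original module):
#
#   HyKeyword("foo") == KEYWORD_PREFIX + "foo"             # True, prefix is added
#   HyKeyword(KEYWORD_PREFIX + "foo") == HyKeyword("foo")  # True, prefix is not doubled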
| farhaven/hy | hy/models/keyword.py | Python | mit | 1,599 |
# -*- coding: iso-8859-1 -*-
#
# SmartInfo-Converter for Dreambox/Enigma-2
# Version: 1.0
# Coded by Vali (c)2009-2011
#
#######################################################################
from enigma import iServiceInformation
from Components.Converter.Converter import Converter
from Components.Element import cached
class valioTunerInfo(Converter, object):
def __init__(self, type):
Converter.__init__(self, type)
self.ar_fec = ["Auto", "1/2", "2/3", "3/4", "5/6", "7/8", "8/9", "3/5", "4/5", "9/10","None","None","None","None","None"]
self.ar_pol = ["H", "V", "CL", "CR", "na", "na", "na", "na", "na", "na", "na", "na"]
@cached
def getText(self):
service = self.source.service
info = service and service.info()
if not info:
return ""
Ret_Text = ""
if True:
feinfo = (service and service.frontendInfo())
if (feinfo is not None):
frontendData = (feinfo and feinfo.getAll(True))
if (frontendData is not None):
if ((frontendData.get("tuner_type") == "DVB-S") or (frontendData.get("tuner_type") == "DVB-C")):
frequency = str(int(frontendData.get("frequency") / 1000))
symbolrate = str(int(frontendData.get("symbol_rate")) / 1000)
try:
if (frontendData.get("tuner_type") == "DVB-S"):
polarisation_i = frontendData.get("polarization")
else:
polarisation_i = 0
fec_i = frontendData.get("fec_inner")
Ret_Text = frequency + " " + self.ar_pol[polarisation_i] + " " + self.ar_fec[fec_i] + " " + symbolrate
except:
Ret_Text = "FQ:" + frequency + " SR:" + symbolrate
elif (frontendData.get("tuner_type") == "DVB-T"):
frequency = str((frontendData.get("frequency") / 1000)) + " MHz"
Ret_Text = "Freq: " + frequency
return Ret_Text
return "n/a"
text = property(getText)
def changed(self, what):
Converter.changed(self, what) | sklnet/opendroid-enigma2 | lib/python/Components/Converter/valioTunerInfo.py | Python | gpl-2.0 | 1,889 |
from __future__ import print_function
from tempfile import mkdtemp
import os
from shutil import rmtree
import rpm
import base64
class KeyError(Exception):
def __init__(self, key, *args):
Exception.__init__(self)
self.args = args
self.key = key
def __str__(self):
return ''+self.key+' :'+' '.join(self.args)
class Checker:
def __init__(self):
self.dbdir = mkdtemp(prefix='oscrpmdb')
self.imported = {}
rpm.addMacro('_dbpath', self.dbdir)
self.ts = rpm.TransactionSet()
self.ts.initDB()
self.ts.openDB()
self.ts.setVSFlags(0)
#self.ts.Debug(1)
def readkeys(self, keys=[]):
rpm.addMacro('_dbpath', self.dbdir)
for key in keys:
try:
self.readkey(key)
except KeyError as e:
print(e)
if not len(self.imported):
raise KeyError('', "no key imported")
rpm.delMacro("_dbpath")
    # Note: __del__ is not a reliable place to do this cleanup in Python,
    # so callers are expected to invoke cleanup() explicitly instead.
# def __del__(self):
# self.cleanup()
def cleanup(self):
self.ts.closeDB()
rmtree(self.dbdir)
def readkey(self, file):
if file in self.imported:
return
fd = open(file, "r")
line = fd.readline()
if line and line[0:14] == "-----BEGIN PGP":
line = fd.readline()
while line and line != "\n":
line = fd.readline()
if not line:
raise KeyError(file, "not a pgp public key")
else:
raise KeyError(file, "not a pgp public key")
key = ''
line = fd.readline()
crc = None
while line:
if line[0:12] == "-----END PGP":
break
line = line.rstrip()
if (line[0] == '='):
crc = line[1:]
line = fd.readline()
break
else:
key += line
line = fd.readline()
fd.close()
if not line or line[0:12] != "-----END PGP":
raise KeyError(file, "not a pgp public key")
# TODO: compute and compare CRC, see RFC 2440
bkey = base64.b64decode(key)
r = self.ts.pgpImportPubkey(bkey)
if r != 0:
raise KeyError(file, "failed to import pubkey")
self.imported[file] = 1
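    # Illustrative key file accepted by readkey() (a sketch of the ASCII-armored
    # layout the parser above expects, not a real key):
    #
    #   -----BEGIN PGP PUBLIC KEY BLOCK-----
    #   Version: GnuPG v2
    #
    #   mQENBF...             (base64 key material, possibly many lines)
    #   =AbCd                 (optional CRC line)
    #   -----END PGP PUBLIC KEY BLOCK-----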
def check(self, pkg):
# avoid errors on non rpm
if pkg[-4:] != '.rpm':
return
fd = None
try:
fd = os.open(pkg, os.O_RDONLY)
hdr = self.ts.hdrFromFdno(fd)
finally:
if fd is not None:
os.close(fd)
if __name__ == "__main__":
import sys
keyfiles = []
pkgs = []
for arg in sys.argv[1:]:
if arg[-4:] == '.rpm':
pkgs.append(arg)
else:
keyfiles.append(arg)
checker = Checker()
try:
checker.readkeys(keyfiles)
for pkg in pkgs:
checker.check(pkg)
except Exception as e:
checker.cleanup()
raise e
# vim: sw=4 et
| OlegGirko/osc | osc/checker.py | Python | gpl-2.0 | 3,087 |
from . import driver | ddico/odoo | addons/hw_drivers/controllers/__init__.py | Python | agpl-3.0 | 20 |
import sys
import os
import unittest
sys.path.insert(0, os.getcwd())
import pybootchartgui.parsing as parsing
import pybootchartgui.process_tree as process_tree
import pybootchartgui.main as main
if sys.version_info >= (3, 0):
long = int
class TestProcessTree(unittest.TestCase):
def setUp(self):
self.name = "Process tree unittest"
self.rootdir = os.path.join(os.path.dirname(sys.argv[0]), '../../examples/1/')
parser = main._mk_options_parser()
options, args = parser.parse_args(['--q', self.rootdir])
writer = main._mk_writer(options)
trace = parsing.Trace(writer, args, options)
parsing.parse_file(writer, trace, self.mk_fname('proc_ps.log'))
trace.compile(writer)
self.processtree = process_tree.ProcessTree(writer, None, trace.ps_stats, \
trace.ps_stats.sample_period, None, options.prune, None, None, False, for_testing = True)
def mk_fname(self,f):
return os.path.join(self.rootdir, f)
def flatten(self, process_tree):
flattened = []
for p in process_tree:
flattened.append(p)
flattened.extend(self.flatten(p.child_list))
return flattened
def checkAgainstJavaExtract(self, filename, process_tree):
test_data = open(filename)
for expected, actual in zip(test_data, self.flatten(process_tree)):
tokens = expected.split('\t')
self.assertEqual(int(tokens[0]), actual.pid // 1000)
self.assertEqual(tokens[1], actual.cmd)
self.assertEqual(long(tokens[2]), 10 * actual.start_time)
self.assert_(long(tokens[3]) - 10 * actual.duration < 5, "duration")
self.assertEqual(int(tokens[4]), len(actual.child_list))
self.assertEqual(int(tokens[5]), len(actual.samples))
test_data.close()
def testBuild(self):
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.1.log'), process_tree)
def testMergeLogger(self):
self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.2.log'), process_tree)
def testPrune(self):
self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
self.processtree.prune(self.processtree.process_tree, None)
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3b.log'), process_tree)
def testMergeExploders(self):
self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
self.processtree.prune(self.processtree.process_tree, None)
self.processtree.merge_exploders(self.processtree.process_tree, set(['hwup']))
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3c.log'), process_tree)
def testMergeSiblings(self):
self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
self.processtree.prune(self.processtree.process_tree, None)
self.processtree.merge_exploders(self.processtree.process_tree, set(['hwup']))
self.processtree.merge_siblings(self.processtree.process_tree)
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3d.log'), process_tree)
def testMergeRuns(self):
self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
self.processtree.prune(self.processtree.process_tree, None)
self.processtree.merge_exploders(self.processtree.process_tree, set(['hwup']))
self.processtree.merge_siblings(self.processtree.process_tree)
self.processtree.merge_runs(self.processtree.process_tree)
process_tree = self.processtree.process_tree
self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3e.log'), process_tree)
if __name__ == '__main__':
unittest.main()
| wwright2/dcim3-angstrom1 | sources/openembedded-core/scripts/pybootchartgui/pybootchartgui/tests/process_tree_test.py | Python | mit | 4,203 |
#!/usr/bin/python
################################################################
# SEMTK_policy_check.py
#
# This script parses *init.rc files and the executable files under /system/*.
#
# Input:
# Assumes the /system/* output tree has already been generated.
#
# Output:
# Prints a warning log to ./semtk_policy_check/semtk_policy_check.log
#
################################################################
import sys
import os
import re
import commands
import shutil
debug=1
print_tag="[SEMTK_Check_Policy] : "
#output policy and log dir
semtk_policy_check_dir="SEAndroid"
semtk_policy_check_obj_dir=""
#out_dir
out_dir=""
#output log file
semtk_policy_log_filename="semtk.log"
#output policy
semtk_file_contexts_filename="file_contexts"
semtk_dev_type_filename="mtk_device.te"
semtk_file_type_filename="mtk_file.te"
#type enforcement file directory
seandroid_root_policy_dir="external/sepolicy"
semtk_root_policy_dir=""
product_name=""
alps_path=""
load_path=""
#scan executable file path
exec_type_postfix = "exec"
out_sbin_path = "root/sbin" #from out/target/product/XXX/
phone_sbin_dir = "/sbin"
out_system_bin_path = "system/bin"
phone_system_bin_dir = "/system/bin"
out_system_xbin_path = "system/xbin"
phone_system_xbin_dir = "/system/xbin"
out_vendor_bin_path = "system/vendor/bin"
phone_vendor_bin_dir = "/system/vendor/bin"
out_vendor_xbin_path = "system/vendor/xbin"
phone_vendor_xbin_dir = "/system/vendor/xbin"
#scan init file path
out_init_dir="root"
#policy setting
phone_data_dir = "/data"
data_type_postfix="data_file"
data_file_type_set="file_type, data_file_type"
phone_data_misc_dir = "/data/misc"
data_misc_type_postfix="data_file"
phone_dev_dir = "/dev"
dev_type_postfix="device"
dev_file_type_set="dev_type"
phone_socket_dir="/dev/socket"
socket_type_postfix="socket"
file_type_set="file_type"
file_contexts_fobj=[]
file_contexts_flag=[]
file_contexts_label=[]
cnt_label=0
init_update=False
def del_after_char(line,spec_char):
#
# delete the input line after specific char (ex : . / -)
#
cmd_idx = line.find(spec_char)
if cmd_idx != -1:
line_valid = line[:cmd_idx]
else:
line_valid=line
return line_valid
def split_by_whitespace(line):
#
# split the input line by whitespace (used in read file)
#
line_split = filter(None,line.replace("\t"," ").replace("\n"," ").replace("\r"," ").split(" "))
return line_split
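# Illustrative behaviour of the two helpers above (values are hypothetical):
#   del_after_char("chmod 0660 /dev/ttyMT0 #set perms", "#") -> "chmod 0660 /dev/ttyMT0 "
#   split_by_whitespace("chmod 0660\t/dev/ttyMT0\n")         -> ["chmod", "0660", "/dev/ttyMT0"]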
def append_label(fobj,type_label,flag):
#
# append input contexts to file_context structure
#
# input description
# =======================
# fobj, type_label, flag : file_context setting
# =======================
global cnt_label
global file_contexts_fobj
global file_contexts_flag
global file_contexts_label
#print "fobj = " + fobj
#print "type_labe = " + type_label
file_contexts_fobj.insert(cnt_label,fobj)
file_contexts_flag.insert(cnt_label,flag)
file_contexts_label.insert(cnt_label,type_label) #"".join(["u:object_r:",domain,":s0"])
cnt_label = cnt_label+1
#read and parsing orginal file_contexts
def read_file_contexts(fd):
#
# read original file_contexts setting
#
# input description
# =======================
# fd : file_contexts fd
# =======================
global cnt_label
global file_contexts_fobj
global file_contexts_flag
global file_contexts_label
cnt_label = 0
del file_contexts_fobj[:]
del file_contexts_flag[:]
del file_contexts_label[:]
for line in fd:
line_valid = del_after_char(line,"#") #delete after #
label = split_by_whitespace(line_valid)
for cnt_item in range(len(label)):
item = label[cnt_item]
#check item
if item.find("/") != -1: #check the first fill is directory
#bug?? (.*)? is not formal regular expression
file_contexts_fobj.insert(cnt_label, item.replace("(.*)?",".*"))
if item.find("(/.*)?") != -1:
file_contexts_flag.insert(cnt_label, "flag_all_under_dir")
                elif item == "/":
file_contexts_flag.insert(cnt_label, "flag_root_dir")
else:
file_contexts_flag.insert(cnt_label, "flag_matched_dir_file")
elif item.find("--") != -1:
                file_contexts_flag.insert(cnt_label, "flag_matched_only_file") #these two flags are mutually exclusive
elif item.find("u:") != -1: #seandroid user is only 1 u:
file_contexts_label.insert(cnt_label, item)
cnt_label=cnt_label+1
break
for cnt_print in range(cnt_label):
if debug: print print_tag + "file_contexts_label : " + file_contexts_fobj[cnt_print] + " " + file_contexts_flag[cnt_print] + " " + file_contexts_label[cnt_print]
if debug:
print print_tag + "Total Scan file_contexts number : " + str(cnt_label)
print print_tag + "Scan file_contexts finished"
def find_dir(path,find_dir_name):
#
# find the specific dir path (ex : alps), for use in relative path.
#
# input description
# =======================
# path : the file path
    # find_dir_name : the specific dir name you want to find (ex : alps)
# =======================
while path != "":
if find_dir_name == os.path.basename(path):
return path
else:
path = os.path.dirname(path)
print "path is " + path
print "Error in Path Dir in SEMTK_file_contexts_parser.py !!"
print "There is no alps dir name"
sys.exit(1);
def label_exec(out_exec_dir,fd,phone_dir_path,type_postfix,fd_log):
#
# label execution file and mark in the file_contexts
#
# input description
# =======================
    # out_exec_dir : scan dir path, relative to the out load_path
    # fd : file_contexts fd
    # phone_dir_path : the corresponding executable directory path on the phone
    # type_postfix : the executable file type postfix , ex: _exec or _data_file or _binary
# =======================
exec_dir = "/".join([load_path,out_exec_dir])
if debug: print print_tag + "Scan exec_dir : " + exec_dir
for dirpath, dirnames, filenames in os.walk(exec_dir):
for filename in filenames:
check_file = "/".join([dirpath,filename])
if debug: print "check_file : " + check_file
if not os.path.islink(check_file): #skip link file
# maybe in the dir ex: system/xbin/asan/app_process => asan_app_process
domain_name = check_file.replace("/".join([exec_dir,""]),"")
if debug : print "domain_name : " + domain_name
file_path = "/".join([phone_dir_path,domain_name])
# for handling lost+found
filename_path = file_path.replace("+","\+")
if debug: print "filename_path : " + filename_path
# search orinigal file_contexts
if not match_file_contexts(filename_path,False): #didn't set in original
type_label = filename_to_label(domain_name,False)
gen_te_file_true = gen_te_file(type_label,phone_dir_path,type_postfix)
if gen_te_file_true:
write_to_file_contexts(fd,file_path,type_label,type_postfix,"flag_matched_dir_file")
semtk_domain_new_file = "".join([type_label,".te"])
exec_source_file(fd_log,file_path,domain_name,semtk_domain_new_file)
def exec_source_file(fd_log,file_path,domain_name,semtk_domain_new_file):
out_log_path = "".join(["out/target/product","/",product_name,"_android.log"])
if debug: print "out_log_path : " + out_log_path
if not find_source_by_target(fd_log,out_log_path,file_path,domain_name,semtk_domain_new_file):
if not find_source_by_file(fd_log,out_log_path,file_path,domain_name,semtk_domain_new_file):
fd_log.write("".join(["Error!! Cannot found source file of ",file_path," ",semtk_domain_new_file]))
fd_log.write("\n")
def find_source_by_target(fd_log,out_log_path,file_path,type_label,semtk_domain_new_file):
found_source = False
grep_pat = "".join(["target thumb C\+*: ",type_label, " <= "])
grep_cmd_c = "".join(["grep -iE ","\"",grep_pat,"\" ",out_log_path])
if debug: print "grep_cmd_c : " + grep_cmd_c
cnt_cmd_c = "| wc -l"
is_exec_c = commands.getoutput(grep_cmd_c)
cnt_exec_c = commands.getoutput("".join(["echo \"", is_exec_c," \"", cnt_cmd_c]))
if debug: print "is_exec_c : " + is_exec_c
if debug: print "cnt_exec_c : " + cnt_exec_c
if is_exec_c != "":
first_exec_c = del_after_char(is_exec_c,"\n")
if debug: print "first_exec_c : " + first_exec_c
if debug: print "grep_pat : " + grep_pat
first_exec_c_split = split_by_whitespace(first_exec_c)
first_exec_source = first_exec_c_split[len(first_exec_c_split)-1]
if debug: print "first_exec_source : " + first_exec_source
fd_log.write(" ".join([file_path,first_exec_source,semtk_domain_new_file]))
fd_log.write('\n')
found_source = True
return found_source
def find_source_by_file(fd_log,out_log_path,file_path,type_label,semtk_domain_new_file):
found_source = False
grep_pat = "".join(["[(Notice)|(Export includes)].*(file:.*"])
grep_cmd_c = "".join(["grep -iE ","\"",grep_pat,"\<",type_label,")\>\" ",out_log_path])
if debug: print "grep_cmd_c : " + grep_cmd_c
cnt_cmd_c = "| wc -l"
is_exec_c = commands.getoutput(grep_cmd_c)
cnt_exec_c = commands.getoutput("".join(["echo \"", is_exec_c," \"", cnt_cmd_c]))
if debug: print "is_exec_c : " + is_exec_c
if debug: print "cnt_exec_c : " + cnt_exec_c
if is_exec_c != "":
first_exec_c = del_after_char(is_exec_c,"\n")
first_exec_c_split = split_by_whitespace(first_exec_c)
first_exec_c_mod = first_exec_c_split[len(first_exec_c_split)-3]
first_exec_source = first_exec_c_mod.replace("/NOTICE","/Android.mk")
if debug: print "first_exec_source : " + first_exec_source
if os.path.isfile(first_exec_source):
fd_log.write(" ".join([file_path,first_exec_source,semtk_domain_new_file]))
fd_log.write('\n')
found_source = True
return found_source
def filename_to_label(filename,is_init):
#
    # convert a filename into a type label according to the label naming rule
#
# input description
# =======================
# filename : filename
# =======================
#the type label is modified from filename
if is_init:
type_label_noext = filename.replace(".","_")
else:
type_label_noext = del_after_char(filename,".") #delete after .
if re.match("[0-9].*",type_label_noext): #can not number begin in the label
type_label_nonum = "".join(["mtk_",type_label_noext])
else:
type_label_nonum = type_label_noext
type_label = type_label_nonum.replace("/","_").replace("+","_").replace("-","_")
return type_label
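# Illustrative mappings for filename_to_label (file names are hypothetical examples):
#   filename_to_label("ccci_fsd", False)        -> "ccci_fsd"
#   filename_to_label("init.project.rc", True)  -> "init_project_rc"
#   filename_to_label("6620_launcher", False)   -> "mtk_6620_launcher"   (leading digit handled)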
def write_to_file_contexts(fd,file_path,type_label,type_postfix,type_flag):
#
# create new label and write into file_contexts file/structure
#
# input description
# =======================
# fd : add a new label in this fd file (file_contexts)
# file_path : the file path in the phone (for label setting)
# type_label : the type label
# type_flag : the new label append to file_context matrix, the type flag setting.
# =======================
#special write new label into file_contexts file
type_label = "".join(["u:object_r:",type_label,"_",type_postfix,":s0"])
print "write a new label : " + "".join([file_path," ",type_label])
fd.write("".join([file_path," ",type_label]))
fd.write('\n')
#special write new label into file_contexts structure
append_label(file_path,type_label,type_flag)
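# Illustrative effect (the path and label are hypothetical):
#   write_to_file_contexts(fd, "/system/bin/foo", "foo", "exec", "flag_matched_dir_file")
# appends the line
#   /system/bin/foo u:object_r:foo_exec:s0
# to the generated file_contexts and records it in the in-memory tables above.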
def gen_te_file(domain,mode,type_postfix):
#
    # generate a .te policy template file for the given domain if one does not already exist
    #
    # input description
    # =======================
    # domain : domain name (e.g. /sbin/adbd -> domain "adbd")
    # mode : the phone directory in which the executable file is located
# type_postfix : the execution file postfix (ex : exec)
# =======================
gen_te_file_true=False
seandroid_domain_te_file="".join([alps_path,"/",seandroid_root_policy_dir,"/",domain,".te"])
semtk_domain_product_te_file="".join([alps_path,"/",semtk_root_policy_dir,"/",domain,".te"])
semtk_domain_new_file = "".join([semtk_policy_check_obj_dir,"/",domain,".te"])
if debug: print "seandroid_domain_te_file = " + seandroid_domain_te_file
if debug: print "semtk_domain_product_te_file = " + semtk_domain_product_te_file
if debug: print "semtk_domain_new_file =" + semtk_domain_new_file
#print seandroid_domain_te_file
if not os.path.exists(seandroid_domain_te_file):
if not os.path.exists(semtk_domain_product_te_file):
if debug: print "create te file : " + semtk_domain_new_file
fd=open(semtk_domain_new_file, 'w')
fd.write("# =============================================="+'\n')
fd.write("".join(["# Policy File of ",mode,domain, " Executable File \n"]))
fd.write('\n\n')
fd.write("# =============================================="+'\n')
fd.write("# Type Declaration\n")
fd.write("# =============================================="+'\n')
fd.write('\n')
fd.write("".join(["type ", domain,"_", type_postfix," , exec_type, file_type;"]))
fd.write('\n')
fd.write("".join(["type ", domain," ,domain;"]))
fd.write('\n\n')
fd.write("# ==============================================")
fd.write("\n# Android Policy Rule\n")
fd.write("# =============================================="+'\n')
fd.write('\n')
fd.write("# ==============================================")
fd.write("\n# NSA Policy Rule\n")
fd.write("# =============================================="+'\n')
fd.write('\n')
fd.write("# ==============================================")
fd.write("\n# MTK Policy Rule\n")
fd.write("# =============================================="+'\n')
fd.write('\n')
fd.write("".join(["permissive ", domain, ";"]))
fd.write('\n')
if not mode==phone_sbin_dir:
fd.write("".join(["init_daemon_domain(", domain,")"]))
fd.write('\n')
fd.close()
if debug: print "create te file Done : " + semtk_domain_product_te_file
gen_te_file_true=True
return gen_te_file_true
def label_init(socket_mode,out_init_dir,fd,phone_dir_path,type_postfix,fd_type,file_type_set):
#
# label /dev, /data, socket important file/dir sets in the *.init.rc
#
# input description
# =======================
    # out_init_dir : scan *.init.rc path, relative to load_path
    # fd : file_contexts fd
    # phone_dir_path : the corresponding directory path on the phone
    # type_postfix : the file type postfix , ex: data_file, device or socket
# fd_type : generate a new file using the fd_type (ex: "_dev.te" or "_data.te")
# file_type_set : file type setting (process domain is empty, but file structure is data or dev)
# =======================
global init_update
init_dir="/".join([load_path,out_init_dir])
if socket_mode:
match_pat = ""
search_file_pat = "\s+socket\s?"
else:
match_pat = "".join(["\\",phone_dir_path,"\\/"])
search_file_pat = "".join(["(chmod|chown).*",match_pat])
if debug: print "match_pat : " + match_pat
for dirPath, dirNames, fileNames in os.walk(init_dir): #find all file
for fileName in fileNames:
if fileName.find("init") != -1 and fileName.find(".rc") != -1: #get *init.rc file
file_path = "".join([dirPath,"/",fileName])
if debug: print "init file_path : " + file_path
for line in open(file_path, 'r'): #search all line with chmod and chown
line_valid = del_after_char(line,"#")
if re.search(search_file_pat,line_valid):
if socket_mode:
line_valid_mod = line_valid.replace("socket","")
else:
line_valid_mod = line_valid
if debug: print "matched line : " + line_valid_mod
label = split_by_whitespace(line_valid_mod)
for cnt_item in range(len(label)):
item = label[cnt_item]
match = re.match(match_pat,item)
if match or socket_mode :
if debug: print "init check item = " + str(item)
item_type_remove_root_dir = item.replace("".join([phone_dir_path,"/"]),"")
item_type = del_after_char(item_type_remove_root_dir,"/") #get the first hirarchy
item_file_path = "".join([phone_dir_path,"/",item_type,"(/.*)?"])
item_file_valid = item_file_path.replace("+","\+") #for handling lost+found
if debug: print "item_type_remove_root_dir = " + item_type_remove_root_dir
if debug: print "item_file_valid = " + item_file_valid
if (item_type != "misc") and not match_file_contexts(item_file_valid,False): #didn't set in original
init_update=True
type_label = filename_to_label(item_type,True)
type_label_postfix = "".join([type_label,"_",type_postfix])
if debug: print "type_label = " + type_label
gen_type(fd_type,type_label_postfix,file_type_set) #check
write_to_file_contexts(fd,item_file_valid,type_label,type_postfix,"flag_all_under_dir")
break
def gen_type(fd_type,file_type,file_type_set):
#
# check the device and file type is not already delcaration
#
# input description
# =======================
# fd_type : the file needs to write the type into.
# file_type : find the file_type is exist or not
# file_type_set : the exect directory path in the phone
# =======================
grep_cmd="".join(["grep ", file_type, " -il "])
#if debug: print "grep_cmd = " + "".join([grep_cmd,alps_path,"/",seandroid_root_policy_dir,"/*.te"])
#seandroid_dir="".join([alps_path,"/",seandroid_root_policy_dir,"/*.te"])
semtk_root_policy_dir_escape = semtk_root_policy_dir.replace("[","\[").replace("]","\]")
semtk_dir="".join([alps_path,"/",semtk_root_policy_dir])
semtk_dir_escape="".join([alps_path,"/",semtk_root_policy_dir_escape,"/*"])
if debug: print "grep_cmd = " + "".join([grep_cmd,semtk_dir_escape])
#is_seandroid_file_type = ""
is_semtk_product_file_type = ""
#if os.path.exists(seandroid_dir) :
# is_seandroid_file_type = commands.getoutput("".join([grep_cmd,seandroid_dir]))
if os.path.exists(semtk_dir):
is_semtk_product_file_type = commands.getoutput("".join([grep_cmd,semtk_dir_escape]))
#if debug: print "seandroid has the type? " + is_seandroid_file_type
if debug: print "semtk has the type? " + is_semtk_product_file_type + " mtk_dir_exist = " + str(os.path.exists(semtk_dir))
#print seandroid_domain_te_file
if is_semtk_product_file_type == "":
if debug: print file_type + "into " + "fd_type"
fd_type.write("".join(["type ", file_type,", ", file_type_set, ";"]))
fd_type.write('\n')
else:
print "the type is already exist : " + file_type + " in " + is_semtk_product_file_type
def match_file_contexts(file_path,all_under_dir_valid):
#
# find the file_path is already in the file_contexts or not
#
# input description
# =======================
# file_path : the search file path
# all_under_dir_valid : search about dir (./*)
# =======================
global cnt_label
global file_contexts_fobj
global file_contexts_flag
global file_contexts_label
match = False
for cnt_scan in range(cnt_label):
# XXX/YYY/(./*) match ignore
match_all_under_dir = all_under_dir_valid and file_contexts_flag[cnt_scan].find("flag_all_under_dir")!=-1 #found
# the file-context setting is / => root setting => ignore
match_root_dir = file_contexts_flag[cnt_scan].find("flag_root_dir")!=-1 #found
# exact file assign in original in file_contexts
match_exact_dir = file_path == file_contexts_fobj[cnt_scan]
if match_exact_dir:
match = True
break
elif match_all_under_dir and not match_root_dir: #ignore all_under_dir -1:not match
match = re.search("".join(["^",file_contexts_fobj[cnt_scan],"$"]),file_path) #match string begin 1:pat 2:search string
if (match) or (match_exact_dir):
match = True
break
if debug: print "match original file_contexts?" + str(match)
if debug and match: print "org : " + file_contexts_fobj[cnt_scan] + " new : " + file_path
return match
def main():
global out_dir
global semtk_policy_check_obj_dir
global semtk_root_policy_dir
global product_name
global alps_path
global load_path
if len(sys.argv) == 2:
product_in=sys.argv[1]
out_dir = os.environ.get('OUT_DIR')
elif len(sys.argv) == 3:
product_in=sys.argv[1]
out_dir=sys.argv[2]
else:
print print_tag + "Error in Usage SEMTK_policy_check.py !!"
print sys.argv[0] + " [product_name]"
print sys.argv[0] + " [product_name] + [output diretory]"
sys.exit(1);
if debug: print product_in
if debug: print out_dir
#set alps path
product_name = del_after_char(product_in,"[")
alps_path = find_dir(os.getcwd(),"alps");
load_path = "/".join([out_dir,product_name])
#set android policy path
seandroid_root_policy_dir = "external/sepolicy"
seandroid_file_contexts_file = "/".join([alps_path,seandroid_root_policy_dir,semtk_file_contexts_filename])
#set mtk policy path
semtk_root_policy_dir = "mediatek/custom/out/" + product_in + "/sepolicy"
semtk_file_contexts_file = "/".join([alps_path,semtk_root_policy_dir,semtk_file_contexts_filename])
if debug: print print_tag + seandroid_file_contexts_file
if debug: print print_tag + semtk_file_contexts_file
#make policy directory
semtk_policy_check_obj_dir = "/".join([load_path,"obj",semtk_policy_check_dir])
if not os.path.exists(semtk_policy_check_obj_dir):
os.mkdir(semtk_policy_check_obj_dir)
else:
shutil.rmtree(semtk_policy_check_obj_dir)
os.mkdir(semtk_policy_check_obj_dir)
#open policy log file
semtk_policy_log_file = "".join([out_dir,"/",product_name,"_",semtk_policy_log_filename])
if debug: print print_tag + semtk_policy_log_file
fd_log=open(semtk_policy_log_file, 'w')
semtk_dev_type_file = "/".join([alps_path,semtk_root_policy_dir,semtk_dev_type_filename])
semtk_file_type_file = "/".join([alps_path,semtk_root_policy_dir,semtk_file_type_filename])
semtk_file_contexts_new_file = "/".join([semtk_policy_check_obj_dir,semtk_file_contexts_filename])
semtk_dev_type_new_file = "/".join([semtk_policy_check_obj_dir,semtk_dev_type_filename])
semtk_file_type_new_file = "/".join([semtk_policy_check_obj_dir,semtk_file_type_filename])
#open default file_contexts to check
if os.path.exists(semtk_file_contexts_file):
shutil.copyfile(semtk_file_contexts_file,semtk_file_contexts_new_file)
fd_fc=open(semtk_file_contexts_new_file, 'r+w')
else:
shutil.copyfile(seandroid_file_contexts_file,semtk_file_contexts_new_file)
fd_fc=open(semtk_file_contexts_new_file, 'r+w')
if os.path.exists(semtk_dev_type_file):
shutil.copyfile(semtk_dev_type_file,semtk_dev_type_new_file)
fd_dev=open(semtk_dev_type_new_file, 'a')
fd_dev.write("# =============================================="+'\n')
fd_dev.write("".join(["# Type Declaration of MTK Device\n"]))
fd_dev.write('\n\n')
fd_dev.write("# =============================================="+'\n')
fd_dev.write("# MTK Device Type Declaration\n")
fd_dev.write("# =============================================="+'\n')
fd_dev.write("\n")
if os.path.exists(semtk_file_type_file):
shutil.copyfile(semtk_file_type_file,semtk_file_type_new_file)
fd_file=open(semtk_file_type_new_file, 'a')
fd_file.write("# =============================================="+'\n')
fd_file.write("".join(["# Type Declaration of MTK File\n"]))
fd_file.write('\n\n')
fd_file.write("# =============================================="+'\n')
fd_file.write("# MTK Policy Rule\n")
fd_file.write("# =============================================="+'\n')
fd_file.write("\n")
if debug: print print_tag + semtk_policy_log_file
if debug: print print_tag + semtk_file_contexts_new_file
if debug: print print_tag + semtk_dev_type_new_file
if debug: print print_tag + semtk_file_type_new_file
#fd_fc move to the start of the file
fd_fc.seek(0)
read_file_contexts(fd_fc)
#fd_fc move to end of the file
fd_fc.seek(0,os.SEEK_END)
label_exec(out_sbin_path,fd_fc,phone_sbin_dir,exec_type_postfix,fd_log)
label_exec(out_system_bin_path,fd_fc,phone_system_bin_dir,exec_type_postfix,fd_log)
label_exec(out_system_xbin_path,fd_fc,phone_system_xbin_dir,exec_type_postfix,fd_log)
label_exec(out_vendor_bin_path,fd_fc,phone_vendor_bin_dir,exec_type_postfix,fd_log)
label_exec(out_vendor_xbin_path,fd_fc,phone_vendor_xbin_dir,exec_type_postfix,fd_log)
label_init(False, out_init_dir,fd_fc,phone_data_dir,data_type_postfix,fd_file,data_file_type_set)
label_init(False, out_init_dir,fd_fc,phone_data_misc_dir,data_type_postfix,fd_file,data_file_type_set)
label_init(False, out_init_dir,fd_fc,phone_dev_dir,dev_type_postfix,fd_dev,dev_file_type_set)
label_init(True, out_init_dir,fd_fc,phone_socket_dir,socket_type_postfix,fd_dev,file_type_set)
if init_update:
fd_log.write("".join(["file_contexts mediatek/custom/",product_in,"/sepolicy/file_contexts"]))
fd_log.write("\n")
fd_fc.close()
fd_dev.close()
fd_file.close()
fd_log.close()
if __name__ == "__main__":
main()
| visi0nary/mediatek | mt6732/mediatek/custom/common/sepolicy/SEMTK_policy_check.py | Python | gpl-2.0 | 27,354 |
#! /usr/bin/env python
__author__ = "Cathy Lozupone"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Catherine Lozupone", "Greg Caporaso",
"Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Cathy Lozupone"
__email__ = "[email protected]"
print """\nThis script has been moved to the FastUnifrac repository. \
For more information, see http://github.com/qiime/FastUnifrac"""
| josenavas/qiime | scripts/convert_unifrac_sample_mapping_to_otu_table.py | Python | gpl-2.0 | 457 |
# -*- coding: utf-8 -*-
u"""A module to test whether doctest recognizes some 2.2 features,
like static and class methods.
>>> print 'yup' # 1
yup
We include some (random) encoded (utf-8) text in the text surrounding
the example. It should be ignored:
ЉЊЈЁЂ
"""
from test import test_support
class C(object):
u"""Class C.
>>> print C() # 2
42
We include some (random) encoded (utf-8) text in the text surrounding
the example. It should be ignored:
ЉЊЈЁЂ
"""
def __init__(self):
"""C.__init__.
>>> print C() # 3
42
"""
def __str__(self):
"""
>>> print C() # 4
42
"""
return "42"
class D(object):
"""A nested D class.
>>> print "In D!" # 5
In D!
"""
def nested(self):
"""
>>> print 3 # 6
3
"""
def getx(self):
"""
>>> c = C() # 7
>>> c.x = 12 # 8
>>> print c.x # 9
-12
"""
return -self._x
def setx(self, value):
"""
>>> c = C() # 10
>>> c.x = 12 # 11
>>> print c.x # 12
-12
"""
self._x = value
x = property(getx, setx, doc="""\
>>> c = C() # 13
>>> c.x = 12 # 14
>>> print c.x # 15
-12
""")
def statm():
"""
A static method.
>>> print C.statm() # 16
666
>>> print C().statm() # 17
666
"""
return 666
statm = staticmethod(statm)
def clsm(cls, val):
"""
A class method.
>>> print C.clsm(22) # 18
22
>>> print C().clsm(23) # 19
23
"""
return val
clsm = classmethod(clsm)
def test_main():
from test import test_doctest2
EXPECTED = 19
f, t = test_support.run_doctest(test_doctest2)
if t != EXPECTED:
raise test_support.TestFailed("expected %d tests to run, not %d" %
(EXPECTED, t))
# Pollute the namespace with a bunch of imported functions and classes,
# to make sure they don't get tested.
from doctest import *
if __name__ == '__main__':
test_main()
| xbmc/atv2 | xbmc/lib/libPython/Python/Lib/test/test_doctest2.py | Python | gpl-2.0 | 2,297 |
# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import utils
class XmlHandler(xml.sax.ContentHandler):
def __init__(self, root_node, connection):
self.connection = connection
self.nodes = [('root', root_node)]
self.current_text = ''
def startElement(self, name, attrs):
self.current_text = ''
t = self.nodes[-1][1].startElement(name, attrs, self.connection)
if t != None:
if isinstance(t, tuple):
self.nodes.append(t)
else:
self.nodes.append((name, t))
def endElement(self, name):
self.nodes[-1][1].endElement(name, self.current_text, self.connection)
if self.nodes[-1][0] == name:
self.nodes.pop()
self.current_text = ''
def characters(self, content):
self.current_text += content
def parse(self, s):
xml.sax.parseString(s, self)
class Element(dict):
def __init__(self, connection=None, element_name=None,
stack=None, parent=None, list_marker=('Set',),
item_marker=('member', 'item'),
pythonize_name=False):
dict.__init__(self)
self.connection = connection
self.element_name = element_name
self.list_marker = utils.mklist(list_marker)
self.item_marker = utils.mklist(item_marker)
if stack is None:
self.stack = []
else:
self.stack = stack
self.pythonize_name = pythonize_name
self.parent = parent
def __getattr__(self, key):
if key in self:
return self[key]
for k in self:
e = self[k]
if isinstance(e, Element):
try:
return getattr(e, key)
except AttributeError:
pass
raise AttributeError
def get_name(self, name):
if self.pythonize_name:
name = utils.pythonize_name(name)
return name
def startElement(self, name, attrs, connection):
self.stack.append(name)
for lm in self.list_marker:
if name.endswith(lm):
l = ListElement(self.connection, name, self.list_marker,
self.item_marker, self.pythonize_name)
self[self.get_name(name)] = l
return l
if len(self.stack) > 0:
element_name = self.stack[-1]
e = Element(self.connection, element_name, self.stack, self,
self.list_marker, self.item_marker,
self.pythonize_name)
self[self.get_name(element_name)] = e
return (element_name, e)
else:
return None
def endElement(self, name, value, connection):
if len(self.stack) > 0:
self.stack.pop()
value = value.strip()
if value:
if isinstance(self.parent, Element):
self.parent[self.get_name(name)] = value
elif isinstance(self.parent, ListElement):
self.parent.append(value)
class ListElement(list):
def __init__(self, connection=None, element_name=None,
list_marker=['Set'], item_marker=('member', 'item'),
pythonize_name=False):
list.__init__(self)
self.connection = connection
self.element_name = element_name
self.list_marker = list_marker
self.item_marker = item_marker
self.pythonize_name = pythonize_name
def get_name(self, name):
if self.pythonize_name:
name = utils.pythonize_name(name)
return name
def startElement(self, name, attrs, connection):
for lm in self.list_marker:
if name.endswith(lm):
l = ListElement(self.connection, name, self.item_marker,
pythonize_name=self.pythonize_name)
setattr(self, self.get_name(name), l)
return l
if name in self.item_marker:
e = Element(self.connection, name, parent=self,
pythonize_name=self.pythonize_name)
self.append(e)
return e
else:
return None
def endElement(self, name, value, connection):
if name == self.element_name:
if len(self) > 0:
empty = []
for e in self:
if isinstance(e, Element):
if len(e) == 0:
empty.append(e)
for e in empty:
self.remove(e)
else:
setattr(self, self.get_name(name), value)
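# Illustrative usage (a sketch, not part of boto): feeding a small XML document
# through XmlHandler populates the nested Element/ListElement structure above.
#
#   root = Element()
#   XmlHandler(root, None).parse('<Response><Name>demo</Name></Response>')
#   # root['Response']['Name'] == 'demo'; attribute access (root.Name) also
#   # resolves through Element.__getattr__.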
| apavlo/h-store | third_party/python/boto/jsonresponse.py | Python | gpl-3.0 | 5,809 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-16 19:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0119_add_registrationprovider_perms_to_admin'),
('osf', '0119_add_asset_perms'),
]
operations = [
]
| erinspace/osf.io | osf/migrations/0120_merge_20180716_1457.py | Python | apache-2.0 | 349 |
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
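# Illustrative usage sketch for the distance-weighted voting implemented in
# predict_proba above. It assumes scikit-learn is installed; the sample data
# and the query point are made up for illustration.
#
#   >>> from sklearn.neighbors import KNeighborsClassifier
#   >>> clf = KNeighborsClassifier(n_neighbors=3, weights='distance')
#   >>> clf.fit([[0], [1], [2], [3]], [0, 0, 1, 1])
#   >>> clf.predict_proba([[1.1]])  # each row sums to 1; closer neighbors dominate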
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
        Label given to outlier samples (samples with no neighbors within
        the given radius).
        If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
                                 for pl in pred_labels[inliers]], dtype=np.intp)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights[inliers])],
                                dtype=np.intp)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
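# Illustrative usage sketch for the outlier handling implemented in predict
# above. It assumes scikit-learn is installed; the sample data and the chosen
# outlier label are made up for illustration.
#
#   >>> from sklearn.neighbors import RadiusNeighborsClassifier
#   >>> clf = RadiusNeighborsClassifier(radius=1.0, outlier_label=-1)
#   >>> clf.fit([[0], [1], [2], [3]], [0, 0, 1, 1])
#   >>> clf.predict([[10.0]])  # no neighbors within the radius, so the label is -1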
| aabadie/scikit-learn | sklearn/neighbors/classification.py | Python | bsd-3-clause | 14,359 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/DownloadSettingsAction.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Settings/DownloadSettingsAction.proto',
package='POGOProtos.Settings',
syntax='proto3',
serialized_pb=_b('\n0POGOProtos/Settings/DownloadSettingsAction.proto\x12\x13POGOProtos.Settings\"&\n\x16\x44ownloadSettingsAction\x12\x0c\n\x04hash\x18\x01 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DOWNLOADSETTINGSACTION = _descriptor.Descriptor(
name='DownloadSettingsAction',
full_name='POGOProtos.Settings.DownloadSettingsAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hash', full_name='POGOProtos.Settings.DownloadSettingsAction.hash', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=111,
)
DESCRIPTOR.message_types_by_name['DownloadSettingsAction'] = _DOWNLOADSETTINGSACTION
DownloadSettingsAction = _reflection.GeneratedProtocolMessageType('DownloadSettingsAction', (_message.Message,), dict(
DESCRIPTOR = _DOWNLOADSETTINGSACTION,
__module__ = 'POGOProtos.Settings.DownloadSettingsAction_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Settings.DownloadSettingsAction)
))
_sym_db.RegisterMessage(DownloadSettingsAction)
# @@protoc_insertion_point(module_scope)
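# Illustrative usage sketch (not generated code): constructing and serializing
# the message defined above. It assumes a protobuf runtime compatible with this
# generated module; the hash value is made up.
#
#   msg = DownloadSettingsAction(hash='example-settings-hash')
#   payload = msg.SerializeToString()
#   roundtrip = DownloadSettingsAction.FromString(payload)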
| DenL/pogom-webhook | pogom/pgoapi/protos/POGOProtos/Settings/DownloadSettingsAction_pb2.py | Python | mit | 2,206 |
"""
Doctests for Reversion.
These tests require Python version 2.5 or higher to run.
"""
from __future__ import with_statement
import datetime
from django.db import models, transaction
from django.test import TestCase
import reversion
from reversion.models import Version, Revision
from reversion.revisions import RegistrationError, DEFAULT_SERIALIZATION_FORMAT
class TestModel(models.Model):
"""A test model for reversion."""
name = models.CharField(max_length=100)
class Meta:
app_label = "reversion"
class ReversionRegistrationTest(TestCase):
"""Tests the django-reversion registration functionality."""
def setUp(self):
"""Sets up the TestModel."""
reversion.register(TestModel)
def testCanRegisterModel(self):
"""Tests that a model can be registered."""
self.assertTrue(reversion.is_registered(TestModel))
# Check that duplicate registration is disallowed.
self.assertRaises(RegistrationError, lambda: reversion.register(TestModel))
def testCanReadRegistrationInfo(self):
"""Tests that the registration info for a model is obtainable."""
registration_info = reversion.revision.get_registration_info(TestModel)
self.assertEqual(registration_info.fields, ("id", "name",))
self.assertEqual(registration_info.file_fields, ())
self.assertEqual(registration_info.follow, ())
self.assertEqual(registration_info.format, DEFAULT_SERIALIZATION_FORMAT)
def testCanUnregisterModel(self):
"""Tests that a model can be unregistered."""
reversion.unregister(TestModel)
self.assertFalse(reversion.is_registered(TestModel))
# Check that duplicate unregistration is disallowed.
self.assertRaises(RegistrationError, lambda: reversion.unregister(TestModel))
# Re-register the model.
reversion.register(TestModel)
def tearDown(self):
"""Tears down the tests."""
reversion.unregister(TestModel)
class ReversionCreateTest(TestCase):
"""Tests the django-reversion revision creation functionality."""
def setUp(self):
"""Sets up the TestModel."""
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
# Register the model.
reversion.register(TestModel)
def testCanSaveWithNoRevision(self):
"""Tests that without an active revision, no model is saved."""
test = TestModel.objects.create(name="test1.0")
self.assertEqual(Version.objects.get_for_object(test).count(), 0)
def testRevisionContextManager(self):
"""Tests that the revision context manager works."""
with reversion.revision:
test = TestModel.objects.create(name="test1.0")
self.assertEqual(Version.objects.get_for_object(test).count(), 1)
def testRevisionDecorator(self):
"""Tests that the revision function decorator works."""
@reversion.revision.create_on_success
def create_revision():
return TestModel.objects.create(name="test1.0")
self.assertEqual(Version.objects.get_for_object(create_revision()).count(), 1)
def testRevisionAbandonedOnError(self):
"""Tests that the revision is abandoned on error."""
# Create the first revision.
with reversion.revision:
test = TestModel.objects.create(name="test1.0")
# Create the second revision.
try:
with reversion.revision:
test.name = "test1.1"
test.save()
raise Exception()
except:
transaction.rollback()
# Check that there is still only one revision.
self.assertEqual(Version.objects.get_for_object(test).count(), 1)
def tearDown(self):
"""Tears down the tests."""
# Unregister the model.
reversion.unregister(TestModel)
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
class ReversionQueryTest(TestCase):
"""Tests that django-reversion can retrieve revisions using the api."""
def setUp(self):
"""Sets up the TestModel."""
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
# Register the model.
reversion.register(TestModel)
# Create some initial revisions.
with reversion.revision:
self.test = TestModel.objects.create(name="test1.0")
with reversion.revision:
self.test.name = "test1.1"
self.test.save()
with reversion.revision:
self.test.name = "test1.2"
self.test.save()
def testCanGetVersions(self):
"""Tests that the versions for an obj can be retrieved."""
versions = Version.objects.get_for_object(self.test)
self.assertEqual(versions[0].field_dict["name"], "test1.0")
self.assertEqual(versions[1].field_dict["name"], "test1.1")
self.assertEqual(versions[2].field_dict["name"], "test1.2")
def testCanGetUniqueVersions(self):
"""Tests that the unique versions for an objext can be retrieved."""
with reversion.revision:
self.test.save()
versions = Version.objects.get_unique_for_object(self.test)
# Check correct version data.
self.assertEqual(versions[0].field_dict["name"], "test1.0")
self.assertEqual(versions[1].field_dict["name"], "test1.1")
self.assertEqual(versions[2].field_dict["name"], "test1.2")
# Check correct number of versions.
self.assertEqual(len(versions), 3)
def testCanGetForDate(self):
"""Tests that the latest version for a particular date can be loaded."""
self.assertEqual(Version.objects.get_for_date(self.test, datetime.datetime.now()).field_dict["name"], "test1.2")
def testCanRevert(self):
"""Tests that an object can be reverted to a previous revision."""
oldest = Version.objects.get_for_object(self.test)[0]
self.assertEqual(oldest.field_dict["name"], "test1.0")
oldest.revert()
self.assertEqual(TestModel.objects.get().name, "test1.0")
def testCanGetDeleted(self):
"""Tests that deleted objects can be retrieved."""
self.assertEqual(len(Version.objects.get_deleted(TestModel)), 0)
# Delete the test model.
self.test.delete()
# Ensure that there is now a deleted model.
deleted = Version.objects.get_deleted(TestModel)
self.assertEqual(deleted[0].field_dict["name"], "test1.2")
self.assertEqual(len(deleted), 1)
def testCanRecoverDeleted(self):
"""Tests that a deleted object can be recovered."""
self.test.delete()
# Ensure deleted.
self.assertEqual(TestModel.objects.count(), 0)
# Recover.
Version.objects.get_deleted(TestModel)[0].revert()
# Ensure recovered.
self.assertEqual(TestModel.objects.get().name, "test1.2")
def tearDown(self):
"""Tears down the tests."""
# Unregister the model.
reversion.unregister(TestModel)
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
# Clear references.
del self.test
class ReversionCustomRegistrationTest(TestCase):
"""Tests the custom model registration options."""
def setUp(self):
"""Sets up the TestModel."""
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
# Register the model.
reversion.register(TestModel, fields=("id",), format="xml")
# Create some initial revisions.
with reversion.revision:
self.test = TestModel.objects.create(name="test1.0")
with reversion.revision:
self.test.name = "test1.1"
self.test.save()
with reversion.revision:
self.test.name = "test1.2"
self.test.save()
def testCustomRegistrationHonored(self):
"""Ensures that the custom settings were honored."""
self.assertEqual(reversion.revision.get_registration_info(TestModel).fields, ("id",))
self.assertEqual(reversion.revision.get_registration_info(TestModel).format, "xml")
def testCanRevertOnlySpecifiedFields(self):
""""Ensures that only the restricted set of fields are loaded."""
Version.objects.get_for_object(self.test)[0].revert()
self.assertEqual(TestModel.objects.get().name, "")
def testCustomSerializationFormat(self):
"""Ensures that the custom serialization format is used."""
        self.assertEqual(Version.objects.get_for_object(self.test)[0].serialized_data[0], "<")
def testIgnoreDuplicates(self):
"""Ensures that duplicate revisions can be ignores."""
self.assertEqual(len(Version.objects.get_for_object(self.test)), 3)
with reversion.revision:
self.test.save()
self.assertEqual(len(Version.objects.get_for_object(self.test)), 4)
with reversion.revision:
reversion.revision.ignore_duplicates = True
self.test.save()
self.assertEqual(len(Version.objects.get_for_object(self.test)), 4)
def tearDown(self):
"""Tears down the tests."""
# Unregister the model.
reversion.unregister(TestModel)
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
# Clear references.
del self.test
class TestRelatedModel(models.Model):
"""A model used to test Reversion relation following."""
name = models.CharField(max_length=100)
relation = models.ForeignKey(TestModel)
class Meta:
app_label = "reversion"
class ReversionRelatedTest(TestCase):
"""Tests the ForeignKey and OneToMany support."""
def setUp(self):
"""Sets up the TestModel."""
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
TestRelatedModel.objects.all().delete()
# Register the models.
reversion.register(TestModel, follow=("testrelatedmodel_set",))
reversion.register(TestRelatedModel, follow=("relation",))
    def testCanCreateRevisionForeignKey(self):
"""Tests that a revision containing both models is created."""
with reversion.revision:
test = TestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related1.0", relation=test)
self.assertEqual(Version.objects.get_for_object(test).count(), 1)
self.assertEqual(Version.objects.get_for_object(related).count(), 1)
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.get_for_object(test)[0].revision.version_set.all().count(), 2)
def testCanCreateRevisionOneToMany(self):
"""Tests that a revision containing both models is created."""
with reversion.revision:
test = TestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related1.0", relation=test)
with reversion.revision:
test.save()
self.assertEqual(Version.objects.get_for_object(test).count(), 2)
self.assertEqual(Version.objects.get_for_object(related).count(), 2)
self.assertEqual(Revision.objects.count(), 2)
self.assertEqual(Version.objects.get_for_object(test)[1].revision.version_set.all().count(), 2)
def testCanRevertRevision(self):
"""Tests that an entire revision can be reverted."""
with reversion.revision:
test = TestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related1.0", relation=test)
with reversion.revision:
test.name = "test1.1"
test.save()
related.name = "related1.1"
related.save()
# Attempt revert.
Version.objects.get_for_object(test)[0].revision.revert()
self.assertEqual(TestModel.objects.get().name, "test1.0")
self.assertEqual(TestRelatedModel.objects.get().name, "related1.0")
def testCanRecoverRevision(self):
"""Tests that an entire revision can be recovered."""
with reversion.revision:
test = TestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related1.0", relation=test)
with reversion.revision:
test.name = "test1.1"
test.save()
related.name = "related1.1"
related.save()
# Delete the models.
test.delete()
# Ensure deleted.
self.assertEqual(TestModel.objects.count(), 0)
self.assertEqual(TestRelatedModel.objects.count(), 0)
        # Query the deleted models.
self.assertEqual(len(Version.objects.get_deleted(TestModel)), 1)
self.assertEqual(len(Version.objects.get_deleted(TestRelatedModel)), 1)
# Revert the revision.
Version.objects.get_deleted(TestModel)[0].revision.revert()
# Ensure reverted.
self.assertEqual(TestModel.objects.count(), 1)
self.assertEqual(TestRelatedModel.objects.count(), 1)
# Ensure correct version.
self.assertEqual(TestModel.objects.get().name, "test1.1")
self.assertEqual(TestRelatedModel.objects.get().name, "related1.1")
def testIgnoreDuplicates(self):
"""Ensures the ignoring duplicates works across a foreign key."""
with reversion.revision:
test = TestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related1.0", relation=test)
with reversion.revision:
test.name = "test1.1"
test.save()
related.name = "related1.1"
related.save()
self.assertEqual(len(Version.objects.get_for_object(test)), 2)
with reversion.revision:
test.save()
self.assertEqual(len(Version.objects.get_for_object(test)), 3)
with reversion.revision:
test.save()
reversion.revision.ignore_duplicates = True
self.assertEqual(len(Version.objects.get_for_object(test)), 3)
def tearDown(self):
"""Tears down the tests."""
# Unregister the models.
reversion.unregister(TestModel)
reversion.unregister(TestRelatedModel)
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
TestRelatedModel.objects.all().delete()
class TestManyToManyModel(models.Model):
"""A model used to test Reversion M2M relation following."""
name = models.CharField(max_length=100)
relations = models.ManyToManyField(TestModel)
class Meta:
app_label = "reversion"
class ReversionManyToManyTest(TestCase):
"""Tests the ManyToMany support."""
def setUp(self):
"""Sets up the TestModel."""
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
TestManyToManyModel.objects.all().delete()
# Register the models.
reversion.register(TestModel, follow=("testmanytomanymodel_set",))
reversion.register(TestManyToManyModel, follow=("relations",))
def testCanCreateRevision(self):
"""Tests that a revision containing both models is created."""
with reversion.revision:
test1 = TestModel.objects.create(name="test1.0")
test2 = TestModel.objects.create(name="test2.0")
related = TestManyToManyModel.objects.create(name="related1.0")
related.relations.add(test1)
related.relations.add(test2)
self.assertEqual(Version.objects.get_for_object(test1).count(), 1)
self.assertEqual(Version.objects.get_for_object(test2).count(), 1)
self.assertEqual(Version.objects.get_for_object(related).count(), 1)
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.get_for_object(related)[0].revision.version_set.all().count(), 3)
def testCanCreateRevisionRelated(self):
"""Tests that a revision containing both models is created."""
with reversion.revision:
test = TestModel.objects.create(name="test1.0")
related1 = TestManyToManyModel.objects.create(name="related1.0")
related2 = TestManyToManyModel.objects.create(name="related2.0")
test.testmanytomanymodel_set.add(related1)
test.testmanytomanymodel_set.add(related2)
with reversion.revision:
test.save()
self.assertEqual(Version.objects.get_for_object(test).count(), 2)
self.assertEqual(Version.objects.get_for_object(related1).count(), 2)
self.assertEqual(Version.objects.get_for_object(related2).count(), 2)
self.assertEqual(Revision.objects.count(), 2)
self.assertEqual(Version.objects.get_for_object(test)[0].revision.version_set.all().count(), 3)
def testCanRevertRevision(self):
"""Tests that an entire revision can be reverted."""
with reversion.revision:
test1 = TestModel.objects.create(name="test1.0")
test2 = TestModel.objects.create(name="test2.0")
related = TestManyToManyModel.objects.create(name="related1.0")
related.relations.add(test1)
related.relations.add(test2)
with reversion.revision:
test1.name = "test1.1"
test1.save()
test2.name = "test2.1"
test2.save()
related.name = "related1.1"
related.save()
# Attempt revert.
Version.objects.get_for_object(related)[0].revision.revert()
self.assertEqual(TestModel.objects.get(pk=test1.pk).name, "test1.0")
self.assertEqual(TestModel.objects.get(pk=test2.pk).name, "test2.0")
self.assertEqual(TestManyToManyModel.objects.get().name, "related1.0")
def testCanRecoverRevision(self):
"""Tests that an entire revision can be recovered."""
with reversion.revision:
test1 = TestModel.objects.create(name="test1.0")
test2 = TestModel.objects.create(name="test2.0")
related = TestManyToManyModel.objects.create(name="related1.0")
related.relations.add(test1)
related.relations.add(test2)
with reversion.revision:
test1.name = "test1.1"
test1.save()
test2.name = "test2.1"
test2.save()
related.name = "related1.1"
related.save()
# Save the pks.
test1_pk = test1.pk
test2_pk = test2.pk
# Delete the models.
related.delete()
test1.delete()
test2.delete()
# Ensure deleted.
self.assertEqual(TestModel.objects.count(), 0)
self.assertEqual(TestManyToManyModel.objects.count(), 0)
        # Query the deleted models.
self.assertEqual(len(Version.objects.get_deleted(TestModel)), 2)
self.assertEqual(len(Version.objects.get_deleted(TestManyToManyModel)), 1)
# Revert the revision.
Version.objects.get_deleted(TestManyToManyModel)[0].revision.revert()
# Ensure reverted.
self.assertEqual(TestModel.objects.count(), 2)
self.assertEqual(TestManyToManyModel.objects.count(), 1)
# Ensure correct version.
self.assertEqual(TestModel.objects.get(pk=test1_pk).name, "test1.1")
self.assertEqual(TestModel.objects.get(pk=test2_pk).name, "test2.1")
self.assertEqual(TestManyToManyModel.objects.get().name, "related1.1")
def tearDown(self):
"""Tears down the tests."""
# Unregister the models.
reversion.unregister(TestModel)
reversion.unregister(TestManyToManyModel)
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
TestManyToManyModel.objects.all().delete()
# Test the patch helpers, if available.
try:
from reversion.helpers import generate_patch, generate_patch_html
except ImportError:
pass
else:
class PatchTest(TestCase):
"""Tests the patch generation functionality."""
def setUp(self):
"""Sets up a versioned site model to test."""
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
# Register the TestModel.
reversion.register(TestModel)
# Create some versions.
with reversion.revision:
test = TestModel.objects.create(name="test1.0",)
with reversion.revision:
test.name = "test1.1"
test.save()
# Get the version data.
self.test_0 = Version.objects.get_for_object(test)[0]
self.test_1 = Version.objects.get_for_object(test)[1]
def testCanGeneratePatch(self):
"""Tests that text patches can be generated."""
self.assertEqual(generate_patch(self.test_0, self.test_1, "name"),
"@@ -3,5 +3,5 @@\n st1.\n-0\n+1\n")
        def testCanGeneratePatchHtml(self):
"""Tests that html patches can be generated."""
self.assertEqual(generate_patch_html(self.test_0, self.test_1, "name"),
u'<SPAN TITLE="i=0">test1.</SPAN><DEL STYLE="background:#FFE6E6;" TITLE="i=6">0</DEL><INS STYLE="background:#E6FFE6;" TITLE="i=6">1</INS>')
def tearDown(self):
"""Deletes the versioned site model."""
# Unregister the model.
reversion.unregister(TestModel)
# Clear the database.
Version.objects.all().delete()
TestModel.objects.all().delete()
# Clear references.
del self.test_0
del self.test_1
| mzdaniel/oh-mainline | vendor/packages/django-reversion/src/reversion/tests.py | Python | agpl-3.0 | 22,563 |
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class res_partner_strip_name(models.Model):
_inherit = 'res.partner'
@api.one
def write(self, vals):
vals = self._check_name_field(vals)
return super(res_partner_strip_name, self).write(vals)
@api.model
def create(self, vals):
vals = self._check_name_field(vals)
return super(res_partner_strip_name, self).create(vals)
def _check_name_field(self, vals):
if vals.get('name'):
vals['name'] = vals['name'].strip().strip('"')
return vals
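    # Illustrative sketch of the transformation applied by _check_name_field
    # above; the value is made up. Surrounding whitespace and double quotes
    # are stripped from the partner name on create/write:
    #
    #   {'name': ' "ACME Corp" '}  ->  {'name': 'ACME Corp'}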
| Trust-Code/addons-yelizariev | res_partner_strip_name/models.py | Python | lgpl-3.0 | 588 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""MobileNet v1 models for Keras.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_applications import mobilenet
from tensorflow.python.keras.applications import keras_modules_injection
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.applications.mobilenet.MobileNet',
'keras.applications.MobileNet')
@keras_modules_injection
def MobileNet(*args, **kwargs):
return mobilenet.MobileNet(*args, **kwargs)
@keras_export('keras.applications.mobilenet.decode_predictions')
@keras_modules_injection
def decode_predictions(*args, **kwargs):
return mobilenet.decode_predictions(*args, **kwargs)
@keras_export('keras.applications.mobilenet.preprocess_input')
@keras_modules_injection
def preprocess_input(*args, **kwargs):
return mobilenet.preprocess_input(*args, **kwargs)
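# Illustrative usage sketch: the wrappers above are called like the upstream
# keras_applications functions. It assumes TensorFlow (with the Keras
# applications package) is installed; `image_batch` is a hypothetical
# NHWC-shaped array and the weight/shape choices are only examples.
#
#   model = MobileNet(weights='imagenet', input_shape=(224, 224, 3))
#   x = preprocess_input(image_batch)
#   top5 = decode_predictions(model.predict(x), top=5)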
| theflofly/tensorflow | tensorflow/python/keras/applications/mobilenet.py | Python | apache-2.0 | 1,618 |
import re
from django.db.backends import BaseDatabaseIntrospection
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
import re
m = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', key)
if m:
return ('CharField', {'max_length': int(m.group(1))})
raise KeyError
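# Illustrative sketch of the lookup behaviour implemented above (example
# values only):
#
#   FlexibleFieldLookupDict()['integer']      ->  'IntegerField'
#   FlexibleFieldLookupDict()['varchar(30)']  ->  ('CharField', {'max_length': 30})
#   FlexibleFieldLookupDict()['blob']         ->  raises KeyError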
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name FROM sqlite_master
WHERE type='table' AND NOT name='sqlite_sequence'
ORDER BY name""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [(info['name'], info['type'], None, None, None, None,
info['null_ok']) for info in self._table_info(cursor, table_name)]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(')+1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li+1:ri]
for other_index, other_desc in enumerate(other_table_results.split(',')):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
name = other_desc.split(' ', 1)[0].strip('"')
if name == column:
relations[field_index] = (other_index, table)
break
return relations
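    # Illustrative sketch of the return shape of get_relations (table name and
    # indexes are made up): a third column referencing auth_user.id, where id is
    # the first column of auth_user, would yield
    #
    #   {2: (0, 'auth_user')}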
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(')+1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple([s.strip('"') for s in m.groups()]))
return key_columns
def get_indexes(self, cursor, table_name):
indexes = {}
for info in self._table_info(cursor, table_name):
if info['pk'] != 0:
indexes[info['name']] = {'primary_key': True,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name] = {'primary_key': False,
'unique': unique}
return indexes
def get_primary_key_column(self, cursor, table_name):
"""
Get the column name of the primary key for the given table.
"""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(')+1:results.rindex(')')]
for field_desc in results.split(','):
field_desc = field_desc.strip()
m = re.search('"(.*)".*PRIMARY KEY$', field_desc)
if m:
return m.groups()[0]
return None
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, dflt_value, pk
return [{'name': field[1],
'type': field[2],
'null_ok': not field[3],
'pk': field[5] # undocumented
} for field in cursor.fetchall()]
| lzw120/django | django/db/backends/sqlite3/introspection.py | Python | bsd-3-clause | 7,499 |
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb
from twisted.internet import reactor
def main():
factory = pb.PBClientFactory()
reactor.connectTCP("localhost", 8800, factory)
d = factory.getRootObject()
d.addCallbacks(got_obj)
reactor.run()
def got_obj(obj):
# change "broken" into "broken2" to demonstrate an unhandled exception
d2 = obj.callRemote("broken")
d2.addCallback(working)
d2.addErrback(broken)
def working():
print "erm, it wasn't *supposed* to work.."
def broken(reason):
print "got remote Exception"
# reason should be a Failure (or subclass) holding the MyError exception
print " .__class__ =", reason.__class__
print " .getErrorMessage() =", reason.getErrorMessage()
print " .type =", reason.type
reactor.stop()
main()
| mzdaniel/oh-mainline | vendor/packages/twisted/doc/core/howto/listings/pb/exc_client.py | Python | agpl-3.0 | 886 |
# Copyright (C) 2015 Andrey Antukh <[email protected]>
# Copyright (C) 2015 Jesús Espino <[email protected]>
# Copyright (C) 2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This code is partially taken from django-rest-framework:
# Copyright (c) 2011-2014, Tom Christie
from django.core.urlresolvers import resolve, get_script_prefix
def get_breadcrumbs(url):
"""
Given a url returns a list of breadcrumbs, which are each a
tuple of (name, url).
"""
from taiga.base.api.settings import api_settings
from taiga.base.api.views import APIView
view_name_func = api_settings.VIEW_NAME_FUNCTION
def breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen):
"""
Add tuples of (name, url) to the breadcrumbs list,
progressively chomping off parts of the url.
"""
try:
(view, unused_args, unused_kwargs) = resolve(url)
except Exception:
pass
else:
# Check if this is a REST framework view,
# and if so add it to the breadcrumbs
cls = getattr(view, "cls", None)
if cls is not None and issubclass(cls, APIView):
# Don't list the same view twice in a row.
# Probably an optional trailing slash.
if not seen or seen[-1] != view:
suffix = getattr(view, "suffix", None)
name = view_name_func(cls, suffix)
breadcrumbs_list.insert(0, (name, prefix + url))
seen.append(view)
if url == "":
# All done
return breadcrumbs_list
elif url.endswith("/"):
# Drop trailing slash off the end and continue to try to
# resolve more breadcrumbs
url = url.rstrip("/")
return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen)
# Drop trailing non-slash off the end and continue to try to
# resolve more breadcrumbs
url = url[:url.rfind("/") + 1]
return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen)
prefix = get_script_prefix().rstrip("/")
url = url[len(prefix):]
return breadcrumbs_recursive(url, [], prefix, [])
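# Illustrative sketch of the result shape (the URL and view names are made up
# and depend on the project's registered API views):
#
#   get_breadcrumbs("/api/v1/projects/1/")
#   ->  [("Api Root", "/api/v1/"),
#        ("Project List", "/api/v1/projects/"),
#        ("Project Instance", "/api/v1/projects/1/")]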
| CoolCloud/taiga-back | taiga/base/api/utils/breadcrumbs.py | Python | agpl-3.0 | 2,886 |