repo_name | path | copies | size | content | license
---|---|---|---|---|---
SebasSBM/django
|
tests/test_client/views.py
|
109
|
10961
|
from xml.dom.minidom import parseString
from django.contrib.auth.decorators import login_required, permission_required
from django.core import mail
from django.forms import fields
from django.forms.forms import Form, ValidationError
from django.forms.formsets import BaseFormSet, formset_factory
from django.http import (
HttpResponse, HttpResponseBadRequest, HttpResponseNotAllowed,
HttpResponseNotFound, HttpResponseRedirect,
)
from django.shortcuts import render_to_response
from django.template import Context, Template
from django.test import Client
from django.utils.decorators import method_decorator
from django.utils.six.moves.urllib.parse import urlencode
def get_view(request):
"A simple view that expects a GET request, and returns a rendered template"
t = Template('This is a test. {{ var }} is the value.', name='GET Template')
c = Context({'var': request.GET.get('var', 42)})
return HttpResponse(t.render(c))
def trace_view(request):
"""
A simple view that expects a TRACE request and echoes its status line.
TRACE requests should not have an entity; the view will return a 400 status
response if it is present.
"""
if request.method.upper() != "TRACE":
return HttpResponseNotAllowed("TRACE")
elif request.body:
return HttpResponseBadRequest("TRACE requests MUST NOT include an entity")
else:
protocol = request.META["SERVER_PROTOCOL"]
t = Template(
'{{ method }} {{ uri }} {{ version }}',
name="TRACE Template",
)
c = Context({
'method': request.method,
'uri': request.path,
'version': protocol,
})
return HttpResponse(t.render(c))
def post_view(request):
"""A view that expects a POST, and returns a different template depending
on whether any POST data is available
"""
if request.method == 'POST':
if request.POST:
t = Template('Data received: {{ data }} is the value.', name='POST Template')
c = Context({'data': request.POST['value']})
else:
t = Template('Viewing POST page.', name='Empty POST Template')
c = Context()
else:
t = Template('Viewing GET page.', name='Empty GET Template')
c = Context()
return HttpResponse(t.render(c))
def view_with_header(request):
"A view that has a custom header"
response = HttpResponse()
response['X-DJANGO-TEST'] = 'Slartibartfast'
return response
def raw_post_view(request):
"""A view which expects raw XML to be posted and returns content extracted
from the XML"""
if request.method == 'POST':
root = parseString(request.body)
first_book = root.firstChild.firstChild
title, author = [n.firstChild.nodeValue for n in first_book.childNodes]
t = Template("{{ title }} - {{ author }}", name="Book template")
c = Context({"title": title, "author": author})
else:
t = Template("GET request.", name="Book GET template")
c = Context()
return HttpResponse(t.render(c))
def redirect_view(request):
"A view that redirects all requests to the GET view"
if request.GET:
query = '?' + urlencode(request.GET, True)
else:
query = ''
return HttpResponseRedirect('/get_view/' + query)
def view_with_secure(request):
"A view that indicates if the request was secure"
response = HttpResponse()
response.test_was_secure_request = request.is_secure()
response.test_server_port = request.META.get('SERVER_PORT', 80)
return response
def double_redirect_view(request):
"A view that redirects all requests to a redirection view"
return HttpResponseRedirect('/permanent_redirect_view/')
def bad_view(request):
"A view that returns a 404 with some error content"
return HttpResponseNotFound('Not found!. This page contains some MAGIC content')
TestChoices = (
('a', 'First Choice'),
('b', 'Second Choice'),
('c', 'Third Choice'),
('d', 'Fourth Choice'),
('e', 'Fifth Choice')
)
class TestForm(Form):
text = fields.CharField()
email = fields.EmailField()
value = fields.IntegerField()
single = fields.ChoiceField(choices=TestChoices)
multi = fields.MultipleChoiceField(choices=TestChoices)
def clean(self):
cleaned_data = self.cleaned_data
if cleaned_data.get("text") == "Raise non-field error":
raise ValidationError("Non-field error.")
return cleaned_data
def form_view(request):
"A view that tests a simple form"
if request.method == 'POST':
form = TestForm(request.POST)
if form.is_valid():
t = Template('Valid POST data.', name='Valid POST Template')
c = Context()
else:
t = Template('Invalid POST data. {{ form.errors }}', name='Invalid POST Template')
c = Context({'form': form})
else:
form = TestForm(request.GET)
t = Template('Viewing base form. {{ form }}.', name='Form GET Template')
c = Context({'form': form})
return HttpResponse(t.render(c))
def form_view_with_template(request):
"A view that tests a simple form"
if request.method == 'POST':
form = TestForm(request.POST)
if form.is_valid():
message = 'POST data OK'
else:
message = 'POST data has errors'
else:
form = TestForm()
message = 'GET form page'
return render_to_response('form_view.html',
{
'form': form,
'message': message
}
)
class BaseTestFormSet(BaseFormSet):
def clean(self):
"""Checks that no two email addresses are the same."""
if any(self.errors):
# Don't bother validating the formset unless each form is valid
return
emails = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
email = form.cleaned_data['email']
if email in emails:
raise ValidationError(
"Forms in a set must have distinct email addresses."
)
emails.append(email)
TestFormSet = formset_factory(TestForm, BaseTestFormSet)
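# A hedged sketch of the POST payload that formset_view below expects. The
# management-form keys (form-TOTAL_FORMS, form-INITIAL_FORMS) follow Django's
# default "form" prefix; all field values here are purely illustrative:
#   {'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0',
#    'form-0-text': 'abc', 'form-0-email': 'first@example.com',
#    'form-0-value': '1', 'form-0-single': 'a', 'form-0-multi': ('a', 'b'),
#    'form-1-text': 'def', 'form-1-email': 'second@example.com',
#    'form-1-value': '2', 'form-1-single': 'b', 'form-1-multi': ('b', 'c')}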
def formset_view(request):
"A view that tests a simple formset"
if request.method == 'POST':
formset = TestFormSet(request.POST)
if formset.is_valid():
t = Template('Valid POST data.', name='Valid POST Template')
c = Context()
else:
t = Template('Invalid POST data. {{ my_formset.errors }}',
name='Invalid POST Template')
c = Context({'my_formset': formset})
else:
        formset = TestFormSet(request.GET)
t = Template('Viewing base formset. {{ my_formset }}.',
name='Formset GET Template')
c = Context({'my_formset': formset})
return HttpResponse(t.render(c))
def login_protected_view(request):
"A simple view that is login protected."
t = Template('This is a login protected test. Username is {{ user.username }}.', name='Login Template')
c = Context({'user': request.user})
return HttpResponse(t.render(c))
login_protected_view = login_required(login_protected_view)
def login_protected_view_changed_redirect(request):
"A simple view that is login protected with a custom redirect field set"
t = Template('This is a login protected test. Username is {{ user.username }}.', name='Login Template')
c = Context({'user': request.user})
return HttpResponse(t.render(c))
login_protected_view_changed_redirect = (
login_required(redirect_field_name="redirect_to")(login_protected_view_changed_redirect)
)
def _permission_protected_view(request):
"A simple view that is permission protected."
t = Template('This is a permission protected test. '
'Username is {{ user.username }}. '
'Permissions are {{ user.get_all_permissions }}.',
name='Permissions Template')
c = Context({'user': request.user})
return HttpResponse(t.render(c))
permission_protected_view = permission_required('permission_not_granted')(_permission_protected_view)
permission_protected_view_exception = (
permission_required('permission_not_granted', raise_exception=True)(_permission_protected_view)
)
class _ViewManager(object):
@method_decorator(login_required)
def login_protected_view(self, request):
t = Template('This is a login protected test using a method. '
'Username is {{ user.username }}.',
name='Login Method Template')
c = Context({'user': request.user})
return HttpResponse(t.render(c))
@method_decorator(permission_required('permission_not_granted'))
def permission_protected_view(self, request):
t = Template('This is a permission protected test using a method. '
'Username is {{ user.username }}. '
'Permissions are {{ user.get_all_permissions }}.',
name='Permissions Template')
c = Context({'user': request.user})
return HttpResponse(t.render(c))
_view_manager = _ViewManager()
login_protected_method_view = _view_manager.login_protected_view
permission_protected_method_view = _view_manager.permission_protected_view
def session_view(request):
"A view that modifies the session"
request.session['tobacconist'] = 'hovercraft'
t = Template('This is a view that modifies the session.',
name='Session Modifying View Template')
c = Context()
return HttpResponse(t.render(c))
def broken_view(request):
"""A view which just raises an exception, simulating a broken view."""
raise KeyError("Oops! Looks like you wrote some bad code.")
def mail_sending_view(request):
mail.EmailMessage(
"Test message",
"This is a test email",
"[email protected]",
['[email protected]', '[email protected]']).send()
return HttpResponse("Mail sent")
def mass_mail_sending_view(request):
m1 = mail.EmailMessage(
'First Test message',
'This is the first test email',
'[email protected]',
['[email protected]', '[email protected]'])
m2 = mail.EmailMessage(
'Second Test message',
'This is the second test email',
'[email protected]',
['[email protected]', '[email protected]'])
c = mail.get_connection()
c.send_messages([m1, m2])
return HttpResponse("Mail sent")
def nesting_exception_view(request):
"""
A view that uses a nested client to call another view and then raises an
exception.
"""
client = Client()
client.get('/get_view/')
raise Exception('exception message')
def django_project_redirect(request):
return HttpResponseRedirect('https://www.djangoproject.com/')
|
bsd-3-clause
|
vigilv/scikit-learn
|
examples/neighbors/plot_kde_1d.py
|
347
|
5100
|
"""
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
|
bsd-3-clause
|
chaen/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/FileMetadata.py
|
2
|
20988
|
""" DIRAC FileCatalog plugin class to manage file metadata. This contains only
non-indexed metadata for the moment.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.Time import queryTime
from DIRAC.Core.Utilities.List import intListToString
from DIRAC.DataManagementSystem.Client.MetaQuery import FILE_STANDARD_METAKEYS, \
FILES_TABLE_METAKEYS, \
FILEINFO_TABLE_METAKEYS
class FileMetadata:
def __init__(self, database=None):
self.db = database
def setDatabase(self, database):
self.db = database
##############################################################################
#
# Manage Metadata fields
#
##############################################################################
def addMetadataField(self, pname, ptype, credDict):
""" Add a new metadata parameter to the Metadata Database.
pname - parameter name, ptype - parameter type in the MySQL notation
"""
if pname in FILE_STANDARD_METAKEYS:
return S_ERROR('Illegal use of reserved metafield name')
result = self.db.dmeta.getMetadataFields(credDict)
if not result['OK']:
return result
if pname in result['Value'].keys():
return S_ERROR('The metadata %s is already defined for Directories' % pname)
result = self.getFileMetadataFields(credDict)
if not result['OK']:
return result
if pname in result['Value'].keys():
if ptype.lower() == result['Value'][pname].lower():
return S_OK('Already exists')
else:
return S_ERROR('Attempt to add an existing metadata with different type: %s/%s' %
(ptype, result['Value'][pname]))
valueType = ptype
if ptype == "MetaSet":
valueType = "VARCHAR(64)"
req = "CREATE TABLE FC_FileMeta_%s ( FileID INTEGER NOT NULL, Value %s, PRIMARY KEY (FileID), INDEX (Value) )" \
% (pname, valueType)
result = self.db._query(req)
if not result['OK']:
return result
result = self.db.insertFields('FC_FileMetaFields', ['MetaName', 'MetaType'], [pname, ptype])
if not result['OK']:
return result
metadataID = result['lastRowId']
result = self.__transformMetaParameterToData(pname)
if not result['OK']:
return result
return S_OK("Added new metadata: %d" % metadataID)
def deleteMetadataField(self, pname, credDict):
""" Remove metadata field
"""
req = "DROP TABLE FC_FileMeta_%s" % pname
result = self.db._update(req)
error = ''
if not result['OK']:
error = result["Message"]
req = "DELETE FROM FC_FileMetaFields WHERE MetaName='%s'" % pname
result = self.db._update(req)
if not result['OK']:
if error:
result["Message"] = error + "; " + result["Message"]
return result
def getFileMetadataFields(self, credDict):
""" Get all the defined metadata fields
"""
req = "SELECT MetaName,MetaType FROM FC_FileMetaFields"
result = self.db._query(req)
if not result['OK']:
return result
metaDict = {}
for row in result['Value']:
metaDict[row[0]] = row[1]
return S_OK(metaDict)
###########################################################
#
# Set and get metadata for files
#
###########################################################
def setMetadata(self, path, metadict, credDict):
""" Set the value of a given metadata field for the the given directory path
"""
result = self.getFileMetadataFields(credDict)
if not result['OK']:
return result
metaFields = result['Value']
result = self.db.fileManager._findFiles([path])
if not result['OK']:
return result
if result['Value']['Successful']:
fileID = result['Value']['Successful'][path]['FileID']
else:
return S_ERROR('File %s not found' % path)
for metaName, metaValue in metadict.items():
if metaName not in metaFields:
result = self.__setFileMetaParameter(fileID, metaName, metaValue, credDict)
else:
result = self.db.insertFields('FC_FileMeta_%s' % metaName,
['FileID', 'Value'],
[fileID, metaValue])
if not result['OK']:
if result['Message'].find('Duplicate') != -1:
req = "UPDATE FC_FileMeta_%s SET Value='%s' WHERE FileID=%d" % (metaName, metaValue, fileID)
result = self.db._update(req)
if not result['OK']:
return result
else:
return result
return S_OK()
def removeMetadata(self, path, metadata, credDict):
""" Remove the specified metadata for the given file
"""
result = self.getFileMetadataFields(credDict)
if not result['OK']:
return result
metaFields = result['Value']
result = self.db.fileManager._findFiles([path])
if not result['OK']:
return result
if result['Value']['Successful']:
fileID = result['Value']['Successful'][path]['FileID']
else:
return S_ERROR('File %s not found' % path)
failedMeta = {}
for meta in metadata:
if meta in metaFields:
# Indexed meta case
req = "DELETE FROM FC_FileMeta_%s WHERE FileID=%d" % (meta, fileID)
result = self.db._update(req)
if not result['OK']:
          failedMeta[meta] = result['Message']
else:
# Meta parameter case
req = "DELETE FROM FC_FileMeta WHERE MetaKey='%s' AND FileID=%d" % (meta, fileID)
result = self.db._update(req)
if not result['OK']:
          failedMeta[meta] = result['Message']
    if failedMeta:
      metaExample = failedMeta.keys()[0]
      result = S_ERROR('Failed to remove %d metadata, e.g. %s' % (len(failedMeta), failedMeta[metaExample]))
      result['FailedMetadata'] = failedMeta
    else:
      result = S_OK()
    return result
def __getFileID(self, path):
result = self.db.fileManager._findFiles([path])
if not result['OK']:
return result
if result['Value']['Successful']:
fileID = result['Value']['Successful'][path]['FileID']
else:
return S_ERROR('File not found')
return S_OK(fileID)
def __setFileMetaParameter(self, fileID, metaName, metaValue, credDict):
""" Set an meta parameter - metadata which is not used in the the data
search operations
"""
result = self.db.insertFields('FC_FileMeta',
['FileID', 'MetaKey', 'MetaValue'],
[fileID, metaName, str(metaValue)])
return result
def setFileMetaParameter(self, path, metaName, metaValue, credDict):
result = self.__getFileID(path)
if not result['OK']:
return result
fileID = result['Value']
return self.__setFileMetaParameter(fileID, metaName, metaValue, credDict)
def _getFileUserMetadataByID(self, fileIDList, credDict, connection=False):
""" Get file user metadata for the list of file IDs
"""
# First file metadata
result = self.getFileMetadataFields(credDict)
if not result['OK']:
return result
metaFields = result['Value']
stringIDs = ','.join(['%s' % fId for fId in fileIDList])
metaDict = {}
for meta in metaFields:
req = "SELECT Value,FileID FROM FC_FileMeta_%s WHERE FileID in (%s)" % (meta, stringIDs)
result = self.db._query(req, conn=connection)
if not result['OK']:
return result
for value, fileID in result['Value']:
metaDict.setdefault(fileID, {})
metaDict[fileID][meta] = value
req = "SELECT FileID,MetaKey,MetaValue from FC_FileMeta where FileID in (%s)" % stringIDs
result = self.db._query(req, conn=connection)
if not result['OK']:
return result
for fileID, key, value in result['Value']:
metaDict.setdefault(fileID, {})
metaDict[fileID][key] = value
return S_OK(metaDict)
def getFileUserMetadata(self, path, credDict):
""" Get metadata for the given file
"""
# First file metadata
result = self.getFileMetadataFields(credDict)
if not result['OK']:
return result
metaFields = result['Value']
result = self.__getFileID(path)
if not result['OK']:
return result
fileID = result['Value']
metaDict = {}
metaTypeDict = {}
for meta in metaFields:
req = "SELECT Value,FileID FROM FC_FileMeta_%s WHERE FileID=%d" % (meta, fileID)
result = self.db._query(req)
if not result['OK']:
return result
if result['Value']:
metaDict[meta] = result['Value'][0][0]
metaTypeDict[meta] = metaFields[meta]
result = self.getFileMetaParameters(path, credDict)
if result['OK']:
metaDict.update(result['Value'])
for meta in result['Value']:
metaTypeDict[meta] = 'NonSearchable'
result = S_OK(metaDict)
result['MetadataType'] = metaTypeDict
return result
def __getFileMetaParameters(self, fileID, credDict):
req = "SELECT FileID,MetaKey,MetaValue from FC_FileMeta where FileID=%d " % fileID
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK({})
metaDict = {}
for fileID, key, value in result['Value']:
if key in metaDict:
if isinstance(metaDict[key], list):
metaDict[key].append(value)
else:
          metaDict[key] = [metaDict[key], value]
else:
metaDict[key] = value
return S_OK(metaDict)
def getFileMetaParameters(self, path, credDict):
""" Get meta parameters for the given file
"""
result = self.__getFileID(path)
if not result['OK']:
return result
fileID = result['Value']
return self.__getFileMetaParameters(fileID, credDict)
def __transformMetaParameterToData(self, metaname):
""" Relocate the meta parameters of all the directories to the corresponding
indexed metadata table
"""
req = "SELECT FileID,MetaValue from FC_FileMeta WHERE MetaKey='%s'" % metaname
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK()
insertValueList = []
for fileID, meta in result['Value']:
insertValueList.append("( %d,'%s' )" % (fileID, meta))
req = "INSERT INTO FC_FileMeta_%s (FileID,Value) VALUES %s" % (metaname, ', '.join(insertValueList))
result = self.db._update(req)
if not result['OK']:
return result
req = "DELETE FROM FC_FileMeta WHERE MetaKey='%s'" % metaname
result = self.db._update(req)
return result
#########################################################################
#
# Finding files by metadata
#
#########################################################################
def __createMetaSelection(self, value):
''' Create selection string to be used in the SQL query
'''
queryList = []
if isinstance(value, float):
queryList.append(('=', '%f' % value))
elif isinstance(value, (int, long)):
queryList.append(('=', '%d' % value))
elif isinstance(value, str):
if value.lower() == 'any':
queryList.append(('IS', 'NOT NULL'))
elif value.lower() == 'missing':
queryList.append(('IS', 'NULL'))
elif value:
result = self.db._escapeString(value)
if not result['OK']:
return result
eValue = result['Value']
if '*' in eValue or '?' in eValue:
eValue = eValue.replace('*', '%%')
eValue = eValue.replace('?', '_')
queryList.append(('LIKE', eValue))
else:
queryList.append(('=', eValue))
else:
queryList.append(('', ''))
elif isinstance(value, list):
if not value:
queryList.append(('', ''))
else:
result = self.db._escapeValues(value)
if not result['OK']:
return result
        query = '( %s )' % ', '.join(result['Value'])
queryList.append(('IN', query))
elif isinstance(value, dict):
for operation, operand in value.items():
# Prepare the escaped operand first
if isinstance(operand, list):
result = self.db._escapeValues(operand)
if not result['OK']:
return result
escapedOperand = ', '.join(result['Value'])
elif isinstance(operand, (int, long)):
escapedOperand = '%d' % operand
elif isinstance(operand, float):
escapedOperand = '%f' % operand
else:
result = self.db._escapeString(operand)
if not result['OK']:
return result
escapedOperand = result['Value']
# Treat the operations
if operation in ['>', '<', '>=', '<=']:
if isinstance(operand, list):
return S_ERROR('Illegal query: list of values for comparison operation')
else:
queryList.append((operation, escapedOperand))
elif operation == 'in' or operation == "=":
if isinstance(operand, list):
queryList.append(('IN', '( %s )' % escapedOperand))
else:
queryList.append(('=', escapedOperand))
elif operation == 'nin' or operation == "!=":
if isinstance(operand, list):
queryList.append(('NOT IN', '( %s )' % escapedOperand))
else:
queryList.append(('!=', escapedOperand))
return S_OK(queryList)
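  # Illustrative examples of the (operation, operand) pairs built above
  # (values are hypothetical, shown after escaping): a plain string becomes an
  # equality test, '*' and '?' wildcards are translated to SQL '%%' and '_'
  # (the doubled % survives a later string substitution), and dict values map
  # comparison operators onto SQL.
  #   'raw'            -> [('=', "'raw'")]
  #   'foo*'           -> [('LIKE', "'foo%%'")]
  #   {'in': [1, 2]}   -> [('IN', '( 1, 2 )')]
  #   {'>': 10}        -> [('>', '10')]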
def __buildSEQuery(self, storageElements):
""" Return a tuple with table and condition to locate files in a given SE
"""
if not storageElements:
return S_OK([])
seIDList = []
for se in storageElements:
seID = self.db.seNames.get(se, -1)
if seID == -1:
return S_ERROR('Unknown SE %s' % se)
seIDList.append(seID)
table = 'FC_Replicas'
seString = intListToString(seIDList)
query = '%%s.SEID IN ( %s )' % seString
return S_OK([(table, query)])
def __buildUserMetaQuery(self, userMetaDict):
""" Return a list of tuples with tables and conditions to locate files for a given user Metadata
"""
if not userMetaDict:
return S_OK([])
resultList = []
leftJoinTables = []
for meta, value in userMetaDict.items():
table = 'FC_FileMeta_%s' % meta
result = self.__createMetaSelection(value)
if not result['OK']:
return result
for operation, operand in result['Value']:
resultList.append((table, '%%s.Value %s %s' % (operation, operand)))
if operand == 'NULL':
leftJoinTables.append(table)
result = S_OK(resultList)
result['LeftJoinTables'] = leftJoinTables
return result
def __buildStandardMetaQuery(self, standardMetaDict):
table = 'FC_Files'
queriesFiles = []
queriesFileInfo = []
for infield, invalue in standardMetaDict.items():
value = invalue
if infield in FILES_TABLE_METAKEYS:
if infield == 'User':
value = self.db.users.get(invalue, -1)
          if value == -1:
return S_ERROR('Unknown user %s' % invalue)
elif infield == 'Group':
value = self.db.groups.get(invalue, -1)
          if value == -1:
return S_ERROR('Unknown group %s' % invalue)
table = 'FC_Files'
tableIndex = 'F'
field = FILES_TABLE_METAKEYS[infield]
result = self.__createMetaSelection(value)
if not result['OK']:
return result
for operation, operand in result['Value']:
queriesFiles.append('%s.%s %s %s' % (tableIndex, field, operation, operand))
elif infield in FILEINFO_TABLE_METAKEYS:
table = 'FC_FileInfo'
tableIndex = 'FI'
field = FILEINFO_TABLE_METAKEYS[infield]
result = self.__createMetaSelection(value)
if not result['OK']:
return result
for operation, operand in result['Value']:
queriesFileInfo.append('%s.%s %s %s' % (tableIndex, field, operation, operand))
else:
return S_ERROR('Illegal standard meta key %s' % infield)
resultList = []
if queriesFiles:
query = ' AND '.join(queriesFiles)
resultList.append(('FC_Files', query))
if queriesFileInfo:
query = ' AND '.join(queriesFileInfo)
resultList.append(('FC_FileInfo', query))
return S_OK(resultList)
def __findFilesByMetadata(self, metaDict, dirList, credDict):
""" Find a list of file IDs meeting the metaDict requirements and belonging
to directories in dirList
"""
# 1.- classify Metadata keys
storageElements = None
standardMetaDict = {}
userMetaDict = {}
leftJoinTables = []
for meta, value in metaDict.items():
if meta == "SE":
if isinstance(value, dict):
storageElements = value.get('in', [])
else:
storageElements = [value]
elif meta in FILE_STANDARD_METAKEYS:
standardMetaDict[meta] = value
else:
userMetaDict[meta] = value
tablesAndConditions = []
leftJoinTables = []
# 2.- standard search
if standardMetaDict:
result = self.__buildStandardMetaQuery(standardMetaDict)
if not result['OK']:
return result
tablesAndConditions.extend(result['Value'])
# 3.- user search
if userMetaDict:
result = self.__buildUserMetaQuery(userMetaDict)
if not result['OK']:
return result
tablesAndConditions.extend(result['Value'])
leftJoinTables = result['LeftJoinTables']
# 4.- SE constraint
if storageElements:
result = self.__buildSEQuery(storageElements)
if not result['OK']:
return result
tablesAndConditions.extend(result['Value'])
query = 'SELECT F.FileID FROM FC_Files F '
conditions = []
tables = []
if dirList:
dirString = intListToString(dirList)
conditions.append("F.DirID in (%s)" % dirString)
counter = 0
for table, condition in tablesAndConditions:
if table == 'FC_FileInfo':
query += 'INNER JOIN FC_FileInfo FI USING( FileID ) '
condition = condition.replace('%%', '%')
elif table == 'FC_Files':
condition = condition.replace('%%', '%')
else:
counter += 1
if table in leftJoinTables:
tables.append('LEFT JOIN %s M%d USING( FileID )' % (table, counter))
else:
tables.append('INNER JOIN %s M%d USING( FileID )' % (table, counter))
table = 'M%d' % counter
condition = condition % table
conditions.append(condition)
query += ' '.join(tables)
if conditions:
query += ' WHERE %s' % ' AND '.join(conditions)
result = self.db._query(query)
if not result['OK']:
return result
if not result['Value']:
return S_OK([])
# fileList = [ row[0] for row in result['Value' ] ]
fileList = []
for row in result['Value']:
fileID = row[0]
fileList.append(fileID)
return S_OK(fileList)
@queryTime
def findFilesByMetadata(self, metaDict, path, credDict):
""" Find Files satisfying the given metadata
:param dict metaDict: dictionary with the metaquery parameters
:param str path: Path to search into
:param dict credDict: Dictionary with the user credentials
:return: S_OK/S_ERROR, Value ID:LFN dictionary of selected files
"""
if not path:
path = '/'
# 1.- Get Directories matching the metadata query
result = self.db.dmeta.findDirIDsByMetadata(metaDict, path, credDict)
if not result['OK']:
return result
dirList = result['Value']
dirFlag = result['Selection']
# 2.- Get known file metadata fields
# fileMetaDict = {}
result = self.getFileMetadataFields(credDict)
if not result['OK']:
return result
fileMetaKeys = result['Value'].keys() + FILE_STANDARD_METAKEYS.keys()
fileMetaDict = dict(item for item in metaDict.items() if item[0] in fileMetaKeys)
fileList = []
idLfnDict = {}
if dirFlag != 'None':
# None means that no Directory satisfies the given query, thus the search is empty
if dirFlag == 'All':
# All means that there is no Directory level metadata in query, full name space is considered
dirList = []
if fileMetaDict:
# 3.- Do search in File Metadata
result = self.__findFilesByMetadata(fileMetaDict, dirList, credDict)
if not result['OK']:
return result
fileList = result['Value']
elif dirList:
# 4.- if not File Metadata, return the list of files in given directories
result = self.db.dtree.getFileLFNsInDirectoryByDirectory(dirList, credDict)
if not result['OK']:
return result
return S_OK(result['Value']['IDLFNDict'])
else:
# if there is no File Metadata and no Dir Metadata, return an empty list
idLfnDict = {}
if fileList:
# 5.- get the LFN
result = self.db.fileManager._getFileLFNs(fileList)
if not result['OK']:
return result
idLfnDict = result['Value']['Successful']
return S_OK(idLfnDict)
|
gpl-3.0
|
falau/pogom
|
pogom/pgoapi/protos/POGOProtos/Networking/Responses/GetHatchedEggsResponse_pb2.py
|
15
|
4314
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/GetHatchedEggsResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/GetHatchedEggsResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n<POGOProtos/Networking/Responses/GetHatchedEggsResponse.proto\x12\x1fPOGOProtos.Networking.Responses\"\x8e\x01\n\x16GetHatchedEggsResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x16\n\npokemon_id\x18\x02 \x03(\x06\x42\x02\x10\x01\x12\x1a\n\x12\x65xperience_awarded\x18\x03 \x03(\x05\x12\x15\n\rcandy_awarded\x18\x04 \x03(\x05\x12\x18\n\x10stardust_awarded\x18\x05 \x03(\x05\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETHATCHEDEGGSRESPONSE = _descriptor.Descriptor(
name='GetHatchedEggsResponse',
full_name='POGOProtos.Networking.Responses.GetHatchedEggsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='success', full_name='POGOProtos.Networking.Responses.GetHatchedEggsResponse.success', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pokemon_id', full_name='POGOProtos.Networking.Responses.GetHatchedEggsResponse.pokemon_id', index=1,
number=2, type=6, cpp_type=4, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='experience_awarded', full_name='POGOProtos.Networking.Responses.GetHatchedEggsResponse.experience_awarded', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='candy_awarded', full_name='POGOProtos.Networking.Responses.GetHatchedEggsResponse.candy_awarded', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stardust_awarded', full_name='POGOProtos.Networking.Responses.GetHatchedEggsResponse.stardust_awarded', index=4,
number=5, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=240,
)
DESCRIPTOR.message_types_by_name['GetHatchedEggsResponse'] = _GETHATCHEDEGGSRESPONSE
GetHatchedEggsResponse = _reflection.GeneratedProtocolMessageType('GetHatchedEggsResponse', (_message.Message,), dict(
DESCRIPTOR = _GETHATCHEDEGGSRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.GetHatchedEggsResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.GetHatchedEggsResponse)
))
_sym_db.RegisterMessage(GetHatchedEggsResponse)
_GETHATCHEDEGGSRESPONSE.fields_by_name['pokemon_id'].has_options = True
_GETHATCHEDEGGSRESPONSE.fields_by_name['pokemon_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
|
mit
|
bshaffer/google-api-php-client-services
|
generator/tests/api_exception_test.py
|
10
|
1039
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for api_exception.py."""
from google.apputils import basetest
from googleapis.codegen.api_exception import ApiException
class ApiExceptionTest(basetest.TestCase):
def testExceptionStr(self):
e = ApiException('foo')
self.assertEquals('foo', str(e))
e = ApiException('foo', {'bar': 1})
self.assertEquals("""foo: {'bar': 1}""", str(e))
if __name__ == '__main__':
basetest.main()
|
apache-2.0
|
jgliss/pyplis
|
pyplis/inout.py
|
1
|
22643
|
# -*- coding: utf-8 -*-
#
# Pyplis is a Python library for the analysis of UV SO2 camera data
# Copyright (C) 2017 Jonas Gliss ([email protected])
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module containing all sorts of I/O-routines (e.g. test data access)."""
from __future__ import (absolute_import, division)
from os.path import join, basename, exists, isfile, abspath, expanduser
from os import listdir, mkdir, remove, walk
from re import split
from collections import OrderedDict as od
try:
from progressbar import (ProgressBar, Percentage, Bar,
RotatingMarker, ETA, FileTransferSpeed)
PGBAR_AVAILABLE = True
except BaseException:
PGBAR_AVAILABLE = False
from zipfile import ZipFile, ZIP_DEFLATED
try:
from urllib.request import urlopen, urlretrieve
except ImportError:
from urllib2 import urlopen
from urllib import urlretrieve
from pyplis import logger, print_log
from tempfile import mktemp, gettempdir
from shutil import copy2
import six
def data_search_dirs():
"""Get basic search directories for package data files.
Data files are searched for in `~/my_pyplis`, `./data` and, if set,
in the `PYPLIS_DATADIR` environment variable.
"""
from pyplis import __dir__
import os
usr_dir = expanduser(join('~', 'my_pyplis'))
if not exists(usr_dir):
mkdir(usr_dir)
try:
env = os.environ["PYPLIS_DATADIR"]
return (usr_dir, join(__dir__, "data"), env)
except KeyError:
return (usr_dir, join(__dir__, "data"))
def zip_example_scripts(repo_base):
from pyplis import __version__ as v
vstr = ".".join(v.split(".")[:3])
logger.info("Adding zipped version of pyplis example scripts for version %s" %
vstr)
scripts_dir = join(repo_base, "scripts")
if not exists(scripts_dir):
raise IOError("Cannot created zipped version of scripts, folder %s "
"does not exist" % scripts_dir)
save_dir = join(scripts_dir, "old_versions")
if not exists(save_dir):
raise IOError("Cannot create zipped version of scripts, folder %s "
"does not exist" % save_dir)
name = "scripts-%s.zip" % vstr
zipf = ZipFile(join(save_dir, name), 'w', ZIP_DEFLATED)
for fname in listdir(scripts_dir):
if fname.endswith("py"):
zipf.write(join(scripts_dir, fname))
zipf.close()
def get_all_files_in_dir(directory, file_type=None, include_sub_dirs=False):
"""Find all files in a certain directory.
Parameters
----------
directory : str
path to directory
file_type : :obj:`str`, optional
specify file type (e.g. "png", "fts"). If unspecified, then all files
are considered
include_sub_dirs : bool
if True, also all files from all sub-directories are extracted
Returns
-------
list
sorted list containing paths of all files detected
"""
p = directory
if p is None or not exists(p):
message = ('Error: path %s does not exist' % p)
logger.warning(message)
return []
use_all_types = False
if not isinstance(file_type, str):
use_all_types = True
if include_sub_dirs:
logger.info("Include files from subdirectories")
all_paths = []
if use_all_types:
logger.info("Using all file types")
for path, subdirs, files in walk(p):
for filename in files:
all_paths.append(join(path, filename))
else:
logger.info("Using only %s files" % file_type)
for path, subdirs, files in walk(p):
for filename in files:
if filename.endswith(file_type):
all_paths.append(join(path, filename))
else:
logger.info("Exclude files from subdirectories")
if use_all_types:
logger.info("Using all file types")
all_paths = [join(p, f) for f in listdir(p) if isfile(join(p, f))]
else:
logger.info("Using only %s files" % file_type)
all_paths = [join(p, f) for f in listdir(p) if
isfile(join(p, f)) and f.endswith(file_type)]
all_paths.sort()
return all_paths
def create_temporary_copy(path):
temp_dir = gettempdir()
temp_path = join(temp_dir, basename(path))
copy2(path, temp_path)
return temp_path
def download_test_data(save_path=None):
"""Download pyplis test data.
:param save_path: location where path is supposed to be stored
Code for progress bar was "stolen" `here <http://stackoverflow.com/
questions/11143767/how-to-make-a-download-with>`_
(last access date: 11/01/2017)
-progress-bar-in-python
"""
from pyplis import URL_TESTDATA
url = URL_TESTDATA
dirs = data_search_dirs()
where = dirs[0]
fp = join(where, "_paths.txt")
if not exists(fp):
where = dirs[1]
fp = join(where, "_paths.txt")
if save_path is None or not exists(save_path):
save_path = where
logger.info("Save path unspecified")
else:
with open(fp, "a") as f:
f.write("\n" + save_path + "\n")
logger.info("Adding new path for test data location in "
"file _paths.txt: %s" % save_path)
f.close()
print_log.info("installing test data at %s" % save_path)
filename = mktemp('.zip')
if PGBAR_AVAILABLE:
widgets = ['Downloading pyplis test data: ', Percentage(), ' ',
Bar(marker=RotatingMarker()), ' ',
ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets)
def dl_progress(count, block_size, total_size):
if pbar.maxval is None:
pbar.maxval = total_size
pbar.start()
pbar.update(min(count * block_size, total_size))
urlretrieve(url, filename, reporthook=dl_progress)
pbar.finish()
else:
print_log.info("Downloading Pyplis testdata (this can take a while, install"
"Progressbar package if you want to receive download info")
urlretrieve(url, filename)
thefile = ZipFile(filename)
print_log.info("Extracting data at: %s (this may take a while)" % save_path)
thefile.extractall(save_path)
thefile.close()
remove(filename)
print_log.info("Download successfully finished, deleted temporary data file"
"at: %s" % filename)
def find_test_data():
"""Search location of test data folder."""
dirs = data_search_dirs()
folder_name = "pyplis_etna_testdata"
for data_path in dirs:
if folder_name in listdir(data_path):
print_log.info("Found test data at location: %s" % data_path)
return join(data_path, folder_name)
try:
with open(join(data_path, "_paths.txt"), "r") as f:
lines = f.readlines()
for line in lines:
p = line.split("\n")[0]
if exists(p) and folder_name in listdir(p):
print_log.info("Found test data at default location: %s" % p)
f.close()
return join(p, folder_name)
except:
pass
raise IOError("pyplis test data could not be found, please download"
"testdata first, using method "
"pyplis.inout.download_test_data or"
"specify the local path where the test data is stored using"
"pyplis.inout.set_test_data_path")
def all_test_data_paths():
"""Return list of all search paths for test data."""
dirs = data_search_dirs()
paths = []
[paths.append(x) for x in dirs]
for data_path in dirs:
fp = join(data_path, "_paths.txt")
if exists(fp):
with open(join(data_path, "_paths.txt"), "r") as f:
lines = f.readlines()
for line in lines:
p = line.split("\n")[0].lower()
if exists(p):
paths.append(p)
return paths
def set_test_data_path(save_path):
"""Set local path where test data is stored."""
if save_path.lower() in all_test_data_paths():
logger.info("Path is already in search tree")
return
dirs = data_search_dirs()
fp = join(dirs[0], "_paths.txt")
if not exists(fp):
fp = join(dirs[1], "_paths.txt")
save_path = abspath(save_path)
try:
if not exists(save_path):
raise IOError("Could not set test data path: specified location "
"does not exist: %s" % save_path)
with open(fp, "a") as f:
f.write("\n" + save_path + "\n")
print_log.info("Adding new path for test data location in "
"file _paths.txt: %s" % save_path)
f.close()
if "pyplis_etna_testdata" not in listdir(save_path):
logger.warning("WARNING: test data folder (name: pyplis_etna_testdata) "
"could not be found at specified location, please download "
"test data, unzip and save at: %s" % save_path)
except:
raise
def _load_cam_info(cam_id, filepath):
"""Load camera info from a specific cam_info file."""
dat = od()
if cam_id is None:
return dat
with open(filepath, 'rb') as f:
filters = []
darkinfo = []
io_opts = {}
found = 0
for ll in f:
line = ll.decode('utf-8').rstrip()
if not line:
continue
if "END" in line and found:
dat["default_filters"] = filters
dat["dark_info"] = darkinfo
dat["io_opts"] = io_opts
return dat
spl = line.split(":")
if len(spl) == 1:
continue
if found:
if line[0] == "#":
continue
k = spl[0].strip()
if k == "dark_info":
l = [x.strip()
for x in spl[1].split("#")[0].split(',')]
darkinfo.append(l)
elif k == "filter":
l = [x.strip()
for x in spl[1].split("#")[0].split(',')]
filters.append(l)
elif k == "io_opts":
l = [x.strip()
for x in split("=|,", spl[1].split("#")[0])]
keys, vals = l[::2], l[1::2]
if len(keys) == len(vals):
for i in range(len(keys)):
io_opts[keys[i]] = bool(int(vals[i]))
elif k == "reg_shift_off":
try:
l = [float(x.strip()) for x in
spl[1].split("#")[0].split(',')]
dat["reg_shift_off"] = l
except:
pass
else:
data_str = spl[1].split("#")[0].strip()
if any([data_str == x for x in ["''", '""']]):
data_str = ""
dat[k] = data_str
if spl[0] == "cam_ids":
l = [x.strip() for x in spl[1].split("#")[0].split(',')]
if cam_id in l:
found = 1
dat["cam_ids"] = l
raise IOError("Camera info for cam_id %s could not be found" % cam_id)
def get_camera_info(cam_id):
"""Try access camera information from file "cam_info.txt" (package data).
:param str cam_id: string ID of camera (e.g. "ecII")
"""
dirs = data_search_dirs()
try:
return _load_cam_info(cam_id, join(dirs[0], "cam_info.txt"))
except:
return _load_cam_info(cam_id, join(dirs[1], "cam_info.txt"))
def save_new_default_camera(info_dict):
"""Save new default camera to data file *cam_info.txt*.
:param dict info_dict: dictionary containing camera default information
    Only valid keys will be added to the new entry in *cam_info.txt*.
"""
dirs = data_search_dirs()
cam_file = join(dirs[0], "cam_info.txt")
if not exists(cam_file):
cam_file = join(dirs[1], "cam_info.txt")
keys = get_camera_info("ecII").keys()
for key in keys:
logger.info("%s (in input: %s)" % (key, key in info_dict))
if "cam_id" not in info_dict:
raise KeyError("Missing specification of cam_id")
try:
cam_ids = info_dict["cam_ids"]
except:
info_dict["cam_ids"] = [info_dict["cam_id"]]
cam_ids = [info_dict["cam_id"]]
if not all([x in info_dict.keys() for x in keys]):
raise KeyError("Input dictionary does not include all required keys "
"for creating a new default camera type, required "
"keys are %s" % keys)
ids = get_all_valid_cam_ids()
if any([x in ids for x in info_dict["cam_ids"]]):
raise KeyError("Cam ID conflict: one of the provided IDs already "
"exists in database...")
cam_file_temp = create_temporary_copy(cam_file)
with open(cam_file_temp, "a") as info_file:
info_file.write("\n\nNEWCAM\ncam_ids:")
cam_ids = [str(x) for x in cam_ids]
info_file.write(",".join(cam_ids))
info_file.write("\n")
for k, v in six.iteritems(info_dict):
if k in keys:
if k == "default_filters":
for finfo in v:
info_file.write("filter:")
finfo = [str(x) for x in finfo]
info_file.write(",".join(finfo))
info_file.write("\n")
elif k == "dark_info":
for finfo in v:
info_file.write("dark_info:")
finfo = [str(x) for x in finfo]
info_file.write(",".join(finfo))
info_file.write("\n")
elif k == "io_opts":
s = "io_opts:"
for opt, val in six.iteritems(v):
s += "%s=%d," % (opt, val)
s = s[:-1] + "\n"
info_file.write(s)
elif k == "reg_shift_off":
info_file.write("%s:%.2f,%.2f\n" % (k, v[0], v[1]))
elif k == "cam_ids":
pass
else:
info_file.write("%s:%s\n" % (k, v))
info_file.write("ENDCAM")
info_file.close()
# Writing ended without errors: replace data base file "cam_info.txt" with
# the temporary file and delete the temporary file
copy2(cam_file_temp, cam_file)
remove(cam_file_temp)
print_log.info("Successfully added new default camera %s to database at %s"
% (info_dict["cam_id"], cam_file))
def save_default_source(info_dict):
"""Add a new default source to file source_info.txt."""
if not all(k in info_dict for k in ("name", "lon", "lat", "altitude")):
raise ValueError("Cannot save source information, require at least "
"name, lon, lat and altitude")
dirs = data_search_dirs()
path = join(dirs[0], "my_sources.txt")
if not exists(path):
path = join(dirs[1], "my_sources.txt")
if info_dict["name"] in get_source_ids():
raise NameError("A source with name %s already exists in database"
% info_dict["name"])
source_file_temp = create_temporary_copy(path)
with open(source_file_temp, "a") as info_file:
info_file.write("\n\nsource_ids:%s\n" % info_dict["name"])
for k, v in six.iteritems(info_dict):
info_file.write("%s:%s\n" % (k, v))
info_file.write("END")
info_file.close()
# Writing ended without errors: replace data base file "cam_info.txt" with
# the temporary file and delete the temporary file
copy2(source_file_temp, path)
remove(source_file_temp)
print_log.info("Successfully added new default source %s to database file at %s"
% (info_dict["name"], path))
def get_all_valid_cam_ids():
"""Load all valid camera string ids.
Reads info from file cam_info.txt which is part of package data
"""
from pyplis import _LIBDIR
ids = []
with open(join(_LIBDIR, "data", "cam_info.txt"), "rb") as f:
for line in f:
spl = line.decode("ISO-8859-1").split(":")
if spl[0].strip().lower() == "cam_ids":
ids.extend([x.strip()
for x in spl[1].split("#")[0].split(',')])
return ids
def get_cam_ids():
"""Load all default camera string ids.
Reads info from file cam_info.txt which is part of package data
"""
dirs = data_search_dirs()
ids = []
for path in dirs:
try:
with open(join(path, "cam_info.txt")) as f:
for line in f:
spl = line.split(":")
if spl[0].strip().lower() == "cam_id":
sid = spl[1].split("#")[0].strip()
if sid not in ids:
ids.append(sid)
except IOError:
pass
return ids
def get_source_ids():
"""Get all existing source IDs.
Reads info from file my_sources.txt which is part of package data
"""
dirs = data_search_dirs()
ids = []
for path in dirs:
try:
with open(join(path, "my_sources.txt")) as f:
for line in f:
spl = line.split(":")
if spl[0].strip().lower() == "name":
sid = spl[1].split("#")[0].strip()
if sid not in ids:
ids.append(sid)
except IOError:
pass
return ids
def get_source_info(source_id, try_online=True):
"""Try access source information from file "my_sources.txt".
File is part of package data
:param str source_id: string ID of source (e.g. Etna)
:param bool try_online: if True and local access fails, try to find source
ID in online database
"""
from pyplis import _LIBDIR
dat = od()
if source_id == "":
return dat
found = 0
with open(join(_LIBDIR, "data", "my_sources.txt")) as f:
for line in f:
if "END" in line and found:
return od([(source_id, dat)])
spl = line.split(":")
if found:
if not any([line[0] == x for x in["#", "\n"]]):
spl = line.split(":")
k = spl[0].strip()
data_str = spl[1].split("#")[0].strip()
dat[k] = data_str
if spl[0] == "source_ids":
if source_id in [x.strip()
for x in spl[1].split("#")[0].split(',')]:
found = 1
print_log.warning("Source info for source %s could not be found" % source_id)
if try_online:
try:
return get_source_info_online(source_id)
except BaseException:
pass
return od()
def get_source_info_online(source_id):
"""Try to load source info from online database (@ www.ngdc.noaa.gov).
:param str source_id: ID of source
"""
name = source_id
name = name.lower()
url = ("http://www.ngdc.noaa.gov/nndc/struts/results?type_0=Like&query_0="
"&op_8=eq&v_8=&type_10=EXACT&query_10=None+Selected&le_2=&ge_3="
"&le_3=&ge_2=&op_5=eq&v_5=&op_6=eq&v_6=&op_7=eq&v_7=&t=102557&s=5"
"&d=5")
logger.info("Trying to access volcano data from URL:")
logger.info(url)
try:
# it's a file like object and works just like a file
data = urlopen(url)
except BaseException:
raise
res = od()
in_row = 0
in_data = 0
lc = 0
col_num = 10
first_volcano_name = "Abu" # this needs to be identical
ids = ["name", "country", "region", "lat", "lon", "altitude", "type",
"status", "last_eruption"]
types = [str, str, str, float, float, float, str, str, str]
for line in data:
lc += 1
if first_volcano_name in line and line.split(">")[1].\
split("</td")[0].strip() == first_volcano_name:
in_data, c = 1, 0
if in_data:
if c % col_num == 0 and name in line.lower():
logger.info("FOUND candidate, line: ", lc)
spl = line.split(">")[1].split("</td")[0].strip().lower()
if name in spl:
logger.info("FOUND MATCH: ", spl)
in_row, cc = 1, 0
cid = spl
res[cid] = od()
if in_row:
spl = line.split(">")[1].split("</td")[0].strip()
res[cid][ids[cc]] = types[cc](spl)
cc += 1
if in_row and cc == 9:
logger.info("End of data row reached for %s" % cid)
cc, in_row = 0, 0
c += 1
return res
def get_icon(name, color=None):
"""Try to find icon in lib icon folder.
:param str name: name of icon (i.e. filename is <name>.png)
:param color (None): color of the icon ("r", "k", "g")
Returns icon image filepath if valid
"""
try:
from pyplis import _LIBDIR
except BaseException:
raise
subfolders = ["axialis", "myIcons"]
for subf in subfolders:
base_path = join(_LIBDIR, "data", "icons", subf)
if color is not None:
base_path = join(base_path, color)
for file in listdir(base_path):
fname = basename(file).split(".")[0]
if fname == name:
                return join(base_path, file)
logger.warning("Failed to load icon at: " + _LIBDIR)
return False
if __name__ == '__main__':
i1 = get_camera_info('ecII')
i2 = get_camera_info('usgs')
|
gpl-3.0
|
shahar-stratoscale/nova
|
nova/tests/integrated/v3/test_services.py
|
20
|
3605
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova.openstack.common import timeutils
from nova.tests.api.openstack.compute.plugins.v3 import test_services
from nova.tests.integrated.v3 import api_sample_base
class ServicesJsonTest(api_sample_base.ApiSampleTestBaseV3):
extension_name = "os-services"
def setUp(self):
super(ServicesJsonTest, self).setUp()
self.stubs.Set(db, "service_get_all",
test_services.fake_db_api_service_get_all)
self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
self.stubs.Set(timeutils, "utcnow_ts",
test_services.fake_utcnow_ts)
self.stubs.Set(db, "service_get_by_args",
test_services.fake_service_get_by_host_binary)
self.stubs.Set(db, "service_update",
test_services.fake_service_update)
def tearDown(self):
super(ServicesJsonTest, self).tearDown()
timeutils.clear_time_override()
def test_services_list(self):
"""Return a list of all agent builds."""
response = self._do_get('os-services')
subs = {'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'status': 'disabled',
'state': 'up'}
subs.update(self._get_regexes())
self._verify_response('services-list-get-resp', subs, response, 200)
def test_service_enable(self):
"""Enable an existing agent build."""
subs = {"host": "host1",
'binary': 'nova-compute'}
response = self._do_put('os-services/enable',
'service-enable-put-req', subs)
subs = {"host": "host1",
"binary": "nova-compute"}
self._verify_response('service-enable-put-resp', subs, response, 200)
def test_service_disable(self):
"""Disable an existing agent build."""
subs = {"host": "host1",
'binary': 'nova-compute'}
response = self._do_put('os-services/disable',
'service-disable-put-req', subs)
subs = {"host": "host1",
"binary": "nova-compute"}
self._verify_response('service-disable-put-resp', subs, response, 200)
def test_service_disable_log_reason(self):
"""Disable an existing service and log the reason."""
subs = {"host": "host1",
'binary': 'nova-compute',
'disabled_reason': 'test2'}
response = self._do_put('os-services/disable-log-reason',
'service-disable-log-put-req', subs)
return self._verify_response('service-disable-log-put-resp',
subs, response, 200)
def test_service_delete(self):
"""Delete an existing service."""
response = self._do_delete('os-services/1')
self.assertEqual(response.status, 204)
self.assertEqual(response.read(), "")
|
apache-2.0
|
ksmit799/Toontown-Source
|
toontown/minigame/DivingGameToonSD.py
|
6
|
3074
|
from direct.showbase.ShowBaseGlobal import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.interval.IntervalGlobal import *
from toontown.toonbase.ToontownGlobals import *
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from direct.fsm import ClassicFSM
from direct.fsm import State
import CatchGameGlobals
class DivingGameToonSD(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('DivingGameToonSD')
FallBackAnim = 'slip-backward'
FallFwdAnim = 'slip-forward'
CatchNeutralAnim = 'catch-neutral'
CatchRunAnim = 'catch-run'
EatNeutralAnim = 'catch-eatneutral'
EatNRunAnim = 'catch-eatnrun'
status = ''
animList = [FallBackAnim,
FallFwdAnim,
CatchNeutralAnim,
CatchRunAnim,
EatNeutralAnim,
EatNRunAnim]
def __init__(self, avId, game):
self.avId = avId
self.game = game
self.isLocal = avId == base.localAvatar.doId
self.toon = self.game.getAvatar(self.avId)
self.unexpectedExit = False
self.fsm = ClassicFSM.ClassicFSM('CatchGameAnimFSM-%s' % self.avId, [State.State('init', self.enterInit, self.exitInit, ['normal']),
State.State('normal', self.enterNormal, self.exitNormal, ['freeze', 'treasure']),
State.State('treasure', self.enterTreasure, self.exitTreasure, ['freeze', 'normal']),
State.State('freeze', self.enterFreeze, self.exitFreeze, ['normal']),
State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'init', 'cleanup')
def load(self):
self.setAnimState('off', 1.0)
for anim in self.animList:
self.toon.pose(anim, 0)
def unload(self):
del self.fsm
self.game = None
return
def enter(self):
self.fsm.enterInitialState()
def exit(self, unexpectedExit = False):
self.unexpectedExit = unexpectedExit
if not unexpectedExit:
self.fsm.requestFinalState()
def enterInit(self):
self.notify.debug('enterInit')
self.status = 'init'
def exitInit(self):
pass
def enterNormal(self):
self.status = 'normal'
self.notify.debug('enterNormal')
self.setAnimState('dive', 1.0)
def exitNormal(self):
self.setAnimState('off', 1.0)
def enterTreasure(self):
self.status = 'treasure'
self.notify.debug('enterTreasure')
self.setAnimState('swimhold', 1.0)
def exitTreasure(self):
self.notify.debug('exitTreasure')
def enterFreeze(self):
self.status = 'freeze'
self.notify.debug('enterFreeze')
self.setAnimState('cringe', 1.0)
def exitFreeze(self):
pass
def enterCleanup(self):
self.status = 'cleanup'
self.notify.debug('enterCleanup')
if self.toon:
self.toon.resetLOD()
def exitCleanup(self):
pass
def setAnimState(self, newState, playRate):
if not self.unexpectedExit:
self.toon.setAnimState(newState, playRate)
|
mit
|
sahildua2305/eden
|
static/scripts/tools/dbstruct_mysql.py
|
59
|
6606
|
#
# Recommended usage: fabfile.py
#
# - or manually:
#
# 0. Configure /root/.my.cnf to allow the root user to access MySQL as root without password
#
# 1. Create a new, empty MySQL database 'sahana' as-normal
# mysqladmin create sahana
# mysql
# GRANT SELECT,INSERT,UPDATE,DELETE,CREATE,INDEX,ALTER,DROP ON sahana.* TO 'sahana'@'localhost' IDENTIFIED BY 'password';
#
# 2. set deployment_settings.base.prepopulate = False in models/000_config.py
#
# 3. Allow web2py to run the Eden model to configure the Database structure
# web2py -N -S eden -M
#
# 4. Export the Live database from the Live server (including structure)
# mysqldump sahana > backup.sql
#
# 5. Use this to populate a new table 'old'
# mysqladmin create old
# mysql old < backup.sql
#
# 6. Change database names/passwords in the script &/or access rights in the table, if not using root & .my.cnf
# mysql
# GRANT SELECT,INSERT,UPDATE,DELETE,CREATE,INDEX,ALTER,DROP ON old.* TO 'sahana'@'localhost' IDENTIFIED BY 'password';
#
# 7. Run the script
# python dbstruct.py
#
# 8. Fixup manually anything which couldn't be done automatically, e.g.:
# "ALTER TABLE `gis_location` DROP `marker_id` ;
# The table -> gis_location has a field -> marker_id that could not be automatically removed"
# =>
# mysql
# \r old
# show innodb status;
# ALTER TABLE gis_location DROP FOREIGN KEY gis_location_ibfk_2;
# ALTER TABLE gis_location DROP marker_id ;
# ALTER TABLE gis_location DROP osm_id ;
#
# 9. Take a dump of the fixed data (no structure, full inserts)
# mysqldump -tc old > old.sql
#
# 10. Import it into the empty database
# mysql sahana < old.sql
#
# 11. Dump the final database with good structure/data ready to import on the server (including structure)
# mysqldump sahana > new.sql
#
# 12. Import it on the Server
# mysqladmin drop sahana
# mysqladmin create sahana
# mysql sahana < new.sql
#
# 13. Restore indexes
# w2p
# tablename = "pr_person"
# field = "first_name"
# db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
# field = "middle_name"
# db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
# field = "last_name"
# db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
# tablename = "gis_location"
# field = "name"
# db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
#
user = "root"
# Password can be added to script, if it can't be read from /root/.my.cnf
passwd = "password"
# An empty database with the new db schema
new_db = "sahana"
# The old database (structure & data)
old_db = "old"
# ----------------------------------------------------------------------------------
import os
import sys
import subprocess
import MySQLdb
# If possible, read the password for root from /root/.my.cnf
config = "/root/.my.cnf"
if os.access(config, os.R_OK):
f = open(config, "r")
lines = f.readlines()
f.close()
for line in lines:
findstring = "password="
if findstring in line:
passwd = line.replace(findstring, "").strip()
# DB1 is the db to sync to (Test or new Live)
db1 = MySQLdb.connection(host="localhost", user=user, passwd=passwd, db=new_db)
# DB2 is the db to sync from (backup of Live)
db2 = MySQLdb.connection(host="localhost", user=user, passwd=passwd, db=old_db)
def tablelist(db):
db.query("SHOW TABLES;")
r = db.store_result()
tables = []
for row in r.fetch_row(300):
tables.append(row[0])
return tables
# Dict to load up the database Structure
def tablestruct(db):
tablestruct = {}
tables = tablelist(db)
for table in tables:
db.query("describe %s;" % table)
r = db.store_result()
structure = []
for row in r.fetch_row(100):
structure.append(row[0])
tablestruct[table] = structure
return tablestruct
struct1 = tablestruct(db1)
struct2 = tablestruct(db2)
tables_to_delete = []
fields_to_delete = {}
for key in struct2:
fields_to_delete[key] = []
try:
fields1 = struct1[key]
fields2 = struct2[key]
for field in fields2:
try:
fields1.index(field)
except:
fields_to_delete[key].append(field)
except:
tables_to_delete.append(key)
for table in fields_to_delete.keys():
if fields_to_delete[table] == []:
del fields_to_delete[table]
print tables_to_delete
print fields_to_delete
for table in tables_to_delete:
db2.query("SET FOREIGN_KEY_CHECKS = 0;")
db2.query("DROP TABLE %s;" % table)
db2.query("SET FOREIGN_KEY_CHECKS = 1;")
problems = ""
filename = "/root/mysql.status"
for table in fields_to_delete:
for field in fields_to_delete[table]:
print "ALTER TABLE `" + table + "` DROP `" + field + "` ;"
db2.query("SET FOREIGN_KEY_CHECKS = 0;")
try:
db2.query("ALTER TABLE `" + table + "` DROP `" + field + "` ;")
except:
print "Table %s has a field %s with a FK constraint" % (table, field)
# Try to resolve any FK constraint issues automatically
cmd = "mysql -e 'show innodb status;' > %s" % filename
subprocess.call(cmd, shell=True)
if os.access(filename, os.R_OK):
f = open(filename, "r")
lines = f.readlines()
f.close()
fk = lines[1].split("CONSTRAINT")[1].split("FOREIGN")[0].strip()
print "ALTER TABLE `" + table + "` DROP FOREIGN KEY `" + fk + "`;"
try:
db2.query("ALTER TABLE `" + table + "` DROP FOREIGN KEY `" + fk + "`;")
except (MySQLdb.OperationalError,):
e = sys.exc_info()[1]
print "Failed to remove FK %s from table %s" % (fk, table) + e.message
# Try again now that FK constraint has been removed
print "ALTER TABLE `" + table + "` DROP `" + field + "` ;"
try:
db2.query("ALTER TABLE `" + table + "` DROP `" + field + "` ;")
except (MySQLdb.OperationalError,):
e = sys.exc_info()[1]
message = "Failed to drop field %s from table %s" % (field, table) + e.message
print message
problems += message
db2.query("SET FOREIGN_KEY_CHECKS = 1;")
# List any remaining issues as clearly/concisely as possible for us to resolve manually
print problems
|
mit
|
zzh8829/PythonCraft
|
exp/Test.py
|
1
|
24657
|
import pygame
import OpenGL
import numpy as np
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
class Tessellator:
convertQuadsToTriangles = False
tryVBO = False
def __init__(self,bufsize = 2097152 ):
self.bufferSize = bufsize
self.byteBuffer = np.array(range(bufsize*4),dtype = 'b')
self.rawBuffer = np.array(range(bufsize),dtype = 'f')
self.rawBufferIndex = 0
self.isDrawing = False
self.vboIndex = 0
self.vboCount = 10
        # the VBO flag must be assigned before it is consulted
        self.useVBO = False
        if self.useVBO:
            self.vertexBuffers = glGenBuffers(self.vboCount)
        self.xOffset = 0
        self.yOffset = 0
        self.zOffset = 0
        self.drawMode = 0
self.addedVertices = 0
self.color = 0
self.brightness = 0
self.hasColor = False
self.hasTexture = False
self.hasBrightness = False
self.textureU = 0
self.textureV = 0
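    # Vertex data is interleaved as 8 floats (32 bytes) per vertex:
    # indices 0-2 hold x/y/z, 3-4 hold texture u/v, 5 packs the colour,
    # 6 the normal and 7 the brightness -- matching the 32-byte stride
    # and byte offsets (12, 20, 24, 28) used by the pointer calls in draw().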
def reset(self):
self.vertexCount = 0
self.byteBuffer.fill(0)
self.rawBufferIndex = 0
self.addedVertices = 0
def startDrawingQuads(self):
self.startDrawing(GL_QUADS)
def startDrawing(self,mode):
self.isDrawing = True
self.reset()
self.drawMode = mode
self.hasNormals = False
self.hasColor = False
self.hasTexture = False
self.hasBrightness = False
self.isColorDisabled = False
def setTextureUV(self,u,v):
self.hasTexture = True
self.textureU = u
self.textureV = v
def addVertexWithUV(self,x,y,z,u,v):
self.setTextureUV(u,v)
self.addVertex(x,y,z)
def addVertex(self, x, y, z):
self.addedVertices += 1
if self.drawMode == GL_QUADS and self.convertQuadsToTriangles and self.addedVertices % 4 == 0:
for i in range(2):
offset = 8 * (3 - i)
if self.hasTexture :
self.rawBuffer[self.rawBufferIndex + 3] = self.rawBuffer[self.rawBufferIndex - offset + 3]
self.rawBuffer[self.rawBufferIndex + 4] = self.rawBuffer[self.rawBufferIndex - offset + 4]
if self.hasBrightness :
self.rawBuffer[self.rawBufferIndex + 7] = self.rawBuffer[self.rawBufferIndex - offset + 7]
if self.hasColor:
self.rawBuffer[self.rawBufferIndex + 5] = self.rawBuffer[self.rawBufferIndex - offset + 5]
self.rawBuffer[self.rawBufferIndex + 0] = self.rawBuffer[self.rawBufferIndex - offset + 0]
self.rawBuffer[self.rawBufferIndex + 1] = self.rawBuffer[self.rawBufferIndex - offset + 1]
self.rawBuffer[self.rawBufferIndex + 2] = self.rawBuffer[self.rawBufferIndex - offset + 2]
self.vertexCount += 1
self.rawBufferIndex += 8
if self.hasTexture:
self.rawBuffer[self.rawBufferIndex + 3] = self.textureU
self.rawBuffer[self.rawBufferIndex + 4] = self.textureV
if self.hasBrightness:
self.rawBuffer[self.rawBufferIndex + 7] = self.brightness
if self.hasColor:
self.rawBuffer[self.rawBufferIndex + 5] = self.color
if self.hasNormals:
self.rawBuffer[self.rawBufferIndex + 6] = self.normal
self.rawBuffer[self.rawBufferIndex + 0] = x + self.xOffset
self.rawBuffer[self.rawBufferIndex + 1] = y + self.yOffset
self.rawBuffer[self.rawBufferIndex + 2] = z + self.zOffset
self.rawBufferIndex += 8
self.vertexCount+=1
if self.vertexCount % 4 == 0 and self.rawBufferIndex >= self.bufferSize - 32:
self.draw()
self.isDrawing = True
def setTranslation(self, xoff, yoff, zoff):
self.xOffset = xoff
self.yOffset = yoff
self.zOffset = zoff
def addTranslation(self, xoff, yoff, zoff):
self.xOffset += xoff
self.yOffset += yoff
self.zOffset += zoff
def draw(self):
if not self.isDrawing:
raise Exception("Not tesselating!")
else:
self.isDrawing = False
if self.vertexCount > 0:
self.intBuffer.fill(0)
self.intBuffer.put(self.rawBuffer, 0, self.rawBufferIndex)
self.byteBuffer.position(0)
self.byteBuffer.limit(self.rawBufferIndex * 4)
if self.useVBO:
self.vboIndex = (self.vboIndex + 1) % self.vboCount
glBindBuffer(GL_ARRAY_BUFFER, self.vertexBuffers[self.vboIndex])
glBufferData(GL_ARRAY_BUFFER, self.byteBuffer, GL_STREAM_DRAW)
if self.hasTexture:
if self.useVBO:
glTexCoordPointer(2, GL_FLOAT, 32, 12)
else:
self.floatBuffer.position(3)
glTexCoordPointer(2, 32, self.floatBuffer)
glEnableClientState(GL_TEXTURE_COORD_ARRAY)
if self.hasBrightness:
#OpenGlHelper.setClientActiveTexture(OpenGlHelper.lightmapTexUnit)
if self.useVBO:
glTexCoordPointer(2, GL_SHORT, 32, 28)
else:
self.shortBuffer.position(14)
glTexCoordPointer(2, 32, self.shortBuffer)
glEnableClientState(GL_TEXTURE_COORD_ARRAY)
#OpenGlHelper.setClientActiveTexture(OpenGlHelper.defaultTexUnit)
if self.hasColor:
if self.useVBO:
glColorPointer(4, GL_UNSIGNED_BYTE, 32, 20)
else:
self.byteBuffer.position(20)
                        glColorPointer(4, GL_UNSIGNED_BYTE, 32, self.byteBuffer)
glEnableClientState(GL_COLOR_ARRAY)
if self.hasNormals:
if self.useVBO:
glNormalPointer(GL_UNSIGNED_BYTE, 32, 24)
else:
self.byteBuffer.position(24)
glNormalPointer(32, self.byteBuffer)
glEnableClientState(GL_NORMAL_ARRAY)
if self.useVBO:
glVertexPointer(3, GL_FLOAT, 32, 0)
else:
self.floatBuffer.position(0)
glVertexPointer(3, 32, self.floatBuffer)
glEnableClientState(GL_VERTEX_ARRAY)
if self.drawMode == GL_QUADS and self.convertQuadsToTriangles:
glDrawArrays(GL_TRIANGLES, 0, self.vertexCount)
else:
glDrawArrays(self.drawMode, 0, self.vertexCount)
glDisableClientState(GL_VERTEX_ARRAY)
if self.hasTexture:
glDisableClientState(GL_TEXTURE_COORD_ARRAY)
if self.hasBrightness:
#OpenGlHelper.setClientActiveTexture(OpenGlHelper.lightmapTexUnit)
glDisableClientState(GL_TEXTURE_COORD_ARRAY)
#OpenGlHelper.setClientActiveTexture(OpenGlHelper.defaultTexUnit)
if self.hasColor:
glDisableClientState(GL_COLOR_ARRAY)
if self.hasNormals:
glDisableClientState(GL_NORMAL_ARRAY)
idx = self.rawBufferIndex * 4
self.reset()
return idx
class Block:
blocksList = ['air','dirt']
def __init__(self):
pass
def hasComparatorInputOverride():
return False
class Chunk:
def __init__(self,world,x,z):
self.isChunkLoaded = False
#self.storageArrays = new ExtendedBlockStorage[16]
self.blockBiomeArray = [-1 for i in range(256)]
self.precipitationHeightMap = [-999 for i in range(256)]
#self.updateSkylightColumns = new boolean[256]
self.isGapLightingUpdated = False
#self.chunkTileEntityMap = new HashMap()
self.isTerrainPopulated = False
self.isModified = False
self.hasEntities = False
self.lastSaveTime = 0
self.sendUpdates = False
self.heightMapMinimum = 0
self.queuedLightChecks = 4096
self.field_76653_p = False
self.entityLists = [ [] for i in range(16)]
self.worldObj = world
self.xPosition = x
self.zPosition = z
self.heightMap = [0 for i in range(256)]
def getBlockId(self,x,y,z):
print(x,y,z)
return 1
class EmptyChunk(Chunk):
def __init__(self,world,x,z):
super().__init__(world,x,z)
class ChunkPosition:
def __init__(self,x,y,z):
        self.x = x
self.y = y
self.z = z
class ChunkCoordIntPair:
def __init__(self,x,z):
self.chunkX = x
self.chunkZ = z
@staticmethod
def chunkXZ2Int(x,z):
return (x & 4294967295) | (z & 4294967295) << 32
def getCenterXPosition(self):
return (self.chunkX << 4) + 8
def getCenterZPosition(self):
return (self.chunkZ << 4) + 8
def getCenterPosition(self,y):
return ChunkPosition(self.getCenterXPosition(),y,self.getCenterZPosition())
def __str__(self):
        return '[%d, %d]' % (self.chunkX, self.chunkZ)
class ChunkProviderClient:
def __init__(self,world):
self.blankChunk = EmptyChunk(world,0,0)
self.worldObj = world
self.chunkMapping = {}
self.chunkListing = []
    def chunkExists(self, x, z):
return True
def loadChunk(self,x,z):
chunk = Chunk(self.worldObj,x,z)
self.chunkMapping[ChunkCoordIntPair.chunkXZ2Int(x,z)] = chunk
chunk.isChunkLoaded =True
return chunk
def provideChunk(self,x,z):
try:
chunk = self.chunkMapping[ChunkCoordIntPair.chunkXZ2Int(x,z)]
return chunk
except:
return self.blankChunk
class ChunkCoordinates:
def __init__(self,x=0,y=0,z=0):
self.posX = x
self.posY = y
self.posZ = z
class World:
def __init__(self,worldInfo):
self.isRemote = False
self.chunkProvider = ChunkProviderClient(self)
self.worldInfo = worldInfo
def getBlockId(self,x,y,z):
if x >= -30000000 and z >= -30000000 and x < 30000000 and z < 30000000:
if y < 0 or y>256:
return 0
else:
chunk = self.getChunkFromChunkCoords(x>>4,z>>4)
return chunk.getBlockId(x&15 , y, z&15)
else:
return 0
def getChunkFromBlockCoords(self,x,z):
return self.getChunkFromChunkCoords(x>>4,z>>4)
def getChunkFromChunkCoords(self,x,z):
print(x,z)
return EmptyChunk(self,x,z)
def isAirBlock(self,x,y,z):
return self.getBlockId(x,y,z) == 0
def blockExists(self,x,y,z):
if 0 <= y < 256:
return self.chunkExists(x>>4,z>>4)
else:
return False
def setBlock(self,x,y,z,blockId,metadata=0,flags=3):
if x >= -30000000 and z >= -30000000 and x < 30000000 and z < 30000000:
if y < 0 or y>256:
return 0
else:
chunk = self.getChunkFromChunkCoords(x>>4,z>>4)
ID = 0
if (flags&1) !=0:
ID = chunk.getBlockId(x&15,y,z&15)
result = chunk.setBlockIDWithMetadata(x&15,y,z&15,blockId,metadata)
self.updateAllLightTypes(x,y,z)
if result == True:
if (flags&2)!=0 and (not self.isRemote or (flags&4)==0) :
self.markBlockForUpdate(x,y,z)
if not self.isRemote and (flags&1) != 0:
self.notifyBlockChange(x,y,z,ID)
block = Block.blocksList[blockId]
if block and block.hasComparatorInputOverride():
pass
#self.function(x,y,z,blockId)
return result
else:
return 0
def updateAllLightTypes(self,x,y,z):
pass
def markBlockForUpdate(self,x,y,z):
pass
def isAABBNonEmpty(self,aabb):
minx = int(aabb.minX)
maxx = int(aabb.maxX + 1)
miny = int(aabb.minY)
maxy = int(aabb.maxY + 1)
minz = int(aabb.minZ)
maxz = int(aabb.maxZ + 1)
if aabb.minX < 0:
minx -= 1
if aabb.minY < 0:
miny -= 1
if aabb.minZ < 0:
minz -= 1
for x in range(minx,maxx):
for y in range(miny,maxy):
for z in range(minz,maxz):
try:
block = Block.blocksList[self.getBlockId(x, y, z)]
if block:
return True
except:
pass
return False
    def getSpawnPoint(self):
return ChunkCoordinates(self.worldInfo.spawnX,self.worldInfo.spawnY, self.worldInfo.spawnZ)
class NBTBase:
def __init__(self,name):
self.name = name
def setName(self,name = ""):
self.name = name
def getName(self):
return self.name if self.name else ""
class NBTTagEnd(NBTBase):
def __init__(self,name = ""):
super().__init__(name)
def __str__(self):
return 'END'
def getId(self):
return '\x00'
class NBTTagByte(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
def load(self,binaryReader):
self.data = binaryReader.readByte()
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x01'
def __str__(self):
return str(self.data)
class NBTTagShort(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
def load(self,binaryReader):
self.data = binaryReader.readInt16()
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x02'
def __str__(self):
return str(self.data)
class NBTTagInt(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
def load(self,binaryReader):
self.data = binaryReader.readInt32()
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x03'
def __str__(self):
return str(self.data)
class NBTTagLong(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
def load(self,binaryReader):
self.data = binaryReader.readInt64()
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x04'
def __str__(self):
return str(self.data)
class NBTTagFloat(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
def load(self,binaryReader):
self.data = binaryReader.readFloat()
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x05'
def __str__(self):
return str(self.data)
class NBTTagDouble(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
def load(self,binaryReader):
self.data = binaryReader.readDouble()
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x06'
def __str__(self):
return str(self.data)
class NBTTagByteArray(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
self.byteArray = b""
def load(self,binaryReader):
length = binaryReader.readInt()
self.byteArray = binaryReader.readBytes(length)
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x07'
def __str__(self):
return str(self.data)
class NBTTagString(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
def load(self,binaryReader):
self.data = binaryReader.readUTF()
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x08'
def __str__(self):
return str(self.data)
class NBTTagList(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
self.tagList = []
self.tagType = '\x00'
def load(self,binaryReader):
self.tagType = binaryReader.readByte()
length = binaryReader.readInt32()
self.tagList = []
for i in range(length):
tag = NBTFactory.newTag(self.tagType,"")
tag.load(binaryReader)
self.tagList.append(tag)
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x09'
def __str__(self):
return str(self.data)
class NBTTagCompound(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
self.tagMap = {}
def load(self,binaryReader):
self.tagMap = {}
while True:
tag = NBTFactory.readNamedTag(binaryReader)
if ord(tag.getId()) == 0:
break
self.tagMap[tag.getName()] = tag
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x0a'
def getCompoundTag(self,name):
try: return self.tagMap[name]
except: return NBTTagCompound(name)
def getByte (self,name):
try: return self.tagMap[name].data
except: return b""
def getShort(self,name):
try: return self.tagMap[name].data
except: return 0
def getInteger(self,name):
try: return self.tagMap[name].data
except: return 0
def getLong(self,name):
try: return self.tagMap[name].data
except: return 0
def getFloat(self,name):
try: return self.tagMap[name].data
except: return 0.0
def getDouble(self,name):
try: return self.tagMap[name].data
except: return 0.0
def getString(self,name):
try: return self.tagMap[name].data
except: return ""
def getByteArray(self,name):
try: return self.tagMap[name].byteArray
except: return b""
def getIntArray(self,name):
try: return self.tagMap[name].intArray
except: return []
def getTagList (self,name):
try: return self.tagMap[name]
except: return NBTTagList(name)
def getBoolean(self,name):
try: return ord(self.getByte(name)) != 0
except: return False
def hasKey(self,key):
return key in self.tagMap
def __str__(self):
return str(self.data)
class NBTTagIntArray(NBTBase):
def __init__(self,name = "", data = None):
super().__init__(name)
self.data = data
self.intArray = []
def load(self,binaryReader):
length = binaryReader.readInt32()
self.intArray = []
for i in range(length):
self.intArray.append(binaryReader.readInt32())
def write(self,binaryWriter):
binaryWriter.write(self.data)
def getId(self):
return '\x0b'
def __str__(self):
return str(self.data)
class NBTFactory:
TAGCLASS = [
NBTTagEnd,
NBTTagByte,
NBTTagShort,
NBTTagInt,
NBTTagLong,
NBTTagFloat,
NBTTagDouble,
NBTTagByteArray,
NBTTagString,
NBTTagList,
NBTTagCompound,
NBTTagIntArray
]
TAGNAME = [
"TAG_End",
"TAG_Byte",
"TAG_Short",
"TAG_Int",
"TAG_Long",
"TAG_Float",
"TAG_Double",
"TAG_Byte_Array",
"TAG_String",
"TAG_List",
"TAG_Compound",
"TAG_Int_Array"
]
@classmethod
def readNamedTag(cls,binaryReader):
typ = binaryReader.readByte()
if ord(typ) == 0:
return NBTTagEnd()
else:
val = binaryReader.readUTF()
tag = cls.newTag(typ,val)
tag.load(binaryReader)
return tag
@classmethod
def newTag(cls,typ,val):
return cls.TAGCLASS[ord(typ)](val)
@classmethod
def getTagName(cls,typ):
return cls.TAGNAME[ord(typ)]
from BinaryReader import *
import gzip
def readCompressed(filename):
stream = BigEndianBinaryReader(gzip.GzipFile(filename,'rb'))
nbtbase = NBTFactory.readNamedTag(stream)
return nbtbase
class WorldType:
worldTypes = [ 0 for i in range(16)]
def __init__(self,typeId,name):
self.worldType = name
self.worldTypeId = typeId
self.worldTypes[typeId] = self
@classmethod
def parseWorldType(cls,name):
        for t in cls.worldTypes:
            if t and name.lower() == t.worldType:
                return t
WorldType.DEFAULT = WorldType(0,"default")
WorldType.FLAT = WorldType(1,"flat")
WorldType.LARGE_BIOMES = WorldType(2,"large_biomes")
WorldType.DEFAULT_1_1 = WorldType(8,"default_1_1")
class WorldInfo:
def __init__(self,*args):
self.randomSeed = 0
self.terrainType = WorldType.DEFAULT
self.generatorOptions = ""
self.spawnX = 0
self.spawnY = 0
self.spawnZ = 0
self.totalTime = 0
self.worldTime = 0
self.lastTimePlayed = 0
self.sizeOnDisk = 0
self.playerTag = None
self.dimension = 0
self.levelName = 0
self.saveVersion = 0
self.raining = False
self.rainTime = 0
self.thundering = False
self.thunderTime = 0
self.theGameType = None
self.mapFeaturesEnabled = False
self.hardcore = False
self.allowCommands = False
self.initialized = False
self.theGameRules = None
if len(args)==1:
if isinstance(args[0],NBTBase):
tag = args[0]
self.randomSeed = tag.getLong("RandomSeed")
if(tag.hasKey("generatorName")):
name = tag.getString("generatorName")
self.terrainType = WorldType.parseWorldType(name)
if self.terrainType == None:
self.terrainType = WorldType.DEFAULT
elif 0:
pass
self.generatorOptions = tag.getString("generatorOptions")
self.theGameType = tag.getInteger("GameType")
self.spawnX = tag.getInteger('SpawnX')
self.spawnY = tag.getInteger('SpawnY')
self.spawnZ = tag.getInteger('SpawnZ')
self.totalTime = tag.getLong('Time')
self.worldTime = tag.getLong('DayTime')
self.lastTimePlayed = tag.getLong("LastPlayed")
self.sizeOnDisk = tag.getLong("SizeOnDisk")
self.levelName = tag.getString("LevelName")
self.saveVersion = tag.getInteger("version")
self.rainTime = tag.getInteger("rainTime")
self.raining = tag.getBoolean("raining")
self.thunderTime = tag.getInteger("thunderTime")
self.thundering = tag.getBoolean("thundering")
self.hardcore = tag.getBoolean("hardcore")
self.playerTag = tag.getCompoundTag("Player")
self.dimension = self.playerTag.getInteger('Dimension')
def getSaveVersion(self):
return self.saveVersion
def getWorldName(self):
return self.levelName
import os
import numpy as np
import struct
import zlib
class RegionFile:
SECTOR_BYTES = 4096
    SECTOR_INTS = SECTOR_BYTES // 4
CHUNK_HEADER_SIZE = 5
VERSION_GZIP = 1
VERSION_DEFLATE = 2
def __init__(self,folder,x,z):
self.filename = os.path.join(folder,"r.%d.%d.mca"%(x,z))
f = open(self.filename,'rb')
filesize = os.path.getsize(self.filename)
if filesize & 0xfff:
filesize = (filesize|0xfff)+1
f.truncate(filesize)
f.seek(0)
offsetsData = f.read(self.SECTOR_BYTES)
modTimesData = f.read(self.SECTOR_BYTES)
#print(offsetsData)
self.freeSectors = [True] * (filesize // self.SECTOR_BYTES)
self.freeSectors[0:2] = False, False
self.offsets = np.fromstring(offsetsData, dtype='>u4')
self.modTimes = np.fromstring(modTimesData, dtype='>u4')
print(self.offsets)
needsRepair = False
for offset in self.offsets:
sector = offset >> 8
count = offset & 0xff
for i in range(sector, sector + count):
if i >= len(self.freeSectors):
# raise RegionMalformed("Region file offset table points to sector {0} (past the end of the file)".format(i))
print( "Region file offset table points to sector {0} (past the end of the file)".format(i))
needsRepair = True
break
if self.freeSectors[i] is False:
needsRepair = True
self.freeSectors[i] = False
def readChunk(self,x,z):
x &= 0x1f
z &= 0x1f
offset = self.getOffset(x, z)
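        # each 4-byte offset entry packs the chunk's starting sector in the
        # upper 24 bits and its length in sectors in the low byte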
sectorStart = offset >> 8
numSectors = offset & 0xff
f = open(self.filename,'rb')
f.seek(sectorStart * self.SECTOR_BYTES)
data = f.read(numSectors * self.SECTOR_BYTES)
length = struct.unpack_from(">I", data)[0]
format = struct.unpack_from("B", data, 4)[0]
data = data[5:length + 5]
        return zlib.decompress(data)
def getOffset(self, cx, cz):
cx &= 0x1f
cz &= 0x1f
return self.offsets[cx + cz * 32]
class AnvilSaveLoader:
def __init__(self,folder,name):
self.directory = folder
self.worldname = name
self.worldDirectory = os.path.join(folder,name)
self.playerDirectory = os.path.join(self.worldDirectory,'players')
self.mapDataDirectory = os.path.join(self.worldDirectory,'data')
self.saveDirectoryName = name
self.regionDirectory = os.path.join(self.worldDirectory,'region')
self.regionFiles = {}
self.chunks = {}
def getWorldInfo(self):
folder = os.path.join(self.directory,self.worldname)
data = os.path.join(folder,'level.dat')
cst = readCompressed(data)
return WorldInfo(cst.getCompoundTag('Data'))
def getRegionForChunk(self,x,z):
        return self.getRegionFile(x >> 5, z >> 5)
def getRegionFile(self,x,z):
if (x,z) not in self.regionFiles:
regionFile = RegionFile(self.regionDirectory,x,z)
self.regionFiles[x,z] = regionFile
return self.regionFiles[x,z]
def getChunk(self,x,z):
if not (x,z) in self.chunks:
global chunkLoader
self.chunks[x,z] = chunkLoader.loadChunk(x,z)
return self.chunks[x,z]
def readChunkStream(self,x,z):
return self.getRegionFile(x,z).readChunk(x,z)
class AnvilChunkLoader:
def __init__(self,world,saveloader):
self.saveLoader = saveloader
self.worldObj = world
def loadChunk(self,x,z):
pos = ChunkCoordIntPair(x,z)
stream = self.saveLoader.readChunkStream(x,z)
tag = NBTFactory.readNamedTag(BigEndianBinaryReader(stream))
        chunk = self.readChunkFromNBT(self.worldObj, tag)
        if not chunk.isAtLocation(x, z):
            tag.setInteger('xPos', x)
            tag.setInteger('zPos', z)
            chunk = self.readChunkFromNBT(self.worldObj, tag)
return chunk
def readChunkFromNBT(self,world,tag):
x = tag.getInteger("xPos")
z = tag.getInteger("zPos")
chunk = Chunk(world, x, z)
chunk.heightMap = tag.getIntArray("HeightMap")
chunk.isTerrainPopulated = tag.getBoolean("TerrainPopulated")
sections = tag.getTagList("Sections")
length = 16
extBlockStorage = [ None for i in range(length)]
hasSky = not world.provider.hasNoSky
'''
for i in range(sections.tagCount()):
stag = sections.tagAt(i)
Yval = stag.getByte("Y")
ExtendedBlockStorage blockStorage = new ExtendedBlockStorage(Yval << 4, hasSky)
blockStorage.setBlockLSBArray(stag.getByteArray("Blocks"))
if (stag.hasKey("Add")):
blockStorage.setBlockMSBArray(new NibbleArray(stag.getByteArray("Add"), 4))
blockStorage.setBlockMetadataArray(new NibbleArray(stag.getByteArray("Data"), 4))
blockStorage.setBlocklightArray(new NibbleArray(stag.getByteArray("BlockLight"), 4))
if (hasSky):
blockStorage.setSkylightArray(new NibbleArray(stag.getByteArray("SkyLight"), 4))
blockStorage.removeInvalidBlocks()
extBlockStorage[Yval] = blockStorage
chunk.setStorageArrays(extBlockStorage)
if (tag.hasKey("Biomes")):
chunk.setBiomeArray(tag.getByteArray("Biomes"))
'''
chunk.hasEntities = False
return chunk
if __name__ == '__main__':
#world = World()
#world.getBlockId(100,100,100)
loader = AnvilSaveLoader('../saves/','Plain')
info = loader.getWorldInfo()
world = World(info)
chunkLoader = AnvilChunkLoader(world,loader)
def loadChunk(x,z):
#hashcode = ChunkCoordinates.chunkXZ2Int(x,z)
        chunk = chunkLoader.loadChunk(x, z)
print(info.randomSeed)
|
mit
|
solring/TWCompanyTree
|
TWCTenv/lib/python2.7/site-packages/setuptools/command/bdist_wininst.py
|
325
|
2283
|
from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
import os, sys
class bdist_wininst(_bdist_wininst):
_good_upload = _bad_upload = None
def create_exe(self, arcname, fullname, bitmap=None):
_bdist_wininst.create_exe(self, arcname, fullname, bitmap)
installer_name = self.get_installer_filename(fullname)
if self.target_version:
pyversion = self.target_version
# fix 2.5+ bdist_wininst ignoring --target-version spec
self._bad_upload = ('bdist_wininst', 'any', installer_name)
else:
pyversion = 'any'
self._good_upload = ('bdist_wininst', pyversion, installer_name)
def _fix_upload_names(self):
good, bad = self._good_upload, self._bad_upload
dist_files = getattr(self.distribution, 'dist_files', [])
if bad in dist_files:
dist_files.remove(bad)
if good not in dist_files:
dist_files.append(good)
def reinitialize_command (self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
self._fix_upload_names()
finally:
self._is_running = False
if not hasattr(_bdist_wininst, 'get_installer_filename'):
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
if self.target_version:
# if we create an installer for a specific python version,
# it's better to include this in the name
installer_name = os.path.join(self.dist_dir,
"%s.win32-py%s.exe" %
(fullname, self.target_version))
else:
installer_name = os.path.join(self.dist_dir,
"%s.win32.exe" % fullname)
return installer_name
# get_installer_filename()
|
unlicense
|
Sannoso/baxter_examples
|
src/baxter_examples/__init__.py
|
6
|
1590
|
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .recorder import JointRecorder
|
bsd-3-clause
|
richardcs/ansible
|
lib/ansible/module_utils/lxd.py
|
79
|
6224
|
# -*- coding: utf-8 -*-
# (c) 2016, Hiroaki Nakamura <[email protected]>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
import ssl
from ansible.module_utils.urls import generic_urlparse
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.six.moves import http_client
from ansible.module_utils._text import to_text
# httplib/http.client connection using unix domain socket
HTTPConnection = http_client.HTTPConnection
HTTPSConnection = http_client.HTTPSConnection
import json
class UnixHTTPConnection(HTTPConnection):
def __init__(self, path):
HTTPConnection.__init__(self, 'localhost')
self.path = path
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
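# UnixHTTPConnection behaves like a regular HTTPConnection but tunnels the
# request over the given unix domain socket, e.g. (path is illustrative):
#
#     conn = UnixHTTPConnection('/var/lib/lxd/unix.socket')
#     conn.request('GET', '/1.0')
#     print(conn.getresponse().read())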
class LXDClientException(Exception):
def __init__(self, msg, **kwargs):
self.msg = msg
self.kwargs = kwargs
class LXDClient(object):
def __init__(self, url, key_file=None, cert_file=None, debug=False):
"""LXD Client.
:param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
:type url: ``str``
:param key_file: The path of the client certificate key file.
:type key_file: ``str``
:param cert_file: The path of the client certificate file.
:type cert_file: ``str``
:param debug: The debug flag. The request and response are stored in logs when debug is true.
:type debug: ``bool``
"""
self.url = url
self.debug = debug
self.logs = []
if url.startswith('https:'):
self.cert_file = cert_file
self.key_file = key_file
parts = generic_urlparse(urlparse(self.url))
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ctx.load_cert_chain(cert_file, keyfile=key_file)
self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
elif url.startswith('unix:'):
unix_socket_path = url[len('unix:'):]
self.connection = UnixHTTPConnection(unix_socket_path)
else:
raise LXDClientException('URL scheme must be unix: or https:')
def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
if resp_json['type'] == 'async':
url = '{0}/wait'.format(resp_json['operation'])
resp_json = self._send_request('GET', url)
if resp_json['metadata']['status'] != 'Success':
self._raise_err_from_json(resp_json)
return resp_json
def authenticate(self, trust_password):
body_json = {'type': 'client', 'password': trust_password}
return self._send_request('POST', '/1.0/certificates', body_json=body_json)
def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
try:
body = json.dumps(body_json)
self.connection.request(method, url, body=body)
resp = self.connection.getresponse()
resp_data = resp.read()
resp_data = to_text(resp_data, errors='surrogate_or_strict')
resp_json = json.loads(resp_data)
self.logs.append({
'type': 'sent request',
'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
'response': {'json': resp_json}
})
resp_type = resp_json.get('type', None)
if resp_type == 'error':
if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
return resp_json
if resp_json['error'] == "Certificate already in trust store":
return resp_json
self._raise_err_from_json(resp_json)
return resp_json
except socket.error as e:
raise LXDClientException('cannot connect to the LXD server', err=e)
def _raise_err_from_json(self, resp_json):
err_params = {}
if self.debug:
err_params['logs'] = self.logs
raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)
@staticmethod
def _get_err_from_resp_json(resp_json):
err = None
metadata = resp_json.get('metadata', None)
if metadata is not None:
err = metadata.get('err', None)
if err is None:
err = resp_json.get('error', None)
return err
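# Minimal usage sketch (socket path and endpoint below are illustrative;
# authenticate() is only needed for https endpoints that are not yet trusted):
#
#     client = LXDClient('unix:/var/lib/lxd/unix.socket', debug=True)
#     containers = client.do('GET', '/1.0/containers')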
|
gpl-3.0
|
teichopsia-/take_brake
|
lib/python2.7/site-packages/wheel/signatures/__init__.py
|
565
|
3779
|
"""
Create and verify jws-js format Ed25519 signatures.
"""
__all__ = [ 'sign', 'verify' ]
import json
from ..util import urlsafe_b64decode, urlsafe_b64encode, native, binary
ed25519ll = None
ALG = "Ed25519"
def get_ed25519ll():
"""Lazy import-and-test of ed25519 module"""
global ed25519ll
if not ed25519ll:
try:
import ed25519ll # fast (thousands / s)
except (ImportError, OSError): # pragma nocover
from . import ed25519py as ed25519ll # pure Python (hundreds / s)
test()
return ed25519ll
def sign(payload, keypair):
"""Return a JWS-JS format signature given a JSON-serializable payload and
an Ed25519 keypair."""
get_ed25519ll()
#
header = {
"alg": ALG,
"jwk": {
"kty": ALG, # alg -> kty in jwk-08.
"vk": native(urlsafe_b64encode(keypair.vk))
}
}
encoded_header = urlsafe_b64encode(binary(json.dumps(header, sort_keys=True)))
encoded_payload = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True)))
secured_input = b".".join((encoded_header, encoded_payload))
sig_msg = ed25519ll.crypto_sign(secured_input, keypair.sk)
signature = sig_msg[:ed25519ll.SIGNATUREBYTES]
encoded_signature = urlsafe_b64encode(signature)
return {"recipients":
[{"header":native(encoded_header),
"signature":native(encoded_signature)}],
"payload": native(encoded_payload)}
def assertTrue(condition, message=""):
if not condition:
raise ValueError(message)
def verify(jwsjs):
"""Return (decoded headers, payload) if all signatures in jwsjs are
consistent, else raise ValueError.
Caller must decide whether the keys are actually trusted."""
get_ed25519ll()
# XXX forbid duplicate keys in JSON input using object_pairs_hook (2.7+)
recipients = jwsjs["recipients"]
encoded_payload = binary(jwsjs["payload"])
headers = []
for recipient in recipients:
assertTrue(len(recipient) == 2, "Unknown recipient key {0}".format(recipient))
h = binary(recipient["header"])
s = binary(recipient["signature"])
header = json.loads(native(urlsafe_b64decode(h)))
assertTrue(header["alg"] == ALG,
"Unexpected algorithm {0}".format(header["alg"]))
if "alg" in header["jwk"] and not "kty" in header["jwk"]:
header["jwk"]["kty"] = header["jwk"]["alg"] # b/w for JWK < -08
assertTrue(header["jwk"]["kty"] == ALG, # true for Ed25519
"Unexpected key type {0}".format(header["jwk"]["kty"]))
vk = urlsafe_b64decode(binary(header["jwk"]["vk"]))
secured_input = b".".join((h, encoded_payload))
sig = urlsafe_b64decode(s)
sig_msg = sig+secured_input
verified_input = native(ed25519ll.crypto_sign_open(sig_msg, vk))
verified_header, verified_payload = verified_input.split('.')
verified_header = binary(verified_header)
decoded_header = native(urlsafe_b64decode(verified_header))
headers.append(json.loads(decoded_header))
verified_payload = binary(verified_payload)
# only return header, payload that have passed through the crypto library.
payload = json.loads(native(urlsafe_b64decode(verified_payload)))
return headers, payload
def test():
kp = ed25519ll.crypto_sign_keypair()
payload = {'test': 'onstartup'}
jwsjs = json.loads(json.dumps(sign(payload, kp)))
verify(jwsjs)
jwsjs['payload'] += 'x'
try:
verify(jwsjs)
except ValueError:
pass
else: # pragma no cover
raise RuntimeError("No error from bad wheel.signatures payload.")
|
mpl-2.0
|
SaschaWillems/vulkan_slim
|
external/vulkan/spirv.py
|
11
|
26312
|
# Copyright (c) 2014-2017 The Khronos Group Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and/or associated documentation files (the "Materials"),
# to deal in the Materials without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Materials, and to permit persons to whom the
# Materials are furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Materials.
#
# MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
# STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
# HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
#
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
# IN THE MATERIALS.
# This header is automatically generated by the same tool that creates
# the Binary Section of the SPIR-V specification.
# Enumeration tokens for SPIR-V, in various styles:
# C, C++, C++11, JSON, Lua, Python
#
# - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
# - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
# - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
# - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
# - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
#
# Some tokens act like mask values, which can be OR'd together,
# while others are mutually exclusive. The mask-like ones have
# "Mask" in their name, and a parallel enum that has the shift
# amount (1 << x) for each corresponding enumerant.
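# For example, the ImageOperands 'Bias' and 'Lod' operands have shift values
# 0 and 1, so their mask constants are 1 << 0 and 1 << 1; combining them:
#
#     operands = spv['ImageOperandsMask']['Bias'] | spv['ImageOperandsMask']['Lod']  # == 0x3
#
# (illustrative only -- any entries of a *Mask enum can be OR'd the same way,
# while the non-mask enums are mutually exclusive values.)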
spv = {
'MagicNumber' : 0x07230203,
'Version' : 0x00010100,
'Revision' : 6,
'OpCodeMask' : 0xffff,
'WordCountShift' : 16,
'SourceLanguage' : {
'Unknown' : 0,
'ESSL' : 1,
'GLSL' : 2,
'OpenCL_C' : 3,
'OpenCL_CPP' : 4,
},
'ExecutionModel' : {
'Vertex' : 0,
'TessellationControl' : 1,
'TessellationEvaluation' : 2,
'Geometry' : 3,
'Fragment' : 4,
'GLCompute' : 5,
'Kernel' : 6,
},
'AddressingModel' : {
'Logical' : 0,
'Physical32' : 1,
'Physical64' : 2,
},
'MemoryModel' : {
'Simple' : 0,
'GLSL450' : 1,
'OpenCL' : 2,
},
'ExecutionMode' : {
'Invocations' : 0,
'SpacingEqual' : 1,
'SpacingFractionalEven' : 2,
'SpacingFractionalOdd' : 3,
'VertexOrderCw' : 4,
'VertexOrderCcw' : 5,
'PixelCenterInteger' : 6,
'OriginUpperLeft' : 7,
'OriginLowerLeft' : 8,
'EarlyFragmentTests' : 9,
'PointMode' : 10,
'Xfb' : 11,
'DepthReplacing' : 12,
'DepthGreater' : 14,
'DepthLess' : 15,
'DepthUnchanged' : 16,
'LocalSize' : 17,
'LocalSizeHint' : 18,
'InputPoints' : 19,
'InputLines' : 20,
'InputLinesAdjacency' : 21,
'Triangles' : 22,
'InputTrianglesAdjacency' : 23,
'Quads' : 24,
'Isolines' : 25,
'OutputVertices' : 26,
'OutputPoints' : 27,
'OutputLineStrip' : 28,
'OutputTriangleStrip' : 29,
'VecTypeHint' : 30,
'ContractionOff' : 31,
'Initializer' : 33,
'Finalizer' : 34,
'SubgroupSize' : 35,
'SubgroupsPerWorkgroup' : 36,
},
'StorageClass' : {
'UniformConstant' : 0,
'Input' : 1,
'Uniform' : 2,
'Output' : 3,
'Workgroup' : 4,
'CrossWorkgroup' : 5,
'Private' : 6,
'Function' : 7,
'Generic' : 8,
'PushConstant' : 9,
'AtomicCounter' : 10,
'Image' : 11,
},
'Dim' : {
'Dim1D' : 0,
'Dim2D' : 1,
'Dim3D' : 2,
'Cube' : 3,
'Rect' : 4,
'Buffer' : 5,
'SubpassData' : 6,
},
'SamplerAddressingMode' : {
'None' : 0,
'ClampToEdge' : 1,
'Clamp' : 2,
'Repeat' : 3,
'RepeatMirrored' : 4,
},
'SamplerFilterMode' : {
'Nearest' : 0,
'Linear' : 1,
},
'ImageFormat' : {
'Unknown' : 0,
'Rgba32f' : 1,
'Rgba16f' : 2,
'R32f' : 3,
'Rgba8' : 4,
'Rgba8Snorm' : 5,
'Rg32f' : 6,
'Rg16f' : 7,
'R11fG11fB10f' : 8,
'R16f' : 9,
'Rgba16' : 10,
'Rgb10A2' : 11,
'Rg16' : 12,
'Rg8' : 13,
'R16' : 14,
'R8' : 15,
'Rgba16Snorm' : 16,
'Rg16Snorm' : 17,
'Rg8Snorm' : 18,
'R16Snorm' : 19,
'R8Snorm' : 20,
'Rgba32i' : 21,
'Rgba16i' : 22,
'Rgba8i' : 23,
'R32i' : 24,
'Rg32i' : 25,
'Rg16i' : 26,
'Rg8i' : 27,
'R16i' : 28,
'R8i' : 29,
'Rgba32ui' : 30,
'Rgba16ui' : 31,
'Rgba8ui' : 32,
'R32ui' : 33,
'Rgb10a2ui' : 34,
'Rg32ui' : 35,
'Rg16ui' : 36,
'Rg8ui' : 37,
'R16ui' : 38,
'R8ui' : 39,
},
'ImageChannelOrder' : {
'R' : 0,
'A' : 1,
'RG' : 2,
'RA' : 3,
'RGB' : 4,
'RGBA' : 5,
'BGRA' : 6,
'ARGB' : 7,
'Intensity' : 8,
'Luminance' : 9,
'Rx' : 10,
'RGx' : 11,
'RGBx' : 12,
'Depth' : 13,
'DepthStencil' : 14,
'sRGB' : 15,
'sRGBx' : 16,
'sRGBA' : 17,
'sBGRA' : 18,
'ABGR' : 19,
},
'ImageChannelDataType' : {
'SnormInt8' : 0,
'SnormInt16' : 1,
'UnormInt8' : 2,
'UnormInt16' : 3,
'UnormShort565' : 4,
'UnormShort555' : 5,
'UnormInt101010' : 6,
'SignedInt8' : 7,
'SignedInt16' : 8,
'SignedInt32' : 9,
'UnsignedInt8' : 10,
'UnsignedInt16' : 11,
'UnsignedInt32' : 12,
'HalfFloat' : 13,
'Float' : 14,
'UnormInt24' : 15,
'UnormInt101010_2' : 16,
},
'ImageOperandsShift' : {
'Bias' : 0,
'Lod' : 1,
'Grad' : 2,
'ConstOffset' : 3,
'Offset' : 4,
'ConstOffsets' : 5,
'Sample' : 6,
'MinLod' : 7,
},
'ImageOperandsMask' : {
'MaskNone' : 0,
'Bias' : 0x00000001,
'Lod' : 0x00000002,
'Grad' : 0x00000004,
'ConstOffset' : 0x00000008,
'Offset' : 0x00000010,
'ConstOffsets' : 0x00000020,
'Sample' : 0x00000040,
'MinLod' : 0x00000080,
},
'FPFastMathModeShift' : {
'NotNaN' : 0,
'NotInf' : 1,
'NSZ' : 2,
'AllowRecip' : 3,
'Fast' : 4,
},
'FPFastMathModeMask' : {
'MaskNone' : 0,
'NotNaN' : 0x00000001,
'NotInf' : 0x00000002,
'NSZ' : 0x00000004,
'AllowRecip' : 0x00000008,
'Fast' : 0x00000010,
},
'FPRoundingMode' : {
'RTE' : 0,
'RTZ' : 1,
'RTP' : 2,
'RTN' : 3,
},
'LinkageType' : {
'Export' : 0,
'Import' : 1,
},
'AccessQualifier' : {
'ReadOnly' : 0,
'WriteOnly' : 1,
'ReadWrite' : 2,
},
'FunctionParameterAttribute' : {
'Zext' : 0,
'Sext' : 1,
'ByVal' : 2,
'Sret' : 3,
'NoAlias' : 4,
'NoCapture' : 5,
'NoWrite' : 6,
'NoReadWrite' : 7,
},
'Decoration' : {
'RelaxedPrecision' : 0,
'SpecId' : 1,
'Block' : 2,
'BufferBlock' : 3,
'RowMajor' : 4,
'ColMajor' : 5,
'ArrayStride' : 6,
'MatrixStride' : 7,
'GLSLShared' : 8,
'GLSLPacked' : 9,
'CPacked' : 10,
'BuiltIn' : 11,
'NoPerspective' : 13,
'Flat' : 14,
'Patch' : 15,
'Centroid' : 16,
'Sample' : 17,
'Invariant' : 18,
'Restrict' : 19,
'Aliased' : 20,
'Volatile' : 21,
'Constant' : 22,
'Coherent' : 23,
'NonWritable' : 24,
'NonReadable' : 25,
'Uniform' : 26,
'SaturatedConversion' : 28,
'Stream' : 29,
'Location' : 30,
'Component' : 31,
'Index' : 32,
'Binding' : 33,
'DescriptorSet' : 34,
'Offset' : 35,
'XfbBuffer' : 36,
'XfbStride' : 37,
'FuncParamAttr' : 38,
'FPRoundingMode' : 39,
'FPFastMathMode' : 40,
'LinkageAttributes' : 41,
'NoContraction' : 42,
'InputAttachmentIndex' : 43,
'Alignment' : 44,
'MaxByteOffset' : 45,
'OverrideCoverageNV' : 5248,
'PassthroughNV' : 5250,
'ViewportRelativeNV' : 5252,
'SecondaryViewportRelativeNV' : 5256,
},
'BuiltIn' : {
'Position' : 0,
'PointSize' : 1,
'ClipDistance' : 3,
'CullDistance' : 4,
'VertexId' : 5,
'InstanceId' : 6,
'PrimitiveId' : 7,
'InvocationId' : 8,
'Layer' : 9,
'ViewportIndex' : 10,
'TessLevelOuter' : 11,
'TessLevelInner' : 12,
'TessCoord' : 13,
'PatchVertices' : 14,
'FragCoord' : 15,
'PointCoord' : 16,
'FrontFacing' : 17,
'SampleId' : 18,
'SamplePosition' : 19,
'SampleMask' : 20,
'FragDepth' : 22,
'HelperInvocation' : 23,
'NumWorkgroups' : 24,
'WorkgroupSize' : 25,
'WorkgroupId' : 26,
'LocalInvocationId' : 27,
'GlobalInvocationId' : 28,
'LocalInvocationIndex' : 29,
'WorkDim' : 30,
'GlobalSize' : 31,
'EnqueuedWorkgroupSize' : 32,
'GlobalOffset' : 33,
'GlobalLinearId' : 34,
'SubgroupSize' : 36,
'SubgroupMaxSize' : 37,
'NumSubgroups' : 38,
'NumEnqueuedSubgroups' : 39,
'SubgroupId' : 40,
'SubgroupLocalInvocationId' : 41,
'VertexIndex' : 42,
'InstanceIndex' : 43,
'SubgroupEqMaskKHR' : 4416,
'SubgroupGeMaskKHR' : 4417,
'SubgroupGtMaskKHR' : 4418,
'SubgroupLeMaskKHR' : 4419,
'SubgroupLtMaskKHR' : 4420,
'BaseVertex' : 4424,
'BaseInstance' : 4425,
'DrawIndex' : 4426,
'DeviceIndex' : 4438,
'ViewIndex' : 4440,
'ViewportMaskNV' : 5253,
'SecondaryPositionNV' : 5257,
'SecondaryViewportMaskNV' : 5258,
'PositionPerViewNV' : 5261,
'ViewportMaskPerViewNV' : 5262,
},
'SelectionControlShift' : {
'Flatten' : 0,
'DontFlatten' : 1,
},
'SelectionControlMask' : {
'MaskNone' : 0,
'Flatten' : 0x00000001,
'DontFlatten' : 0x00000002,
},
'LoopControlShift' : {
'Unroll' : 0,
'DontUnroll' : 1,
'DependencyInfinite' : 2,
'DependencyLength' : 3,
},
'LoopControlMask' : {
'MaskNone' : 0,
'Unroll' : 0x00000001,
'DontUnroll' : 0x00000002,
'DependencyInfinite' : 0x00000004,
'DependencyLength' : 0x00000008,
},
'FunctionControlShift' : {
'Inline' : 0,
'DontInline' : 1,
'Pure' : 2,
'Const' : 3,
},
'FunctionControlMask' : {
'MaskNone' : 0,
'Inline' : 0x00000001,
'DontInline' : 0x00000002,
'Pure' : 0x00000004,
'Const' : 0x00000008,
},
'MemorySemanticsShift' : {
'Acquire' : 1,
'Release' : 2,
'AcquireRelease' : 3,
'SequentiallyConsistent' : 4,
'UniformMemory' : 6,
'SubgroupMemory' : 7,
'WorkgroupMemory' : 8,
'CrossWorkgroupMemory' : 9,
'AtomicCounterMemory' : 10,
'ImageMemory' : 11,
},
'MemorySemanticsMask' : {
'MaskNone' : 0,
'Acquire' : 0x00000002,
'Release' : 0x00000004,
'AcquireRelease' : 0x00000008,
'SequentiallyConsistent' : 0x00000010,
'UniformMemory' : 0x00000040,
'SubgroupMemory' : 0x00000080,
'WorkgroupMemory' : 0x00000100,
'CrossWorkgroupMemory' : 0x00000200,
'AtomicCounterMemory' : 0x00000400,
'ImageMemory' : 0x00000800,
},
'MemoryAccessShift' : {
'Volatile' : 0,
'Aligned' : 1,
'Nontemporal' : 2,
},
'MemoryAccessMask' : {
'MaskNone' : 0,
'Volatile' : 0x00000001,
'Aligned' : 0x00000002,
'Nontemporal' : 0x00000004,
},
'Scope' : {
'CrossDevice' : 0,
'Device' : 1,
'Workgroup' : 2,
'Subgroup' : 3,
'Invocation' : 4,
},
'GroupOperation' : {
'Reduce' : 0,
'InclusiveScan' : 1,
'ExclusiveScan' : 2,
},
'KernelEnqueueFlags' : {
'NoWait' : 0,
'WaitKernel' : 1,
'WaitWorkGroup' : 2,
},
'KernelProfilingInfoShift' : {
'CmdExecTime' : 0,
},
'KernelProfilingInfoMask' : {
'MaskNone' : 0,
'CmdExecTime' : 0x00000001,
},
'Capability' : {
'Matrix' : 0,
'Shader' : 1,
'Geometry' : 2,
'Tessellation' : 3,
'Addresses' : 4,
'Linkage' : 5,
'Kernel' : 6,
'Vector16' : 7,
'Float16Buffer' : 8,
'Float16' : 9,
'Float64' : 10,
'Int64' : 11,
'Int64Atomics' : 12,
'ImageBasic' : 13,
'ImageReadWrite' : 14,
'ImageMipmap' : 15,
'Pipes' : 17,
'Groups' : 18,
'DeviceEnqueue' : 19,
'LiteralSampler' : 20,
'AtomicStorage' : 21,
'Int16' : 22,
'TessellationPointSize' : 23,
'GeometryPointSize' : 24,
'ImageGatherExtended' : 25,
'StorageImageMultisample' : 27,
'UniformBufferArrayDynamicIndexing' : 28,
'SampledImageArrayDynamicIndexing' : 29,
'StorageBufferArrayDynamicIndexing' : 30,
'StorageImageArrayDynamicIndexing' : 31,
'ClipDistance' : 32,
'CullDistance' : 33,
'ImageCubeArray' : 34,
'SampleRateShading' : 35,
'ImageRect' : 36,
'SampledRect' : 37,
'GenericPointer' : 38,
'Int8' : 39,
'InputAttachment' : 40,
'SparseResidency' : 41,
'MinLod' : 42,
'Sampled1D' : 43,
'Image1D' : 44,
'SampledCubeArray' : 45,
'SampledBuffer' : 46,
'ImageBuffer' : 47,
'ImageMSArray' : 48,
'StorageImageExtendedFormats' : 49,
'ImageQuery' : 50,
'DerivativeControl' : 51,
'InterpolationFunction' : 52,
'TransformFeedback' : 53,
'GeometryStreams' : 54,
'StorageImageReadWithoutFormat' : 55,
'StorageImageWriteWithoutFormat' : 56,
'MultiViewport' : 57,
'SubgroupDispatch' : 58,
'NamedBarrier' : 59,
'PipeStorage' : 60,
'SubgroupBallotKHR' : 4423,
'DrawParameters' : 4427,
'SubgroupVoteKHR' : 4431,
'StorageUniformBufferBlock16' : 4433,
'StorageUniform16' : 4434,
'StoragePushConstant16' : 4435,
'StorageInputOutput16' : 4436,
'DeviceGroup' : 4437,
'MultiView' : 4439,
'SampleMaskOverrideCoverageNV' : 5249,
'GeometryShaderPassthroughNV' : 5251,
'ShaderViewportIndexLayerNV' : 5254,
'ShaderViewportMaskNV' : 5255,
'ShaderStereoViewNV' : 5259,
'PerViewAttributesNV' : 5260,
},
'Op' : {
'OpNop' : 0,
'OpUndef' : 1,
'OpSourceContinued' : 2,
'OpSource' : 3,
'OpSourceExtension' : 4,
'OpName' : 5,
'OpMemberName' : 6,
'OpString' : 7,
'OpLine' : 8,
'OpExtension' : 10,
'OpExtInstImport' : 11,
'OpExtInst' : 12,
'OpMemoryModel' : 14,
'OpEntryPoint' : 15,
'OpExecutionMode' : 16,
'OpCapability' : 17,
'OpTypeVoid' : 19,
'OpTypeBool' : 20,
'OpTypeInt' : 21,
'OpTypeFloat' : 22,
'OpTypeVector' : 23,
'OpTypeMatrix' : 24,
'OpTypeImage' : 25,
'OpTypeSampler' : 26,
'OpTypeSampledImage' : 27,
'OpTypeArray' : 28,
'OpTypeRuntimeArray' : 29,
'OpTypeStruct' : 30,
'OpTypeOpaque' : 31,
'OpTypePointer' : 32,
'OpTypeFunction' : 33,
'OpTypeEvent' : 34,
'OpTypeDeviceEvent' : 35,
'OpTypeReserveId' : 36,
'OpTypeQueue' : 37,
'OpTypePipe' : 38,
'OpTypeForwardPointer' : 39,
'OpConstantTrue' : 41,
'OpConstantFalse' : 42,
'OpConstant' : 43,
'OpConstantComposite' : 44,
'OpConstantSampler' : 45,
'OpConstantNull' : 46,
'OpSpecConstantTrue' : 48,
'OpSpecConstantFalse' : 49,
'OpSpecConstant' : 50,
'OpSpecConstantComposite' : 51,
'OpSpecConstantOp' : 52,
'OpFunction' : 54,
'OpFunctionParameter' : 55,
'OpFunctionEnd' : 56,
'OpFunctionCall' : 57,
'OpVariable' : 59,
'OpImageTexelPointer' : 60,
'OpLoad' : 61,
'OpStore' : 62,
'OpCopyMemory' : 63,
'OpCopyMemorySized' : 64,
'OpAccessChain' : 65,
'OpInBoundsAccessChain' : 66,
'OpPtrAccessChain' : 67,
'OpArrayLength' : 68,
'OpGenericPtrMemSemantics' : 69,
'OpInBoundsPtrAccessChain' : 70,
'OpDecorate' : 71,
'OpMemberDecorate' : 72,
'OpDecorationGroup' : 73,
'OpGroupDecorate' : 74,
'OpGroupMemberDecorate' : 75,
'OpVectorExtractDynamic' : 77,
'OpVectorInsertDynamic' : 78,
'OpVectorShuffle' : 79,
'OpCompositeConstruct' : 80,
'OpCompositeExtract' : 81,
'OpCompositeInsert' : 82,
'OpCopyObject' : 83,
'OpTranspose' : 84,
'OpSampledImage' : 86,
'OpImageSampleImplicitLod' : 87,
'OpImageSampleExplicitLod' : 88,
'OpImageSampleDrefImplicitLod' : 89,
'OpImageSampleDrefExplicitLod' : 90,
'OpImageSampleProjImplicitLod' : 91,
'OpImageSampleProjExplicitLod' : 92,
'OpImageSampleProjDrefImplicitLod' : 93,
'OpImageSampleProjDrefExplicitLod' : 94,
'OpImageFetch' : 95,
'OpImageGather' : 96,
'OpImageDrefGather' : 97,
'OpImageRead' : 98,
'OpImageWrite' : 99,
'OpImage' : 100,
'OpImageQueryFormat' : 101,
'OpImageQueryOrder' : 102,
'OpImageQuerySizeLod' : 103,
'OpImageQuerySize' : 104,
'OpImageQueryLod' : 105,
'OpImageQueryLevels' : 106,
'OpImageQuerySamples' : 107,
'OpConvertFToU' : 109,
'OpConvertFToS' : 110,
'OpConvertSToF' : 111,
'OpConvertUToF' : 112,
'OpUConvert' : 113,
'OpSConvert' : 114,
'OpFConvert' : 115,
'OpQuantizeToF16' : 116,
'OpConvertPtrToU' : 117,
'OpSatConvertSToU' : 118,
'OpSatConvertUToS' : 119,
'OpConvertUToPtr' : 120,
'OpPtrCastToGeneric' : 121,
'OpGenericCastToPtr' : 122,
'OpGenericCastToPtrExplicit' : 123,
'OpBitcast' : 124,
'OpSNegate' : 126,
'OpFNegate' : 127,
'OpIAdd' : 128,
'OpFAdd' : 129,
'OpISub' : 130,
'OpFSub' : 131,
'OpIMul' : 132,
'OpFMul' : 133,
'OpUDiv' : 134,
'OpSDiv' : 135,
'OpFDiv' : 136,
'OpUMod' : 137,
'OpSRem' : 138,
'OpSMod' : 139,
'OpFRem' : 140,
'OpFMod' : 141,
'OpVectorTimesScalar' : 142,
'OpMatrixTimesScalar' : 143,
'OpVectorTimesMatrix' : 144,
'OpMatrixTimesVector' : 145,
'OpMatrixTimesMatrix' : 146,
'OpOuterProduct' : 147,
'OpDot' : 148,
'OpIAddCarry' : 149,
'OpISubBorrow' : 150,
'OpUMulExtended' : 151,
'OpSMulExtended' : 152,
'OpAny' : 154,
'OpAll' : 155,
'OpIsNan' : 156,
'OpIsInf' : 157,
'OpIsFinite' : 158,
'OpIsNormal' : 159,
'OpSignBitSet' : 160,
'OpLessOrGreater' : 161,
'OpOrdered' : 162,
'OpUnordered' : 163,
'OpLogicalEqual' : 164,
'OpLogicalNotEqual' : 165,
'OpLogicalOr' : 166,
'OpLogicalAnd' : 167,
'OpLogicalNot' : 168,
'OpSelect' : 169,
'OpIEqual' : 170,
'OpINotEqual' : 171,
'OpUGreaterThan' : 172,
'OpSGreaterThan' : 173,
'OpUGreaterThanEqual' : 174,
'OpSGreaterThanEqual' : 175,
'OpULessThan' : 176,
'OpSLessThan' : 177,
'OpULessThanEqual' : 178,
'OpSLessThanEqual' : 179,
'OpFOrdEqual' : 180,
'OpFUnordEqual' : 181,
'OpFOrdNotEqual' : 182,
'OpFUnordNotEqual' : 183,
'OpFOrdLessThan' : 184,
'OpFUnordLessThan' : 185,
'OpFOrdGreaterThan' : 186,
'OpFUnordGreaterThan' : 187,
'OpFOrdLessThanEqual' : 188,
'OpFUnordLessThanEqual' : 189,
'OpFOrdGreaterThanEqual' : 190,
'OpFUnordGreaterThanEqual' : 191,
'OpShiftRightLogical' : 194,
'OpShiftRightArithmetic' : 195,
'OpShiftLeftLogical' : 196,
'OpBitwiseOr' : 197,
'OpBitwiseXor' : 198,
'OpBitwiseAnd' : 199,
'OpNot' : 200,
'OpBitFieldInsert' : 201,
'OpBitFieldSExtract' : 202,
'OpBitFieldUExtract' : 203,
'OpBitReverse' : 204,
'OpBitCount' : 205,
'OpDPdx' : 207,
'OpDPdy' : 208,
'OpFwidth' : 209,
'OpDPdxFine' : 210,
'OpDPdyFine' : 211,
'OpFwidthFine' : 212,
'OpDPdxCoarse' : 213,
'OpDPdyCoarse' : 214,
'OpFwidthCoarse' : 215,
'OpEmitVertex' : 218,
'OpEndPrimitive' : 219,
'OpEmitStreamVertex' : 220,
'OpEndStreamPrimitive' : 221,
'OpControlBarrier' : 224,
'OpMemoryBarrier' : 225,
'OpAtomicLoad' : 227,
'OpAtomicStore' : 228,
'OpAtomicExchange' : 229,
'OpAtomicCompareExchange' : 230,
'OpAtomicCompareExchangeWeak' : 231,
'OpAtomicIIncrement' : 232,
'OpAtomicIDecrement' : 233,
'OpAtomicIAdd' : 234,
'OpAtomicISub' : 235,
'OpAtomicSMin' : 236,
'OpAtomicUMin' : 237,
'OpAtomicSMax' : 238,
'OpAtomicUMax' : 239,
'OpAtomicAnd' : 240,
'OpAtomicOr' : 241,
'OpAtomicXor' : 242,
'OpPhi' : 245,
'OpLoopMerge' : 246,
'OpSelectionMerge' : 247,
'OpLabel' : 248,
'OpBranch' : 249,
'OpBranchConditional' : 250,
'OpSwitch' : 251,
'OpKill' : 252,
'OpReturn' : 253,
'OpReturnValue' : 254,
'OpUnreachable' : 255,
'OpLifetimeStart' : 256,
'OpLifetimeStop' : 257,
'OpGroupAsyncCopy' : 259,
'OpGroupWaitEvents' : 260,
'OpGroupAll' : 261,
'OpGroupAny' : 262,
'OpGroupBroadcast' : 263,
'OpGroupIAdd' : 264,
'OpGroupFAdd' : 265,
'OpGroupFMin' : 266,
'OpGroupUMin' : 267,
'OpGroupSMin' : 268,
'OpGroupFMax' : 269,
'OpGroupUMax' : 270,
'OpGroupSMax' : 271,
'OpReadPipe' : 274,
'OpWritePipe' : 275,
'OpReservedReadPipe' : 276,
'OpReservedWritePipe' : 277,
'OpReserveReadPipePackets' : 278,
'OpReserveWritePipePackets' : 279,
'OpCommitReadPipe' : 280,
'OpCommitWritePipe' : 281,
'OpIsValidReserveId' : 282,
'OpGetNumPipePackets' : 283,
'OpGetMaxPipePackets' : 284,
'OpGroupReserveReadPipePackets' : 285,
'OpGroupReserveWritePipePackets' : 286,
'OpGroupCommitReadPipe' : 287,
'OpGroupCommitWritePipe' : 288,
'OpEnqueueMarker' : 291,
'OpEnqueueKernel' : 292,
'OpGetKernelNDrangeSubGroupCount' : 293,
'OpGetKernelNDrangeMaxSubGroupSize' : 294,
'OpGetKernelWorkGroupSize' : 295,
'OpGetKernelPreferredWorkGroupSizeMultiple' : 296,
'OpRetainEvent' : 297,
'OpReleaseEvent' : 298,
'OpCreateUserEvent' : 299,
'OpIsValidEvent' : 300,
'OpSetUserEventStatus' : 301,
'OpCaptureEventProfilingInfo' : 302,
'OpGetDefaultQueue' : 303,
'OpBuildNDRange' : 304,
'OpImageSparseSampleImplicitLod' : 305,
'OpImageSparseSampleExplicitLod' : 306,
'OpImageSparseSampleDrefImplicitLod' : 307,
'OpImageSparseSampleDrefExplicitLod' : 308,
'OpImageSparseSampleProjImplicitLod' : 309,
'OpImageSparseSampleProjExplicitLod' : 310,
'OpImageSparseSampleProjDrefImplicitLod' : 311,
'OpImageSparseSampleProjDrefExplicitLod' : 312,
'OpImageSparseFetch' : 313,
'OpImageSparseGather' : 314,
'OpImageSparseDrefGather' : 315,
'OpImageSparseTexelsResident' : 316,
'OpNoLine' : 317,
'OpAtomicFlagTestAndSet' : 318,
'OpAtomicFlagClear' : 319,
'OpImageSparseRead' : 320,
'OpSizeOf' : 321,
'OpTypePipeStorage' : 322,
'OpConstantPipeStorage' : 323,
'OpCreatePipeFromPipeStorage' : 324,
'OpGetKernelLocalSizeForSubgroupCount' : 325,
'OpGetKernelMaxNumSubgroups' : 326,
'OpTypeNamedBarrier' : 327,
'OpNamedBarrierInitialize' : 328,
'OpMemoryNamedBarrier' : 329,
'OpModuleProcessed' : 330,
'OpSubgroupBallotKHR' : 4421,
'OpSubgroupFirstInvocationKHR' : 4422,
'OpSubgroupAllKHR' : 4428,
'OpSubgroupAnyKHR' : 4429,
'OpSubgroupAllEqualKHR' : 4430,
'OpSubgroupReadInvocationKHR' : 4432,
},
}
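
# Hedged usage sketch (editor addition, not part of the generated header):
# this assumes the dictionary above is bound to the name `spv`, as in the
# upstream SPIR-V Python header; adjust the name if the binding differs.
if __name__ == '__main__':
    # Enum values can be read directly ...
    assert spv['Op']['OpLoad'] == 61
    assert spv['Capability']['Shader'] == 1
    # ... and reversed for disassembly-style lookups.
    opcode_names = {value: name for name, value in spv['Op'].items()}
    assert opcode_names[61] == 'OpLoad'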
|
mit
|
mapleoin/braintree_python
|
braintree/payment_method_gateway.py
|
4
|
4012
|
import braintree
from braintree.apple_pay_card import ApplePayCard
from braintree.credit_card import CreditCard
from braintree.payment_method import PaymentMethod
from braintree.paypal_account import PayPalAccount
from braintree.europe_bank_account import EuropeBankAccount
from braintree.coinbase_account import CoinbaseAccount
from braintree.android_pay_card import AndroidPayCard
from braintree.unknown_payment_method import UnknownPaymentMethod
from braintree.error_result import ErrorResult
from braintree.exceptions.not_found_error import NotFoundError
from braintree.ids_search import IdsSearch
from braintree.resource import Resource
from braintree.resource_collection import ResourceCollection
from braintree.successful_result import SuccessfulResult
class PaymentMethodGateway(object):
def __init__(self, gateway):
self.gateway = gateway
self.config = gateway.config
def create(self, params={}):
Resource.verify_keys(params, PaymentMethod.create_signature())
return self._post("/payment_methods", {"payment_method": params})
def find(self, payment_method_token):
try:
if payment_method_token is None or payment_method_token.strip() == "":
raise NotFoundError()
response = self.config.http().get(self.config.base_merchant_path() + "/payment_methods/any/" + payment_method_token)
return self._parse_payment_method(response)
except NotFoundError:
raise NotFoundError("payment method with token " + repr(payment_method_token) + " not found")
def update(self, payment_method_token, params):
Resource.verify_keys(params, PaymentMethod.update_signature())
try:
if payment_method_token is None or payment_method_token.strip() == "":
raise NotFoundError()
return self._put(
"/payment_methods/any/" + payment_method_token,
{"payment_method": params}
)
except NotFoundError:
raise NotFoundError("payment method with token " + repr(payment_method_token) + " not found")
def delete(self, payment_method_token):
self.config.http().delete(self.config.base_merchant_path() + "/payment_methods/any/" + payment_method_token)
return SuccessfulResult()
def _post(self, url, params={}):
response = self.config.http().post(self.config.base_merchant_path() + url, params)
if "api_error_response" in response:
return ErrorResult(self.gateway, response["api_error_response"])
else:
payment_method = self._parse_payment_method(response)
return SuccessfulResult({"payment_method": payment_method})
def _put(self, url, params={}):
response = self.config.http().put(self.config.base_merchant_path() + url, params)
if "api_error_response" in response:
return ErrorResult(self.gateway, response["api_error_response"])
else:
payment_method = self._parse_payment_method(response)
return SuccessfulResult({"payment_method": payment_method})
def _parse_payment_method(self, response):
if "paypal_account" in response:
return PayPalAccount(self.gateway, response["paypal_account"])
elif "credit_card" in response:
return CreditCard(self.gateway, response["credit_card"])
elif "europe_bank_account" in response:
return EuropeBankAccount(self.gateway, response["europe_bank_account"])
elif "apple_pay_card" in response:
return ApplePayCard(self.gateway, response["apple_pay_card"])
elif "android_pay_card" in response:
return AndroidPayCard(self.gateway, response["android_pay_card"])
elif "coinbase_account" in response:
return CoinbaseAccount(self.gateway, response["coinbase_account"])
else:
name = list(response)[0]
return UnknownPaymentMethod(self.gateway, response[name])
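
# Hedged usage sketch (editor addition): PaymentMethodGateway is normally
# reached through the public braintree.PaymentMethod API rather than being
# instantiated directly. The credentials and nonce below are placeholders,
# so this block only illustrates the call shape and will not succeed against
# a real gateway.
if __name__ == '__main__':
    braintree.Configuration.configure(
        braintree.Environment.Sandbox,
        merchant_id="your_merchant_id",
        public_key="your_public_key",
        private_key="your_private_key",
    )
    result = braintree.PaymentMethod.create({
        "customer_id": "a_customer_id",
        "payment_method_nonce": "fake-valid-nonce",
    })
    if result.is_success:
        print(result.payment_method.token)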
|
mit
|
Krossom/python-for-android
|
python3-alpha/python3-src/Lib/test/test_bool.py
|
57
|
11719
|
# Test properties of bool promised by PEP 285
import unittest
from test import support
import os
class BoolTest(unittest.TestCase):
def test_subclass(self):
try:
class C(bool):
pass
except TypeError:
pass
else:
self.fail("bool should not be subclassable")
self.assertRaises(TypeError, int.__new__, bool, 0)
def test_print(self):
try:
fo = open(support.TESTFN, "w")
print(False, True, file=fo)
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), 'False True\n')
finally:
fo.close()
os.remove(support.TESTFN)
def test_repr(self):
self.assertEqual(repr(False), 'False')
self.assertEqual(repr(True), 'True')
self.assertEqual(eval(repr(False)), False)
self.assertEqual(eval(repr(True)), True)
def test_str(self):
self.assertEqual(str(False), 'False')
self.assertEqual(str(True), 'True')
def test_int(self):
self.assertEqual(int(False), 0)
self.assertIsNot(int(False), False)
self.assertEqual(int(True), 1)
self.assertIsNot(int(True), True)
def test_float(self):
self.assertEqual(float(False), 0.0)
self.assertIsNot(float(False), False)
self.assertEqual(float(True), 1.0)
self.assertIsNot(float(True), True)
def test_math(self):
self.assertEqual(+False, 0)
self.assertIsNot(+False, False)
self.assertEqual(-False, 0)
self.assertIsNot(-False, False)
self.assertEqual(abs(False), 0)
self.assertIsNot(abs(False), False)
self.assertEqual(+True, 1)
self.assertIsNot(+True, True)
self.assertEqual(-True, -1)
self.assertEqual(abs(True), 1)
self.assertIsNot(abs(True), True)
self.assertEqual(~False, -1)
self.assertEqual(~True, -2)
self.assertEqual(False+2, 2)
self.assertEqual(True+2, 3)
self.assertEqual(2+False, 2)
self.assertEqual(2+True, 3)
self.assertEqual(False+False, 0)
self.assertIsNot(False+False, False)
self.assertEqual(False+True, 1)
self.assertIsNot(False+True, True)
self.assertEqual(True+False, 1)
self.assertIsNot(True+False, True)
self.assertEqual(True+True, 2)
self.assertEqual(True-True, 0)
self.assertIsNot(True-True, False)
self.assertEqual(False-False, 0)
self.assertIsNot(False-False, False)
self.assertEqual(True-False, 1)
self.assertIsNot(True-False, True)
self.assertEqual(False-True, -1)
self.assertEqual(True*1, 1)
self.assertEqual(False*1, 0)
self.assertIsNot(False*1, False)
self.assertEqual(True/1, 1)
self.assertIsNot(True/1, True)
self.assertEqual(False/1, 0)
self.assertIsNot(False/1, False)
for b in False, True:
for i in 0, 1, 2:
self.assertEqual(b**i, int(b)**i)
self.assertIsNot(b**i, bool(int(b)**i))
for a in False, True:
for b in False, True:
self.assertIs(a&b, bool(int(a)&int(b)))
self.assertIs(a|b, bool(int(a)|int(b)))
self.assertIs(a^b, bool(int(a)^int(b)))
self.assertEqual(a&int(b), int(a)&int(b))
self.assertIsNot(a&int(b), bool(int(a)&int(b)))
self.assertEqual(a|int(b), int(a)|int(b))
self.assertIsNot(a|int(b), bool(int(a)|int(b)))
self.assertEqual(a^int(b), int(a)^int(b))
self.assertIsNot(a^int(b), bool(int(a)^int(b)))
self.assertEqual(int(a)&b, int(a)&int(b))
self.assertIsNot(int(a)&b, bool(int(a)&int(b)))
self.assertEqual(int(a)|b, int(a)|int(b))
self.assertIsNot(int(a)|b, bool(int(a)|int(b)))
self.assertEqual(int(a)^b, int(a)^int(b))
self.assertIsNot(int(a)^b, bool(int(a)^int(b)))
self.assertIs(1==1, True)
self.assertIs(1==0, False)
self.assertIs(0<1, True)
self.assertIs(1<0, False)
self.assertIs(0<=0, True)
self.assertIs(1<=0, False)
self.assertIs(1>0, True)
self.assertIs(1>1, False)
self.assertIs(1>=1, True)
self.assertIs(0>=1, False)
self.assertIs(0!=1, True)
self.assertIs(0!=0, False)
x = [1]
self.assertIs(x is x, True)
self.assertIs(x is not x, False)
self.assertIs(1 in x, True)
self.assertIs(0 in x, False)
self.assertIs(1 not in x, False)
self.assertIs(0 not in x, True)
x = {1: 2}
self.assertIs(x is x, True)
self.assertIs(x is not x, False)
self.assertIs(1 in x, True)
self.assertIs(0 in x, False)
self.assertIs(1 not in x, False)
self.assertIs(0 not in x, True)
self.assertIs(not True, False)
self.assertIs(not False, True)
def test_convert(self):
self.assertRaises(TypeError, bool, 42, 42)
self.assertIs(bool(10), True)
self.assertIs(bool(1), True)
self.assertIs(bool(-1), True)
self.assertIs(bool(0), False)
self.assertIs(bool("hello"), True)
self.assertIs(bool(""), False)
self.assertIs(bool(), False)
def test_format(self):
self.assertEqual("%d" % False, "0")
self.assertEqual("%d" % True, "1")
self.assertEqual("%x" % False, "0")
self.assertEqual("%x" % True, "1")
def test_hasattr(self):
self.assertIs(hasattr([], "append"), True)
self.assertIs(hasattr([], "wobble"), False)
def test_callable(self):
self.assertIs(callable(len), True)
self.assertIs(callable(1), False)
def test_isinstance(self):
self.assertIs(isinstance(True, bool), True)
self.assertIs(isinstance(False, bool), True)
self.assertIs(isinstance(True, int), True)
self.assertIs(isinstance(False, int), True)
self.assertIs(isinstance(1, bool), False)
self.assertIs(isinstance(0, bool), False)
def test_issubclass(self):
self.assertIs(issubclass(bool, int), True)
self.assertIs(issubclass(int, bool), False)
def test_contains(self):
self.assertIs(1 in {}, False)
self.assertIs(1 in {1:1}, True)
def test_string(self):
self.assertIs("xyz".endswith("z"), True)
self.assertIs("xyz".endswith("x"), False)
self.assertIs("xyz0123".isalnum(), True)
self.assertIs("@#$%".isalnum(), False)
self.assertIs("xyz".isalpha(), True)
self.assertIs("@#$%".isalpha(), False)
self.assertIs("0123".isdigit(), True)
self.assertIs("xyz".isdigit(), False)
self.assertIs("xyz".islower(), True)
self.assertIs("XYZ".islower(), False)
self.assertIs("0123".isdecimal(), True)
self.assertIs("xyz".isdecimal(), False)
self.assertIs("0123".isnumeric(), True)
self.assertIs("xyz".isnumeric(), False)
self.assertIs(" ".isspace(), True)
self.assertIs("\xa0".isspace(), True)
self.assertIs("\u3000".isspace(), True)
self.assertIs("XYZ".isspace(), False)
self.assertIs("X".istitle(), True)
self.assertIs("x".istitle(), False)
self.assertIs("XYZ".isupper(), True)
self.assertIs("xyz".isupper(), False)
self.assertIs("xyz".startswith("x"), True)
self.assertIs("xyz".startswith("z"), False)
def test_boolean(self):
self.assertEqual(True & 1, 1)
self.assertNotIsInstance(True & 1, bool)
self.assertIs(True & True, True)
self.assertEqual(True | 1, 1)
self.assertNotIsInstance(True | 1, bool)
self.assertIs(True | True, True)
self.assertEqual(True ^ 1, 0)
self.assertNotIsInstance(True ^ 1, bool)
self.assertIs(True ^ True, False)
def test_fileclosed(self):
try:
f = open(support.TESTFN, "w")
self.assertIs(f.closed, False)
f.close()
self.assertIs(f.closed, True)
finally:
os.remove(support.TESTFN)
def test_types(self):
# types are always true.
for t in [bool, complex, dict, float, int, list, object,
set, str, tuple, type]:
self.assertIs(bool(t), True)
def test_operator(self):
import operator
self.assertIs(operator.truth(0), False)
self.assertIs(operator.truth(1), True)
self.assertIs(operator.not_(1), False)
self.assertIs(operator.not_(0), True)
self.assertIs(operator.contains([], 1), False)
self.assertIs(operator.contains([1], 1), True)
self.assertIs(operator.lt(0, 0), False)
self.assertIs(operator.lt(0, 1), True)
self.assertIs(operator.is_(True, True), True)
self.assertIs(operator.is_(True, False), False)
self.assertIs(operator.is_not(True, True), False)
self.assertIs(operator.is_not(True, False), True)
def test_marshal(self):
import marshal
self.assertIs(marshal.loads(marshal.dumps(True)), True)
self.assertIs(marshal.loads(marshal.dumps(False)), False)
def test_pickle(self):
import pickle
self.assertIs(pickle.loads(pickle.dumps(True)), True)
self.assertIs(pickle.loads(pickle.dumps(False)), False)
self.assertIs(pickle.loads(pickle.dumps(True, True)), True)
self.assertIs(pickle.loads(pickle.dumps(False, True)), False)
def test_picklevalues(self):
# Test for specific backwards-compatible pickle values
import pickle
self.assertEqual(pickle.dumps(True, protocol=0), b"I01\n.")
self.assertEqual(pickle.dumps(False, protocol=0), b"I00\n.")
self.assertEqual(pickle.dumps(True, protocol=1), b"I01\n.")
self.assertEqual(pickle.dumps(False, protocol=1), b"I00\n.")
self.assertEqual(pickle.dumps(True, protocol=2), b'\x80\x02\x88.')
self.assertEqual(pickle.dumps(False, protocol=2), b'\x80\x02\x89.')
def test_convert_to_bool(self):
# Verify that TypeError occurs when bad things are returned
# from __bool__(). This isn't really a bool test, but
# it's related.
check = lambda o: self.assertRaises(TypeError, bool, o)
class Foo(object):
def __bool__(self):
return self
check(Foo())
class Bar(object):
def __bool__(self):
return "Yes"
check(Bar())
class Baz(int):
def __bool__(self):
return self
check(Baz())
# __bool__() must return a bool not an int
class Spam(int):
def __bool__(self):
return 1
check(Spam())
class Eggs:
def __len__(self):
return -1
self.assertRaises(ValueError, bool, Eggs())
def test_sane_len(self):
# this test just tests our assumptions about __len__
# this will start failing if __len__ changes assertions
for badval in ['illegal', -1, 1 << 32]:
class A:
def __len__(self):
return badval
try:
bool(A())
except (Exception) as e_bool:
try:
len(A())
except (Exception) as e_len:
self.assertEqual(str(e_bool), str(e_len))
def test_main():
support.run_unittest(BoolTest)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
Azure/azure-sdk-for-python
|
sdk/storage/azure-storage-queue/azure/storage/queue/_models.py
|
1
|
19037
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=too-few-public-methods, too-many-instance-attributes
# pylint: disable=super-init-not-called
from typing import List # pylint: disable=unused-import
from azure.core.exceptions import HttpResponseError
from azure.core.paging import PageIterator
from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
from ._shared.models import DictMixin
from ._generated.models import AccessPolicy as GenAccessPolicy
from ._generated.models import Logging as GeneratedLogging
from ._generated.models import Metrics as GeneratedMetrics
from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
from ._generated.models import CorsRule as GeneratedCorsRule
class QueueAnalyticsLogging(GeneratedLogging):
"""Azure Analytics Logging settings.
All required parameters must be populated in order to send to Azure.
:keyword str version: Required. The version of Storage Analytics to configure.
:keyword bool delete: Required. Indicates whether all delete requests should be logged.
:keyword bool read: Required. Indicates whether all read requests should be logged.
:keyword bool write: Required. Indicates whether all write requests should be logged.
:keyword ~azure.storage.queue.RetentionPolicy retention_policy: Required.
The retention policy for the metrics.
"""
def __init__(self, **kwargs):
self.version = kwargs.get('version', u'1.0')
self.delete = kwargs.get('delete', False)
self.read = kwargs.get('read', False)
self.write = kwargs.get('write', False)
self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
@classmethod
def _from_generated(cls, generated):
if not generated:
return cls()
return cls(
version=generated.version,
delete=generated.delete,
read=generated.read,
write=generated.write,
retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access
)
class Metrics(GeneratedMetrics):
"""A summary of request statistics grouped by API in hour or minute aggregates.
All required parameters must be populated in order to send to Azure.
:keyword str version: The version of Storage Analytics to configure.
:keyword bool enabled: Required. Indicates whether metrics are enabled for the service.
:keyword bool include_apis: Indicates whether metrics should generate summary
statistics for called API operations.
:keyword ~azure.storage.queue.RetentionPolicy retention_policy: Required.
The retention policy for the metrics.
"""
def __init__(self, **kwargs):
self.version = kwargs.get('version', u'1.0')
self.enabled = kwargs.get('enabled', False)
self.include_apis = kwargs.get('include_apis')
self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
@classmethod
def _from_generated(cls, generated):
if not generated:
return cls()
return cls(
version=generated.version,
enabled=generated.enabled,
include_apis=generated.include_apis,
retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access
)
class RetentionPolicy(GeneratedRetentionPolicy):
"""The retention policy which determines how long the associated data should
persist.
All required parameters must be populated in order to send to Azure.
:param bool enabled: Required. Indicates whether a retention policy is enabled
for the storage service.
:param int days: Indicates the number of days that metrics or logging or
soft-deleted data should be retained. All data older than this value will
be deleted.
"""
def __init__(self, enabled=False, days=None):
self.enabled = enabled
self.days = days
if self.enabled and (self.days is None):
raise ValueError("If policy is enabled, 'days' must be specified.")
@classmethod
def _from_generated(cls, generated):
if not generated:
return cls()
return cls(
enabled=generated.enabled,
days=generated.days,
)
class CorsRule(GeneratedCorsRule):
"""CORS is an HTTP feature that enables a web application running under one
domain to access resources in another domain. Web browsers implement a
security restriction known as same-origin policy that prevents a web page
from calling APIs in a different domain; CORS provides a secure way to
allow one domain (the origin domain) to call APIs in another domain.
All required parameters must be populated in order to send to Azure.
:param list(str) allowed_origins:
A list of origin domains that will be allowed via CORS, or "*" to allow
all domains. The list must contain at least one entry. Limited to 64
origin domains. Each allowed origin can have up to 256 characters.
:param list(str) allowed_methods:
A list of HTTP methods that are allowed to be executed by the origin.
The list must contain at least one entry. For Azure Storage,
permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
:keyword int max_age_in_seconds:
The number of seconds that the client/browser should cache a
pre-flight response.
:keyword list(str) exposed_headers:
Defaults to an empty list. A list of response headers to expose to CORS
clients. Limited to 64 defined headers and two prefixed headers. Each
header can be up to 256 characters.
:keyword list(str) allowed_headers:
Defaults to an empty list. A list of headers allowed to be part of
the cross-origin request. Limited to 64 defined headers and 2 prefixed
headers. Each header can be up to 256 characters.
"""
def __init__(self, allowed_origins, allowed_methods, **kwargs):
self.allowed_origins = ','.join(allowed_origins)
self.allowed_methods = ','.join(allowed_methods)
self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
@classmethod
def _from_generated(cls, generated):
return cls(
[generated.allowed_origins],
[generated.allowed_methods],
allowed_headers=[generated.allowed_headers],
exposed_headers=[generated.exposed_headers],
max_age_in_seconds=generated.max_age_in_seconds,
)
class AccessPolicy(GenAccessPolicy):
"""Access Policy class used by the set and get access policy methods.
A stored access policy can specify the start time, expiry time, and
permissions for the Shared Access Signatures with which it's associated.
Depending on how you want to control access to your resource, you can
specify all of these parameters within the stored access policy, and omit
them from the URL for the Shared Access Signature. Doing so permits you to
modify the associated signature's behavior at any time, as well as to revoke
it. Or you can specify one or more of the access policy parameters within
the stored access policy, and the others on the URL. Finally, you can
specify all of the parameters on the URL. In this case, you can use the
stored access policy to revoke the signature, but not to modify its behavior.
Together the Shared Access Signature and the stored access policy must
include all fields required to authenticate the signature. If any required
fields are missing, the request will fail. Likewise, if a field is specified
both in the Shared Access Signature URL and in the stored access policy, the
request will fail with status code 400 (Bad Request).
:param str permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: ~datetime.datetime or str
"""
def __init__(self, permission=None, expiry=None, start=None):
self.start = start
self.expiry = expiry
self.permission = permission
class QueueMessage(DictMixin):
"""Represents a queue message.
:ivar str id:
A GUID value assigned to the message by the Queue service that
identifies the message in the queue. This value may be used together
with the value of pop_receipt to delete a message from the queue after
it has been retrieved with the receive messages operation.
:ivar date inserted_on:
A UTC date value representing the time the message was inserted.
:ivar date expires_on:
A UTC date value representing the time the message expires.
:ivar int dequeue_count:
Begins with a value of 1 the first time the message is received. This
value is incremented each time the message is subsequently received.
:param obj content:
The message content. Type is determined by the decode_function set on
the service. Default is str.
:ivar str pop_receipt:
A receipt str which can be used together with the message_id element to
delete a message from the queue after it has been retrieved with the receive
messages operation. Only returned by receive messages operations. Set to
None for peek messages.
:ivar date next_visible_on:
A UTC date value representing the time the message will next be visible.
Only returned by receive messages operations. Set to None for peek messages.
"""
def __init__(self, content=None):
self.id = None
self.inserted_on = None
self.expires_on = None
self.dequeue_count = None
self.content = content
self.pop_receipt = None
self.next_visible_on = None
@classmethod
def _from_generated(cls, generated):
message = cls(content=generated.message_text)
message.id = generated.message_id
message.inserted_on = generated.insertion_time
message.expires_on = generated.expiration_time
message.dequeue_count = generated.dequeue_count
if hasattr(generated, 'pop_receipt'):
message.pop_receipt = generated.pop_receipt
message.next_visible_on = generated.time_next_visible
return message
class MessagesPaged(PageIterator):
"""An iterable of Queue Messages.
:param callable command: Function to retrieve the next page of items.
:param int results_per_page: The maximum number of messages to retrieve per
call.
"""
def __init__(self, command, results_per_page=None, continuation_token=None):
if continuation_token is not None:
raise ValueError("This operation does not support continuation token")
super(MessagesPaged, self).__init__(
self._get_next_cb,
self._extract_data_cb,
)
self._command = command
self.results_per_page = results_per_page
def _get_next_cb(self, continuation_token):
try:
return self._command(number_of_messages=self.results_per_page)
except HttpResponseError as error:
process_storage_error(error)
def _extract_data_cb(self, messages): # pylint: disable=no-self-use
# There is no concept of continuation token, so raising on my own condition
if not messages:
raise StopIteration("End of paging")
return "TOKEN_IGNORED", [QueueMessage._from_generated(q) for q in messages] # pylint: disable=protected-access
class QueueProperties(DictMixin):
"""Queue Properties.
:ivar str name: The name of the queue.
:keyword dict(str,str) metadata:
A dict containing name-value pairs associated with the queue as metadata.
This var is set to None unless the include=metadata param was included
for the list queues operation. If this parameter was specified but the
queue has no metadata, the metadata will be set to an empty dictionary.
"""
def __init__(self, **kwargs):
self.name = None
self.metadata = kwargs.get('metadata')
self.approximate_message_count = kwargs.get('x-ms-approximate-messages-count')
@classmethod
def _from_generated(cls, generated):
props = cls()
props.name = generated.name
props.metadata = generated.metadata
return props
class QueuePropertiesPaged(PageIterator):
"""An iterable of Queue properties.
:ivar str service_endpoint: The service URL.
:ivar str prefix: A queue name prefix being used to filter the list.
:ivar str marker: The continuation token of the current page of results.
:ivar int results_per_page: The maximum number of results retrieved per API call.
:ivar str next_marker: The continuation token to retrieve the next page of results.
:ivar str location_mode: The location mode being used to list results. The available
options include "primary" and "secondary".
:param callable command: Function to retrieve the next page of items.
:param str prefix: Filters the results to return only queues whose names
begin with the specified prefix.
:param int results_per_page: The maximum number of queue names to retrieve per
call.
:param str continuation_token: An opaque continuation token.
"""
def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
super(QueuePropertiesPaged, self).__init__(
self._get_next_cb,
self._extract_data_cb,
continuation_token=continuation_token or ""
)
self._command = command
self.service_endpoint = None
self.prefix = prefix
self.marker = None
self.results_per_page = results_per_page
self.location_mode = None
def _get_next_cb(self, continuation_token):
try:
return self._command(
marker=continuation_token or None,
maxresults=self.results_per_page,
cls=return_context_and_deserialized,
use_location=self.location_mode)
except HttpResponseError as error:
process_storage_error(error)
def _extract_data_cb(self, get_next_return):
self.location_mode, self._response = get_next_return
self.service_endpoint = self._response.service_endpoint
self.prefix = self._response.prefix
self.marker = self._response.marker
self.results_per_page = self._response.max_results
props_list = [QueueProperties._from_generated(q) for q in self._response.queue_items] # pylint: disable=protected-access
return self._response.next_marker or None, props_list
class QueueSasPermissions(object):
"""QueueSasPermissions class to be used with the
:func:`~azure.storage.queue.generate_queue_sas` function and for the AccessPolicies used with
:func:`~azure.storage.queue.QueueClient.set_queue_access_policy`.
:param bool read:
Read metadata and properties, including message count. Peek at messages.
:param bool add:
Add messages to the queue.
:param bool update:
Update messages in the queue. Note: Use the Process permission with
Update so you can first get the message you want to update.
:param bool process:
Get and delete messages from the queue.
"""
def __init__(self, read=False, add=False, update=False, process=False):
self.read = read
self.add = add
self.update = update
self.process = process
self._str = (('r' if self.read else '') +
('a' if self.add else '') +
('u' if self.update else '') +
('p' if self.process else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a QueueSasPermissions from a string.
To specify read, add, update, or process permissions you need only to
include the first letter of the word in the string. E.g. For read and
update permissions, you would provide a string "ru".
:param str permission: The string which dictates the
read, add, update, or process permissions.
:return: A QueueSasPermissions object
:rtype: ~azure.storage.queue.QueueSasPermissions
"""
p_read = 'r' in permission
p_add = 'a' in permission
p_update = 'u' in permission
p_process = 'p' in permission
parsed = cls(p_read, p_add, p_update, p_process)
return parsed
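# Hedged sketch (editor addition): round-tripping a permission string through
# QueueSasPermissions.from_string and str(); only the class defined above is
# exercised, no storage account is needed.
if __name__ == '__main__':
    _perms = QueueSasPermissions.from_string('ru')
    assert _perms.read and _perms.update
    assert not _perms.add and not _perms.process
    assert str(_perms) == 'ru'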
def service_stats_deserialize(generated):
"""Deserialize a ServiceStats objects into a dict.
"""
return {
'geo_replication': {
'status': generated.geo_replication.status,
'last_sync_time': generated.geo_replication.last_sync_time,
}
}
def service_properties_deserialize(generated):
"""Deserialize a ServiceProperties objects into a dict.
"""
return {
'analytics_logging': QueueAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access
'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access
'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access
'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access
}
|
mit
|
eviljeff/zamboni
|
mkt/webapps/utils.py
|
3
|
4596
|
# -*- coding: utf-8 -*-
import json
from django.core.cache import cache
from django.core.files.storage import default_storage as storage
import commonware.log
import lib.iarc
import mkt
from mkt.site.utils import JSONEncoder
from mkt.translations.utils import find_language
log = commonware.log.getLogger('z.webapps')
def get_locale_properties(manifest, property, default_locale=None):
locale_dict = {}
for locale in manifest.get('locales', {}):
if property in manifest['locales'][locale]:
locale_dict[locale] = manifest['locales'][locale][property]
# Add in the default locale name.
default = manifest.get('default_locale') or default_locale
root_property = manifest.get(property)
if default and root_property:
locale_dict[default] = root_property
return locale_dict
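# Hedged sketch (editor addition): get_locale_properties collects one property
# across the manifest's "locales" block and folds the root value in under the
# default locale. The manifest below is a made-up minimal example.
if __name__ == '__main__':
    _manifest = {
        'name': 'My App',
        'default_locale': 'en-US',
        'locales': {'es': {'name': 'Mi App'}, 'fr': {'description': '...'}},
    }
    assert get_locale_properties(_manifest, 'name') == {
        'es': 'Mi App', 'en-US': 'My App'}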
def get_supported_locales(manifest):
"""
Returns a list of locales found in the "locales" property of the manifest.
This will convert locales found in the SHORTER_LANGUAGES setting to their
full locale. It will also remove locales not found in AMO_LANGUAGES.
Note: The default_locale is not included.
"""
return sorted(filter(None, map(find_language, set(
manifest.get('locales', {}).keys()))))
def dehydrate_content_rating(rating):
"""
{body.id, rating.id} to translated rating.label.
"""
try:
body = mkt.ratingsbodies.dehydrate_ratings_body(
mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])])
except TypeError:
# Legacy ES format (bug 943371).
return {}
rating = mkt.ratingsbodies.dehydrate_rating(
body.ratings[int(rating['rating'])])
return rating.label
def dehydrate_content_ratings(content_ratings):
"""Dehydrate an object of content ratings from rating IDs to dict."""
for body in content_ratings or {}:
# Dehydrate all content ratings.
content_ratings[body] = dehydrate_content_rating(content_ratings[body])
return content_ratings
def iarc_get_app_info(app):
client = lib.iarc.client.get_iarc_client('services')
iarc = app.iarc_info
iarc_id = iarc.submission_id
iarc_code = iarc.security_code
# Generate XML.
xml = lib.iarc.utils.render_xml(
'get_app_info.xml',
{'submission_id': iarc_id, 'security_code': iarc_code})
# Process that shizzle.
resp = client.Get_App_Info(XMLString=xml)
# Handle response.
return lib.iarc.utils.IARC_XML_Parser().parse_string(resp)
def get_cached_minifest(app_or_langpack, force=False):
"""
Create a "mini" manifest for a packaged app or langpack and cache it (Call
with `force=True` to bypass existing cache).
Note that platform expects name/developer/locales to match the data from
the real manifest in the package, so it needs to be read from the zip file.
"""
cache_key = '{0}:{1}:manifest'.format(app_or_langpack._meta.model_name,
app_or_langpack.pk)
if not force:
data = cache.get(cache_key)
if data:
return data
sign_if_packaged = getattr(app_or_langpack, 'sign_if_packaged', None)
if sign_if_packaged is None:
# Langpacks are already signed when we generate the manifest and have
# a file_path attribute.
signed_file_path = app_or_langpack.file_path
else:
# sign_if_packaged() will return the signed path. But to call it, we
# need a current version. If we don't have one, return an empty
# manifest, bypassing caching so that when a version does become
# available it can get picked up correctly.
if not app_or_langpack.current_version:
return '{}'
signed_file_path = sign_if_packaged()
manifest = app_or_langpack.get_manifest_json()
package_path = app_or_langpack.get_package_path()
data = {
'size': storage.size(signed_file_path),
'package_path': package_path,
}
if hasattr(app_or_langpack, 'current_version'):
data['version'] = app_or_langpack.current_version.version
data['release_notes'] = app_or_langpack.current_version.releasenotes
else:
# LangPacks have no version model, the version number is an attribute
# and they don't have release notes.
data['version'] = app_or_langpack.version
for key in ['developer', 'icons', 'locales', 'name']:
if key in manifest:
data[key] = manifest[key]
data = json.dumps(data, cls=JSONEncoder)
cache.set(cache_key, data, None)
return data
|
bsd-3-clause
|
coding-happily/FlaskTest
|
venv/lib/python3.5/site-packages/werkzeug/contrib/fixers.py
|
259
|
10183
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.fixers
~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module includes various helpers that fix bugs in web servers. They may
be necessary for some versions of a buggy web server but not others. We try
to stay up to date with the status of these bugs as well as possible, but you
have to check yourself whether they fix the problem you encounter.
If you notice bugs in webservers not fixed in this module consider
contributing a patch.
:copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from werkzeug.http import parse_options_header, parse_cache_control_header, \
parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl
class CGIRootFix(object):
"""Wrap the application in this middleware if you are using FastCGI or CGI
and you have problems with your app root being set to the cgi script's path
instead of the path users are going to visit.
.. versionchanged:: 0.9
Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.
:param app: the WSGI application
:param app_root: Defaulting to ``'/'``, you can set this to something else
if your app is mounted somewhere else.
"""
def __init__(self, app, app_root='/'):
self.app = app
self.app_root = app_root
def __call__(self, environ, start_response):
# only set PATH_INFO for older versions of Lighty or if no
# server software is provided. That's because the test was
# added in newer Werkzeug versions and we don't want to break
# people's code if they are using this fixer in a test that
# does not set the SERVER_SOFTWARE key.
if 'SERVER_SOFTWARE' not in environ or \
environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
environ.get('PATH_INFO', '')
environ['SCRIPT_NAME'] = self.app_root.strip('/')
return self.app(environ, start_response)
# backwards compatibility
LighttpdCGIRootFix = CGIRootFix
class PathInfoFromRequestUriFix(object):
"""On windows environment variables are limited to the system charset
which makes it impossible to store the `PATH_INFO` variable in the
environment without loss of information on some systems.
This is for example a problem for CGI scripts on a Windows Apache.
This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
`REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the
fix can only be applied if the webserver supports either of these
variables.
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
if key not in environ:
continue
request_uri = unquote(environ[key])
script_name = unquote(environ.get('SCRIPT_NAME', ''))
if request_uri.startswith(script_name):
environ['PATH_INFO'] = request_uri[len(script_name):] \
.split('?', 1)[0]
break
return self.app(environ, start_response)
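# Hedged sketch (editor addition): with REQUEST_URI available, the fixer
# rebuilds PATH_INFO from the unquoted URI minus SCRIPT_NAME and the query
# string. The tiny WSGI app and environ below are stand-ins for illustration.
if __name__ == '__main__':
    def _echo_path(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [environ['PATH_INFO'].encode('latin-1')]
    _fixed = PathInfoFromRequestUriFix(_echo_path)
    _environ = {'SCRIPT_NAME': '/app', 'REQUEST_URI': '/app/hello%20world?x=1'}
    assert _fixed(_environ, lambda status, headers: None) == [b'/hello world']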
class ProxyFix(object):
"""This middleware can be applied to add HTTP proxy support to an
application that was not designed with HTTP proxies in mind. It
sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers. While
Werkzeug-based applications already can use
:py:func:`werkzeug.wsgi.get_host` to retrieve the current host even if
behind proxy setups, this middleware can be used for applications which
access the WSGI environment directly.
If you have more than one proxy server in front of your app, set
`num_proxies` accordingly.
Do not use this middleware in non-proxy setups for security reasons.
The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
`werkzeug.proxy_fix.orig_http_host`.
:param app: the WSGI application
:param num_proxies: the number of proxy servers in front of the app.
"""
def __init__(self, app, num_proxies=1):
self.app = app
self.num_proxies = num_proxies
def get_remote_addr(self, forwarded_for):
"""Selects the new remote addr from the given list of ips in
X-Forwarded-For. By default it picks the one that the `num_proxies`
proxy server provides. Before 0.9 it would always pick the first.
.. versionadded:: 0.8
"""
if len(forwarded_for) >= self.num_proxies:
return forwarded_for[-1 * self.num_proxies]
def __call__(self, environ, start_response):
getter = environ.get
forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '')
forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
environ.update({
'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'),
'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'),
'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST')
})
forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x]
remote_addr = self.get_remote_addr(forwarded_for)
if remote_addr is not None:
environ['REMOTE_ADDR'] = remote_addr
if forwarded_host:
environ['HTTP_HOST'] = forwarded_host
if forwarded_proto:
environ['wsgi.url_scheme'] = forwarded_proto
return self.app(environ, start_response)
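# Hedged sketch (editor addition): wrapping a WSGI app in ProxyFix so that
# REMOTE_ADDR and HTTP_HOST reflect the X-Forwarded-* headers set by a single
# trusted proxy. The app and environ below are stand-ins for illustration.
if __name__ == '__main__':
    def _remote_addr_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [environ['REMOTE_ADDR'].encode('latin-1')]
    _proxied = ProxyFix(_remote_addr_app, num_proxies=1)
    _environ = {
        'REMOTE_ADDR': '10.0.0.1',
        'HTTP_HOST': 'internal',
        'wsgi.url_scheme': 'http',
        'HTTP_X_FORWARDED_FOR': '203.0.113.7',
        'HTTP_X_FORWARDED_HOST': 'example.org',
        'HTTP_X_FORWARDED_PROTO': 'https',
    }
    assert _proxied(_environ, lambda status, headers: None) == [b'203.0.113.7']
    assert _environ['HTTP_HOST'] == 'example.org'
    assert _environ['wsgi.url_scheme'] == 'https'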
class HeaderRewriterFix(object):
"""This middleware can remove response headers and add others. This
is for example useful to remove the `Date` header from responses if you
are using a server that adds that header no matter whether it is already
present, or to add `X-Powered-By` headers::
app = HeaderRewriterFix(app, remove_headers=['Date'],
add_headers=[('X-Powered-By', 'WSGI')])
:param app: the WSGI application
:param remove_headers: a sequence of header keys that should be
removed.
:param add_headers: a sequence of ``(key, value)`` tuples that should
be added.
"""
def __init__(self, app, remove_headers=None, add_headers=None):
self.app = app
self.remove_headers = set(x.lower() for x in (remove_headers or ()))
self.add_headers = list(add_headers or ())
def __call__(self, environ, start_response):
def rewriting_start_response(status, headers, exc_info=None):
new_headers = []
for key, value in headers:
if key.lower() not in self.remove_headers:
new_headers.append((key, value))
new_headers += self.add_headers
return start_response(status, new_headers, exc_info)
return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
"""This middleware fixes a couple of bugs with Microsoft Internet
Explorer. Currently the following fixes are applied:
- removing of `Vary` headers for unsupported mimetypes which
causes troubles with caching. Can be disabled by passing
``fix_vary=False`` to the constructor.
see: http://support.microsoft.com/kb/824847/en-us
- removes offending headers to work around caching bugs in
Internet Explorer if `Content-Disposition` is set. Can be
disabled by passing ``fix_attach=False`` to the constructor.
If it does not detect affected Internet Explorer versions it won't touch
the request / response.
"""
# This code was inspired by Django fixers for the same bugs. The
# fix_vary and fix_attach fixers were originally implemented in Django
# by Michael Axiak and are available as part of the Django project:
# http://code.djangoproject.com/ticket/4148
def __init__(self, app, fix_vary=True, fix_attach=True):
self.app = app
self.fix_vary = fix_vary
self.fix_attach = fix_attach
def fix_headers(self, environ, headers, status=None):
if self.fix_vary:
header = headers.get('content-type', '')
mimetype, options = parse_options_header(header)
if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
headers.pop('vary', None)
if self.fix_attach and 'content-disposition' in headers:
pragma = parse_set_header(headers.get('pragma', ''))
pragma.discard('no-cache')
header = pragma.to_header()
if not header:
headers.pop('pragma', '')
else:
headers['Pragma'] = header
header = headers.get('cache-control', '')
if header:
cc = parse_cache_control_header(header,
cls=ResponseCacheControl)
cc.no_cache = None
cc.no_store = False
header = cc.to_header()
if not header:
headers.pop('cache-control', '')
else:
headers['Cache-Control'] = header
def run_fixed(self, environ, start_response):
def fixing_start_response(status, headers, exc_info=None):
headers = Headers(headers)
self.fix_headers(environ, headers, status)
return start_response(status, headers.to_wsgi_list(), exc_info)
return self.app(environ, fixing_start_response)
def __call__(self, environ, start_response):
ua = UserAgent(environ)
if ua.browser != 'msie':
return self.app(environ, start_response)
return self.run_fixed(environ, start_response)
|
mit
|
runcom/flocker
|
flocker/provision/_common.py
|
1
|
1492
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
from characteristic import attributes, Attribute
from twisted.python.constants import Values, ValueConstant
@attributes([
Attribute('version', default_value=None),
Attribute('os_version', default_value=None),
Attribute('branch', default_value=None),
Attribute('build_server', default_value="http://build.clusterhq.com/"),
])
class PackageSource(object):
"""
Source for the installation of a flocker package.
:ivar bytes version: The version of flocker to install. If not specified,
install the most recent version.
:ivar bytes os_version: The version of the OS package of flocker to
install. If not specified, install the most recent version.
:ivar bytes branch: The branch from which to install flocker.
If not specified, install from the release repository.
:ivar bytes build_server: The build server to install from.
Only meaningful if a branch is specified.
"""
class Variants(Values):
"""
Provisioning variants for wider acceptance testing coverage.
:ivar DISTRO_TESTING: Install packages from the distribution's
proposed-updates repository.
:ivar DOCKER_HEAD: Install docker from a repository tracking docker HEAD.
:ivar ZFS_TESTING: Install latest zfs build.
"""
DISTRO_TESTING = ValueConstant("distro-testing")
DOCKER_HEAD = ValueConstant("docker-head")
ZFS_TESTING = ValueConstant("zfs-testing")
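
# Hedged sketch (editor addition): PackageSource is a `characteristic`
# attribute class, so every field is an optional keyword argument with the
# defaults declared above, and Variants members are twisted ValueConstants.
if __name__ == '__main__':
    _source = PackageSource(branch='master')
    assert _source.version is None
    assert _source.build_server == "http://build.clusterhq.com/"
    assert Variants.DOCKER_HEAD.value == "docker-head"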
|
apache-2.0
|
zhanqxun/cv_fish
|
PIL/ImageCms.py
|
4
|
37120
|
# The Python Imaging Library.
# $Id$
# Optional color management support, based on Kevin Cazabon's PyCMS
# library.
# History:
# 2009-03-08 fl Added to PIL.
# Copyright (C) 2002-2003 Kevin Cazabon
# Copyright (c) 2009 by Fredrik Lundh
# Copyright (c) 2013 by Eric Soroos
# See the README file for information on usage and redistribution. See
# below for the original description.
from __future__ import print_function
import sys
from PIL import Image
try:
from PIL import _imagingcms
except ImportError as ex:
# Allow error import for doc purposes, but error out when accessing
# anything in core.
from PIL._util import deferred_error
_imagingcms = deferred_error(ex)
from PIL._util import isStringType
DESCRIPTION = """
pyCMS
a Python / PIL interface to the littleCMS ICC Color Management System
Copyright (C) 2002-2003 Kevin Cazabon
[email protected]
http://www.cazabon.com
pyCMS home page: http://www.cazabon.com/pyCMS
littleCMS home page: http://www.littlecms.com
(littleCMS is Copyright (C) 1998-2001 Marti Maria)
Originally released under LGPL. Graciously donated to PIL in
March 2009, for distribution under the standard PIL license
The pyCMS.py module provides a "clean" interface between Python/PIL and
pyCMSdll, taking care of some of the more complex handling of the direct
pyCMSdll functions, as well as error-checking and making sure that all
relevant data is kept together.
While it is possible to call pyCMSdll functions directly, it's not highly
recommended.
Version History:
1.0.0 pil Oct 2013 Port to LCMS 2.
0.1.0 pil mod March 10, 2009
Renamed display profile to proof profile. The proof
profile is the profile of the device that is being
simulated, not the profile of the device which is
actually used to display/print the final simulation
(that'd be the output profile) - also see LCMSAPI.txt
input colorspace -> using 'renderingIntent' -> proof
colorspace -> using 'proofRenderingIntent' -> output
colorspace
Added LCMS FLAGS support.
Added FLAGS["SOFTPROOFING"] as default flag for
buildProofTransform (otherwise the proof profile/intent
would be ignored).
0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms
0.0.2 alpha Jan 6, 2002
Added try/except statements around type() checks of
potential CObjects... Python won't let you use type()
on them, and raises a TypeError (stupid, if you ask
me!)
Added buildProofTransformFromOpenProfiles() function.
Additional fixes in DLL, see DLL code for details.
0.0.1 alpha first public release, Dec. 26, 2002
Known to-do list with current version (of Python interface, not pyCMSdll):
none
"""
VERSION = "1.0.0 pil"
# --------------------------------------------------------------------.
core = _imagingcms
#
# intent/direction values
INTENT_PERCEPTUAL = 0
INTENT_RELATIVE_COLORIMETRIC = 1
INTENT_SATURATION = 2
INTENT_ABSOLUTE_COLORIMETRIC = 3
DIRECTION_INPUT = 0
DIRECTION_OUTPUT = 1
DIRECTION_PROOF = 2
#
# flags
FLAGS = {
"MATRIXINPUT": 1,
"MATRIXOUTPUT": 2,
"MATRIXONLY": (1 | 2),
"NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot
# Don't create prelinearization tables on precalculated transforms
# (internal use):
"NOPRELINEARIZATION": 16,
"GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink)
"NOTCACHE": 64, # Inhibit 1-pixel cache
"NOTPRECALC": 256,
"NULLTRANSFORM": 512, # Don't transform anyway
"HIGHRESPRECALC": 1024, # Use more memory to give better accuracy
"LOWRESPRECALC": 2048, # Use less memory to minimize resources
"WHITEBLACKCOMPENSATION": 8192,
"BLACKPOINTCOMPENSATION": 8192,
"GAMUTCHECK": 4096, # Out of Gamut alarm
"SOFTPROOFING": 16384, # Do softproofing
"PRESERVEBLACK": 32768, # Black preservation
"NODEFAULTRESOURCEDEF": 16777216, # CRD special
"GRIDPOINTS": lambda n: ((n) & 0xFF) << 16 # Gridpoints
}
_MAX_FLAG = 0
for flag in FLAGS.values():
if isinstance(flag, int):
_MAX_FLAG = _MAX_FLAG | flag
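# Hedged sketch (editor addition): FLAGS values are plain integers that can be
# OR-ed together before being passed as the `flags` argument of the transform
# builders below; GRIDPOINTS is the one callable entry and encodes its count
# into the high bits.
if __name__ == '__main__':
    _flags = FLAGS["SOFTPROOFING"] | FLAGS["BLACKPOINTCOMPENSATION"]
    _flags |= FLAGS["GRIDPOINTS"](16)   # 16 grid points per dimension
    assert _flags == 16384 | 8192 | (16 << 16)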
# --------------------------------------------------------------------.
# Experimental PIL-level API
# --------------------------------------------------------------------.
##
# Profile.
class ImageCmsProfile(object):
def __init__(self, profile):
"""
:param profile: Either a string representing a filename,
a file like object containing a profile or a
low-level profile object
"""
if isStringType(profile):
self._set(core.profile_open(profile), profile)
elif hasattr(profile, "read"):
self._set(core.profile_frombytes(profile.read()))
else:
self._set(profile) # assume it's already a profile
def _set(self, profile, filename=None):
self.profile = profile
self.filename = filename
if profile:
self.product_name = None # profile.product_name
self.product_info = None # profile.product_info
else:
self.product_name = None
self.product_info = None
def tobytes(self):
"""
Returns the profile in a format suitable for embedding in
saved images.
:returns: a bytes object containing the ICC profile.
"""
return core.profile_tobytes(self.profile)
class ImageCmsTransform(Image.ImagePointHandler):
"""
Transform. This can be used with the procedural API, or with the standard
Image.point() method.
Will return the output profile in the output.info['icc_profile'].
"""
def __init__(self, input, output, input_mode, output_mode,
intent=INTENT_PERCEPTUAL, proof=None,
proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, flags=0):
if proof is None:
self.transform = core.buildTransform(
input.profile, output.profile,
input_mode, output_mode,
intent,
flags
)
else:
self.transform = core.buildProofTransform(
input.profile, output.profile, proof.profile,
input_mode, output_mode,
intent, proof_intent,
flags
)
# Note: inputMode and outputMode are for pyCMS compatibility only
self.input_mode = self.inputMode = input_mode
self.output_mode = self.outputMode = output_mode
self.output_profile = output
def point(self, im):
return self.apply(im)
def apply(self, im, imOut=None):
im.load()
if imOut is None:
imOut = Image.new(self.output_mode, im.size, None)
self.transform.apply(im.im.id, imOut.im.id)
imOut.info['icc_profile'] = self.output_profile.tobytes()
return imOut
def apply_in_place(self, im):
im.load()
if im.mode != self.output_mode:
raise ValueError("mode mismatch") # wrong output mode
self.transform.apply(im.im.id, im.im.id)
im.info['icc_profile'] = self.output_profile.tobytes()
return im
def get_display_profile(handle=None):
""" (experimental) Fetches the profile for the current display device.
:returns: None if the profile is not known.
"""
if sys.platform == "win32":
from PIL import ImageWin
if isinstance(handle, ImageWin.HDC):
profile = core.get_display_profile_win32(handle, 1)
else:
profile = core.get_display_profile_win32(handle or 0)
else:
try:
get = _imagingcms.get_display_profile
except AttributeError:
return None
else:
profile = get()
return ImageCmsProfile(profile)
# --------------------------------------------------------------------.
# pyCMS compatible layer
# --------------------------------------------------------------------.
class PyCMSError(Exception):
""" (pyCMS) Exception class.
This is used for all errors in the pyCMS API. """
pass
def profileToProfile(
im, inputProfile, outputProfile, renderingIntent=INTENT_PERCEPTUAL,
outputMode=None, inPlace=0, flags=0):
"""
(pyCMS) Applies an ICC transformation to a given image, mapping from
inputProfile to outputProfile.
If the input or output profiles specified are not valid filenames, a
PyCMSError will be raised. If inPlace == TRUE and outputMode != im.mode,
a PyCMSError will be raised. If an error occurs during application of
the profiles, a PyCMSError will be raised. If outputMode is not a mode
supported by the outputProfile (or by pyCMS), a PyCMSError will be
raised.
This function applies an ICC transformation to im from inputProfile's
color space to outputProfile's color space using the specified rendering
intent to decide how to handle out-of-gamut colors.
OutputMode can be used to specify that a color mode conversion is to
be done using these profiles, but the specified profiles must be able
to handle that mode. I.e., if converting im from RGB to CMYK using
profiles, the input profile must handle RGB data, and the output
profile must handle CMYK data.
:param im: An open PIL image object (i.e. Image.new(...) or
Image.open(...), etc.)
:param inputProfile: String, as a valid filename path to the ICC input
profile you wish to use for this image, or a profile object
:param outputProfile: String, as a valid filename path to the ICC output
profile you wish to use for this image, or a profile object
:param renderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for the transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and what
they do.
:param outputMode: A valid PIL mode for the output image (i.e. "RGB",
"CMYK", etc.). Note: if rendering the image "inPlace", outputMode
MUST be the same mode as the input, or omitted completely. If
omitted, the outputMode will be the same as the mode of the input
image (im.mode)
:param inPlace: Boolean (1 = True, None or 0 = False). If True, the
original image is modified in-place, and None is returned. If False
(default), a new Image object is returned with the transform applied.
:param flags: Integer (0-...) specifying additional flags
:returns: Either None or a new PIL image object, depending on value of
inPlace
:exception PyCMSError:
"""
if outputMode is None:
outputMode = im.mode
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
raise PyCMSError(
"flags must be an integer between 0 and %s" + _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
transform = ImageCmsTransform(
inputProfile, outputProfile, im.mode, outputMode,
renderingIntent, flags=flags
)
if inPlace:
transform.apply_in_place(im)
imOut = None
else:
imOut = transform.apply(im)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
return imOut
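# Illustrative usage sketch (editor's addition, not part of the original
# module): convert an RGB image to LAB with profiles created on the fly.
# "example.png" is a hypothetical input file, and LAB output support depends
# on the PIL/Pillow build in use.
def _profileToProfile_example():
    im = Image.open("example.png").convert("RGB")
    srgb = createProfile("sRGB")
    lab = createProfile("LAB")
    # A new LAB-mode image is returned; the original image is left untouched.
    return profileToProfile(im, srgb, lab, outputMode="LAB")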
def getOpenProfile(profileFilename):
"""
(pyCMS) Opens an ICC profile file.
The PyCMSProfile object can be passed back into pyCMS for use in creating
transforms and such (as in ImageCms.buildTransformFromOpenProfiles()).
    If profileFilename is not a valid filename for an ICC profile, a PyCMSError
will be raised.
:param profileFilename: String, as a valid filename path to the ICC profile
you wish to open, or a file-like object.
:returns: A CmsProfile class object.
:exception PyCMSError:
"""
try:
return ImageCmsProfile(profileFilename)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def buildTransform(
inputProfile, outputProfile, inMode, outMode,
renderingIntent=INTENT_PERCEPTUAL, flags=0):
"""
(pyCMS) Builds an ICC transform mapping from the inputProfile to the
outputProfile. Use applyTransform to apply the transform to a given
image.
If the input or output profiles specified are not valid filenames, a
PyCMSError will be raised. If an error occurs during creation of the
transform, a PyCMSError will be raised.
    If inMode or outMode is not a mode supported by the outputProfile (or
    by pyCMS), a PyCMSError will be raised.
This function builds and returns an ICC transform from the inputProfile
to the outputProfile using the renderingIntent to determine what to do
with out-of-gamut colors. It will ONLY work for converting images that
are in inMode to images that are in outMode color format (PIL mode,
i.e. "RGB", "RGBA", "CMYK", etc.).
Building the transform is a fair part of the overhead in
ImageCms.profileToProfile(), so if you're planning on converting multiple
images using the same input/output settings, this can save you time.
Once you have a transform object, it can be used with
ImageCms.applyProfile() to convert images without the need to re-compute
the lookup table for the transform.
The reason pyCMS returns a class object rather than a handle directly
to the transform is that it needs to keep track of the PIL input/output
modes that the transform is meant for. These attributes are stored in
the "inMode" and "outMode" attributes of the object (which can be
manually overridden if you really want to, but I don't know of any
time that would be of use, or would even work).
:param inputProfile: String, as a valid filename path to the ICC input
profile you wish to use for this transform, or a profile object
:param outputProfile: String, as a valid filename path to the ICC output
profile you wish to use for this transform, or a profile object
:param inMode: String, as a valid PIL mode that the appropriate profile
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
:param outMode: String, as a valid PIL mode that the appropriate profile
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
:param renderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for the transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and what
they do.
:param flags: Integer (0-...) specifying additional flags
:returns: A CmsTransform class object.
:exception PyCMSError:
"""
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
raise PyCMSError(
"flags must be an integer between 0 and %s" + _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
return ImageCmsTransform(
inputProfile, outputProfile, inMode, outMode,
renderingIntent, flags=flags)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def buildProofTransform(
inputProfile, outputProfile, proofProfile, inMode, outMode,
renderingIntent=INTENT_PERCEPTUAL,
proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC,
flags=FLAGS["SOFTPROOFING"]):
"""
(pyCMS) Builds an ICC transform mapping from the inputProfile to the
outputProfile, but tries to simulate the result that would be
obtained on the proofProfile device.
If the input, output, or proof profiles specified are not valid
filenames, a PyCMSError will be raised.
If an error occurs during creation of the transform, a PyCMSError will
be raised.
    If inMode or outMode is not a mode supported by the outputProfile
    (or by pyCMS), a PyCMSError will be raised.
This function builds and returns an ICC transform from the inputProfile
to the outputProfile, but tries to simulate the result that would be
obtained on the proofProfile device using renderingIntent and
proofRenderingIntent to determine what to do with out-of-gamut
colors. This is known as "soft-proofing". It will ONLY work for
converting images that are in inMode to images that are in outMode
color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.).
Usage of the resulting transform object is exactly the same as with
ImageCms.buildTransform().
Proof profiling is generally used when using an output device to get a
good idea of what the final printed/displayed image would look like on
the proofProfile device when it's quicker and easier to use the
output device for judging color. Generally, this means that the
output device is a monitor, or a dye-sub printer (etc.), and the simulated
device is something more expensive, complicated, or time consuming
(making it difficult to make a real print for color judgement purposes).
Soft-proofing basically functions by adjusting the colors on the
output device to match the colors of the device being simulated. However,
when the simulated device has a much wider gamut than the output
device, you may obtain marginal results.
:param inputProfile: String, as a valid filename path to the ICC input
profile you wish to use for this transform, or a profile object
:param outputProfile: String, as a valid filename path to the ICC output
(monitor, usually) profile you wish to use for this transform, or a
profile object
:param proofProfile: String, as a valid filename path to the ICC proof
profile you wish to use for this transform, or a profile object
:param inMode: String, as a valid PIL mode that the appropriate profile
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
:param outMode: String, as a valid PIL mode that the appropriate profile
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
:param renderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for the input->proof (simulated) transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and what
they do.
:param proofRenderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for proof->output transform
            INTENT_PERCEPTUAL = 0 (ImageCms.INTENT_PERCEPTUAL)
            INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
            INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
            INTENT_ABSOLUTE_COLORIMETRIC = 3 (DEFAULT) (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and what
they do.
:param flags: Integer (0-...) specifying additional flags
:returns: A CmsTransform class object.
:exception PyCMSError:
"""
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
raise PyCMSError(
"flags must be an integer between 0 and %s" + _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
if not isinstance(proofProfile, ImageCmsProfile):
proofProfile = ImageCmsProfile(proofProfile)
return ImageCmsTransform(
inputProfile, outputProfile, inMode, outMode, renderingIntent,
proofProfile, proofRenderingIntent, flags)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
buildTransformFromOpenProfiles = buildTransform
buildProofTransformFromOpenProfiles = buildProofTransform
def applyTransform(im, transform, inPlace=0):
"""
(pyCMS) Applies a transform to a given image.
If im.mode != transform.inMode, a PyCMSError is raised.
If inPlace == TRUE and transform.inMode != transform.outMode, a
PyCMSError is raised.
    If im.mode, transform.inMode, or transform.outMode is not supported by
pyCMSdll or the profiles you used for the transform, a PyCMSError is
raised.
If an error occurs while the transform is being applied, a PyCMSError
is raised.
This function applies a pre-calculated transform (from
ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles())
to an image. The transform can be used for multiple images, saving
considerable calculation time if doing the same conversion multiple times.
If you want to modify im in-place instead of receiving a new image as
the return value, set inPlace to TRUE. This can only be done if
transform.inMode and transform.outMode are the same, because we can't
change the mode in-place (the buffer sizes for some modes are
different). The default behavior is to return a new Image object of
the same dimensions in mode transform.outMode.
:param im: A PIL Image object, and im.mode must be the same as the inMode
supported by the transform.
:param transform: A valid CmsTransform class object
:param inPlace: Bool (1 == True, 0 or None == False). If True, im is
modified in place and None is returned, if False, a new Image object
with the transform applied is returned (and im is not changed). The
default is False.
:returns: Either None, or a new PIL Image object, depending on the value of
inPlace. The profile will be returned in the image's
info['icc_profile'].
:exception PyCMSError:
"""
try:
if inPlace:
transform.apply_in_place(im)
imOut = None
else:
imOut = transform.apply(im)
except (TypeError, ValueError) as v:
raise PyCMSError(v)
return imOut
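# Illustrative sketch (editor's addition): build a transform once and re-use
# it for many images, as the buildTransform() docstring above recommends.
# `filenames`, `input_icc` and `output_icc` are hypothetical arguments
# (paths or profile objects).
def _applyTransform_batch_example(filenames, input_icc, output_icc):
    transform = buildTransform(input_icc, output_icc, "RGB", "RGB")
    # The expensive lookup-table computation happens once, in buildTransform().
    return [applyTransform(Image.open(f).convert("RGB"), transform)
            for f in filenames]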
def createProfile(colorSpace, colorTemp=-1):
"""
(pyCMS) Creates a profile.
If colorSpace not in ["LAB", "XYZ", "sRGB"], a PyCMSError is raised
    If using LAB and colorTemp is not a positive number, a PyCMSError is raised.
If an error occurs while creating the profile, a PyCMSError is raised.
Use this function to create common profiles on-the-fly instead of
having to supply a profile on disk and knowing the path to it. It
returns a normal CmsProfile object that can be passed to
ImageCms.buildTransformFromOpenProfiles() to create a transform to apply
to images.
:param colorSpace: String, the color space of the profile you wish to
create.
Currently only "LAB", "XYZ", and "sRGB" are supported.
    :param colorTemp: Positive number for the white point of the profile, in
        degrees Kelvin (i.e. 5000, 6500, 9600, etc.). If omitted, the default
        is a D50 illuminant (5000K). colorTemp is ONLY applied to LAB
        profiles, and is ignored for XYZ and sRGB.
:returns: A CmsProfile class object
:exception PyCMSError:
"""
if colorSpace not in ["LAB", "XYZ", "sRGB"]:
raise PyCMSError(
"Color space not supported for on-the-fly profile creation (%s)"
% colorSpace)
if colorSpace == "LAB":
try:
colorTemp = float(colorTemp)
        except (TypeError, ValueError):
raise PyCMSError(
"Color temperature must be numeric, \"%s\" not valid"
% colorTemp)
try:
return core.createProfile(colorSpace, colorTemp)
except (TypeError, ValueError) as v:
raise PyCMSError(v)
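# Illustrative sketch (editor's addition): create common profiles on the fly,
# including a LAB profile with a D65 (6500K) white point.
def _createProfile_example():
    srgb = createProfile("sRGB")
    lab_d50 = createProfile("LAB")        # default white point, D50 (5000K)
    lab_d65 = createProfile("LAB", 6500)  # explicit colour temperature
    return srgb, lab_d50, lab_d65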
def getProfileName(profile):
"""
(pyCMS) Gets the internal product name for the given profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised If an error occurs while trying to obtain the
name tag, a PyCMSError is raised.
Use this function to obtain the INTERNAL name of the profile (stored
in an ICC tag in the profile itself), usually the one used when the
profile was originally created. Sometimes this tag also contains
additional information supplied by the creator.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal name of the profile as stored
in an ICC tag.
:exception PyCMSError:
"""
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# do it in python, not c.
# // name was "%s - %s" (model, manufacturer) || Description ,
# // but if the Model and Manufacturer were the same or the model
# // was long, Just the model, in 1.x
model = profile.profile.product_model
manufacturer = profile.profile.product_manufacturer
if not (model or manufacturer):
return profile.profile.product_description + "\n"
if not manufacturer or len(model) > 30:
return model + "\n"
return "%s - %s\n" % (model, manufacturer)
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def getProfileInfo(profile):
"""
(pyCMS) Gets the internal product information for the given profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised.
If an error occurs while trying to obtain the info tag, a PyCMSError
is raised
Use this function to obtain the information stored in the profile's
info tag. This often contains details about the profile, and how it
was created, as supplied by the creator.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal profile information stored in
an ICC tag.
:exception PyCMSError:
"""
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# add an extra newline to preserve pyCMS compatibility
# Python, not C. the white point bits weren't working well,
# so skipping.
# // info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint
description = profile.profile.product_description
cpright = profile.profile.product_copyright
arr = []
for elt in (description, cpright):
if elt:
arr.append(elt)
return "\r\n\r\n".join(arr) + "\r\n\r\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def getProfileCopyright(profile):
"""
(pyCMS) Gets the copyright for the given profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised.
If an error occurs while trying to obtain the copyright tag, a PyCMSError
is raised
Use this function to obtain the information stored in the profile's
copyright tag.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal profile information stored in
an ICC tag.
:exception PyCMSError:
"""
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_copyright + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def getProfileManufacturer(profile):
"""
(pyCMS) Gets the manufacturer for the given profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised.
If an error occurs while trying to obtain the manufacturer tag, a
PyCMSError is raised
Use this function to obtain the information stored in the profile's
manufacturer tag.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal profile information stored in
an ICC tag.
:exception PyCMSError:
"""
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_manufacturer + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def getProfileModel(profile):
"""
(pyCMS) Gets the model for the given profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised.
If an error occurs while trying to obtain the model tag, a PyCMSError
is raised
Use this function to obtain the information stored in the profile's
model tag.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal profile information stored in
an ICC tag.
:exception PyCMSError:
"""
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_model + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def getProfileDescription(profile):
"""
(pyCMS) Gets the description for the given profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised.
If an error occurs while trying to obtain the description tag, a PyCMSError
is raised
Use this function to obtain the information stored in the profile's
description tag.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal profile information stored in an
ICC tag.
:exception PyCMSError:
"""
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_description + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def getDefaultIntent(profile):
"""
(pyCMS) Gets the default intent name for the given profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised.
If an error occurs while trying to obtain the default intent, a
PyCMSError is raised.
Use this function to determine the default (and usually best optimized)
rendering intent for this profile. Most profiles support multiple
rendering intents, but are intended mostly for one type of conversion.
If you wish to use a different intent than returned, use
ImageCms.isIntentSupported() to verify it will work first.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: Integer 0-3 specifying the default rendering intent for this
profile.
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and what
they do.
:exception PyCMSError:
"""
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.rendering_intent
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def isIntentSupported(profile, intent, direction):
"""
(pyCMS) Checks if a given intent is supported.
Use this function to verify that you can use your desired
renderingIntent with profile, and that profile can be used for the
input/output/proof profile as you desire.
    Some profiles are created specifically for one "direction", and cannot
be used for others. Some profiles can only be used for certain
rendering intents... so it's best to either verify this before trying
to create a transform with them (using this function), or catch the
potential PyCMSError that will occur if they don't support the modes
you select.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:param intent: Integer (0-3) specifying the rendering intent you wish to
use with this profile
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and what
they do.
:param direction: Integer specifying if the profile is to be used for input,
output, or proof
INPUT = 0 (or use ImageCms.DIRECTION_INPUT)
OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT)
PROOF = 2 (or use ImageCms.DIRECTION_PROOF)
:returns: 1 if the intent/direction are supported, -1 if they are not.
:exception PyCMSError:
"""
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# FIXME: I get different results for the same data w. different
# compilers. Bug in LittleCMS or in the binding?
if profile.profile.is_intent_supported(intent, direction):
return 1
else:
return -1
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
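# Illustrative sketch (editor's addition): verify that a profile supports the
# desired rendering intent and direction before building a transform with it.
# `output_icc` is a hypothetical path to an ICC output profile.
def _isIntentSupported_example(output_icc):
    profile = getOpenProfile(output_icc)
    if isIntentSupported(profile, INTENT_RELATIVE_COLORIMETRIC,
                         DIRECTION_OUTPUT) == 1:
        return buildTransform(createProfile("sRGB"), profile, "RGB", "RGB",
                              INTENT_RELATIVE_COLORIMETRIC)
    return None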
def versions():
"""
(pyCMS) Fetches versions.
"""
return (
VERSION, core.littlecms_version,
sys.version.split()[0], Image.VERSION
)
# --------------------------------------------------------------------
if __name__ == "__main__":
# create a cheap manual from the __doc__ strings for the functions above
print(__doc__)
for f in dir(sys.modules[__name__]):
doc = None
try:
exec("doc = %s.__doc__" % (f))
if "pyCMS" in doc:
# so we don't get the __doc__ string for imported modules
print("=" * 80)
print("%s" % f)
print(doc)
except (AttributeError, TypeError):
pass
# End of file
|
apache-2.0
|
4eek/edx-platform
|
common/lib/xmodule/xmodule/modulestore/split_mongo/mongo_connection.py
|
35
|
20486
|
"""
Segregation of pymongo functions from the data modeling mechanisms for split modulestore.
"""
import datetime
import cPickle as pickle
import math
import zlib
import pymongo
import pytz
import re
from contextlib import contextmanager
from time import time
# Import this just to export it
from pymongo.errors import DuplicateKeyError # pylint: disable=unused-import
from django.core.cache import get_cache, InvalidCacheBackendError
import dogstats_wrapper as dog_stats_api
from contracts import check, new_contract
from mongodb_proxy import autoretry_read, MongoProxy
from xmodule.exceptions import HeartbeatFailure
from xmodule.modulestore import BlockData
from xmodule.modulestore.split_mongo import BlockKey
new_contract('BlockData', BlockData)
def round_power_2(value):
"""
Return value rounded up to the nearest power of 2.
"""
if value == 0:
return 0
return math.pow(2, math.ceil(math.log(value, 2)))
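# Editor's note (not in the original source) -- example values:
#   round_power_2(5)   -> 8.0
#   round_power_2(100) -> 128.0
# Tagger.tags (below) uses this to bucket raw measurement sizes.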
class Tagger(object):
"""
An object used by :class:`QueryTimer` to allow timed code blocks
to add measurements and tags to the timer.
"""
def __init__(self, default_sample_rate):
self.added_tags = []
self.measures = []
self.sample_rate = default_sample_rate
def measure(self, name, size):
"""
Record a measurement of the timed data. This would be something to
indicate the size of the value being timed.
Arguments:
name: The name of the measurement.
size (float): The size of the measurement.
"""
self.measures.append((name, size))
def tag(self, **kwargs):
"""
Add tags to the timer.
Arguments:
**kwargs: Each keyword is treated as a tag name, and the
value of the argument is the tag value.
"""
self.added_tags.extend(kwargs.items())
@property
def tags(self):
"""
        Return all tags for this Tagger (this includes any tags added with :meth:`tag`,
and also all of the added measurements, bucketed into powers of 2).
"""
return [
'{}:{}'.format(name, round_power_2(size))
for name, size in self.measures
] + [
'{}:{}'.format(name, value)
for name, value in self.added_tags
]
class QueryTimer(object):
"""
An object that allows timing a block of code while also recording measurements
about that code.
"""
def __init__(self, metric_base, sample_rate=1):
"""
Arguments:
metric_base: The prefix to be used for all queries captured
with this :class:`QueryTimer`.
"""
self._metric_base = metric_base
self._sample_rate = sample_rate
@contextmanager
def timer(self, metric_name, course_context):
"""
Contextmanager which acts as a timer for the metric ``metric_name``,
but which also yields a :class:`Tagger` object that allows the timed block
of code to add tags and quantity measurements. Tags are added verbatim to the
        timer output. Measurements are recorded as histogram measurements on their own,
and also as bucketed tags on the timer measurement.
Arguments:
metric_name: The name used to aggregate all of these metrics.
course_context: The course which the query is being made for.
"""
tagger = Tagger(self._sample_rate)
metric_name = "{}.{}".format(self._metric_base, metric_name)
start = time()
try:
yield tagger
finally:
end = time()
tags = tagger.tags
tags.append('course:{}'.format(course_context))
for name, size in tagger.measures:
dog_stats_api.histogram(
'{}.{}'.format(metric_name, name),
size,
timestamp=end,
tags=[tag for tag in tags if not tag.startswith('{}:'.format(metric_name))],
sample_rate=tagger.sample_rate,
)
dog_stats_api.histogram(
'{}.duration'.format(metric_name),
end - start,
timestamp=end,
tags=tags,
sample_rate=tagger.sample_rate,
)
dog_stats_api.increment(
metric_name,
timestamp=end,
tags=tags,
sample_rate=tagger.sample_rate,
)
TIMER = QueryTimer(__name__, 0.01)
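# Illustrative sketch (editor's addition): how calling code uses the timer
# contextmanager and the Tagger it yields.  `blocks` is a hypothetical payload.
def _query_timer_example(course_key, blocks):
    with TIMER.timer("example_query", course_key) as tagger:
        tagger.measure("blocks", len(blocks))
        tagger.tag(from_cache="false")
        return list(blocks)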
def structure_from_mongo(structure, course_context=None):
"""
Converts the 'blocks' key from a list [block_data] to a map
{BlockKey: block_data}.
Converts 'root' from [block_type, block_id] to BlockKey.
Converts 'blocks.*.fields.children' from [[block_type, block_id]] to [BlockKey].
N.B. Does not convert any other ReferenceFields (because we don't know which fields they are at this level).
Arguments:
structure: The document structure to convert
course_context (CourseKey): For metrics gathering, the CourseKey
for the course that this data is being processed for.
"""
with TIMER.timer('structure_from_mongo', course_context) as tagger:
tagger.measure('blocks', len(structure['blocks']))
check('seq[2]', structure['root'])
check('list(dict)', structure['blocks'])
for block in structure['blocks']:
if 'children' in block['fields']:
check('list(list[2])', block['fields']['children'])
structure['root'] = BlockKey(*structure['root'])
new_blocks = {}
for block in structure['blocks']:
if 'children' in block['fields']:
block['fields']['children'] = [BlockKey(*child) for child in block['fields']['children']]
new_blocks[BlockKey(block['block_type'], block.pop('block_id'))] = BlockData(**block)
structure['blocks'] = new_blocks
return structure
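# Editor's note -- a sketch of the conversion performed above, using a
# hypothetical block id:
#   {'root': ['course', 'x'],
#    'blocks': [{'block_type': 'course', 'block_id': 'x', 'fields': {}, ...}]}
# becomes
#   {'root': BlockKey('course', 'x'),
#    'blocks': {BlockKey('course', 'x'): BlockData(block_type='course',
#                                                  fields={}, ...)}}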
def structure_to_mongo(structure, course_context=None):
"""
Converts the 'blocks' key from a map {BlockKey: block_data} to
a list [block_data], inserting BlockKey.type as 'block_type'
and BlockKey.id as 'block_id'.
Doesn't convert 'root', since namedtuple's can be inserted
directly into mongo.
"""
with TIMER.timer('structure_to_mongo', course_context) as tagger:
tagger.measure('blocks', len(structure['blocks']))
check('BlockKey', structure['root'])
check('dict(BlockKey: BlockData)', structure['blocks'])
for block in structure['blocks'].itervalues():
if 'children' in block.fields:
check('list(BlockKey)', block.fields['children'])
new_structure = dict(structure)
new_structure['blocks'] = []
for block_key, block in structure['blocks'].iteritems():
new_block = dict(block.to_storable())
new_block.setdefault('block_type', block_key.type)
new_block['block_id'] = block_key.id
new_structure['blocks'].append(new_block)
return new_structure
class CourseStructureCache(object):
"""
Wrapper around django cache object to cache course structure objects.
The course structures are pickled and compressed when cached.
    If the 'course_structure_cache' doesn't exist, then don't do anything
    for set and get.
"""
def __init__(self):
self.no_cache_found = False
try:
self.cache = get_cache('course_structure_cache')
except InvalidCacheBackendError:
self.no_cache_found = True
def get(self, key, course_context=None):
"""Pull the compressed, pickled struct data from cache and deserialize."""
if self.no_cache_found:
return None
with TIMER.timer("CourseStructureCache.get", course_context) as tagger:
compressed_pickled_data = self.cache.get(key)
tagger.tag(from_cache=str(compressed_pickled_data is not None).lower())
if compressed_pickled_data is None:
# Always log cache misses, because they are unexpected
tagger.sample_rate = 1
return None
tagger.measure('compressed_size', len(compressed_pickled_data))
pickled_data = zlib.decompress(compressed_pickled_data)
tagger.measure('uncompressed_size', len(pickled_data))
return pickle.loads(pickled_data)
def set(self, key, structure, course_context=None):
"""Given a structure, will pickle, compress, and write to cache."""
if self.no_cache_found:
return None
with TIMER.timer("CourseStructureCache.set", course_context) as tagger:
pickled_data = pickle.dumps(structure, pickle.HIGHEST_PROTOCOL)
tagger.measure('uncompressed_size', len(pickled_data))
# 1 = Fastest (slightly larger results)
compressed_pickled_data = zlib.compress(pickled_data, 1)
tagger.measure('compressed_size', len(compressed_pickled_data))
            # Structures are immutable, so we set a timeout of "never"
self.cache.set(key, compressed_pickled_data, None)
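# Illustrative sketch (editor's addition): a round trip through the structure
# cache.  `key`, `structure` and `course_key` are hypothetical values; get()
# returns None when the 'course_structure_cache' backend is not configured.
def _structure_cache_example(key, structure, course_key):
    cache = CourseStructureCache()
    cache.set(key, structure, course_key)
    return cache.get(key, course_key)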
class MongoConnection(object):
"""
Segregation of pymongo functions from the data modeling mechanisms for split modulestore.
"""
def __init__(
self, db, collection, host, port=27017, tz_aware=True, user=None, password=None,
asset_collection=None, retry_wait_time=0.1, **kwargs
):
"""
Create & open the connection, authenticate, and provide pointers to the collections
"""
if kwargs.get('replicaSet') is None:
kwargs.pop('replicaSet', None)
mongo_class = pymongo.MongoClient
else:
mongo_class = pymongo.MongoReplicaSetClient
_client = mongo_class(
host=host,
port=port,
tz_aware=tz_aware,
**kwargs
)
self.database = MongoProxy(
pymongo.database.Database(_client, db),
wait_time=retry_wait_time
)
if user is not None and password is not None:
self.database.authenticate(user, password)
self.course_index = self.database[collection + '.active_versions']
self.structures = self.database[collection + '.structures']
self.definitions = self.database[collection + '.definitions']
        # every app has write access to the db (vs. having a flag to indicate read-only vs. write access)
# Force mongo to report errors, at the expense of performance
# pymongo docs suck but explanation:
# http://api.mongodb.org/java/2.10.1/com/mongodb/WriteConcern.html
self.course_index.write_concern = {'w': 1}
self.structures.write_concern = {'w': 1}
self.definitions.write_concern = {'w': 1}
def heartbeat(self):
"""
Check that the db is reachable.
"""
if self.database.connection.alive():
return True
else:
raise HeartbeatFailure("Can't connect to {}".format(self.database.name), 'mongo')
def get_structure(self, key, course_context=None):
"""
Get the structure from the persistence mechanism whose id is the given key.
        This method will use a cached version of the structure if it is available.
"""
with TIMER.timer("get_structure", course_context) as tagger_get_structure:
cache = CourseStructureCache()
structure = cache.get(key, course_context)
tagger_get_structure.tag(from_cache=str(bool(structure)).lower())
if not structure:
# Always log cache misses, because they are unexpected
tagger_get_structure.sample_rate = 1
with TIMER.timer("get_structure.find_one", course_context) as tagger_find_one:
doc = self.structures.find_one({'_id': key})
tagger_find_one.measure("blocks", len(doc['blocks']))
structure = structure_from_mongo(doc, course_context)
tagger_find_one.sample_rate = 1
cache.set(key, structure, course_context)
return structure
@autoretry_read()
def find_structures_by_id(self, ids, course_context=None):
"""
        Return all structures specified in ``ids``.
Arguments:
ids (list): A list of structure ids
"""
with TIMER.timer("find_structures_by_id", course_context) as tagger:
tagger.measure("requested_ids", len(ids))
docs = [
structure_from_mongo(structure, course_context)
for structure in self.structures.find({'_id': {'$in': ids}})
]
tagger.measure("structures", len(docs))
return docs
@autoretry_read()
def find_structures_derived_from(self, ids, course_context=None):
"""
Return all structures that were immediately derived from a structure listed in ``ids``.
Arguments:
ids (list): A list of structure ids
"""
with TIMER.timer("find_structures_derived_from", course_context) as tagger:
tagger.measure("base_ids", len(ids))
docs = [
structure_from_mongo(structure, course_context)
for structure in self.structures.find({'previous_version': {'$in': ids}})
]
tagger.measure("structures", len(docs))
return docs
@autoretry_read()
def find_ancestor_structures(self, original_version, block_key, course_context=None):
"""
Find all structures that originated from ``original_version`` that contain ``block_key``.
Arguments:
original_version (str or ObjectID): The id of a structure
block_key (BlockKey): The id of the block in question
"""
with TIMER.timer("find_ancestor_structures", course_context) as tagger:
docs = [
structure_from_mongo(structure, course_context)
for structure in self.structures.find({
'original_version': original_version,
'blocks': {
'$elemMatch': {
'block_id': block_key.id,
'block_type': block_key.type,
'edit_info.update_version': {
'$exists': True,
},
},
},
})
]
tagger.measure("structures", len(docs))
return docs
def insert_structure(self, structure, course_context=None):
"""
Insert a new structure into the database.
"""
with TIMER.timer("insert_structure", course_context) as tagger:
tagger.measure("blocks", len(structure["blocks"]))
self.structures.insert(structure_to_mongo(structure, course_context))
def get_course_index(self, key, ignore_case=False):
"""
Get the course_index from the persistence mechanism whose id is the given key
"""
with TIMER.timer("get_course_index", key):
if ignore_case:
query = {
key_attr: re.compile(u'^{}$'.format(re.escape(getattr(key, key_attr))), re.IGNORECASE)
for key_attr in ('org', 'course', 'run')
}
else:
query = {
key_attr: getattr(key, key_attr)
for key_attr in ('org', 'course', 'run')
}
return self.course_index.find_one(query)
def find_matching_course_indexes(self, branch=None, search_targets=None, org_target=None, course_context=None):
"""
Find the course_index matching particular conditions.
Arguments:
branch: If specified, this branch must exist in the returned courses
search_targets: If specified, this must be a dictionary specifying field values
that must exist in the search_targets of the returned courses
            org_target: If specified, this is an ORG filter so that only course_index records
                are returned for the specified ORG
"""
with TIMER.timer("find_matching_course_indexes", course_context):
query = {}
if branch is not None:
query['versions.{}'.format(branch)] = {'$exists': True}
if search_targets:
for key, value in search_targets.iteritems():
query['search_targets.{}'.format(key)] = value
if org_target:
query['org'] = org_target
return self.course_index.find(query)
def insert_course_index(self, course_index, course_context=None):
"""
Create the course_index in the db
"""
with TIMER.timer("insert_course_index", course_context):
course_index['last_update'] = datetime.datetime.now(pytz.utc)
self.course_index.insert(course_index)
def update_course_index(self, course_index, from_index=None, course_context=None):
"""
Update the db record for course_index.
Arguments:
from_index: If set, only update an index if it matches the one specified in `from_index`.
"""
with TIMER.timer("update_course_index", course_context):
if from_index:
query = {"_id": from_index["_id"]}
# last_update not only tells us when this course was last updated but also helps
# prevent collisions
if 'last_update' in from_index:
query['last_update'] = from_index['last_update']
else:
query = {
'org': course_index['org'],
'course': course_index['course'],
'run': course_index['run'],
}
course_index['last_update'] = datetime.datetime.now(pytz.utc)
self.course_index.update(query, course_index, upsert=False,)
def delete_course_index(self, course_key):
"""
        Delete the course_index from the persistence mechanism that matches the given course_key
"""
with TIMER.timer("delete_course_index", course_key):
query = {
key_attr: getattr(course_key, key_attr)
for key_attr in ('org', 'course', 'run')
}
return self.course_index.remove(query)
def get_definition(self, key, course_context=None):
"""
Get the definition from the persistence mechanism whose id is the given key
"""
with TIMER.timer("get_definition", course_context) as tagger:
definition = self.definitions.find_one({'_id': key})
tagger.measure("fields", len(definition['fields']))
tagger.tag(block_type=definition['block_type'])
return definition
def get_definitions(self, definitions, course_context=None):
"""
Retrieve all definitions listed in `definitions`.
"""
with TIMER.timer("get_definitions", course_context) as tagger:
tagger.measure('definitions', len(definitions))
definitions = self.definitions.find({'_id': {'$in': definitions}})
return definitions
def insert_definition(self, definition, course_context=None):
"""
Create the definition in the db
"""
with TIMER.timer("insert_definition", course_context) as tagger:
tagger.measure('fields', len(definition['fields']))
tagger.tag(block_type=definition['block_type'])
self.definitions.insert(definition)
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
self.course_index.create_index(
[
('org', pymongo.ASCENDING),
('course', pymongo.ASCENDING),
('run', pymongo.ASCENDING)
],
unique=True,
background=True
)
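# Illustrative sketch (editor's addition): wiring up a MongoConnection for a
# split modulestore collection.  All connection settings are hypothetical.
#
#   conn = MongoConnection(
#       db='edxapp', collection='modulestore.split',
#       host='localhost', port=27017,
#   )
#   index = conn.get_course_index(course_key)
#   structure = conn.get_structure(structure_id)   # structure_id taken from `index`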
|
agpl-3.0
|
nan86150/ImageFusion
|
lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py
|
436
|
5992
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import _base
from .. import ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("utf-8", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
return _base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
return _base.TEXT, node.obj
elif node.tag == etree.Comment:
return _base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), "Text nodes have no children"
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
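# Illustrative sketch (editor's addition): walking an lxml document with this
# TreeWalker and printing the html5lib token stream.
#
#   from lxml import etree as _etree
#   _doc = _etree.fromstring("<div><p>hello</p></div>").getroottree()
#   for _token in TreeWalker(_doc):
#       print(_token)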
|
mit
|
erocarrera/pefile
|
peutils.py
|
2
|
18206
|
# -*- coding: Latin-1 -*-
"""peutils, Portable Executable utilities module
Copyright (c) 2005-2020 Ero Carrera <[email protected]>
All rights reserved.
"""
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import os
import re
import string
import urllib.request, urllib.parse, urllib.error
import pefile
__author__ = 'Ero Carrera'
__version__ = pefile.__version__
__contact__ = '[email protected]'
class SignatureDatabase(object):
"""This class loads and keeps a parsed PEiD signature database.
Usage:
sig_db = SignatureDatabase('/path/to/signature/file')
and/or
sig_db = SignatureDatabase()
sig_db.load('/path/to/signature/file')
Signature databases can be combined by performing multiple loads.
The filename parameter can be a URL too. In that case the
signature database will be downloaded from that location.
"""
def __init__(self, filename=None, data=None):
# RegExp to match a signature block
#
self.parse_sig = re.compile(
            r'\[(.*?)\]\s+?signature\s*=\s*(.*?)(\s+\?\?)*\s*ep_only\s*=\s*(\w+)(?:\s*section_start_only\s*=\s*(\w+)|)', re.S)
# Signature information
#
# Signatures are stored as trees using dictionaries
# The keys are the byte values while the values for
# each key are either:
#
# - Other dictionaries of the same form for further
# bytes in the signature
#
# - A dictionary with a string as a key (packer name)
# and None as value to indicate a full signature
#
self.signature_tree_eponly_true = dict ()
self.signature_count_eponly_true = 0
self.signature_tree_eponly_false = dict ()
self.signature_count_eponly_false = 0
self.signature_tree_section_start = dict ()
self.signature_count_section_start = 0
# The depth (length) of the longest signature
#
self.max_depth = 0
self.__load(filename=filename, data=data)
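    # Editor's note -- a sketch of the tree layout described above, for two
    # hypothetical signatures "60 E8 ..." (PackerA) and "60 BE ..." (PackerB):
    #
    #   {0x60: {0xE8: {...: {'PackerA': None}},
    #           0xBE: {...: {'PackerB': None}}}}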
def generate_section_signatures(self, pe, name, sig_length=512):
"""Generates signatures for all the sections in a PE file.
If the section contains any data a signature will be created
for it. The signature name will be a combination of the
parameter 'name' and the section number and its name.
"""
section_signatures = list()
for idx, section in enumerate(pe.sections):
if section.SizeOfRawData < sig_length:
continue
#offset = pe.get_offset_from_rva(section.VirtualAddress)
offset = section.PointerToRawData
sig_name = '%s Section(%d/%d,%s)' % (
name, idx + 1, len(pe.sections),
''.join([c for c in section.Name if c in string.printable]))
section_signatures.append(
self.__generate_signature(
pe, offset, sig_name, ep_only=False,
section_start_only=True,
sig_length=sig_length) )
return '\n'.join(section_signatures)+'\n'
def generate_ep_signature(self, pe, name, sig_length=512):
"""Generate signatures for the entry point of a PE file.
Creates a signature whose name will be the parameter 'name'
and the section number and its name.
"""
offset = pe.get_offset_from_rva(pe.OPTIONAL_HEADER.AddressOfEntryPoint)
return self.__generate_signature(
pe, offset, name, ep_only=True, sig_length=sig_length)
def __generate_signature(self, pe, offset, name, ep_only=False,
section_start_only=False, sig_length=512):
data = pe.__data__[offset:offset+sig_length]
signature_bytes = ' '.join(['%02x' % ord(c) for c in data])
if ep_only == True:
ep_only = 'true'
else:
ep_only = 'false'
if section_start_only == True:
section_start_only = 'true'
else:
section_start_only = 'false'
signature = '[%s]\nsignature = %s\nep_only = %s\nsection_start_only = %s\n' % (
name, signature_bytes, ep_only, section_start_only)
return signature
def match(self, pe, ep_only=True, section_start_only=False):
"""Matches and returns the exact match(es).
If ep_only is True the result will be a string with
the packer name. Otherwise it will be a list of the
form (file_offset, packer_name) specifying where
in the file the signature was found.
"""
matches = self.__match(pe, ep_only, section_start_only)
# The last match (the most precise) from the
# list of matches (if any) is returned
#
if matches:
if ep_only == False:
# Get the most exact match for each list of matches
# at a given offset
#
return [(match[0], match[1][-1]) for match in matches]
return matches[1][-1]
return None
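    # Illustrative sketch (editor's addition): matching a PE file against a
    # loaded PEiD database.  'UserDB.TXT' and 'sample.exe' are hypothetical.
    #
    #   sig_db = SignatureDatabase('UserDB.TXT')
    #   pe = pefile.PE('sample.exe')
    #   packer = sig_db.match(pe, ep_only=True)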
def match_all(self, pe, ep_only=True, section_start_only=False):
"""Matches and returns all the likely matches."""
matches = self.__match(pe, ep_only, section_start_only)
if matches:
if ep_only == False:
# Get the most exact match for each list of matches
# at a given offset
#
return matches
return matches[1]
return None
def __match(self, pe, ep_only, section_start_only):
# Load the corresponding set of signatures
# Either the one for ep_only equal to True or
# to False
#
if section_start_only is True:
# Fetch the data of the executable as it'd
# look once loaded in memory
#
try :
data = pe.__data__
except Exception as excp :
raise
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_section_start
# Set the starting address to start scanning from
#
scan_addresses = [section.PointerToRawData for section in pe.sections]
elif ep_only is True:
# Fetch the data of the executable as it'd
# look once loaded in memory
#
try :
data = pe.get_memory_mapped_image()
except Exception as excp :
raise
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_eponly_true
# Fetch the entry point of the PE file and the data
# at the entry point
#
ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
# Set the starting address to start scanning from
#
scan_addresses = [ep]
else:
data = pe.__data__
signatures = self.signature_tree_eponly_false
scan_addresses = range( len(data) )
# For each start address, check if any signature matches
#
matches = []
for idx in scan_addresses:
result = self.__match_signature_tree(
signatures,
data[idx:idx+self.max_depth])
if result:
matches.append( (idx, result) )
# Return only the matched items found at the entry point if
# ep_only is True (matches will have only one element in that
# case)
#
if ep_only is True:
if matches:
return matches[0]
return matches
def match_data(self, code_data, ep_only=True, section_start_only=False):
data = code_data
scan_addresses = [ 0 ]
# Load the corresponding set of signatures
# Either the one for ep_only equal to True or
# to False
#
if section_start_only is True:
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_section_start
# Set the starting address to start scanning from
#
elif ep_only is True:
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_eponly_true
# For each start address, check if any signature matches
#
matches = []
for idx in scan_addresses:
result = self.__match_signature_tree(
signatures,
data[idx:idx+self.max_depth])
if result:
matches.append( (idx, result) )
# Return only the matched items found at the entry point if
# ep_only is True (matches will have only one element in that
# case)
#
if ep_only is True:
if matches:
return matches[0]
return matches
def __match_signature_tree(self, signature_tree, data, depth = 0):
"""Recursive function to find matches along the signature tree.
signature_tree is the part of the tree left to walk
data is the data being checked against the signature tree
depth keeps track of how far we have gone down the tree
"""
matched_names = list ()
match = signature_tree
# Walk the bytes in the data and match them
# against the signature
#
for idx, byte in enumerate ( [b if isinstance(b, int) else ord(b) for b in data] ):
# If the tree is exhausted...
#
if match is None :
break
# Get the next byte in the tree
#
match_next = match.get(byte, None)
# If None is among the values for the key
# it means that a signature in the database
# ends here and that there's an exact match.
#
if None in list(match.values()):
# idx represent how deep we are in the tree
#
#names = [idx+depth]
names = list()
# For each of the item pairs we check
# if it has an element other than None,
# if not then we have an exact signature
#
for item in list(match.items()):
if item[1] is None :
names.append (item[0])
matched_names.append(names)
# If a wildcard is found keep scanning the signature
# ignoring the byte.
#
if '??' in match :
match_tree_alternate = match.get ('??', None)
data_remaining = data[idx + 1 :]
if data_remaining:
matched_names.extend(
self.__match_signature_tree(
match_tree_alternate, data_remaining, idx+depth+1))
match = match_next
# If we have any more packer name in the end of the signature tree
# add them to the matches
#
if match is not None and None in list(match.values()):
#names = [idx + depth + 1]
names = list()
for item in list(match.items()) :
if item[1] is None:
names.append(item[0])
matched_names.append(names)
return matched_names
def load(self , filename=None, data=None):
"""Load a PEiD signature file.
Invoking this method on different files combines the signatures.
"""
self.__load(filename=filename, data=data)
def __load(self, filename=None, data=None):
if filename is not None:
# If the path does not exist, attempt to open a URL
#
if not os.path.exists(filename):
try:
sig_f = urllib.request.urlopen(filename)
sig_data = sig_f.read()
sig_f.close()
except IOError:
# Let this be raised back to the user...
raise
else:
# Get the data for a file
#
try:
sig_f = open( filename, 'rt' )
sig_data = sig_f.read()
sig_f.close()
except IOError:
# Let this be raised back to the user...
raise
else:
sig_data = data
# If the file/URL could not be read or no "raw" data
# was provided there's nothing else to do
#
if not sig_data:
return
# Helper function to parse the signature bytes
#
def to_byte(value):
if '?' in value:
return value
return int(value, 16)
# Parse all the signatures in the file
#
matches = self.parse_sig.findall(sig_data)
# For each signature, get the details and load it into the
# signature tree
#
for packer_name, signature, superfluous_wildcards, ep_only, section_start_only in matches:
ep_only = ep_only.strip().lower()
signature = signature.replace('\\n', '').strip()
signature_bytes = [to_byte(b) for b in signature.split()]
if ep_only == 'true':
ep_only = True
else:
ep_only = False
if section_start_only == 'true':
section_start_only = True
else:
section_start_only = False
depth = 0
if section_start_only is True:
tree = self.signature_tree_section_start
self.signature_count_section_start += 1
else:
if ep_only is True :
tree = self.signature_tree_eponly_true
self.signature_count_eponly_true += 1
else :
tree = self.signature_tree_eponly_false
self.signature_count_eponly_false += 1
for idx, byte in enumerate (signature_bytes) :
if idx+1 == len(signature_bytes):
tree[byte] = tree.get( byte, dict() )
tree[byte][packer_name] = None
else :
tree[byte] = tree.get ( byte, dict() )
tree = tree[byte]
depth += 1
if depth > self.max_depth:
self.max_depth = depth
def is_valid( pe ):
""""""
pass
def is_suspicious( pe ):
"""
unusual locations of import tables
    unrecognized section names
presence of long ASCII strings
"""
relocations_overlap_entry_point = False
sequential_relocs = 0
    # If relocation data is found and the entries overlap the entry point, are highly
    # sequential, or point outside the section's boundaries, it might imply that an
    # obfuscation trick is being used or that the relocations are corrupt (maybe intentionally)
#
if hasattr(pe, 'DIRECTORY_ENTRY_BASERELOC'):
for base_reloc in pe.DIRECTORY_ENTRY_BASERELOC:
last_reloc_rva = None
for reloc in base_reloc.entries:
if reloc.rva <= pe.OPTIONAL_HEADER.AddressOfEntryPoint <= reloc.rva + 4:
relocations_overlap_entry_point = True
if last_reloc_rva is not None and last_reloc_rva <= reloc.rva <= last_reloc_rva + 4:
sequential_relocs += 1
last_reloc_rva = reloc.rva
    # If import tables or strings point into the header, or into the area between
    # the PE header and the first section, that's suspicious
#
# IMPLEMENT
warnings_while_parsing = False
    # If we have warnings, that's suspicious; some of those will be due to out-of-the-ordinary
    # values found in the PE header fields
# Things that are reported in warnings:
# (parsing problems, special section characteristics i.e. W & X, uncommon values of fields,
# unusual entrypoint, suspicious imports)
#
warnings = pe.get_warnings()
if warnings:
        warnings_while_parsing = True
    # If there are few or no longer (>8 character) ASCII sequences, where a standard
    # "density" of strings per kilobyte of data would be expected, that might indicate
    # packed data (this is similar to the entropy test in some ways but might help to
    # discard cases of legitimate installers or compressed data)
    # If the data is compressed (high entropy) and the file is a driver => nasty
pass
def is_probably_packed( pe ):
"""Returns True is there is a high likelihood that a file is packed or contains compressed data.
The sections of the PE file will be analyzed, if enough sections
look like containing compressed data and the data makes
up for more than 20% of the total file size, the function will
return True.
"""
# Calculate the length of the data up to the end of the last section in the
# file. Overlay data won't be taken into account
#
total_pe_data_length = len( pe.trim() )
# Assume that the file is packed when no data is available
if not total_pe_data_length:
return True
has_significant_amount_of_compressed_data = False
    # If some of the sections have high entropy and they make up more than 20% of the file's size,
    # it's assumed that it could be an installer or a packed file
total_compressed_data = 0
for section in pe.sections:
s_entropy = section.get_entropy()
s_length = len( section.get_data() )
# The value of 7.4 is empirical, based on looking at a few files packed
# by different packers
if s_entropy > 7.4:
total_compressed_data += s_length
if ((1.0 * total_compressed_data)/total_pe_data_length) > .2:
has_significant_amount_of_compressed_data = True
return has_significant_amount_of_compressed_data
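# A minimal, illustrative driver (not part of the original module): it assumes the
# companion `pefile` package is importable and that a PE file path is passed on the
# command line. Shown purely as a usage sketch for is_probably_packed().
if __name__ == '__main__':
    import sys
    import pefile
    pe = pefile.PE(sys.argv[1])
    print('probably packed: %s' % is_probably_packed(pe))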
|
mit
|
zmilan/POSTMan-Chrome-Extension
|
proxy/proxy_server.py
|
102
|
3014
|
#!/usr/bin/python
from twisted.internet import reactor
from twisted.web import http
from twisted.web.proxy import Proxy, ProxyRequest, ProxyClientFactory, ProxyClient
from ImageFile import Parser
from StringIO import StringIO
class InterceptingProxyClient(ProxyClient):
def __init__(self, *args, **kwargs):
ProxyClient.__init__(self, *args, **kwargs)
self.overrides = []
self.restricted_headers = [
'accept-charset',
'accept-encoding',
'access-control-request-headers',
'access-control-request-method',
'connection',
'content-length',
'cookie',
'cookie2',
'content-transfer-encoding',
'date',
'expect',
'host',
'keep-alive',
'origin',
'referer',
'te',
'trailer',
'transfer-encoding',
'upgrade',
'user-agent',
'via'
]
self.all_headers = []
self.unsent_restricted_headers = []
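    # How the two methods below cooperate (summary added for clarity): the Postman
    # extension smuggles values for browser-restricted headers by prefixing them with
    # "postman-". sendHeader() strips that prefix and forwards the real header, while
    # genuinely restricted headers are held back; endHeaders() then flushes the held
    # headers only when no "postman-" override already supplied a replacement for them.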
def sendHeader(self, name, value):
if "postman-" in name:
new_header = name[8:]
print "Header %s, %s, %s" % (name, value, new_header)
name = new_header
header = {
"name": name,
"value": value
}
self.all_headers.append(name)
ProxyClient.sendHeader(self, name, value)
elif name in self.restricted_headers:
header = {
"name": name,
"value": value
}
print "Restricted header %s" % name
self.unsent_restricted_headers.append(header)
else:
ProxyClient.sendHeader(self, name, value)
def endHeaders(self):
for header in self.unsent_restricted_headers:
if not header["name"] in self.all_headers:
ProxyClient.sendHeader(self, header["name"], header["value"])
ProxyClient.endHeaders(self)
def handleHeader(self, key, value):
# change response header here
print("Header: %s: %s" % (key, value))
l = key.lower()
if l == "location":
key = "Postman-Location"
ProxyClient.handleHeader(self, key, value)
def handleResponseEnd(self):
if not self._finished:
self.father.responseHeaders.setRawHeaders("client", ["location"])
ProxyClient.handleResponseEnd(self)
class InterceptingProxyClientFactory(ProxyClientFactory):
protocol = InterceptingProxyClient
class InterceptingProxyRequest(ProxyRequest):
protocols = {'http': InterceptingProxyClientFactory, 'https': InterceptingProxyClientFactory}
class InterceptingProxy(Proxy):
requestFactory = InterceptingProxyRequest
factory = http.HTTPFactory()
factory.protocol = InterceptingProxy
port = 8000
reactor.listenTCP(port, factory)
print "Listening on port %d" % port
reactor.run()
|
apache-2.0
|
anubhabsen/NotesKeeper
|
languages/sk.py
|
161
|
6877
|
# coding: utf8
{
'!langcode!': 'sk',
'!langname!': 'Slovenský',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" je voliteľný výraz ako "field1=\'newvalue\'". Nemôžete upravovať alebo zmazať výsledky JOINu',
'%s %%{row} deleted': '%s zmazaných záznamov',
'%s %%{row} updated': '%s upravených záznamov',
'%s selected': '%s označených',
'%Y-%m-%d': '%d.%m.%Y',
'%Y-%m-%d %H:%M:%S': '%d.%m.%Y %H:%M:%S',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'pre administrátorské rozhranie kliknite sem',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': 'appadmin je zakázaný bez zabezpečeného spojenia',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available Databases and Tables': 'Dostupné databázy a tabuľky',
'Buy this book': 'Buy this book',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Nemôže byť prázdne',
'Check to delete': 'Označiť na zmazanie',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Aktuálna požiadavka',
'Current response': 'Aktuálna odpoveď',
'Current session': 'Aktuálne sedenie',
'customize me!': 'prispôsob ma!',
'data uploaded': 'údaje naplnené',
'Database': 'databáza',
'Database %s select': 'databáza %s výber',
'db': 'db',
'DB Model': 'DB Model',
'Delete:': 'Zmazať:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Popis',
'design': 'návrh',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Dokumentácia',
"Don't know what to do?": "Don't know what to do?",
'done!': 'hotovo!',
'Download': 'Download',
'Edit': 'Upraviť',
'Edit current record': 'Upraviť aktuálny záznam',
'Edit Profile': 'Upraviť profil',
'Email and SMS': 'Email and SMS',
'Errors': 'Errors',
'export as csv file': 'exportovať do csv súboru',
'FAQ': 'FAQ',
'First name': 'Krstné meno',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group ID': 'ID skupiny',
'Groups': 'Groups',
'Hello World': 'Ahoj svet',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Import/Export',
'Index': 'Index',
'insert new': 'vložiť nový záznam ',
'insert new %s': 'vložiť nový záznam %s',
'Internal State': 'Vnútorný stav',
'Introduction': 'Introduction',
'Invalid email': 'Neplatný email',
'Invalid password': 'Nesprávne heslo',
'Invalid Query': 'Neplatná otázka',
'invalid request': 'Neplatná požiadavka',
'Key': 'Key',
'Last name': 'Priezvisko',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Logged in': 'Prihlásený',
'Logged out': 'Odhlásený',
'login': 'prihlásiť',
'logout': 'odhlásiť',
'Lost Password': 'Stratené heslo?',
'lost password?': 'stratené heslo?',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu Model',
'My Sites': 'My Sites',
'Name': 'Meno',
'New password': 'Nové heslo',
'New Record': 'Nový záznam',
'new record inserted': 'nový záznam bol vložený',
'next 100 rows': 'ďalších 100 riadkov',
'No databases in this application': 'V tejto aplikácii nie sú databázy',
'Old password': 'Staré heslo',
'Online examples': 'pre online príklady kliknite sem',
'or import from csv file': 'alebo naimportovať z csv súboru',
'Origin': 'Pôvod',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'password': 'heslo',
'Password': 'Heslo',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': 'predchádzajúcich 100 riadkov',
'Python': 'Python',
'Query:': 'Otázka:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'záznam',
'record does not exist': 'záznam neexistuje',
'Record ID': 'ID záznamu',
'Record id': 'id záznamu',
'Register': 'Zaregistrovať sa',
'register': 'registrovať',
'Registration key': 'Registračný kľúč',
'Remember me (for 30 days)': 'Zapamätaj si ma (na 30 dní)',
'Reset Password key': 'Nastaviť registračný kľúč',
'Role': 'Rola',
'Rows in Table': 'riadkov v tabuľke',
'Rows selected': 'označených riadkov',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'stav',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Submit': 'Odoslať',
'Support': 'Support',
'Sure you want to delete this object?': 'Ste si istí, že chcete zmazať tento objekt?',
'Table': 'tabuľka',
'Table name': 'Názov tabuľky',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"query" je podmienka ako "db.table1.field1==\'value\'". Niečo ako "db.table1.field1==db.table2.field2" má za výsledok SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'Výstup zo súboru je slovník, ktorý bol zobrazený vo view %s',
'The Views': 'The Views',
'This App': 'This App',
'This is a copy of the scaffolding application': 'Toto je kópia skeletu aplikácie',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Časová pečiatka',
'Twitter': 'Twitter',
'unable to parse csv file': 'nedá sa načítať csv súbor',
'Update:': 'Upraviť:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Použite (...)&(...) pre AND, (...)|(...) pre OR a ~(...) pre NOT na poskladanie komplexnejších otázok.',
'User %(id)s Logged-in': 'Používateľ %(id)s prihlásený',
'User %(id)s Logged-out': 'Používateľ %(id)s odhlásený',
'User %(id)s Password changed': 'Používateľ %(id)s zmenil heslo',
'User %(id)s Profile updated': 'Používateľ %(id)s upravil profil',
'User %(id)s Registered': 'Používateľ %(id)s sa zaregistroval',
'User ID': 'ID používateľa',
'Verify Password': 'Zopakujte heslo',
'Videos': 'Videos',
'View': 'Zobraziť',
'Welcome to web2py': 'Vitajte vo web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Ktorý zavolal funkciu %s nachádzajúci sa v súbore %s',
'You are successfully running web2py': 'Úspešne ste spustili web2py',
'You can modify this application and adapt it to your needs': 'Môžete upraviť túto aplikáciu a prispôsobiť ju svojim potrebám',
'You visited the url %s': 'Navštívili ste URL %s',
}
|
mit
|
ProjexSoftware/orb
|
orb/core/connection_types/sql/mysql/statements/alter.py
|
2
|
3350
|
from projex.lazymodule import lazy_import
from ..mysqlconnection import MySQLStatement
orb = lazy_import('orb')
class ALTER(MySQLStatement):
def __call__(self, model, add=None, remove=None, owner=''):
"""
Modifies the table to add and remove the given columns.
:param model: <orb.Model>
:param add: [<orb.Column>, ..]
:param remove: [<orb.Column>, ..]
        :return: (<str> sql, <dict> data)
"""
data = {}
default_namespace = orb.Context().db.name()
ADD_COLUMN = self.byName('ADD COLUMN')
# determine what kind of model we're modifying
if issubclass(model, orb.Table):
typ = 'TABLE'
else:
raise orb.errors.OrbError('Cannot alter {0}'.format(type(model)))
# determine the i18n and standard columns
add_i18n = []
add_standard = []
for col in add or []:
# virtual columns do not exist in the database
if col.testFlag(col.Flags.Virtual):
continue
if col.testFlag(col.Flags.I18n):
add_i18n.append(col)
else:
add_standard.append(col)
# add standard columns
if add_standard:
field_statements = []
for col in add_standard:
field_statement, field_data = ADD_COLUMN(col)
data.update(field_data)
field_statements.append(field_statement)
sql_options = {
'type': typ,
'namespace': model.schema().namespace() or default_namespace,
'name': model.schema().dbname(),
'fields': u'\t' + ',\n\t'.join(field_statements)
}
sql = (
u'ALTER {type} `{namespace}`.`{name}`\n'
u'{fields};'
).format(**sql_options)
else:
sql = ''
# add i18n columns
if add_i18n:
id_column = model.schema().idColumn()
id_type = id_column.dbType('MySQL')
field_statements = []
for col in add_i18n:
field_statement, field_data = ADD_COLUMN(col)
data.update(field_data)
field_statements.append(field_statement)
i18n_options = {
'namespace': model.schema().namespace() or default_namespace,
'table': model.schema().dbname(),
'fields': u'\t' + ',\n\t'.join(field_statements),
'owner': owner,
'id_type': id_type,
'id_field': id_column.field()
}
i18n_sql = (
u'CREATE TABLE IF NOT EXISTS `{namespace}`.`{table}_i18n` (\n'
u' `locale` CHARACTER VARYING(5),\n'
u' `{table}_id` {id_type} REFERENCES `{namespace}`.`{table}` (`{id_field}`) ON DELETE CASCADE,\n'
u' CONSTRAINT `{table}_i18n_pkey` PRIMARY KEY (`locale`, `{table}_id`)\n'
u') WITH (OIDS=FALSE);'
u'ALTER TABLE `{namespace}`.`{table}_i18n` OWNER TO `{owner}`;'
u'ALTER TABLE `{namespace}`.`{table}_i18n`'
u'{fields};'
).format(**i18n_options)
sql += '\n' + i18n_sql
return sql, data
MySQLStatement.registerAddon('ALTER', ALTER())
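# A rough sketch of the statement shape this addon emits for plain (non-i18n)
# columns, assuming the 'ADD COLUMN' addon renders fragments such as
# "ADD COLUMN `nickname` ..." (all names here are hypothetical):
#
#   ALTER TABLE `my_db`.`users`
#       ADD COLUMN `nickname` ...;
#
# i18n columns are instead routed to a companion `<table>_i18n` table, created
# on demand and keyed by (locale, <table>_id).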
|
lgpl-3.0
|
jakesyl/pychess
|
lib/pychess/Utils/GameModel.py
|
20
|
35722
|
from __future__ import absolute_import
from collections import defaultdict
from threading import RLock, Thread
import traceback
import datetime
from gi.repository import GObject
from pychess.compat import Queue, Empty, StringIO
from pychess.Savers.ChessFile import LoadingError
from pychess.Players.Player import PlayerIsDead, TurnInterrupt
from pychess.System import fident
from pychess.System.protoopen import protoopen, protosave, isWriteable
from pychess.System.Log import log
from pychess.Utils.Move import Move, toSAN
from pychess.Utils.eco import get_eco
from pychess.Utils.TimeModel import TimeModel
from pychess.Variants.normal import NormalChess
from pychess.Variants import variants
from .logic import getStatus, isClaimableDraw, playerHasMatingMaterial
from .const import *
def undolocked (f):
def newFunction(*args, **kw):
self = args[0]
log.debug("undolocked: adding func to queue: %s %s %s" % \
(repr(f), repr(args), repr(kw)))
self.undoQueue.put((f, args, kw))
locked = self.undoLock.acquire(blocking=False)
if locked:
try:
while True:
try:
func, args, kw = self.undoQueue.get_nowait()
log.debug("undolocked: running queued func: %s %s %s" % \
(repr(func), repr(args), repr(kw)))
func(*args, **kw)
except Empty:
break
finally:
self.undoLock.release()
return newFunction
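# Note on the pattern above: every decorated call is put on undoQueue, but only
# the caller that manages to take undoLock drains the queue. Concurrent undo
# requests are thereby serialized, while callers that lose the race return
# immediately instead of blocking.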
def inthread (f):
def newFunction(*args, **kwargs):
t = Thread(target=f, name=fident(f), args=args, kwargs=kwargs)
t.daemon = True
t.start()
return newFunction
class GameModel (GObject.GObject, Thread):
""" GameModel contains all available data on a chessgame.
        It also has the task of controlling players' actions and moves """
__gsignals__ = {
# game_started is emitted when control is given to the players for the
# first time. Notice this is after players.start has been called.
"game_started": (GObject.SignalFlags.RUN_FIRST, None, ()),
# game_changed is emitted when a move has been made.
"game_changed": (GObject.SignalFlags.RUN_FIRST, None, ()),
        # moves_undoing is emitted when an undoMoves call has been accepted, but
        # before any work has been done to execute it.
"moves_undoing": (GObject.SignalFlags.RUN_FIRST, None, (int,)),
# moves_undone is emitted after n moves have been undone in the
# gamemodel and the players.
"moves_undone": (GObject.SignalFlags.RUN_FIRST, None, (int,)),
# game_unended is emitted if moves have been undone, such that the game
# which had previously ended, is now again active.
"game_unended": (GObject.SignalFlags.RUN_FIRST, None, ()),
# game_loading is emitted if the GameModel is about to load in a chess
# game from a file.
"game_loading": (GObject.SignalFlags.RUN_FIRST, None, (object,)),
# game_loaded is emitted after the chessformat handler has loaded in
# all the moves from a file to the game model.
"game_loaded": (GObject.SignalFlags.RUN_FIRST, None, (object,)),
# game_saved is emitted in the end of model.save()
"game_saved": (GObject.SignalFlags.RUN_FIRST, None, (str,)),
        # game_ended is emitted if the model's state has been changed to an
# "ended state"
"game_ended": (GObject.SignalFlags.RUN_FIRST, None, (int,)),
# game_terminated is emitted if the game was terminated. That is all
# players and clocks were stopped, and it is no longer possible to
# resume the game, even by undo.
"game_terminated": (GObject.SignalFlags.RUN_FIRST, None, ()),
# game_paused is emitted if the game was successfully paused.
"game_paused": (GObject.SignalFlags.RUN_FIRST, None, ()),
# game_paused is emitted if the game was successfully resumed from a
# pause.
"game_resumed": (GObject.SignalFlags.RUN_FIRST, None, ()),
# action_error is currently only emitted by ICGameModel, in the case
# the "web model" didn't accept the action you were trying to do.
"action_error": (GObject.SignalFlags.RUN_FIRST, None, (object, int)),
# players_changed is emitted if the players list was changed.
"players_changed": (GObject.SignalFlags.RUN_FIRST, None, ()),
"analyzer_added": (GObject.SignalFlags.RUN_FIRST, None, (object, str)),
"analyzer_removed": (GObject.SignalFlags.RUN_FIRST, None, (object, str)),
"analyzer_paused": (GObject.SignalFlags.RUN_FIRST, None, (object, str)),
"analyzer_resumed": (GObject.SignalFlags.RUN_FIRST, None, (object, str)),
# opening_changed is emitted if the move changed the opening.
"opening_changed": (GObject.SignalFlags.RUN_FIRST, None, ()),
# variation_added is emitted if a variation was added.
"variation_added": (GObject.SignalFlags.RUN_FIRST, None, (object,object)),
# variation_extended is emitted if a new move was added to a variation.
"variation_extended": (GObject.SignalFlags.RUN_FIRST, None, (object,object)),
        # analysis_changed is emitted if the analysis scores were changed.
"analysis_changed": (GObject.SignalFlags.RUN_FIRST, None, (int,)),
}
def __init__ (self, timemodel=None, variant=NormalChess):
GObject.GObject.__init__(self)
Thread.__init__(self, name=fident(self.run))
self.daemon = True
self.variant = variant
self.boards = [variant.board(setup=True)]
self.moves = []
self.scores = {}
self.players = []
self.gameno = None
self.variations = [self.boards]
self.status = WAITING_TO_START
self.reason = UNKNOWN_REASON
if timemodel is None:
self.timemodel = TimeModel()
else:
self.timemodel = timemodel
self.connections = defaultdict(list) # mainly for IC subclasses
now = datetime.datetime.now()
self.tags = {
"Event": _("Local Event"),
"Site": _("Local Site"),
"Round": 1,
"Year": now.year,
"Month": now.month,
"Day": now.day,
"Time": "%02d:%02d:00" % (now.hour, now.minute),
"Result": "*",
}
self.endstatus = None
self.timed = self.timemodel.secs!=0 or self.timemodel.gain!=0
if self.timed:
self.tags["TimeControl"] = \
"%d+%d" % (self.timemodel.minutes*60, self.timemodel.gain)
# Notice: tags["WhiteClock"] and tags["BlackClock"] are never set
# on the gamemodel, but simply written or read during saving/
# loading from pgn. If you want to know the time left for a player,
# check the time model.
# Keeps track of offers, so that accepts can be spotted
self.offers = {}
# True if the game has been changed since last save
self.needsSave = False
# The uri the current game was loaded from, or None if not a loaded game
self.uri = None
self.spectators = {}
self.applyingMoveLock = RLock()
self.undoLock = RLock()
self.undoQueue = Queue()
def __repr__ (self):
s = "<GameModel at %s" % id(self)
s += " (ply=%s" % self.ply
if len(self.moves) > 0:
s += ", move=%s" % self.moves[-1]
s += ", variant=%s" % self.variant.name.encode('utf-8')
s += ", status=%s, reason=%s" % (str(self.status), str(self.reason))
s += ", players=%s" % str(self.players)
s += ", tags=%s" % str(self.tags)
if len(self.boards) > 0:
s += "\nboard=%s" % self.boards[-1]
return s + ")>"
@property
def display_text (self):
if self.variant == NormalChess and not self.timed:
return "[ " + _("Untimed") + " ]"
else:
t = "[ "
if self.variant != NormalChess:
t += self.variant.name + " "
if self.timed:
t += self.timemodel.display_text + " "
return t + "]"
def setPlayers (self, players):
log.debug("GameModel.setPlayers: starting")
assert self.status == WAITING_TO_START
self.players = players
for player in self.players:
self.connections[player].append(player.connect("offer", self.offerRecieved))
self.connections[player].append(player.connect("withdraw", self.withdrawRecieved))
self.connections[player].append(player.connect("decline", self.declineRecieved))
self.connections[player].append(player.connect("accept", self.acceptRecieved))
self.tags["White"] = str(self.players[WHITE])
self.tags["Black"] = str(self.players[BLACK])
log.debug("GameModel.setPlayers: -> emit players_changed")
self.emit("players_changed")
log.debug("GameModel.setPlayers: <- emit players_changed")
log.debug("GameModel.setPlayers: returning")
def color (self, player):
if player is self.players[0]:
return WHITE
else:
return BLACK
def start_analyzer (self, analyzer_type):
from pychess.Players.engineNest import init_engine
analyzer = init_engine(analyzer_type, self)
if analyzer is None: return
analyzer.setOptionInitialBoard(self)
self.spectators[analyzer_type] = analyzer
self.emit("analyzer_added", analyzer, analyzer_type)
if analyzer_type == HINT:
analyzer.connect("analyze", self.on_analyze)
return analyzer
def remove_analyzer (self, analyzer_type):
try:
analyzer = self.spectators[analyzer_type]
except KeyError:
return
analyzer.end(KILLED, UNKNOWN_REASON)
self.emit("analyzer_removed", analyzer, analyzer_type)
del self.spectators[analyzer_type]
def resume_analyzer (self, analyzer_type):
try:
analyzer = self.spectators[analyzer_type]
except KeyError:
analyzer = self.start_analyzer(analyzer_type)
if analyzer is None: return
analyzer.resume()
analyzer.setOptionInitialBoard(self)
self.emit("analyzer_resumed", analyzer, analyzer_type)
def pause_analyzer (self, analyzer_type):
try:
analyzer = self.spectators[analyzer_type]
except KeyError:
return
analyzer.pause()
self.emit("analyzer_paused", analyzer, analyzer_type)
def restart_analyzer (self, analyzer_type):
self.remove_analyzer(analyzer_type)
self.start_analyzer(analyzer_type)
if self.isPlayingICSGame():
self.pause_analyzer(analyzer_type)
def on_analyze(self, analyzer, analysis):
if analysis and analysis[0] is not None:
pv, score, depth = analysis[0]
ply = analyzer.board.ply
if score != None:
self.scores[ply] = (pv, score, depth)
self.emit("analysis_changed", ply)
def setOpening(self):
if self.ply > 40:
return
if self.ply > 0:
opening = get_eco(self.getBoardAtPly(self.ply).board.hash)
else:
opening = ("", "", "")
if opening is not None:
self.tags["ECO"] = opening[0]
self.tags["Opening"] = opening[1]
self.tags["Variation"] = opening[2]
self.emit("opening_changed")
############################################################################
# Board stuff #
############################################################################
def _get_ply (self):
return self.boards[-1].ply
ply = property(_get_ply)
def _get_lowest_ply (self):
return self.boards[0].ply
lowply = property(_get_lowest_ply)
def _get_curplayer (self):
try:
return self.players[self.getBoardAtPly(self.ply).color]
except IndexError:
log.error("%s %s" % (self.players, self.getBoardAtPly(self.ply).color))
raise
curplayer = property(_get_curplayer)
def _get_waitingplayer (self):
try:
return self.players[1 - self.getBoardAtPly(self.ply).color]
except IndexError:
log.error("%s %s" % (self.players, 1 - self.getBoardAtPly(self.ply).color))
raise
waitingplayer = property(_get_waitingplayer)
def _plyToIndex (self, ply):
index = ply - self.lowply
if index < 0:
raise IndexError("%s < %s\n" % (ply, self.lowply))
return index
def getBoardAtPly (self, ply, variation=0):
try:
return self.variations[variation][self._plyToIndex(ply)]
except IndexError:
log.error("%d\t%d\t%d\t%d\t%d" % (self.lowply, ply, self.ply, variation, len(self.variations)))
raise
def getMoveAtPly (self, ply, variation=0):
try:
return Move(self.variations[variation][self._plyToIndex(ply)+1].board.lastMove)
except IndexError:
log.error("%d\t%d\t%d\t%d\t%d" % (self.lowply, ply, self.ply, variation, len(self.variations)))
raise
def hasLocalPlayer (self):
if self.players[0].__type__ == LOCAL or self.players[1].__type__ == LOCAL:
return True
else:
return False
def isLocalGame (self):
if self.players[0].__type__ != REMOTE and self.players[1].__type__ != REMOTE:
return True
else:
return False
def isObservationGame (self):
return not self.hasLocalPlayer()
def isEngine2EngineGame (self):
if self.players[0].__type__ == ARTIFICIAL and self.players[1].__type__ == ARTIFICIAL:
return True
else:
return False
def isPlayingICSGame(self):
if self.players and self.status in (WAITING_TO_START, PAUSED, RUNNING):
if self.players[0].__type__ == LOCAL and self.players[1].__type__ == REMOTE or \
self.players[1].__type__ == LOCAL and self.players[0].__type__ == REMOTE:
return True
return False
def isLoadedGame(self):
return self.gameno is not None
############################################################################
# Offer management #
############################################################################
def offerRecieved (self, player, offer):
log.debug("GameModel.offerRecieved: offerer=%s %s" % (repr(player), offer))
if player == self.players[WHITE]:
opPlayer = self.players[BLACK]
else: opPlayer = self.players[WHITE]
if offer.type == HURRY_ACTION:
opPlayer.hurry()
elif offer.type == CHAT_ACTION:
opPlayer.putMessage(offer.param)
elif offer.type == RESIGNATION:
if player == self.players[WHITE]:
self.end(BLACKWON, WON_RESIGN)
else: self.end(WHITEWON, WON_RESIGN)
elif offer.type == FLAG_CALL:
assert self.timed
if self.timemodel.getPlayerTime(1-player.color) <= 0:
if self.timemodel.getPlayerTime(player.color) <= 0:
self.end(DRAW, DRAW_CALLFLAG)
elif not playerHasMatingMaterial(self.boards[-1], player.color):
if player.color == WHITE:
self.end(DRAW, DRAW_WHITEINSUFFICIENTANDBLACKTIME)
else:
self.end(DRAW, DRAW_BLACKINSUFFICIENTANDWHITETIME)
else:
if player == self.players[WHITE]:
self.end(WHITEWON, WON_CALLFLAG)
else:
self.end(BLACKWON, WON_CALLFLAG)
else:
player.offerError(offer, ACTION_ERROR_NOT_OUT_OF_TIME)
elif offer.type == DRAW_OFFER and isClaimableDraw(self.boards[-1]):
reason = getStatus(self.boards[-1])[1]
self.end(DRAW, reason)
elif offer.type == TAKEBACK_OFFER and offer.param < self.lowply:
player.offerError(offer, ACTION_ERROR_TOO_LARGE_UNDO)
elif offer.type in OFFERS:
if offer not in self.offers:
log.debug("GameModel.offerRecieved: doing %s.offer(%s)" % \
(repr(opPlayer), offer))
self.offers[offer] = player
opPlayer.offer(offer)
# If we updated an older offer, we want to delete the old one
for offer_ in self.offers.keys():
if offer.type == offer_.type and offer != offer_:
del self.offers[offer_]
def withdrawRecieved (self, player, offer):
log.debug("GameModel.withdrawRecieved: withdrawer=%s %s" % \
(repr(player), offer))
if player == self.players[WHITE]:
opPlayer = self.players[BLACK]
else: opPlayer = self.players[WHITE]
if offer in self.offers and self.offers[offer] == player:
del self.offers[offer]
opPlayer.offerWithdrawn(offer)
else:
player.offerError(offer, ACTION_ERROR_NONE_TO_WITHDRAW)
def declineRecieved (self, player, offer):
log.debug("GameModel.declineRecieved: decliner=%s %s" % (repr(player), offer))
if player == self.players[WHITE]:
opPlayer = self.players[BLACK]
else: opPlayer = self.players[WHITE]
if offer in self.offers and self.offers[offer] == opPlayer:
del self.offers[offer]
log.debug("GameModel.declineRecieved: declining %s" % offer)
opPlayer.offerDeclined(offer)
else:
player.offerError(offer, ACTION_ERROR_NONE_TO_DECLINE)
def acceptRecieved (self, player, offer):
log.debug("GameModel.acceptRecieved: accepter=%s %s" % (repr(player), offer))
if player == self.players[WHITE]:
opPlayer = self.players[BLACK]
else: opPlayer = self.players[WHITE]
if offer in self.offers and self.offers[offer] == opPlayer:
if offer.type == DRAW_OFFER:
self.end(DRAW, DRAW_AGREE)
elif offer.type == TAKEBACK_OFFER:
log.debug("GameModel.acceptRecieved: undoMoves(%s)" % \
(self.ply - offer.param))
self.undoMoves(self.ply - offer.param)
elif offer.type == ADJOURN_OFFER:
self.end(ADJOURNED, ADJOURNED_AGREEMENT)
elif offer.type == ABORT_OFFER:
self.end(ABORTED, ABORTED_AGREEMENT)
elif offer.type == PAUSE_OFFER:
self.pause()
elif offer.type == RESUME_OFFER:
self.resume()
del self.offers[offer]
else:
player.offerError(offer, ACTION_ERROR_NONE_TO_ACCEPT)
############################################################################
# Data stuff #
############################################################################
def loadAndStart (self, uri, loader, gameno, position):
assert self.status == WAITING_TO_START
uriIsFile = not isinstance(uri, str)
if not uriIsFile:
chessfile = loader.load(protoopen(uri))
else:
chessfile = loader.load(uri)
self.gameno = gameno
self.emit("game_loading", uri)
try:
chessfile.loadToModel(gameno, position, self)
#Postpone error raising to make games loadable to the point of the error
except LoadingError as e:
error = e
else: error = None
if self.players:
self.players[WHITE].setName(self.tags["White"])
self.players[BLACK].setName(self.tags["Black"])
self.emit("game_loaded", uri)
self.needsSave = False
if not uriIsFile:
self.uri = uri
else: self.uri = None
# Even if the game "starts ended", the players should still be moved
# to the last position, so analysis is correct, and a possible "undo"
# will work as expected.
for spectator in self.spectators.values():
spectator.setOptionInitialBoard(self)
for player in self.players:
player.setOptionInitialBoard(self)
if self.timed:
self.timemodel.setMovingColor(self.boards[-1].color)
if self.status == RUNNING:
if self.timed and self.ply >= 2:
self.timemodel.start()
# Store end status from Result tag
if self.status in (DRAW, WHITEWON, BLACKWON):
self.endstatus = self.status
self.status = WAITING_TO_START
self.start()
if error:
raise error
def save (self, uri, saver, append, position=None):
if isinstance(uri, str):
fileobj = protosave(uri, append)
self.uri = uri
else:
fileobj = uri
self.uri = None
saver.save(fileobj, self, position)
self.needsSave = False
self.emit("game_saved", uri)
############################################################################
# Run stuff #
############################################################################
def run (self):
log.debug("GameModel.run: Starting. self=%s" % self)
        # Avoid race condition when self.start is called while we are in self.end
if self.status != WAITING_TO_START:
return
self.status = RUNNING
for player in self.players + list(self.spectators.values()):
player.start()
log.debug("GameModel.run: emitting 'game_started' self=%s" % self)
self.emit("game_started")
# Let GameModel end() itself on games started with loadAndStart()
self.checkStatus()
while self.status in (PAUSED, RUNNING, DRAW, WHITEWON, BLACKWON):
curColor = self.boards[-1].color
curPlayer = self.players[curColor]
if self.timed:
log.debug("GameModel.run: id=%s, players=%s, self.ply=%s: updating %s's time" % \
(id(self), str(self.players), str(self.ply), str(curPlayer)))
curPlayer.updateTime(self.timemodel.getPlayerTime(curColor),
self.timemodel.getPlayerTime(1-curColor))
try:
log.debug("GameModel.run: id=%s, players=%s, self.ply=%s: calling %s.makeMove()" % \
(id(self), str(self.players), self.ply, str(curPlayer)))
if self.ply > self.lowply:
move = curPlayer.makeMove(self.boards[-1],
self.moves[-1],
self.boards[-2])
else: move = curPlayer.makeMove(self.boards[-1], None, None)
log.debug("GameModel.run: id=%s, players=%s, self.ply=%s: got move=%s from %s" % \
(id(self), str(self.players), self.ply, move, str(curPlayer)))
except PlayerIsDead as e:
if self.status in (WAITING_TO_START, PAUSED, RUNNING):
stringio = StringIO()
traceback.print_exc(file=stringio)
error = stringio.getvalue()
log.error("GameModel.run: A Player died: player=%s error=%s\n%s" % (curPlayer, error, e))
if curColor == WHITE:
self.kill(WHITE_ENGINE_DIED)
else: self.kill(BLACK_ENGINE_DIED)
break
except TurnInterrupt:
log.debug("GameModel.run: id=%s, players=%s, self.ply=%s: TurnInterrupt" % \
(id(self), str(self.players), self.ply))
continue
log.debug("GameModel.run: id=%s, players=%s, self.ply=%s: acquiring self.applyingMoveLock" % \
(id(self), str(self.players), self.ply))
assert isinstance(move, Move), "%s" % repr(move)
self.applyingMoveLock.acquire()
try:
log.debug("GameModel.run: id=%s, players=%s, self.ply=%s: applying move=%s" % \
(id(self), str(self.players), self.ply, str(move)))
self.needsSave = True
newBoard = self.boards[-1].move(move)
newBoard.board.prev = self.boards[-1].board
# Variation on next move can exist from the hint panel...
if self.boards[-1].board.next is not None:
newBoard.board.children = self.boards[-1].board.next.children
self.boards = self.variations[0]
self.boards[-1].board.next = newBoard.board
self.boards.append(newBoard)
self.moves.append(move)
if self.timed:
self.timemodel.tap()
self.emit("game_changed")
for spectator in self.spectators.values():
if spectator.board == self.boards[-2]:
spectator.putMove(self.boards[-1], self.moves[-1],
self.boards[-2])
self.setOpening()
self.checkStatus()
finally:
log.debug("GameModel.run: releasing self.applyingMoveLock")
self.applyingMoveLock.release()
def checkStatus (self):
""" Updates self.status so it fits with what getStatus(boards[-1])
would return. That is, if the game is e.g. check mated this will
call mode.end(), or if moves have been undone from an otherwise
ended position, this will call __resume and emit game_unended. """
log.debug("GameModel.checkStatus:")
status, reason = getStatus(self.boards[-1])
if self.endstatus is not None:
self.end(self.endstatus, reason)
return
if status != RUNNING and self.status in (WAITING_TO_START, PAUSED, RUNNING):
engine_engine = self.players[WHITE].__type__ == ARTIFICIAL and self.players[BLACK].__type__ == ARTIFICIAL
if status == DRAW and reason in (DRAW_REPITITION, DRAW_50MOVES):
if engine_engine:
self.end(status, reason)
return
else:
self.end(status, reason)
return
if status != self.status and self.status in UNDOABLE_STATES \
and self.reason in UNDOABLE_REASONS:
self.__resume()
self.status = status
self.reason = UNKNOWN_REASON
self.emit("game_unended")
def __pause (self):
log.debug("GameModel.__pause: %s" % self)
for player in self.players:
player.pause()
if self.timed:
self.timemodel.pause()
@inthread
def pause (self):
""" Players will raise NotImplementedError if they doesn't support
pause. Spectators will be ignored. """
self.applyingMoveLock.acquire()
try:
self.__pause()
self.status = PAUSED
finally:
self.applyingMoveLock.release()
self.emit("game_paused")
def __resume (self):
for player in self.players:
player.resume()
if self.timed:
self.timemodel.resume()
self.emit("game_resumed")
@inthread
def resume (self):
self.applyingMoveLock.acquire()
try:
self.status = RUNNING
self.__resume()
finally:
self.applyingMoveLock.release()
def end (self, status, reason):
if self.status not in UNFINISHED_STATES:
log.info("GameModel.end: Can't end a game that's already ended: %s %s" % (status, reason))
return
if self.status not in (WAITING_TO_START, PAUSED, RUNNING):
self.needsSave = True
#log.debug("Ending a game with status %d for reason %d\n%s" % (status, reason,
# "".join(traceback.format_list(traceback.extract_stack())).strip()))
log.debug("GameModel.end: players=%s, self.ply=%s: Ending a game with status %d for reason %d" % \
(repr(self.players), str(self.ply), status, reason))
self.status = status
self.reason = reason
self.emit("game_ended", reason)
self.__pause()
def kill (self, reason):
log.debug("GameModel.kill: players=%s, self.ply=%s: Killing a game for reason %d\n%s" % \
(repr(self.players), str(self.ply), reason,
"".join(traceback.format_list(traceback.extract_stack())).strip()))
self.status = KILLED
self.reason = reason
for player in self.players:
player.end(self.status, reason)
for spectator in self.spectators.values():
spectator.end(self.status, reason)
if self.timed:
self.timemodel.end()
self.emit("game_ended", reason)
def terminate (self):
log.debug("GameModel.terminate: %s" % self)
if self.status != KILLED:
#self.resume()
for player in self.players:
player.end(self.status, self.reason)
for spectator in self.spectators.values():
spectator.end(self.status, self.reason)
if self.timed:
log.debug("GameModel.terminate: -> timemodel.end()")
self.timemodel.end()
log.debug("GameModel.terminate: <- timemodel.end() %s" % repr(self.timemodel))
self.emit("game_terminated")
############################################################################
# Other stuff #
############################################################################
@inthread
@undolocked
def undoMoves (self, moves):
""" Undo and remove moves number of moves from the game history from
the GameModel, players, and any spectators """
if self.ply < 1 or moves < 1: return
if self.ply - moves < 0:
# There is no way in the current threaded/asynchronous design
# for the GUI to know that the number of moves it requests to takeback
# will still be valid once the undo is actually processed. So, until
# we either add some locking or get a synchronous design, we quietly
# "fix" the takeback request rather than cause AssertionError or IndexError
moves = 1
log.debug("GameModel.undoMoves: players=%s, self.ply=%s, moves=%s, board=%s" % \
(repr(self.players), self.ply, moves, self.boards[-1]))
log.debug("GameModel.undoMoves: acquiring self.applyingMoveLock")
self.applyingMoveLock.acquire()
log.debug("GameModel.undoMoves: self.applyingMoveLock acquired")
try:
self.emit("moves_undoing", moves)
self.needsSave = True
self.boards = self.variations[0]
del self.boards[-moves:]
del self.moves[-moves:]
self.boards[-1].board.next = None
for player in self.players:
player.playerUndoMoves(moves, self)
for spectator in self.spectators.values():
spectator.spectatorUndoMoves(moves, self)
log.debug("GameModel.undoMoves: undoing timemodel")
if self.timed:
self.timemodel.undoMoves(moves)
self.checkStatus()
self.setOpening()
finally:
log.debug("GameModel.undoMoves: releasing self.applyingMoveLock")
self.applyingMoveLock.release()
self.emit("moves_undone", moves)
def isChanged (self):
if self.ply == 0:
return False
if self.needsSave:
return True
if not self.uri or not isWriteable (self.uri):
return True
return False
def add_variation(self, board, moves):
board0 = board
board = board0.clone()
board.board.prev = None
variation = [board]
for move in moves:
new = board.move(move)
if len(variation) == 1:
new.board.prev = board0.board
variation[0].board.next = new.board
else:
new.board.prev = board.board
board.board.next = new.board
variation.append(new)
board = new
if board0.board.next is None:
# If we are in the latest played board, and want to add a variation
            # we have to add a not-yet-played board first,
            # which can hold the variation as its child
from pychess.Utils.lutils.LBoard import LBoard
null_board = LBoard()
null_board.prev = board0.board
board0.board.next = null_board
board0.board.next.children.append([board.board for board in variation])
head = None
for vari in self.variations:
if board0 in vari:
head = vari
break
variation[0] = board0
self.variations.append(head[:board0.ply-self.lowply] + variation)
self.needsSave = True
self.emit("variation_added", board0.board.next.children[-1], board0.board.next)
return self.variations[-1]
def add_move2variation(self, board, move, variationIdx):
new = board.move(move)
new.board.prev = board.board
board.board.next = new.board
# Find the variation (low level lboard list) to append
cur_board = board.board
vari = None
while cur_board.prev is not None:
for child in cur_board.prev.next.children:
if isinstance(child, list) and cur_board in child:
vari = child
break
if vari is None:
cur_board = cur_board.prev
else:
break
vari.append(new.board)
self.variations[variationIdx].append(new)
self.needsSave = True
self.emit("variation_extended", board.board, new.board)
|
gpl-3.0
|
spotify/python-graphwalker
|
graphwalker/test/interactor.py
|
2
|
5356
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
import fcntl
import select
import subprocess
import sys
import os
class Dummy(object):
def __getattr__(self, name):
def f(*al, **kw):
print ('\033[32m%s\033[0m' % name)
f.__name__ = name
return f
def a(self):
pass
def b(self):
pass
def unblock(fd):
    # make the file descriptor non-blocking
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
class Interactor(object):
def spawn(self):
cmd = "%s graphwalker/cli.py" % sys.executable
cmd += " --planner=Interactive"
cmd += " --reporter=Print"
cmd += " --stopcond=Never"
cmd += " graphwalker/test/examples/ab.graphml"
cmd += " graphwalker.test.interactor.Dummy"
self.log('cmd: %r' % cmd)
self.sub = subprocess.Popen(
cmd.split(),
executable=sys.executable,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=os.path.join(os.path.dirname(__file__), '../..'))
unblock(self.sub.stdout.fileno())
unblock(self.sub.stderr.fileno())
def log(self, what):
r = self.context.get('reporter')
if r:
r.log('test', what)
def setup(self, context):
self.context = context
self.timeout = context.get('timeout', 0.01)
self.patience = int(context.get('wait', 2.0) / self.timeout)
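        # patience = number of look() polls that fit into the allowed wait window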
self.last_out = ''
self.last_err = ''
self.spawn()
def push(self, data):
self.look()
self.last_out, self.last_err = '', ''
self.sub.stdin.write(data)
def look(self):
r, w, l = select.select(
[self.sub.stdout, self.sub.stderr], [], [],
self.timeout)
if self.sub.stdout in r:
self.last_out += self.sub.stdout.read()
if self.sub.stderr in r:
self.last_err += self.sub.stderr.read()
return self.last_out, self.last_err
def expect(self, expectation):
def show():
if out:
print ('out' + ' -' * 30)
print ('\n ' + out.strip().replace('\n', '\n '))
if err:
print ('err' + ' -' * 30)
print ('\n ' + err.strip().replace('\n', '\n '))
if out or err:
print ('- -' + ' -' * 30)
if type(expectation) is str:
x = expectation
expectation = lambda out, err: x in out or x in err
for i in range(self.patience):
out, err = self.look()
if expectation(out, err):
show()
return True
else:
show()
raise AssertionError("Did not find expected output")
def v_startup(self):
self.expect(
lambda out, err: (
out.startswith('starting ab-') and
'[stdout]\x1b[32msetup\x1b[0m\n' in out and
err.startswith('== Currently at: Start')))
def v_debugger(self):
self.expect(lambda out, err: err.endswith('(Pdb) '))
def v_vertex_a(self):
self.expect(lambda out, err: '== Currently at: a [' in err)
def v_vertex_b(self):
self.expect(lambda out, err: '== Currently at: b [' in err)
def v_break_set_a(self):
self.push('b\n')
self.expect(
lambda out, err: (
'breakpoint' in err and
'yes' in err and
'graphwalker/test/interactor.py:17' in err))
def v_break_set_b(self):
self.push('b\n')
self.expect(
lambda out, err: (
'breakpoint' in err and
'yes' in err and
'graphwalker/test/interactor.py:20' in err))
def v_actor_debugger(self):
self.push('self\n')
self.expect(
lambda out, err: (
'<graphwalker.test.interactor.Dummy object at 0x' in err and
err.endswith('(Pdb) ')))
def e_enter(self):
self.push('\n')
self.expect('huh?')
def e_follow_0(self):
self.push('0\n')
self.expect(
lambda out, err: (
'Begin step' in out and
'[stdout]\x1b[32mstep_begin\x1b[0m\n' in out and
'\nPassed step' in out))
def e_follow_9(self):
self.push('9\n')
self.expect('huh?')
def e_debug(self):
self.push('d\n')
def e_continue(self):
self.push('c\n')
def e_bug(self):
self.push('hex(3735928559)\n')
self.expect('deadbeef')
def e_bug_set_a(self):
self.push('self.vert=[v for v in self.g.V.values() if v[1]=="a"][0]\n')
def e_bug_set_b(self):
self.push('self.vert=[v for v in self.g.V.values() if v[1]=="b"][0]\n')
def e_bug_break_a(self):
self.push('import %s as x\n' % __name__)
self.push('tbreak x.Dummy.a\n')
def e_bug_break_b(self):
self.push('import %s as x\n' % __name__)
self.push('tbreak x.Dummy.b\n')
def e_jmp_a(self):
self.push('j a\n')
def e_jmp_b(self):
self.push('j b\n')
def e_goto_a(self):
self.push('g a\n')
def e_goto_b(self):
self.push('g b\n')
|
apache-2.0
|
pombredanne/pyjs
|
pyjs/browser.py
|
6
|
26136
|
# Copyright (C) 2009, 2010, Luke Kenneth Casson Leighton <[email protected]>
# Copyright (C) 2010, Sujan Shakya <[email protected]>
import os
import sys
import time
import shutil
from pyjs import linker
from pyjs import translator
if translator.name == 'proto':
required_modules = [
'pyjslib', 'sys', 'imp', 'dynamic', 'pyjamas', 'pyjamas.DOM',
]
early_static_app_libs = ['_pyjs.js']
elif translator.name == 'dict':
required_modules = [
'__builtin__', 'sys', 'imp', 'dynamic', 'pyjamas', 'pyjamas.DOM',
]
early_static_app_libs = []
else:
raise ValueError("unknown translator engine '%s'" % translator.name)
from pyjs import util
from pyjs import options
from cStringIO import StringIO
from optparse import OptionParser, OptionGroup
import pyjs
import re
import traceback
try:
from hashlib import md5
except ImportError:
from md5 import md5
from pprint import pprint, pformat
AVAILABLE_PLATFORMS = ('IE6', 'Opera', 'OldMoz', 'Safari', 'Mozilla')
BOILERPLATE_PATH = os.path.join(os.path.dirname(__file__), 'boilerplate')
APP_HTML_TEMPLATE = """\
<html>
<!-- auto-generated html - You should consider editing and adapting this
to suit your requirements. No doctype used here to force quirks mode; see
wiki for details: http://pyjs.org/wiki/csshellandhowtodealwithit/
-->
<head>
%(css)s
<title>%(title)s</title>
</head>
<body style="background-color:white">
</body>
</html>
"""
class BrowserLinker(linker.BaseLinker):
# parents are specified in most-specific last
platform_parents = {
'mozilla':['browser'],
'ie6':['browser'],
'safari':['browser'],
'oldmoz':['browser'],
'opera':['browser'],
}
def __init__(self, *args, **kwargs):
self.multi_file = kwargs.pop('multi_file', False)
self.cache_buster = kwargs.pop('cache_buster', False)
self.bootstrap_file = kwargs.pop('bootstrap_file', 'bootstrap.js')
self.apploader_file = kwargs.pop('apploader_file', None)
self.public_folder = kwargs.pop('public_folder', 'public')
self.runtime_options = kwargs.pop('runtime_options', [])
super(BrowserLinker, self).__init__(*args, **kwargs)
def visit_start(self):
super(BrowserLinker, self).visit_start()
self.boilerplate_path = None
self.early_static_app_libs += early_static_app_libs
self.merged_public = set()
self.app_files = {}
self.renamed_libs = {}
def visit_end_platform(self, platform):
if not platform:
return
if self.cache_buster:
# rename the files to their hashed equivalents
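            # e.g. (hypothetical name) output/pyjslib.js becomes
            # output/pyjslib.<md5-of-contents>.js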
renamed = []
for p in self.done[platform]:
if p in self.renamed_libs:
new_p = self.renamed_libs[p]
else:
f = open(p)
md5sum = md5(f.read()).hexdigest()
f.close()
name, ext = os.path.splitext(p)
new_p = name + '.' + md5sum + ext
# if we are keeping all intermediate files
if self.keep_lib_files:
                        # copy the file to its hashed equivalent
shutil.copyfile(p, new_p)
else: # keep new file only
# clean out any previous version of the hashed file
if os.access(new_p, os.F_OK):
os.unlink(new_p)
os.rename(p, new_p)
self.renamed_libs[p] = new_p
renamed.append(new_p)
self.done[platform] = renamed
self.app_files[platform] = self._generate_app_file(platform)
def visit_end(self):
self._create_app_html()
self._create_nocache_html()
if not self.keep_lib_files:
for fname in self.remove_files:
if fname.find(self.output) == 0:
os.unlink(fname)
def merge_resources(self, dir_name):
if not dir_name in self.merged_public:
public_folder = os.path.join(dir_name, self.public_folder)
if os.path.exists(public_folder) and os.path.isdir(public_folder):
util.copytree_exists(public_folder,
self.output)
self.merged_public.add(dir_name)
for libs in [self.js_libs, self.dynamic_js_libs,
self.static_js_libs, self.early_static_js_libs, self.late_static_js_libs]:
for lib in libs:
if not lib in self.merged_public:
for path in self.path:
if os.path.exists(lib) and os.path.isfile(lib):
util.copy_exists(lib, os.path.join(self.output, os.path.basename(lib)))
self.merged_public.add(lib)
break
# merge all output/css.d/* files into one output/base.css file
css_d_path = os.path.join(self.output, 'css.d')
base_css_path = os.path.join(self.output, 'base.css')
if os.path.exists(css_d_path):
hdr = '/* name: %s\n * md5: %s\n */\n'
with open(base_css_path, 'w') as base_css:
for root, dirs, files in os.walk(css_d_path):
docroot = root.replace(root, '', 1).strip('/')
for frag in files:
frag_path = os.path.join(root, frag)
with open(frag_path) as fd:
csstxt = fd.read()
base_css.write(hdr % (
os.path.relpath(frag_path, self.output),
md5(csstxt).hexdigest(),
))
base_css.write(csstxt)
def find_boilerplate(self, name):
if not self.top_module_path:
raise RuntimeError('Top module not found %r' % self.top_module)
if not self.boilerplate_path:
self.boilerplate_path = [BOILERPLATE_PATH]
module_bp_path = os.path.join(
os.path.dirname(self.top_module_path), 'boilerplate')
if os.path.isdir(module_bp_path):
self.boilerplate_path.insert(0, module_bp_path)
for p in self.boilerplate_path:
bp = os.path.join(p, name)
if os.path.exists(bp):
return bp
raise RuntimeError("Boilerplate not found %r" % name)
def read_boilerplate(self, name):
f = file(self.find_boilerplate(name))
res = f.read()
f.close()
return res
def unique_list_values(self, lst):
keys = {}
for k in lst:
keys[k] = 1
return keys.keys()
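        # (dedupes via a dict, so the original ordering of the values is not preserved)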
def _generate_app_file(self, platform):
# TODO: cache busting
template = self.read_boilerplate('all.cache.html')
name_parts = [self.top_module, platform, 'cache.html']
done = self.done[platform]
len_ouput_dir = len(self.output)+1
app_name = self.top_module
platform_name = platform.lower()
dynamic = 0,
app_headers = ''
available_modules = self.unique_list_values(self.visited_modules[platform])
early_static_app_libs = [] + self.early_static_app_libs
static_app_libs = []
dynamic_app_libs = []
dynamic_js_libs = [] + self.dynamic_js_libs
static_js_libs = [] + self.static_js_libs
early_static_js_libs = [] + self.early_static_js_libs
late_static_js_libs = [] + self.late_static_js_libs
dynamic_modules = []
not_unlinked_modules = [re.compile(m[1:]) for m in self.unlinked_modules if m[0] == '!']
for m in required_modules:
not_unlinked_modules.append(re.compile('^%s$' % m))
unlinked_modules = [re.compile(m) for m in self.unlinked_modules if m[0] != '!' and m not in not_unlinked_modules]
def static_code(libs, msg = None):
code = []
for lib in libs:
fname = lib
if not os.path.isfile(fname):
fname = os.path.join(self.output, lib)
if not os.path.isfile(fname):
raise RuntimeError('File not found %r' % lib)
if fname[len_ouput_dir:] == self.output:
name = fname[len_ouput_dir:]
else:
name = os.path.basename(lib)
code.append('<script type="text/javascript"><!--')
if not msg is None:
code.append("/* start %s: %s */" % (msg, name))
f = file(fname)
code.append(f.read())
if not msg is None:
code.append("/* end %s */" % (name,))
code.append("""--></script>""")
self.remove_files[fname] = True
fname = fname.split('.')
if fname[-2] == '__%s__' % platform_name:
del fname[-2]
fname = '.'.join(fname)
if os.path.isfile(fname):
self.remove_files[fname] = True
return "\n".join(code)
def js_modname(path):
return 'js@'+os.path.basename(path)+'.'+md5(path).hexdigest()
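        # e.g. (hypothetical path) js_modname('output/jquery.js') gives
        # 'js@jquery.js.' followed by the md5 hex digest of the path string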
def skip_unlinked(lst):
new_lst = []
pltfrm = '__%s__' % platform_name
for path in lst:
fname = os.path.basename(path).rpartition(pyjs.MOD_SUFFIX)[0]
frags = fname.split('.')
# TODO: do not combine module chunks until we write the file
if self.cache_buster and len(frags[-1])==32 and len(frags[-1].strip('0123456789abcdef'))==0:
frags.pop()
if frags[-1] == pltfrm:
frags.pop()
fname = '.'.join(frags)
in_not_unlinked_modules = False
for m in not_unlinked_modules:
if m.match(fname):
in_not_unlinked_modules = True
new_lst.append(path)
break
if not in_not_unlinked_modules:
in_unlinked_modules = False
for m in unlinked_modules:
if m.match(fname):
in_unlinked_modules = True
if fname in available_modules:
available_modules.remove(fname)
if not in_unlinked_modules:
new_lst.append(path)
return new_lst
if self.multi_file:
dynamic_js_libs = self.unique_list_values(dynamic_js_libs + [m for m in list(self.js_libs) if not m in static_js_libs])
dynamic_app_libs = self.unique_list_values([m for m in done if not m in early_static_app_libs])
else:
static_js_libs = self.unique_list_values(static_js_libs + [m for m in list(self.js_libs) if not m in dynamic_js_libs])
static_app_libs = self.unique_list_values([m for m in done if not m in early_static_app_libs])
dynamic_js_libs = skip_unlinked(dynamic_js_libs)
dynamic_app_libs = skip_unlinked(dynamic_app_libs)
static_js_libs = skip_unlinked(static_js_libs)
static_app_libs = skip_unlinked(static_app_libs)
dynamic_modules = self.unique_list_values(available_modules + [js_modname(lib) for lib in dynamic_js_libs])
available_modules = self.unique_list_values(available_modules + early_static_app_libs + dynamic_modules)
if len(dynamic_modules) > 0:
dynamic_modules = "['" + "','".join(dynamic_modules) + "']"
else:
dynamic_modules = "[]"
appscript = "<script><!--\n$wnd['__pygwt_modController']['init']($pyjs['appname'], window)\n$wnd['__pygwt_modController']['load']($pyjs['appname'], [\n'%s'\n])\n--></script>"
jsscript = """<script type="text/javascript" src="%(path)s" onload="$pyjs['script_onload']('%(modname)s')" onreadystatechange="$pyjs['script_onreadystate']('%(modname)s')"></script>"""
dynamic_app_libs = appscript % "',\n'".join([lib[len_ouput_dir:].replace('\\', '/') for lib in dynamic_app_libs])
dynamic_js_libs = '\n'.join([jsscript % {'path': lib, 'modname': js_modname(lib)} for lib in dynamic_js_libs])
early_static_app_libs = static_code(early_static_app_libs)
static_app_libs = static_code(static_app_libs)
early_static_js_libs = static_code(early_static_js_libs, "javascript lib")
static_js_libs = static_code(static_js_libs, "javascript lib")
late_static_js_libs = static_code(late_static_js_libs, "javascript lib")
setoptions = "\n".join([("$pyjs['options']['%s'] = %s;" % (n, v)).lower() for n,v in self.runtime_options])
file_contents = template % locals()
if self.cache_buster:
md5sum = md5(file_contents).hexdigest()
name_parts.insert(2, md5sum)
out_path = os.path.join(self.output, '.'.join((name_parts)))
out_file = file(out_path, 'w')
out_file.write(file_contents)
out_file.close()
return out_path
def _create_nocache_html(self):
# nocache
template = self.read_boilerplate('home.nocache.html')
out_path = os.path.join(self.output, self.top_module + ".nocache.html")
select_tmpl = """O(["true","%s"],"%s");\n"""
script_selectors = StringIO()
for platform in self.platforms:
cache_html = os.path.basename(self.app_files[platform])
sel = select_tmpl % (platform, cache_html)
script_selectors.write(sel)
out_file = file(out_path, 'w')
out_file.write(template % dict(
app_name = self.top_module,
script_selectors = script_selectors.getvalue()
))
out_file.close()
def _create_app_html(self):
""" Checks if a base HTML-file is available in the Pyjamas
output directory, and injects the bootstrap loader script tag.
If the HTML-file isn't available, it will be created.
If a CSS-file with the same name is available
in the output directory, a reference to this CSS-file
is included.
If no CSS-file is found, this function will look for a special
CSS-file in the output directory, with the name
"pyjamas_default.css", and if found it will be referenced
in the generated HTML-file.
"""
html_output_filename = os.path.join(self.output,
self.top_module + '.html')
if self.apploader_file is None:
file_name = html_output_filename
else:
file_name = self.apploader_file
if os.path.exists(file_name):
fh = open(file_name, 'r')
base_html = fh.read()
fh.close()
created = 0
else:
title = self.top_module + ' (Pyjamas Auto-Generated HTML file)'
link_tag = '<link rel="stylesheet" href="%s">'
module_css = self.top_module + '.css'
default_css = 'pyjamas_default.css'
if os.path.exists(os.path.join(self.output, module_css)):
css = link_tag % module_css
elif os.path.exists(os.path.join(self.output, default_css)):
css = link_tag % default_css
else:
css = ''
base_html = APP_HTML_TEMPLATE % { 'title': title, 'css': css }
created = 1
# replace (or add) meta tag pygwt:module
meta_tag_head = '<meta name="pygwt:module"'
meta_tag_tail = ' content="%s">' % self.top_module
meta_found = base_html.find(meta_tag_head)
if meta_found > -1:
meta_stop = base_html.find('>', meta_found + len(meta_tag_head))
else:
head_end = '</head>'
meta_found = base_html.find(head_end)
meta_stop = meta_found - 1
meta_tag_tail += '\n'
if meta_found == -1:
raise RuntimeError("Can't inject module meta tag. " +\
"No tag %(tag)s found in %(file)s" %\
{ 'tag': head_end, 'file': file_name })
base_html = base_html[:meta_found] \
+ meta_tag_head + meta_tag_tail \
+ base_html[meta_stop + 1:]
# inject bootstrap script tag and history iframe
script_tag = '<script type="text/javascript" src="%s"></script>' % self.bootstrap_file
iframe_tag = '<iframe id="__pygwt_historyFrame" style="display:none;"></iframe>'
body_end = '</body>'
if base_html.find(body_end) == -1:
raise RuntimeError("Can't inject bootstrap loader. " + \
"No tag %(tag)s found in %(file)s" % \
{ 'tag': body_end, 'file': file_name })
base_html = base_html.replace(body_end,
script_tag +'\n'+ iframe_tag +'\n'+ body_end)
fh = open(html_output_filename, 'w')
fh.write(base_html)
fh.close()
return created
MODIFIED_TIME = {}
def is_modified(path):
current_mtime = os.path.getmtime(path)
if current_mtime == MODIFIED_TIME.get(path):
return False
else:
MODIFIED_TIME[path] = current_mtime
print('mtime changed for %s.' % path)
return True
def serve(path):
print("\nMonitoring file modifications in %s ..." % \
os.path.abspath(os.curdir))
def build(top_module, pyjs, options, app_platforms,
runtime_options, args):
print("Building: %s\nPYJSPATH: %s" % (top_module, pformat(pyjs.path)))
translator_arguments= translator.get_compile_options(options)
l = BrowserLinker(args,
output=options.output,
platforms=app_platforms,
path=pyjs.path,
js_libs=options.js_includes,
unlinked_modules=options.unlinked_modules,
keep_lib_files=options.keep_lib_files,
compile_inplace=options.compile_inplace,
translator_arguments=translator_arguments,
multi_file=options.multi_file,
cache_buster=options.cache_buster,
bootstrap_file=options.bootstrap_file,
apploader_file=options.apploader_file,
public_folder=options.public_folder,
runtime_options=runtime_options,
list_imports=options.list_imports,
)
l()
if not options.list_imports:
print ("Built to :", os.path.abspath(options.output))
return
print("Dependencies")
for f, deps in l.dependencies.items():
print("%s\n%s" % (f, '\n'.join(map(lambda x: "\t%s" % x, deps))))
    print('')
print("Visited Modules")
for plat, deps in l.visited_modules.items():
print("%s\n%s" % (plat, '\n'.join(map(lambda x: "\t%s" % x, deps))))
    print('')
def build_script():
usage = """usage: %prog [OPTIONS...] APPLICATION [MODULE...]
Command line interface to the pyjs.org suite: Python Application -> AJAX Application.
APPLICATION is the translation entry point; it MUST be importable by the toolchain.
MODULE(s) will also be translated, if available; they MUST be importable by the toolchain."""
global app_platforms
parser = OptionParser(usage=usage, epilog='For more information, see http://pyjs.org/')
parser_group_builder = OptionGroup(parser, 'Builder',
'Configures the high-level properties of current '
'command and final project assembly.')
parser_group_trans = OptionGroup(parser, 'Translator',
'Configures the semantics/expectations of '
'application code. Each --enable-* implies '
'--disable-*. Groups modify several options at once.')
parser_group_linker = OptionGroup(parser, 'Linker',
'Configures the includes/destination of application '
'code, static resources, and project support files.')
add_builder_options(parser_group_builder)
translator.add_compile_options(parser_group_trans)
linker.add_linker_options(parser_group_linker)
parser.add_option_group(parser_group_builder)
parser.add_option_group(parser_group_trans)
parser.add_option_group(parser_group_linker)
options, _args = parser.parse_args()
args = []
for a in _args:
if a.lower().endswith('.py'):
args.append(a[:-3])
else:
args.append(a)
if options.log_level is not None:
import logging
logging.basicConfig(level=options.log_level)
if len(args) < 1:
parser.error("incorrect number of arguments in %s" % repr((sys.argv, options, _args)))
top_module = args[0]
for d in options.library_dirs:
pyjs.path.append(os.path.abspath(d))
if options.platforms:
app_platforms = options.platforms.lower().split(',')
if options.multi_file and options.compile_inplace:
options.compile_inplace = False
runtime_options = []
runtime_options.append(("arg_ignore", options.function_argument_checking))
runtime_options.append(("arg_count", options.function_argument_checking))
runtime_options.append(("arg_is_instance", options.function_argument_checking))
runtime_options.append(("arg_instance_type", options.function_argument_checking))
runtime_options.append(("arg_kwarg_dup", options.function_argument_checking))
runtime_options.append(("arg_kwarg_unexpected_keyword", options.function_argument_checking))
runtime_options.append(("arg_kwarg_multiple_values", options.function_argument_checking))
runtime_options.append(("dynamic_loading", (len(options.unlinked_modules)>0)))
build(top_module, pyjs, options, app_platforms,
runtime_options, args)
if not options.auto_build:
sys.exit(0)
# autobuild starts here: loops round the current directory file structure
# looking for file modifications. extra files in the public folder are
# copied to output, verbatim (without a recompile) but changes to python
# files result in a recompile with the exact same compile options.
first_loop = True
public_dir = options.public_folder
output_dir = options.output
serve(top_module)
while True:
for root, dirs, files in os.walk('.'):
if root[2:].startswith(output_dir):
continue
if root[2:].startswith(public_dir):
for filename in files:
file_path = os.path.join(root, filename)
if is_modified(file_path) and not first_loop:
dest_path = output_dir
dest_path += file_path.split(public_dir, 1)[1]
dest_dir = os.path.dirname(dest_path)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
print('Copying %s to %s' % (file_path, dest_path))
shutil.copy(file_path, dest_path)
else:
for filename in files:
if os.path.splitext(filename)[1] in ('.py',):
file_path = os.path.join(root, filename)
if is_modified(file_path) and not first_loop:
try:
build(top_module, pyjs, options,
app_platforms, runtime_options, args)
except Exception:
traceback.print_exception(*sys.exc_info())
break
first_loop = False
time.sleep(1)
mappings = options.Mappings()
add_builder_options = mappings.bind
get_builder_options = mappings.link
mappings.log_level = (
['-v', '--verbosity'],
['-l', '--log-level'],
[],
dict(help='numeric Python logging level',
type='int',
metavar='LEVEL')
)
mappings.platforms = (
['-P', '--platforms'],
[],
[],
dict(help='comma-separated list of target platforms',
default=(','.join(AVAILABLE_PLATFORMS)))
)
mappings.list_imports = (
['--list-imports'],
['-i'],
[],
dict(help='list import dependencies (no translation)',
default=False)
)
mappings.apploader_file = (
['--frame'],
['--apploader-file'],
[],
dict(help='application html loader file',
type='string',
metavar='FILE',
default=None)
)
mappings.bootstrap_file = (
['--bootloader'],
['--bootstrap-file'],
[],
dict(help='application initial JS import/bootstrap code',
metavar='FILE',
default='bootstrap.js')
)
mappings.public_folder = (
['--resources'],
['--public-folder'],
[],
dict(help='application resource directory; contents copied to output dir',
metavar='PATH',
default='public')
)
mappings.auto_build = (
['--enable-rebuilds'],
['--auto-build', '-A'],
[],
dict(help='continuously rebuild on file changes',
default=False)
)
mappings.cache_buster = (
['--enable-signatures'],
['--cache-buster', '-c'],
[],
dict(help='enable browser cache-busting; append md5 hashes to filenames',
default=False)
)
mappings.compile_inplace = (
['--enable-compile-inplace'],
['--compile-inplace'],
[],
    dict(help='store output JS in the same place as the Python source',
default=False)
)
mappings.keep_lib_files = (
['--enable-preserve-libs'],
['--keep-lib-files'],
[],
dict(help='do not remove intermediate compiled JS libs',
default=True)
)
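# --- illustrative invocation (not part of the original module) ------------------
# The mappings above pair a "builder" spelling of each flag (e.g. --enable-rebuilds)
# with its canonical option name (e.g. --auto-build).  A hedged command-line sketch,
# assuming the usual pyjsbuild console entry point wraps build_script(), and using
# a hypothetical application file Hello.py:
#
#   pyjsbuild --auto-build --cache-buster --frame=Hello.html Hello.py
#
# The flag names come from the mappings above; the entry-point name and file names
# are assumptions for illustration only.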
|
apache-2.0
|
azoft-dev-team/imagrium
|
env/Lib/encodings/cp424.py
|
593
|
12311
|
""" Python Character Mapping Codec cp424 generated from 'MAPPINGS/VENDORS/MISC/CP424.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp424',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> SELECT
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> REQUIRED NEW LINE
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> GRAPHIC ESCAPE
u'\x8d' # 0x09 -> SUPERSCRIPT
u'\x8e' # 0x0A -> REPEAT
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> RESTORE/ENABLE PRESENTATION
u'\x85' # 0x15 -> NEW LINE
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> PROGRAM OPERATOR COMMUNICATION
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> UNIT BACK SPACE
u'\x8f' # 0x1B -> CUSTOMER USE ONE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> DIGIT SELECT
u'\x81' # 0x21 -> START OF SIGNIFICANCE
u'\x82' # 0x22 -> FIELD SEPARATOR
u'\x83' # 0x23 -> WORD UNDERSCORE
u'\x84' # 0x24 -> BYPASS OR INHIBIT PRESENTATION
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> SET ATTRIBUTE
u'\x89' # 0x29 -> START FIELD EXTENDED
u'\x8a' # 0x2A -> SET MODE OR SWITCH
u'\x8b' # 0x2B -> CONTROL SEQUENCE PREFIX
u'\x8c' # 0x2C -> MODIFY FIELD ATTRIBUTE
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> <reserved>
u'\x91' # 0x31 -> <reserved>
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> INDEX RETURN
u'\x94' # 0x34 -> PRESENTATION POSITION
u'\x95' # 0x35 -> TRANSPARENT
u'\x96' # 0x36 -> NUMERIC BACKSPACE
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> SUBSCRIPT
u'\x99' # 0x39 -> INDENT TABULATION
u'\x9a' # 0x3A -> REVERSE FORM FEED
u'\x9b' # 0x3B -> CUSTOMER USE THREE
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> <reserved>
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\u05d0' # 0x41 -> HEBREW LETTER ALEF
u'\u05d1' # 0x42 -> HEBREW LETTER BET
u'\u05d2' # 0x43 -> HEBREW LETTER GIMEL
u'\u05d3' # 0x44 -> HEBREW LETTER DALET
u'\u05d4' # 0x45 -> HEBREW LETTER HE
u'\u05d5' # 0x46 -> HEBREW LETTER VAV
u'\u05d6' # 0x47 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0x48 -> HEBREW LETTER HET
u'\u05d8' # 0x49 -> HEBREW LETTER TET
u'\xa2' # 0x4A -> CENT SIGN
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'|' # 0x4F -> VERTICAL LINE
u'&' # 0x50 -> AMPERSAND
u'\u05d9' # 0x51 -> HEBREW LETTER YOD
u'\u05da' # 0x52 -> HEBREW LETTER FINAL KAF
u'\u05db' # 0x53 -> HEBREW LETTER KAF
u'\u05dc' # 0x54 -> HEBREW LETTER LAMED
u'\u05dd' # 0x55 -> HEBREW LETTER FINAL MEM
u'\u05de' # 0x56 -> HEBREW LETTER MEM
u'\u05df' # 0x57 -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0x58 -> HEBREW LETTER NUN
u'\u05e1' # 0x59 -> HEBREW LETTER SAMEKH
u'!' # 0x5A -> EXCLAMATION MARK
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'\xac' # 0x5F -> NOT SIGN
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\u05e2' # 0x62 -> HEBREW LETTER AYIN
u'\u05e3' # 0x63 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0x64 -> HEBREW LETTER PE
u'\u05e5' # 0x65 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0x66 -> HEBREW LETTER TSADI
u'\u05e7' # 0x67 -> HEBREW LETTER QOF
u'\u05e8' # 0x68 -> HEBREW LETTER RESH
u'\u05e9' # 0x69 -> HEBREW LETTER SHIN
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\ufffe' # 0x70 -> UNDEFINED
u'\u05ea' # 0x71 -> HEBREW LETTER TAV
u'\ufffe' # 0x72 -> UNDEFINED
u'\ufffe' # 0x73 -> UNDEFINED
u'\xa0' # 0x74 -> NO-BREAK SPACE
u'\ufffe' # 0x75 -> UNDEFINED
u'\ufffe' # 0x76 -> UNDEFINED
u'\ufffe' # 0x77 -> UNDEFINED
u'\u2017' # 0x78 -> DOUBLE LOW LINE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\ufffe' # 0x80 -> UNDEFINED
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\ufffe' # 0x9A -> UNDEFINED
u'\ufffe' # 0x9B -> UNDEFINED
u'\ufffe' # 0x9C -> UNDEFINED
u'\xb8' # 0x9D -> CEDILLA
u'\ufffe' # 0x9E -> UNDEFINED
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\ufffe' # 0xAA -> UNDEFINED
u'\ufffe' # 0xAB -> UNDEFINED
u'\ufffe' # 0xAC -> UNDEFINED
u'\ufffe' # 0xAD -> UNDEFINED
u'\ufffe' # 0xAE -> UNDEFINED
u'\xae' # 0xAF -> REGISTERED SIGN
u'^' # 0xB0 -> CIRCUMFLEX ACCENT
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'[' # 0xBA -> LEFT SQUARE BRACKET
u']' # 0xBB -> RIGHT SQUARE BRACKET
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\ufffe' # 0xCB -> UNDEFINED
u'\ufffe' # 0xCC -> UNDEFINED
u'\ufffe' # 0xCD -> UNDEFINED
u'\ufffe' # 0xCE -> UNDEFINED
u'\ufffe' # 0xCF -> UNDEFINED
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\ufffe' # 0xDF -> UNDEFINED
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\ufffe' # 0xEB -> UNDEFINED
u'\ufffe' # 0xEC -> UNDEFINED
u'\ufffe' # 0xED -> UNDEFINED
u'\ufffe' # 0xEE -> UNDEFINED
u'\ufffe' # 0xEF -> UNDEFINED
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\ufffe' # 0xFB -> UNDEFINED
u'\ufffe' # 0xFC -> UNDEFINED
u'\ufffe' # 0xFD -> UNDEFINED
u'\ufffe' # 0xFE -> UNDEFINED
u'\x9f' # 0xFF -> EIGHT ONES
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
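# --- illustrative round-trip sketch (not part of the generated codec) -----------
# A minimal, hedged example of the charmap tables defined above: encode an ASCII
# sample through the module-level Codec and decode it back.  The sample string is
# an arbitrary choice for illustration.
if __name__ == '__main__':
    _sample = u'ABC 123'
    _encoded, _ = Codec().encode(_sample)
    _decoded, _ = Codec().decode(_encoded)
    assert _decoded == _sample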
|
mit
|
SoftwareMaven/django
|
django/contrib/gis/gdal/error.py
|
535
|
1996
|
"""
This module houses the GDAL & SRS Exception objects, and the
check_err() routine which checks the status code returned by
GDAL/OGR methods.
"""
# #### GDAL & SRS Exceptions ####
class GDALException(Exception):
pass
# Legacy name
OGRException = GDALException
class SRSException(Exception):
pass
class OGRIndexError(GDALException, KeyError):
"""
This exception is raised when an invalid index is encountered, and has
    the 'silent_variable_failure' attribute set to true. This ensures that
django's templates proceed to use the next lookup type gracefully when
an Exception is raised. Fixes ticket #4740.
"""
silent_variable_failure = True
# #### GDAL/OGR error checking codes and routine ####
# OGR Error Codes
OGRERR_DICT = {
1: (GDALException, 'Not enough data.'),
2: (GDALException, 'Not enough memory.'),
3: (GDALException, 'Unsupported geometry type.'),
4: (GDALException, 'Unsupported operation.'),
5: (GDALException, 'Corrupt data.'),
6: (GDALException, 'OGR failure.'),
7: (SRSException, 'Unsupported SRS.'),
8: (GDALException, 'Invalid handle.'),
}
# CPL Error Codes
# http://www.gdal.org/cpl__error_8h.html
CPLERR_DICT = {
1: (GDALException, 'AppDefined'),
2: (GDALException, 'OutOfMemory'),
3: (GDALException, 'FileIO'),
4: (GDALException, 'OpenFailed'),
5: (GDALException, 'IllegalArg'),
6: (GDALException, 'NotSupported'),
7: (GDALException, 'AssertionFailed'),
8: (GDALException, 'NoWriteAccess'),
9: (GDALException, 'UserInterrupt'),
10: (GDALException, 'ObjectNull'),
}
ERR_NONE = 0
def check_err(code, cpl=False):
"""
Checks the given CPL/OGRERR, and raises an exception where appropriate.
"""
err_dict = CPLERR_DICT if cpl else OGRERR_DICT
if code == ERR_NONE:
return
elif code in err_dict:
e, msg = err_dict[code]
raise e(msg)
else:
raise GDALException('Unknown error code: "%s"' % code)
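# --- illustrative usage sketch (not part of the original module) ----------------
# A hedged example of check_err(): pass it the integer status returned by a
# GDAL/OGR call and let it raise the mapped exception on failure.  The literal
# codes below are taken from OGRERR_DICT; no real GDAL call is made here.
if __name__ == '__main__':
    check_err(ERR_NONE)      # 0: returns silently
    try:
        check_err(6)         # OGR error code 6 maps to GDALException('OGR failure.')
    except GDALException as exc:
        print('check_err raised: %s' % exc)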
|
bsd-3-clause
|
snailbob/namebench
|
tools/check_dns_servers.py
|
174
|
3835
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for checking a lot of DNS servers from stdin for possible inclusion."""
__author__ = '[email protected] (Thomas Stromberg)'
import csv
import re
import sys
import GeoIP
sys.path.append('..')
sys.path.append('/Users/tstromberg/namebench')
import third_party
from libnamebench import nameserver_list
from libnamebench import config
from libnamebench import addr_util
import check_nameserver_popularity
gi = GeoIP.open('/usr/local/share/GeoLiteCity.dat', GeoIP.GEOIP_MEMORY_CACHE)
asn_lookup = GeoIP.open('/usr/local/share/GeoIPASNum.dat', GeoIP.GEOIP_MEMORY_CACHE)
existing_nameservers = config.GetLocalNameServerList()
check_ns = []
output = csv.writer(open('output.csv', 'w'))
for line in sys.stdin:
ips = addr_util.ExtractIPsFromString(line)
for ip in ips:
print ip
# disable IPV6 until we can improve our regular expression matching
if ':' in ip:
continue
if ip not in existing_nameservers:
check_ns.append((ip, ip))
if not check_ns:
print "no new servers to check"
sys.exit(1)
else:
print "%s servers to check" % len(check_ns)
print '-' * 80
nameserver_list.MAX_INITIAL_HEALTH_THREAD_COUNT = 100
nameservers = nameserver_list.NameServers([],
global_servers=check_ns,
timeout=10,
health_timeout=10,
threads=100,
num_servers=5000,
skip_cache_collusion_checks=True,
)
nameservers.min_healthy_percent = 0
sanity_checks = config.GetLocalSanityChecks()
try:
nameservers.CheckHealth(sanity_checks['primary'], sanity_checks['secondary'])
except nameserver_list.TooFewNameservers:
pass
print '-' * 80
for ns in nameservers:
  details = None
  try:
    details = gi.record_by_addr(ns.ip)
  except:
    pass
if not details:
details = {}
city = details.get('city', '')
if city:
city = city.decode('latin-1')
latitude = details.get('latitude', '')
longitude = details.get('longitude', '')
country = details.get('country_name', '')
if country:
country = country.decode('latin-1')
country_code = details.get('country_code', '')
region = details.get('region_name', '')
if region:
region = region.decode('latin-1')
try:
results = check_nameserver_popularity.CheckPopularity(ns.ip)
urls = [ x['Url'] for x in results ]
except:
urls = ['(exception)']
num_urls = len(urls)
main = "%s=UNKNOWN" % ns.ip
if 'Responded with: REFUSED' in ns.warnings:
note = '_REFUSED_'
elif 'a.root-servers.net.: Timeout' in ns.warnings:
note = '_TIMEOUT_'
elif 'No answer (NOERROR): a.root-servers.net.' in ns.warnings:
note = '_NOANSWER_'
elif ns.warnings:
note = '_WARNING/%s_' % '/'.join(list(ns.warnings))
else:
note = ''
if ns.hostname != ns.ip:
domain = addr_util.GetDomainPartOfHostname(ns.hostname)
if domain:
good_urls = [x for x in urls if re.search(domain, x, re.I)]
if good_urls:
urls = good_urls
geo = '/'.join([x for x in [country_code, region, city] if x and not x.isdigit()]).encode('utf-8')
coords = ','.join(map(str, [latitude,longitude]))
asn = asn_lookup.org_by_addr(ns.ip)
row = [ns.ip, 'regional', 'UNKNOWN', '', ns.hostname, geo, coords, asn, note, num_urls, ' '.join(urls[:2]), ns.version]
print row
output.writerow(row)
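# --- illustrative invocation (not part of the original script) -------------------
# The script reads candidate IP addresses from stdin and appends one CSV row per
# checked server to output.csv.  A hedged shell example (the input file name is an
# assumption):
#
#   ./check_dns_servers.py < candidate_servers.txt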
|
apache-2.0
|
rhelmer/socorro
|
socorro/unittest/external/postgresql/test_connection_context.py
|
13
|
2666
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from nose.tools import eq_, ok_
import psycopg2
from socorro.external.postgresql.connection_context import ConnectionContext
from socorro.unittest.testbase import TestCase
from configman import Namespace
_closes = _commits = _rollbacks = 0
class MockConnection(object):
def __init__(self, dsn):
self.dsn = dsn
self.transaction_status = psycopg2.extensions.TRANSACTION_STATUS_IDLE
def get_transaction_status(self):
return self.transaction_status
def close(self):
global _closes
_closes += 1
def rollback(self):
global _rollbacks
_rollbacks += 1
class TestConnectionContext(TestCase):
def setUp(self):
super(TestConnectionContext, self).setUp()
# reset global variables so each test can run separately
global _closes, _commits, _rollbacks
_closes = _commits = _rollbacks = 0
def test_basic_postgres_usage(self):
class Sneak(ConnectionContext):
def connection(self, __=None):
assert self.dsn
return MockConnection(self.dsn)
definition = Namespace()
local_config = {
'database_hostname': 'host',
'database_name': 'name',
'database_port': 'port',
'database_username': 'user',
'database_password': 'password',
}
postgres = Sneak(definition, local_config)
with postgres() as connection:
ok_(isinstance(connection, MockConnection))
eq_(connection.dsn,
'host=host dbname=name port=port user=user password=password')
eq_(_closes, 0)
        # exiting the context manager finishes by calling 'connection.close()'
eq_(_closes, 1)
eq_(_commits, 0)
eq_(_rollbacks, 0)
try:
with postgres() as connection:
raise NameError('crap')
except NameError:
pass
finally:
eq_(_closes, 2) # second time
eq_(_commits, 0)
eq_(_rollbacks, 0)
try:
with postgres() as connection:
connection.transaction_status = \
psycopg2.extensions.TRANSACTION_STATUS_INTRANS
raise psycopg2.OperationalError('crap!')
# OperationalError's aren't bubbled up
except psycopg2.OperationalError:
pass
eq_(_closes, 3)
eq_(_commits, 0)
eq_(_rollbacks, 0)
|
mpl-2.0
|
sander76/home-assistant
|
tests/components/mqtt/test_subscription.py
|
8
|
5399
|
"""The tests for the MQTT subscription component."""
from unittest.mock import ANY
from homeassistant.components.mqtt.subscription import (
async_subscribe_topics,
async_unsubscribe_topics,
)
from homeassistant.core import callback
from tests.common import async_fire_mqtt_message
async def test_subscribe_topics(hass, mqtt_mock, caplog):
"""Test subscription to topics."""
calls1 = []
@callback
def record_calls1(*args):
"""Record calls."""
calls1.append(args)
calls2 = []
@callback
def record_calls2(*args):
"""Record calls."""
calls2.append(args)
sub_state = None
sub_state = await async_subscribe_topics(
hass,
sub_state,
{
"test_topic1": {"topic": "test-topic1", "msg_callback": record_calls1},
"test_topic2": {"topic": "test-topic2", "msg_callback": record_calls2},
},
)
async_fire_mqtt_message(hass, "test-topic1", "test-payload1")
assert len(calls1) == 1
assert calls1[0][0].topic == "test-topic1"
assert calls1[0][0].payload == "test-payload1"
assert len(calls2) == 0
async_fire_mqtt_message(hass, "test-topic2", "test-payload2")
assert len(calls1) == 1
assert len(calls2) == 1
assert calls2[0][0].topic == "test-topic2"
assert calls2[0][0].payload == "test-payload2"
await async_unsubscribe_topics(hass, sub_state)
async_fire_mqtt_message(hass, "test-topic1", "test-payload")
async_fire_mqtt_message(hass, "test-topic2", "test-payload")
assert len(calls1) == 1
assert len(calls2) == 1
async def test_modify_topics(hass, mqtt_mock, caplog):
"""Test modification of topics."""
calls1 = []
@callback
def record_calls1(*args):
"""Record calls."""
calls1.append(args)
calls2 = []
@callback
def record_calls2(*args):
"""Record calls."""
calls2.append(args)
sub_state = None
sub_state = await async_subscribe_topics(
hass,
sub_state,
{
"test_topic1": {"topic": "test-topic1", "msg_callback": record_calls1},
"test_topic2": {"topic": "test-topic2", "msg_callback": record_calls2},
},
)
async_fire_mqtt_message(hass, "test-topic1", "test-payload")
assert len(calls1) == 1
assert len(calls2) == 0
async_fire_mqtt_message(hass, "test-topic2", "test-payload")
assert len(calls1) == 1
assert len(calls2) == 1
sub_state = await async_subscribe_topics(
hass,
sub_state,
{"test_topic1": {"topic": "test-topic1_1", "msg_callback": record_calls1}},
)
async_fire_mqtt_message(hass, "test-topic1", "test-payload")
async_fire_mqtt_message(hass, "test-topic2", "test-payload")
assert len(calls1) == 1
assert len(calls2) == 1
async_fire_mqtt_message(hass, "test-topic1_1", "test-payload")
assert len(calls1) == 2
assert calls1[1][0].topic == "test-topic1_1"
assert calls1[1][0].payload == "test-payload"
assert len(calls2) == 1
await async_unsubscribe_topics(hass, sub_state)
async_fire_mqtt_message(hass, "test-topic1_1", "test-payload")
async_fire_mqtt_message(hass, "test-topic2", "test-payload")
assert len(calls1) == 2
assert len(calls2) == 1
async def test_qos_encoding_default(hass, mqtt_mock, caplog):
"""Test default qos and encoding."""
@callback
def msg_callback(*args):
"""Do nothing."""
pass
sub_state = None
sub_state = await async_subscribe_topics(
hass,
sub_state,
{"test_topic1": {"topic": "test-topic1", "msg_callback": msg_callback}},
)
mqtt_mock.async_subscribe.assert_called_once_with("test-topic1", ANY, 0, "utf-8")
async def test_qos_encoding_custom(hass, mqtt_mock, caplog):
"""Test custom qos and encoding."""
@callback
def msg_callback(*args):
"""Do nothing."""
pass
sub_state = None
sub_state = await async_subscribe_topics(
hass,
sub_state,
{
"test_topic1": {
"topic": "test-topic1",
"msg_callback": msg_callback,
"qos": 1,
"encoding": "utf-16",
}
},
)
mqtt_mock.async_subscribe.assert_called_once_with("test-topic1", ANY, 1, "utf-16")
async def test_no_change(hass, mqtt_mock, caplog):
"""Test subscription to topics without change."""
calls = []
@callback
def record_calls(*args):
"""Record calls."""
calls.append(args)
sub_state = None
sub_state = await async_subscribe_topics(
hass,
sub_state,
{"test_topic1": {"topic": "test-topic1", "msg_callback": record_calls}},
)
subscribe_call_count = mqtt_mock.async_subscribe.call_count
async_fire_mqtt_message(hass, "test-topic1", "test-payload")
assert len(calls) == 1
sub_state = await async_subscribe_topics(
hass,
sub_state,
{"test_topic1": {"topic": "test-topic1", "msg_callback": record_calls}},
)
assert subscribe_call_count == mqtt_mock.async_subscribe.call_count
async_fire_mqtt_message(hass, "test-topic1", "test-payload")
assert len(calls) == 2
await async_unsubscribe_topics(hass, sub_state)
async_fire_mqtt_message(hass, "test-topic1", "test-payload")
assert len(calls) == 2
|
apache-2.0
|
staticsan/light-layers
|
requests/packages/chardet2/chardistribution.py
|
25
|
8695
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .euctwfreq import EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE, EUCTW_TYPICAL_DISTRIBUTION_RATIO
from .euckrfreq import EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE, EUCKR_TYPICAL_DISTRIBUTION_RATIO
from .gb2312freq import GB2312CharToFreqOrder, GB2312_TABLE_SIZE, GB2312_TYPICAL_DISTRIBUTION_RATIO
from .big5freq import Big5CharToFreqOrder, BIG5_TABLE_SIZE, BIG5_TYPICAL_DISTRIBUTION_RATIO
from .jisfreq import JISCharToFreqOrder, JIS_TABLE_SIZE, JIS_TYPICAL_DISTRIBUTION_RATIO
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
class CharDistributionAnalysis:
def __init__(self):
self._mCharToFreqOrder = None # Mapping table to get frequency order from char order (get from GetOrder())
self._mTableSize = None # Size of above table
        # This is a constant value which varies from language to language,
        # used in calculating confidence.  See
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        # for further detail.
        self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
self._mDone = False # If this flag is set to True, detection is done and conclusion has been made
self._mTotalChars = 0 # Total characters encountered
self._mFreqChars = 0 # The number of characters whose frequency order is less than 512
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-bytes character in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range, return negative answer
if self._mTotalChars <= 0:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = self._mFreqChars / ((self._mTotalChars - self._mFreqChars) * self._mTypicalDistributionRatio)
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
        # It is not necessary to receive all of the data to draw a conclusion;
        # for charset detection, a certain amount of data is enough.
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string, but
# convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
if aBuf[0] >= 0xC4:
return 94 * (aBuf[0] - 0xC4) + aBuf[1] - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
if aBuf[0] >= 0xB0:
return 94 * (aBuf[0] - 0xB0) + aBuf[1] - 0xA1
else:
            return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
if (aBuf[0] >= 0xB0) and (aBuf[1] >= 0xA1):
return 94 * (aBuf[0] - 0xB0) + aBuf[1] - 0xA1
else:
            return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
if aBuf[0] >= 0xA4:
if aBuf[1] >= 0xA1:
return 157 * (aBuf[0] - 0xA4) + aBuf[1] - 0xA1 + 63
else:
return 157 * (aBuf[0] - 0xA4) + aBuf[1] - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
if (aBuf[0] >= 0x81) and (aBuf[0] <= 0x9F):
order = 188 * (aBuf[0] - 0x81)
elif (aBuf[0] >= 0xE0) and (aBuf[0] <= 0xEF):
order = 188 * (aBuf[0] - 0xE0 + 31)
else:
            return -1
        order = order + aBuf[1] - 0x40
        if aBuf[1] > 0x7F:
            order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
if aBuf[0] >= 0xA0:
return 94 * (aBuf[0] - 0xA1) + aBuf[1] - 0xa1
else:
return -1
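# --- illustrative usage sketch (not part of the original module) -----------------
# A hedged example of driving one of the analysers above: feed a couple of
# in-range EUC-KR double-byte sequences and read back the confidence estimate.
# The byte values are arbitrary but fall inside the ranges documented in
# EUCKRDistributionAnalysis.get_order(); the module itself is normally imported
# as part of the chardet2 package rather than run directly.
#
#   analyser = EUCKRDistributionAnalysis()
#   analyser.feed(bytearray([0xB0, 0xA1]), 2)
#   analyser.feed(bytearray([0xC7, 0xD1]), 2)
#   confidence = analyser.get_confidence()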
|
mit
|
Proggie02/TestRepo
|
django/conf/locale/sk/formats.py
|
108
|
1114
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. F Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
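# --- illustrative usage sketch (not part of the original module) -----------------
# A hedged check that the first DATE_INPUT_FORMATS pattern parses a Slovak-style
# date string with the standard library alone; the sample date is arbitrary.
if __name__ == '__main__':
    import datetime
    print(datetime.datetime.strptime('25.10.2006', DATE_INPUT_FORMATS[0]).date())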
|
bsd-3-clause
|
mrucci/moto
|
tests/test_s3/test_server.py
|
21
|
1908
|
from __future__ import unicode_literals
import sure # noqa
import moto.server as server
'''
Test the different server responses
'''
def test_s3_server_get():
backend = server.create_backend_app("s3")
test_client = backend.test_client()
res = test_client.get('/')
res.data.should.contain(b'ListAllMyBucketsResult')
def test_s3_server_bucket_create():
backend = server.create_backend_app("s3")
test_client = backend.test_client()
res = test_client.put('/', 'http://foobaz.localhost:5000/')
res.status_code.should.equal(200)
res = test_client.get('/')
res.data.should.contain(b'<Name>foobaz</Name>')
res = test_client.get('/', 'http://foobaz.localhost:5000/')
res.status_code.should.equal(200)
res.data.should.contain(b"ListBucketResult")
res = test_client.put('/bar', 'http://foobaz.localhost:5000/', data='test value')
res.status_code.should.equal(200)
res = test_client.get('/bar', 'http://foobaz.localhost:5000/')
res.status_code.should.equal(200)
res.data.should.equal(b"test value")
def test_s3_server_bucket_versioning():
backend = server.create_backend_app("s3")
test_client = backend.test_client()
# Just enough XML to enable versioning
body = '<Status>Enabled</Status>'
res = test_client.put('/?versioning', 'http://foobaz.localhost:5000', data=body)
res.status_code.should.equal(200)
def test_s3_server_post_to_bucket():
backend = server.create_backend_app("s3")
test_client = backend.test_client()
res = test_client.put('/', 'http://tester.localhost:5000/')
res.status_code.should.equal(200)
test_client.post('/', "https://tester.localhost:5000/", data={
'key': 'the-key',
'file': 'nothing'
})
res = test_client.get('/the-key', 'http://tester.localhost:5000/')
res.status_code.should.equal(200)
res.data.should.equal(b"nothing")
|
apache-2.0
|
CodeMath/jinrockets
|
BluePrint/lib/werkzeug/testsuite/exceptions.py
|
61
|
3251
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The tests for the exception classes.
TODO:
- This is undertested. HTML is never checked
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import exceptions
from werkzeug.wrappers import Response
class ExceptionsTestCase(WerkzeugTestCase):
def test_proxy_exception(self):
orig_resp = Response('Hello World')
try:
exceptions.abort(orig_resp)
except exceptions.HTTPException, e:
resp = e.get_response({})
else:
self.fail('exception not raised')
self.assert_(resp is orig_resp)
self.assert_equal(resp.data, 'Hello World')
def test_aborter(self):
abort = exceptions.abort
self.assert_raises(exceptions.BadRequest, abort, 400)
self.assert_raises(exceptions.Unauthorized, abort, 401)
self.assert_raises(exceptions.Forbidden, abort, 403)
self.assert_raises(exceptions.NotFound, abort, 404)
self.assert_raises(exceptions.MethodNotAllowed, abort, 405, ['GET', 'HEAD'])
self.assert_raises(exceptions.NotAcceptable, abort, 406)
self.assert_raises(exceptions.RequestTimeout, abort, 408)
self.assert_raises(exceptions.Gone, abort, 410)
self.assert_raises(exceptions.LengthRequired, abort, 411)
self.assert_raises(exceptions.PreconditionFailed, abort, 412)
self.assert_raises(exceptions.RequestEntityTooLarge, abort, 413)
self.assert_raises(exceptions.RequestURITooLarge, abort, 414)
self.assert_raises(exceptions.UnsupportedMediaType, abort, 415)
self.assert_raises(exceptions.InternalServerError, abort, 500)
self.assert_raises(exceptions.NotImplemented, abort, 501)
self.assert_raises(exceptions.BadGateway, abort, 502)
self.assert_raises(exceptions.ServiceUnavailable, abort, 503)
myabort = exceptions.Aborter({1: exceptions.NotFound})
self.assert_raises(LookupError, myabort, 404)
self.assert_raises(exceptions.NotFound, myabort, 1)
myabort = exceptions.Aborter(extra={1: exceptions.NotFound})
self.assert_raises(exceptions.NotFound, myabort, 404)
self.assert_raises(exceptions.NotFound, myabort, 1)
def test_exception_repr(self):
exc = exceptions.NotFound()
self.assert_equal(unicode(exc), '404: Not Found')
self.assert_equal(repr(exc), "<NotFound '404: Not Found'>")
exc = exceptions.NotFound('Not There')
self.assert_equal(unicode(exc), '404: Not There')
self.assert_equal(repr(exc), "<NotFound '404: Not There'>")
def test_special_exceptions(self):
exc = exceptions.MethodNotAllowed(['GET', 'HEAD', 'POST'])
h = dict(exc.get_headers({}))
self.assert_equal(h['Allow'], 'GET, HEAD, POST')
self.assert_('The method DELETE is not allowed' in exc.get_description({
'REQUEST_METHOD': 'DELETE'
}))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExceptionsTestCase))
return suite
|
mit
|
michaelBenin/Django-facebook
|
facebook_example/member/migrations/0010_initial.py
|
27
|
14225
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CustomFacebookUser'
db.create_table(u'member_customfacebookuser', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('about_me', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('facebook_id', self.gf('django.db.models.fields.BigIntegerField')(unique=True, null=True, blank=True)),
('access_token', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('facebook_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('facebook_profile_url', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('website_url', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('blog_url', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('date_of_birth', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('gender', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('raw_data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('facebook_open_graph', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('new_token_required', self.gf('django.db.models.fields.BooleanField')(default=False)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True, blank=True)),
('state', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal(u'member', ['CustomFacebookUser'])
# Adding M2M table for field groups on 'CustomFacebookUser'
db.create_table(u'member_customfacebookuser_groups', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('customfacebookuser', models.ForeignKey(orm[u'member.customfacebookuser'], null=False)),
('group', models.ForeignKey(orm[u'auth.group'], null=False))
))
db.create_unique(u'member_customfacebookuser_groups', ['customfacebookuser_id', 'group_id'])
# Adding M2M table for field user_permissions on 'CustomFacebookUser'
db.create_table(u'member_customfacebookuser_user_permissions', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('customfacebookuser', models.ForeignKey(orm[u'member.customfacebookuser'], null=False)),
('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
))
db.create_unique(u'member_customfacebookuser_user_permissions', ['customfacebookuser_id', 'permission_id'])
# Adding model 'UserProfile'
db.create_table(u'member_userprofile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('about_me', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('facebook_id', self.gf('django.db.models.fields.BigIntegerField')(unique=True, null=True, blank=True)),
('access_token', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('facebook_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('facebook_profile_url', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('website_url', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('blog_url', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('date_of_birth', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('gender', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('raw_data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('facebook_open_graph', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('new_token_required', self.gf('django.db.models.fields.BooleanField')(default=False)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
))
db.send_create_signal(u'member', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'CustomFacebookUser'
db.delete_table(u'member_customfacebookuser')
# Removing M2M table for field groups on 'CustomFacebookUser'
db.delete_table('member_customfacebookuser_groups')
# Removing M2M table for field user_permissions on 'CustomFacebookUser'
db.delete_table('member_customfacebookuser_user_permissions')
# Deleting model 'UserProfile'
db.delete_table(u'member_userprofile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'member.customfacebookuser': {
'Meta': {'object_name': 'CustomFacebookUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'member.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['member']
|
bsd-3-clause
|
virtdb/drizzle
|
tests/qp_tests/randgen_basic/subquerySemijoinNested_test.py
|
4
|
1461
|
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [[]]
servers = []
server_manager = None
test_executor = None
class basicTest(mysqlBaseTestCase):
def test_subquerySemijoinNested1(self):
test_cmd = "./gentest.pl --gendata=conf/drizzle/drizzle.zz --grammar=conf/drizzle/subquery_semijoin_nested_drizzle.yy --queries=1000 --threads=3"
retcode, output = self.execute_randgen(test_cmd, test_executor, servers[0])
self.assertEqual(retcode, 0, msg = output)
def tearDown(self):
server_manager.reset_servers(test_executor.name)
|
gpl-2.0
|
googleapis/google-resumable-media-python
|
google/_async_resumable_media/requests/download.py
|
1
|
18494
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for downloading media from Google APIs."""
import urllib3.response
from google._async_resumable_media import _download
from google._async_resumable_media import _helpers
from google._async_resumable_media.requests import _request_helpers
from google.resumable_media import common
from google.resumable_media import _helpers as sync_helpers
from google.resumable_media.requests import download
_CHECKSUM_MISMATCH = download._CHECKSUM_MISMATCH
class Download(_request_helpers.RequestsMixin, _download.Download):
"""Helper to manage downloading a resource from a Google API.
"Slices" of the resource can be retrieved by specifying a range
with ``start`` and / or ``end``. However, in typical usage, neither
``start`` nor ``end`` is expected to be provided.
Args:
media_url (str): The URL containing the media to be downloaded.
stream (IO[bytes]): A write-able stream (i.e. file-like object) that
the downloaded resource can be written to.
start (int): The first byte in a range to be downloaded. If not
provided, but ``end`` is provided, will download from the
beginning to ``end`` of the media.
end (int): The last byte in a range to be downloaded. If not
provided, but ``start`` is provided, will download from the
``start`` to the end of the media.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the request, e.g. headers for encrypted data.
        checksum (Optional[str]): The type of checksum to compute to verify
the integrity of the object. The response headers must contain
a checksum of the requested type. If the headers lack an
appropriate checksum (for instance in the case of transcoded or
ranged downloads where the remote service does not know the
correct checksum) an INFO-level log will be emitted. Supported
values are "md5", "crc32c" and None. The default is "md5".
Attributes:
media_url (str): The URL containing the media to be downloaded.
start (Optional[int]): The first byte in a range to be downloaded.
end (Optional[int]): The last byte in a range to be downloaded.
"""
async def _write_to_stream(self, response):
"""Write response body to a write-able stream.
        .. note::
This method assumes that the ``_stream`` attribute is set on the
current download.
Args:
response (~requests.Response): The HTTP response object.
Raises:
~google.resumable_media.common.DataCorruption: If the download's
checksum doesn't agree with server-computed checksum.
"""
# `_get_expected_checksum()` may return None even if a checksum was
# requested, in which case it will emit an info log _MISSING_CHECKSUM.
# If an invalid checksum type is specified, this will raise ValueError.
expected_checksum, checksum_object = sync_helpers._get_expected_checksum(
response, self._get_headers, self.media_url, checksum_type=self.checksum
)
local_checksum_object = _add_decoder(response, checksum_object)
async for chunk in response.content.iter_chunked(
_request_helpers._SINGLE_GET_CHUNK_SIZE
):
self._stream.write(chunk)
local_checksum_object.update(chunk)
if expected_checksum is None:
return
else:
actual_checksum = sync_helpers.prepare_checksum_digest(
checksum_object.digest()
)
if actual_checksum != expected_checksum:
msg = _CHECKSUM_MISMATCH.format(
self.media_url,
expected_checksum,
actual_checksum,
checksum_type=self.checksum.upper(),
)
raise common.DataCorruption(response, msg)
async def consume(self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT):
"""Consume the resource to be downloaded.
If a ``stream`` is attached to this download, then the downloaded
resource will be written to the stream.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as an `aiohttp.ClientTimeout` object.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
Raises:
~google.resumable_media.common.DataCorruption: If the download's
checksum doesn't agree with server-computed checksum.
ValueError: If the current :class:`Download` has already
finished.
"""
method, url, payload, headers = self._prepare_request()
# NOTE: We assume "payload is None" but pass it along anyway.
request_kwargs = {
u"data": payload,
u"headers": headers,
u"retry_strategy": self._retry_strategy,
u"timeout": timeout,
}
if self._stream is not None:
request_kwargs[u"stream"] = True
result = await _request_helpers.http_request(
transport, method, url, **request_kwargs
)
self._process_response(result)
if self._stream is not None:
await self._write_to_stream(result)
return result
class RawDownload(_request_helpers.RawRequestsMixin, _download.Download):
"""Helper to manage downloading a raw resource from a Google API.
"Slices" of the resource can be retrieved by specifying a range
with ``start`` and / or ``end``. However, in typical usage, neither
``start`` nor ``end`` is expected to be provided.
Args:
media_url (str): The URL containing the media to be downloaded.
stream (IO[bytes]): A write-able stream (i.e. file-like object) that
the downloaded resource can be written to.
start (int): The first byte in a range to be downloaded. If not
provided, but ``end`` is provided, will download from the
beginning to ``end`` of the media.
end (int): The last byte in a range to be downloaded. If not
provided, but ``start`` is provided, will download from the
``start`` to the end of the media.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the request, e.g. headers for encrypted data.
        checksum (Optional[str]): The type of checksum to compute to verify
the integrity of the object. The response headers must contain
a checksum of the requested type. If the headers lack an
appropriate checksum (for instance in the case of transcoded or
ranged downloads where the remote service does not know the
correct checksum) an INFO-level log will be emitted. Supported
values are "md5", "crc32c" and None. The default is "md5".
Attributes:
media_url (str): The URL containing the media to be downloaded.
start (Optional[int]): The first byte in a range to be downloaded.
end (Optional[int]): The last byte in a range to be downloaded.
"""
async def _write_to_stream(self, response):
"""Write response body to a write-able stream.
        .. note::
This method assumes that the ``_stream`` attribute is set on the
current download.
Args:
response (~requests.Response): The HTTP response object.
Raises:
~google.resumable_media.common.DataCorruption: If the download's
checksum doesn't agree with server-computed checksum.
"""
# `_get_expected_checksum()` may return None even if a checksum was
# requested, in which case it will emit an info log _MISSING_CHECKSUM.
# If an invalid checksum type is specified, this will raise ValueError.
expected_checksum, checksum_object = sync_helpers._get_expected_checksum(
response, self._get_headers, self.media_url, checksum_type=self.checksum
)
async for chunk in response.content.iter_chunked(
_request_helpers._SINGLE_GET_CHUNK_SIZE
):
self._stream.write(chunk)
checksum_object.update(chunk)
if expected_checksum is None:
return
else:
actual_checksum = sync_helpers.prepare_checksum_digest(
checksum_object.digest()
)
if actual_checksum != expected_checksum:
msg = _CHECKSUM_MISMATCH.format(
self.media_url,
expected_checksum,
actual_checksum,
checksum_type=self.checksum.upper(),
)
raise common.DataCorruption(response, msg)
async def consume(self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT):
"""Consume the resource to be downloaded.
If a ``stream`` is attached to this download, then the downloaded
resource will be written to the stream.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
timeout (Optional[Union[float, Tuple[float, float]]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
Raises:
~google.resumable_media.common.DataCorruption: If the download's
checksum doesn't agree with server-computed checksum.
ValueError: If the current :class:`Download` has already
finished.
"""
method, url, payload, headers = self._prepare_request()
# NOTE: We assume "payload is None" but pass it along anyway.
result = await _request_helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
            retry_strategy=self._retry_strategy,
            timeout=timeout,
        )
self._process_response(result)
if self._stream is not None:
await self._write_to_stream(result)
return result
class ChunkedDownload(_request_helpers.RequestsMixin, _download.ChunkedDownload):
"""Download a resource in chunks from a Google API.
Args:
media_url (str): The URL containing the media to be downloaded.
chunk_size (int): The number of bytes to be retrieved in each
request.
stream (IO[bytes]): A write-able stream (i.e. file-like object) that
will be used to concatenate chunks of the resource as they are
downloaded.
start (int): The first byte in a range to be downloaded. If not
provided, defaults to ``0``.
end (int): The last byte in a range to be downloaded. If not
provided, will download to the end of the media.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with each request, e.g. headers for data encryption
key headers.
Attributes:
media_url (str): The URL containing the media to be downloaded.
start (Optional[int]): The first byte in a range to be downloaded.
end (Optional[int]): The last byte in a range to be downloaded.
chunk_size (int): The number of bytes to be retrieved in each request.
Raises:
ValueError: If ``start`` is negative.
"""
async def consume_next_chunk(
self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT
):
"""
Consume the next chunk of the resource to be downloaded.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as an `aiohttp.ClientTimeout` object.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
Raises:
ValueError: If the current download has finished.
"""
method, url, payload, headers = self._prepare_request()
# NOTE: We assume "payload is None" but pass it along anyway.
result = await _request_helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
timeout=timeout,
)
await self._process_response(result)
return result
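# Illustrative sketch, not part of the library: the usual pattern for a chunked
# download is to await ``consume_next_chunk`` until the download reports itself
# finished. ``transport``, ``media_url``, ``stream`` and the 1 MiB chunk size
# are hypothetical placeholders; ``finished`` and ``bytes_downloaded`` are
# assumed to be inherited from the shared ``_download.ChunkedDownload`` base.
async def _example_chunked_download(transport, media_url, stream):
    download = ChunkedDownload(media_url, 1024 * 1024, stream)
    while not download.finished:
        await download.consume_next_chunk(transport)
    return download.bytes_downloaded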
class RawChunkedDownload(_request_helpers.RawRequestsMixin, _download.ChunkedDownload):
"""Download a raw resource in chunks from a Google API.
Args:
media_url (str): The URL containing the media to be downloaded.
chunk_size (int): The number of bytes to be retrieved in each
request.
stream (IO[bytes]): A write-able stream (i.e. file-like object) that
will be used to concatenate chunks of the resource as they are
downloaded.
start (int): The first byte in a range to be downloaded. If not
provided, defaults to ``0``.
end (int): The last byte in a range to be downloaded. If not
provided, will download to the end of the media.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with each request, e.g. headers for data encryption
key headers.
Attributes:
media_url (str): The URL containing the media to be downloaded.
start (Optional[int]): The first byte in a range to be downloaded.
end (Optional[int]): The last byte in a range to be downloaded.
chunk_size (int): The number of bytes to be retrieved in each request.
Raises:
ValueError: If ``start`` is negative.
"""
async def consume_next_chunk(
self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT
):
"""Consume the next chunk of the resource to be downloaded.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as an `aiohttp.ClientTimeout` object.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
Raises:
ValueError: If the current download has finished.
"""
method, url, payload, headers = self._prepare_request()
# NOTE: We assume "payload is None" but pass it along anyway.
result = await _request_helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
timeout=timeout,
)
await self._process_response(result)
return result
def _add_decoder(response_raw, checksum):
"""Patch the ``_decoder`` on a ``urllib3`` response.
This is so that we can intercept the compressed bytes before they are
decoded.
Only patches if the content encoding is ``gzip``.
Args:
response_raw (urllib3.response.HTTPResponse): The raw response for
an HTTP request.
checksum (object):
A checksum which will be updated with compressed bytes.
Returns:
object: Either the original ``checksum`` if ``_decoder`` is not
patched, or a ``_DoNothingHash`` if the decoder is patched, since the
        caller will no longer need to hash the decoded bytes.
"""
encoding = response_raw.headers.get(u"content-encoding", u"").lower()
if encoding != u"gzip":
return checksum
response_raw._decoder = _GzipDecoder(checksum)
return _helpers._DoNothingHash()
class _GzipDecoder(urllib3.response.GzipDecoder):
"""Custom subclass of ``urllib3`` decoder for ``gzip``-ed bytes.
Allows a checksum function to see the compressed bytes before they are
decoded. This way the checksum of the compressed value can be computed.
Args:
checksum (object):
A checksum which will be updated with compressed bytes.
"""
def __init__(self, checksum):
super(_GzipDecoder, self).__init__()
self._checksum = checksum
def decompress(self, data):
"""Decompress the bytes.
Args:
data (bytes): The compressed bytes to be decompressed.
Returns:
bytes: The decompressed bytes from ``data``.
"""
self._checksum.update(data)
return super(_GzipDecoder, self).decompress(data)
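# Illustrative sketch, not part of the library: a minimal in-memory download
# with checksum verification, assuming ``session`` is an authenticated
# aiohttp-style transport accepted by ``_request_helpers.http_request`` and
# ``media_url`` points at a downloadable object; both names are hypothetical.
async def _example_simple_download(session, media_url):
    import io
    stream = io.BytesIO()
    download = Download(media_url, stream=stream, checksum=u"md5")
    # ``consume`` writes the body to ``stream`` and raises
    # ``common.DataCorruption`` if the server checksum does not match.
    response = await download.consume(session)
    return response, stream.getvalue()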
|
apache-2.0
|
jimi-c/ansible
|
test/units/modules/network/eos/test_eos_user.py
|
57
|
4179
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.eos import eos_user
from units.modules.utils import set_module_args
from .eos_module import TestEosModule, load_fixture
class TestEosUserModule(TestEosModule):
module = eos_user
def setUp(self):
super(TestEosUserModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.eos.eos_user.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.eos.eos_user.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestEosUserModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.get_config.return_value = load_fixture('eos_user_config.cfg')
self.load_config.return_value = dict(diff=None, session='session')
def test_eos_user_create(self):
set_module_args(dict(name='test', nopassword=True))
commands = ['username test nopassword']
self.execute_module(changed=True, commands=commands)
def test_eos_user_delete(self):
set_module_args(dict(name='ansible', state='absent'))
commands = ['no username ansible']
self.execute_module(changed=True, commands=commands)
def test_eos_user_password(self):
set_module_args(dict(name='ansible', configured_password='test'))
commands = ['username ansible secret test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_privilege(self):
set_module_args(dict(name='ansible', privilege=15, configured_password='test'))
result = self.execute_module(changed=True)
self.assertIn('username ansible privilege 15', result['commands'])
def test_eos_user_privilege_invalid(self):
set_module_args(dict(name='ansible', privilege=25, configured_password='test'))
self.execute_module(failed=True)
def test_eos_user_purge(self):
set_module_args(dict(purge=True))
commands = ['no username ansible']
self.execute_module(changed=True, commands=commands)
def test_eos_user_role(self):
set_module_args(dict(name='ansible', role='test', configured_password='test'))
result = self.execute_module(changed=True)
self.assertIn('username ansible role test', result['commands'])
def test_eos_user_sshkey(self):
set_module_args(dict(name='ansible', sshkey='test'))
commands = ['username ansible sshkey test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_update_password_changed(self):
set_module_args(dict(name='test', configured_password='test', update_password='on_create'))
commands = ['username test secret test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_update_password_on_create_ok(self):
set_module_args(dict(name='ansible', configured_password='test', update_password='on_create'))
self.execute_module()
def test_eos_user_update_password_always(self):
set_module_args(dict(name='ansible', configured_password='test', update_password='always'))
commands = ['username ansible secret test']
self.execute_module(changed=True, commands=commands)
|
gpl-3.0
|
ressu/SickGear
|
lib/unrar2/__init__.py
|
24
|
7125
|
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
pyUnRAR2 is a ctypes based wrapper around the free UnRAR.dll.
It is a modified version of Jimmy Retzlaff's pyUnRAR - simpler,
more stable and foolproof.
Note that its interface is INCOMPATIBLE with the original.
It enables reading and unpacking of archives created with the
RAR/WinRAR archivers. There is a low-level interface which is very
similar to the C interface provided by UnRAR. There is also a
higher level interface which makes some common operations easier.
"""
__version__ = '0.99.3'
try:
WindowsError
in_windows = True
except NameError:
in_windows = False
if in_windows:
from windows import RarFileImplementation
else:
from unix import RarFileImplementation
import fnmatch, time, weakref
class RarInfo(object):
"""Represents a file header in an archive. Don't instantiate directly.
    Use it only to obtain information about a file.
YOU CANNOT EXTRACT FILE CONTENTS USING THIS OBJECT.
USE METHODS OF RarFile CLASS INSTEAD.
Properties:
index - index of file within the archive
filename - name of the file in the archive including path (if any)
datetime - file date/time as a struct_time suitable for time.strftime
isdir - True if the file is a directory
size - size in bytes of the uncompressed file
comment - comment associated with the file
Note - this is not currently intended to be a Python file-like object.
"""
def __init__(self, rarfile, data):
self.rarfile = weakref.proxy(rarfile)
self.index = data['index']
self.filename = data['filename']
self.isdir = data['isdir']
self.size = data['size']
self.datetime = data['datetime']
self.comment = data['comment']
def __str__(self):
try :
arcName = self.rarfile.archiveName
except ReferenceError:
arcName = "[ARCHIVE_NO_LONGER_LOADED]"
return '<RarInfo "%s" in "%s">' % (self.filename, arcName)
class RarFile(RarFileImplementation):
def __init__(self, archiveName, password=None):
"""Instantiate the archive.
archiveName is the name of the RAR file.
password is used to decrypt the files in the archive.
Properties:
comment - comment associated with the archive
>>> print RarFile('test.rar').comment
This is a test.
"""
self.archiveName = archiveName
RarFileImplementation.init(self, password)
def __del__(self):
self.destruct()
def infoiter(self):
"""Iterate over all the files in the archive, generating RarInfos.
>>> import os
>>> for fileInArchive in RarFile('test.rar').infoiter():
... print os.path.split(fileInArchive.filename)[-1],
... print fileInArchive.isdir,
... print fileInArchive.size,
... print fileInArchive.comment,
... print tuple(fileInArchive.datetime)[0:5],
... print time.strftime('%a, %d %b %Y %H:%M', fileInArchive.datetime)
test True 0 None (2003, 6, 30, 1, 59) Mon, 30 Jun 2003 01:59
test.txt False 20 None (2003, 6, 30, 2, 1) Mon, 30 Jun 2003 02:01
this.py False 1030 None (2002, 2, 8, 16, 47) Fri, 08 Feb 2002 16:47
"""
for params in RarFileImplementation.infoiter(self):
yield RarInfo(self, params)
def infolist(self):
"""Return a list of RarInfos, descripting the contents of the archive."""
return list(self.infoiter())
def read_files(self, condition='*'):
"""Read specific files from archive into memory.
If "condition" is a list of numbers, then return files which have those positions in infolist.
If "condition" is a string, then it is treated as a wildcard for names of files to extract.
If "condition" is a function, it is treated as a callback function, which accepts a RarInfo object
and returns boolean True (extract) or False (skip).
If "condition" is omitted, all files are returned.
Returns list of tuples (RarInfo info, str contents)
"""
checker = condition2checker(condition)
return RarFileImplementation.read_files(self, checker)
def extract(self, condition='*', path='.', withSubpath=True, overwrite=True):
"""Extract specific files from archive to disk.
If "condition" is a list of numbers, then extract files which have those positions in infolist.
If "condition" is a string, then it is treated as a wildcard for names of files to extract.
If "condition" is a function, it is treated as a callback function, which accepts a RarInfo object
and returns either boolean True (extract) or boolean False (skip).
DEPRECATED: If "condition" callback returns string (only supported for Windows) -
that string will be used as a new name to save the file under.
If "condition" is omitted, all files are extracted.
"path" is a directory to extract to
"withSubpath" flag denotes whether files are extracted with their full path in the archive.
"overwrite" flag denotes whether extracted files will overwrite old ones. Defaults to true.
Returns list of RarInfos for extracted files."""
checker = condition2checker(condition)
return RarFileImplementation.extract(self, checker, path, withSubpath, overwrite)
def condition2checker(condition):
"""Converts different condition types to callback"""
if type(condition) in [str, unicode]:
def smatcher(info):
return fnmatch.fnmatch(info.filename, condition)
return smatcher
elif type(condition) in [list, tuple] and type(condition[0]) in [int, long]:
def imatcher(info):
return info.index in condition
return imatcher
elif callable(condition):
return condition
else:
raise TypeError
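# Illustrative usage sketch, not part of the module, guarded so that importing
# the package stays side-effect free. The archive name 'example.rar' is a
# hypothetical placeholder; the calls only use the documented high-level API.
if __name__ == '__main__':
    archive = RarFile('example.rar')
    print archive.comment                               # archive-level comment
    for info in archive.infoiter():                     # RarInfo metadata only
        print info.filename, info.size, info.isdir
    archive.extract(condition='*.txt', path='.')        # wildcard condition
    archive.extract(condition=lambda info: not info.isdir)  # callback condition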
|
gpl-3.0
|
rooi/CouchPotatoServer
|
libs/suds/metrics.py
|
211
|
2004
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{metrics} module defines classes and other resources
designed for collecting and reporting performance metrics.
"""
import time
from logging import getLogger
from suds import *
from math import modf
log = getLogger(__name__)
class Timer:
def __init__(self):
self.started = 0
self.stopped = 0
def start(self):
self.started = time.time()
self.stopped = 0
return self
def stop(self):
if self.started > 0:
self.stopped = time.time()
return self
def duration(self):
return ( self.stopped - self.started )
def __str__(self):
if self.started == 0:
return 'not-running'
if self.started > 0 and self.stopped == 0:
return 'started: %d (running)' % self.started
duration = self.duration()
jmod = ( lambda m : (m[1], m[0]*1000) )
if duration < 1:
ms = (duration*1000)
return '%d (ms)' % ms
if duration < 60:
m = modf(duration)
return '%d.%.3d (seconds)' % jmod(m)
m = modf(duration/60)
return '%d.%.3d (minutes)' % jmod(m)
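# Illustrative usage sketch, not part of the module, guarded so importing it
# has no side effects. The sleep stands in for whatever operation is timed.
if __name__ == '__main__':
    t = Timer()
    t.start()
    time.sleep(0.25)            # placeholder for the work being measured
    t.stop()
    log.info('elapsed: %s', t)  # __str__ renders e.g. "250 (ms)"
    print t.duration()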
|
gpl-3.0
|
jamesyli/solum
|
solum/tests/common/test_exception_base.py
|
3
|
2567
|
# -*- encoding: utf-8 -*-
#
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
import six
from solum.common import exception
from solum.tests import base
class ExceptionTestCase(base.BaseTestCase):
"""Test cases for exception code."""
def test_with_kwargs(self):
exc = exception.ResourceNotFound(name='application', id='green_paint')
self.assertIn('green_paint could not be found.',
six.text_type(exc))
self.assertEqual(exc.code, 404)
def test_with_kwargs_ru(self):
exc = exception.ResourceNotFound(name='application',
id=u'зеленой_краской')
self.assertIn(u'зеленой_краской could not be found',
six.text_type(exc))
self.assertEqual(exc.code, 404)
def test_bad_kwargs_exception(self):
cfg.CONF.set_override('fatal_exception_format_errors', True)
self.assertRaises(KeyError,
exception.ResourceNotFound, a_field='green')
def test_bad_kwargs(self):
cfg.CONF.set_override('fatal_exception_format_errors', False)
exc = exception.ResourceNotFound(a_field='green')
self.assertIn('An unknown exception occurred', six.text_type(exc))
self.assertEqual(exc.code, 404)
def test_resource_exists(self):
exc = exception.ResourceExists(name='test')
self.assertIn("The test resource already exists.",
six.text_type(exc))
self.assertEqual(exc.code, 409)
def test_application_exists(self):
exc = exception.ResourceExists(name='test')
self.assertIn("The test resource already exists.",
six.text_type(exc))
self.assertEqual(exc.code, 409)
def test_not_implemented(self):
exc = exception.NotImplemented()
self.assertIn("The requested operation is not implemented.",
six.text_type(exc))
self.assertEqual(exc.code, 501)
|
apache-2.0
|
jcpowermac/ansible
|
test/utils/shippable/tools/download.py
|
124
|
10149
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""CLI tool for downloading results from Shippable CI runs."""
from __future__ import print_function
# noinspection PyCompatibility
import argparse
import json
import os
import re
import requests
try:
import argcomplete
except ImportError:
argcomplete = None
def main():
"""Main program body."""
api_key = get_api_key()
parser = argparse.ArgumentParser(description='Download results from a Shippable run.')
parser.add_argument('run_id',
metavar='RUN',
help='shippable run id, run url or run name formatted as: account/project/run_number')
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true',
help='show what is being downloaded')
parser.add_argument('-t', '--test',
dest='test',
action='store_true',
help='show what would be downloaded without downloading')
parser.add_argument('--key',
dest='api_key',
default=api_key,
required=api_key is None,
help='api key for accessing Shippable')
parser.add_argument('--console-logs',
action='store_true',
help='download console logs')
parser.add_argument('--test-results',
action='store_true',
help='download test results')
parser.add_argument('--coverage-results',
action='store_true',
help='download code coverage results')
parser.add_argument('--job-metadata',
action='store_true',
help='download job metadata')
parser.add_argument('--run-metadata',
action='store_true',
help='download run metadata')
parser.add_argument('--all',
action='store_true',
help='download everything')
parser.add_argument('--job-number',
metavar='N',
action='append',
type=int,
help='limit downloads to the given job number')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
old_runs_prefix = 'https://app.shippable.com/runs/'
if args.run_id.startswith(old_runs_prefix):
args.run_id = args.run_id[len(old_runs_prefix):]
if args.all:
args.console_logs = True
args.test_results = True
args.coverage_results = True
args.job_metadata = True
args.run_metadata = True
selections = (
args.console_logs,
args.test_results,
args.coverage_results,
args.job_metadata,
args.run_metadata,
)
if not any(selections):
parser.error('At least one download option is required.')
headers = dict(
Authorization='apiToken %s' % args.api_key,
)
match = re.search(
r'^https://app.shippable.com/github/(?P<account>[^/]+)/(?P<project>[^/]+)/runs/(?P<run_number>[0-9]+)(?:/summary|(/(?P<job_number>[0-9]+)))?$',
args.run_id)
if not match:
match = re.search(r'^(?P<account>[^/]+)/(?P<project>[^/]+)/(?P<run_number>[0-9]+)$', args.run_id)
if match:
account = match.group('account')
project = match.group('project')
run_number = int(match.group('run_number'))
job_number = int(match.group('job_number')) if match.group('job_number') else None
if job_number:
if args.job_number:
exit('ERROR: job number found in url and specified with --job-number')
args.job_number = [job_number]
url = 'https://api.shippable.com/projects'
response = requests.get(url, dict(projectFullNames='%s/%s' % (account, project)), headers=headers)
if response.status_code != 200:
raise Exception(response.content)
project_id = response.json()[0]['id']
url = 'https://api.shippable.com/runs?projectIds=%s&runNumbers=%s' % (project_id, run_number)
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise Exception(response.content)
run = [run for run in response.json() if run['runNumber'] == run_number][0]
args.run_id = run['id']
elif re.search('^[a-f0-9]+$', args.run_id):
url = 'https://api.shippable.com/runs/%s' % args.run_id
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise Exception(response.content)
run = response.json()
account = run['subscriptionOrgName']
project = run['projectName']
run_number = run['runNumber']
else:
exit('ERROR: invalid run: %s' % args.run_id)
output_dir = '%s/%s/%s' % (account, project, run_number)
response = requests.get('https://api.shippable.com/jobs?runIds=%s' % args.run_id, headers=headers)
if response.status_code != 200:
raise Exception(response.content)
jobs = sorted(response.json(), key=lambda job: int(job['jobNumber']))
if not args.test:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if args.run_metadata:
path = os.path.join(output_dir, 'run.json')
contents = json.dumps(run, sort_keys=True, indent=4)
if args.verbose or args.test:
print(path)
if not args.test:
with open(path, 'w') as metadata_fd:
metadata_fd.write(contents)
for j in jobs:
job_id = j['id']
job_number = j['jobNumber']
if args.job_number and job_number not in args.job_number:
continue
if args.job_metadata:
path = os.path.join(output_dir, '%s/job.json' % job_number)
contents = json.dumps(j, sort_keys=True, indent=4)
if args.verbose or args.test:
print(path)
if not args.test:
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, 'w') as metadata_fd:
metadata_fd.write(contents)
if args.console_logs:
path = os.path.join(output_dir, '%s/console.log' % job_number)
url = 'https://api.shippable.com/jobs/%s/consoles?download=true' % job_id
download(args, headers, path, url, is_json=False)
if args.test_results:
path = os.path.join(output_dir, '%s/test.json' % job_number)
url = 'https://api.shippable.com/jobs/%s/jobTestReports' % job_id
download(args, headers, path, url)
extract_contents(args, path, os.path.join(output_dir, '%s/test' % job_number))
if args.coverage_results:
path = os.path.join(output_dir, '%s/coverage.json' % job_number)
url = 'https://api.shippable.com/jobs/%s/jobCoverageReports' % job_id
download(args, headers, path, url)
extract_contents(args, path, os.path.join(output_dir, '%s/coverage' % job_number))
def extract_contents(args, path, output_dir):
"""
:type args: any
:type path: str
:type output_dir: str
"""
if not args.test:
if not os.path.exists(path):
return
with open(path, 'r') as json_fd:
items = json.load(json_fd)
for item in items:
contents = item['contents'].encode('utf-8')
path = output_dir + '/' + re.sub('^/*', '', item['path'])
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
if args.verbose:
print(path)
if path.endswith('.json'):
contents = json.dumps(json.loads(contents), sort_keys=True, indent=4)
if not os.path.exists(path):
with open(path, 'w') as output_fd:
output_fd.write(contents)
def download(args, headers, path, url, is_json=True):
"""
:type args: any
:type headers: dict[str, str]
:type path: str
:type url: str
:type is_json: bool
"""
if args.verbose or args.test:
print(path)
if os.path.exists(path):
return
if not args.test:
response = requests.get(url, headers=headers)
if response.status_code != 200:
path += '.error'
if is_json:
content = json.dumps(response.json(), sort_keys=True, indent=4)
else:
content = response.content
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, 'w') as content_fd:
content_fd.write(content)
def get_api_key():
"""
    :rtype: str
"""
key = os.environ.get('SHIPPABLE_KEY', None)
if key:
return key
path = os.path.join(os.environ['HOME'], '.shippable.key')
try:
with open(path, 'r') as key_fd:
return key_fd.read().strip()
except IOError:
return None
if __name__ == '__main__':
main()
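# Example invocations (hedged; the account, project and run identifiers below
# are placeholders). The API key is taken from --key, the SHIPPABLE_KEY
# environment variable, or ~/.shippable.key via get_api_key():
#   ./download.py --all --verbose account/project/12345
#   ./download.py --test-results --job-number 3 \
#       https://app.shippable.com/github/account/project/runs/12345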
|
gpl-3.0
|
JRock007/boxxy
|
dist/Boxxy.app/Contents/Resources/lib/python2.7/pygame/tests/gfxdraw_test.py
|
9
|
30078
|
if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import unittest
else:
from test.test_utils import unittest
import pygame
import pygame.gfxdraw
from pygame.locals import *
def intensity(c, i):
"""Return color c changed by intensity i
For 0 <= i <= 127 the color is a shade, with 0 being black, 127 being the
unaltered color.
    For 128 <= i <= 255 the color is a tint, with 128 being white, 255 the
    unaltered color.
"""
r, g, b = c[0:3]
if 0 <= i <= 127:
# Darken
return ((r * i) // 127, (g * i) // 127, (b * i) // 127)
# Lighten
return (r + ((255 - r) * (255 - i)) // 127,
g + ((255 - g) * (255 - i)) // 127,
b + ((255 - b) * (255 - i)) // 127)
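# Worked example (comment only, not part of the test suite): with the formula
# above, i == 0 gives black, i == 127 returns the color unchanged, i == 128
# gives white and i == 255 again returns the color unchanged. For (128, 64, 8):
#   intensity((128, 64, 8), 0)   == (0, 0, 0)
#   intensity((128, 64, 8), 127) == (128, 64, 8)
#   intensity((128, 64, 8), 128) == (255, 255, 255)
#   intensity((128, 64, 8), 255) == (128, 64, 8)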
class GfxdrawDefaultTest( unittest.TestCase ):
is_started = False
foreground_color = (128, 64, 8)
background_color = (255, 255, 255)
def make_palette(base_color):
"""Return color palette that is various intensities of base_color"""
# Need this function for Python 3.x so the base_color
# is within the scope of the list comprehension.
return [intensity(base_color, i) for i in range(0, 256)]
default_palette = make_palette(foreground_color)
default_size = (100, 100)
def check_at(self, surf, posn, color):
sc = surf.get_at(posn)
fail_msg = ("%s != %s at %s, bitsize: %i, flags: %i, masks: %s" %
(sc, color, posn, surf.get_bitsize(), surf.get_flags(),
surf.get_masks()))
self.failUnlessEqual(sc, color, fail_msg)
def check_not_at(self, surf, posn, color):
sc = surf.get_at(posn)
fail_msg = ("%s != %s at %s, bitsize: %i, flags: %i, masks: %s" %
(sc, color, posn, surf.get_bitsize(), surf.get_flags(),
surf.get_masks()))
self.failIfEqual(sc, color, fail_msg)
def setUp(self):
Surface = pygame.Surface
size = self.default_size
palette = self.default_palette
if not self.is_started:
# Necessary for Surface.set_palette.
pygame.init()
pygame.display.set_mode((1, 1))
# Create test surfaces
self.surfaces = [Surface(size, 0, 8),
Surface(size, 0, 16),
Surface(size, 0, 24),
Surface(size, 0, 32),
Surface(size, SRCALPHA, 16),
Surface(size, SRCALPHA, 32)]
self.surfaces[0].set_palette(palette)
# Special pixel formats
for i in range(1, 6):
s = self.surfaces[i]
flags = s.get_flags()
bitsize = s.get_bitsize()
masks = s.get_masks()
if flags:
masks = (masks[1], masks[2], masks[3], masks[0])
else:
masks = (masks[1], masks[2], masks[0], masks[3])
self.surfaces.append(Surface(size, flags, bitsize, masks))
for surf in self.surfaces:
surf.fill(self.background_color)
def test_pixel(self):
"""pixel(surface, x, y, color): return None"""
fg = self.foreground_color
bg = self.background_color
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.pixel(surf, 2, 2, fg)
for x in range(1, 4):
for y in range(1, 4):
if x == 2 and y == 2:
self.check_at(surf, (x, y), fg_adjusted)
else:
self.check_at(surf, (x, y), bg_adjusted)
def test_hline(self):
"""hline(surface, x1, x2, y, color): return None"""
fg = self.foreground_color
bg = self.background_color
startx = 10
stopx = 80
y = 50
fg_test_points = [(startx, y), (stopx, y), ((stopx - startx) // 2, y)]
bg_test_points = [(startx - 1, y), (stopx + 1, y),
(startx, y - 1), (startx, y + 1),
(stopx, y - 1), (stopx, y + 1)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.hline(surf, startx, stopx, y, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_vline(self):
"""vline(surface, x, y1, y2, color): return None"""
fg = self.foreground_color
bg = self.background_color
x = 50
starty = 10
stopy = 80
fg_test_points = [(x, starty), (x, stopy), (x, (stopy - starty) // 2)]
bg_test_points = [(x, starty - 1), (x, stopy + 1),
(x - 1, starty), (x + 1, starty),
(x - 1, stopy), (x + 1, stopy)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.vline(surf, x, starty, stopy, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_rectangle(self):
"""rectangle(surface, rect, color): return None"""
fg = self.foreground_color
bg = self.background_color
rect = pygame.Rect(10, 15, 55, 62)
rect_tuple = tuple(rect)
fg_test_points = [rect.topleft,
(rect.right - 1, rect.top),
(rect.left, rect.bottom - 1),
(rect.right - 1, rect.bottom - 1)]
bg_test_points = [(rect.left - 1, rect.top - 1),
(rect.left + 1, rect.top + 1),
(rect.right, rect.top - 1),
(rect.right - 2, rect.top + 1),
(rect.left - 1, rect.bottom),
(rect.left + 1, rect.bottom - 2),
(rect.right, rect.bottom),
(rect.right - 2, rect.bottom - 2)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.rectangle(surf, rect, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
surf.fill(bg)
pygame.gfxdraw.rectangle(surf, rect_tuple, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_box(self):
"""box(surface, rect, color): return None"""
fg = self.foreground_color
bg = self.background_color
rect = pygame.Rect(10, 15, 55, 62)
rect_tuple = tuple(rect)
fg_test_points = [rect.topleft,
(rect.left + 1, rect.top + 1),
(rect.right - 1, rect.top),
(rect.right - 2, rect.top + 1),
(rect.left, rect.bottom - 1),
(rect.left + 1, rect.bottom - 2),
(rect.right - 1, rect.bottom - 1),
(rect.right - 2, rect.bottom - 2)]
bg_test_points = [(rect.left - 1, rect.top - 1),
(rect.right, rect.top - 1),
(rect.left - 1, rect.bottom),
(rect.right, rect.bottom)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.box(surf, rect, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
surf.fill(bg)
pygame.gfxdraw.box(surf, rect_tuple, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_line(self):
"""line(surface, x1, y1, x2, y2, color): return None"""
fg = self.foreground_color
bg = self.background_color
x1 = 10
y1 = 15
x2 = 92
y2 = 77
fg_test_points = [(x1, y1), (x2, y2)]
bg_test_points = [(x1 - 1, y1), (x1, y1 - 1), (x1 - 1, y1 - 1),
(x2 + 1, y2), (x2, y2 + 1), (x2 + 1, y2 + 1)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.line(surf, x1, y1, x2, y2, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_circle(self):
"""circle(surface, x, y, r, color): return None"""
fg = self.foreground_color
bg = self.background_color
x = 45
y = 40
r = 30
fg_test_points = [(x, y - r),
(x, y + r),
(x - r, y),
(x + r, y)]
bg_test_points = [(x, y),
(x, y - r + 1),
(x, y - r - 1),
(x, y + r + 1),
(x, y + r - 1),
(x - r - 1, y),
(x - r + 1, y),
(x + r + 1, y),
(x + r - 1, y)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.circle(surf, x, y, r, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_arc(self):
"""arc(surface, x, y, r, start, end, color): return None"""
fg = self.foreground_color
bg = self.background_color
x = 45
y = 40
r = 30
start = 0 # +x direction, but not (x + r, y) (?)
end = 90 # -y direction, including (x, y + r)
fg_test_points = [(x, y + r), (x + r, y + 1)]
bg_test_points = [(x, y),
(x, y - r),
(x - r, y),
(x, y + r + 1),
(x, y + r - 1),
(x - 1, y + r),
(x + r + 1, y),
(x + r - 1, y),
(x + r, y)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.arc(surf, x, y, r, start, end, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_aacircle(self):
"""aacircle(surface, x, y, r, color): return None"""
fg = self.foreground_color
bg = self.background_color
x = 45
y = 40
r = 30
fg_test_points = [(x, y - r),
(x, y + r),
(x - r, y),
(x + r, y)]
bg_test_points = [(x, y),
(x, y - r + 1),
(x, y - r - 1),
(x, y + r + 1),
(x, y + r - 1),
(x - r - 1, y),
(x - r + 1, y),
(x + r + 1, y),
(x + r - 1, y)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.aacircle(surf, x, y, r, fg)
for posn in fg_test_points:
self.check_not_at(surf, posn, bg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_filled_circle(self):
"""filled_circle(surface, x, y, r, color): return None"""
fg = self.foreground_color
bg = self.background_color
x = 45
y = 40
r = 30
fg_test_points = [(x, y - r),
(x, y - r + 1),
(x, y + r),
(x, y + r - 1),
(x - r, y),
(x - r + 1, y),
(x + r, y),
(x + r - 1, y),
(x, y)]
bg_test_points = [(x, y - r - 1),
(x, y + r + 1),
(x - r - 1, y),
(x + r + 1, y)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.filled_circle(surf, x, y, r, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_ellipse(self):
"""ellipse(surface, x, y, rx, ry, color): return None"""
fg = self.foreground_color
bg = self.background_color
x = 45
y = 40
rx = 30
ry = 35
fg_test_points = [(x, y - ry),
(x, y + ry),
(x - rx, y),
(x + rx, y)]
bg_test_points = [(x, y),
(x, y - ry + 1),
(x, y - ry - 1),
(x, y + ry + 1),
(x, y + ry - 1),
(x - rx - 1, y),
(x - rx + 1, y),
(x + rx + 1, y),
(x + rx - 1, y)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.ellipse(surf, x, y, rx, ry, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_aaellipse(self):
"""aaellipse(surface, x, y, rx, ry, color): return None"""
fg = self.foreground_color
bg = self.background_color
x = 45
y = 40
rx = 30
ry = 35
fg_test_points = [(x, y - ry),
(x, y + ry),
(x - rx, y),
(x + rx, y)]
bg_test_points = [(x, y),
(x, y - ry + 1),
(x, y - ry - 1),
(x, y + ry + 1),
(x, y + ry - 1),
(x - rx - 1, y),
(x - rx + 1, y),
(x + rx + 1, y),
(x + rx - 1, y)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.aaellipse(surf, x, y, rx, ry, fg)
for posn in fg_test_points:
self.check_not_at(surf, posn, bg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_filled_ellipse(self):
"""filled_ellipse(surface, x, y, rx, ry, color): return None"""
fg = self.foreground_color
bg = self.background_color
x = 45
y = 40
rx = 30
ry = 35
fg_test_points = [(x, y - ry),
(x, y - ry + 1),
(x, y + ry),
(x, y + ry - 1),
(x - rx, y),
(x - rx + 1, y),
(x + rx, y),
(x + rx - 1, y),
(x, y)]
bg_test_points = [(x, y - ry - 1),
(x, y + ry + 1),
(x - rx - 1, y),
(x + rx + 1, y)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.filled_ellipse(surf, x, y, rx, ry, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_pie(self):
"""pie(surface, x, y, r, start, end, color): return None"""
fg = self.foreground_color
bg = self.background_color
x = 45
y = 40
r = 30
start = 0 # +x direction, including (x + r, y)
end = 90 # -y direction, but not (x, y + r) (?)
fg_test_points = [(x, y),
(x + 1, y),
(x, y + 1),
(x + r, y)]
bg_test_points = [(x - 1, y),
(x, y - 1),
(x - 1, y - 1),
(x + 1, y + 1),
(x + r + 1, y),
(x + r, y - 1),
(x, y + r)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.pie(surf, x, y, r, start, end, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_trigon(self):
"""trigon(surface, x1, y1, x2, y2, x3, y3, color): return None"""
fg = self.foreground_color
bg = self.background_color
x1 = 10
y1 = 15
x2 = 92
y2 = 77
x3 = 20
y3 = 60
fg_test_points = [(x1, y1), (x2, y2), (x3, y3)]
bg_test_points = [(x1 - 1, y1 - 1),
(x2 + 1, y2 + 1),
(x3 - 1, y3 + 1),
(x1 + 10, y1 + 30)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.trigon(surf, x1, y1, x2, y2, x3, y3, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_aatrigon(self):
"""aatrigon(surface, x1, y1, x2, y2, x3, y3, color): return None"""
fg = self.foreground_color
bg = self.background_color
x1 = 10
y1 = 15
x2 = 92
y2 = 77
x3 = 20
y3 = 60
fg_test_points = [(x1, y1), (x2, y2), (x3, y3)]
bg_test_points = [(x1 - 1, y1 - 1),
(x2 + 1, y2 + 1),
(x3 - 1, y3 + 1),
(x1 + 10, y1 + 30)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.aatrigon(surf, x1, y1, x2, y2, x3, y3, fg)
for posn in fg_test_points:
self.check_not_at(surf, posn, bg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_filled_trigon(self):
"""filled_trigon(surface, x1, y1, x2, y2, x3, y3, color): return None"""
fg = self.foreground_color
bg = self.background_color
x1 = 10
y1 = 15
x2 = 92
y2 = 77
x3 = 20
y3 = 60
fg_test_points = [(x1, y1), (x2, y2), (x3, y3),
(x1 + 10, y1 + 30)]
bg_test_points = [(x1 - 1, y1 - 1),
(x2 + 1, y2 + 1),
(x3 - 1, y3 + 1)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.filled_trigon(surf, x1, y1, x2, y2, x3, y3, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_polygon(self):
"""polygon(surface, points, color): return None"""
fg = self.foreground_color
bg = self.background_color
points = [(10, 80), (10, 15), (92, 25), (92, 80)]
fg_test_points = (points +
[(points[0][0], points[0][1] - 1),
(points[0][0] + 1, points[0][1]),
(points[3][0] - 1, points[3][1]),
(points[3][0], points[3][1] - 1),
(points[2][0], points[2][1] + 1)])
bg_test_points = [(points[0][0] - 1, points[0][1]),
(points[0][0], points[0][1] + 1),
(points[0][0] - 1, points[0][1] + 1),
(points[0][0] + 1, points[0][1] - 1),
(points[3][0] + 1, points[3][1]),
(points[3][0], points[3][1] + 1),
(points[3][0] + 1, points[3][1] + 1),
(points[3][0] - 1, points[3][1] - 1),
(points[2][0] + 1, points[2][1]),
(points[2][0] - 1, points[2][1] + 1),
(points[1][0] - 1, points[1][1]),
(points[1][0], points[1][1] - 1),
(points[1][0] - 1, points[1][1] - 1)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.polygon(surf, points, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_aapolygon(self):
"""aapolygon(surface, points, color): return None"""
fg = self.foreground_color
bg = self.background_color
points = [(10, 80), (10, 15), (92, 25), (92, 80)]
fg_test_points = (points +
[(points[0][0], points[0][1] - 1),
(points[0][0] + 1, points[0][1]),
(points[3][0] - 1, points[3][1]),
(points[3][0], points[3][1] - 1),
(points[2][0], points[2][1] + 1)])
bg_test_points = [(points[0][0] - 1, points[0][1]),
(points[0][0], points[0][1] + 1),
(points[0][0] - 1, points[0][1] + 1),
(points[0][0] + 1, points[0][1] - 1),
(points[3][0] + 1, points[3][1]),
(points[3][0], points[3][1] + 1),
(points[3][0] + 1, points[3][1] + 1),
(points[3][0] - 1, points[3][1] - 1),
(points[2][0] + 1, points[2][1]),
(points[2][0] - 1, points[2][1] + 1),
(points[1][0] - 1, points[1][1]),
(points[1][0], points[1][1] - 1),
(points[1][0] - 1, points[1][1] - 1)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.aapolygon(surf, points, fg)
for posn in fg_test_points:
self.check_not_at(surf, posn, bg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_filled_polygon(self):
"""filled_polygon(surface, points, color): return None"""
fg = self.foreground_color
bg = self.background_color
points = [(10, 80), (10, 15), (92, 25), (92, 80)]
fg_test_points = (points +
[(points[0][0], points[0][1] - 1),
(points[0][0] + 1, points[0][1]),
(points[0][0] + 1, points[0][1] - 1),
(points[3][0] - 1, points[3][1]),
(points[3][0], points[3][1] - 1),
(points[3][0] - 1, points[3][1] - 1),
(points[2][0], points[2][1] + 1),
(points[2][0] - 1, points[2][1] + 1)])
bg_test_points = [(points[0][0] - 1, points[0][1]),
(points[0][0], points[0][1] + 1),
(points[0][0] - 1, points[0][1] + 1),
(points[3][0] + 1, points[3][1]),
(points[3][0], points[3][1] + 1),
(points[3][0] + 1, points[3][1] + 1),
(points[2][0] + 1, points[2][1]),
(points[1][0] - 1, points[1][1]),
(points[1][0], points[1][1] - 1),
(points[1][0] - 1, points[1][1] - 1)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.filled_polygon(surf, points, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
def test_textured_polygon(self):
"""textured_polygon(surface, points, texture, tx, ty): return None"""
w, h = self.default_size
fg = self.foreground_color
bg = self.background_color
tx = 0
ty = 0
texture = pygame.Surface((w + tx, h + ty), 0, 24)
texture.fill(fg, (0, 0, w, h))
points = [(10, 80), (10, 15), (92, 25), (92, 80)]
        # It is unclear how to verify this precisely, as border points may
        # or may not be included in the textured polygon.
fg_test_points = [(points[1][0] + 30, points[1][1] + 40)]
bg_test_points = [(points[0][0] - 1, points[0][1]),
(points[0][0], points[0][1] + 1),
(points[0][0] - 1, points[0][1] + 1),
(points[3][0] + 1, points[3][1]),
(points[3][0], points[3][1] + 1),
(points[3][0] + 1, points[3][1] + 1),
(points[2][0] + 1, points[2][1]),
(points[1][0] - 1, points[1][1]),
(points[1][0], points[1][1] - 1),
(points[1][0] - 1, points[1][1] - 1)]
for surf in self.surfaces[1:]:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.textured_polygon(surf, points, texture, -tx, -ty)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
# Alpha blit to 8 bits-per-pixel surface forbidden.
texture = pygame.Surface(self.default_size, SRCALPHA, 32)
        self.assertRaises(ValueError,
                          pygame.gfxdraw.textured_polygon,
                          self.surfaces[0],
                          points,
                          texture, 0, 0)
def test_bezier(self):
"""bezier(surface, points, steps, color): return None"""
fg = self.foreground_color
bg = self.background_color
points = [(10, 50), (25, 15), (60, 80), (92, 30)]
fg_test_points = [points[0], points[3]]
bg_test_points = [(points[0][0] - 1, points[0][1]),
(points[3][0] + 1, points[3][1]),
(points[1][0], points[1][1] + 3),
(points[2][0], points[2][1] - 3)]
for surf in self.surfaces:
fg_adjusted = surf.unmap_rgb(surf.map_rgb(fg))
bg_adjusted = surf.unmap_rgb(surf.map_rgb(bg))
pygame.gfxdraw.bezier(surf, points, 30, fg)
for posn in fg_test_points:
self.check_at(surf, posn, fg_adjusted)
for posn in bg_test_points:
self.check_at(surf, posn, bg_adjusted)
if __name__ == '__main__':
unittest.main()
|
mit
|
mushtaqak/edx-platform
|
lms/djangoapps/verify_student/tests/test_fake_software_secure.py
|
86
|
2709
|
"""
Tests for the fake software secure response.
"""
from django.test import TestCase
from mock import patch
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin
from verify_student.models import SoftwareSecurePhotoVerification
class SoftwareSecureFakeViewTest(UrlResetMixin, TestCase):
"""
Base class to test the fake software secure view.
"""
def setUp(self, **kwargs):
enable_software_secure_fake = kwargs.get('enable_software_secure_fake', False)
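        # Patch the feature flag before the URL reset so the fake-response route is only wired up when the flag is enabled.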
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_SOFTWARE_SECURE_FAKE': enable_software_secure_fake}):
super(SoftwareSecureFakeViewTest, self).setUp('verify_student.urls')
self.user = UserFactory.create(username="test", password="test")
self.attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user)
self.client.login(username="test", password="test")
class SoftwareSecureFakeViewDisabledTest(SoftwareSecureFakeViewTest):
"""
Test the fake software secure response when feature flag
'ENABLE_SOFTWARE_SECURE_FAKE' is not enabled.
"""
def setUp(self):
super(SoftwareSecureFakeViewDisabledTest, self).setUp(enable_software_secure_fake=False)
def test_get_method_without_enable_feature_flag(self):
"""
Test that the user gets 404 response if the feature flag
'ENABLE_SOFTWARE_SECURE_FAKE' is not enabled.
"""
response = self.client.get(
'/verify_student/software-secure-fake-response'
)
self.assertEqual(response.status_code, 404)
class SoftwareSecureFakeViewEnabledTest(SoftwareSecureFakeViewTest):
"""
Test the fake software secure response when feature flag
'ENABLE_SOFTWARE_SECURE_FAKE' is enabled.
"""
def setUp(self):
super(SoftwareSecureFakeViewEnabledTest, self).setUp(enable_software_secure_fake=True)
def test_get_method_without_logged_in_user(self):
"""
Test that the user gets 302 response if that user is not logged in.
"""
self.client.logout()
response = self.client.get(
'/verify_student/software-secure-fake-response'
)
self.assertEqual(response.status_code, 302)
def test_get_method(self):
"""
Test that GET method of fake software secure view uses the most recent
attempt for the logged-in user.
"""
response = self.client.get(
'/verify_student/software-secure-fake-response'
)
self.assertEqual(response.status_code, 200)
self.assertIn('EdX-ID', response.content)
self.assertIn('results_callback', response.content)
|
agpl-3.0
|
shubhamchopra/spark
|
examples/src/main/python/ml/stopwords_remover_example.py
|
123
|
1434
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import StopWordsRemover
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("StopWordsRemoverExample")\
.getOrCreate()
# $example on$
sentenceData = spark.createDataFrame([
(0, ["I", "saw", "the", "red", "balloon"]),
(1, ["Mary", "had", "a", "little", "lamb"])
], ["id", "raw"])
remover = StopWordsRemover(inputCol="raw", outputCol="filtered")
remover.transform(sentenceData).show(truncate=False)
# $example off$
spark.stop()
|
apache-2.0
|
sfu-fas/coursys
|
courselib/rest.py
|
1
|
9763
|
from oauth_provider.utils import get_oauth_request
from oauth_provider.models import Token
from rest_framework_oauth.authentication import OAuthAuthentication
from api.models import ConsumerInfo
from rest_framework import permissions, authentication, fields, relations
from django.shortcuts import get_object_or_404
from django.conf import settings
from coredata.models import CourseOffering, Member
import pytz
import copy
class APIConsumerPermissions(permissions.BasePermission):
"""
Checks that the user's token has been authorized with all of the actions specified in View.consumer_permissions.
Implies IsAuthenticated permission check since we need to know who the user is before we can check to see what
they authorized.
"""
def has_permission(self, request, view):
if not request.user or not request.user.is_authenticated:
# must be authenticated one way or another
return False
authenticator = request.successful_authenticator
required_permissions = view.consumer_permissions
if isinstance(authenticator, authentication.SessionAuthentication):
# CAS authenticated: the world is your oyster
return True
elif isinstance(authenticator, OAuthAuthentication):
# OAuth authenticated: check that the consumer is allowed to do these things
# re-find the Token, since it isn't stashed in the request
# could be avoided if: http://code.larlet.fr/django-oauth-plus/issue/40/set-requestconsumer-and-requesttoken-to
oauth_req = get_oauth_request(request)
token = get_object_or_404(Token, key=oauth_req['oauth_token'], consumer__key=oauth_req['oauth_consumer_key'])
# consumer must have asked for all of the permissions being used
allowed_perms = ConsumerInfo.allowed_permissions(token)
return set(required_permissions) <= set(allowed_perms)
else:
raise ValueError("Unknown authentication method.")
class IsOfferingMember(permissions.BasePermission):
"""
Check that the authenticated user is a (non-dropped) member of the course.
"""
def has_permission(self, request, view):
if 'course_slug' not in view.kwargs:
return False
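        # Cache the offering and membership on the view so repeated permission checks in one request don't re-query the database.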
if not hasattr(view, 'offering'):
offering = get_object_or_404(CourseOffering, slug=view.kwargs['course_slug'])
view.offering = offering
if not hasattr(view, 'member'):
assert request.user.is_authenticated
member = Member.objects.exclude(role='DROP').filter(offering=offering, person__userid=request.user.username).first()
view.member = member
return bool(view.member)
class IsOfferingStaff(permissions.BasePermission):
"""
Check that the authenticated user is an instructor or TA for the course
"""
def has_permission(self, request, view):
if 'course_slug' not in view.kwargs:
return False
if not hasattr(view, 'offering'):
offering = get_object_or_404(CourseOffering, slug=view.kwargs['course_slug'])
view.offering = offering
if not hasattr(view, 'member'):
assert request.user.is_authenticated
member = Member.objects.filter(role__in=['INST', 'TA', 'APPR']).filter(offering=offering, person__userid=request.user.username).first()
view.member = member
return bool(view.member)
from django.core.cache import caches
from django.utils.encoding import force_text, iri_to_uri
from django.utils.cache import patch_response_headers, patch_cache_control
from rest_framework.response import Response
import hashlib
MAX_KEY_LENGTH = 200
class CacheMixin(object):
"""
View mixin to cache responses based on username (whether they are authenticated by session, oauth, ...).
Does this by caching the Response object *before* it is rendered into JSON, HTML, etc. What goes in the cache is
kwargs to rebuild the rest_framework.response.Response object.
Assumes that your response data is serializable into your cache, which seems pretty likely.
"""
cache_hours = 1 # number of hours to cache the response (Expires header and local cache)
cache_ignore_auth = False # set to True if view can be cached without regard to who is fetching it
def __init__(self, *args, **kwargs):
super(CacheMixin, self).__init__(*args, **kwargs)
# borrowed from FetchFromCacheMiddleware
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def _get_cache_key(self, request):
"""
Generate cache key that's exactly unique enough.
Assumes that the response is determined by the request.method, authenticated user, and URL path.
"""
# HTTP method
method = request.method
# Authenticated username
if not request.user.is_authenticated or self.cache_ignore_auth:
username = '*'
else:
username = request.user.username
# URL path
url = force_text(iri_to_uri(request.get_full_path()))
# build a cache key out of that
key = '#'.join(('CacheMixin', self.key_prefix, username, method, url))
if len(key) > MAX_KEY_LENGTH:
# make sure keys don't get too long
key = key[:(MAX_KEY_LENGTH - 33)] + '-' + hashlib.md5(key.encode('utf8')).hexdigest()
return key
def _timeout(self):
return self.cache_hours * 3600
@property
def default_response_headers(self):
# shouldn't be necessary since we're setting "cache-control: private" and delivering by HTTPS, but be sure
# there's no cross-contamination in caches
h = super(CacheMixin, self).default_response_headers
h['Vary'] = 'Accept, Authorization, Cookie'
return h
def cached_response(self, handler, request, *args, **kwargs):
# make sure we're actually being asked to do something
timeout = self._timeout()
if timeout <= 0:
return handler(request, *args, **kwargs)
# check the cache
cache_key = self._get_cache_key(request)
response_kwargs = self.cache.get(cache_key)
if response_kwargs:
# found it in the cache: hooray!
return Response(**response_kwargs)
# actually generate the response
response = handler(request, *args, **kwargs)
        # ignore errors and streamed responses: borrowed from UpdateCacheMiddleware
if response.streaming or response.status_code != 200:
return response
response['Cache-control'] = 'private'
patch_response_headers(response, cache_timeout=timeout)
# cache the response
assert isinstance(response, Response), "the response must be a rest_framework.response.Response instance"
response_kwargs = {
'data': response.data,
'status': response.status_code,
'template_name': response.template_name,
'headers': dict(list(response._headers.values())),
'exception': response.exception,
'content_type': response.content_type,
}
self.cache.set(cache_key, response_kwargs, timeout)
return response
def get(self, request, *args, **kwargs):
"""
Return the correct cached GET response.
"""
if hasattr(self, 'cached_get'):
handler = self.cached_get
else:
handler = super(CacheMixin, self).get
return self.cached_response(handler, request, *args, **kwargs)
def head(self, request, *args, **kwargs):
"""
Return the correct cached HEAD response.
Imitate the logic in django.views.generic.base.View.as_view.view which uses .get() in place of .head() if it's
not there.
"""
spr = super(CacheMixin, self)
if hasattr(self, 'cached_get') and not hasattr(self, 'cached_head'):
handler = self.cached_get
elif hasattr(self, 'cached_head'):
handler = self.cached_head
elif hasattr(spr, 'get') and not hasattr(spr, 'head'):
handler = spr.get
else:
handler = spr.head
return self.cached_response(handler, request, *args, **kwargs)
class HyperlinkCollectionField(fields.Field):
def __init__(self, hyperlink_data, help_text='links to additional information about this object', **kwargs):
        super(HyperlinkCollectionField, self).__init__(read_only=True, help_text=help_text, **kwargs)
self.hyperlink_data = hyperlink_data
self.label = None
def to_representation(self, value):
result = {}
for link in self.hyperlink_data:
label = link['label']
kwargs = copy.copy(link)
del kwargs['label']
field = relations.HyperlinkedRelatedField(read_only=True, **kwargs)
# fake the request into the context so the URL can be constructed
field._context = {'request': self.context.get('request', None)}
result[label] = field.to_representation(value)
return result
def get_attribute(self, instance):
# fake this out to prevent an exception trying to get data we don't care about
return instance
system_tz = pytz.timezone(settings.TIME_ZONE)
def utc_datetime(dt):
"""
Convert the local datetime value from the database to UTC, since that's just better for the API.
"""
if dt:
return system_tz.normalize(system_tz.localize(dt)).astimezone(pytz.utc)
else:
return None
|
gpl-3.0
|
neighborhoodhacker/msm-3.4
|
Documentation/target/tcm_mod_builder.py
|
4981
|
41422
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
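    # Walk the header line by line and collect every "(*name)" function-pointer declaration into fabric_ops.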
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
|
gpl-2.0
|
krautradio/PyRfK
|
lib/rfk/site/admin/tags.py
|
4
|
1319
|
from flask import render_template, request
from flask.ext.login import login_required, current_user
from flask.json import jsonify
import rfk.database
from rfk.site.helper import permission_required, emit_error
from rfk.database.show import Tag
from rfk.helper.taglist import taglist
from ..admin import admin
@admin.route('/tags')
@login_required
@permission_required(permission='admin')
def tags_list():
tags = Tag.query.all()
return render_template('admin/tags_list.html', tags=tags)
@admin.route('/tag/<int:tag>/edit')
@login_required
@permission_required(permission='admin')
def tags_edit(tag):
tag = Tag.query.get(tag)
if tag is None:
return 'no tag found'
if request.args.get('inline'):
template = '/admin/tagform-inline.html'
else:
template = '/admin/tagform.html'
return render_template(template, taglist=taglist, tag=tag)
@admin.route("/tag/<int:tag>/save", methods=['POST'])
@permission_required(permission='admin', ajax=True)
def save_tag(tag):
tag = Tag.query.get(tag)
if tag is None:
return emit_error(1, 'Invalid Tag')
tag.name = request.form['name']
tag.description = request.form['description']
tag.icon = request.form['icon']
rfk.database.session.commit()
return jsonify({'success': True, 'data': None})
|
bsd-3-clause
|
40223101/w16b_test
|
static/Brython3.1.1-20150328-091302/Lib/unittest/util.py
|
794
|
4157
|
"""Various utility functions."""
from collections import namedtuple, OrderedDict
__unittest = True
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
def strclass(cls):
return "%s.%s" % (cls.__module__, cls.__name__)
def sorted_list_difference(expected, actual):
"""Finds elements in only one or the other of two, sorted input lists.
Returns a two-element tuple of lists. The first list contains those
elements in the "expected" list but not in the "actual" list, and the
second contains those elements in the "actual" list but not in the
"expected" list. Duplicate elements in either input list are ignored.
"""
i = j = 0
missing = []
unexpected = []
while True:
try:
e = expected[i]
a = actual[j]
if e < a:
missing.append(e)
i += 1
while expected[i] == e:
i += 1
elif e > a:
unexpected.append(a)
j += 1
while actual[j] == a:
j += 1
else:
i += 1
try:
while expected[i] == e:
i += 1
finally:
j += 1
while actual[j] == a:
j += 1
except IndexError:
missing.extend(expected[i:])
unexpected.extend(actual[j:])
break
return missing, unexpected
def unorderable_list_difference(expected, actual):
"""Same behavior as sorted_list_difference but
for lists of unorderable items (like dicts).
As it does a linear search per item (remove) it
has O(n*n) performance."""
missing = []
while expected:
item = expected.pop()
try:
actual.remove(item)
except ValueError:
missing.append(item)
# anything left in actual is unexpected
return missing, actual
def three_way_cmp(x, y):
"""Return -1 if x < y, 0 if x == y and 1 if x > y"""
return (x > y) - (x < y)
_Mismatch = namedtuple('Mismatch', 'actual expected value')
def _count_diff_all_purpose(actual, expected):
'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
# elements need not be hashable
s, t = list(actual), list(expected)
m, n = len(s), len(t)
NULL = object()
result = []
for i, elem in enumerate(s):
if elem is NULL:
continue
cnt_s = cnt_t = 0
for j in range(i, m):
if s[j] == elem:
cnt_s += 1
s[j] = NULL
for j, other_elem in enumerate(t):
if other_elem == elem:
cnt_t += 1
t[j] = NULL
if cnt_s != cnt_t:
diff = _Mismatch(cnt_s, cnt_t, elem)
result.append(diff)
for i, elem in enumerate(t):
if elem is NULL:
continue
cnt_t = 0
for j in range(i, n):
if t[j] == elem:
cnt_t += 1
t[j] = NULL
diff = _Mismatch(0, cnt_t, elem)
result.append(diff)
return result
def _ordered_count(iterable):
'Return dict of element counts, in the order they were first seen'
c = OrderedDict()
for elem in iterable:
c[elem] = c.get(elem, 0) + 1
return c
def _count_diff_hashable(actual, expected):
'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
# elements must be hashable
s, t = _ordered_count(actual), _ordered_count(expected)
result = []
for elem, cnt_s in s.items():
cnt_t = t.get(elem, 0)
if cnt_s != cnt_t:
diff = _Mismatch(cnt_s, cnt_t, elem)
result.append(diff)
for elem, cnt_t in t.items():
if elem not in s:
diff = _Mismatch(0, cnt_t, elem)
result.append(diff)
return result
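# Hedged usage sketch (not part of the upstream module): a small, self-contained
# illustration of the helpers above, runnable only when this file is executed
# directly.
if __name__ == '__main__':
    # Elements present only in the first (sorted) list vs. only in the second.
    missing, unexpected = sorted_list_difference([1, 2, 3, 5], [2, 3, 4])
    assert (missing, unexpected) == ([1, 5], [4])
    # Count mismatches between two sequences of hashable elements.
    diffs = _count_diff_hashable(['a', 'a', 'b'], ['a', 'c'])
    assert _Mismatch(2, 1, 'a') in diffs and _Mismatch(0, 1, 'c') in diffs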
|
agpl-3.0
|
webmull/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/comments.py
|
148
|
2030
|
# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# A tool for automating dealing with bugzilla, posting patches, committing
# patches, etc.
from webkitpy.common.config import urls
def bug_comment_from_svn_revision(svn_revision):
return "Committed r%s: <%s>" % (svn_revision, urls.view_revision_url(svn_revision))
def bug_comment_from_commit_text(scm, commit_text):
svn_revision = scm.svn_revision_from_commit_text(commit_text)
return bug_comment_from_svn_revision(svn_revision)
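# Hedged usage note (not part of the upstream module): the exact link depends on
# urls.view_revision_url(), so the revision and URL below are illustrative only;
# the comment always follows the "Committed r<revision>: <url>" shape built above.
#
# bug_comment_from_svn_revision(12345)
# -> 'Committed r12345: <http://trac.webkit.org/changeset/12345>'  (URL assumed)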
|
bsd-3-clause
|
jkarnows/scikit-learn
|
sklearn/metrics/cluster/unsupervised.py
|
230
|
8281
|
""" Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
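# Hedged usage sketch (not part of the upstream module): a tiny illustration of
# the two public functions above; the points and labels are made up and the
# block only runs when this file is executed directly.
if __name__ == '__main__':
    X = np.array([[0.0, 0.0], [0.1, 0.0], [4.0, 4.0], [4.1, 4.0]])
    labels = np.array([0, 0, 1, 1])
    # Per-sample coefficients are close to 1 for this well-separated toy data.
    print(silhouette_samples(X, labels))
    # The mean over all samples summarises the clustering in a single number.
    print(silhouette_score(X, labels))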
|
bsd-3-clause
|
mdsafwan/Deal-My-Stuff
|
Lib/site-packages/django/templatetags/i18n.py
|
82
|
17673
|
from __future__ import unicode_literals
import re
import sys
from django.conf import settings
from django.template import Library, Node, TemplateSyntaxError, Variable
from django.template.base import (
TOKEN_TEXT, TOKEN_VAR, TokenParser, render_value_in_context,
)
from django.template.defaulttags import token_kwargs
from django.utils import six, translation
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = lang_code
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = languages
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop, asvar=None,
message_context=None):
self.noop = noop
self.asvar = asvar
self.message_context = message_context
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, six.string_types):
self.filter_expression.var = Variable("'%s'" %
self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = (
self.message_context.resolve(context))
output = self.filter_expression.resolve(context)
value = render_value_in_context(output, context)
if self.asvar:
context[self.asvar] = value
return ''
else:
return value
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None, message_context=None, trimmed=False):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
self.message_context = message_context
self.trimmed = trimmed
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents.replace('%', '%%'))
elif token.token_type == TOKEN_VAR:
result.append('%%(%s)s' % token.contents)
vars.append(token.contents)
msg = ''.join(result)
if self.trimmed:
msg = translation.trim_whitespace(msg)
return msg, vars
def render(self, context, nested=False):
if self.message_context:
message_context = self.message_context.resolve(context)
else:
message_context = None
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
if message_context:
result = translation.npgettext(message_context, singular,
plural, count)
else:
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
if message_context:
result = translation.pgettext(message_context, singular)
else:
result = translation.ugettext(singular)
default_value = context.template.engine.string_if_invalid
def render_value(key):
if key in context:
val = context[key]
else:
val = default_value % key if '%s' in default_value else default_value
return render_value_in_context(val, context)
data = {v: render_value(v) for v in vars}
context.pop()
try:
result = result % data
except (KeyError, ValueError):
if nested:
# Either string is malformed, or it's a bug
raise TemplateSyntaxError("'blocktrans' is unable to format "
"string returned by gettext: %r using %r" % (result, data))
with translation.override(None):
result = self.render(context, nested=True)
return result
class LanguageNode(Node):
def __init__(self, nodelist, language):
self.nodelist = nodelist
self.language = language
def render(self, context):
with translation.override(self.language.resolve(context)):
output = self.nodelist.render(context)
return output
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
@register.tag("get_language_info")
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4])
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style tuple (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4])
@register.filter
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
@register.filter
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
@register.filter
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
put its value into the ``language`` context
variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
# token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
@register.tag("trans")
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by mark-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
It is possible to store the translated string into a variable::
{% trans "this is a test" as var %}
{{ var }}
Contextual translations are also supported::
{% trans "this is a test" context "greeting" %}
This is equivalent to calling pgettext instead of (u)gettext.
"""
class TranslateParser(TokenParser):
def top(self):
value = self.value()
# Backwards Compatibility fix:
# FilterExpression does not support single-quoted strings,
# so we make a cheap localized fix in order to maintain
# backwards compatibility with existing uses of ``trans``
# where single quote use is supported.
if value[0] == "'":
m = re.match("^'([^']+)'(\|.*$)", value)
if m:
value = '"%s"%s' % (m.group(1).replace('"', '\\"'), m.group(2))
elif value[-1] == "'":
value = '"%s"' % value[1:-1].replace('"', '\\"')
noop = False
asvar = None
message_context = None
while self.more():
tag = self.tag()
if tag == 'noop':
noop = True
elif tag == 'context':
message_context = parser.compile_filter(self.value())
elif tag == 'as':
asvar = self.tag()
else:
raise TemplateSyntaxError(
"Only options for 'trans' are 'noop', "
"'context \"xxx\"', and 'as VAR'.")
return value, noop, asvar, message_context
value, noop, asvar, message_context = TranslateParser(token.contents).top()
return TranslateNode(parser.compile_filter(value), noop, asvar,
message_context)
@register.tag("blocktrans")
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
Contextual translations are supported::
{% blocktrans with bar=foo|filter context "greeting" %}
This is {{ bar }}.
{% endblocktrans %}
This is equivalent to calling pgettext/npgettext instead of
(u)gettext/(u)ngettext.
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
elif option == "context":
try:
value = remaining_bits.pop(0)
value = parser.compile_filter(value)
except Exception:
msg = (
'"context" in %r tag expected '
'exactly one argument.') % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
elif option == "trimmed":
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = list(six.iteritems(options['count']))[0]
else:
countervar, counter = None, None
if 'context' in options:
message_context = options['context']
else:
message_context = None
extra_context = options.get('with', {})
trimmed = options.get("trimmed", False)
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter, message_context, trimmed=trimmed)
@register.tag
def language(parser, token):
"""
This will enable the given language just for this block.
Usage::
{% language "de" %}
This is {{ bar }} and {{ boo }}.
{% endlanguage %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0])
language = parser.compile_filter(bits[1])
nodelist = parser.parse(('endlanguage',))
parser.delete_first_token()
return LanguageNode(nodelist, language)
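# Hedged usage note (not part of the upstream module): the three filters above
# expose language metadata inside templates, roughly as follows (template syntax
# shown as a comment; LANGUAGE_CODE is assumed to come from the i18n context
# processor).
#
# {% load i18n %}
# {{ LANGUAGE_CODE|language_name }} / {{ LANGUAGE_CODE|language_name_local }}
# {% if LANGUAGE_CODE|language_bidi %}right-to-left{% else %}left-to-right{% endif %}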
|
apache-2.0
|
kytvi2p/tahoe-lafs
|
src/allmydata/scripts/cli.py
|
1
|
23714
|
import os.path, re, fnmatch
from twisted.python import usage
from allmydata.scripts.common import get_aliases, get_default_nodedir, \
DEFAULT_ALIAS, BaseOptions
from allmydata.util.encodingutil import argv_to_unicode, argv_to_abspath, quote_output
NODEURL_RE=re.compile("http(s?)://([^:]*)(:([1-9][0-9]*))?")
_default_nodedir = get_default_nodedir()
class FilesystemOptions(BaseOptions):
optParameters = [
["node-url", "u", None,
"Specify the URL of the Tahoe gateway node, such as "
"'http://127.0.0.1:3456'. "
"This overrides the URL found in the --node-directory ."],
["dir-cap", None, None,
"Specify which dirnode URI should be used as the 'tahoe' alias."]
]
def postOptions(self):
self["quiet"] = self.parent["quiet"]
if self.parent['node-directory']:
self['node-directory'] = argv_to_abspath(self.parent['node-directory'])
else:
self['node-directory'] = _default_nodedir
# compute a node-url from the existing options, put in self['node-url']
if self['node-url']:
if (not isinstance(self['node-url'], basestring)
or not NODEURL_RE.match(self['node-url'])):
msg = ("--node-url is required to be a string and look like "
"\"http://HOSTNAMEORADDR:PORT\", not: %r" %
(self['node-url'],))
raise usage.UsageError(msg)
else:
node_url_file = os.path.join(self['node-directory'], "node.url")
self['node-url'] = open(node_url_file, "r").read().strip()
if self['node-url'][-1] != "/":
self['node-url'] += "/"
aliases = get_aliases(self['node-directory'])
if self['dir-cap']:
aliases[DEFAULT_ALIAS] = self['dir-cap']
self.aliases = aliases # maps alias name to dircap
class MakeDirectoryOptions(FilesystemOptions):
optParameters = [
("format", None, None, "Create a directory with the given format: SDMF or MDMF (case-insensitive)"),
]
def parseArgs(self, where=""):
self.where = argv_to_unicode(where)
if self['format']:
if self['format'].upper() not in ("SDMF", "MDMF"):
raise usage.UsageError("%s is an invalid format" % self['format'])
def getSynopsis(self):
return "Usage: %s [global-opts] mkdir [options] [REMOTE_DIR]" % (self.command_name,)
longdesc = """Create a new directory, either unlinked or as a subdirectory."""
class AddAliasOptions(FilesystemOptions):
def parseArgs(self, alias, cap):
self.alias = argv_to_unicode(alias)
if self.alias.endswith(u':'):
self.alias = self.alias[:-1]
self.cap = cap
def getSynopsis(self):
return "Usage: %s [global-opts] add-alias [options] ALIAS[:] DIRCAP" % (self.command_name,)
longdesc = """Add a new alias for an existing directory."""
class CreateAliasOptions(FilesystemOptions):
def parseArgs(self, alias):
self.alias = argv_to_unicode(alias)
if self.alias.endswith(u':'):
self.alias = self.alias[:-1]
def getSynopsis(self):
return "Usage: %s [global-opts] create-alias [options] ALIAS[:]" % (self.command_name,)
longdesc = """Create a new directory and add an alias for it."""
class ListAliasesOptions(FilesystemOptions):
def getSynopsis(self):
return "Usage: %s [global-opts] list-aliases [options]" % (self.command_name,)
longdesc = """Display a table of all configured aliases."""
class ListOptions(FilesystemOptions):
optFlags = [
("long", "l", "Use long format: show file sizes, and timestamps."),
("uri", "u", "Show file/directory URIs."),
("readonly-uri", None, "Show read-only file/directory URIs."),
("classify", "F", "Append '/' to directory names, and '*' to mutable."),
("json", None, "Show the raw JSON output."),
]
def parseArgs(self, where=""):
self.where = argv_to_unicode(where)
def getSynopsis(self):
return "Usage: %s [global-opts] ls [options] [PATH]" % (self.command_name,)
longdesc = """
List the contents of some portion of the grid.
If PATH is omitted, "tahoe:" is assumed.
When the -l or --long option is used, each line is shown in the
following format:
drwx <size> <date/time> <name in this directory>
where each of the letters on the left may be replaced by '-'.
If 'd' is present, it indicates that the object is a directory.
If the 'd' is replaced by a '?', the object type is unknown.
'rwx' is a Unix-like permissions mask: if the mask includes 'w',
then the object is writeable through its link in this directory
(note that the link might be replaceable even if the object is
not writeable through the current link).
The 'x' is a legacy of Unix filesystems. In Tahoe it is used
only to indicate that the contents of a directory can be listed.
Directories have no size, so their size field is shown as '-'.
Otherwise the size of the file, when known, is given in bytes.
The size of mutable files or unknown objects is shown as '?'.
The date/time shows when this link in the Tahoe filesystem was
last modified.
"""
class GetOptions(FilesystemOptions):
def parseArgs(self, arg1, arg2=None):
# tahoe get FOO |less # write to stdout
# tahoe get tahoe:FOO |less # same
# tahoe get FOO bar # write to local file
# tahoe get tahoe:FOO bar # same
self.from_file = argv_to_unicode(arg1)
if arg2:
self.to_file = argv_to_unicode(arg2)
else:
self.to_file = None
if self.to_file == "-":
self.to_file = None
def getSynopsis(self):
return "Usage: %s [global-opts] get [options] REMOTE_FILE LOCAL_FILE" % (self.command_name,)
longdesc = """
Retrieve a file from the grid and write it to the local filesystem. If
LOCAL_FILE is omitted or '-', the contents of the file will be written to
stdout."""
def getUsage(self, width=None):
t = FilesystemOptions.getUsage(self, width)
t += """
Examples:
% tahoe get FOO |less # write to stdout
% tahoe get tahoe:FOO |less # same
% tahoe get FOO bar # write to local file
% tahoe get tahoe:FOO bar # same
"""
return t
class PutOptions(FilesystemOptions):
optFlags = [
("mutable", "m", "Create a mutable file instead of an immutable one (like --format=SDMF)"),
]
optParameters = [
("format", None, None, "Create a file with the given format: SDMF and MDMF for mutable, CHK (default) for immutable. (case-insensitive)"),
]
def parseArgs(self, arg1=None, arg2=None):
# see Examples below
if arg1 is not None and arg2 is not None:
self.from_file = argv_to_unicode(arg1)
self.to_file = argv_to_unicode(arg2)
elif arg1 is not None and arg2 is None:
self.from_file = argv_to_unicode(arg1) # might be "-"
self.to_file = None
else:
self.from_file = None
self.to_file = None
if self.from_file == u"-":
self.from_file = None
if self['format']:
if self['format'].upper() not in ("SDMF", "MDMF", "CHK"):
raise usage.UsageError("%s is an invalid format" % self['format'])
def getSynopsis(self):
return "Usage: %s [global-opts] put [options] LOCAL_FILE REMOTE_FILE" % (self.command_name,)
longdesc = """
Put a file into the grid, copying its contents from the local filesystem.
If REMOTE_FILE is missing, upload the file but do not link it into a
directory; also print the new filecap to stdout. If LOCAL_FILE is missing
or '-', data will be copied from stdin. REMOTE_FILE is assumed to start
with tahoe: unless otherwise specified.
If the destination file already exists and is mutable, it will be modified
in-place, whether or not --mutable is specified. (--mutable only affects
creation of new files.)"""
def getUsage(self, width=None):
t = FilesystemOptions.getUsage(self, width)
t += """
Examples:
% cat FILE | tahoe put # create unlinked file from stdin
% cat FILE | tahoe put - # same
% tahoe put bar # create unlinked file from local 'bar'
% cat FILE | tahoe put - FOO # create tahoe:FOO from stdin
% tahoe put bar FOO # copy local 'bar' to tahoe:FOO
% tahoe put bar tahoe:FOO # same
% tahoe put bar MUTABLE-FILE-WRITECAP # modify the mutable file in-place
"""
return t
class CpOptions(FilesystemOptions):
optFlags = [
("recursive", "r", "Copy source directory recursively."),
("verbose", "v", "Be noisy about what is happening."),
("caps-only", None,
"When copying to local files, write out filecaps instead of actual "
"data (only useful for debugging and tree-comparison purposes)."),
]
def parseArgs(self, *args):
if len(args) < 2:
raise usage.UsageError("cp requires at least two arguments")
self.sources = map(argv_to_unicode, args[:-1])
self.destination = argv_to_unicode(args[-1])
def getSynopsis(self):
return "Usage: %s [global-opts] cp [options] FROM.. TO" % (self.command_name,)
longdesc = """
Use 'tahoe cp' to copy files between a local filesystem and a Tahoe grid.
Any FROM/TO arguments that begin with an alias indicate Tahoe-side
files or non-file arguments. Directories will be copied recursively.
New Tahoe-side directories will be created when necessary. Assuming that
you have previously set up an alias 'home' with 'tahoe create-alias home',
here are some examples:
tahoe cp ~/foo.txt home: # creates tahoe-side home:foo.txt
tahoe cp ~/foo.txt /tmp/bar.txt home: # copies two files to home:
tahoe cp ~/Pictures home:stuff/my-pictures # copies directory recursively
You can also use a dircap as either FROM or TO target:
tahoe cp URI:DIR2-RO:ixqhc4kdbjxc7o65xjnveoewym:5x6lwoxghrd5rxhwunzavft2qygfkt27oj3fbxlq4c6p45z5uneq/blog.html ./ # copy Zooko's wiki page to a local file
This command still has some limitations: symlinks and special files
(device nodes, named pipes) are not handled very well. Arguments should
probably not have trailing slashes. 'tahoe cp' does not behave as much
like /bin/cp as you would wish, especially with respect to trailing
slashes.
"""
class UnlinkOptions(FilesystemOptions):
def parseArgs(self, where):
self.where = argv_to_unicode(where)
def getSynopsis(self):
return "Usage: %s [global-opts] unlink [options] REMOTE_FILE" % (self.command_name,)
class RmOptions(UnlinkOptions):
def getSynopsis(self):
return "Usage: %s [global-opts] rm [options] REMOTE_FILE" % (self.command_name,)
class MvOptions(FilesystemOptions):
def parseArgs(self, frompath, topath):
self.from_file = argv_to_unicode(frompath)
self.to_file = argv_to_unicode(topath)
def getSynopsis(self):
return "Usage: %s [global-opts] mv [options] FROM TO" % (self.command_name,)
longdesc = """
Use 'tahoe mv' to move files that are already on the grid elsewhere on
the grid, e.g., 'tahoe mv alias:some_file alias:new_file'.
If moving a remote file into a remote directory, you'll need to append a
'/' to the name of the remote directory, e.g., 'tahoe mv tahoe:file1
tahoe:dir/', not 'tahoe mv tahoe:file1 tahoe:dir'.
Note that it is not possible to use this command to move local files to
the grid -- use 'tahoe cp' for that.
"""
class LnOptions(FilesystemOptions):
def parseArgs(self, frompath, topath):
self.from_file = argv_to_unicode(frompath)
self.to_file = argv_to_unicode(topath)
def getSynopsis(self):
return "Usage: %s [global-opts] ln [options] FROM_LINK TO_LINK" % (self.command_name,)
longdesc = """
Use 'tahoe ln' to duplicate a link (directory entry) already on the grid
to elsewhere on the grid. For example 'tahoe ln alias:some_file
alias:new_file' causes 'alias:new_file' to point to the same object that
'alias:some_file' points to.
(The argument order is the same as Unix ln. To remember the order, you
can think of this command as copying a link, rather than copying a file
as 'tahoe cp' does. Then the argument order is consistent with that of
'tahoe cp'.)
When linking a remote file into a remote directory, you'll need to append
a '/' to the name of the remote directory, e.g. 'tahoe ln tahoe:file1
tahoe:dir/' (which is shorthand for 'tahoe ln tahoe:file1
tahoe:dir/file1'). If you forget the '/', e.g. 'tahoe ln tahoe:file1
tahoe:dir', the 'ln' command will refuse to overwrite the 'tahoe:dir'
directory, and will exit with an error.
Note that it is not possible to use this command to create links between
local and remote files.
"""
class BackupConfigurationError(Exception):
pass
class BackupOptions(FilesystemOptions):
optFlags = [
("verbose", "v", "Be noisy about what is happening."),
("ignore-timestamps", None, "Do not use backupdb timestamps to decide whether a local file is unchanged."),
]
vcs_patterns = ('CVS', 'RCS', 'SCCS', '.git', '.gitignore', '.cvsignore',
'.svn', '.arch-ids','{arch}', '=RELEASE-ID',
'=meta-update', '=update', '.bzr', '.bzrignore',
'.bzrtags', '.hg', '.hgignore', '_darcs')
def __init__(self):
super(BackupOptions, self).__init__()
self['exclude'] = set()
def parseArgs(self, localdir, topath):
self.from_dir = argv_to_unicode(localdir)
self.to_dir = argv_to_unicode(topath)
def getSynopsis(self):
return "Usage: %s [global-opts] backup [options] FROM ALIAS:TO" % (self.command_name,)
def opt_exclude(self, pattern):
"""Ignore files matching a glob pattern. You may give multiple
'--exclude' options."""
g = argv_to_unicode(pattern).strip()
if g:
exclude = self['exclude']
exclude.add(g)
def opt_exclude_from(self, filepath):
"""Ignore file matching glob patterns listed in file, one per
line. The file is assumed to be in the argv encoding."""
abs_filepath = argv_to_abspath(filepath)
try:
exclude_file = file(abs_filepath)
except:
raise BackupConfigurationError('Error opening exclude file %s.' % quote_output(abs_filepath))
try:
for line in exclude_file:
self.opt_exclude(line)
finally:
exclude_file.close()
def opt_exclude_vcs(self):
"""Exclude files and directories used by following version control
systems: CVS, RCS, SCCS, Git, SVN, Arch, Bazaar(bzr), Mercurial,
Darcs."""
for pattern in self.vcs_patterns:
self.opt_exclude(pattern)
def filter_listdir(self, listdir):
"""Yields non-excluded childpaths in path."""
exclude = self['exclude']
exclude_regexps = [re.compile(fnmatch.translate(pat)) for pat in exclude]
for filename in listdir:
for regexp in exclude_regexps:
if regexp.match(filename):
break
else:
yield filename
longdesc = """
Add a versioned backup of the local FROM directory to a timestamped
subdirectory of the TO/Archives directory on the grid, sharing as many
files and directories as possible with earlier backups. Create TO/Latest
as a reference to the latest backup. Behaves somewhat like 'rsync -a
--link-dest=TO/Archives/(previous) FROM TO/Archives/(new); ln -sf
TO/Archives/(new) TO/Latest'."""
class WebopenOptions(FilesystemOptions):
optFlags = [
("info", "i", "Open the t=info page for the file"),
]
def parseArgs(self, where=''):
self.where = argv_to_unicode(where)
def getSynopsis(self):
return "Usage: %s [global-opts] webopen [options] [ALIAS:PATH]" % (self.command_name,)
longdesc = """Open a web browser to the contents of some file or
directory on the grid. When run without arguments, open the Welcome
page."""
class ManifestOptions(FilesystemOptions):
optFlags = [
("storage-index", "s", "Only print storage index strings, not pathname+cap."),
("verify-cap", None, "Only print verifycap, not pathname+cap."),
("repair-cap", None, "Only print repaircap, not pathname+cap."),
("raw", "r", "Display raw JSON data instead of parsed."),
]
def parseArgs(self, where=''):
self.where = argv_to_unicode(where)
def getSynopsis(self):
return "Usage: %s [global-opts] manifest [options] [ALIAS:PATH]" % (self.command_name,)
longdesc = """Print a list of all files and directories reachable from
the given starting point."""
class StatsOptions(FilesystemOptions):
optFlags = [
("raw", "r", "Display raw JSON data instead of parsed"),
]
def parseArgs(self, where=''):
self.where = argv_to_unicode(where)
def getSynopsis(self):
return "Usage: %s [global-opts] stats [options] [ALIAS:PATH]" % (self.command_name,)
longdesc = """Print statistics about of all files and directories
reachable from the given starting point."""
class CheckOptions(FilesystemOptions):
optFlags = [
("raw", None, "Display raw JSON data instead of parsed."),
("verify", None, "Verify all hashes, instead of merely querying share presence."),
("repair", None, "Automatically repair any problems found."),
("add-lease", None, "Add/renew lease on all shares."),
]
def parseArgs(self, where=''):
self.where = argv_to_unicode(where)
def getSynopsis(self):
return "Usage: %s [global-opts] check [options] [ALIAS:PATH]" % (self.command_name,)
longdesc = """
Check a single file or directory: count how many shares are available and
verify their hashes. Optionally repair the file if any problems were
found."""
class DeepCheckOptions(FilesystemOptions):
optFlags = [
("raw", None, "Display raw JSON data instead of parsed."),
("verify", None, "Verify all hashes, instead of merely querying share presence."),
("repair", None, "Automatically repair any problems found."),
("add-lease", None, "Add/renew lease on all shares."),
("verbose", "v", "Be noisy about what is happening."),
]
def parseArgs(self, where=''):
self.where = argv_to_unicode(where)
def getSynopsis(self):
return "Usage: %s [global-opts] deep-check [options] [ALIAS:PATH]" % (self.command_name,)
longdesc = """
Check all files and directories reachable from the given starting point
(which must be a directory), like 'tahoe check' but for multiple files.
Optionally repair any problems found."""
subCommands = [
["mkdir", None, MakeDirectoryOptions, "Create a new directory."],
["add-alias", None, AddAliasOptions, "Add a new alias cap."],
["create-alias", None, CreateAliasOptions, "Create a new alias cap."],
["list-aliases", None, ListAliasesOptions, "List all alias caps."],
["ls", None, ListOptions, "List a directory."],
["get", None, GetOptions, "Retrieve a file from the grid."],
["put", None, PutOptions, "Upload a file into the grid."],
["cp", None, CpOptions, "Copy one or more files or directories."],
["unlink", None, UnlinkOptions, "Unlink a file or directory on the grid."],
["rm", None, RmOptions, "Unlink a file or directory on the grid (same as unlink)."],
["mv", None, MvOptions, "Move a file within the grid."],
["ln", None, LnOptions, "Make an additional link to an existing file or directory."],
["backup", None, BackupOptions, "Make target dir look like local dir."],
["webopen", None, WebopenOptions, "Open a web browser to a grid file or directory."],
["manifest", None, ManifestOptions, "List all files/directories in a subtree."],
["stats", None, StatsOptions, "Print statistics about all files/directories in a subtree."],
["check", None, CheckOptions, "Check a single file or directory."],
["deep-check", None, DeepCheckOptions, "Check all files/directories reachable from a starting point."],
]
def mkdir(options):
from allmydata.scripts import tahoe_mkdir
rc = tahoe_mkdir.mkdir(options)
return rc
def add_alias(options):
from allmydata.scripts import tahoe_add_alias
rc = tahoe_add_alias.add_alias(options)
return rc
def create_alias(options):
from allmydata.scripts import tahoe_add_alias
rc = tahoe_add_alias.create_alias(options)
return rc
def list_aliases(options):
from allmydata.scripts import tahoe_add_alias
rc = tahoe_add_alias.list_aliases(options)
return rc
def list(options):
from allmydata.scripts import tahoe_ls
rc = tahoe_ls.list(options)
return rc
def get(options):
from allmydata.scripts import tahoe_get
rc = tahoe_get.get(options)
if rc == 0:
if options.to_file is None:
# be quiet, since the file being written to stdout should be
# proof enough that it worked, unless the user is unlucky
# enough to have picked an empty file
pass
else:
print >>options.stderr, "%s retrieved and written to %s" % \
(options.from_file, options.to_file)
return rc
def put(options):
from allmydata.scripts import tahoe_put
rc = tahoe_put.put(options)
return rc
def cp(options):
from allmydata.scripts import tahoe_cp
rc = tahoe_cp.copy(options)
return rc
def unlink(options, command="unlink"):
from allmydata.scripts import tahoe_unlink
rc = tahoe_unlink.unlink(options, command=command)
return rc
def rm(options):
return unlink(options, command="rm")
def mv(options):
from allmydata.scripts import tahoe_mv
rc = tahoe_mv.mv(options, mode="move")
return rc
def ln(options):
from allmydata.scripts import tahoe_mv
rc = tahoe_mv.mv(options, mode="link")
return rc
def backup(options):
from allmydata.scripts import tahoe_backup
rc = tahoe_backup.backup(options)
return rc
def webopen(options, opener=None):
from allmydata.scripts import tahoe_webopen
rc = tahoe_webopen.webopen(options, opener=opener)
return rc
def manifest(options):
from allmydata.scripts import tahoe_manifest
rc = tahoe_manifest.manifest(options)
return rc
def stats(options):
from allmydata.scripts import tahoe_manifest
rc = tahoe_manifest.stats(options)
return rc
def check(options):
from allmydata.scripts import tahoe_check
rc = tahoe_check.check(options)
return rc
def deepcheck(options):
from allmydata.scripts import tahoe_check
rc = tahoe_check.deepcheck(options)
return rc
dispatch = {
"mkdir": mkdir,
"add-alias": add_alias,
"create-alias": create_alias,
"list-aliases": list_aliases,
"ls": list,
"get": get,
"put": put,
"cp": cp,
"unlink": unlink,
"rm": rm,
"mv": mv,
"ln": ln,
"backup": backup,
"webopen": webopen,
"manifest": manifest,
"stats": stats,
"check": check,
"deep-check": deepcheck,
}
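# Hedged usage note (not part of the upstream module): the dispatch table above
# maps each sub-command verb to its handler; a caller such as tahoe's runner
# would look up the verb and pass it the parsed options object, roughly:
#
# def run_filesystem_command(verb, options):
#     handler = dispatch[verb]   # e.g. dispatch["ls"] -> list()
#     return handler(options)    # each handler returns the command's exit code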
|
gpl-2.0
|
fastmonkeys/netvisor.py
|
tests/test_auth.py
|
1
|
5206
|
import uuid
import pytest
from flexmock import flexmock
from freezegun import freeze_time
def make_auth(
sender='Testiclient',
partner_id='xxx_yyy',
partner_key='07f94228d149a96b2f25e3edad55096e',
customer_id='Integraatiokayttajan tunnus',
customer_key='7cd680e89e880553358bc07cd28b0ee2',
organization_id='1967543-8',
**kwargs
):
from netvisor.auth import NetvisorAuth
return NetvisorAuth(
sender,
partner_id,
partner_key,
customer_id,
customer_key,
organization_id,
**kwargs
)
@pytest.fixture
def auth():
return make_auth()
class TestNetvisorAuth(object):
@pytest.fixture
def http_request(self, request, auth):
(
flexmock(auth)
.should_receive('make_transaction_id')
.and_return('123456')
)
(
flexmock(auth)
.should_receive('make_timestamp')
.and_return('2009-01-12 15:49:12.221')
)
(
flexmock(auth)
.should_receive('make_mac')
.with_args(
'http://integrationdemo.netvisor.fi/accounting.nv',
'2009-01-12 15:49:12.221',
'123456'
)
.and_return('6b2783906969630c1b6649bf5b0e6620')
)
r = flexmock(
headers={},
url='http://integrationdemo.netvisor.fi/accounting.nv'
)
return auth(r)
def test_constructor_sets_sender(self, auth):
assert auth.sender == 'Testiclient'
def test_constructor_sets_partner_id(self, auth):
assert auth.partner_id == 'xxx_yyy'
def test_constructor_sets_partner_key(self, auth):
assert auth.partner_key == '07f94228d149a96b2f25e3edad55096e'
def test_constructor_sets_customer_id(self, auth):
assert auth.customer_id == 'Integraatiokayttajan tunnus'
def test_constructor_sets_customer_key(self, auth):
assert auth.customer_key == '7cd680e89e880553358bc07cd28b0ee2'
def test_constructor_sets_organization_id(self, auth):
assert auth.organization_id == '1967543-8'
def test_constructor_sets_default_language(self, auth):
assert auth.language == 'FI'
def test_constructor_sets_language(self, auth):
auth = make_auth(language='EN')
assert auth.language == 'EN'
@pytest.mark.parametrize(
('language', 'valid'),
[
('FI', True),
('EN', True),
('SE', True),
('NO', False),
('FR', False),
('', False),
(None, False),
]
)
def test_validates_language(self, auth, language, valid):
if valid:
auth.language = language
else:
with pytest.raises(ValueError) as exc_info:
auth.language = language
msg = str(exc_info.value)
assert msg == "language must be one of ('EN', 'FI', 'SE')"
def test_adds_sender_header_to_request(self, http_request):
assert (
http_request.headers['X-Netvisor-Authentication-Sender'] ==
'Testiclient'
)
def test_adds_customer_id_header_to_request(self, http_request):
assert (
http_request.headers['X-Netvisor-Authentication-CustomerId'] ==
'Integraatiokayttajan tunnus'
)
def test_adds_timestamp_header_to_request(self, http_request):
assert (
http_request.headers['X-Netvisor-Authentication-Timestamp'] ==
'2009-01-12 15:49:12.221'
)
def test_adds_language_header_to_request(self, http_request):
assert http_request.headers['X-Netvisor-Interface-Language'] == 'FI'
def test_adds_organization_id_header_to_request(self, http_request):
assert (
http_request.headers['X-Netvisor-Organisation-ID'] == '1967543-8'
)
def test_adds_transaction_id_header_to_request(self, http_request):
assert (
http_request.headers['X-Netvisor-Authentication-TransactionId'] ==
'123456'
)
def test_adds_mac_header_to_request(self, http_request):
assert (
http_request.headers['X-Netvisor-Authentication-MAC'] ==
'6b2783906969630c1b6649bf5b0e6620'
)
def test_adds_partner_id_header_to_request(self, http_request):
assert (
http_request.headers['X-Netvisor-Authentication-PartnerId'] ==
'xxx_yyy'
)
def test_make_transaction_id_uses_uuid(self, auth):
fake_uuid = flexmock(hex='123456')
flexmock(uuid).should_receive('uuid4').and_return(fake_uuid).once()
assert auth.make_transaction_id() == '123456'
def test_make_timestamp_returns_current_time_in_isoformat(self, auth):
with freeze_time('2009-01-12 15:49:12.221'):
assert auth.make_timestamp() == '2009-01-12 15:49:12.221'
def test_make_mac(self, auth):
mac = auth.make_mac(
'http://integrationdemo.netvisor.fi/accounting.nv',
'2009-01-12 15:49:12.221',
'123456'
)
assert mac == '6b2783906969630c1b6649bf5b0e6620'
|
mit
|
fergalbyrne/nupic
|
tests/swarming/nupic/swarming/experiments/simpleV2/description.py
|
32
|
15383
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'gym', 'first'),
(u'consumption', 'mean'),
(u'address', 'first')],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'address': { 'fieldname': u'address',
'n': 300,
'name': u'address',
'type': 'SDRCategoryEncoder',
'w': 21},
'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'maxval': 200,
'minval': 0,
'n': 1500,
'name': u'consumption',
'type': 'ScalarEncoder',
'w': 21},
'gym': { 'fieldname': u'gym',
'n': 300,
'name': u'gym',
'type': 'SDRCategoryEncoder',
'w': 21},
'timestamp_dayOfWeek': { 'dayOfWeek': (7, 3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 8),
'type': 'DateEncoder'}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2 input bits to
# form the column's potential pool.
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
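# Illustrative example (hypothetical values, not from this experiment): if
# predictAheadTime corresponds to one hour and the aggregation period is
# 15 minutes, aggregationDivide returns 4.0 and clParams 'steps' becomes '4'.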
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'test_NoProviders',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://swarming/test_data.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either the number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption',
inferenceElement=InferenceElement.prediction,
metric='rmse'),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regexes correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
agpl-3.0
|
sejoruiz/weatherino
|
suncalc.py
|
1
|
9623
|
#!/usr/bin/env python
from datetime import datetime, timedelta
from pytz import timezone
import pytz
import math
import logging
class SunCalc:
def __init__(self, latitude, longitude, date, formatting='%d %b %Y %H:%M:%S', tzone='Europe/Madrid'):
self.latitude=latitude
# The calculations use west-positive longitude, so the east-positive input is negated
self.longitude=-longitude
#Convert date to a suitable format
if not isinstance(date, datetime):
date = datetime.strptime(date, formatting)
# Add the timezone information, and store it in tzone
if date.tzinfo is None:
tzone = timezone(tzone)
date = tzone.localize(date)
else:
tzone = date.tzinfo
self.tzone = tzone
#Convert to UTC
self.date = date.astimezone(timezone("UTC"))
self.julian = self.timeToJulian()
#Given a Gregorian date (in UTC) calculate the corresponding Julian Date
def timeToJulian(self, date=None):
# If no date was given, fall back to the stored date
if date is None:
date=self.date
c0 = math.floor((date.month-3)/12)
x4 = date.year+c0
x3 = math.floor(x4/100)
x2 = x4%100
x1=date.month - 12*c0 -3
jd = math.floor(146097*x3/4) + math.floor(36525*x2/100) + math.floor((153*x1+2)/5) + date.day + 1721119
return jd
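# Worked example (illustrative): for 1 Jan 2000, c0 = -1, x4 = 1999, x3 = 19,
# x2 = 99, x1 = 10, giving jd = 693960 + 36159 + 306 + 1 + 1721119 = 2451545,
# the Julian Day number of the J2000.0 epoch.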
def julianToTime(self, julianDate=None):
if julianDate is None:
julianDate=self.julian
k3=4*(julianDate-1721120)+3
x3=math.floor(k3/146097)
k2=100*math.floor((k3%146097)/4)+99
x2=math.floor(k2/36525)
k1=5*math.floor((k2%36525)/100)+2
x1=math.floor(k1/153)
c0=math.floor((x1+2)/12)
year=int(round(100*x3+x2+c0))
month=int(round(x1-12*c0+3))
day=int(math.floor((k1%153)/5)+1)
#Hour, minute and second calculation
day_frac=julianDate - math.floor(julianDate)
#Hours since the beginning of the JULIAN day. Julian days begin at 12:00 noon (UTC)
hour=math.floor(day_frac*24)
day_frac=day_frac*24 - hour
#Adjust hour: fraction 0 of a Julian day corresponds to 12:00 noon in the Gregorian calendar
hour=int((12+hour)%24)
minute=int(math.floor(day_frac*60))
day_frac=day_frac*60 - minute
second=int(math.floor(day_frac*60))
targetTime=datetime(year,month,day,hour,minute,second)
zone=timezone("UTC")
targetTime=zone.localize(targetTime)
targetTime=targetTime.astimezone(self.tzone)
logging.debug(targetTime.strftime('Date is %d, %b %Y %H:%M:%S'))
return targetTime
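# Round-trip check (illustrative): julianToTime(2451545) corresponds to
# 1 Jan 2000, 12:00 UTC (returned converted to self.tzone), since the integer
# part of a Julian Day marks noon.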
#Calculate the mean anomaly of the orbit of the Earth
def meanAnomalyCalc(self, julian=None):
if julian is None:
julian=self.julian
#Constant definition
m0=357.5291
m1=0.98560028
j2000=2451545
#Calculation
self.anomaly = (m0+m1*(self.julian-j2000))%360
return self.anomaly
#Calculate the equation of center of the Earth's orbit, and the true anomaly
def centerOrbitCalc(self, anomaly=None):
if anomaly is None:
anomaly=self.anomaly
c1=1.9148
c2=0.02
c3=0.0003
anomaly = math.radians(anomaly)
self.center = ((c1*math.sin(anomaly) + c2*math.sin(2*anomaly) + c3*math.sin(3*anomaly))%360 + 360)%360
self.trueAnomaly = (self.center + math.degrees(anomaly))%360
return self.center
#Calculate the ecliptic longitude of the Sun
def elipticLengthCalc(self, trueAnomaly=None):
if trueAnomaly is None:
trueAnomaly=self.trueAnomaly
perihelion = 102.9372
self.eliptic = (trueAnomaly + perihelion + 180)%360
return self.eliptic
#Calculate the right ascension and declination of the Sun
def ascensiondeclinationCalc(self, eliptic=None):
if eliptic is None:
eliptic=self.eliptic
epsil = math.radians(23.45)
eliptic = math.radians(eliptic)
ascension = math.atan2(math.sin(eliptic)*math.cos(epsil), math.cos(eliptic))
declination = math.asin(math.sin(eliptic)*math.sin(epsil))
#Assign the values to self so they are easy to retrieve again
self.ascension = (math.degrees(ascension)+360)%360
self.declination = (math.degrees(declination)+360)%360
return self.declination
def siderealTimeCalc(self, julian=None, longitude=None):
if julian is None:
julian=self.julian
if longitude is None:
longitude=self.longitude
theta0=280.16
theta1=360.9856235
julian2k=2451545
self.sidereal = (theta0 + theta1*(julian-julian2k))%360 - longitude
return self.sidereal
def hourAngleCalc(self, sidereal=None, ascension=None, declination=None, latitude=None):
if sidereal is None:
sidereal=self.sidereal
if ascension is None:
ascension=self.ascension
if declination is None:
declination=self.declination
if latitude is None:
latitude=self.latitude
self.hourAngle=sidereal - ascension
hour = math.radians(self.hourAngle)
latitude = math.radians(latitude)
declination = math.radians(declination)
self.azimuth=math.atan2(math.sin(hour), math.cos(hour)*math.sin(latitude)-math.tan(declination)*math.cos(latitude))
return self.hourAngle
#Transit calculation: the moment the Sun crosses its highest point (solar noon)
def transitCalc(self, targetHour, julian=None, longitude=None, maxIterations=10, threshold=0.00001):
if julian is None:
julian=self.julian
if longitude is None:
longitude=self.longitude
julian2k=2451545
j0=0.0009
j1=0.0053
j2=-0.0069
j3=1
perihelion = 102.9372
n=round((julian - julian2k - j0)/j3 - (longitude+targetHour)/360)
jx=julian2k+j0+(targetHour+longitude)*j3/360+j3*n #First approx of the transit near J
logging.debug("Jx is %f" % jx)
logging.debug("n is %f" % n)
#Second part of the algorithm: re-estimate the parameters for the actual sunset/sunrise
m=self.meanAnomalyCalc(julian=jx)
lsun=(m+perihelion+180)%360
logging.debug("New m is %f" % m)
logging.debug("lsun is %f" % lsun)
m=math.radians(m)
lsun=math.radians(lsun)
jtrans=jx+j1*math.sin(m) + j2*math.sin(2*lsun)
return jtrans
# logging.debug("jtrans is %f" % jtrans)
# #Third iteration, we go for max accuracy. Iterate until threshold. It seems like we lose accuracy here
# iterations=0
# jtrans_new=jtrans
# jtrans_old=0
# hnew=targetHour
# hold=0
# while (abs(jtrans_new - jtrans_old)>threshold and iterations<maxIterations):
# jtrans_old=jtrans_new
# hold=hnew
# sidereal=self.siderealTimeCalc(julian=jtrans_old)
# hnew=self.hourAngleCalc(sidereal=sidereal)
# jtrans_new=jtrans_old + j3*(hold-hnew)/360
# iterations+=1
# print("Number of iterations performed: %d" % iterations)
# return jtrans_new
def hourTargetSunset(self, julian=None, latitude=None, declination=None):
if julian is None:
julian=self.julian
if latitude is None:
latitude=self.latitude
if declination is None:
declination=self.declination
declination=math.radians(declination)
latitude=math.radians(latitude)
h0=math.radians(-0.83)
hour = math.acos((math.sin(h0)-math.sin(declination)*math.sin(latitude))/(math.cos(declination)*math.cos(latitude)))
hour=math.degrees(hour)
return hour
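# This is the standard sunrise/sunset equation,
# cos(w0) = (sin(h0) - sin(dec)*sin(lat)) / (cos(dec)*cos(lat)),
# with h0 = -0.83 degrees accounting for atmospheric refraction and the apparent
# radius of the solar disc; the positive root gives the sunset hour angle.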
def hourTargetSunrise(self, julian=None, latitude=None, declination=None):
if julian is None:
julian=self.julian
if latitude is None:
latitude=self.latitude
if declination is None:
declination=self.declination
declination=math.radians(declination)
latitude=math.radians(latitude)
h0=math.radians(-0.83)
hour = math.acos((math.sin(h0)-math.sin(declination)*math.sin(latitude))/(math.cos(declination)*math.cos(latitude)))
hour=-math.degrees(hour)
return hour
def sunsetCalc(self, latitude=None, longitude=None, date=None):
if latitude is None:
latitude=self.latitude
if longitude is None:
longitude=self.longitude
if date is None:
date=self.date
self.meanAnomalyCalc()
self.centerOrbitCalc()
self.elipticLengthCalc()
self.ascensiondeclinationCalc()
hour=self.hourTargetSunset()
sunsetJulian=self.transitCalc(targetHour=hour)
setTime = self.julianToTime(julianDate=sunsetJulian)
return setTime
def sunriseCalc(self, latitude=None, longitude=None, date=None):
if latitude is None:
latitude=self.latitude
if longitude is None:
longitude=self.longitude
if date is None:
date=self.date
self.meanAnomalyCalc()
self.centerOrbitCalc()
self.elipticLengthCalc()
self.ascensiondeclinationCalc()
hour=self.hourTargetSunrise()
sunriseJulian=self.transitCalc(targetHour=hour)
riseTime = self.julianToTime(julianDate=sunriseJulian)
return riseTime
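# Minimal usage sketch (illustrative coordinates and date, not part of the original module):
# calc = SunCalc(40.4168, -3.7038, '21 Jun 2017 12:00:00')  # Madrid; longitude passed east-positive
# print(calc.sunriseCalc())  # local sunrise time (Europe/Madrid by default)
# print(calc.sunsetCalc())   # local sunset time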
|
mit
|
eg-zhang/scikit-learn
|
examples/applications/plot_stock_market.py
|
227
|
8284
|
"""
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to fluctuate together during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where the nodes
represent the stocks and the edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lookheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
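# The precision (inverse covariance) matrix is rescaled so that entry (i, j)
# becomes precision_ij / sqrt(precision_ii * precision_jj); the partial correlation
# is the negative of this off-diagonal value, but only absolute values are used
# below to weight the edges.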
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
|
bsd-3-clause
|
pieterdp/helptux
|
helptux/routes/auth.py
|
1
|
1444
|
from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, login_required, logout_user
from helptux.modules.user.authentication import LoginForm
from helptux.modules.api.user import UserApi
from helptux.modules.error import DatabaseItemDoesNotExist
from helptux import app
@app.route('/admin/login', methods=['GET', 'POST'])
def v_login():
form = LoginForm()
a_user = UserApi()
if request.method == 'POST' and form.validate_on_submit():
try:
user = a_user.get_by_user(form.email.data)
except DatabaseItemDoesNotExist:
# User does not exist
flash('Invalid username or password.')
else:
if user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('.v_index'))
else:
flash('Invalid username or password.')
##
# next=request.args.get('next') must be embedded in the <form action='admin/login?next=next'>,
# otherwise the POST request (when you submit the form) will not include the "next" bit.
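# A hypothetical template snippet that does this (not part of this repository):
# <form method="POST" action="{{ url_for('.v_login', next=next) }}"> ... </form>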
return render_template('admin/login.html', form=form, next=request.args.get('next'))
@app.route('/admin/logout', methods=['GET'])
@login_required
def v_logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('.v_index'))
|
gpl-2.0
|
EMBL-EBI-TSI/aap-client-python
|
aap_client/flask/decorators.py
|
1
|
3159
|
"""
Contains flask decorators and some helper functions
in order to integrate jwt tokens in flask in a natural way
"""
from functools import wraps
from future.utils import raise_with_traceback
from jwt import DecodeError, InvalidTokenError as JWTInvalidTokenError
from flask import current_app, request
try:
from flask import _app_ctx_stack as ctx_stack
except ImportError:
from flask import _request_ctx_stack as ctx_stack
from aap_client.tokens import verify_token
from aap_client.flask.config import CONFIG
from aap_client.flask.exceptions import (
InvalidRequestError,
InvalidTokenError,
NoAuthenticationError
)
def jwt_required(func):
"""
Decorator that ensures that the request contains a valid token.
Used to ensure the request to a view is from an authorized user.
:param func: The decorated view function
"""
@wraps(func)
def wrapper(*args, **kwargs): # pylint: disable=C0111
_load_jwt_to_context()
return func(*args, **kwargs)
return wrapper
def jwt_optional(func):
"""
Decorator that changes the flask context if the request contains
a valid token.
Used to retrieve the authorized user if the token is valid.
:param func: The decorated view function
"""
@wraps(func)
def wrapper(*args, **kwargs): # pylint: disable=C0111
try:
_load_jwt_to_context()
except NoAuthenticationError:
pass
except InvalidTokenError:
pass
return func(*args, **kwargs)
return wrapper
def get_user():
"""
Returns the user in the current context / request, otherwise returns None
"""
return getattr(ctx_stack.top, u'user', None)
def get_claims():
"""
Returns the claims in the current context / request, otherwise None
"""
return getattr(ctx_stack.top, u'token', None)
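# Usage sketch (illustrative, assumes an existing Flask app; not part of this module):
# @app.route('/protected')
# @jwt_required
# def protected():
#     return u'hello {}'.format(get_user())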
def _load_jwt_to_context():
claims = _decode_from_request()
ctx_stack.top.token = claims
ctx_stack.top.user = claims[u'sub']
def _decode_from_request():
# verify that the auth header exists
auth_header = request.headers.get(u'Authorization', None)
if not auth_header:
raise NoAuthenticationError(u'Authorization Required')
# verify that the header is in the correct format
# Authorization: Bearer <JWT>
splitted_header = auth_header.split()
if len(splitted_header) != 2 or not auth_header.startswith(u'Bearer '):
raise InvalidRequestError(u'Invalid Authorization header, '
u'expected \'Bearer <JWT>\'')
jwt = splitted_header[1]
try:
return verify_token(jwt, CONFIG.public_key)
except DecodeError as err:
raise_with_traceback(
InvalidTokenError(u'Unable to decode token: {}'.format(err)))
except JWTInvalidTokenError as err:
raise_with_traceback(
InvalidTokenError(u'{}'.format(err)))
def _get_jwt_client():
try:
return current_app.jwt_client
except AttributeError:
raise RuntimeError(u'JWTClient must be initialized with a flask '
u'application before using this method')
|
apache-2.0
|
klnrajareddy/PyGithub
|
github/tests/UserKey.py
|
39
|
3759
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
class UserKey(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.key = self.g.get_user().get_key(2626650)
def testAttributes(self):
self.assertEqual(self.key.id, 2626650)
self.assertEqual(self.key.key, "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA2Mm0RjTNAYFfSCtUpO54usdseroUSIYg5KX4JoseTpqyiB/hqewjYLAdUq/tNIQzrkoEJWSyZrQt0ma7/YCyMYuNGd3DU6q6ZAyBeY3E9RyCiKjO3aTL2VKQGFvBVVmGdxGVSCITRphAcsKc/PF35/fg9XP9S0anMXcEFtdfMHz41SSw+XtE+Vc+6cX9FuI5qUfLGbkv8L1v3g4uw9VXlzq4GfTA+1S7D6mcoGHopAIXFlVr+2RfDKdSURMcB22z41fljO1MW4+zUS/4FyUTpL991es5fcwKXYoiE+x06VJeJJ1Krwx+DZj45uweV6cHXt2JwJEI9fWB6WyBlDejWw==")
self.assertEqual(self.key.title, "Key added through PyGithub")
self.assertEqual(self.key.url, "https://api.github.com/user/keys/2626650")
self.assertTrue(self.key.verified)
def testEditWithoutArguments(self):
self.key.edit()
def testEditWithAllArguments(self):
self.key.edit("Title edited by PyGithub", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5Q58YmzZjU64prR5Pk91MfeHezOTgLqDYmepYbv3qjguiHtPai1vSai5WvUv3hgf9DArXsXE5CV6yoBIhAdGtpJKExHuQ2m4XTFCdbrgfQ3ypcSdgzEiQemyTA6TWwhbuwjJ1IqJMYOVLH+FBCkD8pyIpUDO7v3vaR2TCEuNwOS7lbsRsW3OkGYnUKjaPaCTe/inrqb7I3OE8cPhWJ3dM/zzzBj22J4LCNKhjKua8TFS74xGm3lNDZ6/twQl4n4xmrH/3tG+WOJicNO3JohNHqK9T0pILnr3epEyfdkBjcG0qXApqWvH2WipJhaH6of8Gdr0Z/K/7p8QFddmwNgdPQ==")
self.assertEqual(self.key.key, "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5Q58YmzZjU64prR5Pk91MfeHezOTgLqDYmepYbv3qjguiHtPai1vSai5WvUv3hgf9DArXsXE5CV6yoBIhAdGtpJKExHuQ2m4XTFCdbrgfQ3ypcSdgzEiQemyTA6TWwhbuwjJ1IqJMYOVLH+FBCkD8pyIpUDO7v3vaR2TCEuNwOS7lbsRsW3OkGYnUKjaPaCTe/inrqb7I3OE8cPhWJ3dM/zzzBj22J4LCNKhjKua8TFS74xGm3lNDZ6/twQl4n4xmrH/3tG+WOJicNO3JohNHqK9T0pILnr3epEyfdkBjcG0qXApqWvH2WipJhaH6of8Gdr0Z/K/7p8QFddmwNgdPQ==")
self.assertEqual(self.key.title, "Title edited by PyGithub")
def testDelete(self):
self.key.delete()
|
gpl-3.0
|
ovnicraft/edx-platform
|
lms/djangoapps/ccx/tests/test_overrides.py
|
22
|
6985
|
# coding=UTF-8
"""
tests for overrides
"""
import datetime
import mock
import pytz
from nose.plugins.attrib import attr
from courseware.field_overrides import OverrideFieldData
from django.test.utils import override_settings
from lms.djangoapps.courseware.tests.test_field_overrides import inject_field_overrides
from request_cache.middleware import RequestCache
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import override_field_for_ccx
from lms.djangoapps.ccx.tests.test_views import flatten, iter_blocks
@attr('shard_1')
@override_settings(FIELD_OVERRIDE_PROVIDERS=(
'ccx.overrides.CustomCoursesForEdxOverrideProvider',))
class TestFieldOverrides(ModuleStoreTestCase):
"""
Make sure field overrides behave in the expected manner.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""
Set up tests
"""
super(TestFieldOverrides, self).setUp()
self.course = course = CourseFactory.create()
self.course.enable_ccx = True
# Create a course outline
self.mooc_start = start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC)
self.mooc_due = due = datetime.datetime(
2010, 7, 7, 0, 0, tzinfo=pytz.UTC)
chapters = [ItemFactory.create(start=start, parent=course)
for _ in xrange(2)]
sequentials = flatten([
[ItemFactory.create(parent=chapter) for _ in xrange(2)]
for chapter in chapters])
verticals = flatten([
[ItemFactory.create(due=due, parent=sequential) for _ in xrange(2)]
for sequential in sequentials])
blocks = flatten([ # pylint: disable=unused-variable
[ItemFactory.create(parent=vertical) for _ in xrange(2)]
for vertical in verticals])
self.ccx = ccx = CustomCourseForEdX(
course_id=course.id,
display_name='Test CCX',
coach=AdminFactory.create())
ccx.save()
patch = mock.patch('ccx.overrides.get_current_ccx')
self.get_ccx = get_ccx = patch.start()
get_ccx.return_value = ccx
self.addCleanup(patch.stop)
self.addCleanup(RequestCache.clear_request_cache)
inject_field_overrides(iter_blocks(ccx.course), course, AdminFactory.create())
def cleanup_provider_classes():
"""
After everything is done, clean up by un-doing the change to the
OverrideFieldData object that is done during the wrap method.
"""
OverrideFieldData.provider_classes = None
self.addCleanup(cleanup_provider_classes)
def test_override_start(self):
"""
Test that overriding start date on a chapter works.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
self.assertEquals(chapter.start, ccx_start)
def test_override_num_queries_new_field(self):
"""
Test the number of queries executed when creating a new field override.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
# One outer SAVEPOINT/RELEASE SAVEPOINT pair around everything caused by the
# transaction.atomic decorator wrapping override_field_for_ccx.
# One SELECT and one INSERT.
# One inner SAVEPOINT/RELEASE SAVEPOINT pair around the INSERT caused by the
# transaction.atomic down in Django's get_or_create()/_create_object_from_params().
with self.assertNumQueries(6):
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
def test_override_num_queries_update_existing_field(self):
"""
Test that overriding an existing field executes create, fetch and update queries.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
new_ccx_start = datetime.datetime(2015, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
with self.assertNumQueries(3):
override_field_for_ccx(self.ccx, chapter, 'start', new_ccx_start)
def test_override_num_queries_field_value_not_changed(self):
"""
Test that if the value of the field has not changed, no update queries execute.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
with self.assertNumQueries(2): # 2 savepoints
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
def test_overriden_field_access_produces_no_extra_queries(self):
"""
Test no extra queries when accessing an overridden field more than once.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
# One outer SAVEPOINT/RELEASE SAVEPOINT pair around everything caused by the
# transaction.atomic decorator wrapping override_field_for_ccx.
# One SELECT and one INSERT.
# One inner SAVEPOINT/RELEASE SAVEPOINT pair around the INSERT caused by the
# transaction.atomic down in Django's get_or_create()/_create_object_from_params().
with self.assertNumQueries(6):
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
def test_override_is_inherited(self):
"""
Test that sequentials inherit overridden start date from chapter.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
self.assertEquals(chapter.get_children()[0].start, ccx_start)
self.assertEquals(chapter.get_children()[1].start, ccx_start)
def test_override_is_inherited_even_if_set_in_mooc(self):
"""
Test that a due date set on a chapter is inherited by grandchildren
(verticals) even if a due date is set explicitly on grandchildren in
the mooc.
"""
ccx_due = datetime.datetime(2015, 1, 1, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx.course.get_children()[0]
chapter.display_name = 'itsme!'
override_field_for_ccx(self.ccx, chapter, 'due', ccx_due)
vertical = chapter.get_children()[0].get_children()[0]
self.assertEqual(vertical.due, ccx_due)
|
agpl-3.0
|
PaulKinlan/cli-caniuse
|
site/app/scripts/bower_components/jsrepl-build/extern/python/unclosured/lib/python2.7/distutils/command/bdist_wininst.py
|
72
|
14917
|
"""distutils.command.bdist_wininst
Implements the Distutils 'bdist_wininst' command: create a windows installer
exe-program."""
__revision__ = "$Id$"
import sys
import os
import string
from sysconfig import get_python_version
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils.errors import DistutilsOptionError, DistutilsPlatformError
from distutils import log
from distutils.util import get_platform
class bdist_wininst (Command):
description = "create an executable installer for MS Windows"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('bitmap=', 'b',
"bitmap to use for the installer instead of python-powered logo"),
('title=', 't',
"title to display on the installer background instead of default"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
('user-access-control=', None,
"specify Vista's UAC handling - 'none'/default=no "
"handling, 'auto'=use UAC if target Python installed for "
"all users, 'force'=always use UAC"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.bitmap = None
self.title = None
self.skip_build = 0
self.install_script = None
self.pre_install_script = None
self.user_access_control = None
# initialize_options()
def finalize_options (self):
if self.bdist_dir is None:
if self.skip_build and self.plat_name:
# If build is skipped and plat_name is overridden, bdist will
# not see the correct 'plat_name' - so set that up manually.
bdist = self.distribution.get_command_obj('bdist')
bdist.plat_name = self.plat_name
# next the command will be initialized using that name
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wininst')
if not self.target_version:
self.target_version = ""
if not self.skip_build and self.distribution.has_ext_modules():
short_version = get_python_version()
if self.target_version and self.target_version != short_version:
raise DistutilsOptionError, \
"target version can only be %s, or the '--skip-build'" \
" option must be specified" % (short_version,)
self.target_version = short_version
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
)
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError, \
"install_script '%s' not found in scripts" % \
self.install_script
# finalize_options()
def run (self):
if (sys.platform != "win32" and
(self.distribution.has_ext_modules() or
self.distribution.has_c_libraries())):
raise DistutilsPlatformError \
("distribution contains extensions and/or C libraries; "
"must be compiled on a Windows 32 platform")
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install.plat_name = self.plat_name
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
if self.distribution.has_ext_modules():
# If we are building an installer for a Python version other
# than the one we are currently running, then we need to ensure
# our build_lib reflects the other Python version rather than ours.
# Note that for target_version!=sys.version, we must have skipped the
# build step, so there is no issue with enforcing the build of this
# version.
target_version = self.target_version
if not target_version:
assert self.skip_build, "Should have already checked this"
target_version = sys.version[0:3]
plat_specifier = ".%s-%s" % (self.plat_name, target_version)
build = self.get_finalized_command('build')
build.build_lib = os.path.join(build.build_base,
'lib' + plat_specifier)
# Use a custom scheme for the zip-file, because we have to decide
# at installation time which scheme to use.
for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
value = string.upper(key)
if key == 'headers':
value = value + '/Include/$dist_name'
setattr(install,
'install_' + key,
value)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
# And make an archive relative to the root of the
# pseudo-installation tree.
from tempfile import mktemp
archive_basename = mktemp()
fullname = self.distribution.get_fullname()
arcname = self.make_archive(archive_basename, "zip",
root_dir=self.bdist_dir)
# create an exe containing the zip-file
self.create_exe(arcname, fullname, self.bitmap)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_wininst', pyversion,
self.get_installer_filename(fullname)))
# remove the zip-file again
log.debug("removing temporary file '%s'", arcname)
os.remove(arcname)
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# run()
def get_inidata (self):
# Return data describing the installation.
lines = []
metadata = self.distribution.metadata
# Write the [metadata] section.
lines.append("[metadata]")
# 'info' will be displayed in the installer's dialog box,
# describing the items to be installed.
info = (metadata.long_description or '') + '\n'
# Escape newline characters
def escape(s):
return string.replace(s, "\n", "\\n")
for name in ["author", "author_email", "description", "maintainer",
"maintainer_email", "name", "url", "version"]:
data = getattr(metadata, name, "")
if data:
info = info + ("\n %s: %s" % \
(string.capitalize(name), escape(data)))
lines.append("%s=%s" % (name, escape(data)))
# The [setup] section contains entries controlling
# the installer runtime.
lines.append("\n[Setup]")
if self.install_script:
lines.append("install_script=%s" % self.install_script)
lines.append("info=%s" % escape(info))
lines.append("target_compile=%d" % (not self.no_target_compile))
lines.append("target_optimize=%d" % (not self.no_target_optimize))
if self.target_version:
lines.append("target_version=%s" % self.target_version)
if self.user_access_control:
lines.append("user_access_control=%s" % self.user_access_control)
title = self.title or self.distribution.get_fullname()
lines.append("title=%s" % escape(title))
import time
import distutils
build_info = "Built %s with distutils-%s" % \
(time.ctime(time.time()), distutils.__version__)
lines.append("build_info=%s" % build_info)
return string.join(lines, "\n")
# get_inidata()
def create_exe (self, arcname, fullname, bitmap=None):
import struct
self.mkpath(self.dist_dir)
cfgdata = self.get_inidata()
installer_name = self.get_installer_filename(fullname)
self.announce("creating %s" % installer_name)
if bitmap:
bitmapdata = open(bitmap, "rb").read()
bitmaplen = len(bitmapdata)
else:
bitmaplen = 0
file = open(installer_name, "wb")
file.write(self.get_exe_bytes())
if bitmap:
file.write(bitmapdata)
# Convert cfgdata from unicode to ascii, mbcs encoded
try:
unicode
except NameError:
pass
else:
if isinstance(cfgdata, unicode):
cfgdata = cfgdata.encode("mbcs")
# Append the pre-install script
cfgdata = cfgdata + "\0"
if self.pre_install_script:
script_data = open(self.pre_install_script, "r").read()
cfgdata = cfgdata + script_data + "\n\0"
else:
# empty pre-install script
cfgdata = cfgdata + "\0"
file.write(cfgdata)
# The 'magic number' 0x1234567B is used to make sure that the
# binary layout of 'cfgdata' is what the wininst.exe binary
# expects. If the layout changes, increment that number, make
# the corresponding changes to the wininst.exe sources, and
# recompile them.
header = struct.pack("<iii",
0x1234567B, # tag
len(cfgdata), # length
bitmaplen, # number of bytes in bitmap
)
file.write(header)
file.write(open(arcname, "rb").read())
# create_exe()
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
if self.target_version:
# if we create an installer for a specific python version,
# it's better to include this in the name
installer_name = os.path.join(self.dist_dir,
"%s.%s-py%s.exe" %
(fullname, self.plat_name, self.target_version))
else:
installer_name = os.path.join(self.dist_dir,
"%s.%s.exe" % (fullname, self.plat_name))
return installer_name
# get_installer_filename()
def get_exe_bytes (self):
from distutils.msvccompiler import get_build_version
# If a target-version other than the current version has been
# specified, then using the MSVC version from *this* build is no good.
# Without actually finding and executing the target version and parsing
# its sys.version, we just hard-code our knowledge of old versions.
# NOTE: Possible alternative is to allow "--target-version" to
# specify a Python executable rather than a simple version string.
# We can then execute this program to obtain any info we need, such
# as the real sys.version string for the build.
cur_version = get_python_version()
if self.target_version and self.target_version != cur_version:
# If the target version is *later* than us, then we assume they
# use what we use
# string compares seem wrong, but are what sysconfig.py itself uses
if self.target_version > cur_version:
bv = get_build_version()
else:
if self.target_version < "2.4":
bv = 6.0
else:
bv = 7.1
else:
# for current version - use authoritative check.
bv = get_build_version()
# wininst-x.y.exe is in the same directory as this file
directory = os.path.dirname(__file__)
# we must use a wininst-x.y.exe built with the same C compiler
# used for python. XXX What about mingw, borland, and so on?
# if plat_name starts with "win" but is not "win32"
# we want to strip "win" and leave the rest (e.g. -amd64)
# for all other cases, we don't want any suffix
if self.plat_name != 'win32' and self.plat_name[:3] == 'win':
sfix = self.plat_name[3:]
else:
sfix = ''
filename = os.path.join(directory, "wininst-%.1f%s.exe" % (bv, sfix))
f = open(filename, "rb")
try:
return f.read()
finally:
f.close()
# class bdist_wininst
|
apache-2.0
|
sachinkum/Bal-Aveksha
|
WebServer/BalAvekshaEnv/lib/python3.5/site-packages/setuptools/command/upload_docs.py
|
68
|
7217
|
# -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
import itertools
import functools
import six
from six.moves import http_client, urllib
from pkg_resources import iter_entry_points
from .upload import upload
def _encode(s):
errors = 'surrogateescape' if six.PY3 else 'strict'
return s.encode('utf-8', errors)
class upload_docs(upload):
# override the default repository as upload_docs isn't
# supported by Warehouse (and won't be).
DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
log.warn("Upload_docs command is deprecated. Use RTD instead.")
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
tmpl = "no files found in upload directory '%s'"
raise DistutilsOptionError(tmpl % self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
@staticmethod
def _build_part(item, sep_boundary):
key, values = item
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if isinstance(value, tuple):
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = _encode(value)
yield sep_boundary
yield _encode(title)
yield b"\n\n"
yield value
if value and value[-1:] == b'\r':
yield b'\n' # write an extra newline (lurve Macs)
@classmethod
def _build_multipart(cls, data):
"""
Build up the MIME payload for the POST data
"""
boundary = b'--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b'\n--' + boundary
end_boundary = sep_boundary + b'--'
end_items = end_boundary, b"\n",
builder = functools.partial(
cls._build_part,
sep_boundary=sep_boundary,
)
part_groups = map(builder, data.items())
parts = itertools.chain.from_iterable(part_groups)
body_items = itertools.chain(parts, end_items)
content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii')
return b''.join(body_items), content_type
def upload_file(self, filename):
with open(filename, 'rb') as f:
content = f.read()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = _encode(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if six.PY3:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
body, ct = self._build_multipart(data)
msg = "Submitting documentation to %s" % (self.repository)
self.announce(msg, log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urllib.parse.urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = http_client.HTTPConnection(netloc)
elif schema == 'https':
conn = http_client.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = ct
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
msg = 'Server response (%s): %s' % (r.status, r.reason)
self.announce(msg, log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
msg = 'Upload successful. Visit %s' % location
self.announce(msg, log.INFO)
else:
msg = 'Upload failed (%s): %s' % (r.status, r.reason)
self.announce(msg, log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
|
gpl-3.0
|
xin3liang/platform_external_chromium_org_third_party_WebKit
|
Source/build/scripts/make_element_lookup_trie.py
|
19
|
5276
|
#!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from itertools import groupby, islice
import sys
import in_generator
import template_expander
PARAMETER_NAME = 'data'
def _trie(tags, index):
"""Make a trie from list of tags, starting at index.
The resulting trie is partly space-optimized (semi-radix tree): once only
one string is left, the entire branch is compacted to one leaf node.
However, branch nodes with a single child are not compacted. (FIXME)
Returns:
(char, subtrie, tag, conditions): (char, trie, str, list)
code generation differs between branch nodes and leaf nodes,
hence different data is needed for each.
Arguments:
tags: sorted list
(sorted needed by groupby, list needed by len)
index: index at which to branch
(assumes prior to this index strings have a common prefix)
"""
def trie_node(char, subtags_iter):
# Pass in |char| so we can include in same tuple without unpacking
subtags = list(subtags_iter) # need list for len
if len(subtags) == 1: # terminal node, no subtrie
subtrie = None
tag = subtags[0]
conditions = _conditions(tag, index + 1)
else:
subtrie = _trie(subtags, index + 1)
tag = None
conditions = None
return char, subtrie, tag, conditions
# Group by char at index
def char_at_index(tag):
return tag[index].lower()
char_subtags = ((k, g) for k, g in groupby(tags, char_at_index))
# FIXME: if all subtags have a common prefix, merge with child
# and skip the switch in the generated code
return (trie_node(char, subtags) for char, subtags in char_subtags)
def _conditions(tag, index):
# boolean conditions to check suffix; corresponds to compacting branch
# with a single leaf
return ["%s[%d] == '%c'" % (PARAMETER_NAME, i, c.lower())
for i, c in islice(enumerate(tag), index, None)]
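# Illustrative example (hypothetical tag list): _trie(['dfn', 'div'], 0) yields a single
# branch ('d', <subtrie>, None, None) whose subtrie contains the leaves
# ('f', None, 'dfn', ["data[2] == 'n'"]) and ('i', None, 'div', ["data[2] == 'v'"]).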
class ElementLookupTrieWriter(in_generator.Writer):
# FIXME: Inherit all these from somewhere.
defaults = {
'JSInterfaceName': None,
'constructorNeedsCreatedByParser': None,
'constructorNeedsFormElement': None,
'interfaceName': None,
'noConstructor': None,
'runtimeEnabled': None,
}
default_parameters = {
'attrsNullNamespace': None,
'namespace': '',
'namespacePrefix': '',
'namespaceURI': '',
'fallbackInterfaceName': '',
'fallbackJSInterfaceName': '',
}
def __init__(self, in_file_paths):
super(ElementLookupTrieWriter, self).__init__(in_file_paths)
self._tags = [entry['name'] for entry in self.in_file.name_dictionaries]
self._namespace = self.in_file.parameters['namespace'].strip('"')
self._outputs = {
(self._namespace + 'ElementLookupTrie.h'): self.generate_header,
(self._namespace + 'ElementLookupTrie.cpp'): self.generate_implementation,
}
@template_expander.use_jinja('ElementLookupTrie.h.tmpl')
def generate_header(self):
return {
'namespace': self._namespace,
}
@template_expander.use_jinja('ElementLookupTrie.cpp.tmpl')
def generate_implementation(self):
# First sort, so groupby works
self._tags.sort(key=lambda tag: (len(tag), tag))
# Group tags by length
length_tags = ((k, g) for k, g in groupby(self._tags, len))
return {
'namespace': self._namespace,
'length_tries': ((length, _trie(tags, 0))
for length, tags in length_tags),
}
if __name__ == '__main__':
in_generator.Maker(ElementLookupTrieWriter).main(sys.argv)
|
bsd-3-clause
|
RPi-Distro/python-gpiozero
|
tests/test_outputs.py
|
2
|
53540
|
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2018-2019 Ben Nuttall <[email protected]>
# Copyright (c) 2016-2019 Dave Jones <[email protected]>
# Copyright (c) 2016 Andrew Scheller <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import sys
from time import sleep, time
try:
from math import isclose
except ImportError:
from gpiozero.compat import isclose
import pytest
from colorzero import Color, Red, Green, Blue
from gpiozero import *
def test_output_initial_values(mock_factory, pwm):
pin = mock_factory.pin(2)
with OutputDevice(2, initial_value=False) as device:
assert repr(device).startswith('<gpiozero.OutputDevice object')
assert pin.function == 'output'
assert not pin.state
with OutputDevice(2, initial_value=True) as device:
assert pin.state
state = pin.state
with OutputDevice(2, initial_value=None) as device:
assert state == pin.state
def test_output_write_active_high(mock_factory):
pin = mock_factory.pin(2)
with OutputDevice(2) as device:
device.on()
assert pin.state
device.off()
assert not pin.state
def test_output_write_active_low(mock_factory):
pin = mock_factory.pin(2)
with OutputDevice(2, active_high=False) as device:
device.on()
assert not pin.state
device.off()
assert pin.state
def test_output_write_closed(mock_factory):
with OutputDevice(2) as device:
device.close()
assert device.closed
device.close()
assert device.closed
with pytest.raises(GPIODeviceClosed):
device.on()
def test_output_write_silly(mock_factory):
pin = mock_factory.pin(2)
with OutputDevice(2) as device:
pin.function = 'input'
with pytest.raises(AttributeError):
device.on()
def test_output_value(mock_factory):
pin = mock_factory.pin(2)
with OutputDevice(2) as device:
assert not device.value
assert not pin.state
device.on()
assert device.value
assert pin.state
device.value = False
assert not device.value
assert not pin.state
def test_output_digital_toggle(mock_factory):
pin = mock_factory.pin(2)
with DigitalOutputDevice(2) as device:
assert repr(device).startswith('<gpiozero.DigitalOutputDevice object')
assert not device.value
assert not pin.state
device.toggle()
assert device.value
assert pin.state
device.toggle()
assert not device.value
assert not pin.state
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_output_blink_background(mock_factory):
pin = mock_factory.pin(4)
with DigitalOutputDevice(4) as device:
start = time()
device.blink(0.1, 0.1, n=2)
assert isclose(time() - start, 0, abs_tol=0.05)
device._blink_thread.join() # naughty, but ensures no arbitrary waits in the test
assert isclose(time() - start, 0.4, abs_tol=0.05)
pin.assert_states_and_times([
(0.0, False),
(0.0, True),
(0.1, False),
(0.1, True),
(0.1, False)
])
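# Note on the mock-pin assertions used throughout these tests: each pair passed
# to assert_states_and_times() appears to be (delay since the previous recorded
# state change, new state), so the list above encodes: start off, switch on
# immediately, then toggle every 0.1 s until the second blink completes.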
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_output_blink_foreground(mock_factory):
pin = mock_factory.pin(4)
with DigitalOutputDevice(4) as device:
start = time()
device.blink(0.1, 0.1, n=2, background=False)
assert isclose(time() - start, 0.4, abs_tol=0.05)
pin.assert_states_and_times([
(0.0, False),
(0.0, True),
(0.1, False),
(0.1, True),
(0.1, False)
])
def test_output_blink_interrupt_on(mock_factory):
pin = mock_factory.pin(4)
with DigitalOutputDevice(4) as device:
device.blink(1, 0.1)
sleep(0.2)
device.off() # should interrupt while on
pin.assert_states([False, True, False])
def test_output_blink_interrupt_off(mock_factory):
pin = mock_factory.pin(4)
with DigitalOutputDevice(4) as device:
device.blink(0.1, 1)
sleep(0.2)
device.off() # should interrupt while off
pin.assert_states([False, True, False])
def test_output_pwm_bad_initial_value(mock_factory):
with pytest.raises(ValueError):
PWMOutputDevice(2, initial_value=2)
def test_output_pwm_not_supported(mock_factory):
with pytest.raises(AttributeError):
PWMOutputDevice(2)
def test_output_pwm_states(mock_factory, pwm):
pin = mock_factory.pin(4)
with PWMOutputDevice(4) as device:
assert repr(device).startswith('<gpiozero.PWMOutputDevice object')
device.value = 0.1
device.value = 0.2
device.value = 0.0
pin.assert_states([0.0, 0.1, 0.2, 0.0])
def test_output_pwm_read(mock_factory, pwm):
pin = mock_factory.pin(2)
with PWMOutputDevice(2, frequency=100) as device:
assert device.frequency == 100
device.value = 0.1
assert isclose(device.value, 0.1)
assert isclose(pin.state, 0.1)
assert device.is_active
device.frequency = None
assert not device.value
assert not device.is_active
assert device.frequency is None
def test_output_pwm_write(mock_factory, pwm):
pin = mock_factory.pin(4)
with PWMOutputDevice(4) as device:
device.on()
device.off()
pin.assert_states([False, True, False])
def test_output_pwm_toggle(mock_factory, pwm):
pin = mock_factory.pin(4)
with PWMOutputDevice(4) as device:
device.toggle()
device.value = 0.5
device.value = 0.1
device.toggle()
device.off()
pin.assert_states([False, True, 0.5, 0.1, 0.9, False])
def test_output_pwm_active_high_read(mock_factory, pwm):
pin = mock_factory.pin(2)
with PWMOutputDevice(2, active_high=False) as device:
device.value = 0.1
assert isclose(device.value, 0.1)
assert isclose(pin.state, 0.9)
device.frequency = None
assert device.value
def test_output_pwm_bad_value(mock_factory, pwm):
pin = mock_factory.pin(2)
with PWMOutputDevice(2) as device:
with pytest.raises(ValueError):
device.value = 2
def test_output_pwm_write_closed(mock_factory, pwm):
pin = mock_factory.pin(2)
with PWMOutputDevice(2) as device:
device.close()
with pytest.raises(GPIODeviceClosed):
device.on()
def test_output_pwm_write_silly(mock_factory, pwm):
pin = mock_factory.pin(2)
with PWMOutputDevice(2) as device:
pin.function = 'input'
with pytest.raises(AttributeError):
device.off()
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_output_pwm_blink_background(mock_factory, pwm):
pin = mock_factory.pin(4)
with PWMOutputDevice(4) as device:
start = time()
device.blink(0.1, 0.1, n=2)
assert isclose(time() - start, 0, abs_tol=0.05)
device._blink_thread.join()
assert isclose(time() - start, 0.4, abs_tol=0.05)
pin.assert_states_and_times([
(0.0, 0),
(0.0, 1),
(0.1, 0),
(0.1, 1),
(0.1, 0)
])
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_output_pwm_blink_foreground(mock_factory, pwm):
pin = mock_factory.pin(4)
with PWMOutputDevice(4) as device:
start = time()
device.blink(0.1, 0.1, n=2, background=False)
assert isclose(time() - start, 0.4, abs_tol=0.05)
pin.assert_states_and_times([
(0.0, 0),
(0.0, 1),
(0.1, 0),
(0.1, 1),
(0.1, 0)
])
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_output_pwm_fade_background(mock_factory, pwm):
pin = mock_factory.pin(4)
with PWMOutputDevice(4) as device:
start = time()
device.blink(0, 0, 0.2, 0.2, n=2)
assert isclose(time() - start, 0, abs_tol=0.05)
device._blink_thread.join()
assert isclose(time() - start, 0.8, abs_tol=0.05)
pin.assert_states_and_times([
(0.0, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
])
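# The 0.04 s spacing above follows from each 0.2 s fade being rendered in five
# equal brightness steps (0.2 s / 5 = 0.04 s), i.e. the device seems to step
# the duty cycle at 25 updates per second while fading.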
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_output_pwm_fade_foreground(mock_factory, pwm):
pin = mock_factory.pin(4)
with PWMOutputDevice(4) as device:
start = time()
device.blink(0, 0, 0.2, 0.2, n=2, background=False)
assert isclose(time() - start, 0.8, abs_tol=0.05)
pin.assert_states_and_times([
(0.0, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
])
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_output_pwm_pulse_background(mock_factory, pwm):
pin = mock_factory.pin(4)
with PWMOutputDevice(4) as device:
start = time()
device.pulse(0.2, 0.2, n=2)
assert isclose(time() - start, 0, abs_tol=0.05)
device._blink_thread.join()
assert isclose(time() - start, 0.8, abs_tol=0.05)
pin.assert_states_and_times([
(0.0, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
])
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_output_pwm_pulse_foreground(mock_factory, pwm):
pin = mock_factory.pin(4)
with PWMOutputDevice(4) as device:
start = time()
device.pulse(0.2, 0.2, n=2, background=False)
assert isclose(time() - start, 0.8, abs_tol=0.05)
pin.assert_states_and_times([
(0.0, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
])
def test_output_pwm_blink_interrupt(mock_factory, pwm):
pin = mock_factory.pin(4)
with PWMOutputDevice(4) as device:
device.blink(1, 0.1)
sleep(0.2)
device.off() # should interrupt while on
pin.assert_states([0, 1, 0])
def test_rgbled_missing_pins(mock_factory):
with pytest.raises(GPIOPinMissing):
RGBLED()
def test_rgbled_initial_value(mock_factory, pwm):
r, g, b = (mock_factory.pin(i) for i in (1, 2, 3))
with RGBLED(1, 2, 3, initial_value=(0.1, 0.2, 0)) as led:
assert repr(led).startswith('<gpiozero.RGBLED object')
assert r.frequency
assert g.frequency
assert b.frequency
assert isclose(r.state, 0.1)
assert isclose(g.state, 0.2)
assert isclose(b.state, 0.0)
def test_rgbled_initial_value_nonpwm(mock_factory):
r, g, b = (mock_factory.pin(i) for i in (1, 2, 3))
with RGBLED(1, 2, 3, pwm=False, initial_value=(0, 1, 1)) as led:
assert r.state == 0
assert g.state == 1
assert b.state == 1
def test_rgbled_initial_bad_value(mock_factory, pwm):
with pytest.raises(ValueError):
RGBLED(1, 2, 3, initial_value=(0.1, 0.2, 1.2))
def test_rgbled_initial_bad_value_nonpwm(mock_factory):
with pytest.raises(ValueError):
RGBLED(1, 2, 3, pwm=False, initial_value=(0.1, 0.2, 0))
def test_rgbled_value(mock_factory, pwm):
with RGBLED(1, 2, 3) as led:
assert isinstance(led._leds[0], PWMLED)
assert isinstance(led._leds[1], PWMLED)
assert isinstance(led._leds[2], PWMLED)
assert not led.is_active
assert led.value == (0, 0, 0)
led.on()
assert led.is_active
assert led.value == (1, 1, 1)
led.off()
assert not led.is_active
assert led.value == (0, 0, 0)
led.value = (0.5, 0.5, 0.5)
assert led.is_active
assert led.value == (0.5, 0.5, 0.5)
def test_rgbled_value_nonpwm(mock_factory):
with RGBLED(1, 2, 3, pwm=False) as led:
assert isinstance(led._leds[0], LED)
assert isinstance(led._leds[1], LED)
assert isinstance(led._leds[2], LED)
assert not led.is_active
assert led.value == (0, 0, 0)
led.on()
assert led.is_active
assert led.value == (1, 1, 1)
led.off()
assert not led.is_active
assert led.value == (0, 0, 0)
def test_rgbled_bad_value(mock_factory, pwm):
with RGBLED(1, 2, 3) as led:
with pytest.raises(ValueError):
led.value = (2, 0, 0)
with RGBLED(1, 2, 3) as led:
with pytest.raises(ValueError):
led.value = (0, -1, 0)
def test_rgbled_bad_value_nonpwm(mock_factory):
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.value = (2, 0, 0)
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.value = (0, -1, 0)
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.value = (0.5, 0, 0)
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.value = (0, 0.5, 0)
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.value = (0, 0, 0.5)
def test_rgbled_toggle(mock_factory, pwm):
with RGBLED(1, 2, 3) as led:
assert not led.is_active
assert led.value == (0, 0, 0)
led.toggle()
assert led.is_active
assert led.value == (1, 1, 1)
led.toggle()
assert not led.is_active
assert led.value == (0, 0, 0)
def test_rgbled_toggle_nonpwm(mock_factory):
with RGBLED(1, 2, 3, pwm=False) as led:
assert not led.is_active
assert led.value == (0, 0, 0)
led.toggle()
assert led.is_active
assert led.value == (1, 1, 1)
led.toggle()
assert not led.is_active
assert led.value == (0, 0, 0)
def test_rgbled_bad_color_value_nonpwm(mock_factory):
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.color = (0.5, 0, 0)
with pytest.raises(ValueError):
led.color = (0, 1.5, 0)
with pytest.raises(ValueError):
led.color = (0, 0, -1)
def test_rgbled_bad_color_value_pwm(mock_factory, pwm):
with RGBLED(1, 2, 3) as led:
with pytest.raises(ValueError):
led.color = (0, 1.5, 0)
with pytest.raises(ValueError):
led.color = (0, 0, -1)
with pytest.raises(ValueError):
led.green = 1.5
with pytest.raises(ValueError):
led.blue = -1
def test_rgbled_color_value_nonpwm(mock_factory):
with RGBLED(1, 2, 3, pwm=False) as led:
assert led.value == (0, 0, 0)
assert led.red == 0
assert led.green == 0
assert led.blue == 0
led.on()
assert led.value == (1, 1, 1)
assert led.color == (1, 1, 1)
assert led.red == 1
assert led.green == 1
assert led.blue == 1
led.color = (0, 1, 0)
assert led.value == (0, 1, 0)
assert led.red == 0
led.red = 1
assert led.value == (1, 1, 0)
assert led.red == 1
assert led.green == 1
assert led.blue == 0
led.green = 0
led.blue = 1
assert led.value == (1, 0, 1)
def test_rgbled_color_value_pwm(mock_factory, pwm):
with RGBLED(1, 2, 3) as led:
assert led.value == (0, 0, 0)
assert led.red == 0
assert led.green == 0
assert led.blue == 0
led.on()
assert led.value == (1, 1, 1)
assert led.color == (1, 1, 1)
assert led.red == 1
assert led.green == 1
assert led.blue == 1
led.color = (0.2, 0.5, 0.8)
assert led.value == (0.2, 0.5, 0.8)
assert led.red == 0.2
led.red = 0.5
assert led.value == (0.5, 0.5, 0.8)
assert led.red == 0.5
assert led.green == 0.5
assert led.blue == 0.8
led.green = 0.9
led.blue = 0.4
assert led.value == (0.5, 0.9, 0.4)
def test_rgbled_bad_rgb_property_nonpwm(mock_factory):
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.red = 0.1
with pytest.raises(ValueError):
led.green = 0.5
with pytest.raises(ValueError):
led.blue = 0.9
with pytest.raises(ValueError):
led.red = Red(0.1)
with pytest.raises(ValueError):
led.green = Green(0.5)
with pytest.raises(ValueError):
led.blue = Blue(0.9)
def test_rgbled_bad_rgb_property_pwm(mock_factory, pwm):
with RGBLED(1, 2, 3) as led:
with pytest.raises(ValueError):
led.red = 1.5
with pytest.raises(ValueError):
led.green = 2
with pytest.raises(ValueError):
led.blue = -1
with pytest.raises(ValueError):
led.red = Red(1.5)
with pytest.raises(ValueError):
led.green = Green(2)
with pytest.raises(ValueError):
led.blue = Blue(-1)
def test_rgbled_rgb_property_nonpwm(mock_factory):
with RGBLED(1, 2, 3, pwm=False) as led:
assert led.value == (0, 0, 0)
led.red = Red(0)
assert led.value == (0, 0, 0)
led.red = Red(1)
assert led.value == (1, 0, 0)
led.green = Green(1)
assert led.value == (1, 1, 0)
led.blue = Blue(1)
assert led.value == (1, 1, 1)
def test_rgbled_rgb_property_pwm(mock_factory, pwm):
with RGBLED(1, 2, 3) as led:
assert led.value == (0, 0, 0)
led.red = Red(0)
assert led.value == (0, 0, 0)
led.red = Red(0.5)
assert led.value == (0.5, 0, 0)
led.green = Green(0.5)
assert led.value == (0.5, 0.5, 0)
led.blue = Blue(0.5)
assert led.value == (0.5, 0.5, 0.5)
def test_rgbled_bad_color_name_nonpwm(mock_factory):
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.color = Color('green') # html 'green' is (0, ~0.5, 0)
with pytest.raises(ValueError):
led.color = Color(0.5, 0, 0)
with pytest.raises(ValueError):
led.color = Color(250, 0, 0)
def test_rgbled_color_name_nonpwm(mock_factory):
with RGBLED(1, 2, 3, pwm=False) as led:
assert led.value == (0, 0, 0)
led.color = Color('white')
assert led.value == (1, 1, 1)
led.color = Color('black')
assert led.value == (0, 0, 0)
led.color = Color('red')
assert led.value == (1, 0, 0)
led.color = Color('lime') # html 'green' is (0, 0.5, 0)
assert led.value == (0, 1, 0)
led.color = Color('blue')
assert led.value == (0, 0, 1)
led.color = Color('cyan')
assert led.value == (0, 1, 1)
led.color = Color('magenta')
assert led.value == (1, 0, 1)
led.color = Color('yellow')
assert led.value == (1, 1, 0)
def test_rgbled_color_name_pwm(mock_factory, pwm):
with RGBLED(1, 2, 3) as led:
assert led.value == (0, 0, 0)
led.color = Color('white')
assert led.value == (1, 1, 1)
led.color = Color('green')
assert led.value == (0, 0.5019607843137255, 0)
led.color = Color('chocolate')
assert led.value == (0.8235294117647058, 0.4117647058823529, 0.11764705882352941)
led.color = Color('purple')
assert led.value == (0.5019607843137255, 0.0, 0.5019607843137255)
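# The fractional values above are simply the HTML colour's 8-bit channels
# divided by 255: 'green' is #008000 (128/255 = 0.50196...), 'chocolate' is
# #D2691E (210/255, 105/255, 30/255) and 'purple' is #800080.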
def test_rgbled_blink_nonpwm(mock_factory):
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.blink(fade_in_time=1)
with pytest.raises(ValueError):
led.blink(fade_out_time=1)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_rgbled_blink_background(mock_factory, pwm):
r, g, b = (mock_factory.pin(i) for i in (4, 5, 6))
with RGBLED(1, 2, 3) as led:
start = time()
led.blink(0.1, 0.1, n=2)
assert isclose(time() - start, 0, abs_tol=0.05)
led._blink_thread.join()
assert isclose(time() - start, 0.4, abs_tol=0.05)
expected = [
(0.0, 0),
(0.0, 1),
(0.1, 0),
(0.1, 1),
(0.1, 0)
]
r.assert_states_and_times(expected)
g.assert_states_and_times(expected)
b.assert_states_and_times(expected)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_rgbled_blink_background_nonpwm(mock_factory):
r, g, b = (mock_factory.pin(i) for i in (4, 5, 6))
with RGBLED(1, 2, 3, pwm=False) as led:
start = time()
led.blink(0.1, 0.1, n=2)
assert isclose(time() - start, 0, abs_tol=0.05)
led._blink_thread.join()
assert isclose(time() - start, 0.4, abs_tol=0.05)
expected = [
(0.0, 0),
(0.0, 1),
(0.1, 0),
(0.1, 1),
(0.1, 0)
]
r.assert_states_and_times(expected)
g.assert_states_and_times(expected)
b.assert_states_and_times(expected)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_rgbled_blink_foreground(mock_factory, pwm):
r, g, b = (mock_factory.pin(i) for i in (4, 5, 6))
with RGBLED(1, 2, 3) as led:
start = time()
led.blink(0.1, 0.1, n=2, background=False)
assert isclose(time() - start, 0.4, abs_tol=0.05)
expected = [
(0.0, 0),
(0.0, 1),
(0.1, 0),
(0.1, 1),
(0.1, 0)
]
r.assert_states_and_times(expected)
g.assert_states_and_times(expected)
b.assert_states_and_times(expected)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_rgbled_blink_foreground_nonpwm(mock_factory):
r, g, b = (mock_factory.pin(i) for i in (4, 5, 6))
with RGBLED(1, 2, 3, pwm=False) as led:
start = time()
led.blink(0.1, 0.1, n=2, background=False)
assert isclose(time() - start, 0.4, abs_tol=0.05)
expected = [
(0.0, 0),
(0.0, 1),
(0.1, 0),
(0.1, 1),
(0.1, 0)
]
r.assert_states_and_times(expected)
g.assert_states_and_times(expected)
b.assert_states_and_times(expected)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_rgbled_fade_background(mock_factory, pwm):
r, g, b = (mock_factory.pin(i) for i in (4, 5, 6))
with RGBLED(1, 2, 3) as led:
start = time()
led.blink(0, 0, 0.2, 0.2, n=2)
assert isclose(time() - start, 0, abs_tol=0.05)
led._blink_thread.join()
assert isclose(time() - start, 0.8, abs_tol=0.05)
expected = [
(0.0, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
]
r.assert_states_and_times(expected)
g.assert_states_and_times(expected)
b.assert_states_and_times(expected)
def test_rgbled_fade_background_nonpwm(mock_factory):
r, g, b = (mock_factory.pin(i) for i in (1, 2, 3))
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.blink(0, 0, 0.2, 0, n=2)
with pytest.raises(ValueError):
led.blink(0, 0, 0, 0.2, n=2)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_rgbled_fade_foreground(mock_factory, pwm):
r, g, b = (mock_factory.pin(i) for i in (4, 5, 6))
with RGBLED(1, 2, 3) as led:
start = time()
led.blink(0, 0, 0.2, 0.2, n=2, background=False)
assert isclose(time() - start, 0.8, abs_tol=0.05)
expected = [
(0.0, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
]
r.assert_states_and_times(expected)
g.assert_states_and_times(expected)
b.assert_states_and_times(expected)
def test_rgbled_fade_foreground_nonpwm(mock_factory):
r, g, b = (mock_factory.pin(i) for i in (1, 2, 3))
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.blink(0, 0, 0.2, 0.2, n=2, background=False)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_rgbled_pulse_background(mock_factory, pwm):
r, g, b = (mock_factory.pin(i) for i in (4, 5, 6))
with RGBLED(1, 2, 3) as led:
start = time()
led.pulse(0.2, 0.2, n=2)
assert isclose(time() - start, 0, abs_tol=0.05)
led._blink_thread.join()
assert isclose(time() - start, 0.8, abs_tol=0.05)
expected = [
(0.0, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
]
r.assert_states_and_times(expected)
g.assert_states_and_times(expected)
b.assert_states_and_times(expected)
def test_rgbled_pulse_background_nonpwm(mock_factory):
r, g, b = (mock_factory.pin(i) for i in (1, 2, 3))
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.pulse(0.2, 0.2, n=2)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_rgbled_pulse_foreground(mock_factory, pwm):
r, g, b = (mock_factory.pin(i) for i in (4, 5, 6))
with RGBLED(1, 2, 3) as led:
start = time()
led.pulse(0.2, 0.2, n=2, background=False)
assert isclose(time() - start, 0.8, abs_tol=0.05)
expected = [
(0.0, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
]
r.assert_states_and_times(expected)
g.assert_states_and_times(expected)
b.assert_states_and_times(expected)
def test_rgbled_pulse_foreground_nonpwm(mock_factory):
r, g, b = (mock_factory.pin(i) for i in (1, 2, 3))
with RGBLED(1, 2, 3, pwm=False) as led:
with pytest.raises(ValueError):
led.pulse(0.2, 0.2, n=2, background=False)
def test_rgbled_blink_interrupt(mock_factory, pwm):
r, g, b = (mock_factory.pin(i) for i in (4, 5, 6))
with RGBLED(1, 2, 3) as led:
led.blink(1, 0.1)
sleep(0.2)
led.off() # should interrupt while on
r.assert_states([0, 1, 0])
g.assert_states([0, 1, 0])
b.assert_states([0, 1, 0])
def test_rgbled_blink_interrupt_nonpwm(mock_factory):
r, g, b = (mock_factory.pin(i) for i in (4, 5, 6))
with RGBLED(1, 2, 3, pwm=False) as led:
led.blink(1, 0.1)
sleep(0.2)
led.off() # should interrupt while on
r.assert_states([0, 1, 0])
g.assert_states([0, 1, 0])
b.assert_states([0, 1, 0])
def test_rgbled_close(mock_factory, pwm):
r, g, b = (mock_factory.pin(i) for i in (1, 2, 3))
with RGBLED(1, 2, 3) as led:
assert not led.closed
led.close()
assert led.closed
led.close()
assert led.closed
def test_rgbled_close_nonpwm(mock_factory):
r, g, b = (mock_factory.pin(i) for i in (1, 2, 3))
with RGBLED(1, 2, 3, pwm=False) as led:
assert not led.closed
led.close()
assert led.closed
led.close()
assert led.closed
def test_motor_bad_init(mock_factory):
with pytest.raises(GPIOPinMissing):
Motor()
with pytest.raises(GPIOPinMissing):
Motor(2)
with pytest.raises(GPIOPinMissing):
Motor(forward=2)
with pytest.raises(GPIOPinMissing):
Motor(backward=2)
with pytest.raises(TypeError):
Motor(a=2, b=3)
def test_motor_pins(mock_factory, pwm):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
with Motor(1, 2) as motor:
assert repr(motor).startswith('<gpiozero.Motor object')
assert motor.forward_device.pin is f
assert isinstance(motor.forward_device, PWMOutputDevice)
assert motor.backward_device.pin is b
assert isinstance(motor.backward_device, PWMOutputDevice)
def test_motor_pins_nonpwm(mock_factory):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
with Motor(1, 2, pwm=False) as motor:
assert motor.forward_device.pin is f
assert isinstance(motor.forward_device, DigitalOutputDevice)
assert motor.backward_device.pin is b
assert isinstance(motor.backward_device, DigitalOutputDevice)
def test_motor_close(mock_factory, pwm):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
with Motor(1, 2) as motor:
motor.close()
assert motor.closed
assert motor.forward_device.pin is None
assert motor.backward_device.pin is None
motor.close()
assert motor.closed
def test_motor_close_nonpwm(mock_factory):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
with Motor(1, 2, pwm=False) as motor:
motor.close()
assert motor.closed
assert motor.forward_device.pin is None
assert motor.backward_device.pin is None
def test_motor_value(mock_factory, pwm):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
with Motor(1, 2) as motor:
motor.value = -1
assert motor.is_active
assert motor.value == -1
assert b.state == 1 and f.state == 0
motor.value = 1
assert motor.is_active
assert motor.value == 1
assert b.state == 0 and f.state == 1
motor.value = 0.5
assert motor.is_active
assert motor.value == 0.5
assert b.state == 0 and f.state == 0.5
motor.value = -0.5
assert motor.is_active
assert motor.value == -0.5
assert b.state == 0.5 and f.state == 0
motor.value = 0
assert not motor.is_active
assert not motor.value
assert b.state == 0 and f.state == 0
def test_motor_value_nonpwm(mock_factory):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
with Motor(1, 2, pwm=False) as motor:
motor.value = -1
assert motor.is_active
assert motor.value == -1
assert b.state == 1 and f.state == 0
motor.value = 1
assert motor.is_active
assert motor.value == 1
assert b.state == 0 and f.state == 1
motor.value = 0
assert not motor.is_active
assert not motor.value
assert b.state == 0 and f.state == 0
def test_motor_bad_value(mock_factory, pwm):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
with Motor(1, 2) as motor:
with pytest.raises(ValueError):
motor.value = -2
with pytest.raises(ValueError):
motor.value = 2
with pytest.raises(ValueError):
motor.forward(2)
with pytest.raises(ValueError):
motor.backward(2)
with pytest.raises(ValueError):
motor.forward(-1)
with pytest.raises(ValueError):
motor.backward(-1)
def test_motor_bad_value_nonpwm(mock_factory):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
with Motor(1, 2, pwm=False) as motor:
with pytest.raises(ValueError):
motor.value = -2
with pytest.raises(ValueError):
motor.value = 2
with pytest.raises(ValueError):
motor.value = 0.5
with pytest.raises(ValueError):
motor.value = -0.5
with pytest.raises(ValueError):
motor.forward(0.5)
with pytest.raises(ValueError):
motor.backward(0.5)
def test_motor_reverse(mock_factory, pwm):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
with Motor(1, 2) as motor:
motor.forward()
assert motor.value == 1
assert b.state == 0 and f.state == 1
motor.reverse()
assert motor.value == -1
assert b.state == 1 and f.state == 0
motor.backward(0.5)
assert motor.value == -0.5
assert b.state == 0.5 and f.state == 0
motor.reverse()
assert motor.value == 0.5
assert b.state == 0 and f.state == 0.5
def test_motor_reverse_nonpwm(mock_factory):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
with Motor(1, 2, pwm=False) as motor:
motor.forward()
assert motor.value == 1
assert b.state == 0 and f.state == 1
motor.reverse()
assert motor.value == -1
assert b.state == 1 and f.state == 0
def test_motor_enable_pin_bad_init(mock_factory, pwm):
with pytest.raises(GPIOPinMissing):
Motor(enable=1)
with pytest.raises(GPIOPinMissing):
Motor(forward=1, enable=2)
with pytest.raises(GPIOPinMissing):
Motor(backward=1, enable=2)
with pytest.raises(GPIOPinMissing):
Motor(backward=1, enable=2, pwm=True)
def test_motor_enable_pin_init(mock_factory, pwm):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
e = mock_factory.pin(3)
with Motor(forward=1, backward=2, enable=3) as motor:
assert motor.forward_device.pin is f
assert isinstance(motor.forward_device, PWMOutputDevice)
assert motor.backward_device.pin is b
assert isinstance(motor.backward_device, PWMOutputDevice)
assert motor.enable_device.pin is e
assert isinstance(motor.enable_device, DigitalOutputDevice)
assert e.state
with Motor(1, 2, 3) as motor:
assert motor.forward_device.pin is f
assert isinstance(motor.forward_device, PWMOutputDevice)
assert motor.backward_device.pin is b
assert isinstance(motor.backward_device, PWMOutputDevice)
assert motor.enable_device.pin is e
assert isinstance(motor.enable_device, DigitalOutputDevice)
assert e.state
def test_motor_enable_pin_nonpwm_init(mock_factory):
f = mock_factory.pin(1)
b = mock_factory.pin(2)
e = mock_factory.pin(3)
with Motor(forward=1, backward=2, enable=3, pwm=False) as motor:
assert motor.forward_device.pin is f
assert isinstance(motor.forward_device, DigitalOutputDevice)
assert motor.backward_device.pin is b
assert isinstance(motor.backward_device, DigitalOutputDevice)
assert motor.enable_device.pin is e
assert isinstance(motor.enable_device, DigitalOutputDevice)
def test_motor_enable_pin(mock_factory, pwm):
with Motor(forward=1, backward=2, enable=3) as motor:
motor.forward()
assert motor.value == 1
motor.backward()
assert motor.value == -1
motor.stop()
assert motor.value == 0
def test_phaseenable_motor_pins(mock_factory, pwm):
p = mock_factory.pin(1)
e = mock_factory.pin(2)
with PhaseEnableMotor(1, 2) as motor:
assert repr(motor).startswith('<gpiozero.PhaseEnableMotor object')
assert motor.phase_device.pin is p
assert isinstance(motor.phase_device, OutputDevice)
assert motor.enable_device.pin is e
assert isinstance(motor.enable_device, PWMOutputDevice)
def test_phaseenable_motor_pins_nonpwm(mock_factory):
p = mock_factory.pin(1)
e = mock_factory.pin(2)
with PhaseEnableMotor(1, 2, pwm=False) as motor:
assert motor.phase_device.pin is p
assert isinstance(motor.phase_device, OutputDevice)
assert motor.enable_device.pin is e
assert isinstance(motor.enable_device, DigitalOutputDevice)
def test_phaseenable_motor_close(mock_factory, pwm):
p = mock_factory.pin(1)
e = mock_factory.pin(2)
with PhaseEnableMotor(1, 2) as motor:
motor.close()
assert motor.closed
assert motor.phase_device.pin is None
assert motor.enable_device.pin is None
motor.close()
assert motor.closed
def test_phaseenable_motor_close_nonpwm(mock_factory):
p = mock_factory.pin(1)
e = mock_factory.pin(2)
with PhaseEnableMotor(1, 2, pwm=False) as motor:
motor.close()
assert motor.closed
assert motor.phase_device.pin is None
assert motor.enable_device.pin is None
def test_phaseenable_motor_value(mock_factory, pwm):
p = mock_factory.pin(1)
e = mock_factory.pin(2)
with PhaseEnableMotor(1, 2) as motor:
motor.value = -1
assert motor.is_active
assert motor.value == -1
assert p.state == 1 and e.state == 1
motor.value = 1
assert motor.is_active
assert motor.value == 1
assert p.state == 0 and e.state == 1
motor.value = 0.5
assert motor.is_active
assert motor.value == 0.5
assert p.state == 0 and e.state == 0.5
motor.value = -0.5
assert motor.is_active
assert motor.value == -0.5
assert p.state == 1 and e.state == 0.5
motor.value = 0
assert not motor.is_active
assert not motor.value
assert e.state == 0
def test_phaseenable_motor_value_nonpwm(mock_factory):
p = mock_factory.pin(1)
e = mock_factory.pin(2)
with PhaseEnableMotor(1, 2, pwm=False) as motor:
motor.value = -1
assert motor.is_active
assert motor.value == -1
assert p.state == 1 and e.state == 1
motor.value = 1
assert motor.is_active
assert motor.value == 1
assert p.state == 0 and e.state == 1
motor.value = 0
assert not motor.is_active
assert not motor.value
assert e.state == 0
def test_phaseenable_motor_bad_value(mock_factory, pwm):
p = mock_factory.pin(1)
e = mock_factory.pin(2)
with PhaseEnableMotor(1, 2) as motor:
with pytest.raises(ValueError):
motor.value = -2
with pytest.raises(ValueError):
motor.value = 2
with pytest.raises(ValueError):
motor.forward(2)
with pytest.raises(ValueError):
motor.backward(2)
def test_phaseenable_motor_bad_value_nonpwm(mock_factory):
p = mock_factory.pin(1)
e = mock_factory.pin(2)
with PhaseEnableMotor(1, 2, pwm=False) as motor:
with pytest.raises(ValueError):
motor.value = -2
with pytest.raises(ValueError):
motor.value = 2
with pytest.raises(ValueError):
motor.value = 0.5
with pytest.raises(ValueError):
motor.value = -0.5
def test_phaseenable_motor_reverse(mock_factory, pwm):
p = mock_factory.pin(1)
e = mock_factory.pin(2)
with PhaseEnableMotor(1, 2) as motor:
motor.forward()
assert motor.value == 1
assert p.state == 0 and e.state == 1
motor.reverse()
assert motor.value == -1
assert p.state == 1 and e.state == 1
motor.backward(0.5)
assert motor.value == -0.5
assert p.state == 1 and e.state == 0.5
motor.reverse()
assert motor.value == 0.5
assert p.state == 0 and e.state == 0.5
def test_phaseenable_motor_reverse_nonpwm(mock_factory):
p = mock_factory.pin(1)
e = mock_factory.pin(2)
with PhaseEnableMotor(1, 2, pwm=False) as motor:
motor.forward()
assert motor.value == 1
assert p.state == 0 and e.state == 1
motor.reverse()
assert motor.value == -1
assert p.state == 1 and e.state == 1
def test_servo_pins(mock_factory, pwm):
p = mock_factory.pin(1)
with Servo(1) as servo:
assert repr(servo).startswith('<gpiozero.Servo object')
assert servo.pwm_device.pin is p
assert isinstance(servo.pwm_device, PWMOutputDevice)
def test_servo_bad_value(mock_factory, pwm):
p = mock_factory.pin(1)
with pytest.raises(ValueError):
Servo(1, initial_value=2)
with pytest.raises(ValueError):
Servo(1, min_pulse_width=30/1000)
with pytest.raises(ValueError):
Servo(1, max_pulse_width=30/1000)
def test_servo_pins_nonpwm(mock_factory):
p = mock_factory.pin(2)
with pytest.raises(PinPWMUnsupported):
Servo(1)
def test_servo_close(mock_factory, pwm):
p = mock_factory.pin(2)
with Servo(2) as servo:
servo.close()
assert servo.closed
assert servo.pwm_device.pin is None
servo.close()
assert servo.closed
def test_servo_pulse_width(mock_factory, pwm):
p = mock_factory.pin(2)
with Servo(2, min_pulse_width=5/10000, max_pulse_width=25/10000) as servo:
assert isclose(servo.min_pulse_width, 5/10000)
assert isclose(servo.max_pulse_width, 25/10000)
assert isclose(servo.frame_width, 20/1000)
assert isclose(servo.pulse_width, 15/10000)
servo.value = -1
assert isclose(servo.pulse_width, 5/10000)
servo.value = 1
assert isclose(servo.pulse_width, 25/10000)
servo.value = None
assert servo.pulse_width is None
def test_servo_initial_values(mock_factory, pwm):
p = mock_factory.pin(2)
with Servo(2) as servo:
assert servo.value == 0
with Servo(2, initial_value=-1) as servo:
assert servo.is_active
assert servo.value == -1
assert isclose(p.state, 0.05)
with Servo(2, initial_value=0) as servo:
assert servo.is_active
assert servo.value == 0
assert isclose(p.state, 0.075)
with Servo(2, initial_value=1) as servo:
assert servo.is_active
assert servo.value == 1
assert isclose(p.state, 0.1)
with Servo(2, initial_value=None) as servo:
assert not servo.is_active
assert servo.value is None
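# The pin states asserted above are duty cycles over the default 20 ms frame:
# a 1 ms pulse (value -1) gives 1/20 = 0.05, 1.5 ms (value 0) gives 0.075 and
# 2 ms (value +1) gives 0.1, matching the default 1-2 ms pulse-width range.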
def test_servo_values(mock_factory, pwm):
p = mock_factory.pin(1)
with Servo(1) as servo:
servo.min()
assert servo.is_active
assert servo.value == -1
assert isclose(p.state, 0.05)
servo.max()
assert servo.is_active
assert servo.value == 1
assert isclose(p.state, 0.1)
servo.mid()
assert servo.is_active
assert servo.value == 0.0
assert isclose(p.state, 0.075)
servo.value = 0.5
assert servo.is_active
assert servo.value == 0.5
assert isclose(p.state, 0.0875)
servo.detach()
assert not servo.is_active
assert servo.value is None
servo.value = 0
assert servo.value == 0
servo.value = None
assert servo.value is None
def test_angular_servo_range(mock_factory, pwm):
with AngularServo(1, initial_angle=15, min_angle=0, max_angle=90) as servo:
assert repr(servo).startswith('<gpiozero.AngularServo object')
assert servo.min_angle == 0
assert servo.max_angle == 90
def test_angular_servo_initial_angles(mock_factory, pwm):
with AngularServo(1) as servo:
assert servo.angle == 0
with AngularServo(1, initial_angle=-90) as servo:
assert servo.angle == -90
assert isclose(servo.value, -1)
with AngularServo(1, initial_angle=0) as servo:
assert servo.angle == 0
assert isclose(servo.value, 0)
with AngularServo(1, initial_angle=90) as servo:
assert servo.angle == 90
assert isclose(servo.value, 1)
with AngularServo(1, initial_angle=None) as servo:
assert servo.angle is None
def test_angular_servo_angles(mock_factory, pwm):
with AngularServo(1) as servo:
servo.angle = 0
assert servo.angle == 0
assert isclose(servo.value, 0)
servo.max()
assert servo.angle == 90
assert isclose(servo.value, 1)
servo.min()
assert servo.angle == -90
assert isclose(servo.value, -1)
servo.detach()
assert servo.angle is None
with AngularServo(1, initial_angle=15, min_angle=0, max_angle=90) as servo:
assert servo.angle == 15
assert isclose(servo.value, -2/3)
servo.angle = 0
assert servo.angle == 0
assert isclose(servo.value, -1)
servo.angle = 90
assert servo.angle == 90
assert isclose(servo.value, 1)
servo.angle = None
assert servo.angle is None
with AngularServo(1, min_angle=45, max_angle=-45) as servo:
assert servo.angle == 0
assert isclose(servo.value, 0)
servo.angle = -45
assert servo.angle == -45
assert isclose(servo.value, 1)
servo.angle = -15
assert servo.angle == -15
assert isclose(servo.value, 1/3)
def test_tonalbuzzer_bad_init(mock_factory, pwm):
with pytest.raises(ValueError):
TonalBuzzer(2, initial_value=-2)
with pytest.raises(ValueError):
TonalBuzzer(2, initial_value=2)
with pytest.raises(ValueError):
TonalBuzzer(2, mid_tone='foo')
with pytest.raises(ValueError):
TonalBuzzer(2, octaves=0)
with pytest.raises(ValueError):
TonalBuzzer(2, octaves=0)
with pytest.raises(ValueError):
TonalBuzzer(2, mid_tone='B0', octaves=2)
with pytest.raises(ValueError):
TonalBuzzer(2, mid_tone='B1', octaves=3)
with pytest.raises(ValueError):
TonalBuzzer(2, mid_tone='B2', octaves=4)
def test_tonalbuzzer_init(mock_factory, pwm):
pin = mock_factory.pin(2)
with TonalBuzzer(2) as tb:
assert repr(tb).startswith('<gpiozero.TonalBuzzer object')
assert tb.pwm_device.pin is pin
assert tb.value is None
assert tb.pwm_device.frequency is None
with TonalBuzzer(2, mid_tone='C4') as tb:
assert tb.pwm_device.frequency is None
with TonalBuzzer(2, mid_tone='C4', initial_value=0) as tb:
assert isclose(tb.pwm_device.frequency, 261.626, abs_tol=1/100)
with TonalBuzzer(2, initial_value=-1) as tb:
assert isclose(tb.pwm_device.frequency, 220)
with TonalBuzzer(2, initial_value=0) as tb:
assert isclose(tb.pwm_device.frequency, 440)
with TonalBuzzer(2, initial_value=1) as tb:
assert isclose(tb.pwm_device.frequency, 880)
with TonalBuzzer(2, octaves=2, initial_value=-1) as tb:
assert isclose(tb.pwm_device.frequency, 110)
with TonalBuzzer(2, octaves=2, initial_value=0) as tb:
assert isclose(tb.pwm_device.frequency, 440)
with TonalBuzzer(2, octaves=2, initial_value=1) as tb:
assert isclose(tb.pwm_device.frequency, 1760)
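# These checks reflect the buzzer's value-to-frequency mapping: with the
# default mid_tone of A4 (440 Hz) and octaves=1, values -1/0/+1 map to
# A3/A4/A5 (220/440/880 Hz); with octaves=2 the range widens to A2-A6
# (110-1760 Hz).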
def test_tonalbuzzer_play(mock_factory, pwm):
with TonalBuzzer(2) as tb:
tb.play(60)
assert isclose(tb.pwm_device.frequency, 261.626, abs_tol=1/100)
tb.play(None)
assert tb.value is None
assert tb.pwm_device.frequency is None
tb.play('C5')
assert isclose(tb.pwm_device.frequency, 523.25, abs_tol=1/100)
tb.play('A#4')
assert isclose(tb.pwm_device.frequency, 466.16, abs_tol=1/100)
tb.stop()
assert tb.value is None
assert tb.pwm_device.frequency is None
with pytest.raises(ValueError):
tb.play('GS3')
with pytest.raises(ValueError):
tb.play('AS5')
def test_tonalbuzzer_set_value(mock_factory, pwm):
with TonalBuzzer(2) as tb:
assert tb.pwm_device.frequency is None
tb.value = -1
assert isclose(tb.pwm_device.frequency, 220)
tb.value = 1
assert isclose(tb.pwm_device.frequency, 880)
with TonalBuzzer(2, octaves=2) as tb:
assert tb.pwm_device.frequency is None
tb.value = -1
assert isclose(tb.pwm_device.frequency, 110)
tb.value = 1
assert isclose(tb.pwm_device.frequency, 1760)
def test_tonalbuzzer_read_value(mock_factory, pwm):
with TonalBuzzer(2) as tb:
assert tb.value is None
tb.play('A3')
assert isclose(tb.value, -1)
tb.play('A4')
assert isclose(tb.value, 0)
tb.play('A5')
assert isclose(tb.value, 1)
with TonalBuzzer(2, octaves=2) as tb:
assert tb.value is None
tb.play('A2')
assert isclose(tb.value, -1)
tb.play('A3')
assert isclose(tb.value, -0.5)
tb.play('A4')
assert isclose(tb.value, 0)
tb.play('A5')
assert isclose(tb.value, 0.5)
tb.play('A6')
assert isclose(tb.value, 1)
|
bsd-3-clause
|
staslev/incubator-beam
|
sdks/python/apache_beam/io/gcp/datastore/v1/datastoreio_test.py
|
8
|
12626
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import unittest
from mock import MagicMock
from mock import call
from mock import patch
from apache_beam.io.gcp.datastore.v1 import fake_datastore
from apache_beam.io.gcp.datastore.v1 import helper
from apache_beam.io.gcp.datastore.v1 import query_splitter
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import _Mutate
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from google.protobuf import timestamp_pb2
from googledatastore import helper as datastore_helper
except ImportError:
datastore_pb2 = None
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
@unittest.skipIf(datastore_pb2 is None, 'GCP dependencies are not installed')
class DatastoreioTest(unittest.TestCase):
_PROJECT = 'project'
_KIND = 'kind'
_NAMESPACE = 'namespace'
def setUp(self):
self._mock_datastore = MagicMock()
self._query = query_pb2.Query()
self._query.kind.add().name = self._KIND
def test_get_estimated_size_bytes_without_namespace(self):
entity_bytes = 100
timestamp = timestamp_pb2.Timestamp(seconds=1234)
self.check_estimated_size_bytes(entity_bytes, timestamp)
def test_get_estimated_size_bytes_with_namespace(self):
entity_bytes = 100
timestamp = timestamp_pb2.Timestamp(seconds=1234)
self.check_estimated_size_bytes(entity_bytes, timestamp, self._NAMESPACE)
def test_SplitQueryFn_with_num_splits(self):
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
num_splits = 23
def fake_get_splits(datastore, query, num_splits, partition=None):
return self.split_query(query, num_splits)
with patch.object(query_splitter, 'get_splits',
side_effect=fake_get_splits):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), num_splits)
self.assertEqual(0, len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_SplitQueryFn_without_num_splits(self):
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
# Force SplitQueryFn to compute the number of query splits
num_splits = 0
expected_num_splits = 23
entity_bytes = (expected_num_splits *
ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
with patch.object(ReadFromDatastore, 'get_estimated_size_bytes',
return_value=entity_bytes):
def fake_get_splits(datastore, query, num_splits, partition=None):
return self.split_query(query, num_splits)
with patch.object(query_splitter, 'get_splits',
side_effect=fake_get_splits):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), expected_num_splits)
self.assertEqual(0,
len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_SplitQueryFn_with_query_limit(self):
"""A test that verifies no split is performed when the query has a limit."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
self._query.limit.value = 3
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, 4)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(1, len(returned_split_queries))
self.assertEqual(0, len(self._mock_datastore.method_calls))
def test_SplitQueryFn_with_exception(self):
"""A test that verifies that no split is performed when failures occur."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
# Force SplitQueryFn to compute the number of query splits
num_splits = 0
expected_num_splits = 1
entity_bytes = (expected_num_splits *
ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
with patch.object(ReadFromDatastore, 'get_estimated_size_bytes',
return_value=entity_bytes):
with patch.object(query_splitter, 'get_splits',
side_effect=ValueError("Testing query split error")):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), expected_num_splits)
self.assertEqual(returned_split_queries[0][1], self._query)
self.assertEqual(0,
len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
  def test_DatastoreWriteFn_with_empty_batch(self):
self.check_DatastoreWriteFn(0)
def test_DatastoreWriteFn_with_one_batch(self):
num_entities_to_write = _Mutate._WRITE_BATCH_INITIAL_SIZE * 1 - 50
self.check_DatastoreWriteFn(num_entities_to_write)
def test_DatastoreWriteFn_with_multiple_batches(self):
num_entities_to_write = _Mutate._WRITE_BATCH_INITIAL_SIZE * 3 + 50
self.check_DatastoreWriteFn(num_entities_to_write)
def test_DatastoreWriteFn_with_batch_size_exact_multiple(self):
num_entities_to_write = _Mutate._WRITE_BATCH_INITIAL_SIZE * 2
self.check_DatastoreWriteFn(num_entities_to_write)
def check_DatastoreWriteFn(self, num_entities):
"""A helper function to test DatastoreWriteFn."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
entities = [e.entity for e in
fake_datastore.create_entities(num_entities)]
expected_mutations = map(WriteToDatastore.to_upsert_mutation, entities)
actual_mutations = []
self._mock_datastore.commit.side_effect = (
fake_datastore.create_commit(actual_mutations))
datastore_write_fn = _Mutate.DatastoreWriteFn(
self._PROJECT, fixed_batch_size=_Mutate._WRITE_BATCH_INITIAL_SIZE)
datastore_write_fn.start_bundle()
for mutation in expected_mutations:
datastore_write_fn.process(mutation)
datastore_write_fn.finish_bundle()
self.assertEqual(actual_mutations, expected_mutations)
self.assertEqual(
        (num_entities - 1) // _Mutate._WRITE_BATCH_INITIAL_SIZE + 1,
self._mock_datastore.commit.call_count)
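      # Commit-count note: (num_entities - 1) // batch_size + 1 is integer
      # ceiling division, so this asserts one commit RPC per full or partial
      # batch (and zero commits for the empty-batch case above).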
def test_DatastoreWriteLargeEntities(self):
"""100*100kB entities gets split over two Commit RPCs."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
entities = [e.entity for e in fake_datastore.create_entities(100)]
datastore_write_fn = _Mutate.DatastoreWriteFn(
self._PROJECT, fixed_batch_size=_Mutate._WRITE_BATCH_INITIAL_SIZE)
datastore_write_fn.start_bundle()
for entity in entities:
datastore_helper.add_properties(
entity, {'large': u'A' * 100000}, exclude_from_indexes=True)
datastore_write_fn.process(WriteToDatastore.to_upsert_mutation(entity))
datastore_write_fn.finish_bundle()
self.assertEqual(2, self._mock_datastore.commit.call_count)
def verify_unique_keys(self, queries):
"""A helper function that verifies if all the queries have unique keys."""
keys, _ = zip(*queries)
keys = set(keys)
self.assertEqual(len(keys), len(queries))
def check_estimated_size_bytes(self, entity_bytes, timestamp, namespace=None):
"""A helper method to test get_estimated_size_bytes"""
timestamp_req = helper.make_request(
self._PROJECT, namespace, helper.make_latest_timestamp_query(namespace))
timestamp_resp = self.make_stats_response(
{'timestamp': datastore_helper.from_timestamp(timestamp)})
kind_stat_req = helper.make_request(
self._PROJECT, namespace, helper.make_kind_stats_query(
namespace, self._query.kind[0].name,
datastore_helper.micros_from_timestamp(timestamp)))
kind_stat_resp = self.make_stats_response(
{'entity_bytes': entity_bytes})
def fake_run_query(req):
if req == timestamp_req:
return timestamp_resp
elif req == kind_stat_req:
return kind_stat_resp
else:
print(kind_stat_req)
raise ValueError("Unknown req: %s" % req)
self._mock_datastore.run_query.side_effect = fake_run_query
self.assertEqual(entity_bytes, ReadFromDatastore.get_estimated_size_bytes(
self._PROJECT, namespace, self._query, self._mock_datastore))
self.assertEqual(self._mock_datastore.run_query.call_args_list,
[call(timestamp_req), call(kind_stat_req)])
def make_stats_response(self, property_map):
resp = datastore_pb2.RunQueryResponse()
entity_result = resp.batch.entity_results.add()
datastore_helper.add_properties(entity_result.entity, property_map)
return resp
def split_query(self, query, num_splits):
"""Generate dummy query splits."""
split_queries = []
for _ in range(0, num_splits):
q = query_pb2.Query()
q.CopyFrom(query)
split_queries.append(q)
return split_queries
@unittest.skipIf(datastore_pb2 is None, 'GCP dependencies are not installed')
class DynamicWriteBatcherTest(unittest.TestCase):
def setUp(self):
self._batcher = _Mutate._DynamicBatchSizer()
# If possible, keep these test cases aligned with the Java test cases in
# DatastoreV1Test.java
def test_no_data(self):
self.assertEquals(_Mutate._WRITE_BATCH_INITIAL_SIZE,
self._batcher.get_batch_size(0))
def test_fast_queries(self):
self._batcher.report_latency(0, 1000, 200)
self._batcher.report_latency(0, 1000, 200)
self.assertEquals(_Mutate._WRITE_BATCH_MAX_SIZE,
self._batcher.get_batch_size(0))
def test_slow_queries(self):
self._batcher.report_latency(0, 10000, 200)
self._batcher.report_latency(0, 10000, 200)
self.assertEquals(100, self._batcher.get_batch_size(0))
def test_size_not_below_minimum(self):
self._batcher.report_latency(0, 30000, 50)
self._batcher.report_latency(0, 30000, 50)
self.assertEquals(_Mutate._WRITE_BATCH_MIN_SIZE,
self._batcher.get_batch_size(0))
def test_sliding_window(self):
self._batcher.report_latency(0, 30000, 50)
self._batcher.report_latency(50000, 5000, 200)
self._batcher.report_latency(100000, 5000, 200)
self.assertEquals(200, self._batcher.get_batch_size(150000))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
t0mm0/youtube-dl
|
youtube_dl/extractor/soundgasm.py
|
149
|
2041
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class SoundgasmIE(InfoExtractor):
IE_NAME = 'soundgasm'
_VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<user>[0-9a-zA-Z_\-]+)/(?P<title>[0-9a-zA-Z_\-]+)'
_TEST = {
'url': 'http://soundgasm.net/u/ytdl/Piano-sample',
'md5': '010082a2c802c5275bb00030743e75ad',
'info_dict': {
'id': '88abd86ea000cafe98f96321b23cc1206cbcbcc9',
'ext': 'm4a',
'title': 'ytdl_Piano-sample',
'description': 'Royalty Free Sample Music'
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('title')
audio_title = mobj.group('user') + '_' + mobj.group('title')
webpage = self._download_webpage(url, display_id)
audio_url = self._html_search_regex(
r'(?s)m4a\:\s"([^"]+)"', webpage, 'audio URL')
        audio_id = re.split(r'\/|\.', audio_url)[-2]
description = self._html_search_regex(
r'(?s)<li>Description:\s(.*?)<\/li>', webpage, 'description',
fatal=False)
return {
'id': audio_id,
'display_id': display_id,
'url': audio_url,
'title': audio_title,
'description': description
}
class SoundgasmProfileIE(InfoExtractor):
IE_NAME = 'soundgasm:profile'
_VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<id>[^/]+)/?(?:\#.*)?$'
_TEST = {
'url': 'http://soundgasm.net/u/ytdl',
'info_dict': {
'id': 'ytdl',
},
'playlist_count': 1,
}
def _real_extract(self, url):
profile_id = self._match_id(url)
webpage = self._download_webpage(url, profile_id)
entries = [
self.url_result(audio_url, 'Soundgasm')
for audio_url in re.findall(r'href="([^"]+/u/%s/[^"]+)' % profile_id, webpage)]
return self.playlist_result(entries, profile_id)
|
unlicense
|
vijayendra/Social
|
api/serializers.py
|
1
|
1440
|
from rest_framework import serializers
from django.contrib.auth.models import User
from forum.models import Post, Comment
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = User(
email=validated_data['email'],
username=validated_data['username']
)
user.set_password(validated_data['password'])
user.save()
return user
class PostSerializer(serializers.ModelSerializer):
user = UserSerializer(read_only=True, required=False)
class Meta:
model = Post
fields = ('url', 'id', 'user', 'title', 'description',)
read_only_fields = ('id', )
def get_validation_exclusions(self, *args, **kwargs):
exclusions = super(PostSerializer, self).get_validation_exclusions()
return exclusions + ['user']
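# Note: get_validation_exclusions() is a DRF 2.x hook; excluding 'user' here
# (and in CommentSerializer below) presumably lets the view fill it in from
# request.user when saving, rather than requiring it in the request payload.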
class CommentSerializer(serializers.ModelSerializer):
user = UserSerializer(read_only=True, required=False)
class Meta:
model = Comment
fields = ('id', 'user', 'post', 'parent', 'description')
read_only_fields = ('post',)
def get_validation_exclusions(self, *args, **kwargs):
exclusions = super(CommentSerializer, self).get_validation_exclusions()
return exclusions + ['user']
|
mit
|
mehmetkose/tornado
|
tornado/log.py
|
45
|
10920
|
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Logging support for Tornado.
Tornado uses three logger streams:
* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
potentially other servers in the future)
* ``tornado.application``: Logging of errors from application code (i.e.
uncaught exceptions from callbacks)
* ``tornado.general``: General-purpose logging, including any errors
or warnings from Tornado itself.
These streams may be configured independently using the standard library's
`logging` module. For example, you may wish to send ``tornado.access`` logs
to a separate file for analysis.
"""
from __future__ import absolute_import, division, print_function, with_statement
import logging
import logging.handlers
import sys
from tornado.escape import _unicode
from tornado.util import unicode_type, basestring_type
try:
import curses
except ImportError:
curses = None
# Logger objects for internal tornado use
access_log = logging.getLogger("tornado.access")
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")
def _stderr_supports_color():
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
def _safe_unicode(s):
try:
return _unicode(s)
except UnicodeDecodeError:
return repr(s)
class LogFormatter(logging.Formatter):
"""Log formatter used in Tornado.
Key features of this formatter are:
* Color support when logging to a terminal that supports it.
* Timestamps on every log line.
* Robust against str/bytes encoding problems.
This formatter is enabled automatically by
`tornado.options.parse_command_line` (unless ``--logging=none`` is
used).
"""
DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
DEFAULT_COLORS = {
logging.DEBUG: 4, # Blue
logging.INFO: 2, # Green
logging.WARNING: 3, # Yellow
logging.ERROR: 1, # Red
}
def __init__(self, color=True, fmt=DEFAULT_FORMAT,
datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
r"""
:arg bool color: Enables color support.
:arg string fmt: Log message format.
It will be applied to the attributes dict of log records. The
text between ``%(color)s`` and ``%(end_color)s`` will be colored
depending on the level if color support is on.
:arg dict colors: color mappings from logging level to terminal color
code
:arg string datefmt: Datetime format.
            Used for formatting the ``%(asctime)s`` placeholder in ``fmt``.
.. versionchanged:: 3.2
Added ``fmt`` and ``datefmt`` arguments.
"""
logging.Formatter.__init__(self, datefmt=datefmt)
self._fmt = fmt
self._colors = {}
if color and _stderr_supports_color():
# The curses module has some str/bytes confusion in
# python3. Until version 3.2.3, most methods return
# bytes, but only accept strings. In addition, we want to
# output these strings with the logging module, which
# works with unicode strings. The explicit calls to
# unicode() below are harmless in python2 but will do the
# right conversion in python 3.
fg_color = (curses.tigetstr("setaf") or
curses.tigetstr("setf") or "")
if (3, 0) < sys.version_info < (3, 2, 3):
fg_color = unicode_type(fg_color, "ascii")
for levelno, code in colors.items():
self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
else:
self._normal = ''
def format(self, record):
try:
message = record.getMessage()
assert isinstance(message, basestring_type) # guaranteed by logging
# Encoding notes: The logging module prefers to work with character
# strings, but only enforces that log messages are instances of
# basestring. In python 2, non-ascii bytestrings will make
# their way through the logging framework until they blow up with
# an unhelpful decoding error (with this formatter it happens
# when we attach the prefix, but there are other opportunities for
# exceptions further along in the framework).
#
# If a byte string makes it this far, convert it to unicode to
# ensure it will make it out to the logs. Use repr() as a fallback
# to ensure that all byte strings can be converted successfully,
# but don't do it by default so we don't add extra quotes to ascii
# bytestrings. This is a bit of a hacky place to do this, but
# it's worth it since the encoding errors that would otherwise
# result are so useless (and tornado is fond of using utf8-encoded
            # byte strings wherever possible).
record.message = _safe_unicode(message)
except Exception as e:
record.message = "Bad message (%r): %r" % (e, record.__dict__)
record.asctime = self.formatTime(record, self.datefmt)
if record.levelno in self._colors:
record.color = self._colors[record.levelno]
record.end_color = self._normal
else:
record.color = record.end_color = ''
formatted = self._fmt % record.__dict__
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
# exc_text contains multiple lines. We need to _safe_unicode
# each line separately so that non-utf8 bytes don't cause
# all the newlines to turn into '\n'.
lines = [formatted.rstrip()]
lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
formatted = '\n'.join(lines)
return formatted.replace("\n", "\n ")
def enable_pretty_logging(options=None, logger=None):
"""Turns on formatted logging output as configured.
This is called automatically by `tornado.options.parse_command_line`
and `tornado.options.parse_config_file`.
"""
if options is None:
from tornado.options import options
if options.logging is None or options.logging.lower() == 'none':
return
if logger is None:
logger = logging.getLogger()
logger.setLevel(getattr(logging, options.logging.upper()))
if options.log_file_prefix:
rotate_mode = options.log_rotate_mode
if rotate_mode == 'size':
channel = logging.handlers.RotatingFileHandler(
filename=options.log_file_prefix,
maxBytes=options.log_file_max_size,
backupCount=options.log_file_num_backups)
elif rotate_mode == 'time':
channel = logging.handlers.TimedRotatingFileHandler(
filename=options.log_file_prefix,
when=options.log_rotate_when,
interval=options.log_rotate_interval,
backupCount=options.log_file_num_backups)
else:
error_message = 'The value of log_rotate_mode option should be ' +\
'"size" or "time", not "%s".' % rotate_mode
raise ValueError(error_message)
channel.setFormatter(LogFormatter(color=False))
logger.addHandler(channel)
if (options.log_to_stderr or
(options.log_to_stderr is None and not logger.handlers)):
# Set up color if we are in a tty and curses is installed
channel = logging.StreamHandler()
channel.setFormatter(LogFormatter())
logger.addHandler(channel)
def define_logging_options(options=None):
"""Add logging-related flags to ``options``.
These options are present automatically on the default options instance;
this method is only necessary if you have created your own `.OptionParser`.
.. versionadded:: 4.2
This function existed in prior versions but was broken and undocumented until 4.2.
"""
if options is None:
# late import to prevent cycle
from tornado.options import options
options.define("logging", default="info",
help=("Set the Python log level. If 'none', tornado won't touch the "
"logging configuration."),
metavar="debug|info|warning|error|none")
options.define("log_to_stderr", type=bool, default=None,
help=("Send log output to stderr (colorized if possible). "
"By default use stderr if --log_file_prefix is not set and "
"no other logging is configured."))
options.define("log_file_prefix", type=str, default=None, metavar="PATH",
help=("Path prefix for log files. "
"Note that if you are running multiple tornado processes, "
"log_file_prefix must be different for each of them (e.g. "
"include the port number)"))
options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
help="max size of log files before rollover")
options.define("log_file_num_backups", type=int, default=10,
help="number of log files to keep")
options.define("log_rotate_when", type=str, default='midnight',
help=("specify the type of TimedRotatingFileHandler interval "
"other options:('S', 'M', 'H', 'D', 'W0'-'W6')"))
options.define("log_rotate_interval", type=int, default=1,
help="The interval value of timed rotating")
options.define("log_rotate_mode", type=str, default='size',
help="The mode of rotating files(time or size)")
options.add_parse_callback(lambda: enable_pretty_logging(options))
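if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): attach the
    # LogFormatter defined above to a plain StreamHandler and emit one record
    # through the ``tornado.general`` logger.
    demo_channel = logging.StreamHandler()
    demo_channel.setFormatter(LogFormatter(color=False))
    gen_log.addHandler(demo_channel)
    gen_log.setLevel(logging.INFO)
    gen_log.info("LogFormatter demo message")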
|
apache-2.0
|
SamYaple/ansible-modules-extras
|
system/lvol.py
|
32
|
15209
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <[email protected]>, Alexander Bulimov <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
author:
- "Jeroen Hoekx (@jhoekx)"
- "Alexander Bulimov (@abulimov)"
module: lvol
short_description: Configure LVM logical volumes
description:
- This module creates, removes or resizes logical volumes.
version_added: "1.1"
options:
vg:
description:
- The volume group this logical volume is part of.
required: true
lv:
description:
- The name of the logical volume.
required: true
size:
description:
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
Float values must begin with a digit.
Resizing using percentage values was not supported prior to 2.1.
state:
choices: [ "present", "absent" ]
default: present
description:
- Control if the logical volume exists. If C(present) the C(size) option
is required.
required: false
force:
version_added: "1.5"
choices: [ "yes", "no" ]
default: "no"
description:
      - Shrink or remove operations of volumes require this switch. Ensures that
        filesystems never get corrupted/destroyed by mistake.
required: false
opts:
version_added: "2.0"
description:
- Free-form options to be passed to the lvcreate command
snapshot:
version_added: "2.1"
description:
- The name of the snapshot volume
required: false
pvs:
version_added: "2.2"
description:
- Comma separated list of physical volumes e.g. /dev/sda,/dev/sdb
required: false
shrink:
version_added: "2.2"
description:
- shrink if current size is higher than size requested
required: false
default: yes
notes:
- Filesystems on top of the volume are not resized.
'''
EXAMPLES = '''
# Create a logical volume of 512m.
- lvol: vg=firefly lv=test size=512
# Create a logical volume of 512m with disks /dev/sda and /dev/sdb
- lvol: vg=firefly lv=test size=512 pvs=/dev/sda,/dev/sdb
# Create cache pool logical volume
- lvol: vg=firefly lv=lvcache size=512m opts='--type cache-pool'
# Create a logical volume of 512g.
- lvol: vg=firefly lv=test size=512g
# Create a logical volume the size of all remaining space in the volume group
- lvol: vg=firefly lv=test size=100%FREE
# Create a logical volume with special options
- lvol: vg=firefly lv=test size=512g opts="-r 16"
# Extend the logical volume to 1024m.
- lvol: vg=firefly lv=test size=1024
# Extend the logical volume to consume all remaining space in the volume group
- lvol: vg=firefly lv=test size=+100%FREE
# Extend the logical volume to take all remaining space of the PVs
- lvol: vg=firefly lv=test size=100%PVS
# Resize the logical volume to % of VG
- lvol: vg=firefly lv=test size=80%VG force=yes
# Reduce the logical volume to 512m
- lvol: vg=firefly lv=test size=512 force=yes
# Set the logical volume to 512m and do not try to shrink if size is lower than current one
- lvol: vg=firefly lv=test size=512 shrink=no
# Remove the logical volume.
- lvol: vg=firefly lv=test state=absent force=yes
# Create a snapshot volume of the test logical volume.
- lvol: vg=firefly lv=test snapshot=snap1 size=100m
'''
import re
decimal_point = re.compile(r"(\d+)")
def mkversion(major, minor, patch):
return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
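# Illustrative: mkversion(2, 2, 99) == 2002099, so versions can be compared
# numerically (e.g. LVM 2.02.100 becomes 2002100 >= 2002099, which enables
# the "--yes" option below).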
def parse_lvs(data):
lvs = []
for line in data.splitlines():
parts = line.strip().split(';')
lvs.append({
'name': parts[0].replace('[','').replace(']',''),
'size': int(decimal_point.match(parts[1]).group(1))
})
return lvs
def parse_vgs(data):
vgs = []
for line in data.splitlines():
parts = line.strip().split(';')
vgs.append({
'name': parts[0],
'size': int(decimal_point.match(parts[1]).group(1)),
'free': int(decimal_point.match(parts[2]).group(1)),
'ext_size': int(decimal_point.match(parts[3]).group(1))
})
return vgs
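# Illustrative (hypothetical command output): a vgs line such as
#   "  firefly;5120.00m;1024.00m;4.00m"
# is parsed into {'name': 'firefly', 'size': 5120, 'free': 1024, 'ext_size': 4}.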
def get_lvm_version(module):
ver_cmd = module.get_bin_path("lvm", required=True)
rc, out, err = module.run_command("%s version" % (ver_cmd))
if rc != 0:
return None
m = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
if not m:
return None
return mkversion(m.group(1), m.group(2), m.group(3))
def main():
module = AnsibleModule(
argument_spec=dict(
vg=dict(required=True),
lv=dict(required=True),
size=dict(type='str'),
opts=dict(type='str'),
state=dict(choices=["absent", "present"], default='present'),
force=dict(type='bool', default='no'),
shrink=dict(type='bool', default='yes'),
snapshot=dict(type='str', default=None),
pvs=dict(type='str')
),
supports_check_mode=True,
)
# Determine if the "--yes" option should be used
version_found = get_lvm_version(module)
    if version_found is None:
module.fail_json(msg="Failed to get LVM version number")
version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
if version_found >= version_yesopt:
yesopt = "--yes"
else:
yesopt = ""
vg = module.params['vg']
lv = module.params['lv']
size = module.params['size']
opts = module.params['opts']
state = module.params['state']
force = module.boolean(module.params['force'])
shrink = module.boolean(module.params['shrink'])
size_opt = 'L'
size_unit = 'm'
snapshot = module.params['snapshot']
pvs = module.params['pvs']
if pvs is None:
pvs = ""
else:
pvs = pvs.replace(",", " ")
if opts is None:
opts = ""
# Add --test option when running in check-mode
if module.check_mode:
test_opt = ' --test'
else:
test_opt = ''
if size:
# LVCREATE(8) -l --extents option with percentage
if '%' in size:
size_parts = size.split('%', 1)
size_percent = int(size_parts[0])
if size_percent > 100:
module.fail_json(msg="Size percentage cannot be larger than 100%")
size_whole = size_parts[1]
if size_whole == 'ORIGIN':
module.fail_json(msg="Snapshot Volumes are not supported")
elif size_whole not in ['VG', 'PVS', 'FREE']:
module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
size_opt = 'l'
size_unit = ''
        if '%' not in size:
# LVCREATE(8) -L --size option unit
if size[-1].lower() in 'bskmgtpe':
size_unit = size[-1].lower()
size = size[0:-1]
try:
float(size)
if not size[0].isdigit(): raise ValueError()
except ValueError:
module.fail_json(msg="Bad size specification of '%s'" % size)
# when no unit, megabytes by default
if size_opt == 'l':
unit = 'm'
else:
unit = size_unit
# Get information on volume group requested
vgs_cmd = module.get_bin_path("vgs", required=True)
rc, current_vgs, err = module.run_command(
"%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))
if rc != 0:
if state == 'absent':
module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False)
else:
module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
vgs = parse_vgs(current_vgs)
this_vg = vgs[0]
# Get information on logical volume requested
lvs_cmd = module.get_bin_path("lvs", required=True)
rc, current_lvs, err = module.run_command(
"%s -a --noheadings --nosuffix -o lv_name,size --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
if rc != 0:
if state == 'absent':
module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False)
else:
module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
changed = False
lvs = parse_lvs(current_lvs)
if snapshot is None:
check_lv = lv
else:
check_lv = snapshot
for test_lv in lvs:
if test_lv['name'] == check_lv:
this_lv = test_lv
break
else:
this_lv = None
if state == 'present' and not size:
if this_lv is None:
module.fail_json(msg="No size given.")
else:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
msg = ''
if this_lv is None:
if state == 'present':
### create LV
lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
if snapshot is not None:
cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
else:
cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
rc, _, err = module.run_command(cmd)
if rc == 0:
changed = True
else:
module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
else:
if state == 'absent':
### remove LV
if not force:
module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
lvremove_cmd = module.get_bin_path("lvremove", required=True)
rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
if rc == 0:
module.exit_json(changed=True)
else:
module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
elif size_opt == 'l':
### Resize LV based on % value
tool = None
size_free = this_vg['free']
if size_whole == 'VG' or size_whole == 'PVS':
size_requested = size_percent * this_vg['size'] / 100
else: # size_whole == 'FREE':
size_requested = size_percent * this_vg['free'] / 100
if '+' in size:
size_requested += this_lv['size']
if this_lv['size'] < size_requested:
if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
tool = module.get_bin_path("lvextend", required=True)
else:
module.fail_json(msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit))
elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
if size_requested == 0:
module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
elif not force:
module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
else:
tool = module.get_bin_path("lvreduce", required=True)
tool = '%s %s' % (tool, '--force')
if tool:
cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
rc, out, err = module.run_command(cmd)
if "Reached maximum COW size" in out:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
elif rc == 0:
changed = True
msg="Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
elif "matches existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
elif "not larger than existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
else:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
else:
### resize LV based on absolute values
tool = None
if int(size) > this_lv['size']:
tool = module.get_bin_path("lvextend", required=True)
elif shrink and int(size) < this_lv['size']:
if int(size) == 0:
module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
if not force:
module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
else:
tool = module.get_bin_path("lvreduce", required=True)
tool = '%s %s' % (tool, '--force')
if tool:
cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
rc, out, err = module.run_command(cmd)
if "Reached maximum COW size" in out:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
elif rc == 0:
changed = True
elif "matches existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
elif "not larger than existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
else:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
module.exit_json(changed=changed, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
ningchi/scikit-learn
|
sklearn/utils/tests/test_random.py
|
38
|
7410
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
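        # For example, with n_population=10 and n_samples=3 this is C(10, 3) = 120.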
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
ggm/vm-for-transfer
|
src/vm/transferword.py
|
1
|
14796
|
#Copyright (C) 2011 Gabriel Gregori Manzano
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class TransferLexicalUnit:
"""Represent a lexical unit and all its attributes for the transfer stage."""
def __init__(self):
self.lu = ""
self.attrs = {}
#Index of the first tag '<' character.
self.tagStart = None
def modifyAttr(self, attr, value):
"""Modify the part of the full lexical unit and the attr."""
if attr == 'whole': self.setAttributes(value)
else:
if attr == 'tags':
self.lu = self.lu.replace(self.attrs[attr], value)
else:
#Only modify the lu until the tags.
self.lu = self.lu[:self.tagStart].replace(self.attrs[attr],
value) + self.lu[self.tagStart:]
if attr == 'lem' or attr == 'lemh':
#If the lemh is the same as the lem, we update both.
if self.attrs['lem'] == self.attrs['lemh']:
self.attrs['lem'] = value
self.attrs['lemh'] = value
self.attrs[attr] = value
#Update the tag start index.
self.tagStart = self.lu.find('<')
def modifyTag(self, tag, value):
"""Modify the tag of the full lexical unit and the attr."""
#Only modify the tags and not the entire lu.
self.lu = self.lu[:self.tagStart] \
+ self.lu[self.tagStart:].replace(tag, value)
self.attrs["tags"] = self.attrs["tags"].replace(tag, value)
#Update the tag start index.
self.tagStart = self.lu.find('<')
def setAttributes(self, token):
"""Set some of the attributes of a transfer lexical unit."""
self.lu = token
#Get the position of the key characters.
tag = token.find('<')
self.tagStart = tag
head = token.find('#')
if tag > -1:
#Set tags depending on if the '#' is before or after tags.
if head < tag:
self.attrs['lem'] = token[:tag]
self.attrs['tags'] = token[tag:]
else:
self.attrs['lem'] = token[:tag] + token[head:]
self.attrs['tags'] = token[tag:head]
else:
#If there isn't any tag, the lemma is everything.
self.attrs['lem'] = token
self.attrs['tags'] = ""
if head > -1:
#Set lemh, lemq depending on if the '#' is before or after tags.
if head < tag:
self.attrs['lemh'] = token[:head]
self.attrs['lemq'] = token[head:tag]
else:
self.attrs['lemh'] = token[:tag]
self.attrs['lemq'] = token[head:]
#If it isn't a multiword, then the lemh is the lemma.
else: self.attrs['lemh'] = self.attrs['lem']
def isEmpty(self):
return self.lu == "" and self.attrs == {}
class TransferWord:
"""Represent a word as a source/target language pair."""
def __init__(self):
self.source = TransferLexicalUnit()
self.target = TransferLexicalUnit()
def __str__(self):
return "^{}/{}$: {}/{}".format(self.source.lu, self.target.lu,
self.source.attrs, self.target.attrs)
class TransferWordTokenizer():
"""Create a set of transfer words from an input stream."""
def __init__(self):
pass
def tokenize(self, input):
"""Tokenize the input in ^...$ tokens."""
input = input.read()
tokens = []
token = ""
superblanks = []
escapeNextChar = False
ignoreMultipleTargets = False
word = TransferWord()
for char in input:
if ignoreMultipleTargets and char != '$': continue
elif escapeNextChar:
token += str(char)
escapeNextChar = False
elif char == "\\":
token += str(char)
escapeNextChar = True
elif char == '^':
superblanks.append(token)
token = ""
elif char == '$':
word.target.setAttributes(token.strip())
tokens.append(word)
token = ""
ignoreMultipleTargets = False
word = TransferWord()
elif char == '/':
if word.source.isEmpty():
word.source.setAttributes(token.strip())
token = ""
else:
ignoreMultipleTargets = True
else: token += str(char)
#Add everything at the end until the last ']' as a superblank.
superblanks.append(token[:1 + token.rfind(']')])
return tokens, superblanks
class ChunkLexicalUnit:
"""Represent a lexical unit and all its attributes for the inter/postchunk."""
def __init__(self):
self.lu = ""
self.attrs = {}
#Index of the first tag '<' character.
self.tagStart = None
#Index of the first '{', the start of the chunk content.
self.contentStart = None
def modifyAttr(self, attr, value):
"""Modify the part of the full lexical unit and the attr."""
if attr == 'whole': self.setAttributes(value)
else:
if attr == 'tags':
self.lu = self.lu.replace(self.attrs[attr], value)
elif attr == 'chcontent':
chcontent = self.attrs[attr]
lu = self.lu[:self.contentStart]
lu += self.lu[self.contentStart:].replace(chcontent, value)
self.lu = lu
else:
#Only modify the lu until the tags.
self.lu = self.lu[:self.tagStart].replace(self.attrs[attr],
value) + self.lu[self.tagStart:]
if attr == 'lem' or attr == 'lemh':
#If the lemh is the same as the lem, we update both.
if self.attrs['lem'] == self.attrs['lemh']:
self.attrs['lem'] = value
self.attrs['lemh'] = value
self.attrs[attr] = value
#Update index of first tag and content start.
self.tagStart = self.lu.find('<')
self.contentStart = self.lu.find('{')
def modifyTag(self, tag, value):
"""Modify the tag of the full lexical unit and the attr."""
#Only modify the tags and not the entire lu.
self.lu = self.lu[:self.tagStart] \
+ self.lu[self.tagStart:self.contentStart].replace(tag, value) \
+ self.lu[self.contentStart:]
self.attrs["tags"] = self.attrs["tags"].replace(tag, value)
#Update index of first tag and content start.
self.tagStart = self.lu.find('<')
self.contentStart = self.lu.find('{')
def setAttributes(self, token):
"""Set some of the attributes of a chunk lexical unit."""
#Get the position of the key characters.
self.lu = token
tag = token.find('<')
self.tagStart = tag
contentStart = token.find('{')
self.contentStart = contentStart
contentEnd = token.find('}')
if tag > -1 and tag < contentStart:
#The lemma is everything until the first tag.
self.attrs['lem'] = token[:tag]
self.attrs['tags'] = token[tag:contentStart]
else:
#If there isn't any tag, the lemma is everything until the '{'.
self.attrs['lem'] = token[:contentStart]
self.attrs['tags'] = ""
#In a chunk the lemh == lem and lemq is always "".
self.attrs['lemh'] = self.attrs['lem']
self.attrs['lemq'] = ""
#Store chunk contents with the '{' and '}'.
self.attrs['chcontent'] = token[contentStart:contentEnd + 1]
class ChunkWord:
"""Represent a word as a chunk for the interchunk and postchunk stages."""
def __init__(self):
self.chunk = ChunkLexicalUnit()
self.content = []
self.blanks = []
def __str__(self):
string = "^{}$: {}, content = [ ".format(self.chunk.lu, self.chunk.attrs)
for lu in self.content:
string += "^{}$: {} ".format(lu.lu, lu.attrs)
string += ']'
return string
def parseChunkContent(self):
"""Set the content of the chunk word as a list of lexical units
        and apply the postchunk rule of setting the case of the lexical
        units to that of the chunk pseudolemma.
"""
#Depending on the case, change all cases or just the first lexical unit.
pseudoLemmaCase = self.getCase(self.chunk.attrs['lem'])
upperCaseAll = False
firstUpper = False
if pseudoLemmaCase == "AA": upperCaseAll = True
elif pseudoLemmaCase == "Aa": firstUpper = True
content = []
#The first blank (0) is the one before the chunk name.
blanks = [""]
firstLu = True
chcontent = self.chunk.attrs['chcontent'][1:-1] #Remove { and }.
for token in chcontent.split('$'):
if len(token) < 2: continue
#After the first blank, append the blanks between lexical units.
tag = token.find('^')
if firstLu: firstLu = False
else: blanks.append(token[:tag])
token = token[tag:]
lu = TransferLexicalUnit()
lu.setAttributes(token.replace('^', '').replace('/', '').strip())
if upperCaseAll: self.changeLemmaCase(lu, pseudoLemmaCase)
elif firstUpper:
self.changeLemmaCase(lu, pseudoLemmaCase)
firstUpper = False
content.append(lu)
self.content = content
self.blanks = blanks
def updateChunkContent(self, oldLu, newLu):
"""Update chunk.lu and chcontent when a lu inside the chunk changes."""
chunkLu = self.chunk.lu[:self.chunk.contentStart]
chunkLu += self.chunk.lu[self.chunk.contentStart:].replace(oldLu, newLu)
self.chunk.lu = chunkLu
chcontent = self.chunk.attrs['chcontent'].replace(oldLu, newLu)
self.chunk.attrs['chcontent'] = chcontent
def getCase(self, string):
"""Get the case of a string, defaulting to capitals."""
isFirstUpper = string[0].isupper()
isUpper = string.isupper()
#If it's a 1-length string and is upper, capitalize it.
if isUpper and len(string) == 1: return "Aa"
elif isFirstUpper and not isUpper: return "Aa"
elif isUpper: return "AA"
else: return "aa"
def changeLemmaCase(self, lu, case):
"""Change the case of the lemma in a lexical unit."""
oldLu = lu.lu
oldLem = lu.attrs['lem']
if case == "aa": newLem = oldLem.lower()
elif case == "Aa": newLem = oldLem[0].upper() + oldLem[1:]
elif case == "AA": newLem = oldLem.upper()
lu.modifyAttr('lem', newLem)
#Also, update the chcontent attribute of the chunk.
chcontent = self.chunk.attrs['chcontent']
self.chunk.attrs['chcontent'] = chcontent.replace(oldLu, lu.lu, 1)
class ChunkWordTokenizer():
"""Create a set of chunk words from an input stream."""
def __init__(self, solveRefs=False, parseContent=False):
self.solveRefs = solveRefs
self.parseContent = parseContent
def tokenize(self, input):
"""Tokenize the input in ^name<tags>{^...$} tokens."""
input = input.read()
tokens = []
token = ""
superblanks = []
chunkStart = True
escapeNextChar = False
word = ChunkWord()
for char in input:
if escapeNextChar:
token += str(char)
escapeNextChar = False
elif char == "\\":
token += str(char)
escapeNextChar = True
#Read the ^ and $ of the lexical units but not of the chunks.
elif char == '^':
if not chunkStart: token += str(char)
else:
#Characters between chunks are treated like superblanks.
superblanks.append(token)
token = ""
chunkStart = False
elif char == '$':
if not chunkStart: token += str(char)
elif char == '}':
token += str(char)
word.chunk.setAttributes(token.strip())
if self.solveRefs: self.solveReferences(word)
if self.parseContent: word.parseChunkContent()
tokens.append(word)
#Initialize auxiliary variables.
chunkStart = True
token = ""
word = ChunkWord()
else: token += str(char)
#Append the last superblank of the input, usually the '\n'.
superblanks.append(token)
return tokens, superblanks
def solveReferences(self, word):
"""Solve the references to the chunk tags."""
tags = word.chunk.attrs['tags']
tags = tags.replace('<', '')
tags = tags.split('>')
lu = word.chunk.lu
chcontent = word.chunk.attrs['chcontent']
newChcontent = chcontent
for i, char in enumerate(chcontent):
if char.isnumeric():
if chcontent[i - 1] == '<' and chcontent[i + 1] == '>':
pos = int(char)
tag = tags[pos - 1]
lu = self.replaceReference(lu, char, tag)
newChcontent = self.replaceReference(newChcontent, char, tag)
word.chunk.lu = lu
word.chunk.attrs['chcontent'] = newChcontent
def replaceReference(self, container, pos, tag):
"""Replace a number (pos) with the tag in the container."""
for i, char in enumerate(container):
if char == pos:
if container[i - 1] == '<' and container[i + 1] == '>':
newContainer = container[:i] + tag + container[i + 1:]
return newContainer
|
gpl-2.0
|
frida/glib
|
.gitlab-ci/meson-junit-report.py
|
1
|
3688
|
#!/usr/bin/env python3
# Turns a Meson testlog.json file into a JUnit XML report
#
# Copyright 2019 GNOME Foundation
#
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Original author: Emmanuele Bassi
import argparse
import datetime
import json
import sys
import xml.etree.ElementTree as ET
aparser = argparse.ArgumentParser(
description="Turns a Meson test log into a JUnit report"
)
aparser.add_argument(
"--project-name", metavar="NAME", help="The project name", default="unknown"
)
aparser.add_argument(
"--job-id", metavar="ID", help="The job ID for the report", default="Unknown"
)
aparser.add_argument(
"--branch",
metavar="NAME",
help="Branch of the project being tested",
default="master",
)
aparser.add_argument(
"--output",
metavar="FILE",
help="The output file, stdout by default",
type=argparse.FileType("w", encoding="UTF-8"),
default=sys.stdout,
)
aparser.add_argument(
"infile",
metavar="FILE",
help="The input testlog.json, stdin by default",
type=argparse.FileType("r", encoding="UTF-8"),
default=sys.stdin,
)
args = aparser.parse_args()
outfile = args.output
testsuites = ET.Element("testsuites")
testsuites.set("id", "{}/{}".format(args.job_id, args.branch))
testsuites.set("package", args.project_name)
testsuites.set("timestamp", datetime.datetime.utcnow().isoformat())
suites = {}
for line in args.infile:
data = json.loads(line)
(full_suite, unit_name) = data["name"].split(" / ")
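    # Illustrative (hypothetical log entry): a "name" value such as
    # "glib:gio / gdbus-codegen" splits into the full suite "glib:gio" and the
    # unit "gdbus-codegen"; the project prefix is stripped from the suite below.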
try:
(project_name, suite_name) = full_suite.split(":")
except ValueError:
project_name = full_suite
suite_name = full_suite
duration = data["duration"]
return_code = data["returncode"]
log = data["stdout"]
log_stderr = data.get("stderr", "")
unit = {
"suite": suite_name,
"name": unit_name,
"duration": duration,
"returncode": return_code,
"stdout": log,
"stderr": log_stderr,
}
units = suites.setdefault(suite_name, [])
units.append(unit)
for name, units in suites.items():
print("Processing suite {} (units: {})".format(name, len(units)))
def if_failed(unit):
if unit["returncode"] != 0:
return True
return False
def if_succeded(unit):
if unit["returncode"] == 0:
return True
return False
successes = list(filter(if_succeded, units))
failures = list(filter(if_failed, units))
print(" - {}: {} pass, {} fail".format(name, len(successes), len(failures)))
testsuite = ET.SubElement(testsuites, "testsuite")
testsuite.set("name", "{}/{}".format(args.project_name, name))
testsuite.set("tests", str(len(units)))
testsuite.set("errors", str(len(failures)))
testsuite.set("failures", str(len(failures)))
for unit in successes:
testcase = ET.SubElement(testsuite, "testcase")
testcase.set("classname", "{}/{}".format(args.project_name, unit["suite"]))
testcase.set("name", unit["name"])
testcase.set("time", str(unit["duration"]))
for unit in failures:
testcase = ET.SubElement(testsuite, "testcase")
testcase.set("classname", "{}/{}".format(args.project_name, unit["suite"]))
testcase.set("name", unit["name"])
testcase.set("time", str(unit["duration"]))
failure = ET.SubElement(testcase, "failure")
failure.set("classname", "{}/{}".format(args.project_name, unit["suite"]))
failure.set("name", unit["name"])
failure.set("type", "error")
failure.text = unit["stdout"] + "\n" + unit["stderr"]
output = ET.tostring(testsuites, encoding="unicode")
outfile.write(output)
|
lgpl-2.1
|
photoninger/ansible
|
lib/ansible/modules/network/f5/bigip_monitor_tcp_half_open.py
|
3
|
17834
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_monitor_tcp_half_open
short_description: Manages F5 BIG-IP LTM tcp half-open monitors
description: Manages F5 BIG-IP LTM tcp half-open monitors.
version_added: "2.4"
options:
name:
description:
- Monitor name.
required: True
aliases:
- monitor
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(tcp_half_open)
parent on the C(Common) partition.
default: "/Common/tcp_half_open"
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
- If this value is an IP address, and the C(type) is C(tcp) (the default),
then a C(port) number must be specified.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
must be specified
version_added: 2.5
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
      the set time period, it is considered down. You can change this
      number to any value you want; however, it should be 3 times the
      interval number of seconds plus 1 second. If this parameter is not
provided when creating a new monitor, then the default value will be 16.
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. If this parameter is not provided when creating
a new monitor, then the default value will be 0.
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
version_added: 2.5
notes:
- Requires BIG-IP software version >= 12
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create TCP Monitor
bigip_monitor_tcp_half_open:
state: present
ip: 10.10.10.10
server: lb.mydomain.com
user: admin
password: secret
name: my_tcp_monitor
delegate_to: localhost
- name: Remove TCP Monitor
bigip_monitor_tcp_half_open:
state: absent
server: lb.mydomain.com
user: admin
password: secret
name: my_tcp_monitor
delegate_to: localhost
- name: Add half-open monitor for all addresses, port 514
bigip_monitor_tcp_half_open:
server: lb.mydomain.com
user: admin
port: 514
password: secret
name: my_tcp_monitor
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: tcp
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: 10.12.13.14
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
time_until_up:
description: The new time in which to mark a system as up after first successful response.
returned: changed
type: int
sample: 2
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
HAS_DEVEL_IMPORTS = False
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
try:
import netaddr
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
class Parameters(AnsibleF5Parameters):
api_map = {
'timeUntilUp': 'time_until_up',
'defaultsFrom': 'parent',
'recv': 'receive'
}
api_attributes = [
'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'recv', 'send',
'destination'
]
returnables = [
'parent', 'send', 'receive', 'ip', 'port', 'interval', 'timeout',
'time_until_up'
]
updatables = [
'destination', 'send', 'receive', 'interval', 'timeout', 'time_until_up'
]
def _fqdn_name(self, value):
if value is not None and not value.startswith('/'):
return '/{0}/{1}'.format(self.partition, value)
return value
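    # Illustrative: with partition "Common", _fqdn_name("my_monitor") returns
    # "/Common/my_monitor"; an already-qualified "/Common/my_monitor" is
    # returned unchanged.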
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
except Exception:
return result
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
@property
def interval(self):
if self._values['interval'] is None:
return None
# Per BZ617284, the BIG-IP UI does not raise a warning about this.
# So i
        if int(self._values['interval']) < 1 or int(self._values['interval']) > 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
try:
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
result = str(netaddr.IPAddress(self._values['ip']))
return result
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def time_until_up(self):
if self._values['time_until_up'] is None:
return None
return int(self._values['time_until_up'])
@property
def parent(self):
if self._values['parent'] is None:
return None
if self._values['parent'].startswith('/'):
parent = os.path.basename(self._values['parent'])
result = '/{0}/{1}'.format(self.partition, parent)
else:
result = '/{0}/{1}'.format(self.partition, self._values['parent'])
return result
@property
def type(self):
return 'tcp_half_open'
class Changes(Parameters):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(params=self.module.params)
self.changes = Changes()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = Changes(params=changed)
return True
return False
def _announce_deprecations(self):
warnings = []
if self.want:
warnings += self.want._values.get('__warnings', [])
if self.have:
warnings += self.have._values.get('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations()
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.time_until_up is None:
self.want.update({'time_until_up': 0})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
if self.module.check_mode:
return True
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the monitor.")
return True
def read_current_from_device(self):
resource = self.client.api.tm.ltm.monitor.tcp_half_opens.tcp_half_open.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return Parameters(params=result)
def exists(self):
result = self.client.api.tm.ltm.monitor.tcp_half_opens.tcp_half_open.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.ltm.monitor.tcp_half_opens.tcp_half_open.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.monitor.tcp_half_opens.tcp_half_open.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove_from_device(self):
result = self.client.api.tm.ltm.monitor.tcp_half_opens.tcp_half_open.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/tcp_half_open'),
ip=dict(),
port=dict(type='int'),
interval=dict(type='int'),
timeout=dict(type='int'),
time_until_up=dict(type='int'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
if not HAS_NETADDR:
module.fail_json(msg="The python netaddr module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
gpl-3.0
|
ChengyuSong/xen-arm
|
tools/xm-test/tests/xapi/03_xapi-network_pos.py
|
38
|
4039
|
#!/usr/bin/python
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2009 flonatel GmbH & Co. KG
#============================================================================
#
# Author: Andreas Florath <[email protected]>
# Loosely based on the original testcase from
# Tom Wilkie <[email protected]>
#
# This test case creates two guest systems, creates a (very) private
# network between them and attaches the appropriate ethernet devices.
# Note: this test case uses some fixed IP and network addresses.
# This is not a problem, because they are only used for local
# communication.
#
import sys
import time
from XmTestLib import *
from XmTestLib.network_utils import *
from XmTestLib.XenAPIDomain import XmTestAPIDomain
# Some config for this testcase
class TCConfig:
network_name = "xapi-network-xm-test-03"
ip_addr_1 = "172.16.77.70"
ip_addr_2 = "172.16.77.71"
default_gateway = "172.16.77.72"
default_netmask = "255.255.255.0"
@staticmethod
def remove_network(guest):
nw = guest.session.xenapi.network.get_all()
for n in nw:
name = guest.session.xenapi.network.get_name_label(n)
if name == TCConfig.network_name:
guest.session.xenapi.network.destroy(n)
# Create two domains (default XmTestDomain, with our ramdisk)
try:
guest1 = XmTestAPIDomain()
console1 = guest1.start()
# guest1.newDevice(XenNetDevice, "eth0")
# guest1_netdev = guest1.getDevice("eth0")
guest2 = XmTestAPIDomain()
console2 = guest2.start()
except DomainError, e:
if verbose:
print("Failed to create test domain because: %s" % e.extra)
FAIL(str(e))
# Clean up leftovers from any previous run
TCConfig.remove_network(guest1)
# Create a network
network = guest1.session.xenapi.network.create(
{ "name_label": TCConfig.network_name,
"name_description": "This is a testing network",
"default_gateway": TCConfig.default_gateway,
"default_netmask": TCConfig.default_netmask,
"other_config": {} } )
# Attach two domains to it
status, msg = network_attach(
guest1.getName(), console1, bridge=TCConfig.network_name)
if status:
FAIL(msg)
status, msg = network_attach(
guest2.getName(), console2, bridge=TCConfig.network_name)
if status:
FAIL(msg)
# Configure IP addresses on two guests
try:
run = console1.runCmd(
"ifconfig eth0 " + TCConfig.ip_addr_1
+ " netmask " + TCConfig.default_netmask + " up")
run = console2.runCmd(
"ifconfig eth0 " + TCConfig.ip_addr_2
+ " netmask " + TCConfig.default_netmask + " up")
except ConsoleError, e:
saveLog(console1.getHistory())
FAIL(str(e))
# Now ping...
try:
run = console1.runCmd("ping -c 4 " + TCConfig.ip_addr_2)
if run['return'] > 0:
FAIL("Could not ping other host")
run = console2.runCmd("ping -c 4 " + TCConfig.ip_addr_1)
if run['return'] > 0:
FAIL("Could not ping other host")
except ConsoleError, e:
saveLog(console1.getHistory())
FAIL(str(e))
status, msg = network_detach(guest1.getName(), console1)
status, msg = network_detach(guest2.getName(), console2)
# Clean up
TCConfig.remove_network(guest1)
guest1.closeConsole()
guest1.stop()
guest2.closeConsole()
guest2.stop()
|
gpl-2.0
|
vauxoo-dev/autopep8
|
test/suite/utf-8.py
|
55
|
2557
|
# -*- coding: utf-8 -*-
class Rectangle(Blob):
def __init__(self, width, height,
color='black', emphasis=None, highlight=0):
if width == 0 and height == 0 and \
color == 'red' and emphasis == 'strong' or \
highlight > 100:
raise ValueError("sorry, you lose")
if width == 0 and height == 0 and (color == 'red' or
emphasis is None):
raise ValueError("I don't think so -- values are %s, %s" %
(width, height))
Blob.__init__(self, width, height,
color, emphasis, highlight)
# Some random text with multi-byte characters (utf-8 encoded)
#
# Εδώ μάτσο κειμένων τη, τρόπο πιθανό διευθυντές ώρα μη. Νέων απλό παράγει ροή
# κι, το επί δεδομένη καθορίζουν. Πάντως ζητήσεις περιβάλλοντος ένα με, τη
# ξέχασε αρπάζεις φαινόμενο όλη. Τρέξει εσφαλμένη χρησιμοποίησέ νέα τι. Θα όρο
# πετάνε φακέλους, άρα με διακοπής λαμβάνουν εφαμοργής. Λες κι μειώσει
# καθυστερεί.
# 79 narrow chars
# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 [79]
# 78 narrow chars (Na) + 1 wide char (W)
# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8情
# 3 narrow chars (Na) + 40 wide chars (W)
# 情 情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情
# 3 narrow chars (Na) + 76 wide chars (W)
# 情 情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情
#
#: E501
# 80 narrow chars (Na)
# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 [80]
#
#: E501
# 78 narrow chars (Na) + 2 wide char (W)
# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8情情
#
#: E501
# 3 narrow chars (Na) + 77 wide chars (W)
# 情 情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情
#
|
mit
|
xin3liang/platform_external_chromium_org
|
chrome/test/chromedriver/test/unittest_util.py
|
134
|
4320
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for dealing with the python unittest module."""
import fnmatch
import sys
import unittest
class _TextTestResult(unittest._TextTestResult):
"""A test result class that can print formatted text results to a stream.
Results printed in conformance with gtest output format, like:
[ RUN ] autofill.AutofillTest.testAutofillInvalid: "test desc."
[ OK ] autofill.AutofillTest.testAutofillInvalid
[ RUN ] autofill.AutofillTest.testFillProfile: "test desc."
[ OK ] autofill.AutofillTest.testFillProfile
[ RUN ] autofill.AutofillTest.testFillProfileCrazyCharacters: "Test."
[ OK ] autofill.AutofillTest.testFillProfileCrazyCharacters
"""
def __init__(self, stream, descriptions, verbosity):
unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
self._fails = set()
def _GetTestURI(self, test):
return '%s.%s.%s' % (test.__class__.__module__,
test.__class__.__name__,
test._testMethodName)
def getDescription(self, test):
return '%s: "%s"' % (self._GetTestURI(test), test.shortDescription())
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.stream.writeln('[ RUN ] %s' % self.getDescription(test))
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
self.stream.writeln('[ OK ] %s' % self._GetTestURI(test))
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
self.stream.writeln('[ ERROR ] %s' % self._GetTestURI(test))
self._fails.add(self._GetTestURI(test))
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
self.stream.writeln('[ FAILED ] %s' % self._GetTestURI(test))
self._fails.add(self._GetTestURI(test))
def getRetestFilter(self):
return ':'.join(self._fails)
class TextTestRunner(unittest.TextTestRunner):
"""Test Runner for displaying test results in textual format.
Results are displayed in conformance with google test output.
"""
def __init__(self, verbosity=1):
unittest.TextTestRunner.__init__(self, stream=sys.stderr,
verbosity=verbosity)
def _makeResult(self):
return _TextTestResult(self.stream, self.descriptions, self.verbosity)
def GetTestsFromSuite(suite):
"""Returns all the tests from a given test suite."""
tests = []
for x in suite:
if isinstance(x, unittest.TestSuite):
tests += GetTestsFromSuite(x)
else:
tests += [x]
return tests
def GetTestNamesFromSuite(suite):
"""Returns a list of every test name in the given suite."""
return map(lambda x: GetTestName(x), GetTestsFromSuite(suite))
def GetTestName(test):
"""Gets the test name of the given unittest test."""
return '.'.join([test.__class__.__module__,
test.__class__.__name__,
test._testMethodName])
def FilterTestSuite(suite, gtest_filter):
"""Returns a new filtered tests suite based on the given gtest filter.
See http://code.google.com/p/googletest/wiki/AdvancedGuide
for gtest_filter specification.
"""
return unittest.TestSuite(FilterTests(GetTestsFromSuite(suite), gtest_filter))
def FilterTests(all_tests, gtest_filter):
"""Returns a filtered list of tests based on the given gtest filter.
See http://code.google.com/p/googletest/wiki/AdvancedGuide
for gtest_filter specification.
"""
pattern_groups = gtest_filter.split('-')
positive_patterns = pattern_groups[0].split(':')
negative_patterns = None
if len(pattern_groups) > 1:
negative_patterns = pattern_groups[1].split(':')
tests = []
for test in all_tests:
test_name = GetTestName(test)
# Test name must be matched by one positive pattern.
for pattern in positive_patterns:
if fnmatch.fnmatch(test_name, pattern):
break
else:
continue
# Test name must not be matched by any negative patterns.
for pattern in negative_patterns or []:
if fnmatch.fnmatch(test_name, pattern):
break
else:
tests += [test]
return tests
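# A usage sketch for the helpers above (module and test names here are
# hypothetical): keep every autofill test except one, using gtest's
# "positive-negative" filter syntax, then run the result.
#
# suite = unittest.TestLoader().loadTestsFromModule(autofill_tests)
# filtered = FilterTestSuite(suite, 'autofill.*-autofill.AutofillTest.testFillProfile')
# TextTestRunner(verbosity=1).run(filtered)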
|
bsd-3-clause
|
yati-sagade/pip
|
tests/functional/test_wheel.py
|
26
|
5129
|
"""'pip wheel' tests"""
import os
import pytest
from os.path import exists
from pip.locations import write_delete_marker_file
from pip.status_codes import PREVIOUS_BUILD_DIR_ERROR
from tests.lib import pyversion
def test_pip_wheel_fails_without_wheel(script, data):
"""
Test 'pip wheel' fails without wheel
"""
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, 'simple==3.0',
expect_error=True,
)
assert "'pip wheel' requires the 'wheel' package" in result.stderr
@pytest.mark.network
def test_pip_wheel_success(script, data):
"""
Test 'pip wheel' success.
"""
script.pip('install', 'wheel')
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, 'simple==3.0',
)
wheel_file_name = 'simple-3.0-py%s-none-any.whl' % pyversion[0]
wheel_file_path = script.scratch / 'wheelhouse' / wheel_file_name
assert wheel_file_path in result.files_created, result.stdout
assert "Successfully built simple" in result.stdout, result.stdout
@pytest.mark.network
def test_pip_wheel_downloads_wheels(script, data):
"""
Test 'pip wheel' downloads wheels
"""
script.pip('install', 'wheel')
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, 'simple.dist',
)
wheel_file_name = 'simple.dist-0.1-py2.py3-none-any.whl'
wheel_file_path = script.scratch / 'wheelhouse' / wheel_file_name
assert wheel_file_path in result.files_created, result.stdout
assert "Saved" in result.stdout, result.stdout
@pytest.mark.network
def test_pip_wheel_builds_when_no_binary_set(script, data):
script.pip('install', 'wheel')
res = script.pip(
'wheel', '--no-index', '--no-binary', ':all:', '-f', data.find_links,
'setuptools==0.9.8')
assert "Running setup.py bdist_wheel for setuptools" in str(res), str(res)
@pytest.mark.network
def test_pip_wheel_builds_editable_deps(script, data):
"""
Test 'pip wheel' finds and builds dependencies of editables
"""
script.pip('install', 'wheel')
editable_path = os.path.join(data.src, 'requires_simple')
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, '-e', editable_path
)
wheel_file_name = 'simple-1.0-py%s-none-any.whl' % pyversion[0]
wheel_file_path = script.scratch / 'wheelhouse' / wheel_file_name
assert wheel_file_path in result.files_created, result.stdout
@pytest.mark.network
def test_pip_wheel_fail(script, data):
"""
Test 'pip wheel' failure.
"""
script.pip('install', 'wheel')
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, 'wheelbroken==0.1',
expect_error=True,
)
wheel_file_name = 'wheelbroken-0.1-py%s-none-any.whl' % pyversion[0]
wheel_file_path = script.scratch / 'wheelhouse' / wheel_file_name
assert wheel_file_path not in result.files_created, (
wheel_file_path,
result.files_created,
)
assert "FakeError" in result.stdout, result.stdout
assert "Failed to build wheelbroken" in result.stdout, result.stdout
assert result.returncode != 0
@pytest.mark.network
def test_no_clean_option_blocks_cleaning_after_wheel(script, data):
"""
Test --no-clean option blocks cleaning after wheel build
"""
script.pip('install', 'wheel')
build = script.venv_path / 'build'
result = script.pip(
'wheel', '--no-clean', '--no-index', '--build', build,
'--find-links=%s' % data.find_links, 'simple',
)
build = build / 'simple'
assert exists(build), "build/simple should still exist %s" % str(result)
@pytest.mark.network
def test_pip_wheel_source_deps(script, data):
"""
Test 'pip wheel' finds and builds source archive dependencies
of wheels
"""
# 'requires_source' is a wheel that depends on the 'source' project
script.pip('install', 'wheel')
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, 'requires_source',
)
wheel_file_name = 'source-1.0-py%s-none-any.whl' % pyversion[0]
wheel_file_path = script.scratch / 'wheelhouse' / wheel_file_name
assert wheel_file_path in result.files_created, result.stdout
assert "Successfully built source" in result.stdout, result.stdout
@pytest.mark.network
def test_pip_wheel_fail_cause_of_previous_build_dir(script, data):
"""
Test when 'pip wheel' tries to install a package that has a previous build
directory
"""
script.pip('install', 'wheel')
# Given that I have a previous build dir of the `simple` package
build = script.venv_path / 'build' / 'simple'
os.makedirs(build)
write_delete_marker_file(script.venv_path / 'build')
build.join('setup.py').write('#')
# When I call pip trying to install things again
result = script.pip(
'wheel', '--no-index', '--find-links=%s' % data.find_links,
'--build', script.venv_path / 'build',
'simple==3.0', expect_error=True,
)
# Then I see that the error code is the right one
assert result.returncode == PREVIOUS_BUILD_DIR_ERROR, result
|
mit
|
zqzhang/crosswalk-test-suite
|
apptools/apptools-ios-tests/apptools/build_arguments.py
|
15
|
1710
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<[email protected]>
import unittest
import os
import comm
import commands
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
dagwieers/ansible
|
test/sanity/code-smell/use-argspec-type-path.py
|
17
|
2237
|
#!/usr/bin/env python
import re
import sys
def main():
skip = set([
# add legitimate uses of expanduser to the following list
'lib/ansible/modules/cloud/docker/docker_container.py', # uses colon-separated paths, can't use type=path
'lib/ansible/modules/cloud/lxc/lxc_container.py',
'lib/ansible/modules/cloud/rackspace/rax_files_objects.py',
'lib/ansible/modules/database/mongodb/mongodb_parameter.py',
'lib/ansible/modules/database/mongodb/mongodb_replicaset.py',
'lib/ansible/modules/database/mongodb/mongodb_shard.py',
'lib/ansible/modules/database/mongodb/mongodb_user.py',
'lib/ansible/modules/database/postgresql/postgresql_db.py',
'lib/ansible/modules/files/synchronize.py',
'lib/ansible/modules/source_control/git.py',
'lib/ansible/modules/system/puppet.py',
'lib/ansible/modules/utilities/logic/async_status.py',
'lib/ansible/modules/utilities/logic/async_wrapper.py',
'lib/ansible/modules/web_infrastructure/ansible_tower/tower_host.py',
'lib/ansible/modules/web_infrastructure/ansible_tower/tower_group.py',
'lib/ansible/modules/web_infrastructure/jenkins_plugin.py',
'lib/ansible/modules/cloud/vmware/vmware_deploy_ovf.py',
'lib/ansible/modules/crypto/certificate_complete_chain.py', # would need something like type=list(path)
# fix uses of expanduser in the following modules and remove them from the following list
'lib/ansible/modules/cloud/rackspace/rax.py',
'lib/ansible/modules/cloud/rackspace/rax_scaling_group.py',
'lib/ansible/modules/files/archive.py',
'lib/ansible/modules/files/find.py',
])
for path in sys.argv[1:] or sys.stdin.read().splitlines():
if path in skip:
continue
with open(path, 'r') as path_fd:
for line, text in enumerate(path_fd.readlines()):
match = re.search(r'(expanduser)', text)
if match:
print('%s:%d:%d: use argspec type="path" instead of type="str" to avoid use of `expanduser`' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()
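# What the check asks modules to do instead (a sketch; option names are
# hypothetical): declare the option with type='path' so AnsibleModule expands
# '~' and environment variables itself, rather than the module calling
# os.path.expanduser().
#
# module = AnsibleModule(argument_spec=dict(dest=dict(type='path', required=True)))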
|
gpl-3.0
|
TanguyPatte/phantomjs-packaging
|
src/qt/qtwebkit/Source/WebInspectorUI/Scripts/jsmin.py
|
145
|
7659
|
#!/usr/bin/python
# This code is originally from jsmin by Douglas Crockford; it was translated to
# Python by Baruch Even. The original code had the following copyright and
# license.
#
# /* jsmin.c
# 2007-05-22
#
# Copyright (c) 2002 Douglas Crockford (www.crockford.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# The Software shall be used for Good, not Evil.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# */
from StringIO import StringIO
def jsmin(js):
ins = StringIO(js)
outs = StringIO()
JavascriptMinify().minify(ins, outs)
str = outs.getvalue()
if len(str) > 0 and str[0] == '\n':
str = str[1:]
return str
def isAlphanum(c):
"""return true if the character is a letter, digit, underscore,
dollar sign, or non-ASCII character.
"""
return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or
(c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\' or (c is not None and ord(c) > 126));
class UnterminatedComment(Exception):
pass
class UnterminatedStringLiteral(Exception):
pass
class UnterminatedRegularExpression(Exception):
pass
class JavascriptMinify(object):
def _outA(self):
self.outstream.write(self.theA)
def _outB(self):
self.outstream.write(self.theB)
def _get(self):
"""return the next character from stdin. Watch out for lookahead. If
the character is a control character, translate it to a space or
linefeed.
"""
c = self.theLookahead
self.theLookahead = None
if c is None:
c = self.instream.read(1)
if c >= ' ' or c == '\n':
return c
if c == '': # EOF
return '\000'
if c == '\r':
return '\n'
return ' '
def _peek(self):
self.theLookahead = self._get()
return self.theLookahead
def _next(self):
"""get the next character, excluding comments. peek() is used to see
if an unescaped '/' is followed by a '/' or '*'.
"""
c = self._get()
if c == '/' and self.theA != '\\':
p = self._peek()
if p == '/':
c = self._get()
while c > '\n':
c = self._get()
return c
if p == '*':
c = self._get()
while 1:
c = self._get()
if c == '*':
if self._peek() == '/':
self._get()
return ' '
if c == '\000':
raise UnterminatedComment()
return c
def _action(self, action):
"""do something! What you do is determined by the argument:
1 Output A. Copy B to A. Get the next B.
2 Copy B to A. Get the next B. (Delete A).
3 Get the next B. (Delete B).
action treats a string as a single character. Wow!
action recognizes a regular expression if it is preceded by ( or , or =.
"""
if action <= 1:
self._outA()
if action <= 2:
self.theA = self.theB
if self.theA == "'" or self.theA == '"':
while 1:
self._outA()
self.theA = self._get()
if self.theA == self.theB:
break
if self.theA <= '\n':
raise UnterminatedStringLiteral()
if self.theA == '\\':
self._outA()
self.theA = self._get()
if action <= 3:
self.theB = self._next()
if self.theB == '/' and (self.theA == '(' or self.theA == ',' or
self.theA == '=' or self.theA == ':' or
self.theA == '[' or self.theA == '?' or
self.theA == '!' or self.theA == '&' or
self.theA == '|' or self.theA == ';' or
self.theA == '{' or self.theA == '}' or
self.theA == '\n'):
self._outA()
self._outB()
while 1:
self.theA = self._get()
if self.theA == '/':
break
elif self.theA == '\\':
self._outA()
self.theA = self._get()
elif self.theA <= '\n':
raise UnterminatedRegularExpression()
self._outA()
self.theB = self._next()
def _jsmin(self):
"""Copy the input to the output, deleting the characters which are
insignificant to JavaScript. Comments will be removed. Tabs will be
replaced with spaces. Carriage returns will be replaced with linefeeds.
Most spaces and linefeeds will be removed.
"""
self.theA = '\n'
self._action(3)
while self.theA != '\000':
if self.theA == ' ':
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
elif self.theA == '\n':
if self.theB in ['{', '[', '(', '+', '-']:
self._action(1)
elif self.theB == ' ':
self._action(3)
else:
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
else:
if self.theB == ' ':
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
elif self.theB == '\n':
if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
self._action(1)
else:
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
else:
self._action(1)
def minify(self, instream, outstream):
self.instream = instream
self.outstream = outstream
self.theA = '\n'
self.theB = None
self.theLookahead = None
self._jsmin()
self.instream.close()
if __name__ == '__main__':
import sys
jsm = JavascriptMinify()
jsm.minify(sys.stdin, sys.stdout)
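# A minimal library-use sketch (the file name is hypothetical): jsmin() takes
# the JavaScript source as a string and returns the minified text, with
# comments stripped, insignificant whitespace removed, and any leading newline
# trimmed by the wrapper above.
#
# minified = jsmin(open("app.js").read())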
|
bsd-3-clause
|
obruns/gtest
|
test/gtest_break_on_failure_unittest.py
|
2140
|
7339
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
|
bsd-3-clause
|
synasius/django
|
django/db/migrations/migration.py
|
326
|
8023
|
from __future__ import unicode_literals
from django.db.transaction import atomic
from django.utils.encoding import python_2_unicode_compatible
from .exceptions import IrreversibleError
@python_2_unicode_compatible
class Migration(object):
"""
The base class for all migrations.
Migration files will import this from django.db.migrations.Migration
and subclass it as a class called Migration. It will have one or more
of the following attributes:
- operations: A list of Operation instances, probably from django.db.migrations.operations
- dependencies: A list of tuples of (app_path, migration_name)
- run_before: A list of tuples of (app_path, migration_name)
- replaces: A list of migration_names
Note that all migrations come out of migrations and into the Loader or
Graph as instances, having been initialized with their app label and name.
"""
# Operations to apply during this migration, in order.
operations = []
# Other migrations that should be run before this migration.
# Should be a list of (app, migration_name).
dependencies = []
# Other migrations that should be run after this one (i.e. have
# this migration added to their dependencies). Useful to make third-party
# apps' migrations run after your AUTH_USER replacement, for example.
run_before = []
# Migration names in this app that this migration replaces. If this is
# non-empty, this migration will only be applied if all these migrations
# are not applied.
replaces = []
# Is this an initial migration? Initial migrations are skipped on
# --fake-initial if the table or fields already exist. If None, whether the
# migration has any dependencies is used to decide whether db introspection
# needs to be done. If True, always perform introspection. If False, never
# perform introspection.
initial = None
def __init__(self, name, app_label):
self.name = name
self.app_label = app_label
# Copy dependencies & other attrs as we might mutate them at runtime
self.operations = list(self.__class__.operations)
self.dependencies = list(self.__class__.dependencies)
self.run_before = list(self.__class__.run_before)
self.replaces = list(self.__class__.replaces)
def __eq__(self, other):
if not isinstance(other, Migration):
return False
return (self.name == other.name) and (self.app_label == other.app_label)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "<Migration %s.%s>" % (self.app_label, self.name)
def __str__(self):
return "%s.%s" % (self.app_label, self.name)
def __hash__(self):
return hash("%s.%s" % (self.app_label, self.name))
def mutate_state(self, project_state, preserve=True):
"""
Takes a ProjectState and returns a new one with the migration's
operations applied to it. Preserves the original object state by
default and will return a mutated state from a copy.
"""
new_state = project_state
if preserve:
new_state = project_state.clone()
for operation in self.operations:
operation.state_forwards(self.app_label, new_state)
return new_state
def apply(self, project_state, schema_editor, collect_sql=False):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
in a forwards order.
Returns the resulting project state for efficient re-use by following
Migrations.
"""
for operation in self.operations:
# If this operation cannot be represented as SQL, place a comment
# there instead
if collect_sql:
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:"
)
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
continue
# Save the state before the operation has run
old_state = project_state.clone()
operation.state_forwards(self.app_label, project_state)
# Run the operation
if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
# We're forcing a transaction on a non-transactional-DDL backend
with atomic(schema_editor.connection.alias):
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
else:
# Normal behaviour
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
return project_state
def unapply(self, project_state, schema_editor, collect_sql=False):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
in a reverse order.
The backwards migration process consists of two phases:
1. The intermediate states from right before the first until right
after the last operation inside this migration are preserved.
2. The operations are applied in reverse order using the states
recorded in step 1.
"""
# Construct all the intermediate states we need for a reverse migration
to_run = []
new_state = project_state
# Phase 1
for operation in self.operations:
# If it's irreversible, error out
if not operation.reversible:
raise IrreversibleError("Operation %s in %s is not reversible" % (operation, self))
# Preserve the new state from the previous iteration so the same state
# object is not mutated across all operations
new_state = new_state.clone()
old_state = new_state.clone()
operation.state_forwards(self.app_label, new_state)
to_run.insert(0, (operation, old_state, new_state))
# Phase 2
for operation, to_state, from_state in to_run:
if collect_sql:
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:"
)
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
continue
if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
# We're forcing a transaction on a non-transactional-DDL backend
with atomic(schema_editor.connection.alias):
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
else:
# Normal behaviour
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
return project_state
class SwappableTuple(tuple):
"""
Subclass of tuple so Django can tell this was originally a swappable
dependency when it reads the migration file.
"""
def __new__(cls, value, setting):
self = tuple.__new__(cls, value)
self.setting = setting
return self
def swappable_dependency(value):
"""
Turns a setting value into a dependency.
"""
return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
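# For illustration: swappable_dependency("auth.User") returns
# SwappableTuple(("auth", "__first__")) whose .setting is "auth.User", so the
# loader can re-point the dependency when the swappable setting changes.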
|
bsd-3-clause
|
shikigit/python-phonenumbers
|
python/phonenumbers/data/region_HT.py
|
5
|
1618
|
"""Auto-generated file, do not edit by hand. HT metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HT = PhoneMetadata(id='HT', country_code=509, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-489]\\d{7}', possible_number_pattern='\\d{8}'),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:[24]\\d|5[1-5]|94)\\d{5}', possible_number_pattern='\\d{8}', example_number='22453300'),
mobile=PhoneNumberDesc(national_number_pattern='(?:3[1-9]|4\\d)\\d{6}', possible_number_pattern='\\d{8}', example_number='34101234'),
toll_free=PhoneNumberDesc(national_number_pattern='8\\d{7}', possible_number_pattern='\\d{8}', example_number='80012345'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='98[89]\\d{5}', possible_number_pattern='\\d{8}', example_number='98901234'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{4})', format='\\1 \\2 \\3')])
|
apache-2.0
|
androidarmv6/android_external_chromium
|
testing/gtest/test/gtest_help_test.py
|
2968
|
5856
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
The right message must be printed and the tests must be
skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
|
bsd-3-clause
|
findayu/picasso-graphic
|
tools/gyp/pylib/gyp/generator/gypsh.py
|
2779
|
1665
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
|
bsd-3-clause
|
precompiler/python-101
|
mastering-python/ch05/Decorator.py
|
1
|
1096
|
import functools
def logParams(function):
@functools.wraps(function) # use this to prevent loss of function attributes
def wrapper(*args, **kwargs):
print("function: {}, args: {}, kwargs: {}".format(function.__name__, args, kwargs))
return function(*args, **kwargs)
return wrapper
def add(a, b):
return a + b
@logParams
def mul(a, b):
return a * b
add(1, 1)
mul(2, 2)
def memo(function):
function.cache = dict()
@functools.wraps(function)
def wrapper(*args):
if args not in function.cache:
function.cache[args] = function(*args)
return function.cache[args]
return wrapper
@memo
def fib(n):
if n < 2:
return n
else:
return fib(n - 1) + fib(n - 2)
for i in range(1, 10):
print("fib{}:{}".format(i, fib(i)))
def trace(func):
@functools.wraps(func)
def _trace(self, *args):
print("Invoking {} - {}".format(self, args))
func(self, *args)
return _trace
class FooBar:
@trace
def dummy(self, s):
print(s)
fb = FooBar()
fb.dummy("Hello")
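# A small check of what functools.wraps preserves: without it, the decorated
# functions above would report __name__ == "wrapper" instead of their own names.
print(mul.__name__) # prints "mul" because @functools.wraps copied it over
print(fib.__name__) # prints "fib" for the memoized function as well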
|
apache-2.0
|
jsteemann/arangodb
|
3rdParty/V8-4.3.61/tools/testrunner/objects/output.py
|
105
|
2443
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import signal
from ..local import utils
class Output(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.exit_code and not (0x3FFFFF00 & self.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.timed_out:
return False
return (self.exit_code < 0 and
self.exit_code != -signal.SIGABRT)
def HasTimedOut(self):
return self.timed_out
def Pack(self):
return [self.exit_code, self.timed_out, self.stdout, self.stderr]
@staticmethod
def Unpack(packed):
# For the order of the fields, refer to Pack() above.
return Output(packed[0], packed[1], packed[2], packed[3])
|
apache-2.0
|
stevenewey/wagtail
|
wagtail/wagtailsearch/migrations/0001_initial.py
|
37
|
2062
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0002_initial_data'),
]
operations = [
migrations.CreateModel(
name='EditorsPick',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('sort_order', models.IntegerField(blank=True, null=True, editable=False)),
('description', models.TextField(blank=True)),
('page', models.ForeignKey(to='wagtailcore.Page')),
],
options={
'ordering': ('sort_order',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Query',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('query_string', models.CharField(unique=True, max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='QueryDailyHits',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('date', models.DateField()),
('hits', models.IntegerField(default=0)),
('query', models.ForeignKey(to='wagtailsearch.Query', related_name='daily_hits')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='querydailyhits',
unique_together=set([('query', 'date')]),
),
migrations.AddField(
model_name='editorspick',
name='query',
field=models.ForeignKey(to='wagtailsearch.Query', related_name='editors_picks'),
preserve_default=True,
),
]
|
bsd-3-clause
|
follow99/django
|
tests/admin_checks/models.py
|
281
|
1836
|
"""
Tests of ModelAdmin system checks logic.
"""
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Album(models.Model):
title = models.CharField(max_length=150)
@python_2_unicode_compatible
class Song(models.Model):
title = models.CharField(max_length=150)
album = models.ForeignKey(Album, models.CASCADE)
original_release = models.DateField(editable=False)
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
def readonly_method_on_model(self):
# does nothing
pass
class TwoAlbumFKAndAnE(models.Model):
album1 = models.ForeignKey(Album, models.CASCADE, related_name="album1_set")
album2 = models.ForeignKey(Album, models.CASCADE, related_name="album2_set")
e = models.CharField(max_length=1)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
subtitle = models.CharField(max_length=100)
price = models.FloatField()
authors = models.ManyToManyField(Author, through='AuthorsBooks')
class AuthorsBooks(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
book = models.ForeignKey(Book, models.CASCADE)
featured = models.BooleanField()
class State(models.Model):
name = models.CharField(max_length=15)
class City(models.Model):
state = models.ForeignKey(State, models.CASCADE)
class Influence(models.Model):
name = models.TextField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
|
bsd-3-clause
|
proxysh/Safejumper-for-Mac
|
buildlinux/env64/lib/python2.7/site-packages/twisted/internet/gtk2reactor.py
|
23
|
3614
|
# -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the glib/gtk2
mainloop.
In order to use this support, simply do the following::
from twisted.internet import gtk2reactor
gtk2reactor.install()
Then use twisted.internet APIs as usual. The other methods here are not
intended to be called directly.
"""
# System Imports
import sys
# Twisted Imports
from twisted.internet import _glibbase
from twisted.python import runtime
# Certain old versions of pygtk and gi crash if imported at the same
# time. This is a problem when running Twisted's unit tests, since they will
# attempt to run both gtk2 and gtk3/gi tests. However, gireactor makes sure
# that if we are in such an old version, and gireactor was imported,
# gtk2reactor will not be importable. So we don't *need* to enforce that here
# as well; whichever is imported first will still win. Moreover, additional
# enforcement in this module is unnecessary in modern versions, and downright
# problematic in certain versions where for some reason importing gtk also
# imports some subset of gi. So we do nothing here, relying on gireactor to
# prevent the crash.
try:
if not hasattr(sys, 'frozen'):
# Don't want to check this for py2exe
import pygtk
pygtk.require('2.0')
except (ImportError, AttributeError):
pass # maybe we're using pygtk before this hack existed.
import gobject
if hasattr(gobject, "threads_init"):
# recent versions of python-gtk expose this. python-gtk=2.4.1
# (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
# glib-2.2.3) does not.
gobject.threads_init()
class Gtk2Reactor(_glibbase.GlibReactorBase):
"""
PyGTK+ 2 event loop reactor.
"""
_POLL_DISCONNECTED = gobject.IO_HUP | gobject.IO_ERR | gobject.IO_NVAL
_POLL_IN = gobject.IO_IN
_POLL_OUT = gobject.IO_OUT
# glib's iochannel sources won't tell us about any events that we haven't
# asked for, even if those events aren't sensible inputs to the poll()
# call.
INFLAGS = _POLL_IN | _POLL_DISCONNECTED
OUTFLAGS = _POLL_OUT | _POLL_DISCONNECTED
def __init__(self, useGtk=True):
_gtk = None
if useGtk is True:
import gtk as _gtk
_glibbase.GlibReactorBase.__init__(self, gobject, _gtk, useGtk=useGtk)
class PortableGtkReactor(_glibbase.PortableGlibReactorBase):
"""
Reactor that works on Windows.
Sockets aren't supported by GTK+'s input_add on Win32.
"""
def __init__(self, useGtk=True):
_gtk = None
if useGtk is True:
import gtk as _gtk
_glibbase.PortableGlibReactorBase.__init__(self, gobject, _gtk,
useGtk=useGtk)
def install(useGtk=True):
"""
Configure the twisted mainloop to be run inside the gtk mainloop.
@param useGtk: should glib rather than GTK+ event loop be
used (this will be slightly faster but does not support GUI).
"""
reactor = Gtk2Reactor(useGtk)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
def portableInstall(useGtk=True):
"""
Configure the twisted mainloop to be run inside the gtk mainloop.
"""
reactor = PortableGtkReactor()
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
if runtime.platform.getType() == 'posix':
install = install
else:
install = portableInstall
__all__ = ['install']
|
gpl-2.0
|
gcodetogit/depot_tools
|
third_party/coverage/__init__.py
|
208
|
4505
|
"""Code coverage measurement for Python.
Ned Batchelder
http://nedbatchelder.com/code/coverage
"""
from coverage.version import __version__, __url__
from coverage.control import coverage, process_startup
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability, so the current api uses
# explicitly-created coverage objects. But for backward compatibility, here we
# define the top-level functions to create the singleton when they are first
# called.
# Singleton object for use with module-level functions. The singleton is
# created as needed when one of the module-level functions is called.
_the_coverage = None
def _singleton_method(name):
"""Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called.
"""
# Disable pylint msg W0612, because a bunch of variables look unused, but
# they're accessed via locals().
# pylint: disable=W0612
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
import inspect
meth = getattr(coverage, name)
args, varargs, kw, defaults = inspect.getargspec(meth)
argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
docstring = meth.__doc__
wrapper.__doc__ = ("""\
A first-use-singleton wrapper around coverage.%(name)s.
This wrapper is provided for backward compatibility with legacy code.
New code should use coverage.%(name)s directly.
%(name)s%(argspec)s:
%(docstring)s
""" % locals()
)
return wrapper
# Define the module-level functions.
use_cache = _singleton_method('use_cache')
start = _singleton_method('start')
stop = _singleton_method('stop')
erase = _singleton_method('erase')
exclude = _singleton_method('exclude')
analysis = _singleton_method('analysis')
analysis2 = _singleton_method('analysis2')
report = _singleton_method('report')
annotate = _singleton_method('annotate')
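# A minimal sketch (an assumption, not taken from the upstream docs) of how
# legacy code is expected to use the module-level functions defined above; the
# first call lazily creates the coverage(auto_data=True) singleton:
#
#     import coverage
#     coverage.start()     # creates the singleton and starts measurement
#     import my_module     # hypothetical module whose execution gets measured
#     coverage.stop()
#     coverage.report()    # report on the data the singleton collected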
# On Windows, we encode and decode deep enough that something goes wrong and
# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
# Adding a reference here prevents it from being unloaded. Yuk.
import encodings.utf_8
# Because of the "from coverage.control import fooey" lines at the top of the
# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
# This makes some inspection tools (like pydoc) unable to find the class
# coverage.coverage. So remove that entry.
import sys
try:
del sys.modules['coverage.coverage']
except KeyError:
pass
# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2013 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
|
bsd-3-clause
|
OnizukaLab/SimpleSeq2Seq
|
interpreter.py
|
1
|
7157
|
# -*- coding:utf-8 -*-
import os
os.environ["CHAINER_TYPE_CHECK"] = "0"
import argparse
import unicodedata
import pickle
import numpy as np
import matplotlib.pyplot as plt
from nltk import word_tokenize
from chainer import serializers, cuda
from util import ConvCorpus, JaConvCorpus
from seq2seq import Seq2Seq
# path info
DATA_DIR = './data/corpus/'
MODEL_PATH = './data/9.model'
TRAIN_LOSS_PATH = './data/loss_train_data.pkl'
TEST_LOSS_PATH = './data/loss_test_data.pkl'
BLEU_SCORE_PATH = './data/bleu_score_data.pkl'
WER_SCORE_PATH = './data/wer_score_data.pkl'
# parse command line args
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=-1, type=int, help='GPU ID (negative value indicates CPU)')
parser.add_argument('--feature_num', '-f', default=1024, type=int, help='dimension of feature layer')
parser.add_argument('--hidden_num', '-hi', default=1024, type=int, help='dimension of hidden layer')
parser.add_argument('--bar', '-b', default=0, type=int, help='whether to show the graph of loss values or not')
parser.add_argument('--lang', '-l', default='en', type=str, help='the choice of a language (Japanese "ja" or English "en")')
args = parser.parse_args()
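# Example invocation (a sketch; it assumes the default paths above exist, i.e. a
# trained './data/9.model' and a saved corpus under './data/corpus/'; --gpu
# defaults to -1, which means CPU):
#
#     python interpreter.py --lang en --bar 1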
# GPU settings
gpu_device = args.gpu
if args.gpu >= 0:
cuda.check_cuda_available()
cuda.get_device(gpu_device).use()
def parse_ja_text(text):
"""
Function to parse Japanese text.
    :param text: string: a sentence written in Japanese
    :return: list: the parsed (tokenized) words
"""
import MeCab
mecab = MeCab.Tagger("mecabrc")
mecab.parse('')
    # list up the surface form of every token
mecab_result = mecab.parseToNode(text)
parse_list = []
while mecab_result is not None:
if mecab_result.surface != "": # ヘッダとフッタを除外
parse_list.append(unicodedata.normalize('NFKC', mecab_result.surface).lower())
mecab_result = mecab_result.next
return parse_list
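# Example (a sketch; the exact tokens depend on the MeCab dictionary that is
# installed, so the output below is only illustrative):
#
#     parse_ja_text('今日は良い天気です')
#     # -> something like ['今日', 'は', '良い', '天気', 'です']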
def interpreter(data_path, model_path):
"""
    Run this function if you want to talk to the seq2seq model.
    Type "exit" to end the conversation.
    :param data_path: the path of the corpus the model was trained on
    :param model_path: the path of the trained model
    :return:
"""
# call dictionary class
if args.lang == 'en':
corpus = ConvCorpus(file_path=None)
corpus.load(load_dir=data_path)
elif args.lang == 'ja':
corpus = JaConvCorpus(file_path=None)
corpus.load(load_dir=data_path)
else:
        print('Invalid language argument. Use "ja" for Japanese or "en" for English.')
        raise ValueError('unsupported --lang value: {}'.format(args.lang))
print('Vocabulary Size (number of words) :', len(corpus.dic.token2id))
print('')
# rebuild seq2seq model
model = Seq2Seq(len(corpus.dic.token2id), feature_num=args.feature_num,
hidden_num=args.hidden_num, batch_size=1, gpu_flg=args.gpu)
serializers.load_hdf5(model_path, model)
# run conversation system
print('The system is ready to run, please talk to me!')
    print('(If you want to end the conversation, type "exit".)')
print('')
while True:
print('>> ', end='')
sentence = input()
if sentence == 'exit':
print('See you again!')
break
if args.lang == 'en':
input_vocab = [unicodedata.normalize('NFKC', word.lower()) for word in word_tokenize(sentence)]
elif args.lang == 'ja':
input_vocab = parse_ja_text(sentence)
input_vocab.reverse()
input_vocab.insert(0, "<eos>")
# convert word into ID
        input_sentence = [corpus.dic.token2id[word] for word in input_vocab if corpus.dic.token2id.get(word) is not None]
model.initialize() # initialize cell
sentence = model.generate(input_sentence, sentence_limit=len(input_sentence) + 30,
word2id=corpus.dic.token2id, id2word=corpus.dic)
print("-> ", sentence)
print('')
def test_run(data_path, model_path, n_show=10):
"""
    Test function.
    The input is the training data; the output should reproduce the reference
    sentences that were the correct answers during the training phase.
:return:
"""
corpus = ConvCorpus(file_path=None)
corpus.load(load_dir=data_path)
print('Vocabulary Size (number of words) :', len(corpus.dic.token2id))
print('')
# rebuild seq2seq model
model = Seq2Seq(len(corpus.dic.token2id), feature_num=args.feature_num,
hidden_num=args.hidden_num, batch_size=1, gpu_flg=args.gpu)
serializers.load_hdf5(model_path, model)
# run an interpreter
for num, input_sentence in enumerate(corpus.posts):
id_sequence = input_sentence.copy()
input_sentence.reverse()
input_sentence.insert(0, corpus.dic.token2id["<eos>"])
model.initialize() # initialize cell
sentence = model.generate(input_sentence, sentence_limit=len(input_sentence) + 30,
word2id=corpus.dic.token2id, id2word=corpus.dic)
print("teacher : ", " ".join([corpus.dic[w_id] for w_id in id_sequence]))
print("correct :", " ".join([corpus.dic[w_id] for w_id in corpus.cmnts[num]]))
print("-> ", sentence)
print('')
if num == n_show:
break
def show_chart(train_loss_path, test_loss_path):
"""
    Show the graph of loss values for each epoch
"""
with open(train_loss_path, mode='rb') as f:
train_loss_data = np.array(pickle.load(f))
with open(test_loss_path, mode='rb') as f:
test_loss_data = np.array(pickle.load(f))
row = len(train_loss_data)
loop_num = np.array([i + 1 for i in range(row)])
plt.plot(loop_num, train_loss_data, label="Train Loss Value", color="gray")
plt.plot(loop_num, test_loss_data, label="Test Loss Value", color="green")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(loc=2)
plt.title("Learning Rate of Seq2Seq Model")
plt.show()
def show_bleu_chart(bleu_score_path):
"""
    Show the graph of BLEU scores for each epoch
"""
with open(bleu_score_path, mode='rb') as f:
bleu_score_data = np.array(pickle.load(f))
row = len(bleu_score_data)
loop_num = np.array([i + 1 for i in range(row)])
    plt.plot(loop_num, bleu_score_data, label="BLEU score", color="blue")
plt.xlabel("Epoch")
plt.ylabel("BLEU")
plt.legend(loc=2)
plt.title("BLEU score of Seq2Seq Model")
plt.show()
def show_wer_chart(wer_score_path):
"""
    Show the graph of WER scores for each epoch
"""
with open(wer_score_path, mode='rb') as f:
wer_score_data = np.array(pickle.load(f))
row = len(wer_score_data)
loop_num = np.array([i + 1 for i in range(row)])
plt.plot(loop_num, wer_score_data, label="WER score", color="red")
plt.xlabel("Epoch")
plt.ylabel("WER")
plt.legend(loc=2)
plt.title("WER score of Seq2Seq Model")
plt.show()
if __name__ == '__main__':
interpreter(DATA_DIR, MODEL_PATH)
test_run(DATA_DIR, MODEL_PATH)
if args.bar:
show_chart(TRAIN_LOSS_PATH, TEST_LOSS_PATH)
show_bleu_chart(BLEU_SCORE_PATH)
show_wer_chart(WER_SCORE_PATH)
|
mit
|
EricMuller/mynotes-backend
|
requirements/twisted/Twisted-17.1.0/src/twisted/test/stdio_test_lastwrite.py
|
14
|
1206
|
# -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTests.test_lastWriteReceived -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTests.test_lastWriteReceived}
to test that L{os.write} can be reliably used after
L{twisted.internet.stdio.StandardIO} has finished.
"""
from __future__ import absolute_import, division
import sys
from twisted.internet.protocol import Protocol
from twisted.internet.stdio import StandardIO
from twisted.python.reflect import namedAny
class LastWriteChild(Protocol):
def __init__(self, reactor, magicString):
self.reactor = reactor
self.magicString = magicString
def connectionMade(self):
self.transport.write(self.magicString)
self.transport.loseConnection()
def connectionLost(self, reason):
self.reactor.stop()
def main(reactor, magicString):
p = LastWriteChild(reactor, magicString.encode('ascii'))
StandardIO(p)
reactor.run()
if __name__ == '__main__':
namedAny(sys.argv[1]).install()
from twisted.internet import reactor
main(reactor, sys.argv[2])
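# Usage sketch (an assumption based on the docstring above, not the exact test
# harness command line): the parent test spawns this script with the reactor to
# install and the magic string to echo, roughly:
#
#     python stdio_test_lastwrite.py twisted.internet.selectreactor SomeMagicString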
|
mit
|