code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M)
---|---|---|---|---|---
#!/usr/bin/env python
"""Microbenchmark for function call overhead.
This measures simple function calls that are not methods, do not use varargs or
kwargs, and do not use tuple unpacking.
"""
# Python imports
#import optparse
import time
# Local imports
import util
def foo(a, b, c, d):
# 20 calls
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
bar(a, b, c)
def bar(a, b, c):
# 20 calls
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
baz(a, b)
def baz(a, b):
# 20 calls
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
quux(a)
def quux(a):
# 20 calls
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
qux()
def qux():
pass
def test_calls(iterations):
from browser import console
times = []
for _i in range(iterations):
console.log("iteration: %s" % _i)
t0 = time.time()
        # 60 calls
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
foo(1, 2, 3, 4)
t1 = time.time()
times.append(t1 - t0)
return times
def run(num_runs=100, take_geo_mean=True):
return util.run_benchmark(take_geo_mean, num_runs, test_calls)
#if __name__ == "__main__":
# parser = optparse.OptionParser(
# usage="%prog [options] [test]",
# description=("Test the performance of simple Python-to-Python function"
# " calls."))
# util.add_standard_options_to(parser)
# options, _ = parser.parse_args()
# # Priming run.
# test_calls(1)
# util.run_benchmark(options, options.num_runs, test_calls)
| kikocorreoso/brython | www/benchmarks/performance/bm_call_simple.py | Python | bsd-3-clause | 3,712 |
from __future__ import print_function
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
# Mail-related stuff
from builtins import str
from builtins import object
import smtplib
import time
import json
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from sibispy import sibislogger as slog
class sibis_email(object):
""" Class handling email communication with XNAT users and admin."""
# Initialize class.
def __init__(self, smtp_server, admin_email, sibis_admin_email = None):
self._sibis_admin_email = sibis_admin_email
self._admin_email = admin_email
self._admin_messages = []
self._messages_by_user = dict()
self._smtp_server = smtp_server
# Add to the message building up for a specific user
def add_user_message( self, uid, txt, uFirstName=None, uLastName=None, uEmail=None):
if uid not in self._messages_by_user:
self._messages_by_user[uid] = {'uFirst': uFirstName, 'uLast': uLastName, 'uEmail' : uEmail, 'msgList' : [txt] }
else:
self._messages_by_user[uid]['msgList'].append( txt )
# Add to the message building up for the admin
def add_admin_message( self, msg ):
self._admin_messages.append( msg )
# Send pre-formatted mail message
def send( self, subject, from_email, to_email, html, sendToAdminFlag=True ):
if not self._smtp_server :
slog.info("sibis_email.send","ERROR: smtp server not defined - email will not be sent!")
return False
if not to_email :
slog.info("sibis_email.send","ERROR: no email address for recipient defined - email will not be sent!")
return False
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = from_email
msg['To'] = ', '.join( to_email )
# Record the MIME types of both parts - text/plain and text/html.
text = ''
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
msg.attach(part2)
# Send the message via local SMTP server.
try :
s = smtplib.SMTP( self._smtp_server )
except Exception as err_msg:
slog.info("sibis_email.send","ERROR: failed to connect to SMTP server at {} ".format(time.asctime()),
err_msg = str(err_msg),
smtp_server = self._smtp_server)
return False
try :
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
s.sendmail( from_email, to_email, msg.as_string() )
# Send email also to sibis admin if defined
if sendToAdminFlag and self._sibis_admin_email and to_email != self._sibis_admin_email :
s.sendmail( from_email, self._sibis_admin_email, msg.as_string() )
except Exception as err_msg:
slog.info("sibis_email.send","ERROR: failed to send email at {} ".format(time.asctime()),
err_msg = str(err_msg),
email_from = from_email,
email_to = to_email,
sibis_admin_email = self._sibis_admin_email,
email_msg = msg.as_string(),
smtp_server = self._smtp_server)
s.quit()
return False
s.quit()
return True
# Send mail to one user
def mail_user( self, uEmail, uFirst, uLast, title, intro_text, prolog, msglist ):
problem_list = [ '<ol>' ]
for m in msglist:
problem_list.append( '<li>%s</li>' % m )
problem_list.append( '</ol>' )
# Create the body of the message (a plain-text and an HTML version).
html = '<html>\n <head></head>\n <body>\n <p>Dear %s %s:<br><br>\n' %(uFirst,uLast) + intro_text + '\n %s\n' % ('\n'.join( problem_list ) ) + prolog + '\n</p>\n</body>\n</html>'
self.send( title, self._admin_email, [ uEmail ], html )
def mail_admin( self, title, intro_text):
problem_list = []
if len( self._messages_by_user ) > 0:
problem_list.append( '<ul>' )
for (uid,info_msglist) in self._messages_by_user.items():
problem_list.append( '<li>User: %s %s (%s)</li>' % (info_msglist['uFirst'],info_msglist['uLast'],info_msglist['uEmail']) )
problem_list.append( '<ol>' )
for m in info_msglist['msgList']:
problem_list.append( '<li>%s</li>' % m )
problem_list.append( '</ol>' )
problem_list.append( '</ul>' )
if len( self._admin_messages ) > 0:
problem_list.append( '<ol>' )
for m in self._admin_messages:
problem_list.append( '<li>%s</li>' % m )
problem_list.append( '</ol>' )
text = ''
# Create the body of the message (a plain-text and an HTML version).
html = '<html>\n\
<head></head>\n\
<body>\n' + intro_text + '\n %s\n\
</p>\n\
</body>\n\
</html>' % ('\n'.join( problem_list ))
self.send(title, self._admin_email, [ self._admin_email ], html )
def send_all( self, title, uIntro_txt, uProlog, aIntro_txt ):
# Run through list of messages by user
if len( self._messages_by_user ):
for (uid,uInfo_msg) in self._messages_by_user.items():
self.mail_user(uInfo_msg['uEmail'],uInfo_msg['uFirst'],uInfo_msg['uLast'], title, uIntro_txt, uProlog, uInfo_msg['msgList'])
if len( self._messages_by_user ) or len( self._admin_messages ):
self.mail_admin(title, aIntro_txt)
def dump_all( self ):
print("USER MESSAGES:")
print(self._messages_by_user)
print("ADMIN_MESSAGES:")
print(self._admin_messages)
class xnat_email(sibis_email):
def __init__(self, session):
self._interface = session.api['xnat']
if self._interface :
try:
# XNAT 1.7
server_config = self._interface.client.get('/xapi/siteConfig').json()
except Exception as ex:
# XNAT 1.6
server_config = self._interface._get_json('/data/services/settings')
self._site_url = server_config[u'siteUrl']
self._site_name = server_config[u'siteId']
sibis_email.__init__(self,server_config[u'smtp_host'],server_config[u'adminEmail'],session.get_email())
else:
slog.info('xnat_email.__init__',"ERROR: xnat api is not defined")
self._site_url = None
self._site_name = None
sibis_email.__init__(self,None,None,session.get_email())
self._project_name = session.get_project_name()
# Determine server config to get admin email and public URL
def add_user_message( self, uname, msg ):
if uname not in self._messages_by_user:
try:
user = self._interface.client.users[uname]
uEmail = user.email
user_firstname = user.first_name
user_lastname = user.last_name
except:
slog.info('xnat_email.add_user_message',"ERROR: failed to get detail information for user " + str(uname) + " at {}".format(time.asctime()),
msg = str(msg))
return False
sibis_email.add_user_message(self,uname,msg,user_firstname,user_lastname,uEmail)
else:
sibis_email.add_user_message(self,uname,msg)
return True
def mail_user( self, uEmail, uFirst, uLast, msglist ):
intro='We have detected the following problem(s) with data you uploaded to the <a href="%s">%s XNAT image repository</a>:' % (self._site_url, self._site_name)
prolog='Please address these issues as soon as possible (direct links to the respective data items are provided above for your convenience). If you have further questions, feel free to contact the <a href="mailto:%s">%s support</a>' % (self._admin_email, self._project_name )
title="%s XNAT: problems with your uploaded data" % ( self._project_name )
sibis_email.mail_user(self,uEmail, uFirst, uLast, title, intro, prolog, msglist)
def mail_admin(self):
title = "$s: %s XNAT problem update" % (self._project_name, self._site_name)
intro_text = 'We have detected the following problem(s) with data on <a href="%s">%s XNAT image repository</a>:' % (self._site_url, self._project_name)
sibis_email.mail_admin(self,title, intro_text)
def send_all( self ):
# Run through list of messages by user
if len( self._messages_by_user ):
for (uname,info_msglist) in self._messages_by_user.items():
self.mail_user(info_msglist['uEmail'], info_msglist['uFirst'], info_msglist['uLast'], info_msglist['msgList'])
if len( self._messages_by_user ) or len( self._admin_messages ):
            self.mail_admin()
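# Hedged usage sketch: the server, user, and addresses below are made up and
# only illustrate the call order; dump_all() is used so no mail is sent.
if __name__ == "__main__":
    demo = sibis_email('smtp.example.com', 'admin@example.com')
    demo.add_user_message('jdoe', 'Missing scan for subject X',
                          'Jane', 'Doe', 'jdoe@example.com')
    demo.add_admin_message('Nightly check found 1 issue')
    demo.dump_all()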
| sibis-platform/sibispy | sibis_email.py | Python | bsd-3-clause | 9,495 |
"""
=============================
Straight line Hough transform
=============================
The Hough transform in its simplest form is a method to detect straight lines.
In the following example, we construct an image with a line intersection. We
then use the `Hough transform <http://en.wikipedia.org/wiki/Hough_transform>`__
to explore a parameter space for straight lines that may run through the image.
Algorithm overview
------------------
Usually, lines are parameterised as :math:`y = mx + c`, with a gradient
:math:`m` and y-intercept :math:`c`. However, this would mean that :math:`m` goes to
infinity for vertical lines. Instead, we therefore construct a segment
perpendicular to the line, leading to the origin. The line is represented by
the length of that segment, :math:`r`, and the angle it makes with the x-axis,
:math:`\\theta`.
The Hough transform constructs a histogram array representing the parameter
space (i.e., an :math:`M \\times N` matrix, for :math:`M` different values of
the radius and :math:`N` different values of :math:`\\theta`). For each
parameter combination, :math:`r` and :math:`\\theta`, we then find the number of
non-zero pixels in the input image that would fall close to the corresponding
line, and increment the array at position :math:`(r, \\theta)` appropriately.
We can think of each non-zero pixel "voting" for potential line candidates. The
local maxima in the resulting histogram indicate the parameters of the most
probable lines. In our example, the maxima occur at 45 and 135 degrees,
corresponding to the normal vector angles of each line.
Another approach is the Progressive Probabilistic Hough Transform [1]_. It is
based on the assumption that using a random subset of voting points gives a good
approximation to the actual result, and that lines can be extracted during the
voting process by walking along connected components. This returns the
beginning and end of each line segment, which is useful.
The function `probabilistic_hough_line` has three parameters: a general
threshold that is applied to the Hough accumulator, a minimum line length, and
the line gap that influences line merging. In the example below, we find lines
at least 5 pixels long with a gap of less than 3 pixels, using an accumulator
threshold of 10.
References
----------
.. [1] C. Galamhos, J. Matas and J. Kittler,"Progressive probabilistic
Hough transform for line detection", in IEEE Computer Society
Conference on Computer Vision and Pattern Recognition, 1999.
.. [2] Duda, R. O. and P. E. Hart, "Use of the Hough Transformation to
Detect Lines and Curves in Pictures," Comm. ACM, Vol. 15,
pp. 11-15 (January, 1972)
"""
from matplotlib import cm
from skimage.transform import (hough_line, hough_line_peaks,
probabilistic_hough_line)
from skimage.feature import canny
from skimage import data
import numpy as np
import matplotlib.pyplot as plt
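# A minimal sketch of the voting step described in the docstring above.
# ``hough_vote_sketch`` is a hypothetical helper written for illustration
# only; it is not part of the scikit-image API and is not called below.
def hough_vote_sketch(binary_image, n_thetas=180):
    """Accumulate Hough votes in (distance, angle) space for non-zero pixels."""
    thetas = np.linspace(-np.pi / 2, np.pi / 2, n_thetas, endpoint=False)
    max_r = int(np.ceil(np.hypot(*binary_image.shape)))
    accumulator = np.zeros((2 * max_r + 1, n_thetas), dtype=np.uint64)
    ys, xs = np.nonzero(binary_image)
    for x, y in zip(xs, ys):
        # Each non-zero pixel votes for every (r, theta) pair consistent with
        # a line through it: r = x*cos(theta) + y*sin(theta).
        rs = np.round(x * np.cos(thetas) + y * np.sin(thetas)).astype(int)
        accumulator[rs + max_r, np.arange(n_thetas)] += 1
    return accumulator, thetas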
# Constructing test image.
image = np.zeros((100, 100))
idx = np.arange(25, 75)
image[idx[::-1], idx] = 255
image[idx, idx] = 255
# Classic straight-line Hough transform.
h, theta, d = hough_line(image)
# Generating figure 1.
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(12, 6))
plt.tight_layout()
ax0.imshow(image, cmap=cm.gray)
ax0.set_title('Input image')
ax0.set_axis_off()
ax1.imshow(np.log(1 + h), extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]),
d[-1], d[0]], cmap=cm.gray, aspect=1/1.5)
ax1.set_title('Hough transform')
ax1.set_xlabel('Angles (degrees)')
ax1.set_ylabel('Distance (pixels)')
ax1.axis('image')
ax2.imshow(image, cmap=cm.gray)
row1, col1 = image.shape
for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
y1 = (dist - col1 * np.cos(angle)) / np.sin(angle)
ax2.plot((0, col1), (y0, y1), '-r')
ax2.axis((0, col1, row1, 0))
ax2.set_title('Detected lines')
ax2.set_axis_off()
# Line finding using the Probabilistic Hough Transform.
image = data.camera()
edges = canny(image, 2, 1, 25)
lines = probabilistic_hough_line(edges, threshold=10, line_length=5,
line_gap=3)
# Generating figure 2.
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(16, 6), sharex=True,
sharey=True)
plt.tight_layout()
ax0.imshow(image, cmap=cm.gray)
ax0.set_title('Input image')
ax0.set_axis_off()
ax0.set_adjustable('box-forced')
ax1.imshow(edges, cmap=cm.gray)
ax1.set_title('Canny edges')
ax1.set_axis_off()
ax1.set_adjustable('box-forced')
ax2.imshow(edges * 0)
for line in lines:
p0, p1 = line
ax2.plot((p0[0], p1[0]), (p0[1], p1[1]))
row2, col2 = image.shape
ax2.axis((0, col2, row2, 0))
ax2.set_title('Probabilistic Hough')
ax2.set_axis_off()
ax2.set_adjustable('box-forced')
plt.show()
| rjeli/scikit-image | doc/examples/edges/plot_line_hough_transform.py | Python | bsd-3-clause | 4,764 |
"""
Test basic DarwinLog functionality provided by the StructuredDataDarwinLog
plugin.
These tests are currently only supported when running against Darwin
targets.
"""
from __future__ import print_function
import lldb
import os
import re
from lldbsuite.test import decorators
from lldbsuite.test import lldbtest
from lldbsuite.test import darwin_log
class TestDarwinLogFilterRegexCategory(darwin_log.DarwinLogTestBase):
mydir = lldbtest.TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
super(TestDarwinLogFilterRegexCategory, self).setUp()
# Source filename.
self.source = 'main.c'
# Output filename.
self.exe_name = self.getBuildArtifact("a.out")
self.d = {'C_SOURCES': self.source, 'EXE': self.exe_name}
# Locate breakpoint.
self.line = lldbtest.line_number(self.source, '// break here')
def tearDown(self):
# Shut down the process if it's still running.
if self.child:
self.runCmd('process kill')
self.expect_prompt()
self.runCmd('quit')
# Let parent clean up
super(TestDarwinLogFilterRegexCategory, self).tearDown()
# ==========================================================================
# category filter tests
# ==========================================================================
@decorators.skipUnlessDarwin
def test_filter_accept_category_full_match(self):
"""Test that fall-through reject, accept regex single category works."""
self.do_test(
["--no-match-accepts false",
"--filter \"accept category regex cat2\""]
)
# We should only see the second log message as we only accept
# that category.
self.assertIsNotNone(self.child.match)
self.assertTrue(
(len(
self.child.match.groups()) > 1) and (
self.child.match.group(2) == "cat2"),
"first log line should not be present, second log line "
"should be")
@decorators.skipUnlessDarwin
def test_filter_accept_category_partial_match(self):
"""Test that fall-through reject, accept regex category via partial match works."""
self.do_test(
["--no-match-accepts false",
"--filter \"accept category regex .+2\""]
)
# We should only see the second log message as we only accept
# that category.
self.assertIsNotNone(self.child.match)
self.assertTrue(
(len(
self.child.match.groups()) > 1) and (
self.child.match.group(2) == "cat2"),
"first log line should not be present, second log line "
"should be")
@decorators.skipUnlessDarwin
def test_filter_reject_category_full_match(self):
"""Test that fall-through accept, reject regex category works."""
self.do_test(
["--no-match-accepts true",
"--filter \"reject category regex cat1\""]
)
# We should only see the second log message as we rejected the first
# via category rejection.
self.assertIsNotNone(self.child.match)
self.assertTrue(
(len(
self.child.match.groups()) > 1) and (
self.child.match.group(2) == "cat2"),
"first log line should not be present, second log line "
"should be")
@decorators.skipUnlessDarwin
def test_filter_reject_category_partial_match(self):
"""Test that fall-through accept, reject regex category by partial match works."""
self.do_test(
["--no-match-accepts true",
"--filter \"reject category regex t1\""]
)
# We should only see the second log message as we rejected the first
# via category rejection.
self.assertIsNotNone(self.child.match)
self.assertTrue(
(len(
self.child.match.groups()) > 1) and (
self.child.match.group(2) == "cat2"),
"first log line should not be present, second log line "
"should be")
@decorators.skipUnlessDarwin
def test_filter_accept_category_second_rule(self):
"""Test that fall-through reject, accept regex category on second rule works."""
self.do_test(
["--no-match-accepts false",
"--filter \"accept category regex non-existent\"",
"--filter \"accept category regex cat2\""
]
)
# We should only see the second message since we reject by default,
# the first filter doesn't match any, and the second filter matches
# the category of the second log message.
self.assertIsNotNone(self.child.match)
self.assertTrue(
(len(
self.child.match.groups()) > 1) and (
self.child.match.group(2) == "cat2"),
"first log line should not be present, second log line "
"should be")
| youtube/cobalt | third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/darwin_log/filter/regex/category/TestDarwinLogFilterRegexCategory.py | Python | bsd-3-clause | 5,073 |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The spm module provides basic functions for interfacing with matlab
and spm to access spm tools.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import str, bytes
# Standard library imports
import os
from glob import glob
# Third-party imports
import numpy as np
import scipy.io as sio
# Local imports
from ... import logging
from ...utils.filemanip import (filename_to_list, list_to_filename,
split_filename)
from ..base import (Bunch, traits, TraitedSpec, File, Directory,
OutputMultiPath, InputMultiPath, isdefined)
from .base import (SPMCommand, SPMCommandInputSpec,
scans_for_fnames, ImageFileSPM)
__docformat__ = 'restructuredtext'
logger = logging.getLogger('interface')
class Level1DesignInputSpec(SPMCommandInputSpec):
spm_mat_dir = Directory(exists=True, field='dir',
desc='directory to store SPM.mat file (opt)')
timing_units = traits.Enum('secs', 'scans', field='timing.units',
desc='units for specification of onsets',
mandatory=True)
interscan_interval = traits.Float(field='timing.RT',
desc='Interscan interval in secs',
mandatory=True)
microtime_resolution = traits.Int(field='timing.fmri_t',
desc=('Number of time-bins per scan '
'in secs (opt)'))
microtime_onset = traits.Float(field='timing.fmri_t0',
desc=('The onset/time-bin in seconds for '
'alignment (opt)'))
session_info = traits.Any(field='sess',
desc=('Session specific information generated '
'by ``modelgen.SpecifyModel``'),
mandatory=True)
factor_info = traits.List(traits.Dict(traits.Enum('name', 'levels')),
field='fact',
desc=('Factor specific information '
'file (opt)'))
bases = traits.Dict(traits.Enum('hrf', 'fourier', 'fourier_han',
'gamma', 'fir'), field='bases', desc="""
dict {'name':{'basesparam1':val,...}}
name : string
Name of basis function (hrf, fourier, fourier_han,
gamma, fir)
hrf :
derivs : 2-element list
Model HRF Derivatives. No derivatives: [0,0],
Time derivatives : [1,0], Time and Dispersion
derivatives: [1,1]
fourier, fourier_han, gamma, fir:
length : int
Post-stimulus window length (in seconds)
order : int
Number of basis functions
""", mandatory=True)
volterra_expansion_order = traits.Enum(1, 2, field='volt',
desc=('Model interactions - '
'yes:1, no:2'))
global_intensity_normalization = traits.Enum('none', 'scaling',
field='global',
desc=('Global intensity '
'normalization - '
'scaling or none'))
mask_image = File(exists=True, field='mask',
desc='Image for explicitly masking the analysis')
mask_threshold = traits.Either(traits.Enum('-Inf'), traits.Float(),
desc="Thresholding for the mask",
default='-Inf', usedefault=True)
model_serial_correlations = traits.Enum('AR(1)', 'FAST', 'none',
field='cvi',
desc=('Model serial correlations '
'AR(1), FAST or none. FAST '
'is available in SPM12'))
class Level1DesignOutputSpec(TraitedSpec):
spm_mat_file = File(exists=True, desc='SPM mat file')
class Level1Design(SPMCommand):
"""Generate an SPM design matrix
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=59
Examples
--------
>>> level1design = Level1Design()
>>> level1design.inputs.timing_units = 'secs'
>>> level1design.inputs.interscan_interval = 2.5
>>> level1design.inputs.bases = {'hrf':{'derivs': [0,0]}}
>>> level1design.inputs.session_info = 'session_info.npz'
>>> level1design.run() # doctest: +SKIP
"""
input_spec = Level1DesignInputSpec
output_spec = Level1DesignOutputSpec
_jobtype = 'stats'
_jobname = 'fmri_spec'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['spm_mat_dir', 'mask_image']:
return np.array([str(val)], dtype=object)
if opt in ['session_info']: # , 'factor_info']:
if isinstance(val, dict):
return [val]
else:
return val
return super(Level1Design, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(Level1Design, self)._parse_inputs(
skip=('mask_threshold'))
for sessinfo in einputs[0]['sess']:
sessinfo['scans'] = scans_for_fnames(
filename_to_list(sessinfo['scans']), keep4d=False)
if not isdefined(self.inputs.spm_mat_dir):
einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object)
return einputs
def _make_matlab_command(self, content):
"""validates spm options and generates job structure
if mfile is True uses matlab .m file
else generates a job structure and saves in .mat
"""
if isdefined(self.inputs.mask_image):
# SPM doesn't handle explicit masking properly, especially
# when you want to use the entire mask image
postscript = "load SPM;\n"
postscript += ("SPM.xM.VM = spm_vol('%s');\n"
% list_to_filename(self.inputs.mask_image))
postscript += "SPM.xM.I = 0;\n"
postscript += "SPM.xM.T = [];\n"
postscript += ("SPM.xM.TH = ones(size(SPM.xM.TH))*(%s);\n"
% self.inputs.mask_threshold)
postscript += ("SPM.xM.xs = struct('Masking', "
"'explicit masking only');\n")
postscript += "save SPM SPM;\n"
else:
postscript = None
return super(Level1Design, self)._make_matlab_command(
content, postscript=postscript)
def _list_outputs(self):
outputs = self._outputs().get()
spm = os.path.join(os.getcwd(), 'SPM.mat')
outputs['spm_mat_file'] = spm
return outputs
class EstimateModelInputSpec(SPMCommandInputSpec):
spm_mat_file = File(exists=True, field='spmmat',
copyfile=True, mandatory=True,
desc='Absolute path to SPM.mat')
estimation_method = traits.Dict(
traits.Enum('Classical', 'Bayesian2', 'Bayesian'),
field='method', mandatory=True,
desc=('Dictionary of either Classical: 1, Bayesian: 1, '
'or Bayesian2: 1 (dict)'))
write_residuals = traits.Bool(field='write_residuals',
desc="Write individual residual images")
flags = traits.Dict(desc='Additional arguments')
class EstimateModelOutputSpec(TraitedSpec):
mask_image = ImageFileSPM(exists=True,
desc='binary mask to constrain estimation')
beta_images = OutputMultiPath(ImageFileSPM(exists=True),
desc='design parameter estimates')
residual_image = ImageFileSPM(exists=True,
desc='Mean-squared image of the residuals')
residual_images = OutputMultiPath(ImageFileSPM(exists=True),
desc="individual residual images (requires `write_residuals`")
RPVimage = ImageFileSPM(exists=True, desc='Resels per voxel image')
spm_mat_file = File(exists=True, desc='Updated SPM mat file')
labels = ImageFileSPM(exists=True, desc="label file")
SDerror = OutputMultiPath(ImageFileSPM(exists=True),
desc="Images of the standard deviation of the error")
ARcoef = OutputMultiPath(ImageFileSPM(exists=True),
desc="Images of the AR coefficient")
Cbetas = OutputMultiPath(ImageFileSPM(exists=True),
desc="Images of the parameter posteriors")
SDbetas = OutputMultiPath(ImageFileSPM(exists=True),
desc="Images of the standard deviation of parameter posteriors")
class EstimateModel(SPMCommand):
"""Use spm_spm to estimate the parameters of a model
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=69
Examples
--------
>>> est = EstimateModel()
>>> est.inputs.spm_mat_file = 'SPM.mat'
>>> est.inputs.estimation_method = {'Classical': 1}
>>> est.run() # doctest: +SKIP
"""
input_spec = EstimateModelInputSpec
output_spec = EstimateModelOutputSpec
_jobtype = 'stats'
_jobname = 'fmri_est'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'spm_mat_file':
return np.array([str(val)], dtype=object)
if opt == 'estimation_method':
if isinstance(val, (str, bytes)):
return {'{}'.format(val): 1}
else:
return val
return super(EstimateModel, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(EstimateModel, self)._parse_inputs(skip=('flags'))
if isdefined(self.inputs.flags):
einputs[0].update({flag: val for (flag, val) in
self.inputs.flags.items()})
return einputs
def _list_outputs(self):
outputs = self._outputs().get()
pth = os.path.dirname(self.inputs.spm_mat_file)
outtype = 'nii' if '12' in self.version.split('.')[0] else 'img'
spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
betas = [vbeta.fname[0] for vbeta in spm['SPM'][0, 0].Vbeta[0]]
if ('Bayesian' in self.inputs.estimation_method.keys() or
'Bayesian2' in self.inputs.estimation_method.keys()):
outputs['labels'] = os.path.join(pth,
'labels.{}'.format(outtype))
outputs['SDerror'] = glob(os.path.join(pth, 'Sess*_SDerror*'))
outputs['ARcoef'] = glob(os.path.join(pth, 'Sess*_AR_*'))
if betas:
outputs['Cbetas'] = [os.path.join(pth, 'C{}'.format(beta))
for beta in betas]
outputs['SDbetas'] = [os.path.join(pth, 'SD{}'.format(beta))
for beta in betas]
if 'Classical' in self.inputs.estimation_method.keys():
outputs['residual_image'] = os.path.join(pth,
'ResMS.{}'.format(outtype))
outputs['RPVimage'] = os.path.join(pth,
'RPV.{}'.format(outtype))
if self.inputs.write_residuals:
outputs['residual_images'] = glob(os.path.join(pth, 'Res_*'))
if betas:
outputs['beta_images'] = [os.path.join(pth, beta)
for beta in betas]
outputs['mask_image'] = os.path.join(pth,
'mask.{}'.format(outtype))
outputs['spm_mat_file'] = os.path.join(pth, 'SPM.mat')
return outputs
class EstimateContrastInputSpec(SPMCommandInputSpec):
spm_mat_file = File(exists=True, field='spmmat',
desc='Absolute path to SPM.mat',
copyfile=True,
mandatory=True)
contrasts = traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('F'),
traits.List(traits.Either(
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(
traits.Float)))))),
desc="""List of contrasts with each contrast being a list of the form:
[('name', 'stat', [condition list], [weight list], [session list])]
If session list is None or not provided, all sessions are used. For
F contrasts, the condition list should contain previously defined
T-contrasts.""",
mandatory=True)
beta_images = InputMultiPath(File(exists=True),
desc=('Parameter estimates of the '
'design matrix'),
copyfile=False,
mandatory=True)
residual_image = File(exists=True,
desc='Mean-squared image of the residuals',
copyfile=False,
mandatory=True)
use_derivs = traits.Bool(desc='use derivatives for estimation',
xor=['group_contrast'])
group_contrast = traits.Bool(desc='higher level contrast',
xor=['use_derivs'])
class EstimateContrastOutputSpec(TraitedSpec):
con_images = OutputMultiPath(File(exists=True),
desc='contrast images from a t-contrast')
spmT_images = OutputMultiPath(File(exists=True),
desc='stat images from a t-contrast')
ess_images = OutputMultiPath(File(exists=True),
desc='contrast images from an F-contrast')
spmF_images = OutputMultiPath(File(exists=True),
desc='stat images from an F-contrast')
spm_mat_file = File(exists=True, desc='Updated SPM mat file')
class EstimateContrast(SPMCommand):
"""Use spm_contrasts to estimate contrasts of interest
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> est = spm.EstimateContrast()
>>> est.inputs.spm_mat_file = 'SPM.mat'
>>> cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5])
>>> cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1])
>>> contrasts = [cont1,cont2]
>>> est.inputs.contrasts = contrasts
>>> est.run() # doctest: +SKIP
"""
input_spec = EstimateContrastInputSpec
output_spec = EstimateContrastOutputSpec
_jobtype = 'stats'
_jobname = 'con'
def _make_matlab_command(self, _):
"""validates spm options and generates job structure
"""
contrasts = []
cname = []
for i, cont in enumerate(self.inputs.contrasts):
cname.insert(i, cont[0])
contrasts.insert(i, Bunch(name=cont[0],
stat=cont[1],
conditions=cont[2],
weights=None,
sessions=None))
if len(cont) >= 4:
contrasts[i].weights = cont[3]
if len(cont) >= 5:
contrasts[i].sessions = cont[4]
script = "% generated by nipype.interfaces.spm\n"
script += "spm_defaults;\n"
script += ("jobs{1}.stats{1}.con.spmmat = {'%s'};\n"
% self.inputs.spm_mat_file)
script += "load(jobs{1}.stats{1}.con.spmmat{:});\n"
script += "SPM.swd = '%s';\n" % os.getcwd()
script += "save(jobs{1}.stats{1}.con.spmmat{:},'SPM');\n"
script += "names = SPM.xX.name;\n"
# get names for columns
if (isdefined(self.inputs.group_contrast) and
self.inputs.group_contrast):
script += "condnames=names;\n"
else:
if self.inputs.use_derivs:
script += "pat = 'Sn\([0-9]*\) (.*)';\n"
else:
script += ("pat = 'Sn\([0-9]*\) (.*)\*bf\(1\)|Sn\([0-9]*\) "
".*\*bf\([2-9]\)|Sn\([0-9]*\) (.*)';\n")
script += "t = regexp(names,pat,'tokens');\n"
# get sessidx for columns
script += "pat1 = 'Sn\(([0-9].*)\)\s.*';\n"
script += "t1 = regexp(names,pat1,'tokens');\n"
script += ("for i0=1:numel(t),condnames{i0}='';condsess(i0)=0;if "
"~isempty(t{i0}{1}),condnames{i0} = t{i0}{1}{1};"
"condsess(i0)=str2num(t1{i0}{1}{1});end;end;\n")
# BUILD CONTRAST SESSION STRUCTURE
for i, contrast in enumerate(contrasts):
if contrast.stat == 'T':
script += ("consess{%d}.tcon.name = '%s';\n"
% (i + 1, contrast.name))
script += ("consess{%d}.tcon.convec = zeros(1,numel(names));\n"
% (i + 1))
for c0, cond in enumerate(contrast.conditions):
script += ("idx = strmatch('%s',condnames,'exact');\n"
% (cond))
script += (("if isempty(idx), throw(MException("
"'CondName:Chk', sprintf('Condition %%s not "
"found in design','%s'))); end;\n") % cond)
if contrast.sessions:
for sno, sw in enumerate(contrast.sessions):
script += ("sidx = find(condsess(idx)==%d);\n"
% (sno + 1))
script += (("consess{%d}.tcon.convec(idx(sidx)) "
"= %f;\n")
% (i + 1, sw * contrast.weights[c0]))
else:
script += ("consess{%d}.tcon.convec(idx) = %f;\n"
% (i + 1, contrast.weights[c0]))
for i, contrast in enumerate(contrasts):
if contrast.stat == 'F':
script += ("consess{%d}.fcon.name = '%s';\n"
% (i + 1, contrast.name))
for cl0, fcont in enumerate(contrast.conditions):
try:
tidx = cname.index(fcont[0])
except:
Exception("Contrast Estimate: could not get index of"
" T contrast. probably not defined prior "
"to the F contrasts")
script += (("consess{%d}.fcon.convec{%d} = "
"consess{%d}.tcon.convec;\n")
% (i + 1, cl0 + 1, tidx + 1))
script += "jobs{1}.stats{1}.con.consess = consess;\n"
script += ("if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');"
"jobs=spm_jobman('spm5tospm8',{jobs});end\n")
script += "spm_jobman('run',jobs);"
return script
def _list_outputs(self):
outputs = self._outputs().get()
pth, _ = os.path.split(self.inputs.spm_mat_file)
spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
con_images = []
spmT_images = []
for con in spm['SPM'][0, 0].xCon[0]:
con_images.append(str(os.path.join(pth, con.Vcon[0, 0].fname[0])))
spmT_images.append(str(os.path.join(pth, con.Vspm[0, 0].fname[0])))
if con_images:
outputs['con_images'] = con_images
outputs['spmT_images'] = spmT_images
spm12 = '12' in self.version.split('.')[0]
if spm12:
ess = glob(os.path.join(pth, 'ess*.nii'))
else:
ess = glob(os.path.join(pth, 'ess*.img'))
if len(ess) > 0:
outputs['ess_images'] = sorted(ess)
if spm12:
spmf = glob(os.path.join(pth, 'spmF*.nii'))
else:
spmf = glob(os.path.join(pth, 'spmF*.img'))
if len(spmf) > 0:
outputs['spmF_images'] = sorted(spmf)
outputs['spm_mat_file'] = self.inputs.spm_mat_file
return outputs
class ThresholdInputSpec(SPMCommandInputSpec):
spm_mat_file = File(exists=True, desc='absolute path to SPM.mat',
copyfile=True, mandatory=True)
stat_image = File(exists=True, desc='stat image',
copyfile=False, mandatory=True)
contrast_index = traits.Int(mandatory=True,
desc='which contrast in the SPM.mat to use')
use_fwe_correction = traits.Bool(True, usedefault=True,
desc=('whether to use FWE (Bonferroni) '
'correction for initial threshold '
'(height_threshold_type has to be '
'set to p-value)'))
use_topo_fdr = traits.Bool(True, usedefault=True,
desc=('whether to use FDR over cluster extent '
'probabilities'))
height_threshold = traits.Float(0.05, usedefault=True,
desc=('value for initial thresholding '
'(defining clusters)'))
height_threshold_type = traits.Enum('p-value', 'stat', usedefault=True,
desc=('Is the cluster forming '
'threshold a stat value or '
'p-value?'))
extent_fdr_p_threshold = traits.Float(0.05, usedefault=True,
desc=('p threshold on FDR corrected '
'cluster size probabilities'))
extent_threshold = traits.Int(0, usedefault=True,
desc='Minimum cluster size in voxels')
force_activation = traits.Bool(False, usedefault=True,
desc=('In case no clusters survive the '
'topological inference step this '
                                         'will pick a cluster with the highest '
'sum of t-values. Use with care.'))
class ThresholdOutputSpec(TraitedSpec):
thresholded_map = File(exists=True)
n_clusters = traits.Int()
pre_topo_fdr_map = File(exists=True)
pre_topo_n_clusters = traits.Int()
activation_forced = traits.Bool()
cluster_forming_thr = traits.Float()
class Threshold(SPMCommand):
"""Topological FDR thresholding based on cluster extent/size. Smoothness is
estimated from GLM residuals but is assumed to be the same for all of the
voxels.
Examples
--------
>>> thresh = Threshold()
>>> thresh.inputs.spm_mat_file = 'SPM.mat'
>>> thresh.inputs.stat_image = 'spmT_0001.img'
>>> thresh.inputs.contrast_index = 1
>>> thresh.inputs.extent_fdr_p_threshold = 0.05
>>> thresh.run() # doctest: +SKIP
"""
input_spec = ThresholdInputSpec
output_spec = ThresholdOutputSpec
def _gen_thresholded_map_filename(self):
_, fname, ext = split_filename(self.inputs.stat_image)
return os.path.abspath(fname + "_thr" + ext)
def _gen_pre_topo_map_filename(self):
_, fname, ext = split_filename(self.inputs.stat_image)
return os.path.abspath(fname + "_pre_topo_thr" + ext)
def _make_matlab_command(self, _):
script = "con_index = %d;\n" % self.inputs.contrast_index
script += "cluster_forming_thr = %f;\n" % self.inputs.height_threshold
if self.inputs.use_fwe_correction:
script += "thresDesc = 'FWE';\n"
else:
script += "thresDesc = 'none';\n"
if self.inputs.use_topo_fdr:
script += "use_topo_fdr = 1;\n"
else:
script += "use_topo_fdr = 0;\n"
if self.inputs.force_activation:
script += "force_activation = 1;\n"
else:
script += "force_activation = 0;\n"
script += ("cluster_extent_p_fdr_thr = %f;\n"
% self.inputs.extent_fdr_p_threshold)
script += "stat_filename = '%s';\n" % self.inputs.stat_image
script += ("height_threshold_type = '%s';\n"
% self.inputs.height_threshold_type)
script += "extent_threshold = %d;\n" % self.inputs.extent_threshold
script += "load %s;\n" % self.inputs.spm_mat_file
script += """
FWHM = SPM.xVol.FWHM;
df = [SPM.xCon(con_index).eidf SPM.xX.erdf];
STAT = SPM.xCon(con_index).STAT;
R = SPM.xVol.R;
S = SPM.xVol.S;
n = 1;
switch thresDesc
case 'FWE'
cluster_forming_thr = spm_uc(cluster_forming_thr,df,STAT,R,n,S);
case 'none'
if strcmp(height_threshold_type, 'p-value')
cluster_forming_thr = spm_u(cluster_forming_thr^(1/n),df,STAT);
end
end
stat_map_vol = spm_vol(stat_filename);
[stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol);
Z = stat_map_data(:)';
[x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))');
XYZ = cat(1, x', y', z');
XYZth = XYZ(:, Z >= cluster_forming_thr);
Zth = Z(Z >= cluster_forming_thr);
"""
script += (("spm_write_filtered(Zth,XYZth,stat_map_vol.dim',"
"stat_map_vol.mat,'thresholded map', '%s');\n")
% self._gen_pre_topo_map_filename())
script += """
max_size = 0;
max_size_index = 0;
th_nclusters = 0;
nclusters = 0;
if isempty(XYZth)
thresholded_XYZ = [];
thresholded_Z = [];
else
if use_topo_fdr
V2R = 1/prod(FWHM(stat_map_vol.dim > 1));
[uc,Pc,ue] = spm_uc_clusterFDR(cluster_extent_p_fdr_thr,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr);
end
voxel_labels = spm_clusters(XYZth);
nclusters = max(voxel_labels);
thresholded_XYZ = [];
thresholded_Z = [];
for i = 1:nclusters
cluster_size = sum(voxel_labels==i);
if cluster_size > extent_threshold && (~use_topo_fdr || (cluster_size - uc) > -1)
thresholded_XYZ = cat(2, thresholded_XYZ, XYZth(:,voxel_labels == i));
thresholded_Z = cat(2, thresholded_Z, Zth(voxel_labels == i));
th_nclusters = th_nclusters + 1;
end
if force_activation
cluster_sum = sum(Zth(voxel_labels == i));
if cluster_sum > max_size
max_size = cluster_sum;
max_size_index = i;
end
end
end
end
activation_forced = 0;
if isempty(thresholded_XYZ)
if force_activation && max_size ~= 0
thresholded_XYZ = XYZth(:,voxel_labels == max_size_index);
thresholded_Z = Zth(voxel_labels == max_size_index);
th_nclusters = 1;
activation_forced = 1;
else
thresholded_Z = [0];
thresholded_XYZ = [1 1 1]';
th_nclusters = 0;
end
end
fprintf('activation_forced = %d\\n',activation_forced);
fprintf('pre_topo_n_clusters = %d\\n',nclusters);
fprintf('n_clusters = %d\\n',th_nclusters);
fprintf('cluster_forming_thr = %f\\n',cluster_forming_thr);
"""
script += (("spm_write_filtered(thresholded_Z,thresholded_XYZ,"
"stat_map_vol.dim',stat_map_vol.mat,'thresholded map',"
" '%s');\n") % self._gen_thresholded_map_filename())
return script
def aggregate_outputs(self, runtime=None):
outputs = self._outputs()
setattr(outputs, 'thresholded_map',
self._gen_thresholded_map_filename())
setattr(outputs, 'pre_topo_fdr_map', self._gen_pre_topo_map_filename())
for line in runtime.stdout.split('\n'):
if line.startswith("activation_forced = "):
setattr(outputs, 'activation_forced',
line[len("activation_forced = "):].strip() == "1")
elif line.startswith("n_clusters = "):
setattr(outputs, 'n_clusters',
int(line[len("n_clusters = "):].strip()))
elif line.startswith("pre_topo_n_clusters = "):
setattr(outputs, 'pre_topo_n_clusters',
int(line[len("pre_topo_n_clusters = "):].strip()))
elif line.startswith("cluster_forming_thr = "):
setattr(outputs, 'cluster_forming_thr',
float(line[len("cluster_forming_thr = "):].strip()))
return outputs
def _list_outputs(self):
outputs = self._outputs().get()
outputs['thresholded_map'] = self._gen_thresholded_map_filename()
outputs['pre_topo_fdr_map'] = self._gen_pre_topo_map_filename()
return outputs
class ThresholdStatisticsInputSpec(SPMCommandInputSpec):
spm_mat_file = File(exists=True, desc='absolute path to SPM.mat',
copyfile=True, mandatory=True)
stat_image = File(exists=True, desc='stat image',
copyfile=False, mandatory=True)
contrast_index = traits.Int(mandatory=True,
desc='which contrast in the SPM.mat to use')
height_threshold = traits.Float(desc=('stat value for initial '
'thresholding (defining clusters)'),
mandatory=True)
extent_threshold = traits.Int(0, usedefault=True,
desc="Minimum cluster size in voxels")
class ThresholdStatisticsOutputSpec(TraitedSpec):
voxelwise_P_Bonf = traits.Float()
voxelwise_P_RF = traits.Float()
voxelwise_P_uncor = traits.Float()
voxelwise_P_FDR = traits.Float()
clusterwise_P_RF = traits.Float()
clusterwise_P_FDR = traits.Float()
class ThresholdStatistics(SPMCommand):
"""Given height and cluster size threshold calculate theoretical
probabilities concerning false positives
Examples
--------
>>> thresh = ThresholdStatistics()
>>> thresh.inputs.spm_mat_file = 'SPM.mat'
>>> thresh.inputs.stat_image = 'spmT_0001.img'
>>> thresh.inputs.contrast_index = 1
>>> thresh.inputs.height_threshold = 4.56
>>> thresh.run() # doctest: +SKIP
"""
input_spec = ThresholdStatisticsInputSpec
output_spec = ThresholdStatisticsOutputSpec
def _make_matlab_command(self, _):
script = "con_index = %d;\n" % self.inputs.contrast_index
script += "cluster_forming_thr = %f;\n" % self.inputs.height_threshold
script += "stat_filename = '%s';\n" % self.inputs.stat_image
script += "extent_threshold = %d;\n" % self.inputs.extent_threshold
script += "load '%s'\n" % self.inputs.spm_mat_file
script += """
FWHM = SPM.xVol.FWHM;
df = [SPM.xCon(con_index).eidf SPM.xX.erdf];
STAT = SPM.xCon(con_index).STAT;
R = SPM.xVol.R;
S = SPM.xVol.S;
n = 1;
voxelwise_P_Bonf = spm_P_Bonf(cluster_forming_thr,df,STAT,S,n)
voxelwise_P_RF = spm_P_RF(1,0,cluster_forming_thr,df,STAT,R,n)
stat_map_vol = spm_vol(stat_filename);
[stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol);
Z = stat_map_data(:);
Zum = Z;
switch STAT
case 'Z'
VPs = (1-spm_Ncdf(Zum)).^n;
voxelwise_P_uncor = (1-spm_Ncdf(cluster_forming_thr)).^n
case 'T'
VPs = (1 - spm_Tcdf(Zum,df(2))).^n;
voxelwise_P_uncor = (1 - spm_Tcdf(cluster_forming_thr,df(2))).^n
case 'X'
VPs = (1-spm_Xcdf(Zum,df(2))).^n;
voxelwise_P_uncor = (1-spm_Xcdf(cluster_forming_thr,df(2))).^n
case 'F'
VPs = (1 - spm_Fcdf(Zum,df)).^n;
voxelwise_P_uncor = (1 - spm_Fcdf(cluster_forming_thr,df)).^n
end
VPs = sort(VPs);
voxelwise_P_FDR = spm_P_FDR(cluster_forming_thr,df,STAT,n,VPs)
V2R = 1/prod(FWHM(stat_map_vol.dim > 1));
clusterwise_P_RF = spm_P_RF(1,extent_threshold*V2R,cluster_forming_thr,df,STAT,R,n)
[x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))');
XYZ = cat(1, x', y', z');
[u, CPs, ue] = spm_uc_clusterFDR(0.05,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr);
clusterwise_P_FDR = spm_P_clusterFDR(extent_threshold*V2R,df,STAT,R,n,cluster_forming_thr,CPs')
"""
return script
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
cur_output = ""
for line in runtime.stdout.split('\n'):
if cur_output != "" and len(line.split()) != 0:
setattr(outputs, cur_output, float(line))
cur_output = ""
continue
if (len(line.split()) != 0 and
line.split()[0] in ["clusterwise_P_FDR",
"clusterwise_P_RF", "voxelwise_P_Bonf",
"voxelwise_P_FDR", "voxelwise_P_RF",
"voxelwise_P_uncor"]):
cur_output = line.split()[0]
continue
return outputs
class FactorialDesignInputSpec(SPMCommandInputSpec):
spm_mat_dir = Directory(exists=True, field='dir',
desc='directory to store SPM.mat file (opt)')
# Need to make an alias of InputMultiPath; the inputs below are not Path
covariates = InputMultiPath(traits.Dict(
key_trait=traits.Enum('vector', 'name', 'interaction', 'centering')),
field='cov',
desc=('covariate dictionary {vector, name, '
'interaction, centering}'))
threshold_mask_none = traits.Bool(field='masking.tm.tm_none',
xor=['threshold_mask_absolute',
'threshold_mask_relative'],
desc='do not use threshold masking')
threshold_mask_absolute = traits.Float(field='masking.tm.tma.athresh',
xor=['threshold_mask_none',
'threshold_mask_relative'],
desc='use an absolute threshold')
threshold_mask_relative = traits.Float(field='masking.tm.tmr.rthresh',
xor=['threshold_mask_absolute',
'threshold_mask_none'],
desc=('threshold using a '
'proportion of the global '
'value'))
use_implicit_threshold = traits.Bool(field='masking.im',
desc=('use implicit mask NaNs or '
'zeros to threshold'))
explicit_mask_file = File(field='masking.em', # requires cell
                              desc='use an explicit mask file to threshold')
global_calc_omit = traits.Bool(field='globalc.g_omit',
xor=['global_calc_mean',
'global_calc_values'],
desc='omit global calculation')
global_calc_mean = traits.Bool(field='globalc.g_mean',
xor=['global_calc_omit',
'global_calc_values'],
desc='use mean for global calculation')
global_calc_values = traits.List(traits.Float,
field='globalc.g_user.global_uval',
xor=['global_calc_mean',
'global_calc_omit'],
                                     desc='user-defined values for global calculation')
no_grand_mean_scaling = traits.Bool(field='globalm.gmsca.gmsca_no',
desc=('do not perform grand mean '
'scaling'))
global_normalization = traits.Enum(1, 2, 3, field='globalm.glonorm',
desc=('global normalization None-1, '
'Proportional-2, ANCOVA-3'))
class FactorialDesignOutputSpec(TraitedSpec):
spm_mat_file = File(exists=True, desc='SPM mat file')
class FactorialDesign(SPMCommand):
"""Base class for factorial designs
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=77
"""
input_spec = FactorialDesignInputSpec
output_spec = FactorialDesignOutputSpec
_jobtype = 'stats'
_jobname = 'factorial_design'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['spm_mat_dir', 'explicit_mask_file']:
return np.array([str(val)], dtype=object)
if opt in ['covariates']:
outlist = []
mapping = {'name': 'cname', 'vector': 'c',
'interaction': 'iCFI',
'centering': 'iCC'}
for dictitem in val:
outdict = {}
for key, keyval in list(dictitem.items()):
outdict[mapping[key]] = keyval
outlist.append(outdict)
return outlist
return super(FactorialDesign, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(FactorialDesign, self)._parse_inputs()
if not isdefined(self.inputs.spm_mat_dir):
einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object)
return einputs
def _list_outputs(self):
outputs = self._outputs().get()
spm = os.path.join(os.getcwd(), 'SPM.mat')
outputs['spm_mat_file'] = spm
return outputs
class OneSampleTTestDesignInputSpec(FactorialDesignInputSpec):
in_files = traits.List(File(exists=True), field='des.t1.scans',
mandatory=True, minlen=2,
desc='input files')
class OneSampleTTestDesign(FactorialDesign):
"""Create SPM design for one sample t-test
Examples
--------
>>> ttest = OneSampleTTestDesign()
>>> ttest.inputs.in_files = ['cont1.nii', 'cont2.nii']
>>> ttest.run() # doctest: +SKIP
"""
input_spec = OneSampleTTestDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['in_files']:
return np.array(val, dtype=object)
return super(OneSampleTTestDesign, self)._format_arg(opt, spec, val)
class TwoSampleTTestDesignInputSpec(FactorialDesignInputSpec):
# very unlikely that you will have a single image in one group, so setting
# parameters to require at least two files in each group [SG]
group1_files = traits.List(File(exists=True), field='des.t2.scans1',
mandatory=True, minlen=2,
desc='Group 1 input files')
group2_files = traits.List(File(exists=True), field='des.t2.scans2',
mandatory=True, minlen=2,
desc='Group 2 input files')
dependent = traits.Bool(field='des.t2.dept',
desc=('Are the measurements dependent between '
'levels'))
unequal_variance = traits.Bool(field='des.t2.variance',
desc=('Are the variances equal or unequal '
'between groups'))
class TwoSampleTTestDesign(FactorialDesign):
"""Create SPM design for two sample t-test
Examples
--------
>>> ttest = TwoSampleTTestDesign()
>>> ttest.inputs.group1_files = ['cont1.nii', 'cont2.nii']
>>> ttest.inputs.group2_files = ['cont1a.nii', 'cont2a.nii']
>>> ttest.run() # doctest: +SKIP
"""
input_spec = TwoSampleTTestDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['group1_files', 'group2_files']:
return np.array(val, dtype=object)
return super(TwoSampleTTestDesign, self)._format_arg(opt, spec, val)
class PairedTTestDesignInputSpec(FactorialDesignInputSpec):
paired_files = traits.List(traits.List(File(exists=True),
minlen=2, maxlen=2),
field='des.pt.pair',
mandatory=True, minlen=2,
desc='List of paired files')
grand_mean_scaling = traits.Bool(field='des.pt.gmsca',
desc='Perform grand mean scaling')
ancova = traits.Bool(field='des.pt.ancova',
desc='Specify ancova-by-factor regressors')
class PairedTTestDesign(FactorialDesign):
"""Create SPM design for paired t-test
Examples
--------
>>> pttest = PairedTTestDesign()
>>> pttest.inputs.paired_files = [['cont1.nii','cont1a.nii'],['cont2.nii','cont2a.nii']]
>>> pttest.run() # doctest: +SKIP
"""
input_spec = PairedTTestDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['paired_files']:
return [dict(scans=np.array(files, dtype=object)) for files in val]
return super(PairedTTestDesign, self)._format_arg(opt, spec, val)
class MultipleRegressionDesignInputSpec(FactorialDesignInputSpec):
in_files = traits.List(File(exists=True),
field='des.mreg.scans',
mandatory=True, minlen=2,
desc='List of files')
include_intercept = traits.Bool(True, field='des.mreg.incint',
usedefault=True,
desc='Include intercept in design')
user_covariates = InputMultiPath(traits.Dict(
key_trait=traits.Enum('vector', 'name', 'centering')),
field='des.mreg.mcov',
desc=('covariate dictionary {vector, '
'name, centering}'))
class MultipleRegressionDesign(FactorialDesign):
"""Create SPM design for multiple regression
Examples
--------
>>> mreg = MultipleRegressionDesign()
>>> mreg.inputs.in_files = ['cont1.nii','cont2.nii']
>>> mreg.run() # doctest: +SKIP
"""
input_spec = MultipleRegressionDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['in_files']:
return np.array(val, dtype=object)
if opt in ['user_covariates']:
outlist = []
mapping = {'name': 'cname', 'vector': 'c',
'centering': 'iCC'}
for dictitem in val:
outdict = {}
for key, keyval in list(dictitem.items()):
outdict[mapping[key]] = keyval
outlist.append(outdict)
return outlist
return (super(MultipleRegressionDesign, self)
._format_arg(opt, spec, val))
| mick-d/nipype | nipype/interfaces/spm/model.py | Python | bsd-3-clause | 45,177 |
from django.core.management.base import BaseCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_') or not k.isupper()):
"""Convert a module namespace to a Python dictionary."""
return {k: repr(getattr(module, k)) for k in dir(module) if not omittable(k)}
class Command(BaseCommand):
help = """Displays differences between the current settings.py and Django's
default settings."""
requires_system_checks = []
def add_arguments(self, parser):
parser.add_argument(
'--all', action='store_true',
help=(
'Display all settings, regardless of their value. In "hash" '
'mode, default values are prefixed by "###".'
),
)
parser.add_argument(
'--default', metavar='MODULE',
help=(
"The settings module to compare the current settings against. Leave empty to "
"compare against Django's default settings."
),
)
parser.add_argument(
'--output', default='hash', choices=('hash', 'unified'),
help=(
"Selects the output format. 'hash' mode displays each changed "
"setting, with the settings that don't appear in the defaults "
"followed by ###. 'unified' mode prefixes the default setting "
"with a minus sign, followed by the changed setting prefixed "
"with a plus sign."
),
)
def handle(self, **options):
from django.conf import settings, Settings, global_settings
# Because settings are imported lazily, we need to explicitly load them.
if not settings.configured:
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default = options['default']
default_settings = module_to_dict(Settings(default) if default else global_settings)
output_func = {
'hash': self.output_hash,
'unified': self.output_unified,
}[options['output']]
return '\n'.join(output_func(user_settings, default_settings, **options))
def output_hash(self, user_settings, default_settings, **options):
# Inspired by Postfix's "postconf -n".
output = []
for key in sorted(user_settings):
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
elif options['all']:
output.append("### %s = %s" % (key, user_settings[key]))
return output
def output_unified(self, user_settings, default_settings, **options):
output = []
for key in sorted(user_settings):
if key not in default_settings:
output.append(self.style.SUCCESS("+ %s = %s" % (key, user_settings[key])))
elif user_settings[key] != default_settings[key]:
output.append(self.style.ERROR("- %s = %s" % (key, default_settings[key])))
output.append(self.style.SUCCESS("+ %s = %s" % (key, user_settings[key])))
elif options['all']:
output.append(" %s = %s" % (key, user_settings[key]))
return output
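# A hedged example of the two output modes (the setting names and values below
# are made up; only the formatting conventions come from the methods above):
#
#   $ python manage.py diffsettings --output hash
#   DEBUG = True
#   MY_CUSTOM_FLAG = 'on' ###
#
#   $ python manage.py diffsettings --output unified
#   - DEBUG = False
#   + DEBUG = True
#   + MY_CUSTOM_FLAG = 'on'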
| theo-l/django | django/core/management/commands/diffsettings.py | Python | bsd-3-clause | 3,370 |
from datetime import timedelta
from time import time
import warnings
from gdbn.dbn import buildDBN
from gdbn import activationFunctions
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
warnings.warn("""
The nolearn.dbn module will be removed in nolearn 0.6. If you want to
continue to use this module, please consider copying the code into
your own project. And take a look at Lasagne and nolearn.lasagne for
a better neural net toolkit.
""")
class DBN(BaseEstimator):
"""A scikit-learn estimator based on George Dahl's DBN
implementation `gdbn`.
"""
def __init__(
self,
layer_sizes=None,
scales=0.05,
fan_outs=None,
output_act_funct=None,
real_valued_vis=True,
use_re_lu=True,
uniforms=False,
learn_rates=0.1,
learn_rate_decays=1.0,
learn_rate_minimums=0.0,
momentum=0.9,
l2_costs=0.0001,
dropouts=0,
nesterov=True,
nest_compare=True,
rms_lims=None,
learn_rates_pretrain=None,
momentum_pretrain=None,
l2_costs_pretrain=None,
nest_compare_pretrain=None,
epochs=10,
epochs_pretrain=0,
loss_funct=None,
minibatch_size=64,
minibatches_per_epoch=None,
pretrain_callback=None,
fine_tune_callback=None,
random_state=None,
verbose=0,
):
"""
Many parameters such as `learn_rates`, `dropouts` etc. will
also accept a single value, in which case that value will be
used for all layers. To control the value per layer, pass a
list of values instead; see examples below.
Parameters ending with `_pretrain` may be provided to override
the given parameter for pretraining. Consider an example
where you want the pre-training to use a lower learning rate
than the fine tuning (the backprop), then you'd maybe pass
something like::
            DBN([784, 300, 10], learn_rates=0.1, learn_rates_pretrain=0.005)
If you don't pass the `learn_rates_pretrain` parameter, the
value of `learn_rates` will be used for both pre-training and
fine tuning. (Which seems to not work very well.)
:param layer_sizes: A list of integers of the form
``[n_vis_units, n_hid_units1,
n_hid_units2, ..., n_out_units]``.
An example: ``[784, 300, 10]``
The number of units in the input layer and
the output layer will be set automatically
if you set them to -1. Thus, the above
example is equivalent to ``[-1, 300, -1]``
if you pass an ``X`` with 784 features,
and a ``y`` with 10 classes.
:param scales: Scale of the randomly initialized weights. A
list of floating point values. When you find
good values for the scale of the weights you
can speed up training a lot, and also improve
performance. Defaults to `0.05`.
:param fan_outs: Number of nonzero incoming connections to a
hidden unit. Defaults to `None`, which means
that all connections have non-zero weights.
:param output_act_funct: Output activation function. Instance
of type
:class:`~gdbn.activationFunctions.Sigmoid`,
:class:`~.gdbn.activationFunctions.Linear`,
:class:`~.gdbn.activationFunctions.Softmax`
from the
:mod:`gdbn.activationFunctions`
module. Defaults to
:class:`~.gdbn.activationFunctions.Softmax`.
:param real_valued_vis: Set `True` (the default) if visible
units are real-valued.
:param use_re_lu: Set `True` to use rectified linear units.
                          Defaults to `True`.
:param uniforms: Not documented at this time.
:param learn_rates: A list of learning rates, one entry per
weight layer.
An example: ``[0.1, 0.1]``
:param learn_rate_decays: The number with which the
`learn_rate` is multiplied after
each epoch of fine-tuning.
:param learn_rate_minimums: The minimum `learn_rates`; after
the learn rate reaches the minimum
learn rate, the
`learn_rate_decays` no longer has
any effect.
:param momentum: Momentum
:param l2_costs: L2 costs per weight layer.
:param dropouts: Dropouts per weight layer.
:param nesterov: Not documented at this time.
:param nest_compare: Not documented at this time.
:param rms_lims: Not documented at this time.
:param learn_rates_pretrain: A list of learning rates similar
                                     to `learn_rates`, but
used for pretraining. Defaults
to value of `learn_rates` parameter.
:param momentum_pretrain: Momentum for pre-training. Defaults
to value of `momentum` parameter.
:param l2_costs_pretrain: L2 costs per weight layer, for
pre-training. Defaults to the value
of `l2_costs` parameter.
:param nest_compare_pretrain: Not documented at this time.
:param epochs: Number of epochs to train (with backprop).
:param epochs_pretrain: Number of epochs to pre-train (with CDN).
:param loss_funct: A function that calculates the loss. Used
for displaying learning progress and for
:meth:`score`.
:param minibatch_size: Size of a minibatch.
:param minibatches_per_epoch: Number of minibatches per epoch.
The default is to use as many as
fit into our training set.
:param pretrain_callback: An optional function that takes as
arguments the :class:`DBN` instance,
the epoch and the layer index as its
argument, and is called for each
epoch of pretraining.
:param fine_tune_callback: An optional function that takes as
arguments the :class:`DBN` instance
and the epoch, and is called for
each epoch of fine tuning.
:param random_state: An optional int used as the seed by the
random number generator.
:param verbose: Debugging output.
"""
if layer_sizes is None:
layer_sizes = [-1, -1]
if output_act_funct is None:
output_act_funct = activationFunctions.Softmax()
elif isinstance(output_act_funct, str):
output_act_funct = getattr(activationFunctions, output_act_funct)()
        if random_state is not None:
            if not isinstance(random_state, int):
                raise ValueError("random_state must be an int")
self.layer_sizes = layer_sizes
self.scales = scales
self.fan_outs = fan_outs
self.output_act_funct = output_act_funct
self.real_valued_vis = real_valued_vis
self.use_re_lu = use_re_lu
self.uniforms = uniforms
self.learn_rates = learn_rates
self.learn_rate_decays = learn_rate_decays
self.learn_rate_minimums = learn_rate_minimums
self.momentum = momentum
self.l2_costs = l2_costs
self.dropouts = dropouts
self.nesterov = nesterov
self.nest_compare = nest_compare
self.rms_lims = rms_lims
self.learn_rates_pretrain = learn_rates_pretrain
self.momentum_pretrain = momentum_pretrain
self.l2_costs_pretrain = l2_costs_pretrain
self.nest_compare_pretrain = nest_compare_pretrain
self.epochs = epochs
self.epochs_pretrain = epochs_pretrain
self.loss_funct = loss_funct
self.use_dropout = True if dropouts else False
self.minibatch_size = minibatch_size
self.minibatches_per_epoch = minibatches_per_epoch
self.pretrain_callback = pretrain_callback
self.fine_tune_callback = fine_tune_callback
self.random_state = random_state
self.verbose = verbose
def _fill_missing_layer_sizes(self, X, y):
layer_sizes = self.layer_sizes
if layer_sizes[0] == -1: # n_feat
layer_sizes[0] = X.shape[1]
if layer_sizes[-1] == -1 and y is not None: # n_classes
layer_sizes[-1] = y.shape[1]
def _vp(self, value):
num_weights = len(self.layer_sizes) - 1
if not hasattr(value, '__iter__'):
value = [value] * num_weights
return list(value)
def _build_net(self, X, y=None):
v = self._vp
self._fill_missing_layer_sizes(X, y)
if self.verbose: # pragma: no cover
print "[DBN] layers {}".format(self.layer_sizes)
if self.random_state is not None:
np.random.seed(self.random_state)
net = buildDBN(
self.layer_sizes,
v(self.scales),
v(self.fan_outs),
self.output_act_funct,
self.real_valued_vis,
self.use_re_lu,
v(self.uniforms),
)
return net
def _configure_net_pretrain(self, net):
v = self._vp
self._configure_net_finetune(net)
learn_rates = self.learn_rates_pretrain
momentum = self.momentum_pretrain
l2_costs = self.l2_costs_pretrain
nest_compare = self.nest_compare_pretrain
if learn_rates is None:
learn_rates = self.learn_rates
if momentum is None:
momentum = self.momentum
if l2_costs is None:
l2_costs = self.l2_costs
if nest_compare is None:
nest_compare = self.nest_compare
net.learnRates = v(learn_rates)
net.momentum = momentum
net.L2Costs = v(l2_costs)
net.nestCompare = nest_compare
return net
def _configure_net_finetune(self, net):
v = self._vp
net.learnRates = v(self.learn_rates)
net.momentum = self.momentum
net.L2Costs = v(self.l2_costs)
net.dropouts = v(self.dropouts)
net.nesterov = self.nesterov
net.nestCompare = self.nest_compare
net.rmsLims = v(self.rms_lims)
return net
def _minibatches(self, X, y=None):
while True:
idx = np.random.randint(X.shape[0], size=(self.minibatch_size,))
X_batch = X[idx]
if hasattr(X_batch, 'todense'):
X_batch = X_batch.todense()
if y is not None:
yield (X_batch, y[idx])
else:
yield X_batch
def _onehot(self, y):
return np.array(
OneHotEncoder().fit_transform(y.reshape(-1, 1)).todense())
def _num_mistakes(self, targets, outputs):
if hasattr(targets, 'as_numpy_array'): # pragma: no cover
targets = targets.as_numpy_array()
if hasattr(outputs, 'as_numpy_array'):
outputs = outputs.as_numpy_array()
return np.sum(outputs.argmax(1) != targets.argmax(1))
def _learn_rate_adjust(self):
if self.learn_rate_decays == 1.0:
return
learn_rate_decays = self._vp(self.learn_rate_decays)
learn_rate_minimums = self._vp(self.learn_rate_minimums)
for index, decay in enumerate(learn_rate_decays):
new_learn_rate = self.net_.learnRates[index] * decay
if new_learn_rate >= learn_rate_minimums[index]:
self.net_.learnRates[index] = new_learn_rate
if self.verbose >= 2:
print "Learn rates: {}".format(self.net_.learnRates)
def fit(self, X, y):
if self.verbose:
print "[DBN] fitting X.shape=%s" % (X.shape,)
self._enc = LabelEncoder()
y = self._enc.fit_transform(y)
y = self._onehot(y)
self.net_ = self._build_net(X, y)
minibatches_per_epoch = self.minibatches_per_epoch
if minibatches_per_epoch is None:
minibatches_per_epoch = X.shape[0] / self.minibatch_size
loss_funct = self.loss_funct
if loss_funct is None:
loss_funct = self._num_mistakes
errors_pretrain = self.errors_pretrain_ = []
losses_fine_tune = self.losses_fine_tune_ = []
errors_fine_tune = self.errors_fine_tune_ = []
if self.epochs_pretrain:
self.epochs_pretrain = self._vp(self.epochs_pretrain)
self._configure_net_pretrain(self.net_)
for layer_index in range(len(self.layer_sizes) - 1):
errors_pretrain.append([])
if self.verbose: # pragma: no cover
print "[DBN] Pre-train layer {}...".format(layer_index + 1)
time0 = time()
for epoch, err in enumerate(
self.net_.preTrainIth(
layer_index,
self._minibatches(X),
self.epochs_pretrain[layer_index],
minibatches_per_epoch,
)):
errors_pretrain[-1].append(err)
if self.verbose: # pragma: no cover
print " Epoch {}: err {}".format(epoch + 1, err)
elapsed = str(timedelta(seconds=time() - time0))
print " ({})".format(elapsed.split('.')[0])
time0 = time()
if self.pretrain_callback is not None:
self.pretrain_callback(
self, epoch + 1, layer_index)
self._configure_net_finetune(self.net_)
if self.verbose: # pragma: no cover
print "[DBN] Fine-tune..."
time0 = time()
for epoch, (loss, err) in enumerate(
self.net_.fineTune(
self._minibatches(X, y),
self.epochs,
minibatches_per_epoch,
loss_funct,
self.verbose,
self.use_dropout,
)):
losses_fine_tune.append(loss)
errors_fine_tune.append(err)
self._learn_rate_adjust()
if self.verbose: # pragma: no cover
print "Epoch {}:".format(epoch + 1)
print " loss {}".format(loss)
print " err {}".format(err)
elapsed = str(timedelta(seconds=time() - time0))
print " ({})".format(elapsed.split('.')[0])
time0 = time()
if self.fine_tune_callback is not None:
self.fine_tune_callback(self, epoch + 1)
def predict(self, X):
y_ind = np.argmax(self.predict_proba(X), axis=1)
return self._enc.inverse_transform(y_ind)
def predict_proba(self, X):
if hasattr(X, 'todense'):
return self._predict_proba_sparse(X)
res = np.zeros((X.shape[0], self.layer_sizes[-1]))
for i, el in enumerate(self.net_.predictions(X, asNumpy=True)):
res[i] = el
return res
def _predict_proba_sparse(self, X):
batch_size = self.minibatch_size
res = []
for i in xrange(0, X.shape[0], batch_size):
X_batch = X[i:min(i + batch_size, X.shape[0])].todense()
res.extend(self.net_.predictions(X_batch))
return np.array(res).reshape(X.shape[0], -1)
def score(self, X, y):
loss_funct = self.loss_funct
if loss_funct is None:
loss_funct = self._num_mistakes
outputs = self.predict_proba(X)
targets = self._onehot(self._enc.transform(y))
mistakes = loss_funct(outputs, targets)
return - float(mistakes) / len(y) + 1
@property
def classes_(self):
return self._enc.classes_
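# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of nolearn): a minimal fit/score run on
# scikit-learn's digits data. It assumes Python 2 and the `gdbn` dependency
# imported above; the -1 layer sizes are filled in from the data as described
# in the constructor docstring, and the hyperparameters are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    from sklearn.datasets import load_digits
    digits = load_digits()
    clf = DBN([-1, 64, -1], learn_rates=0.3, epochs=5, verbose=1)
    clf.fit(digits.data, digits.target)
    print("training-set score: %.3f" % clf.score(digits.data, digits.target))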
| rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/nolearn-0.5/nolearn/dbn.py | Python | bsd-3-clause | 16,813 |
"""Utility module for creating transformation matrices
Basically this gives you the ability to construct
transformation matrices without needing OpenGL
or similar run-time engines. The result is that
design-time utilities can process files without
trading dependencies on a particular run-time.
This code is originally from the mcf.vrml processing
engine, and has only been cosmetically altered to
fit the new organizational pattern.
Note: to apply these matrices to a particular coordinate,
you would do the following:
p = ones( 4 )
p[:3] = coordinate
return dot( p, matrix)
That is, you use the homogenous coordinate, and
make it the first item in the dot'ing.
"""
from math import *
from vrml.arrays import *
try:
    # Optional C-accelerated matrix helpers (from the PyVRML97-accelerate
    # package); fall back to the pure-Python implementations below if missing.
    from vrml_accelerate import tmatrixaccel
except ImportError:
    tmatrixaccel = None
# used to determine whether angles are non-null
TWOPI = pi * 2.0
RADTODEG = 360./TWOPI
DEGTORAD = TWOPI/360.
# used to determine the center point of a transform
ORIGINPOINT = array([0,0,0,1],'f')
VERY_SMALL = 1e-300
def transformMatrix(
translation = (0,0,0),
center = (0,0,0),
rotation = (0,1,0,0),
scale = (1,1,1),
scaleOrientation = (0,1,0,0),
parentMatrix = None,
):
"""Convert VRML transform values to an overall matrix
Returns 4x4 transformation matrix
Note that this uses VRML standard for rotations
(angle last, and in radians).
This should return matrices which, when applied to
local-space coordinates, give you parent-space
coordinates.
parentMatrix if provided, should be the parent's
transformation matrix, a 4x4 matrix of such as
returned by this function.
"""
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
R,R1 = rotMatrix( rotation )
SO,SO1 = rotMatrix( scaleOrientation )
S,S1 = scaleMatrix( scale )
return compressMatrices( parentMatrix, T,C,R,SO,S,SO1,C1 )
def itransformMatrix(
translation = (0,0,0),
center = (0,0,0),
rotation = (0,1,0,0),
scale = (1,1,1),
scaleOrientation = (0,1,0,0),
parentMatrix = None,
):
"""Convert VRML transform values to an inverse transform matrix
Returns 4x4 transformation matrix
Note that this uses VRML standard for rotations
(angle last, and in radians).
This should return matrices which, when applied to
parent-space coordinates, give you local-space
coordinates for the corresponding transform.
Note: this is a substantially un-tested algorithm
though it seems to be properly constructed as far
as I can see. Whether to use dot(x, parentMatrix)
or the reverse is not immediately clear to me.
parentMatrix if provided, should be the child's
transformation matrix, a 4x4 matrix of such as
returned by this function.
"""
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
R,R1 = rotMatrix( rotation )
SO,SO1 = rotMatrix( scaleOrientation )
S,S1 = scaleMatrix( scale )
return compressMatrices( parentMatrix, C,SO, S1, SO1, R1, C1, T1)
def transformMatrices(
translation = (0,0,0),
center = (0,0,0),
rotation = (0,1,0,0),
scale = (1,1,1),
scaleOrientation = (0,1,0,0),
parentMatrix = None,
):
"""Calculate both forward and backward matrices for these parameters"""
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
R,R1 = rotMatrix( rotation )
SO,SO1 = rotMatrix( scaleOrientation )
S,S1 = scaleMatrix( scale )
return (
compressMatrices( parentMatrix, T,C,R,SO,S,SO1,C1 ),
compressMatrices( parentMatrix, C,SO, S1, SO1, R1, C1, T1)
)
def localMatrices(
translation = (0,0,0),
center = (0,0,0),
rotation = (0,1,0,0),
scale = (1,1,1),
scaleOrientation = (0,1,0,0),
parentMatrix = None,
):
"""Calculate (forward,inverse) matrices for this transform element"""
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
R,R1 = rotMatrix( rotation )
SO,SO1 = rotMatrix( scaleOrientation )
S,S1 = scaleMatrix( scale )
return (
compressMatrices( T,C,R,SO,S,SO1,C1 ),
compressMatrices( C,SO, S1, SO1, R1, C1, T1)
)
def compressMatrices( *matrices ):
"""Compress a set of matrices
Any (or all) of the matrices may be None,
if *all* are None, then the result will be None,
otherwise will be the dot product of all of the
matrices...
"""
if not matrices:
return None
else:
first = matrices[0]
matrices = matrices[1:]
for item in matrices:
if item is not None:
if first is None:
first = item
else:
first = dot( item, first )
return first
def center(
translation = (0,0,0),
center = (0,0,0),
parentMatrix = None,
):
"""Determine the center of rotation for a transform node
Returns the parent-space coordinate of the
node's center of rotation.
"""
if parentMatrix is None:
parentMatrix = identity(4)
T,T1 = transMatrix( translation )
C,C1 = transMatrix( center )
for x in (T,C):
        if x is not None:
parentMatrix = dot( x, parentMatrix)
return dot( ORIGINPOINT, parentMatrix )
if tmatrixaccel:
def rotMatrix( source = None ):
"""Convert a VRML rotation to rotation matrices
Returns (R, R') (R and the inverse of R), with both
being 4x4 transformation matrices.
or
None,None if the angle is an exact multiple of 2pi
x,y,z -- (normalised) rotational vector
a -- angle in radians
"""
if source is None:
return None,None
else:
(x,y,z, a) = source
if a % TWOPI:
return tmatrixaccel.rotMatrix( x,y,z,a ),tmatrixaccel.rotMatrix( x,y,z,-a )
return None,None
def scaleMatrix( source=None ):
"""Convert a VRML scale to scale matrices
Returns (S, S') (S and the inverse of S), with both
being 4x4 transformation matrices.
or
None,None if x == y == z == 1.0
x,y,z -- scale vector
"""
if source is None:
return None,None
else:
(x,y,z) = source[:3]
if x == y == z == 1.0:
return None, None
forward = tmatrixaccel.scaleMatrix( x,y,z )
backward = tmatrixaccel.scaleMatrix( 1.0/(x or VERY_SMALL),1.0/(y or VERY_SMALL), 1.0/(z or VERY_SMALL) )
return forward, backward
def transMatrix( source=None ):
"""Convert a VRML translation to translation matrices
Returns (T, T') (T and the inverse of T), with both
being 4x4 transformation matrices.
or
None,None if x == y == z == 0.0
x,y,z -- scale vector
"""
if source is None:
return None,None
else:
(x,y,z) = source[:3]
if x == y == z == 0.0:
return None, None
return tmatrixaccel.transMatrix( x,y,z ),tmatrixaccel.transMatrix( -x, -y, -z )
perspectiveMatrix = tmatrixaccel.perspectiveMatrix
orthoMatrix = tmatrixaccel.orthoMatrix
else:
def rotMatrix( source=None ):
"""Convert a VRML rotation to rotation matrices
Returns (R, R') (R and the inverse of R), with both
being 4x4 transformation matrices.
or
None,None if the angle is an exact multiple of 2pi
x,y,z -- (normalised) rotational vector
a -- angle in radians
"""
if source is None:
return None,None
else:
(x,y,z, a) = source
if a % TWOPI:
# normalize the rotation vector!
squared = x*x + y*y + z*z
if squared != 1.0:
length = squared ** .5
                    x /= length
                    y /= length
                    z /= length
c = cos( a )
c1 = cos( -a )
s = sin( a )
s1 = sin( -a )
t = 1-c
R = array( [
[ t*x*x+c, t*x*y+s*z, t*x*z-s*y, 0],
[ t*x*y-s*z, t*y*y+c, t*y*z+s*x, 0],
[ t*x*z+s*y, t*y*z-s*x, t*z*z+c, 0],
[ 0, 0, 0, 1]
] )
R1 = array( [
[ t*x*x+c1, t*x*y+s1*z, t*x*z-s1*y, 0],
[ t*x*y-s1*z, t*y*y+c1, t*y*z+s1*x, 0],
[ t*x*z+s1*y, t*y*z-s1*x, t*z*z+c1, 0],
[ 0, 0, 0, 1]
] )
return R, R1
else:
return None, None
def scaleMatrix( source=None ):
"""Convert a VRML scale to scale matrices
Returns (S, S') (S and the inverse of S), with both
being 4x4 transformation matrices.
or
None,None if x == y == z == 1.0
x,y,z -- scale vector
"""
if source is None:
return None,None
else:
(x,y,z) = source[:3]
if x == y == z == 1.0:
return None, None
S = array( [ [x,0,0,0], [0,y,0,0], [0,0,z,0], [0,0,0,1] ], 'f' )
S1 = array( [
[1./(x or VERY_SMALL),0,0,0],
[0,1./(y or VERY_SMALL),0,0],
[0,0,1./(z or VERY_SMALL),0],
[0,0,0,1] ], 'f'
)
return S, S1
def transMatrix( source=None ):
"""Convert a VRML translation to translation matrices
Returns (T, T') (T and the inverse of T), with both
being 4x4 transformation matrices.
or
None,None if x == y == z == 0.0
x,y,z -- scale vector
"""
if source is None:
return None,None
else:
(x,y,z) = source[:3]
if x == y == z == 0.0:
return None, None
T = array( [ [1,0,0,0], [0,1,0,0], [0,0,1,0], [x,y,z,1] ], 'f' )
T1 = array( [ [1,0,0,0], [0,1,0,0], [0,0,1,0], [-x,-y,-z,1] ], 'f' )
return T, T1
def perspectiveMatrix( fovy, aspect, zNear, zFar, inverse=False ):
"""Create a perspective matrix from given parameters
Note that this is the same matrix as for gluPerspective,
save that we are using radians...
"""
f = 1.0/tan( (fovy/2.0) ) # cotangent( fovy/2.0 )
zDelta = zNear-zFar
if inverse:
return array([
[aspect/f,0,0,0],
[0,1/(f or VERY_SMALL),0,0],
[0,0,0,zDelta/(2*zFar*zNear)],
[0,0,-1,(zFar+zNear)/(2*zFar*zNear)],
],'f')
else:
return array([
[f/aspect,0,0,0],
[0,f,0,0],
[0,0,(zFar+zNear)/zDelta,-1],
[0,0,(2*zFar*zNear)/zDelta,0]
],'f')
def orthoMatrix( left=-1.0, right=1.0, bottom=-1.0, top=1.0, zNear=-1.0, zFar=1.0 ):
"""Calculate an orthographic projection matrix
Similar to glOrtho
"""
tx = - ( right + left ) / float( right-left )
ty = - ( top + bottom ) / float( top-bottom )
tz = - ( zFar + zNear ) / float( zFar-zNear )
return array([
[2/(right-left), 0, 0, tx],
[0, 2/(top-bottom), 0, ty],
[0, 0, -2/(zFar-zNear), tz],
[0, 0, 0, 1],
], dtype='f')
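# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): compose a transform
# and apply it to a point using the homogeneous-coordinate recipe from the
# module docstring. The translation/rotation values are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    matrix = transformMatrix(
        translation=(1.0, 2.0, 3.0),
        rotation=(0, 1, 0, pi / 2),
    )
    # homogeneous point (x, y, z, 1); dot'ing it gives parent-space coordinates
    point = array([1.0, 0.0, 0.0, 1.0], 'f')
    print(dot(point, matrix))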
| menpo/vrml97 | vrml/vrml97/transformmatrix.py | Python | bsd-3-clause | 11,368 |
import getpass
from django.core.management.base import BaseCommand
from hc.accounts.forms import SignupForm
from hc.accounts.views import _make_user
class Command(BaseCommand):
help = """Create a super-user account."""
def handle(self, *args, **options):
email = None
password = None
while not email:
raw = input("Email address:")
form = SignupForm({"identity": raw})
if not form.is_valid():
self.stderr.write("Error: " + " ".join(form.errors["identity"]))
continue
email = form.cleaned_data["identity"]
while not password:
p1 = getpass.getpass()
p2 = getpass.getpass("Password (again):")
if p1.strip() == "":
self.stderr.write("Error: Blank passwords aren't allowed.")
continue
if p1 != p2:
self.stderr.write("Error: Your passwords didn't match.")
continue
password = p1
user = _make_user(email)
user.set_password(password)
user.is_staff = True
user.is_superuser = True
user.save()
return "Superuser created successfully."
| healthchecks/healthchecks | hc/accounts/management/commands/createsuperuser.py | Python | bsd-3-clause | 1,226 |
from django.utils.datetime_safe import datetime
from django.utils.translation import ugettext_lazy as _
from poradnia.users.models import User
def users_total(*args, **kwargs):
return User.objects.count()
users_total.name = _("Users total")
users_total.description = _("Number of users registered total")
def users_monthly(*args, **kwargs):
today = datetime.today().replace(day=1)
return User.objects.filter(created_on__date__gte=today).count()
users_monthly.name = _("Users monthly")
users_monthly.description = _("Number of users registered in month")
def users_active(*args, **kwargs):
return User.objects.active().count()
users_active.name = _("Active users")
users_active.description = _("Number of active users in month")
def users_active_staff(*args, **kwargs):
return User.objects.active().filter(is_staff=True).count()
users_active_staff.name = _("Active staff member")
users_active_staff.description = _(
"Number of team members who have made at least one message in the current month."
)
| watchdogpolska/poradnia.siecobywatelska.pl | poradnia/users/metric.py | Python | bsd-3-clause | 1,040 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
from os.path import dirname
from os.path import join
from setuptools import find_packages
from setuptools import setup
# requirements
with open('requirements.txt') as f:
required = f.read().splitlines()
with open(join(dirname(__file__), 'pyrcmd3/VERSION'), 'rb') as f:
version = f.read().decode('ascii').strip()
setup(
name="pyrcmd3",
version=version,
description="Python3 Remote Commands toolkit",
long_description=open('README.rst').read(),
author="Marreta",
author_email="[email protected]",
maintainer="Bruno Costa, Kairo Araujo",
maintainer_email="[email protected]",
url="https://github.com/marreta/pyrcmd3/",
keywords="Python3 Remote Command Commands SSH Toolkit",
packages=find_packages(exclude=['*.test', 'tests.*']),
package_data={'': ['license.txt', 'pyrcmd3/VERSION']},
install_requires=required,
include_package_data=True,
license='BSD',
platforms='Posix; MacOS X; Windows',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Topic :: System :: Shells',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| marreta-sources/pyrcmd3 | setup.py | Python | bsd-3-clause | 1,533 |
#########################################################################
# Copyright (C) 2007, 2008, 2009
# Alex Clemesha <[email protected]> & Dorian Raymer <[email protected]>
#
# This module is part of codenode, and is distributed under the terms
# of the BSD License: http://www.opensource.org/licenses/bsd-license.php
#########################################################################
from django.conf.urls.defaults import *
from codenode.frontend.bookshelf.views import bookshelf, folders
from codenode.frontend.bookshelf.views import load_bookshelf_data, change_notebook_location
from codenode.frontend.bookshelf.views import empty_trash, new_notebook
urlpatterns = patterns('',
url(r'^$', bookshelf, name='bookshelf'),
url(r'^load$', load_bookshelf_data, name='load_bookshelf_data'),
url(r'^folders$', folders, name='folders'),
url(r'^move$', change_notebook_location, name='change_notebook_location'),
url(r'^new$', new_notebook, name='new_notebook'),
url(r'^emptytrash$', empty_trash, name='empty_trash'),
)
| ccordoba12/codenode | codenode/frontend/bookshelf/urls.py | Python | bsd-3-clause | 1,059 |
import unittest
from django.db import connections
from wp_frontman.models import Site, Blog
from wp_frontman.wp_helpers import month_archives, year_archives
from wp_frontman.tests.utils import MultiBlogMixin
class HelpersArchivesTestCase(MultiBlogMixin, unittest.TestCase):
def setUp(self):
super(HelpersArchivesTestCase, self).setUp()
self.cursor = connections['test'].cursor()
self.cursor_mu = connections['test_multi'].cursor()
def testMonthlyArchives(self):
self.reset_blog_class()
blog = Blog(1)
self.cursor.execute("""
select distinct date_format(post_date, '%%Y %%m') as monthly_archive
from %s
where post_type='post' and post_status='publish'
order by monthly_archive desc
""" % blog.models.Post._meta.db_table)
archive_dates = [tuple(map(int, r[0].split())) for r in self.cursor.fetchall()]
archives = month_archives(blog)
self.assertEqual(archive_dates, [(a['year'], a['month']) for a in archives])
self.assertEqual(archives[0]['get_absolute_url'], '/%02i/%02i/' % archive_dates[0])
def testYearlyArchives(self):
self.reset_blog_class()
blog = Blog(1)
self.cursor.execute("""
select distinct year(post_date) as y
from %s
where post_type='post' and post_status='publish'
order by y desc
""" % blog.models.Post._meta.db_table)
archive_dates = [r[0] for r in self.cursor.fetchall()]
archives = year_archives(blog)
self.assertEqual(archive_dates, [a['year'] for a in archives])
self.assertEqual(archives[0]['get_absolute_url'], '/%02i/' % archive_dates[0])
| ludoo/wpkit | attic/ngfrontman/wp_frontman/tests/test_helpers_archives.py | Python | bsd-3-clause | 1,742 |
from behave import *
# Unique to Scenario: User cancels attempt to request new account
@when('I cancel the request account form')
def impl(context):
context.browser.find_by_css('.cancel').first.click()
| nlhkabu/connect | bdd/features/steps/request_account.py | Python | bsd-3-clause | 208 |
# Copyright (c) 2015 Microsoft Corporation
"""
>>> from z3 import *
>>> b = BitVec('b', 16)
>>> Extract(12, 2, b).sexpr()
'((_ extract 12 2) b)'
>>> Extract(12, 2, b)
Extract(12, 2, b)
>>> SignExt(10, b).sexpr()
'((_ sign_extend 10) b)'
>>> SignExt(10, b)
SignExt(10, b)
>>> ZeroExt(10, b).sexpr()
'((_ zero_extend 10) b)'
>>> ZeroExt(10, b)
ZeroExt(10, b)
>>> RepeatBitVec(3, b).sexpr()
'((_ repeat 3) b)'
>>> RepeatBitVec(3, b)
RepeatBitVec(3, b)
"""
if __name__ == "__main__":
import doctest
if doctest.testmod().failed:
exit(1)
| dstaple/z3test | regressions/python/bug.2.py | Python | mit | 551 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Magic Encode
This module tries to convert an UTF-8 string to an encoded string for the printer.
It uses trial and error in order to guess the right codepage.
The code is based on the encoding-code in py-xml-escpos by @fvdsn.
:author: `Patrick Kanzler <[email protected]>`_
:organization: `python-escpos <https://github.com/python-escpos>`_
:copyright: Copyright (c) 2016 Patrick Kanzler and Frédéric van der Essen
:license: MIT
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
from .constants import CODEPAGE_CHANGE
from .exceptions import Error
from .codepages import CodePages
import six
class Encoder(object):
"""Takes a list of available code spaces. Picks the right one for a
given character.
Note: To determine the code page, it needs to do the conversion, and
thus already knows what the final byte in the target encoding would
be. Nevertheless, the API of this class doesn't return the byte.
    The caller is expected to do the character conversion itself.
$ python -m timeit -s "{u'ö':'a'}.get(u'ö')"
100000000 loops, best of 3: 0.0133 usec per loop
$ python -m timeit -s "u'ö'.encode('latin1')"
100000000 loops, best of 3: 0.0141 usec per loop
"""
def __init__(self, codepage_map):
self.codepages = codepage_map
self.available_encodings = set(codepage_map.keys())
self.available_characters = {}
self.used_encodings = set()
def get_sequence(self, encoding):
return int(self.codepages[encoding])
def get_encoding_name(self, encoding):
"""Given an encoding provided by the user, will return a
canonical encoding name; and also validate that the encoding
is supported.
TODO: Support encoding aliases: pc437 instead of cp437.
"""
encoding = CodePages.get_encoding_name(encoding)
if encoding not in self.codepages:
raise ValueError((
'Encoding "{}" cannot be used for the current profile. '
'Valid encodings are: {}'
).format(encoding, ','.join(self.codepages.keys())))
return encoding
@staticmethod
def _get_codepage_char_list(encoding):
"""Get codepage character list
Gets characters 128-255 for a given code page, as an array.
:param encoding: The name of the encoding. This must appear in the CodePage list
"""
codepage = CodePages.get_encoding(encoding)
if 'data' in codepage:
encodable_chars = list("".join(codepage['data']))
assert(len(encodable_chars) == 128)
return encodable_chars
elif 'python_encode' in codepage:
encodable_chars = [u" "] * 128
for i in range(0, 128):
codepoint = i + 128
try:
encodable_chars[i] = bytes([codepoint]).decode(codepage['python_encode'])
except UnicodeDecodeError:
# Non-encodable character, just skip it
pass
return encodable_chars
raise LookupError("Can't find a known encoding for {}".format(encoding))
def _get_codepage_char_map(self, encoding):
""" Get codepage character map
Process an encoding and return a map of UTF-characters to code points
in this encoding.
This is generated once only, and returned from a cache.
:param encoding: The name of the encoding.
"""
# Skip things that were loaded previously
if encoding in self.available_characters:
return self.available_characters[encoding]
codepage_char_list = self._get_codepage_char_list(encoding)
codepage_char_map = dict((utf8, i + 128) for (i, utf8) in enumerate(codepage_char_list))
self.available_characters[encoding] = codepage_char_map
return codepage_char_map
def can_encode(self, encoding, char):
"""Determine if a character is encodeable in the given code page.
:param encoding: The name of the encoding.
:param char: The character to attempt to encode.
"""
available_map = {}
try:
available_map = self._get_codepage_char_map(encoding)
except LookupError:
return False
# Decide whether this character is encodeable in this code page
is_ascii = ord(char) < 128
is_encodable = char in available_map
return is_ascii or is_encodable
@staticmethod
def _encode_char(char, charmap, defaultchar):
""" Encode a single character with the given encoding map
:param char: char to encode
:param charmap: dictionary for mapping characters in this code page
"""
if ord(char) < 128:
return ord(char)
if char in charmap:
return charmap[char]
return ord(defaultchar)
def encode(self, text, encoding, defaultchar='?'):
""" Encode text under the given encoding
:param text: Text to encode
:param encoding: Encoding name to use (must be defined in capabilities)
:param defaultchar: Fallback for non-encodable characters
"""
codepage_char_map = self._get_codepage_char_map(encoding)
output_bytes = bytes([self._encode_char(char, codepage_char_map, defaultchar) for char in text])
return output_bytes
def __encoding_sort_func(self, item):
key, index = item
return (
key in self.used_encodings,
index
)
def find_suitable_encoding(self, char):
"""The order of our search is a specific one:
1. code pages that we already tried before; there is a good
chance they might work again, reducing the search space,
and by re-using already used encodings we might also
           reduce the number of codepage change instructions we have
to send. Still, any performance gains will presumably be
fairly minor.
2. code pages in lower ESCPOS slots first. Presumably, they
are more likely to be supported, so if a printer profile
is missing or incomplete, we might increase our change
that the code page we pick for this character is actually
supported.
"""
sorted_encodings = sorted(
self.codepages.items(),
key=self.__encoding_sort_func)
for encoding, _ in sorted_encodings:
if self.can_encode(encoding, char):
                # This encoding worked; add it to the set of used ones.
self.used_encodings.add(encoding)
return encoding
def split_writable_text(encoder, text, encoding):
"""Splits off as many characters from the begnning of text as
are writable with "encoding". Returns a 2-tuple (writable, rest).
"""
if not encoding:
return None, text
for idx, char in enumerate(text):
if encoder.can_encode(encoding, char):
continue
return text[:idx], text[idx:]
return text, None
class MagicEncode(object):
"""A helper that helps us to automatically switch to the right
code page to encode any given Unicode character.
This will consider the printers supported codepages, according
to the printer profile, and if a character cannot be encoded
with the current profile, it will attempt to find a suitable one.
If the printer does not support a suitable code page, it can
insert an error character.
"""
def __init__(self, driver, encoding=None, disabled=False,
defaultsymbol='?', encoder=None):
"""
:param driver:
:param encoding: If you know the current encoding of the printer
when initializing this class, set it here. If the current
encoding is unknown, the first character emitted will be a
codepage switch.
:param disabled:
:param defaultsymbol:
:param encoder:
"""
if disabled and not encoding:
raise Error('If you disable magic encode, you need to define an encoding!')
self.driver = driver
self.encoder = encoder or Encoder(driver.profile.get_code_pages())
self.encoding = self.encoder.get_encoding_name(encoding) if encoding else None
self.defaultsymbol = defaultsymbol
self.disabled = disabled
def force_encoding(self, encoding):
"""Sets a fixed encoding. The change is emitted right away.
        From now on, this buffer will not switch the code page anymore.
However, it will still keep track of the current code page.
"""
if not encoding:
self.disabled = False
else:
self.write_with_encoding(encoding, None)
self.disabled = True
def write(self, text):
"""Write the text, automatically switching encodings.
"""
if self.disabled:
self.write_with_encoding(self.encoding, text)
return
# See how far we can go into the text with the current encoding
to_write, text = split_writable_text(self.encoder, text, self.encoding)
if to_write:
self.write_with_encoding(self.encoding, to_write)
while text:
# See if any of the code pages that the printer profile
# supports can encode this character.
encoding = self.encoder.find_suitable_encoding(text[0])
if not encoding:
self._handle_character_failed(text[0])
text = text[1:]
continue
# Write as much text as possible with the encoding found.
to_write, text = split_writable_text(self.encoder, text, encoding)
if to_write:
self.write_with_encoding(encoding, to_write)
def _handle_character_failed(self, char):
"""Called when no codepage was found to render a character.
"""
# Writing the default symbol via write() allows us to avoid
        # unnecessary codepage switches.
self.write(self.defaultsymbol)
def write_with_encoding(self, encoding, text):
if text is not None and type(text) is not six.text_type:
raise Error("The supplied text has to be unicode, but is of type {type}.".format(
type=type(text)
))
# We always know the current code page; if the new codepage
# is different, emit a change command.
if encoding != self.encoding:
self.encoding = encoding
self.driver._raw(
CODEPAGE_CHANGE +
six.int2byte(self.encoder.get_sequence(encoding)))
if text:
self.driver._raw(self.encoder.encode(text, encoding))
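# ---------------------------------------------------------------------------
# Illustrative sketch (not part of python-escpos): exercising Encoder directly
# with a hand-written codepage map. The names "CP437"/"CP850" and their slot
# numbers are assumptions; a real map comes from the printer profile via
# profile.get_code_pages(), and whether a character is encodable depends on
# the capabilities database shipped with the library.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    encoder = Encoder({"CP437": 0, "CP850": 2})
    for candidate in ("CP437", "CP850"):
        print(candidate, encoder.can_encode(candidate, u"ü"))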
| belono/python-escpos | src/escpos/magicencode.py | Python | mit | 10,963 |
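# AutoKey script: turn the current text selection into a new phrase.
# `clipboard`, `dialog`, and `engine` are globals injected by the AutoKey
# scripting API; the short sleep gives the selection a moment to settle
# before it is read.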
import time
time.sleep(0.25)
contents = clipboard.get_selection()
retCode, abbr = dialog.input_dialog("New Abbreviation", "Choose an abbreviation for the new phrase")
if retCode == 0:
if len(contents) > 20:
title = contents[0:17] + "..."
else:
title = contents
folder = engine.get_folder("My Phrases")
engine.create_abbreviation(folder, title, abbr, contents) | andresgomezvidal/autokey_scripts | data/Scripts/Sample_Scripts/Abbreviation from selection.py | Python | mit | 391 |
DEBUG = True
DISCOVERY_SERVICE_URL = 'localhost:6100/discoveryservice'
| pwgn/microtut | commentservice/settings.py | Python | mit | 72 |
# encoding: utf-8
"""Definition of french pronoun related features"""
from __future__ import unicode_literals
PERSONAL = "personal"
SPECIAL_PERSONAL = "special_personal"
SNUMERAL = "snumeral"
POSSESSIVE = "possessive"
DEMONSTRATIV = "demonstrativ"
RELATIVE = "relative"
INTERROGATIVE = "interrogative"
INDEFINITE = "indefinite"
| brouberol/pynlg | pynlg/lexicon/feature/pronoun/fr.py | Python | mit | 332 |
import kol.Error as Error
from GenericRequest import GenericRequest
from kol.manager import PatternManager
from kol.util import Report
class SendMessageRequest(GenericRequest):
def __init__(self, session, message):
super(SendMessageRequest, self).__init__(session)
self.url = session.serverURL + "sendmessage.php?toid="
self.requestData['action'] = 'send'
self.requestData['pwd'] = session.pwd
self.requestData['towho'] = message["userId"]
self.requestData['message'] = message["text"]
# Add the items to the message.
if "items" in message and len(message["items"]) > 0:
i = 1
for item in message["items"]:
self.requestData['whichitem%s' % i] = item["id"]
self.requestData['howmany%s' % i] = item["quantity"]
i += 1
# Add meat to the message.
if "meat" in message:
self.requestData["sendmeat"] = message["meat"]
else:
self.requestData["sendmeat"] = 0
def parseResponse(self):
hardcoreRoninPattern = PatternManager.getOrCompilePattern('userInHardcoreRonin')
ignoringPattern = PatternManager.getOrCompilePattern('userIgnoringUs')
notEnoughItemsPattern = PatternManager.getOrCompilePattern('notEnoughItemsToSend')
sentMessagePattern = PatternManager.getOrCompilePattern('messageSent')
trendyPattern = PatternManager.getOrCompilePattern('kmailNotSentUserTrendy')
ignoringUserPattern = PatternManager.getOrCompilePattern('weAreIgnoringUser')
if hardcoreRoninPattern.search(self.responseText):
raise Error.Error("Unable to send items or meat. User is in hardcore or ronin.", Error.USER_IN_HARDCORE_RONIN)
elif ignoringPattern.search(self.responseText):
raise Error.Error("Unable to send message. User is ignoring us.", Error.USER_IS_IGNORING)
elif notEnoughItemsPattern.search(self.responseText):
raise Error.Error("You don't have enough of one of the items you're trying to send.", Error.ITEM_NOT_FOUND)
elif trendyPattern.search(self.responseText):
raise Error.Error("Unable to send items or meat. User is too trendy.", Error.USER_IN_HARDCORE_RONIN)
elif ignoringUserPattern.search(self.responseText):
raise Error.Error("Unable to send message. We are ignoring the other player.", Error.USER_IS_IGNORING)
        elif sentMessagePattern.search(self.responseText) is None:
Report.alert("system", "Received unknown response when attempting to send a message.")
Report.alert("system", self.responseText)
raise Error.Error("Unknown error", Error.REQUEST_FATAL)
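# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pykol): the shape of the ``message`` dict
# this request expects. The ids/quantities are arbitrary assumptions and no
# request is sent here; a logged-in Session would be used roughly like
# ``SendMessageRequest(session, message).doRequest()`` (doRequest is assumed
# from the GenericRequest pattern, not verified here).
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    example_message = {
        "userId": 177432,                       # recipient player id (assumed)
        "text": "Hello there!",                 # kmail body
        "items": [{"id": 194, "quantity": 1}],  # optional attached items
        "meat": 100,                            # optional attached meat
    }
    print(example_message)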
| KevZho/buffbot | kol/request/SendMessageRequest.py | Python | mit | 2,734 |
# -*- coding: utf-8 -*-
#
# openMVG documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 30 11:05:58 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'openMVG'
copyright = u'2014, OpenMVG authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7'
# The full version, including alpha/beta/rc tags.
release = '0.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes",]
if not on_rtd: # only import and set the theme if we're building docs locally
html_theme = 'armstrong'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "openMVG library"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'openMVGdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'openMVG.tex', u'openMVG Documentation',
u'Pierre MOULON \\& Bruno DUISIT', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openmvg', u'openMVG Documentation',
[u'openMVG authors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'openMVG', u'openMVG Documentation',
u'openMVG authors', 'openMVG', 'an open Multiple View Geometry library.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'openMVG'
epub_author = u'openMVG authors'
epub_publisher = u'Pierre MOULON & Bruno DUISIT & Fabien CASTAN'
epub_copyright = u'2013-2014, openMVG authors'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'openMVG'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| Smozeley/openMVG | docs/sphinx/rst/conf.py | Python | mit | 10,634 |
import time
import pytest
from redis.client import Redis
from redis.exceptions import LockError, LockNotOwnedError
from redis.lock import Lock
from .conftest import _get_client
@pytest.mark.onlynoncluster
class TestLock:
@pytest.fixture()
def r_decoded(self, request):
return _get_client(Redis, request=request, decode_responses=True)
def get_lock(self, redis, *args, **kwargs):
kwargs["lock_class"] = Lock
return redis.lock(*args, **kwargs)
def test_lock(self, r):
lock = self.get_lock(r, "foo")
assert lock.acquire(blocking=False)
assert r.get("foo") == lock.local.token
assert r.ttl("foo") == -1
lock.release()
assert r.get("foo") is None
def test_lock_token(self, r):
lock = self.get_lock(r, "foo")
self._test_lock_token(r, lock)
def test_lock_token_thread_local_false(self, r):
lock = self.get_lock(r, "foo", thread_local=False)
self._test_lock_token(r, lock)
def _test_lock_token(self, r, lock):
assert lock.acquire(blocking=False, token="test")
assert r.get("foo") == b"test"
assert lock.local.token == b"test"
assert r.ttl("foo") == -1
lock.release()
assert r.get("foo") is None
assert lock.local.token is None
def test_locked(self, r):
lock = self.get_lock(r, "foo")
assert lock.locked() is False
lock.acquire(blocking=False)
assert lock.locked() is True
lock.release()
assert lock.locked() is False
def _test_owned(self, client):
lock = self.get_lock(client, "foo")
assert lock.owned() is False
lock.acquire(blocking=False)
assert lock.owned() is True
lock.release()
assert lock.owned() is False
lock2 = self.get_lock(client, "foo")
assert lock.owned() is False
assert lock2.owned() is False
lock2.acquire(blocking=False)
assert lock.owned() is False
assert lock2.owned() is True
lock2.release()
assert lock.owned() is False
assert lock2.owned() is False
def test_owned(self, r):
self._test_owned(r)
def test_owned_with_decoded_responses(self, r_decoded):
self._test_owned(r_decoded)
def test_competing_locks(self, r):
lock1 = self.get_lock(r, "foo")
lock2 = self.get_lock(r, "foo")
assert lock1.acquire(blocking=False)
assert not lock2.acquire(blocking=False)
lock1.release()
assert lock2.acquire(blocking=False)
assert not lock1.acquire(blocking=False)
lock2.release()
def test_timeout(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
assert 8 < r.ttl("foo") <= 10
lock.release()
def test_float_timeout(self, r):
lock = self.get_lock(r, "foo", timeout=9.5)
assert lock.acquire(blocking=False)
assert 8 < r.pttl("foo") <= 9500
lock.release()
def test_blocking_timeout(self, r):
lock1 = self.get_lock(r, "foo")
assert lock1.acquire(blocking=False)
bt = 0.2
sleep = 0.05
lock2 = self.get_lock(r, "foo", sleep=sleep, blocking_timeout=bt)
start = time.monotonic()
assert not lock2.acquire()
# The elapsed duration should be less than the total blocking_timeout
assert bt > (time.monotonic() - start) > bt - sleep
lock1.release()
def test_context_manager(self, r):
# blocking_timeout prevents a deadlock if the lock can't be acquired
# for some reason
with self.get_lock(r, "foo", blocking_timeout=0.2) as lock:
assert r.get("foo") == lock.local.token
assert r.get("foo") is None
def test_context_manager_raises_when_locked_not_acquired(self, r):
r.set("foo", "bar")
with pytest.raises(LockError):
with self.get_lock(r, "foo", blocking_timeout=0.1):
pass
def test_high_sleep_small_blocking_timeout(self, r):
lock1 = self.get_lock(r, "foo")
assert lock1.acquire(blocking=False)
sleep = 60
bt = 1
lock2 = self.get_lock(r, "foo", sleep=sleep, blocking_timeout=bt)
start = time.monotonic()
assert not lock2.acquire()
        # the elapsed time is less than the blocking_timeout as the lock is
# unattainable given the sleep/blocking_timeout configuration
assert bt > (time.monotonic() - start)
lock1.release()
def test_releasing_unlocked_lock_raises_error(self, r):
lock = self.get_lock(r, "foo")
with pytest.raises(LockError):
lock.release()
def test_releasing_lock_no_longer_owned_raises_error(self, r):
lock = self.get_lock(r, "foo")
lock.acquire(blocking=False)
# manually change the token
r.set("foo", "a")
with pytest.raises(LockNotOwnedError):
lock.release()
# even though we errored, the token is still cleared
assert lock.local.token is None
def test_extend_lock(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
assert 8000 < r.pttl("foo") <= 10000
assert lock.extend(10)
assert 16000 < r.pttl("foo") <= 20000
lock.release()
def test_extend_lock_replace_ttl(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
assert 8000 < r.pttl("foo") <= 10000
assert lock.extend(10, replace_ttl=True)
assert 8000 < r.pttl("foo") <= 10000
lock.release()
def test_extend_lock_float(self, r):
lock = self.get_lock(r, "foo", timeout=10.0)
assert lock.acquire(blocking=False)
assert 8000 < r.pttl("foo") <= 10000
assert lock.extend(10.0)
assert 16000 < r.pttl("foo") <= 20000
lock.release()
def test_extending_unlocked_lock_raises_error(self, r):
lock = self.get_lock(r, "foo", timeout=10)
with pytest.raises(LockError):
lock.extend(10)
def test_extending_lock_with_no_timeout_raises_error(self, r):
lock = self.get_lock(r, "foo")
assert lock.acquire(blocking=False)
with pytest.raises(LockError):
lock.extend(10)
lock.release()
def test_extending_lock_no_longer_owned_raises_error(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
r.set("foo", "a")
with pytest.raises(LockNotOwnedError):
lock.extend(10)
def test_reacquire_lock(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
assert r.pexpire("foo", 5000)
assert r.pttl("foo") <= 5000
assert lock.reacquire()
assert 8000 < r.pttl("foo") <= 10000
lock.release()
def test_reacquiring_unlocked_lock_raises_error(self, r):
lock = self.get_lock(r, "foo", timeout=10)
with pytest.raises(LockError):
lock.reacquire()
def test_reacquiring_lock_with_no_timeout_raises_error(self, r):
lock = self.get_lock(r, "foo")
assert lock.acquire(blocking=False)
with pytest.raises(LockError):
lock.reacquire()
lock.release()
def test_reacquiring_lock_no_longer_owned_raises_error(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert lock.acquire(blocking=False)
r.set("foo", "a")
with pytest.raises(LockNotOwnedError):
lock.reacquire()
@pytest.mark.onlynoncluster
class TestLockClassSelection:
def test_lock_class_argument(self, r):
class MyLock:
def __init__(self, *args, **kwargs):
pass
lock = r.lock("foo", lock_class=MyLock)
assert type(lock) == MyLock
| mozillazg/redis-py-doc | tests/test_lock.py | Python | mit | 7,948 |
from django.apps import AppConfig
class DownloadConfig(AppConfig):
name = 'download'
| abhijithanilkumar/ns-3-AppStore | src/download/apps.py | Python | mit | 91 |
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
# example which maximizes the sum of a list of integers
# each of which can be 0 or 1
import random
from deap import base
from deap import creator
from deap import tools
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
# Attribute generator: define 'attr_bool' to be an attribute ('gene')
# which corresponds to integers sampled uniformly
# from the range [0,1] (i.e. 0 or 1 with equal
# probability)
toolbox.register("attr_bool", random.randint, 0, 1)
# Structure initializers: define 'individual' to be an individual
# consisting of 100 'attr_bool' elements ('genes')
toolbox.register("individual", tools.initRepeat, creator.Individual,
toolbox.attr_bool, 100)
# define the population to be a list of 'individual's
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# the goal ('fitness') function to be maximized
def evalOneMax(individual):
return sum(individual),
#----------
# Operator registration
#----------
# register the goal / fitness function
toolbox.register("evaluate", evalOneMax)
# register the crossover operator
toolbox.register("mate", tools.cxTwoPoint)
# register a mutation operator with a probability to
# flip each attribute/gene of 0.05
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
# operator for selecting individuals for breeding the next
# generation: each individual of the current generation
# is replaced by the 'fittest' (best) of three individuals
# drawn randomly from the current generation.
toolbox.register("select", tools.selTournament, tournsize=3)
#----------
def main():
random.seed(64)
# create an initial population of 300 individuals (where
# each individual is a list of integers)
pop = toolbox.population(n=300)
# CXPB is the probability with which two individuals
# are crossed
#
# MUTPB is the probability for mutating an individual
#
# NGEN is the number of generations for which the
# evolution runs
CXPB, MUTPB, NGEN = 0.5, 0.2, 40
print("Start of evolution")
# Evaluate the entire population
fitnesses = list(map(toolbox.evaluate, pop))
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
print(" Evaluated %i individuals" % len(pop))
# Begin the evolution
for g in range(NGEN):
print("-- Generation %i --" % g)
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
# cross two individuals with probability CXPB
if random.random() < CXPB:
toolbox.mate(child1, child2)
# fitness values of the children
# must be recalculated later
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
# mutate an individual with probability MUTPB
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
print(" Evaluated %i individuals" % len(invalid_ind))
# The population is entirely replaced by the offspring
pop[:] = offspring
# Gather all the fitnesses in one list and print the stats
fits = [ind.fitness.values[0] for ind in pop]
length = len(pop)
mean = sum(fits) / length
sum2 = sum(x*x for x in fits)
std = abs(sum2 / length - mean**2)**0.5
print(" Min %s" % min(fits))
print(" Max %s" % max(fits))
print(" Avg %s" % mean)
print(" Std %s" % std)
print("-- End of (successful) evolution --")
best_ind = tools.selBest(pop, 1)[0]
print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
if __name__ == "__main__":
main()
| GrimRanger/GeneticAlgorithm | helps/deap/deap-master/examples/ga/onemax.py | Python | mit | 5,221 |
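The hand-written generational loop in onemax.py can also be expressed with DEAP's built-in eaSimple driver. A condensed sketch, assuming the same creator/toolbox registrations (evaluate, mate, mutate, select) as above:

```python
import numpy
from deap import algorithms, tools

def main_easimple():
    # Assumes the toolbox defined in onemax.py is in scope.
    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)                       # keep the single best individual
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=40,
                                   stats=stats, halloffame=hof, verbose=True)
    return pop, log, hof
```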
import gym
import gym.wrappers
import gym.envs
import gym.spaces
import traceback
import logging
try:
from gym.wrappers.monitoring import logger as monitor_logger
monitor_logger.setLevel(logging.WARNING)
except Exception as e:
traceback.print_exc()
import os
import os.path as osp
from rllab.envs.base import Env, Step
from rllab.core.serializable import Serializable
from rllab.spaces.box import Box
from rllab.spaces.discrete import Discrete
from rllab.spaces.product import Product
from rllab.misc import logger
def convert_gym_space(space):
if isinstance(space, gym.spaces.Box):
return Box(low=space.low, high=space.high)
elif isinstance(space, gym.spaces.Discrete):
return Discrete(n=space.n)
elif isinstance(space, gym.spaces.Tuple):
return Product([convert_gym_space(x) for x in space.spaces])
else:
raise NotImplementedError
class CappedCubicVideoSchedule(object):
# Copied from gym, since this method is frequently moved around
def __call__(self, count):
if count < 1000:
return int(round(count ** (1. / 3))) ** 3 == count
else:
return count % 1000 == 0
class FixedIntervalVideoSchedule(object):
def __init__(self, interval):
self.interval = interval
def __call__(self, count):
return count % self.interval == 0
class NoVideoSchedule(object):
def __call__(self, count):
return False
class GymEnv(Env, Serializable):
def __init__(self, env_name, record_video=True, video_schedule=None, log_dir=None, record_log=True,
force_reset=False):
if log_dir is None:
if logger.get_snapshot_dir() is None:
logger.log("Warning: skipping Gym environment monitoring since snapshot_dir not configured.")
else:
log_dir = os.path.join(logger.get_snapshot_dir(), "gym_log")
Serializable.quick_init(self, locals())
env = gym.envs.make(env_name)
self.env = env
self.env_id = env.spec.id
assert not (not record_log and record_video)
if log_dir is None or record_log is False:
self.monitoring = False
else:
if not record_video:
video_schedule = NoVideoSchedule()
else:
if video_schedule is None:
video_schedule = CappedCubicVideoSchedule()
self.env = gym.wrappers.Monitor(self.env, log_dir, video_callable=video_schedule, force=True)
self.monitoring = True
self._observation_space = convert_gym_space(env.observation_space)
logger.log("observation space: {}".format(self._observation_space))
self._action_space = convert_gym_space(env.action_space)
logger.log("action space: {}".format(self._action_space))
self._horizon = env.spec.tags['wrapper_config.TimeLimit.max_episode_steps']
self._log_dir = log_dir
self._force_reset = force_reset
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
return self._action_space
@property
def horizon(self):
return self._horizon
def reset(self):
if self._force_reset and self.monitoring:
from gym.wrappers.monitoring import Monitor
assert isinstance(self.env, Monitor)
recorder = self.env.stats_recorder
if recorder is not None:
recorder.done = True
return self.env.reset()
def step(self, action):
next_obs, reward, done, info = self.env.step(action)
return Step(next_obs, reward, done, **info)
def render(self):
self.env.render()
def terminate(self):
if self.monitoring:
self.env._close()
if self._log_dir is not None:
print("""
***************************
Training finished! You can upload results to OpenAI Gym by running the following command:
python scripts/submit_gym.py %s
***************************
""" % self._log_dir)
| brain-research/mirage-rl-qprop | rllab/envs/gym_env.py | Python | mit | 4,134 |
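A short usage sketch of the wrapper above; the environment id is illustrative, and it assumes a gym version compatible with this rllab code:

```python
from rllab.envs.gym_env import GymEnv

env = GymEnv("CartPole-v0", record_video=False, record_log=False)
obs = env.reset()
for _ in range(100):
    action = env.action_space.sample()   # rllab spaces expose sample()
    step = env.step(action)              # Step(observation, reward, done, **info)
    if step.done:
        obs = env.reset()
env.terminate()
```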
# coding=utf-8
"""
The Lists API endpoint
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/lists/
Schema: https://api.mailchimp.com/schema/3.0/Lists/Instance.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
from mailchimp3.entities.listabusereports import ListAbuseReports
from mailchimp3.entities.listactivity import ListActivity
from mailchimp3.entities.listclients import ListClients
from mailchimp3.entities.listgrowthhistory import ListGrowthHistory
from mailchimp3.entities.listinterestcategories import ListInterestCategories
from mailchimp3.entities.listmembers import ListMembers
from mailchimp3.entities.listmergefields import ListMergeFields
from mailchimp3.entities.listsegments import ListSegments
from mailchimp3.entities.listsignupforms import ListSignupForms
from mailchimp3.entities.listwebhooks import ListWebhooks
from mailchimp3.helpers import check_email
class Lists(BaseApi):
"""
A MailChimp list is a powerful and flexible tool that helps you manage your contacts.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(Lists, self).__init__(*args, **kwargs)
self.endpoint = 'lists'
self.list_id = None
self.abuse_reports = ListAbuseReports(self)
self.activity = ListActivity(self)
self.clients = ListClients(self)
self.growth_history = ListGrowthHistory(self)
self.interest_categories = ListInterestCategories(self)
self.members = ListMembers(self)
self.merge_fields = ListMergeFields(self)
self.segments = ListSegments(self)
self.signup_forms = ListSignupForms(self)
self.webhooks = ListWebhooks(self)
def create(self, data):
"""
Create a new list in your MailChimp account.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*,
"contact": object*
{
"company": string*,
"address1": string*,
"city": string*,
"state": string*,
"zip": string*,
"country": string*
},
"permission_reminder": string*,
"campaign_defaults": object*
{
"from_name": string*,
"from_email": string*,
"subject": string*,
"language": string*
},
"email_type_option": boolean
}
"""
if 'name' not in data:
raise KeyError('The list must have a name')
if 'contact' not in data:
raise KeyError('The list must have a contact')
if 'company' not in data['contact']:
raise KeyError('The list contact must have a company')
if 'address1' not in data['contact']:
raise KeyError('The list contact must have a address1')
if 'city' not in data['contact']:
raise KeyError('The list contact must have a city')
if 'state' not in data['contact']:
raise KeyError('The list contact must have a state')
if 'zip' not in data['contact']:
raise KeyError('The list contact must have a zip')
if 'country' not in data['contact']:
raise KeyError('The list contact must have a country')
if 'permission_reminder' not in data:
raise KeyError('The list must have a permission_reminder')
if 'campaign_defaults' not in data:
raise KeyError('The list must have a campaign_defaults')
if 'from_name' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a from_name')
if 'from_email' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a from_email')
check_email(data['campaign_defaults']['from_email'])
if 'subject' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a subject')
if 'language' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a language')
if 'email_type_option' not in data:
raise KeyError('The list must have an email_type_option')
if data['email_type_option'] not in [True, False]:
raise TypeError('The list email_type_option must be True or False')
response = self._mc_client._post(url=self._build_path(), data=data)
if response is not None:
self.list_id = response['id']
else:
self.list_id = None
return response
def update_members(self, list_id, data):
"""
Batch subscribe or unsubscribe list members.
Only the members array is required in the request body parameters.
Within the members array, each member requires an email_address
        and either a status or status_if_new. The update_existing parameter
        should also be provided to help prevent accidental updates to existing
        members; it defaults to false if not present.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"members": array*
[
{
"email_address": string*,
"status": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending'),
"status_if_new": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending')
}
],
"update_existing": boolean*
}
"""
self.list_id = list_id
if 'members' not in data:
raise KeyError('The update must have at least one member')
else:
if not len(data['members']) <= 500:
raise ValueError('You may only batch sub/unsub 500 members at a time')
for member in data['members']:
if 'email_address' not in member:
raise KeyError('Each list member must have an email_address')
check_email(member['email_address'])
if 'status' not in member and 'status_if_new' not in member:
raise KeyError('Each list member must have either a status or a status_if_new')
valid_statuses = ['subscribed', 'unsubscribed', 'cleaned', 'pending']
if 'status' in member and member['status'] not in valid_statuses:
raise ValueError('The list member status must be one of "subscribed", "unsubscribed", "cleaned", or '
'"pending"')
if 'status_if_new' in member and member['status_if_new'] not in valid_statuses:
raise ValueError('The list member status_if_new must be one of "subscribed", "unsubscribed", '
'"cleaned", or "pending"')
if 'update_existing' not in data:
data['update_existing'] = False
return self._mc_client._post(url=self._build_path(list_id), data=data)
def all(self, get_all=False, **queryparams):
"""
Get information about all lists in the account.
:param get_all: Should the query get all results
:type get_all: :py:class:`bool`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
queryparams['count'] = integer
queryparams['offset'] = integer
queryparams['before_date_created'] = string
queryparams['since_date_created'] = string
queryparams['before_campaign_last_sent'] = string
queryparams['since_campaign_last_sent'] = string
queryparams['email'] = string
queryparams['sort_field'] = string (Must be 'date_created')
queryparams['sort_dir'] = string (Must be one of 'ASC' or 'DESC')
"""
self.list_id = None
if get_all:
return self._iterate(url=self._build_path(), **queryparams)
else:
return self._mc_client._get(url=self._build_path(), **queryparams)
def get(self, list_id, **queryparams):
"""
Get information about a specific list in your MailChimp account.
        Results include list members who have signed up but haven’t confirmed
        their subscription yet, as well as unsubscribed or cleaned members.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
"""
self.list_id = list_id
return self._mc_client._get(url=self._build_path(list_id), **queryparams)
def update(self, list_id, data):
"""
Update the settings for a specific list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*,
"contact": object*
{
"company": string*,
"address1": string*,
"city": string*,
"state": string*,
"zip": string*,
"country": string*
},
"permission_reminder": string*,
"campaign_defaults": object*
{
"from_name": string*,
"from_email": string*,
"subject": string*,
"language": string*
},
"email_type_option": boolean
}
"""
self.list_id = list_id
if 'name' not in data:
raise KeyError('The list must have a name')
if 'contact' not in data:
raise KeyError('The list must have a contact')
if 'company' not in data['contact']:
raise KeyError('The list contact must have a company')
if 'address1' not in data['contact']:
raise KeyError('The list contact must have a address1')
if 'city' not in data['contact']:
raise KeyError('The list contact must have a city')
if 'state' not in data['contact']:
raise KeyError('The list contact must have a state')
if 'zip' not in data['contact']:
raise KeyError('The list contact must have a zip')
if 'country' not in data['contact']:
raise KeyError('The list contact must have a country')
if 'permission_reminder' not in data:
raise KeyError('The list must have a permission_reminder')
if 'campaign_defaults' not in data:
raise KeyError('The list must have a campaign_defaults')
if 'from_name' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a from_name')
if 'from_email' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a from_email')
check_email(data['campaign_defaults']['from_email'])
if 'subject' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a subject')
if 'language' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a language')
if 'email_type_option' not in data:
raise KeyError('The list must have an email_type_option')
if data['email_type_option'] not in [True, False]:
raise TypeError('The list email_type_option must be True or False')
return self._mc_client._patch(url=self._build_path(list_id), data=data)
def delete(self, list_id):
"""
Delete a list from your MailChimp account. If you delete a list,
you’ll lose the list history—including subscriber activity,
unsubscribes, complaints, and bounces. You’ll also lose subscribers’
email addresses, unless you exported and backed up your list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
"""
self.list_id = list_id
return self._mc_client._delete(url=self._build_path(list_id))
| charlesthk/python-mailchimp | mailchimp3/entities/lists.py | Python | mit | 12,424 |
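A usage sketch of the Lists endpoint above through the mailchimp3 client. The API key, field values and constructor keywords are placeholders, and constructor arguments vary slightly between mailchimp3 versions:

```python
from mailchimp3 import MailChimp

client = MailChimp(mc_api="your-api-key-here")  # keyword name depends on version

new_list = client.lists.create(data={
    "name": "Newsletter",
    "contact": {
        "company": "Example Co", "address1": "1 Main St", "city": "Springfield",
        "state": "IL", "zip": "62701", "country": "US",
    },
    "permission_reminder": "You signed up for updates on example.com",
    "campaign_defaults": {
        "from_name": "Example Co", "from_email": "news@example.com",
        "subject": "Monthly update", "language": "en",
    },
    "email_type_option": False,
})

# Batch-subscribe one member; update_existing defaults to False if omitted.
client.lists.update_members(list_id=new_list["id"], data={
    "members": [{"email_address": "user@example.com", "status_if_new": "subscribed"}],
    "update_existing": False,
})
```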
"""
The MIT License
Copyright (c) 2009 Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys, os
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), ".."),]
import unittest
import oauth2 as oauth
import random
import time
import urllib
import urlparse
from types import ListType
# Fix for python2.5 compatibility
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
class TestError(unittest.TestCase):
def test_message(self):
try:
raise oauth.Error
except oauth.Error, e:
self.assertEqual(e.message, 'OAuth error occured.')
msg = 'OMG THINGS BROKE!!!!'
try:
raise oauth.Error(msg)
except oauth.Error, e:
self.assertEqual(e.message, msg)
class TestGenerateFunctions(unittest.TestCase):
def test_build_auth_header(self):
header = oauth.build_authenticate_header()
self.assertEqual(header['WWW-Authenticate'], 'OAuth realm=""')
self.assertEqual(len(header), 1)
realm = 'http://example.myrealm.com/'
header = oauth.build_authenticate_header(realm)
self.assertEqual(header['WWW-Authenticate'], 'OAuth realm="%s"' %
realm)
self.assertEqual(len(header), 1)
def test_escape(self):
string = 'http://whatever.com/~someuser/?test=test&other=other'
self.assert_('~' in oauth.escape(string))
string = '../../../../../../../etc/passwd'
self.assert_('../' not in oauth.escape(string))
def test_gen_nonce(self):
nonce = oauth.generate_nonce()
self.assertEqual(len(nonce), 8)
nonce = oauth.generate_nonce(20)
self.assertEqual(len(nonce), 20)
def test_gen_verifier(self):
verifier = oauth.generate_verifier()
self.assertEqual(len(verifier), 8)
verifier = oauth.generate_verifier(16)
self.assertEqual(len(verifier), 16)
def test_gen_timestamp(self):
exp = int(time.time())
now = oauth.generate_timestamp()
self.assertEqual(exp, now)
class TestConsumer(unittest.TestCase):
def setUp(self):
self.key = 'my-key'
self.secret = 'my-secret'
self.consumer = oauth.Consumer(key=self.key, secret=self.secret)
def test_init(self):
self.assertEqual(self.consumer.key, self.key)
self.assertEqual(self.consumer.secret, self.secret)
def test_basic(self):
self.assertRaises(ValueError, lambda: oauth.Consumer(None, None))
self.assertRaises(ValueError, lambda: oauth.Consumer('asf', None))
self.assertRaises(ValueError, lambda: oauth.Consumer(None, 'dasf'))
def test_str(self):
res = dict(parse_qsl(str(self.consumer)))
self.assertTrue('oauth_consumer_key' in res)
self.assertTrue('oauth_consumer_secret' in res)
self.assertEquals(res['oauth_consumer_key'], self.consumer.key)
self.assertEquals(res['oauth_consumer_secret'], self.consumer.secret)
class TestToken(unittest.TestCase):
def setUp(self):
self.key = 'my-key'
self.secret = 'my-secret'
self.token = oauth.Token(self.key, self.secret)
def test_basic(self):
self.assertRaises(ValueError, lambda: oauth.Token(None, None))
self.assertRaises(ValueError, lambda: oauth.Token('asf', None))
self.assertRaises(ValueError, lambda: oauth.Token(None, 'dasf'))
def test_init(self):
self.assertEqual(self.token.key, self.key)
self.assertEqual(self.token.secret, self.secret)
self.assertEqual(self.token.callback, None)
self.assertEqual(self.token.callback_confirmed, None)
self.assertEqual(self.token.verifier, None)
def test_set_callback(self):
self.assertEqual(self.token.callback, None)
self.assertEqual(self.token.callback_confirmed, None)
cb = 'http://www.example.com/my-callback'
self.token.set_callback(cb)
self.assertEqual(self.token.callback, cb)
self.assertEqual(self.token.callback_confirmed, 'true')
self.token.set_callback(None)
self.assertEqual(self.token.callback, None)
# TODO: The following test should probably not pass, but it does
# To fix this, check for None and unset 'true' in set_callback
# Additionally, should a confirmation truly be done of the callback?
self.assertEqual(self.token.callback_confirmed, 'true')
def test_set_verifier(self):
self.assertEqual(self.token.verifier, None)
v = oauth.generate_verifier()
self.token.set_verifier(v)
self.assertEqual(self.token.verifier, v)
self.token.set_verifier()
self.assertNotEqual(self.token.verifier, v)
self.token.set_verifier('')
self.assertEqual(self.token.verifier, '')
def test_get_callback_url(self):
self.assertEqual(self.token.get_callback_url(), None)
self.token.set_verifier()
self.assertEqual(self.token.get_callback_url(), None)
cb = 'http://www.example.com/my-callback?save=1&return=true'
v = oauth.generate_verifier()
self.token.set_callback(cb)
self.token.set_verifier(v)
url = self.token.get_callback_url()
verifier_str = '&oauth_verifier=%s' % v
self.assertEqual(url, '%s%s' % (cb, verifier_str))
cb = 'http://www.example.com/my-callback-no-query'
v = oauth.generate_verifier()
self.token.set_callback(cb)
self.token.set_verifier(v)
url = self.token.get_callback_url()
verifier_str = '?oauth_verifier=%s' % v
self.assertEqual(url, '%s%s' % (cb, verifier_str))
def test_to_string(self):
string = 'oauth_token_secret=%s&oauth_token=%s' % (self.secret,
self.key)
self.assertEqual(self.token.to_string(), string)
self.token.set_callback('http://www.example.com/my-callback')
string += '&oauth_callback_confirmed=true'
self.assertEqual(self.token.to_string(), string)
def _compare_tokens(self, new):
self.assertEqual(self.token.key, new.key)
self.assertEqual(self.token.secret, new.secret)
# TODO: What about copying the callback to the new token?
# self.assertEqual(self.token.callback, new.callback)
self.assertEqual(self.token.callback_confirmed,
new.callback_confirmed)
# TODO: What about copying the verifier to the new token?
# self.assertEqual(self.token.verifier, new.verifier)
def test_to_string(self):
tok = oauth.Token('tooken', 'seecret')
self.assertEqual(str(tok), 'oauth_token_secret=seecret&oauth_token=tooken')
def test_from_string(self):
self.assertRaises(ValueError, lambda: oauth.Token.from_string(''))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('blahblahblah'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('blah=blah'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret=asfdasf'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=asfdasf'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=&oauth_token_secret='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=tooken%26oauth_token_secret=seecret'))
string = self.token.to_string()
new = oauth.Token.from_string(string)
self._compare_tokens(new)
self.token.set_callback('http://www.example.com/my-callback')
string = self.token.to_string()
new = oauth.Token.from_string(string)
self._compare_tokens(new)
class TestRequest(unittest.TestCase):
def test_setter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method)
try:
url = req.url
self.fail("AttributeError should have been raised on empty url.")
except AttributeError:
pass
except Exception, e:
self.fail(str(e))
def test_deleter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method, url)
try:
del req.url
url = req.url
self.fail("AttributeError should have been raised on empty url.")
except AttributeError:
pass
except Exception, e:
self.fail(str(e))
def test_url(self):
url1 = "http://example.com:80/foo.php"
url2 = "https://example.com:443/foo.php"
exp1 = "http://example.com/foo.php"
exp2 = "https://example.com/foo.php"
method = "GET"
req = oauth.Request(method, url1)
self.assertEquals(req.url, exp1)
req = oauth.Request(method, url2)
self.assertEquals(req.url, exp2)
def test_get_parameter(self):
url = "http://example.com"
method = "GET"
params = {'oauth_consumer' : 'asdf'}
req = oauth.Request(method, url, parameters=params)
self.assertEquals(req.get_parameter('oauth_consumer'), 'asdf')
self.assertRaises(oauth.Error, req.get_parameter, 'blah')
def test_get_nonoauth_parameters(self):
oauth_params = {
'oauth_consumer': 'asdfasdfasdf'
}
other_params = {
'foo': 'baz',
'bar': 'foo',
'multi': ['FOO','BAR']
}
params = oauth_params
params.update(other_params)
req = oauth.Request("GET", "http://example.com", params)
self.assertEquals(other_params, req.get_nonoauth_parameters())
def test_to_header(self):
realm = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
header, value = req.to_header(realm).items()[0]
parts = value.split('OAuth ')
vars = parts[1].split(', ')
self.assertTrue(len(vars), (len(params) + 1))
res = {}
for v in vars:
var, val = v.split('=')
res[var] = urllib.unquote(val.strip('"'))
self.assertEquals(realm, res['realm'])
del res['realm']
self.assertTrue(len(res), len(params))
for key, val in res.items():
self.assertEquals(val, params.get(key))
def test_to_postdata(self):
realm = "http://sp.example.com/"
params = {
'multi': ['FOO','BAR'],
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
flat = [('multi','FOO'),('multi','BAR')]
del params['multi']
flat.extend(params.items())
kf = lambda x: x[0]
self.assertEquals(sorted(flat, key=kf), sorted(parse_qsl(req.to_postdata()), key=kf))
def test_to_url(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
exp = urlparse.urlparse("%s?%s" % (url, urllib.urlencode(params)))
res = urlparse.urlparse(req.to_url())
self.assertEquals(exp.scheme, res.scheme)
self.assertEquals(exp.netloc, res.netloc)
self.assertEquals(exp.path, res.path)
a = parse_qs(exp.query)
b = parse_qs(res.query)
self.assertEquals(a, b)
def test_get_normalized_parameters(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'multi': ['FOO','BAR'],
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
srtd = [(k, v if type(v) != ListType else sorted(v)) for k,v in sorted(params.items())]
self.assertEquals(urllib.urlencode(srtd, True), res)
def test_get_normalized_parameters_ignores_auth_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_signature': "some-random-signature-%d" % random.randint(1000, 2000),
'oauth_token': "ad180jjd733klru7",
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
self.assertNotEquals(urllib.urlencode(sorted(params.items())), res)
foo = params.copy()
del foo["oauth_signature"]
self.assertEqual(urllib.urlencode(sorted(foo.items())), res)
def test_get_normalized_string_escapes_spaces_properly(self):
url = "http://sp.example.com/"
params = {
"some_random_data": random.randint(100, 1000),
"data": "This data with a random number (%d) has spaces!" % random.randint(1000, 2000),
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
expected = urllib.urlencode(sorted(params.items())).replace('+', '%20')
self.assertEqual(expected, res)
def test_sign_request(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200"
}
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
params['oauth_token'] = tok.key
params['oauth_consumer_key'] = con.key
req = oauth.Request(method="GET", url=url, parameters=params)
methods = {
'TQ6vGQ5A6IZn8dmeGB4+/Jl3EMI=': oauth.SignatureMethod_HMAC_SHA1(),
'con-test-secret&tok-test-secret': oauth.SignatureMethod_PLAINTEXT()
}
for exp, method in methods.items():
req.sign_request(method, con, tok)
self.assertEquals(req['oauth_signature_method'], method.name)
self.assertEquals(req['oauth_signature'], exp)
def test_from_request(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
headers = req.to_header()
# Test from the headers
req = oauth.Request.from_request("GET", url, headers)
self.assertEquals(req.method, "GET")
self.assertEquals(req.url, url)
self.assertEquals(params, req.copy())
# Test with bad OAuth headers
bad_headers = {
'Authorization' : 'OAuth this is a bad header'
}
self.assertRaises(oauth.Error, oauth.Request.from_request, "GET",
url, bad_headers)
# Test getting from query string
qs = urllib.urlencode(params)
req = oauth.Request.from_request("GET", url, query_string=qs)
exp = parse_qs(qs, keep_blank_values=False)
for k, v in exp.iteritems():
exp[k] = urllib.unquote(v[0])
self.assertEquals(exp, req.copy())
# Test that a boned from_request() call returns None
req = oauth.Request.from_request("GET", url)
self.assertEquals(None, req)
def test_from_token_and_callback(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
req = oauth.Request.from_token_and_callback(tok)
self.assertFalse('oauth_callback' in req)
self.assertEquals(req['oauth_token'], tok.key)
req = oauth.Request.from_token_and_callback(tok, callback=url)
self.assertTrue('oauth_callback' in req)
self.assertEquals(req['oauth_callback'], url)
def test_from_consumer_and_token(self):
url = "http://sp.example.com/"
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
req = oauth.Request.from_consumer_and_token(con, token=tok,
http_method="GET", http_url=url)
self.assertEquals(req['oauth_token'], tok.key)
self.assertEquals(req['oauth_consumer_key'], con.key)
class SignatureMethod_Bad(oauth.SignatureMethod):
name = "BAD"
def signing_base(self, request, consumer, token):
return ""
def sign(self, request, consumer, token):
return "invalid-signature"
class TestServer(unittest.TestCase):
def setUp(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
self.consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
self.token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = self.token.key
params['oauth_consumer_key'] = self.consumer.key
self.request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.request.sign_request(signature_method, self.consumer, self.token)
def test_init(self):
server = oauth.Server(signature_methods={'HMAC-SHA1' : oauth.SignatureMethod_HMAC_SHA1()})
self.assertTrue('HMAC-SHA1' in server.signature_methods)
self.assertTrue(isinstance(server.signature_methods['HMAC-SHA1'],
oauth.SignatureMethod_HMAC_SHA1))
server = oauth.Server()
self.assertEquals(server.signature_methods, {})
def test_add_signature_method(self):
server = oauth.Server()
res = server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertTrue(len(res) == 1)
self.assertTrue('HMAC-SHA1' in res)
self.assertTrue(isinstance(res['HMAC-SHA1'],
oauth.SignatureMethod_HMAC_SHA1))
res = server.add_signature_method(oauth.SignatureMethod_PLAINTEXT())
self.assertTrue(len(res) == 2)
self.assertTrue('PLAINTEXT' in res)
self.assertTrue(isinstance(res['PLAINTEXT'],
oauth.SignatureMethod_PLAINTEXT))
def test_verify_request(self):
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
parameters = server.verify_request(self.request, self.consumer,
self.token)
self.assertTrue('bar' in parameters)
self.assertTrue('foo' in parameters)
self.assertTrue('multi' in parameters)
self.assertEquals(parameters['bar'], 'blerg')
self.assertEquals(parameters['foo'], 59)
self.assertEquals(parameters['multi'], ['FOO','BAR'])
def test_no_version(self):
url = "http://sp.example.com/"
params = {
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
self.consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
self.token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = self.token.key
params['oauth_consumer_key'] = self.consumer.key
self.request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.request.sign_request(signature_method, self.consumer, self.token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
parameters = server.verify_request(self.request, self.consumer,
self.token)
def test_invalid_version(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '222.9922',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['foo','bar'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.Error, server.verify_request, request,
consumer, token)
def test_invalid_signature_method(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '1.0',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = SignatureMethod_Bad()
request.sign_request(signature_method, consumer, token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.Error, server.verify_request, request,
consumer, token)
def test_missing_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '1.0',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
del request['oauth_signature']
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.MissingSignature, server.verify_request,
request, consumer, token)
# Request Token: http://oauth-sandbox.sevengoslings.net/request_token
# Auth: http://oauth-sandbox.sevengoslings.net/authorize
# Access Token: http://oauth-sandbox.sevengoslings.net/access_token
# Two-legged: http://oauth-sandbox.sevengoslings.net/two_legged
# Three-legged: http://oauth-sandbox.sevengoslings.net/three_legged
# Key: bd37aed57e15df53
# Secret: 0e9e6413a9ef49510a4f68ed02cd
class TestClient(unittest.TestCase):
# oauth_uris = {
# 'request_token': '/request_token.php',
# 'access_token': '/access_token.php'
# }
oauth_uris = {
'request_token': '/request_token',
'authorize': '/authorize',
'access_token': '/access_token',
'two_legged': '/two_legged',
'three_legged': '/three_legged'
}
consumer_key = 'bd37aed57e15df53'
consumer_secret = '0e9e6413a9ef49510a4f68ed02cd'
host = 'http://oauth-sandbox.sevengoslings.net'
def setUp(self):
self.consumer = oauth.Consumer(key=self.consumer_key,
secret=self.consumer_secret)
self.body = {
'foo': 'bar',
'bar': 'foo',
'multi': ['FOO','BAR'],
'blah': 599999
}
def _uri(self, type):
uri = self.oauth_uris.get(type)
if uri is None:
raise KeyError("%s is not a valid OAuth URI type." % type)
return "%s%s" % (self.host, uri)
def test_access_token_get(self):
"""Test getting an access token via GET."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "GET")
self.assertEquals(int(resp['status']), 200)
def test_access_token_post(self):
"""Test getting an access token via POST."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "POST")
self.assertEquals(int(resp['status']), 200)
res = dict(parse_qsl(content))
self.assertTrue('oauth_token' in res)
self.assertTrue('oauth_token_secret' in res)
def _two_legged(self, method):
client = oauth.Client(self.consumer, None)
return client.request(self._uri('two_legged'), method,
body=urllib.urlencode(self.body))
def test_two_legged_post(self):
"""A test of a two-legged OAuth POST request."""
resp, content = self._two_legged("POST")
self.assertEquals(int(resp['status']), 200)
def test_two_legged_get(self):
"""A test of a two-legged OAuth GET request."""
resp, content = self._two_legged("GET")
self.assertEquals(int(resp['status']), 200)
if __name__ == "__main__":
unittest.main()
| oauth-xx/python-oauth2 | tests/test_oauth.py | Python | mit | 28,822 |
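Outside the test suite, the same oauth2 surface can sign and issue a two-legged request. A sketch with placeholder credentials and URL:

```python
import oauth2 as oauth

consumer = oauth.Consumer(key="con-test-key", secret="con-test-secret")

# Let the Client handle signing and the HTTP round trip.
client = oauth.Client(consumer)
resp, content = client.request("http://example.com/api/resource", "GET")

# Or build and sign a request manually to inspect the generated parameters.
req = oauth.Request.from_consumer_and_token(
    consumer, token=None, http_method="GET",
    http_url="http://example.com/api/resource")
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
print(req.to_url())
```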
from hamper.interfaces import ChatCommandPlugin, Command
try:
# Python 2
import HTMLParser
html = HTMLParser.HTMLParser()
except ImportError:
# Python 3
import html.parser
html = html.parser.HTMLParser()
import re
import requests
import json
class Lookup(ChatCommandPlugin):
name = 'lookup'
priority = 2
short_desc = 'lookup <something> - look something up'
long_desc = ('lookup and cite <something> - look something up and cite a '
'source\n')
# Inspired by http://googlesystem.blogspot.com/2009/12/on-googles-unofficial-dictionary-api.html # noqa
search_url = "http://www.google.com/dictionary/json?callback=dict_api.callbacks.id100&q={query}&sl=en&tl=en&restrict=pr%2Cde&client=te" # noqa
def setup(self, loader):
super(Lookup, self).setup(loader)
class Lookup(Command):
name = 'lookup'
regex = '^(lookup\s+and\s+cite|lookup)\s*(\d+)?\s+(.*)'
def command(self, bot, comm, groups):
lookup_type = groups[0]
def_num = int(groups[1]) if groups[1] else 1
query = groups[2]
resp = requests.get(self.plugin.search_url.format(query=query))
if resp.status_code != 200:
raise Exception(
"Lookup Error: A non 200 status code was returned"
)
# We have actually asked for this cruft to be tacked onto our JSON
# response. When I tried to remove the callback parameter from the
# URL the api broke, so I'm going to leave it. Put it down, and
# walk away...
# Strip off the JS callback
gr = resp.content.strip('dict_api.callbacks.id100(')
gr = gr.strip(',200,null)')
gr = gr.replace('\\x', "\u00") # Google uses javascript JSON crap
gr = json.loads(gr)
if 'primaries' in gr:
entries = gr['primaries'][0]['entries']
elif 'webDefinitions' in gr:
entries = gr['webDefinitions'][0]['entries']
else:
bot.reply(comm, "No definition found")
return False
seen = 0
definition = None
url = None
for entry in entries:
if not entry['type'] == 'meaning':
continue
for term in entry['terms']:
if term['type'] == 'url':
url = re.sub('<[^<]+?>', '', term['text'])
else:
seen += 1
if not definition and seen == def_num:
definition = term['text']
if not definition or def_num > seen:
bot.reply(
comm,
"Looks like there might not be %s definitions" % def_num
)
else:
bot.reply(
comm, "%s (%s/%s)" % (
html.unescape(definition), def_num, seen
)
)
if 'cite' in lookup_type:
if url:
bot.reply(comm, html.unescape(url))
else:
bot.reply(comm, '[No citation]')
# Always let the other plugins run
return False
| iankronquist/hamper | hamper/plugins/dictionary.py | Python | mit | 3,376 |
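The command regex above does the parsing work; a standalone illustration of how it splits an incoming message into its three capture groups:

```python
import re

regex = r'^(lookup\s+and\s+cite|lookup)\s*(\d+)?\s+(.*)'
lookup_type, def_num, query = re.match(regex, 'lookup and cite 2 recursion').groups()
# lookup_type == 'lookup and cite', def_num == '2', query == 'recursion'
```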
import pytest
from collections import namedtuple
import jenkinsapi
from jenkinsapi.plugins import Plugins
from jenkinsapi.utils.requester import Requester
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.job import Job
from jenkinsapi.custom_exceptions import JenkinsAPIException
DATA = {}
TWO_JOBS_DATA = {
'jobs': [
{'name': 'job_one',
'url': 'http://localhost:8080/job/job_one',
'color': 'blue'},
{'name': 'job_two',
'url': 'http://localhost:8080/job/job_two',
'color': 'blue'},
]
}
MULTIBRANCH_JOBS_DATA = {
'jobs': [
{'name': 'multibranch-repo/master',
'url': 'http://localhost:8080/job/multibranch-repo/job/master',
'color': 'blue'},
{'name': 'multibranch-repo/develop',
'url': 'http://localhost:8080/job/multibranch-repo/job/develop',
'color': 'blue'},
]
}
SCAN_MULTIBRANCH_PIPELINE_LOG = """
Started by timer
[Fri Jul 05 06:46:00 CEST 2019] Starting branch indexing...
Connecting to https://stash.macq.eu using Jenkins/****** (jenkins-ldap)
Repository type: Git
Looking up internal/base for branches
Checking branch master from internal/base
'Jenkinsfile' found
Met criteria
No changes detected: master (still at 26d4d8a673f57a957fd5a23f5adfe0be02089294)
1 branches were processed
Looking up internal/base for pull requests
0 pull requests were processed
[Fri Jul 05 06:46:01 CEST 2019] Finished branch indexing. Indexing took 1.1 sec
Finished: SUCCESS
"""
@pytest.fixture(scope='function')
def jenkins(monkeypatch):
def fake_poll(cls, tree=None): # pylint: disable=unused-argument
return {}
monkeypatch.setattr(Jenkins, '_poll', fake_poll)
return Jenkins('http://localhost:8080',
username='foouser', password='foopassword')
def test__clone(jenkins):
cloned = jenkins._clone()
assert id(cloned) != id(jenkins)
assert cloned == jenkins
def test_stored_passwords(jenkins):
assert jenkins.requester.password == 'foopassword'
assert jenkins.requester.username == 'foouser'
def test_reload(monkeypatch):
class FakeResponse(object):
status_code = 200
text = '{}'
def fake_get_url(
url, # pylint: disable=unused-argument
params=None, # pylint: disable=unused-argument
headers=None, # pylint: disable=unused-argument
allow_redirects=True, # pylint: disable=unused-argument
stream=False): # pylint: disable=unused-argument
return FakeResponse()
monkeypatch.setattr(Requester, 'get_url', fake_get_url)
mock_requester = Requester(username='foouser', password='foopassword')
jenkins = Jenkins(
'http://localhost:8080/',
username='foouser', password='foopassword',
requester=mock_requester)
jenkins.poll()
def test_get_jobs_list(monkeypatch):
def fake_jenkins_poll(cls, tree=None): # pylint: disable=unused-argument
return TWO_JOBS_DATA
def fake_job_poll(cls, tree=None): # pylint: disable=unused-argument
return {}
monkeypatch.setattr(JenkinsBase, '_poll', fake_jenkins_poll)
monkeypatch.setattr(Jenkins, '_poll', fake_jenkins_poll)
monkeypatch.setattr(Job, '_poll', fake_job_poll)
jenkins = Jenkins('http://localhost:8080/',
username='foouser', password='foopassword')
for idx, job_name in enumerate(jenkins.get_jobs_list()):
assert job_name == TWO_JOBS_DATA['jobs'][idx]['name']
for idx, job_name in enumerate(jenkins.jobs.keys()):
assert job_name == TWO_JOBS_DATA['jobs'][idx]['name']
def test_create_new_job_fail(mocker, monkeypatch):
def fake_jenkins_poll(cls, tree=None): # pylint: disable=unused-argument
return TWO_JOBS_DATA
def fake_job_poll(cls, tree=None): # pylint: disable=unused-argument
return {}
monkeypatch.setattr(JenkinsBase, '_poll', fake_jenkins_poll)
monkeypatch.setattr(Jenkins, '_poll', fake_jenkins_poll)
monkeypatch.setattr(Job, '_poll', fake_job_poll)
mock_requester = Requester(username='foouser', password='foopassword')
mock_requester.post_xml_and_confirm_status = mocker.MagicMock(
return_value=''
)
jenkins = Jenkins('http://localhost:8080/',
username='foouser', password='foopassword',
requester=mock_requester)
with pytest.raises(JenkinsAPIException) as ar:
jenkins.create_job('job_new', None)
assert 'Job XML config cannot be empty' in str(ar.value)
def test_create_multibranch_pipeline_job(mocker, monkeypatch):
def fake_jenkins_poll(cls, tree=None): # pylint: disable=unused-argument
# return multibranch jobs and other jobs.
# create_multibranch_pipeline_job is supposed to filter out the MULTIBRANCH jobs
return {
'jobs': TWO_JOBS_DATA['jobs'] + MULTIBRANCH_JOBS_DATA['jobs']
}
def fake_job_poll(cls, tree=None): # pylint: disable=unused-argument
return {}
monkeypatch.setattr(JenkinsBase, '_poll', fake_jenkins_poll)
monkeypatch.setattr(Jenkins, '_poll', fake_jenkins_poll)
monkeypatch.setattr(Job, '_poll', fake_job_poll)
mock_requester = Requester(username='foouser', password='foopassword')
mock_requester.post_xml_and_confirm_status = mocker.MagicMock(
return_value=''
)
mock_requester.post_and_confirm_status = mocker.MagicMock(
return_value=''
)
get_response = namedtuple('get_response', 'text')
mock_requester.get_url = mocker.MagicMock(
return_value=get_response(text=SCAN_MULTIBRANCH_PIPELINE_LOG)
)
jenkins = Jenkins('http://localhost:8080/',
username='foouser', password='foopassword',
requester=mock_requester)
jobs = jenkins.create_multibranch_pipeline_job("multibranch-repo", "multibranch-xml-content")
for idx, job_instance in enumerate(jobs):
assert job_instance.name == MULTIBRANCH_JOBS_DATA['jobs'][idx]['name']
# make sure we didn't get more jobs.
assert len(MULTIBRANCH_JOBS_DATA['jobs']) == len(jobs)
def test_get_jenkins_obj_from_url(mocker, monkeypatch):
def fake_jenkins_poll(cls, tree=None): # pylint: disable=unused-argument
return TWO_JOBS_DATA
def fake_job_poll(cls, tree=None): # pylint: disable=unused-argument
return {}
monkeypatch.setattr(JenkinsBase, '_poll', fake_jenkins_poll)
monkeypatch.setattr(Jenkins, '_poll', fake_jenkins_poll)
monkeypatch.setattr(Job, '_poll', fake_job_poll)
mock_requester = Requester(username='foouser', password='foopassword')
mock_requester.post_xml_and_confirm_status = mocker.MagicMock(
return_value=''
)
jenkins = Jenkins('http://localhost:8080/',
username='foouser', password='foopassword',
requester=mock_requester)
new_jenkins = jenkins.get_jenkins_obj_from_url('http://localhost:8080/')
assert new_jenkins == jenkins
new_jenkins = jenkins.get_jenkins_obj_from_url('http://localhost:8080/foo')
assert new_jenkins != jenkins
def test_get_jenkins_obj(mocker, monkeypatch):
def fake_jenkins_poll(cls, tree=None): # pylint: disable=unused-argument
return TWO_JOBS_DATA
def fake_job_poll(cls, tree=None): # pylint: disable=unused-argument
return {}
monkeypatch.setattr(JenkinsBase, '_poll', fake_jenkins_poll)
monkeypatch.setattr(Jenkins, '_poll', fake_jenkins_poll)
monkeypatch.setattr(Job, '_poll', fake_job_poll)
mock_requester = Requester(username='foouser', password='foopassword')
mock_requester.post_xml_and_confirm_status = mocker.MagicMock(
return_value=''
)
jenkins = Jenkins('http://localhost:8080/',
username='foouser', password='foopassword',
requester=mock_requester)
new_jenkins = jenkins.get_jenkins_obj()
assert new_jenkins == jenkins
def test_get_version(monkeypatch):
class MockResponse(object):
def __init__(self):
self.headers = {}
self.headers['X-Jenkins'] = '1.542'
def fake_poll(cls, tree=None): # pylint: disable=unused-argument
return {}
monkeypatch.setattr(Jenkins, '_poll', fake_poll)
def fake_get(cls, *arga, **kwargs): # pylint: disable=unused-argument
return MockResponse()
monkeypatch.setattr(Requester, 'get_and_confirm_status', fake_get)
jenkins = Jenkins('http://foobar:8080/',
username='foouser', password='foopassword')
assert jenkins.version == '1.542'
def test_get_version_nonexistent(mocker):
class MockResponse(object):
status_code = 200
headers = {}
text = '{}'
mock_requester = Requester(username='foouser', password='foopassword')
mock_requester.get_url = mocker.MagicMock(
return_value=MockResponse()
)
jenkins = Jenkins('http://localhost:8080',
username='foouser', password='foopassword',
requester=mock_requester)
assert jenkins.version == '0.0'
def test_get_master_data(mocker):
class MockResponse(object):
status_code = 200
headers = {}
text = '{}'
mock_requester = Requester(username='foouser', password='foopassword')
mock_requester.get_url = mocker.MagicMock(
return_value=MockResponse()
)
jenkins = Jenkins('http://localhost:808',
username='foouser', password='foopassword',
requester=mock_requester)
jenkins.get_data = mocker.MagicMock(
return_value={
"busyExecutors": 59,
"totalExecutors": 75
}
)
data = jenkins.get_master_data()
assert data['busyExecutors'] == 59
assert data['totalExecutors'] == 75
def test_get_create_url(monkeypatch):
def fake_poll(cls, tree=None): # pylint: disable=unused-argument
return {}
monkeypatch.setattr(Jenkins, '_poll', fake_poll)
# Jenkins URL w/o slash
jenkins = Jenkins('http://localhost:8080',
username='foouser', password='foopassword')
assert jenkins.get_create_url() == 'http://localhost:8080/createItem'
# Jenkins URL w/ slash
jenkins = Jenkins('http://localhost:8080/',
username='foouser', password='foopassword')
assert jenkins.get_create_url() == 'http://localhost:8080/createItem'
def test_has_plugin(monkeypatch):
def fake_poll(cls, tree=None): # pylint: disable=unused-argument
return {}
monkeypatch.setattr(Jenkins, '_poll', fake_poll)
def fake_plugin_poll(cls, tree=None): # pylint: disable=unused-argument
return {
'plugins': [
{
'deleted': False, 'hasUpdate': True, 'downgradable': False,
'dependencies': [{}, {}, {}, {}],
'longName': 'Jenkins Subversion Plug-in', 'active': True,
'shortName': 'subversion', 'backupVersion': None,
'url': 'http://wiki.jenkins-ci.org/'
'display/JENKINS/Subversion+Plugin',
'enabled': True, 'pinned': False, 'version': '1.45',
'supportsDynamicLoad': 'MAYBE', 'bundled': True
}
]
}
monkeypatch.setattr(Plugins, '_poll', fake_plugin_poll)
jenkins = Jenkins('http://localhost:8080/',
username='foouser', password='foopassword')
assert jenkins.has_plugin('subversion') is True
def test_get_use_auth_cookie(mocker, monkeypatch):
COOKIE_VALUE = 'FAKE_COOKIE'
def fake_opener(redirect_handler): # pylint: disable=unused-argument
mock_response = mocker.MagicMock()
mock_response.cookie = COOKIE_VALUE
mock_opener = mocker.MagicMock()
mock_opener.open.return_value = mock_response
return mock_opener
def fake_poll(cls, tree=None): # pylint: disable=unused-argument
return {}
monkeypatch.setattr(Jenkins, '_poll', fake_poll)
monkeypatch.setattr(Requester, 'AUTH_COOKIE', None)
monkeypatch.setattr(jenkinsapi.jenkins, 'build_opener', fake_opener)
jenkins = Jenkins('http://localhost:8080',
username='foouser', password='foopassword')
jenkins.use_auth_cookie()
assert Requester.AUTH_COOKIE == COOKIE_VALUE
| salimfadhley/jenkinsapi | jenkinsapi_tests/unittests/test_jenkins.py | Python | mit | 12,493 |
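The tests above mock out the network layer; against a real server the same jenkinsapi calls look roughly like this (URL and credentials are placeholders):

```python
from jenkinsapi.jenkins import Jenkins

server = Jenkins("http://localhost:8080", username="foouser", password="foopassword")
print(server.version)              # e.g. "1.542" in the mocked test above
for job_name in server.get_jobs_list():
    print(job_name)
```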
# -*- coding: utf-8 -*-
""" S3 Logging Facility
@copyright: (c) 2014 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import sys
from gluon import current
# =============================================================================
class S3Log(object):
"""
Simple global logging facility, called like:
current.log.error("Something went wrong", value="Example")
gives:
2014-02-16 11:58:41 S3LOG ERROR: Something went wrong: Example
Configurable in 000_config.py (set up in models/00_db.py)
- to include caller details (file name, line number, function name):
2014-02-16 11:58:23 (applications/eden/modules/s3/s3rest.py 477 __init__)
ERROR: Something went wrong: Example
- to write to console (sys.stderr), to a log file, or both.
        For configuration, see modules/s3cfg.py.
"""
def __init__(self):
"""
Constructor
"""
settings = current.deployment_settings
log_level = settings.get_log_level()
if log_level is None:
self.critical = \
self.error = \
self.warning = \
self.info = \
self.debug = self.ignore
self.log_level = 100
else:
try:
level = getattr(logging, log_level.upper())
except AttributeError:
raise SyntaxError("Invalid settings.log.level: %s" % log_level)
self.log_level = level
self.critical = self._critical \
if level <= logging.CRITICAL else self.ignore
self.error = self._error \
if level <= logging.ERROR else self.ignore
self.warning = self._warning \
if level <= logging.WARNING else self.ignore
self.info = self._info \
if level <= logging.INFO else self.ignore
self.debug = self._debug \
if level <= logging.DEBUG else self.ignore
self.configure_logger()
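    # Illustrative note: with settings.log.level = "WARNING", the constructor
    # above binds self.debug and self.info to self.ignore, so
    #
    #     current.log.debug("cache miss")      # silently ignored
    #     current.log.error("update failed")   # logged: "... ERROR: update failed"
    #
    # and only messages at or above the configured severity reach the handlers.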
# -------------------------------------------------------------------------
@classmethod
def setup(cls):
"""
Set up current.log
"""
if hasattr(current, "log"):
return
current.log = cls()
return
# -------------------------------------------------------------------------
def configure_logger(self):
"""
Configure output handlers
"""
if hasattr(current, "log"):
return
settings = current.deployment_settings
console = settings.get_log_console()
logfile = settings.get_log_logfile()
if not console and not logfile:
# No point to log without output channel
self.critical = \
self.error = \
self.warning = \
self.info = \
self.debug = self.ignore
return
logger = logging.getLogger(__name__)
logger.propagate = False
logger.setLevel(self.log_level)
logger.handlers = []
m_format = "%(asctime)s %(caller)s %(levelname)s: %(message)s"
d_format = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(m_format, d_format)
# Set up console handler
if console:
console_handler = logging.StreamHandler(sys.stderr)
console_handler.setFormatter(formatter)
console_handler.setLevel(self.log_level)
logger.addHandler(console_handler)
# Set up log file handler
if logfile:
from logging.handlers import RotatingFileHandler
MAXBYTES = 1048576
logfile_handler = RotatingFileHandler(logfile,
maxBytes = MAXBYTES,
backupCount = 3)
logfile_handler.setFormatter(formatter)
logfile_handler.setLevel(self.log_level)
logger.addHandler(logfile_handler)
return
# -------------------------------------------------------------------------
@staticmethod
def ignore(message, value=None):
"""
Dummy to ignore messages below minimum severity level
"""
return
# -------------------------------------------------------------------------
@staticmethod
def recorder():
"""
Return a recording facility for log messages
"""
return S3LogRecorder()
# -------------------------------------------------------------------------
@staticmethod
def _log(severity, message, value=None):
"""
Log a message
@param severity: the severity of the message
@param message: the message
@param value: message suffix (optional)
"""
logger = logging.getLogger(__name__)
logger.propagate = False
msg = "%s: %s" % (message, value) if value else message
extra = {"caller": "S3LOG"}
if current.deployment_settings.get_log_caller_info():
caller = logger.findCaller()
if caller:
extra = {"caller": "(%s %s %s)" % caller}
logger.log(severity, msg, extra=extra)
return
# -------------------------------------------------------------------------
@classmethod
def _critical(cls, message, value=None):
"""
Log a critical message (highest severity level),
called via current.log.critical()
@param message: the message
@param value: message suffix (optional)
"""
cls._log(logging.CRITICAL, message, value=value)
# -------------------------------------------------------------------------
@classmethod
def _error(cls, message, value=None):
"""
Log an error message,
called via current.log.error()
@param message: the message
@param value: message suffix (optional)
"""
cls._log(logging.ERROR, message, value=value)
# -------------------------------------------------------------------------
@classmethod
def _warning(cls, message, value=None):
"""
Log a warning message,
called via current.log.warning()
@param message: the message
@param value: message suffix (optional)
"""
cls._log(logging.WARNING, message, value=value)
# -------------------------------------------------------------------------
@classmethod
def _info(cls, message, value=None):
"""
Log an general info message,
called via current.log.info()
@param message: the message
@param value: message suffix (optional)
"""
cls._log(logging.INFO, message, value=value)
# -------------------------------------------------------------------------
@classmethod
def _debug(cls, message, value=None):
"""
Log a detailed debug message (lowest severity level),
called via current.log.debug()
@param message: the message
@param value: message suffix (optional)
"""
cls._log(logging.DEBUG, message, value=value)
# =============================================================================
class S3LogRecorder(object):
"""
S3Log recorder, simple facility to record log messages for tests
Start:
recorder = current.log.recorder()
Read out messages:
messages = recorder.read()
Stop recording:
recorder.stop()
Re-start recording:
recorder.listen()
Clear messages buffer:
recorder.clear()
"""
def __init__(self):
self.handler = None
self.strbuf = None
self.listen()
# -------------------------------------------------------------------------
def listen(self):
""" Start recording S3Log messages """
if self.handler is not None:
return
strbuf = self.strbuf
if strbuf is None:
try:
from cStringIO import StringIO
            except ImportError:
from StringIO import StringIO
strbuf = StringIO()
handler = logging.StreamHandler(strbuf)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
self.handler = handler
self.strbuf = strbuf
return
# -------------------------------------------------------------------------
def read(self):
""" Read out recorded S3Log messages """
strbuf = self.strbuf
if strbuf is None:
return ""
handler = self.handler
if handler is not None:
handler.flush()
return strbuf.getvalue()
# -------------------------------------------------------------------------
def stop(self):
""" Stop recording S3Log messages (and return the messages) """
handler = self.handler
if handler is not None:
logger = logging.getLogger(__name__)
logger.removeHandler(handler)
handler.close()
self.handler = None
strbuf = self.strbuf
if strbuf is not None:
return strbuf.getvalue()
else:
return ""
# -------------------------------------------------------------------------
def clear(self):
""" Clear the messages buffer """
if self.handler is not None:
on = True
self.stop()
else:
on = False
strbuf = self.strbuf
if strbuf is not None:
strbuf.close()
self.strbuf = None
if on:
self.listen()
# END =========================================================================
| devinbalkind/eden | modules/s3log.py | Python | mit | 11,312 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingResults.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QObject, pyqtSignal
class ProcessingResults(QObject):
resultAdded = pyqtSignal()
results = []
def addResult(self, icon, name, result):
self.results.append(Result(icon, name, result))
self.resultAdded.emit()
def getResults(self):
return self.results
class Result:
def __init__(self, icon, name, filename):
self.icon = icon
self.name = name
self.filename = filename
resultsList = ProcessingResults()
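# Illustrative usage (the icon and file path below are hypothetical):
#
#     resultsList.addResult(icon, "Buffer result", "/tmp/buffer_output.shp")
#     for r in resultsList.getResults():
#         print(r.name, r.filename)
#
# Dialogs connected to resultsList.resultAdded can refresh their view whenever
# a new Result is appended to the shared list.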
| medspx/QGIS | python/plugins/processing/core/ProcessingResults.py | Python | gpl-2.0 | 1,611 |
# Miro - an RSS based video player application
# Copyright (C) 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
from watchhistory import main
WATCHER = None
def load(context):
"""Loads the watchhistory module.
"""
global WATCHER
WATCHER = main.WatchHistory()
def unload():
pass
| debugger06/MiroX | tv/extensions/watchhistory/__init__.py | Python | gpl-2.0 | 1,599 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""JSON utilities."""
import re
import sys
if sys.hexversion < 0x2060000:
try:
import simplejson as json
CFG_JSON_AVAILABLE = True
except ImportError:
# Okay, no Ajax app will be possible, but continue anyway,
# since this package is only recommended, not mandatory.
CFG_JSON_AVAILABLE = False
json = None
else:
import json
CFG_JSON_AVAILABLE = True
def json_unicode_to_utf8(data):
"""Change all strings in a JSON structure to UTF-8."""
if type(data) == unicode:
return data.encode('utf-8')
elif type(data) == dict:
newdict = {}
for key in data:
newdict[json_unicode_to_utf8(key)] = json_unicode_to_utf8(data[key])
return newdict
elif type(data) == list:
return [json_unicode_to_utf8(elem) for elem in data]
else:
return data
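# Illustrative example: under Python 2,
#     json_unicode_to_utf8({u'k\xe9y': [u'valu\xe9']})
# returns {'k\xc3\xa9y': ['valu\xc3\xa9']}, i.e. every unicode string in the
# structure (keys and values alike) is replaced by its UTF-8 encoded str.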
def json_decode_file(filename):
"""
Parses a textfile using json to build a python object representation
"""
seq = open(filename).read()
## The JSON standard has no comments syntax. We have to remove them
## before feeding python's JSON parser
seq = json_remove_comments(seq)
## Parse all the unicode stuff to utf-8
return json_unicode_to_utf8(json.loads(seq))
def json_remove_comments(text):
""" Removes C style comments from the given string. Will keep newline
characters intact. This way parsing errors from json will point to the
right line.
This is primarily used to make comments in JSON files possible.
The JSON standard has no comments syntax, but we want to use
JSON for our profiles and configuration files. The comments need to be
removed first, before the text can be feed to the JSON parser of python.
@param text: JSON string that should be cleaned
@type text: string
@return: Cleaned JSON
@rtype: string
"""
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return ""
else:
return s
pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE
)
return re.sub(pattern, replacer, text)
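# Illustrative example: given
#     '{"a": 1, /* block comment */ "b": "x//y"}  // trailing comment'
# json_remove_comments() strips both comment styles but leaves the quoted
# string "x//y" untouched, so the result parses cleanly with json.loads().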
def wash_for_js(text):
"""
DEPRECATED: use htmlutils.escape_javascript_string() instead,
and take note that returned value is no longer enclosed into
quotes.
"""
from invenio.htmlutils import escape_javascript_string
if isinstance(text, basestring):
return '"%s"' % escape_javascript_string(text,
escape_for_html=False,
escape_CDATA=False,
escape_script_tag_with_quote=None)
else:
return text
| CERNDocumentServer/invenio | modules/miscutil/lib/jsonutils.py | Python | gpl-2.0 | 3,590 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_colors_stddev.py
------------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
def processInputs(alg):
# We need to import all the bands and to preserve color table
raster = alg.getParameterValue('map')
if raster in alg.exportedLayers.keys():
return
alg.setSessionProjectionFromLayer(raster, alg.commands)
destFilename = alg.getTempFilename()
alg.exportedLayers[raster] = destFilename
command = 'r.in.gdal input={} output={} --overwrite -o'.format(raster, destFilename)
alg.commands.append(command)
alg.setSessionProjectionFromProject(alg.commands)
region = unicode(alg.getParameterValue(alg.GRASS_REGION_EXTENT_PARAMETER))
regionCoords = region.split(',')
command = 'g.region'
command += ' -a'
command += ' n=' + unicode(regionCoords[3])
command += ' s=' + unicode(regionCoords[2])
command += ' e=' + unicode(regionCoords[1])
command += ' w=' + unicode(regionCoords[0])
cellsize = alg.getParameterValue(alg.GRASS_REGION_CELLSIZE_PARAMETER)
if cellsize:
command += ' res=' + unicode(cellsize)
else:
command += ' res=' + unicode(alg.getDefaultCellsize())
alignToResolution = alg.getParameterValue(alg.GRASS_REGION_ALIGN_TO_RESOLUTION)
if alignToResolution:
command += ' -a'
alg.commands.append(command)
def processCommand(alg):
# We need to remove output
output = alg.getOutputFromName('output')
alg.exportedLayers[output.value] = output.name + alg.uniqueSufix
alg.removeOutputFromName('output')
alg.processCommand()
alg.addOutput(output)
def processOutputs(alg):
# We need to export the raster with all its bands and its color table
output = alg.getOutputValue('output')
raster = alg.getParameterFromName('map')
# Get the list of rasters matching the basename
command = "r.out.gdal -t input={} output=\"{}\" createopt=\"TFW=YES,COMPRESS=LZW\"".format(
alg.exportedLayers[raster.value], output)
alg.commands.append(command)
alg.outputCommands.append(command)
| alexbruy/QGIS | python/plugins/processing/algs/grass7/ext/r_colors_stddev.py | Python | gpl-2.0 | 3,090 |
#! /usr/bin/python
# a script to prepare a PiSi source tarball from svn
# author: exa
#TODO: arguments for svn snapshot with rev number, or a tag to override default
import sys
import os
import shutil
def run(cmd):
print 'running', cmd
os.system(cmd)
sys.path.insert(0, '.')
import pisi
if not os.path.exists('svndist'):
os.makedirs('svndist')
ver = pisi.__version__
if os.path.exists('svndist/pisi-%s' % ver):
shutil.rmtree('svndist/pisi-%s' % ver)
print 'Exporting svn directory'
run('svn export http://svn.uludag.org.tr/uludag/trunk/pisi svndist/pisi-%s' % ver)
os.chdir('svndist')
run('tar cjvf pisi-%s.tar.bz2 pisi-%s' % (ver, ver))
print 'Have a look at svndist directory'
| Pardus-Linux/pisi | scripts/svndist.py | Python | gpl-2.0 | 709 |
# coding=UTF-8
__author__ = 'wanghongfei'
import mysql.connector, sys
CLEAR_DB = False
args = sys.argv[1:]
if len(args) == 1:
if args[0] == 'clear':
CLEAR_DB = True
else:
print 'wrong parameter!'
sys.exit(1)
config = {
'user': 'root',
'password': '111111',
'host': 'localhost',
'database': 'taolijie',
'raise_on_warnings': True
}
def build_connection(conf):
connection = mysql.connector.connect(**conf)
return connection
def close_connection(conn):
conn.close()
# Clear all data
def clear_data():
sqls = [
"DELETE FROM member_role",
"DELETE FROM role",
"DELETE FROM job_post",
"DELETE FROM second_hand_post",
"DELETE FROM job_post_category",
"DELETE FROM second_hand_post_category",
"DELETE FROM resume",
"DELETE FROM academy",
"DELETE FROM school",
"DELETE FROM news",
"DELETE FROM member",
]
for sql in sqls:
cursor.execute(sql)
conn.commit()
def insert_role_data(cursor):
sql = "INSERT INTO role(rolename, memo) VALUES (%s, %s)"
data = [
('ADMIN', '管理员'),
('STUDENT', '学生'),
('EMPLOYER', '商家')
]
cursor.executemany(sql, data)
def insert_member_data(cursor):
sql = "INSERT INTO member(username, password, age) VALUES (%(username)s, %(password)s, %(age)s )"
data = [
{
'username': 'wanghongfei',
'password': '3d4f2bf07dc1be38b20cd6e46949a1071f9d0e3d',
'age': 22
},
{
'username': 'wangfucheng',
'password': '3d4f2bf07dc1be38b20cd6e46949a1071f9d0e3d',
'age': 21
},
{
'username': 'abc',
'password': '3d4f2bf07dc1be38b20cd6e46949a1071f9d0e3d',
'age': 18
}
]
cursor.executemany(sql, data)
def insert_member_role_data(cursor):
sql = "INSERT INTO member_role(member_id, role_rid) VALUES ( %(member_id)s, %(role_rid)s )"
data = [
{
'member_id': query_member_id('wanghongfei'),
'role_rid': query_role_id('ADMIN')
},
{
'member_id': query_member_id('wangfucheng'),
'role_rid': query_role_id('STUDENT')
},
{
'member_id': query_member_id('abc'),
'role_rid': query_role_id('EMPLOYER')
}
]
cursor.executemany(sql, data)
def insert_school_data():
sql = "INSERT INTO school(short_name, full_name, province) VALUES ( %(short_name)s, %(full_name)s, %(province)s )"
data = [
{
'short_name': '理工大',
'full_name': '山东理工大学',
'province': '山东'
}
]
cursor.executemany(sql, data)
def insert_academy_data():
sql = "INSERT INTO academy(college_id, short_name, full_name) VALUES (%(college_id)s, %(short_name)s, %(full_name)s )"
data = [
{
'college_id': query_school_id('山东理工大学'),
'short_name': '计院',
'full_name': '计算机学院'
},
{
'college_id': query_school_id('山东理工大学'),
'short_name': '商学院',
'full_name': '商学院'
},
{
'college_id': query_school_id('山东理工大学'),
'short_name': '电气学院',
'full_name': '电气与电子工程学院'
}
]
cursor.executemany(sql, data)
def insert_news_data():
sql = "INSERT INTO news(title, content, member_id) VALUE ( %(title)s, %(content)s, %(member_id)s) "
data = [
{
'title': '死人了1',
'content': '哪里死人了?',
'member_id': query_member_id('wanghongfei')
},
{
'title': '死人了2',
'content': '哪里死人了?',
'member_id': query_member_id('wangfucheng')
},
{
'title': '死人了3',
'content': '哪里死人了?',
'member_id': query_member_id('abc')
}
]
cursor.executemany(sql, data)
def query_school_id(school_name):
sql = "SELECT id FROM school WHERE full_name = %(full_name)s"
data = {
'full_name': school_name
}
cursor.execute(sql, data)
res = cursor.fetchone()
return res[0]
def query_role_id(rolename):
sql = "SELECT rid FROM role WHERE rolename = %(rolename)s"
data = {
'rolename': rolename
}
cursor.execute(sql, data)
res = cursor.fetchone()
return res[0]
def query_member_id(username):
sql = "SELECT id FROM member WHERE username = %(username)s"
data = {
'username': username
}
cursor.execute(sql, data)
res = cursor.fetchone()
return res[0]
def insert_category_data():
sql = "INSERT INTO job_post_category (name, memo) VALUES (%(name)s, %(memo)s )"
data = [
{
'name': '发传单',
'memo': ''
},
{
'name': '送快递',
'memo': ''
},
{
'name': '家政',
'memo': ''
}
]
cursor.executemany(sql, data)
sql = "INSERT INTO second_hand_post_category (name, memo) VALUES (%(name)s, %(memo)s )"
data = [
{
'name': '手机',
'memo': ''
},
{
'name': '电脑',
'memo': ''
},
{
'name': '自行车',
'memo': ''
}
]
cursor.executemany(sql, data)
def insert_resume_data(cursor, usernames):
for username in usernames:
sql = "INSERT INTO resume (member_id, name, gender, access_authority) VALUES ( %(member_id)s, %(name)s, %(gender)s, %(access_authority)s )"
data = [
{
'member_id': query_member_id(username),
'name': '王鸿飞',
'gender': '男',
'access_authority': 'GLOBAL'
},
{
'member_id': query_member_id(username),
'name': '王鸿飞2',
'gender': '男',
'access_authority': 'GLOBAL'
}
]
cursor.executemany(sql, data)
def query_job_category_id(name):
sql = "SELECT id FROM job_post_category WHERE name = %(name)s"
data = ({'name': name})
cursor.execute(sql, data)
res = cursor.fetchone()
return res[0]
def query_sh_category_id(name):
sql = "SELECT id FROM second_hand_post_category WHERE name = %(name)s"
data = ({'name': name})
cursor.execute(sql, data)
res = cursor.fetchone()
return res[0]
def insert_sh_data(name_list):
for name in name_list:
sql = "INSERT INTO second_hand_post (member_id, second_hand_post_category_id, title, description) VALUES (%(member_id)s, %(second_hand_post_category_id)s, %(title)s, %(description)s) "
data = [
{
'member_id': query_member_id(name),
'second_hand_post_category_id': query_sh_category_id('自行车'),
'title': '出售二手山地车',
'description': '9成新'
},
{
'member_id': query_member_id(name),
'second_hand_post_category_id': query_sh_category_id('手机'),
'title': '出售二手iphone',
'description': '8成新'
},
{
'member_id': query_member_id(name),
'second_hand_post_category_id': query_sh_category_id('电脑'),
'title': '出售二手macbook',
'description': '5成新'
}
]
cursor.executemany(sql, data)
def insert_job_data(name_list):
for name in name_list:
sql = "INSERT INTO job_post (member_id, job_post_category_id, title, introduce) VALUES ( %(member_id)s, %(job_post_category_id)s, %(title)s, %(introduce)s )"
data = [
{
'member_id': query_member_id(name),
'job_post_category_id': query_job_category_id('送快递'),
'title': '送快递',
'introduce': '998'
}
]
cursor.executemany(sql, data)
# build connection
conn = build_connection(config)
cursor = conn.cursor()
if CLEAR_DB:
clear_data()
print 'done clearing'
sys.exit(0)
users = ['wanghongfei', 'wangfucheng', 'abc']
# insert data
print 'inserting into role table'
insert_role_data(cursor)
print 'done'
print 'inserting into member table'
insert_member_data(cursor)
print 'done'
print 'inserting into member_role table'
insert_member_role_data(cursor)
print 'done'
print 'inserting into category table'
insert_category_data()
print 'done'
print 'inserting into resume table'
insert_resume_data(cursor, users)
print 'done'
print 'inserting into job table'
insert_job_data(users)
print 'done'
print 'inserting into second_hand table'
insert_sh_data(users)
print 'done'
print 'inserting into school table'
insert_school_data()
print 'done'
print 'inserting into academy table'
insert_academy_data()
print 'done'
print 'inserting into news table'
insert_news_data()
print 'done'
conn.commit()
close_connection(conn)
| lankeren/taolijie | script/insert-data.py | Python | gpl-3.0 | 9,304 |
from django.core.urlresolvers import reverse
from decision_test_case import DecisionTestCase
from publicweb.forms import DecisionForm
from mock import patch
from django.dispatch.dispatcher import Signal
from django.db.models import signals
from publicweb.models import Decision, decision_signal_handler
# TODO: This class is a bit stumpy... merge with other (web) tests.
class EditDecisionTest(DecisionTestCase):
def test_edit_description_form_displays_title(self):
decision = self.create_and_return_decision()
response = self.load_add_decision_and_return_response(decision.id)
self.assertContains(response, u"Update Proposal #%s" % decision.id)
def load_add_decision_and_return_response(self, idd):
return self.client.get(reverse('publicweb_decision_update', args=[idd]))
@patch('publicweb.models.ObservationManager.send_notifications')
def test_email_not_sent_when_watcher_removed(self, notifications):
dispatch_uid = "publicweb.models.decision_signal_handler"
Signal.disconnect(signals.post_save, sender=Decision,
dispatch_uid=dispatch_uid)
decision = self.create_and_return_decision()
data = {
'description': decision.description,
'status': decision.status,
'deadline': decision.deadline,
'people': decision.people,
'watch': True
}
form = DecisionForm(data, instance=decision)
form.watch = False
form.is_valid()
form.save()
Signal.connect(
signals.post_save,
sender=Decision,
receiver=decision_signal_handler,
dispatch_uid=dispatch_uid
)
self.assertFalse(notifications.called)
| aptivate/econsensus | django/econsensus/publicweb/tests/edit_decision_test.py | Python | gpl-3.0 | 1,773 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PrivatePost.text_html'
db.add_column(u'mp_privatepost', 'text_html',
self.gf('django.db.models.fields.TextField')(default='Old MP text not parsed in markdown'),
keep_default=False)
def backwards(self, orm):
# Deleting field 'PrivatePost.text_html'
db.delete_column(u'mp_privatepost', 'text_html')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'mp.privatepost': {
'Meta': {'object_name': 'PrivatePost'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'privateposts'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position_in_topic': ('django.db.models.fields.IntegerField', [], {}),
'privatetopic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mp.PrivateTopic']"}),
'pubdate': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'text_html': ('django.db.models.fields.TextField', [], {}),
'update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'mp.privatetopic': {
'Meta': {'object_name': 'PrivateTopic'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'author'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_message': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_message'", 'null': 'True', 'to': u"orm['mp.PrivatePost']"}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'participants'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'pubdate': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'mp.privatetopicread': {
'Meta': {'object_name': 'PrivateTopicRead'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'privatepost': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mp.PrivatePost']"}),
'privatetopic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mp.PrivateTopic']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'privatetopics_read'", 'to': u"orm['auth.User']"})
}
}
complete_apps = ['mp'] | Florianboux/zds-site | zds/mp/migrations/0002_auto__add_field_privatepost_text_html.py | Python | gpl-3.0 | 6,206 |
from scipy import stats
N=stats.norm
print N.mean() # 0
print N.var() # 1
xi = N.isf(0.95)
print xi # -1.64485
N.cdf(xi) # Check: 0.05
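# Note: the same quantile is available through the inverse CDF, since
# isf(q) == ppf(1 - q) for a continuous distribution; N.ppf(0.05) returns
# the same value as xi above.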
# Plots of the density function and the cumulative distribution function.
P=plot(N.cdf,x,-10,10)
Q=plot(N.pdf,x,-10,10,color="red")
show(P+Q)
| LaurentClaessens/mazhe | tex/frido/code_sage2.py | Python | gpl-3.0 | 306 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import binascii
import datetime
import math
import re
import select
import socket
import sys
import time
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
# just because we can import it on Linux doesn't mean we will use it
except ImportError:
pass
DOCUMENTATION = '''
---
module: wait_for
short_description: Waits for a condition before continuing.
description:
- You can wait for a set amount of time C(timeout), this is the default if nothing is specified.
- Waiting for a port to become available is useful for when services
are not immediately available after their init scripts return
which is true of certain Java application servers. It is also
useful when starting guests with the M(virt) module and
needing to pause until they are ready.
- This module can also be used to wait for a regex match a string to be present in a file.
- In 1.6 and later, this module can also be used to wait for a file to be available or
absent on the filesystem.
- In 1.8 and later, this module can also be used to wait for active
connections to be closed before continuing, useful if a node
is being rotated out of a load balancer pool.
version_added: "0.7"
options:
host:
description:
- A resolvable hostname or IP address to wait for
required: false
default: "127.0.0.1"
timeout:
description:
- maximum number of seconds to wait for
required: false
default: 300
connect_timeout:
description:
- maximum number of seconds to wait for a connection to happen before closing and retrying
required: false
default: 5
delay:
description:
- number of seconds to wait before starting to poll
required: false
default: 0
port:
description:
- port number to poll
required: false
state:
description:
      - one of C(present), C(started), C(stopped), C(absent), or C(drained)
- When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections
- When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that file is absent or removed
choices: [ "present", "started", "stopped", "absent", "drained" ]
default: "started"
path:
version_added: "1.4"
required: false
description:
      - path to a file on the filesystem that must exist before continuing
search_regex:
version_added: "1.4"
required: false
description:
- Can be used to match a string in either a file or a socket connection. Defaults to a multiline regex.
exclude_hosts:
version_added: "1.8"
required: false
description:
- list of hosts or IPs to ignore when looking for active TCP connections for C(drained) state
notes:
- The ability to use search_regex with a port connection was added in 1.7.
requirements: []
author:
- "Jeroen Hoekx (@jhoekx)"
- "John Jarvis (@jarv)"
- "Andrii Radyk (@AnderEnder)"
'''
EXAMPLES = '''
# wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds
- wait_for: port=8000 delay=10
# wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds
- wait_for: host=0.0.0.0 port=8000 delay=10 state=drained
# wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts
- wait_for: host=0.0.0.0 port=8000 state=drained exclude_hosts=10.2.1.2,10.2.1.3
# wait until the file /tmp/foo is present before continuing
- wait_for: path=/tmp/foo
# wait until the string "completed" is in the file /tmp/foo before continuing
- wait_for: path=/tmp/foo search_regex=completed
# wait until the lock file is removed
- wait_for: path=/var/lock/file.lock state=absent
# wait until the process is finished and pid was destroyed
- wait_for: path=/proc/3466/status state=absent
# wait 300 seconds for port 22 to become open and contain "OpenSSH", don't assume the inventory_hostname is resolvable
# and don't start checking for 10 seconds
- local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10
'''
class TCPConnectionInfo(object):
"""
This is a generic TCP Connection Info strategy class that relies
on the psutil module, which is not ideal for targets, but necessary
for cross platform support.
A subclass may wish to override some or all of these methods.
- _get_exclude_ips()
- get_active_connections()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
match_all_ips = {
socket.AF_INET: '0.0.0.0',
socket.AF_INET6: '::',
}
ipv4_mapped_ipv6_address = {
'prefix': '::ffff',
'match_all': '::ffff:0.0.0.0'
}
connection_states = {
'01': 'ESTABLISHED',
'02': 'SYN_SENT',
'03': 'SYN_RECV',
'04': 'FIN_WAIT1',
'05': 'FIN_WAIT2',
'06': 'TIME_WAIT',
}
def __new__(cls, *args, **kwargs):
return load_platform_subclass(TCPConnectionInfo, args, kwargs)
def __init__(self, module):
self.module = module
self.ips = _convert_host_to_ip(module.params['host'])
self.port = int(self.module.params['port'])
self.exclude_ips = self._get_exclude_ips()
if not HAS_PSUTIL:
module.fail_json(msg="psutil module required for wait_for")
def _get_exclude_ips(self):
exclude_hosts = self.module.params['exclude_hosts']
exclude_ips = []
if exclude_hosts is not None:
for host in exclude_hosts:
exclude_ips.extend(_convert_host_to_ip(host))
return exclude_ips
def get_active_connections_count(self):
active_connections = 0
for p in psutil.process_iter():
connections = p.get_connections(kind='inet')
for conn in connections:
if conn.status not in self.connection_states.values():
continue
(local_ip, local_port) = conn.local_address
if self.port != local_port:
continue
(remote_ip, remote_port) = conn.remote_address
if (conn.family, remote_ip) in self.exclude_ips:
continue
if any((
(conn.family, local_ip) in self.ips,
(conn.family, self.match_all_ips[conn.family]) in self.ips,
local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
(conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
)):
active_connections += 1
return active_connections
# ===========================================
# Subclass: Linux
class LinuxTCPConnectionInfo(TCPConnectionInfo):
"""
This is a TCP Connection Info evaluation strategy class
that utilizes information from Linux's procfs. While less universal,
does allow Linux targets to not require an additional library.
"""
platform = 'Linux'
distribution = None
source_file = {
socket.AF_INET: '/proc/net/tcp',
socket.AF_INET6: '/proc/net/tcp6'
}
match_all_ips = {
socket.AF_INET: '00000000',
socket.AF_INET6: '00000000000000000000000000000000',
}
ipv4_mapped_ipv6_address = {
'prefix': '0000000000000000FFFF0000',
'match_all': '0000000000000000FFFF000000000000'
}
local_address_field = 1
remote_address_field = 2
connection_state_field = 3
def __init__(self, module):
self.module = module
self.ips = _convert_host_to_hex(module.params['host'])
self.port = "%0.4X" % int(module.params['port'])
self.exclude_ips = self._get_exclude_ips()
def _get_exclude_ips(self):
exclude_hosts = self.module.params['exclude_hosts']
exclude_ips = []
if exclude_hosts is not None:
for host in exclude_hosts:
exclude_ips.extend(_convert_host_to_hex(host))
return exclude_ips
def get_active_connections_count(self):
active_connections = 0
for family in self.source_file.keys():
f = open(self.source_file[family])
for tcp_connection in f.readlines():
tcp_connection = tcp_connection.strip().split()
if tcp_connection[self.local_address_field] == 'local_address':
continue
if tcp_connection[self.connection_state_field] not in self.connection_states:
continue
(local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
if self.port != local_port:
continue
(remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
if (family, remote_ip) in self.exclude_ips:
continue
if any((
(family, local_ip) in self.ips,
(family, self.match_all_ips[family]) in self.ips,
local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
(family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
)):
active_connections += 1
f.close()
return active_connections
def _convert_host_to_ip(host):
"""
    Perform forward DNS resolution on host; an IP address resolves to itself
Args:
host: String with either hostname, IPv4, or IPv6 address
Returns:
List of tuples containing address family and IP
"""
addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)
ips = []
for family, socktype, proto, canonname, sockaddr in addrinfo:
ip = sockaddr[0]
ips.append((family, ip))
if family == socket.AF_INET:
ips.append((socket.AF_INET6, "::ffff:" + ip))
return ips
def _convert_host_to_hex(host):
"""
Convert the provided host to the format in /proc/net/tcp*
/proc/net/tcp uses little-endian four byte hex for ipv4
/proc/net/tcp6 uses little-endian per 4B word for ipv6
Args:
host: String with either hostname, IPv4, or IPv6 address
Returns:
List of tuples containing address family and the
little-endian converted host
"""
ips = []
if host is not None:
for family, ip in _convert_host_to_ip(host):
hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip))
hexip_hf = ""
for i in range(0, len(hexip_nf), 8):
ipgroup_nf = hexip_nf[i:i+8]
ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16))
hexip_hf = "%s%08X" % (hexip_hf, ipgroup_hf)
ips.append((family, hexip_hf))
return ips
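# Illustrative example: on a little-endian host,
#     _convert_host_to_hex('127.0.0.1')
# typically yields [(socket.AF_INET, '0100007F'),
#                   (socket.AF_INET6, '0000000000000000FFFF00000100007F')],
# matching the little-endian address format used in /proc/net/tcp{,6}.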
def _create_connection(host, port, connect_timeout):
"""
Connect to a 2-tuple (host, port) and return
the socket object.
Args:
2-tuple (host, port) and connection timeout
Returns:
Socket object
"""
if sys.version_info < (2, 6):
(family, _) = _convert_host_to_ip(host)
connect_socket = socket.socket(family, socket.SOCK_STREAM)
connect_socket.settimeout(connect_timeout)
connect_socket.connect( (host, port) )
else:
connect_socket = socket.create_connection( (host, port), connect_timeout)
return connect_socket
def _timedelta_total_seconds(timedelta):
return (
timedelta.microseconds + 0.0 +
(timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
def main():
module = AnsibleModule(
argument_spec = dict(
host=dict(default='127.0.0.1'),
timeout=dict(default=300, type='int'),
connect_timeout=dict(default=5, type='int'),
delay=dict(default=0, type='int'),
port=dict(default=None, type='int'),
path=dict(default=None, type='path'),
search_regex=dict(default=None),
state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']),
exclude_hosts=dict(default=None, type='list')
),
)
params = module.params
host = params['host']
timeout = params['timeout']
connect_timeout = params['connect_timeout']
delay = params['delay']
port = params['port']
state = params['state']
path = params['path']
search_regex = params['search_regex']
if search_regex is not None:
compiled_search_re = re.compile(search_regex, re.MULTILINE)
else:
compiled_search_re = None
if port and path:
module.fail_json(msg="port and path parameter can not both be passed to wait_for")
if path and state == 'stopped':
module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module")
if path and state == 'drained':
module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module")
if params['exclude_hosts'] is not None and state != 'drained':
module.fail_json(msg="exclude_hosts should only be with state=drained")
start = datetime.datetime.now()
if delay:
time.sleep(delay)
if not port and not path and state != 'drained':
time.sleep(timeout)
elif state in [ 'stopped', 'absent' ]:
### first wait for the stop condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
if path:
try:
f = open(path)
f.close()
time.sleep(1)
pass
except IOError:
break
elif port:
try:
s = _create_connection(host, port, connect_timeout)
s.shutdown(socket.SHUT_RDWR)
s.close()
time.sleep(1)
except:
break
else:
time.sleep(1)
else:
elapsed = datetime.datetime.now() - start
if port:
module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
elif path:
module.fail_json(msg="Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds)
elif state in ['started', 'present']:
### wait for start condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
if path:
try:
os.stat(path)
except OSError, e:
# If anything except file not present, throw an error
if e.errno != 2:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
# file doesn't exist yet, so continue
else:
# File exists. Are there additional things to check?
if not compiled_search_re:
# nope, succeed!
break
try:
f = open(path)
try:
if re.search(compiled_search_re, f.read()):
# String found, success!
break
finally:
f.close()
except IOError:
pass
elif port:
alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now()))
try:
s = _create_connection(host, port, min(connect_timeout, alt_connect_timeout))
except:
# Failed to connect by connect_timeout. wait and try again
pass
else:
# Connected -- are there additional conditions?
if compiled_search_re:
data = ''
matched = False
while datetime.datetime.now() < end:
max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now()))
(readable, w, e) = select.select([s], [], [], max_timeout)
if not readable:
# No new data. Probably means our timeout
# expired
continue
response = s.recv(1024)
if not response:
# Server shutdown
break
data += response
if re.search(compiled_search_re, data):
matched = True
break
# Shutdown the client socket
s.shutdown(socket.SHUT_RDWR)
s.close()
if matched:
# Found our string, success!
break
else:
# Connection established, success!
s.shutdown(socket.SHUT_RDWR)
s.close()
break
# Conditions not yet met, wait and try again
time.sleep(1)
else: # while-else
# Timeout expired
elapsed = datetime.datetime.now() - start
if port:
if search_regex:
module.fail_json(msg="Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds)
else:
module.fail_json(msg="Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
elif path:
if search_regex:
module.fail_json(msg="Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
else:
module.fail_json(msg="Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)
elif state == 'drained':
### wait until all active connections are gone
end = start + datetime.timedelta(seconds=timeout)
tcpconns = TCPConnectionInfo(module)
while datetime.datetime.now() < end:
try:
if tcpconns.get_active_connections_count() == 0:
break
except IOError:
pass
time.sleep(1)
else:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)
elapsed = datetime.datetime.now() - start
module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| sysadmin75/ansible-modules-core | utilities/logic/wait_for.py | Python | gpl-3.0 | 20,316 |
#!/usr/bin/env python
########################################################################
# File :   dirac-proxy-get-uploaded-info.py
# Author : Adrian Casajus
########################################################################
import sys
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import ProxyManagerClient
from DIRAC.Core.Security import CS, Properties
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
__RCSID__ = "$Id:"
userName = False
def setUser( arg ):
global userName
userName = arg
return DIRAC.S_OK()
Script.registerSwitch( "u:", "user=", "User to query (by default oneself)", setUser )
Script.parseCommandLine()
result = getProxyInfo()
if not result[ 'OK' ]:
print "Do you have a valid proxy?"
print result[ 'Message' ]
sys.exit( 1 )
proxyProps = result[ 'Value' ]
if not userName:
userName = proxyProps[ 'username' ]
if userName in CS.getAllUsers():
if Properties.PROXY_MANAGEMENT not in proxyProps[ 'groupProperties' ]:
if userName != proxyProps[ 'username' ] and userName != proxyProps[ 'issuer' ]:
print "You can only query info about yourself!"
sys.exit( 1 )
result = CS.getDNForUsername( userName )
if not result[ 'OK' ]:
print "Oops %s" % result[ 'Message' ]
dnList = result[ 'Value' ]
if not dnList:
print "User %s has no DN defined!" % userName
sys.exit( 1 )
userDNs = dnList
else:
userDNs = [ userName ]
print "Checking for DNs %s" % " | ".join( userDNs )
pmc = ProxyManagerClient()
result = pmc.getDBContents( { 'UserDN' : userDNs } )
if not result[ 'OK' ]:
print "Could not retrieve the proxy list: %s" % result[ 'Value' ]
sys.exit( 1 )
data = result[ 'Value' ]
colLengths = []
for pN in data[ 'ParameterNames' ]:
colLengths.append( len( pN ) )
for row in data[ 'Records' ] :
for i in range( len( row ) ):
colLengths[ i ] = max( colLengths[i], len( str( row[i] ) ) )
lines = [""]
for i in range( len( data[ 'ParameterNames' ] ) ):
pN = data[ 'ParameterNames' ][i]
lines[0] += "| %s " % pN.ljust( colLengths[i] )
lines[0] += "|"
tL = len( lines[0] )
lines.insert( 0, "-"*tL )
lines.append( "-"*tL )
for row in data[ 'Records' ] :
nL = ""
for i in range( len( row ) ):
nL += "| %s " % str( row[i] ).ljust( colLengths[i] )
nL += "|"
lines.append( nL )
lines.append( "-"*tL )
print "\n".join( lines )
| Andrew-McNab-UK/DIRAC | FrameworkSystem/scripts/dirac-proxy-get-uploaded-info.py | Python | gpl-3.0 | 2,416 |
#
# Copyright 2005,2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import struct
import numpy
import six
from gnuradio import gru
from . import crc
def conv_packed_binary_string_to_1_0_string(s):
"""
'\xAF' --> '10101111'
"""
r = []
for ch in s:
x = ord(ch)
for i in range(7,-1,-1):
t = (x >> i) & 0x1
r.append(t)
return ''.join([chr(x + ord('0')) for x in r])
def conv_1_0_string_to_packed_binary_string(s):
"""
'10101111' -> ('\xAF', False)
Basically the inverse of conv_packed_binary_string_to_1_0_string,
but also returns a flag indicating if we had to pad with leading zeros
to get to a multiple of 8.
"""
if not is_1_0_string(s):
raise ValueError("Input must be a string containing only 0's and 1's")
# pad to multiple of 8
padded = False
rem = len(s) % 8
if rem != 0:
npad = 8 - rem
s = '0' * npad + s
padded = True
assert len(s) % 8 == 0
r = []
i = 0
while i < len(s):
t = 0
for j in range(8):
t = (t << 1) | (ord(s[i + j]) - ord('0'))
r.append(chr(t))
i += 8
return (''.join(r), padded)
default_access_code = \
conv_packed_binary_string_to_1_0_string('\xAC\xDD\xA4\xE2\xF2\x8C\x20\xFC')
default_preamble = \
conv_packed_binary_string_to_1_0_string('\xA4\xF2')
def is_1_0_string(s):
if not isinstance(s, str):
return False
for ch in s:
if not ch in ('0', '1'):
return False
return True
def string_to_hex_list(s):
return [hex(ord(x)) for x in s]
def whiten(s, o):
sa = numpy.fromstring(s, numpy.uint8)
z = sa ^ random_mask_vec8[o:len(sa)+o]
return z.tostring()
def dewhiten(s, o):
return whiten(s, o) # self inverse
def make_header(payload_len, whitener_offset=0):
# Upper nibble is offset, lower 12 bits is len
val = ((whitener_offset & 0xf) << 12) | (payload_len & 0x0fff)
#print("offset =", whitener_offset, " len =", payload_len, " val=", val)
return struct.pack(b'!HH', val, val)
def make_packet(payload, samples_per_symbol, bits_per_symbol,
preamble=default_preamble, access_code=default_access_code,
pad_for_usrp=True, whitener_offset=0, whitening=True,
calc_crc=True):
"""
Build a packet, given access code, payload, and whitener offset
Args:
payload: packet payload, len [0, 4096]
samples_per_symbol: samples per symbol (needed for padding calculation) (int)
bits_per_symbol: (needed for padding calculation) (int)
preamble: string of ascii 0's and 1's
access_code: string of ascii 0's and 1's
pad_for_usrp: If true, packets are padded such that they end up a multiple of 128 samples(512 bytes)
whitener_offset: offset into whitener string to use [0-16)
whitening: Whether to turn on data whitening(scrambling) (boolean)
calc_crc: Whether to calculate CRC32 or not (boolean)
Packet will have access code at the beginning, followed by length, payload
and finally CRC-32.
"""
if not is_1_0_string(preamble):
raise ValueError("preamble must be a string containing only 0's and 1's (%r)" % (preamble,))
if not is_1_0_string(access_code):
raise ValueError("access_code must be a string containing only 0's and 1's (%r)" % (access_code,))
if not whitener_offset >=0 and whitener_offset < 16:
raise ValueError("whitener_offset must be between 0 and 15, inclusive (%i)" % (whitener_offset,))
(packed_access_code, padded) = conv_1_0_string_to_packed_binary_string(access_code)
(packed_preamble, ignore) = conv_1_0_string_to_packed_binary_string(preamble)
if(calc_crc):
payload_with_crc = crc.gen_and_append_crc32(payload)
else:
payload_with_crc = payload
#print("outbound crc =", string_to_hex_list(payload_with_crc[-4:]))
L = len(payload_with_crc)
MAXLEN = len(random_mask_tuple)
if L > MAXLEN:
raise ValueError("len(payload) must be in [0, %d]" % (MAXLEN,))
if whitening:
pkt = b''.join((packed_preamble, packed_access_code, make_header(L, whitener_offset),
whiten(payload_with_crc, whitener_offset), b'\x55'))
else:
pkt = b''.join((packed_preamble, packed_access_code, make_header(L, whitener_offset),
(payload_with_crc), b'\x55'))
if pad_for_usrp:
pkt = pkt + (_npadding_bytes(len(pkt), int(samples_per_symbol), bits_per_symbol) * b'\x55')
#print("make_packet: len(pkt) =", len(pkt))
return pkt
def _npadding_bytes(pkt_byte_len, samples_per_symbol, bits_per_symbol):
"""
Generate sufficient padding such that each packet ultimately ends
up being a multiple of 512 bytes when sent across the USB. We
send 4-byte samples across the USB (16-bit I and 16-bit Q), thus
we want to pad so that after modulation the resulting packet
is a multiple of 128 samples.
Args:
        pkt_byte_len: len in bytes of packet, not including padding.
samples_per_symbol: samples per bit (1 bit / symbolwidth GMSK) (int)
bits_per_symbol: bits per symbol (log2(modulation order)) (int)
Returns:
number of bytes of padding to append.
"""
modulus = 128
byte_modulus = gru.lcm(modulus // 8, samples_per_symbol) * bits_per_symbol // samples_per_symbol
r = pkt_byte_len % byte_modulus
if r == 0:
return 0
return byte_modulus - r
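# Illustrative example: with samples_per_symbol=2 and bits_per_symbol=1,
# byte_modulus = lcm(16, 2) * 1 // 2 = 8, so a 30-byte packet needs
# _npadding_bytes(30, 2, 1) == 2 bytes of padding to reach the next multiple of 8.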
def unmake_packet(whitened_payload_with_crc, whitener_offset=0,
dewhitening=True, check_crc=True):
"""
Return (ok, payload)
Args:
whitened_payload_with_crc: string
whitener_offset: integer offset into whitener table
dewhitening: True if we should run this through the dewhitener
check_crc: True if we should check the CRC of the packet
"""
if dewhitening:
payload_with_crc = dewhiten(whitened_payload_with_crc, whitener_offset)
else:
payload_with_crc = (whitened_payload_with_crc)
if check_crc:
ok, payload = crc.check_crc32(payload_with_crc)
else:
payload = payload_with_crc
ok = True
if 0:
print("payload_with_crc =", string_to_hex_list(payload_with_crc))
print("ok = %r, len(payload) = %d" % (ok, len(payload)))
print("payload =", string_to_hex_list(payload))
print("")
return ok, payload
# FYI, this PN code is the output of a 15-bit LFSR
random_mask_tuple = (
255, 63, 0, 16, 0, 12, 0, 5, 192, 3, 16, 1, 204, 0, 85, 192,
63, 16, 16, 12, 12, 5, 197, 195, 19, 17, 205, 204, 85, 149, 255, 47,
0, 28, 0, 9, 192, 6, 208, 2, 220, 1, 153, 192, 106, 208, 47, 28,
28, 9, 201, 198, 214, 210, 222, 221, 152, 89, 170, 186, 255, 51, 0, 21,
192, 15, 16, 4, 12, 3, 69, 193, 243, 16, 69, 204, 51, 21, 213, 207,
31, 20, 8, 15, 70, 132, 50, 227, 85, 137, 255, 38, 192, 26, 208, 11,
28, 7, 73, 194, 182, 209, 182, 220, 118, 217, 230, 218, 202, 219, 23, 27,
78, 139, 116, 103, 103, 106, 170, 175, 63, 60, 16, 17, 204, 12, 85, 197,
255, 19, 0, 13, 192, 5, 144, 3, 44, 1, 221, 192, 89, 144, 58, 236,
19, 13, 205, 197, 149, 147, 47, 45, 220, 29, 153, 201, 170, 214, 255, 30,
192, 8, 80, 6, 188, 2, 241, 193, 132, 80, 99, 124, 41, 225, 222, 200,
88, 86, 186, 190, 243, 48, 69, 212, 51, 31, 85, 200, 63, 22, 144, 14,
236, 4, 77, 195, 117, 145, 231, 44, 74, 157, 247, 41, 134, 158, 226, 232,
73, 142, 182, 228, 118, 203, 102, 215, 106, 222, 175, 24, 124, 10, 161, 199,
56, 82, 146, 189, 173, 177, 189, 180, 113, 183, 100, 118, 171, 102, 255, 106,
192, 47, 16, 28, 12, 9, 197, 198, 211, 18, 221, 205, 153, 149, 170, 239,
63, 12, 16, 5, 204, 3, 21, 193, 207, 16, 84, 12, 63, 69, 208, 51,
28, 21, 201, 207, 22, 212, 14, 223, 68, 88, 51, 122, 149, 227, 47, 9,
220, 6, 217, 194, 218, 209, 155, 28, 107, 73, 239, 118, 204, 38, 213, 218,
223, 27, 24, 11, 74, 135, 119, 34, 166, 153, 186, 234, 243, 15, 5, 196,
3, 19, 65, 205, 240, 85, 132, 63, 35, 80, 25, 252, 10, 193, 199, 16,
82, 140, 61, 165, 209, 187, 28, 115, 73, 229, 246, 203, 6, 215, 66, 222,
177, 152, 116, 106, 167, 111, 58, 172, 19, 61, 205, 209, 149, 156, 111, 41,
236, 30, 205, 200, 85, 150, 191, 46, 240, 28, 68, 9, 243, 70, 197, 242,
211, 5, 157, 195, 41, 145, 222, 236, 88, 77, 250, 181, 131, 55, 33, 214,
152, 94, 234, 184, 79, 50, 180, 21, 183, 79, 54, 180, 22, 247, 78, 198,
180, 82, 247, 125, 134, 161, 162, 248, 121, 130, 162, 225, 185, 136, 114, 230,
165, 138, 251, 39, 3, 90, 129, 251, 32, 67, 88, 49, 250, 148, 67, 47,
113, 220, 36, 89, 219, 122, 219, 99, 27, 105, 203, 110, 215, 108, 94, 173,
248, 125, 130, 161, 161, 184, 120, 114, 162, 165, 185, 187, 50, 243, 85, 133,
255, 35, 0, 25, 192, 10, 208, 7, 28, 2, 137, 193, 166, 208, 122, 220,
35, 25, 217, 202, 218, 215, 27, 30, 139, 72, 103, 118, 170, 166, 255, 58,
192, 19, 16, 13, 204, 5, 149, 195, 47, 17, 220, 12, 89, 197, 250, 211,
3, 29, 193, 201, 144, 86, 236, 62, 205, 208, 85, 156, 63, 41, 208, 30,
220, 8, 89, 198, 186, 210, 243, 29, 133, 201, 163, 22, 249, 206, 194, 212,
81, 159, 124, 104, 33, 238, 152, 76, 106, 181, 239, 55, 12, 22, 133, 206,
227, 20, 73, 207, 118, 212, 38, 223, 90, 216, 59, 26, 147, 75, 45, 247,
93, 134, 185, 162, 242, 249, 133, 130, 227, 33, 137, 216, 102, 218, 170, 219,
63, 27, 80, 11, 124, 7, 97, 194, 168, 81, 190, 188, 112, 113, 228, 36,
75, 91, 119, 123, 102, 163, 106, 249, 239, 2, 204, 1, 149, 192, 111, 16,
44, 12, 29, 197, 201, 147, 22, 237, 206, 205, 148, 85, 175, 127, 60, 32,
17, 216, 12, 90, 133, 251, 35, 3, 89, 193, 250, 208, 67, 28, 49, 201,
212, 86, 223, 126, 216, 32, 90, 152, 59, 42, 147, 95, 45, 248, 29, 130,
137, 161, 166, 248, 122, 194, 163, 17, 185, 204, 114, 213, 229, 159, 11, 40,
7, 94, 130, 184, 97, 178, 168, 117, 190, 167, 48, 122, 148, 35, 47, 89,
220, 58, 217, 211, 26, 221, 203, 25, 151, 74, 238, 183, 12, 118, 133, 230,
227, 10, 201, 199, 22, 210, 142, 221, 164, 89, 187, 122, 243, 99, 5, 233,
195, 14, 209, 196, 92, 83, 121, 253, 226, 193, 137, 144, 102, 236, 42, 205,
223, 21, 152, 15, 42, 132, 31, 35, 72, 25, 246, 138, 198, 231, 18, 202,
141, 151, 37, 174, 155, 60, 107, 81, 239, 124, 76, 33, 245, 216, 71, 26,
178, 139, 53, 167, 87, 58, 190, 147, 48, 109, 212, 45, 159, 93, 168, 57,
190, 146, 240, 109, 132, 45, 163, 93, 185, 249, 178, 194, 245, 145, 135, 44,
98, 157, 233, 169, 142, 254, 228, 64, 75, 112, 55, 100, 22, 171, 78, 255,
116, 64, 39, 112, 26, 164, 11, 59, 71, 83, 114, 189, 229, 177, 139, 52,
103, 87, 106, 190, 175, 48, 124, 20, 33, 207, 88, 84, 58, 191, 83, 48,
61, 212, 17, 159, 76, 104, 53, 238, 151, 12, 110, 133, 236, 99, 13, 233,
197, 142, 211, 36, 93, 219, 121, 155, 98, 235, 105, 143, 110, 228, 44, 75,
93, 247, 121, 134, 162, 226, 249, 137, 130, 230, 225, 138, 200, 103, 22, 170,
142, 255, 36, 64, 27, 112, 11, 100, 7, 107, 66, 175, 113, 188, 36, 113,
219, 100, 91, 107, 123, 111, 99, 108, 41, 237, 222, 205, 152, 85, 170, 191,
63, 48, 16, 20, 12, 15, 69, 196, 51, 19, 85, 205, 255, 21, 128, 15,
32, 4, 24, 3, 74, 129, 247, 32, 70, 152, 50, 234, 149, 143, 47, 36,
28, 27, 73, 203, 118, 215, 102, 222, 170, 216, 127, 26, 160, 11, 56, 7,
82, 130, 189, 161, 177, 184, 116, 114, 167, 101, 186, 171, 51, 63, 85, 208,
63, 28, 16, 9, 204, 6, 213, 194, 223, 17, 152, 12, 106, 133, 239, 35,
12, 25, 197, 202, 211, 23, 29, 206, 137, 148, 102, 239, 106, 204, 47, 21,
220, 15, 25, 196, 10, 211, 71, 29, 242, 137, 133, 166, 227, 58, 201, 211,
22, 221, 206, 217, 148, 90, 239, 123, 12, 35, 69, 217, 243, 26, 197, 203,
19, 23, 77, 206, 181, 148, 119, 47, 102, 156, 42, 233, 223, 14, 216, 4,
90, 131, 123, 33, 227, 88, 73, 250, 182, 195, 54, 209, 214, 220, 94, 217,
248, 90, 194, 187, 17, 179, 76, 117, 245, 231, 7, 10, 130, 135, 33, 162,
152, 121, 170, 162, 255, 57, 128, 18, 224, 13, 136, 5, 166, 131, 58, 225,
211, 8, 93, 198, 185, 146, 242, 237, 133, 141, 163, 37, 185, 219, 50, 219,
85, 155, 127, 43, 96, 31, 104, 8, 46, 134, 156, 98, 233, 233, 142, 206,
228, 84, 75, 127, 119, 96, 38, 168, 26, 254, 139, 0, 103, 64, 42, 176,
31, 52, 8, 23, 70, 142, 178, 228, 117, 139, 103, 39, 106, 154, 175, 43,
60, 31, 81, 200, 60, 86, 145, 254, 236, 64, 77, 240, 53, 132, 23, 35,
78, 153, 244, 106, 199, 111, 18, 172, 13, 189, 197, 177, 147, 52, 109, 215,
109, 158, 173, 168, 125, 190, 161, 176, 120, 116, 34, 167, 89, 186, 186, 243,
51, 5, 213, 195, 31, 17, 200, 12, 86, 133, 254, 227, 0, 73, 192, 54,
208, 22, 220, 14, 217, 196, 90, 211, 123, 29, 227, 73, 137, 246, 230, 198,
202, 210, 215, 29, 158, 137, 168, 102, 254, 170, 192, 127, 16, 32, 12, 24,
5, 202, 131, 23, 33, 206, 152, 84, 106, 191, 111, 48, 44, 20, 29, 207,
73, 148, 54, 239, 86, 204, 62, 213, 208, 95, 28, 56, 9, 210, 134, 221,
162, 217, 185, 154, 242, 235, 5, 143, 67, 36, 49, 219, 84, 91, 127, 123,
96, 35, 104, 25, 238, 138, 204, 103, 21, 234, 143, 15, 36, 4, 27, 67,
75, 113, 247, 100, 70, 171, 114, 255, 101, 128, 43, 32, 31, 88, 8, 58,
134, 147, 34, 237, 217, 141, 154, 229, 171, 11, 63, 71, 80, 50, 188, 21,
177, 207, 52, 84, 23, 127, 78, 160, 52, 120, 23, 98, 142, 169, 164, 126,
251, 96, 67, 104, 49, 238, 148, 76, 111, 117, 236, 39, 13, 218, 133, 155,
35, 43, 89, 223, 122, 216, 35, 26, 153, 203, 42, 215, 95, 30, 184, 8,
114, 134, 165, 162, 251, 57, 131, 82, 225, 253, 136, 65, 166, 176, 122, 244,
35, 7, 89, 194, 186, 209, 179, 28, 117, 201, 231, 22, 202, 142, 215, 36,
94, 155, 120, 107, 98, 175, 105, 188, 46, 241, 220, 68, 89, 243, 122, 197,
227, 19, 9, 205, 198, 213, 146, 223, 45, 152, 29, 170, 137, 191, 38, 240,
26, 196, 11, 19, 71, 77, 242, 181, 133, 183, 35, 54, 153, 214, 234, 222,
207, 24, 84, 10, 191, 71, 48, 50, 148, 21, 175, 79, 60, 52, 17, 215,
76, 94, 181, 248, 119, 2, 166, 129, 186, 224, 115, 8, 37, 198, 155, 18,
235, 77, 143, 117, 164, 39, 59, 90, 147, 123, 45, 227, 93, 137, 249, 166,
194, 250, 209, 131, 28, 97, 201, 232, 86, 206, 190, 212, 112, 95, 100, 56,
43, 82, 159, 125, 168, 33, 190, 152, 112, 106, 164, 47, 59, 92, 19, 121,
205, 226, 213, 137, 159, 38, 232, 26, 206, 139, 20, 103, 79, 106, 180, 47,
55, 92, 22, 185, 206, 242, 212, 69, 159, 115, 40, 37, 222, 155, 24, 107,
74, 175, 119, 60, 38, 145, 218, 236, 91, 13, 251, 69, 131, 115, 33, 229,
216, 75, 26, 183, 75, 54, 183, 86, 246, 190, 198, 240, 82, 196, 61, 147,
81, 173, 252, 125, 129, 225, 160, 72, 120, 54, 162, 150, 249, 174, 194, 252,
81, 129, 252, 96, 65, 232, 48, 78, 148, 52, 111, 87, 108, 62, 173, 208,
125, 156, 33, 169, 216, 126, 218, 160, 91, 56, 59, 82, 147, 125, 173, 225,
189, 136, 113, 166, 164, 122, 251, 99, 3, 105, 193, 238, 208, 76, 92, 53,
249, 215, 2, 222, 129, 152, 96, 106, 168, 47, 62, 156, 16, 105, 204, 46,
213, 220, 95, 25, 248, 10, 194, 135, 17, 162, 140, 121, 165, 226, 251, 9,
131, 70, 225, 242, 200, 69, 150, 179, 46, 245, 220, 71, 25, 242, 138, 197,
167, 19, 58, 141, 211, 37, 157, 219, 41, 155, 94, 235, 120, 79, 98, 180,
41, 183, 94, 246, 184, 70, 242, 178, 197, 181, 147, 55, 45, 214, 157, 158,
233, 168, 78, 254, 180, 64, 119, 112, 38, 164, 26, 251, 75, 3, 119, 65,
230, 176, 74, 244, 55, 7, 86, 130, 190, 225, 176, 72, 116, 54, 167, 86,
250, 190, 195, 48, 81, 212, 60, 95, 81, 248, 60, 66, 145, 241, 172, 68,
125, 243, 97, 133, 232, 99, 14, 169, 196, 126, 211, 96, 93, 232, 57, 142,
146, 228, 109, 139, 109, 167, 109, 186, 173, 179, 61, 181, 209, 183, 28, 118,
137, 230, 230, 202, 202, 215, 23, 30, 142, 136, 100, 102, 171, 106, 255, 111,
0, 44, 0, 29, 192, 9, 144, 6, 236, 2, 205, 193, 149, 144, 111, 44,
44, 29, 221, 201, 153, 150, 234, 238, 207, 12, 84, 5, 255, 67, 0, 49,
192, 20, 80, 15, 124, 4, 33, 195, 88, 81, 250, 188, 67, 49, 241, 212,
68, 95, 115, 120, 37, 226, 155, 9, 171, 70, 255, 114, 192, 37, 144, 27,
44, 11, 93, 199, 121, 146, 162, 237, 185, 141, 178, 229, 181, 139, 55, 39,
86, 154, 190, 235, 48, 79, 84, 52, 63, 87, 80, 62, 188, 16, 113, 204,
36, 85, 219, 127, 27, 96, 11, 104, 7, 110, 130, 172, 97, 189, 232, 113,
142, 164, 100, 123, 107, 99, 111, 105, 236, 46, 205, 220, 85, 153, 255, 42,
192, 31, 16, 8, 12, 6, 133, 194, 227, 17, 137, 204, 102, 213, 234, 223,
15, 24, 4, 10, 131, 71, 33, 242, 152, 69, 170, 179, 63, 53, 208, 23,
28, 14, 137, 196, 102, 211, 106, 221, 239, 25, 140, 10, 229, 199, 11, 18,
135, 77, 162, 181, 185, 183, 50, 246, 149, 134, 239, 34, 204, 25, 149, 202,
239, 23, 12, 14, 133, 196, 99, 19, 105, 205, 238, 213, 140, 95, 37, 248,
27, 2, 139, 65, 167, 112, 122, 164, 35, 59, 89, 211, 122, 221, 227, 25,
137, 202, 230, 215, 10, 222, 135, 24, 98, 138, 169, 167, 62, 250, 144, 67,
44, 49, 221, 212, 89, 159, 122, 232, 35, 14, 153, 196, 106, 211, 111, 29,
236, 9, 141, 198, 229, 146, 203, 45, 151, 93, 174, 185, 188, 114, 241, 229,
132, 75, 35, 119, 89, 230, 186, 202, 243, 23, 5, 206, 131, 20, 97, 207,
104, 84, 46, 191, 92, 112, 57, 228, 18, 203, 77, 151, 117, 174, 167, 60,
122, 145, 227, 44, 73, 221, 246, 217, 134, 218, 226, 219, 9, 155, 70, 235,
114, 207, 101, 148, 43, 47, 95, 92, 56, 57, 210, 146, 221, 173, 153, 189,
170, 241, 191, 4, 112, 3, 100, 1, 235, 64, 79, 112, 52, 36, 23, 91,
78, 187, 116, 115, 103, 101, 234, 171, 15, 63, 68, 16, 51, 76, 21, 245,
207, 7, 20, 2, 143, 65, 164, 48, 123, 84, 35, 127, 89, 224, 58, 200,
19, 22, 141, 206, 229, 148, 75, 47, 119, 92, 38, 185, 218, 242, 219, 5,
155, 67, 43, 113, 223, 100, 88, 43, 122, 159, 99, 40, 41, 222, 158, 216,
104, 90, 174, 187, 60, 115, 81, 229, 252, 75, 1, 247, 64, 70, 176, 50,
244, 21, 135, 79, 34, 180, 25, 183, 74, 246, 183, 6, 246, 130, 198, 225,
146, 200, 109, 150, 173, 174, 253, 188, 65, 177, 240, 116, 68, 39, 115, 90,
165, 251, 59, 3, 83, 65, 253, 240, 65, 132, 48, 99, 84, 41, 255, 94,
192, 56, 80, 18, 188, 13, 177, 197, 180, 83, 55, 125, 214, 161, 158, 248,
104, 66, 174, 177, 188, 116, 113, 231, 100, 74, 171, 119, 63, 102, 144, 42,
236, 31, 13, 200, 5, 150, 131, 46, 225, 220, 72, 89, 246, 186, 198, 243,
18, 197, 205, 147, 21, 173, 207, 61, 148, 17, 175, 76, 124, 53, 225, 215,
8, 94, 134, 184, 98, 242, 169, 133, 190, 227, 48, 73, 212, 54, 223, 86,
216, 62, 218, 144, 91, 44, 59, 93, 211, 121, 157, 226, 233, 137, 142, 230,
228, 74, 203, 119, 23, 102, 142, 170, 228, 127, 11, 96, 7, 104, 2, 174,
129, 188, 96, 113, 232, 36, 78, 155, 116, 107, 103, 111, 106, 172, 47, 61,
220, 17, 153, 204, 106, 213, 239, 31, 12, 8, 5, 198, 131, 18, 225, 205,
136, 85, 166, 191, 58, 240, 19, 4, 13, 195, 69, 145, 243, 44, 69, 221,
243, 25, 133, 202, 227, 23, 9, 206, 134, 212, 98, 223, 105, 152, 46, 234,
156, 79, 41, 244, 30, 199, 72, 82, 182, 189, 182, 241, 182, 196, 118, 211,
102, 221, 234, 217, 143, 26, 228, 11, 11, 71, 71, 114, 178, 165, 181, 187,
55, 51, 86, 149, 254, 239, 0, 76, 0, 53, 192, 23, 16, 14, 140, 4,
101, 195, 107, 17, 239, 76, 76, 53, 245, 215, 7, 30, 130, 136, 97, 166,
168, 122, 254, 163, 0, 121, 192, 34, 208, 25, 156, 10, 233, 199, 14, 210,
132, 93, 163, 121, 185, 226, 242, 201, 133, 150, 227, 46, 201, 220, 86, 217,
254, 218, 192, 91, 16, 59, 76, 19, 117, 205, 231, 21, 138, 143, 39, 36,
26, 155, 75, 43, 119, 95, 102, 184, 42, 242, 159, 5, 168, 3, 62, 129,
208, 96, 92, 40, 57, 222, 146, 216, 109, 154, 173, 171, 61, 191, 81, 176,
60, 116, 17, 231, 76, 74, 181, 247, 55, 6, 150, 130, 238, 225, 140, 72,
101, 246, 171, 6, 255, 66, 192, 49, 144, 20, 108, 15, 109, 196, 45, 147,
93, 173, 249, 189, 130, 241, 161, 132, 120, 99, 98, 169, 233, 190, 206, 240,
84, 68, 63, 115, 80, 37, 252, 27, 1, 203, 64, 87, 112, 62, 164, 16,
123, 76, 35, 117, 217, 231, 26, 202, 139, 23, 39, 78, 154, 180, 107, 55,
111, 86, 172, 62, 253, 208, 65, 156, 48, 105, 212, 46, 223, 92, 88, 57,
250, 146, 195, 45, 145, 221, 172, 89, 189, 250, 241, 131, 4, 97, 195, 104,
81, 238, 188, 76, 113, 245, 228, 71, 11, 114, 135, 101, 162, 171, 57, 191,
82, 240, 61, 132, 17, 163, 76, 121, 245, 226, 199, 9, 146, 134, 237, 162,
205, 185, 149, 178, 239, 53, 140, 23, 37, 206, 155, 20, 107, 79, 111, 116,
44, 39, 93, 218, 185, 155, 50, 235, 85, 143, 127, 36, 32, 27, 88, 11,
122, 135, 99, 34, 169, 217, 190, 218, 240, 91, 4, 59, 67, 83, 113, 253,
228, 65, 139, 112, 103, 100, 42, 171, 95, 63, 120, 16, 34, 140, 25, 165,
202, 251, 23, 3, 78, 129, 244, 96, 71, 104, 50, 174, 149, 188, 111, 49,
236, 20, 77, 207, 117, 148, 39, 47, 90, 156, 59, 41, 211, 94, 221, 248,
89, 130, 186, 225, 179, 8, 117, 198, 167, 18, 250, 141, 131, 37, 161, 219,
56, 91, 82, 187, 125, 179, 97, 181, 232, 119, 14, 166, 132, 122, 227, 99,
9, 233, 198, 206, 210, 212, 93, 159, 121, 168, 34, 254, 153, 128, 106, 224,
47, 8, 28, 6, 137, 194, 230, 209, 138, 220, 103, 25, 234, 138, 207, 39,
20, 26, 143, 75, 36, 55, 91, 86, 187, 126, 243, 96, 69, 232, 51, 14,
149, 196, 111, 19, 108, 13, 237, 197, 141, 147, 37, 173, 219, 61, 155, 81,
171, 124, 127, 97, 224, 40, 72, 30, 182, 136, 118, 230, 166, 202, 250, 215,
3, 30, 129, 200, 96, 86, 168, 62, 254, 144, 64, 108, 48, 45, 212, 29,
159, 73, 168, 54, 254, 150, 192, 110, 208, 44, 92, 29, 249, 201, 130, 214,
225, 158, 200, 104, 86, 174, 190, 252, 112, 65, 228, 48, 75, 84, 55, 127,
86, 160, 62, 248, 16, 66, 140, 49, 165, 212, 123, 31, 99, 72, 41, 246,
158, 198, 232, 82, 206, 189, 148, 113, 175, 100, 124, 43, 97, 223, 104, 88,
46, 186, 156, 115, 41, 229, 222, 203, 24, 87, 74, 190, 183, 48, 118, 148,
38, 239, 90, 204, 59, 21, 211, 79, 29, 244, 9, 135, 70, 226, 178, 201,
181, 150, 247, 46, 198, 156, 82, 233, 253, 142, 193, 164, 80, 123, 124, 35,
97, 217, 232, 90, 206, 187, 20, 115, 79, 101, 244, 43, 7, 95, 66, 184,
49, 178, 148, 117, 175, 103, 60, 42, 145, 223, 44, 88, 29, 250, 137, 131,
38, 225, 218, 200, 91, 22, 187, 78, 243, 116, 69, 231, 115, 10, 165, 199,
59, 18, 147, 77, 173, 245, 189, 135, 49, 162, 148, 121, 175, 98, 252, 41,
129, 222, 224, 88, 72, 58, 182, 147, 54, 237, 214, 205, 158, 213, 168, 95,
62, 184, 16, 114, 140, 37, 165, 219, 59, 27, 83, 75, 125, 247, 97, 134,
168, 98, 254, 169, 128, 126, 224, 32, 72, 24, 54, 138, 150, 231, 46, 202,
156, 87, 41, 254, 158, 192, 104, 80, 46, 188, 28, 113, 201, 228, 86, 203,
126, 215, 96, 94, 168, 56, 126, 146, 160, 109, 184, 45, 178, 157, 181, 169,
183, 62, 246, 144, 70, 236, 50, 205, 213, 149, 159, 47, 40, 28, 30, 137,
200, 102, 214, 170, 222, 255, 24, 64, 10, 176, 7, 52, 2, 151, 65, 174,
176, 124, 116, 33, 231, 88, 74, 186, 183, 51, 54, 149, 214, 239, 30, 204,
8, 85, 198, 191, 18, 240, 13, 132, 5, 163, 67, 57, 241, 210, 196, 93,
147, 121, 173, 226, 253, 137, 129, 166, 224, 122, 200, 35, 22, 153, 206, 234,
212, 79, 31, 116, 8, 39, 70, 154, 178, 235, 53, 143, 87, 36, 62, 155,
80, 107, 124, 47, 97, 220, 40, 89, 222, 186, 216, 115, 26, 165, 203, 59,
23, 83, 78, 189, 244, 113, 135, 100, 98, 171, 105, 191, 110, 240, 44, 68,
29, 243, 73, 133, 246, 227, 6, 201, 194, 214, 209, 158, 220, 104, 89, 238,
186, 204, 115, 21, 229, 207, 11, 20, 7, 79, 66, 180, 49, 183, 84, 118,
191, 102, 240, 42, 196, 31, 19, 72, 13, 246, 133, 134, 227, 34, 201, 217,
150, 218, 238, 219, 12, 91, 69, 251, 115, 3, 101, 193, 235, 16, 79, 76,
52, 53, 215, 87, 30, 190, 136, 112, 102, 164, 42, 251, 95, 3, 120, 1,
226, 128, 73, 160, 54, 248, 22, 194, 142, 209, 164, 92, 123, 121, 227, 98,
201, 233, 150, 206, 238, 212, 76, 95, 117, 248, 39, 2, 154, 129, 171, 32,
127, 88, 32, 58, 152, 19, 42, 141, 223, 37, 152, 27, 42, 139, 95, 39,
120, 26, 162, 139, 57, 167, 82, 250, 189, 131, 49, 161, 212, 120, 95, 98,
184, 41, 178, 158, 245, 168, 71, 62, 178, 144, 117, 172, 39, 61, 218, 145,
155, 44, 107, 93, 239, 121, 140, 34, 229, 217, 139, 26, 231, 75, 10, 183,
71, 54, 178, 150, 245, 174, 199, 60, 82, 145, 253, 172, 65, 189, 240, 113,
132, 36, 99, 91, 105, 251, 110, 195, 108, 81, 237, 252, 77, 129, 245, 160,
71, 56, 50, 146, 149, 173, 175, 61, 188, 17, 177, 204, 116, 85, 231, 127,
10, 160, 7, 56, 2, 146, 129, 173, 160, 125, 184, 33, 178, 152, 117, 170,
167, 63, 58, 144, 19, 44, 13, 221, 197, 153, 147, 42, 237, 223, 13, 152,
5, 170, 131, 63, 33, 208, 24, 92, 10, 185, 199, 50, 210, 149, 157, 175,
41, 188, 30, 241, 200, 68, 86, 179, 126, 245, 224, 71, 8, 50, 134, 149,
162, 239, 57, 140, 18, 229, 205, 139, 21, 167, 79, 58, 180, 19, 55, 77,
214, 181, 158, 247, 40, 70, 158, 178, 232, 117, 142, 167, 36, 122, 155, 99,
43, 105, 223, 110, 216, 44, 90, 157, 251, 41, 131, 94, 225, 248, 72, 66,
182, 177, 182, 244, 118, 199, 102, 210, 170, 221, 191, 25, 176, 10, 244, 7,
7, 66, 130, 177, 161, 180, 120, 119, 98, 166, 169, 186, 254, 243, 0, 69,
192, 51, 16, 21, 204, 15, 21, 196, 15, 19, 68, 13, 243, 69, 133, 243,
35, 5, 217, 195, 26, 209, 203, 28, 87, 73, 254, 182, 192, 118, 208, 38,
220, 26, 217, 203, 26, 215, 75, 30, 183, 72, 118, 182, 166, 246, 250, 198,
195, 18, 209, 205, 156, 85, 169, 255, 62, 192, 16, 80, 12, 60, 5, 209,
195, 28, 81, 201, 252, 86, 193, 254, 208, 64, 92, 48, 57, 212, 18, 223,
77, 152, 53, 170, 151, 63, 46, 144, 28, 108, 9, 237, 198, 205, 146, 213,
173, 159, 61, 168, 17, 190, 140, 112, 101, 228, 43, 11, 95, 71, 120, 50,
162, 149, 185, 175, 50, 252, 21, 129, 207, 32, 84, 24, 63, 74, 144, 55,
44, 22, 157, 206, 233, 148, 78, 239, 116, 76, 39, 117, 218, 167, 27, 58,
139, 83, 39, 125, 218, 161, 155, 56, 107, 82, 175, 125, 188, 33, 177, 216,
116, 90, 167, 123, 58, 163, 83, 57, 253, 210, 193, 157, 144, 105, 172, 46,
253, 220, 65, 153, 240, 106, 196, 47, 19, 92, 13, 249, 197, 130, 211, 33,
157, 216, 105, 154, 174, 235, 60, 79, 81, 244, 60, 71, 81, 242, 188, 69,
177, 243, 52, 69, 215, 115, 30, 165, 200, 123, 22, 163, 78, 249, 244, 66,
199, 113, 146, 164, 109, 187, 109, 179, 109, 181, 237, 183, 13, 182, 133, 182,
227, 54, 201, 214, 214, 222, 222, 216, 88, 90, 186, 187, 51, 51, 255, 63 )
random_mask_vec8 = numpy.array(random_mask_tuple, numpy.uint8)
| jdemel/gnuradio | gr-digital/python/digital/packet_utils.py | Python | gpl-3.0 | 27,833 |
# Copyright (C) 2008-2009 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from metadata._base import BaseFormat
from mutagen import oggspeex
class SpeexFormat(BaseFormat):
MutagenType = oggspeex.OggSpeex
writable = True
# vim: et sts=4 sw=4
| jeromeLB/client175 | metadata/speex.py | Python | gpl-3.0 | 1,397 |
"""
Dictzone
@website https://dictzone.com/
@provide-api no
@using-api no
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, content
"""
import re
from lxml import html
from searx.utils import is_valid_lang
from searx.url_utils import urljoin
categories = ['general']
url = u'http://dictzone.com/{from_lang}-{to_lang}-dictionary/{query}'
weight = 100
parser_re = re.compile(b'.*?([a-z]+)-([a-z]+) ([^ ]+)$', re.I)
results_xpath = './/table[@id="r"]/tr'
def request(query, params):
m = parser_re.match(query)
if not m:
return params
from_lang, to_lang, query = m.groups()
from_lang = is_valid_lang(from_lang)
to_lang = is_valid_lang(to_lang)
if not from_lang or not to_lang:
return params
params['url'] = url.format(from_lang=from_lang[2],
to_lang=to_lang[2],
query=query.decode('utf-8'))
return params
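# Illustrative query handling (example values, not from the original source):
# a search such as "en-hu dog" is split by parser_re into from_lang=b"en",
# to_lang=b"hu" and query=b"dog"; both language codes must resolve through
# is_valid_lang(), otherwise params is returned unchanged.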
def response(resp):
results = []
dom = html.fromstring(resp.text)
for k, result in enumerate(dom.xpath(results_xpath)[1:]):
try:
from_result, to_results_raw = result.xpath('./td')
except:
continue
to_results = []
for to_result in to_results_raw.xpath('./p/a'):
t = to_result.text_content()
if t.strip():
to_results.append(to_result.text_content())
results.append({
'url': urljoin(resp.url, '?%d' % k),
'title': from_result.text_content(),
'content': '; '.join(to_results)
})
return results
| potato/searx | searx/engines/dictzone.py | Python | agpl-3.0 | 1,645 |
import rdflib
from rdflib.graph import ConjunctiveGraph as Graph
from rdflib import plugin
from rdflib.store import Store, NO_STORE, VALID_STORE
from rdflib.namespace import Namespace
from rdflib.term import Literal
from rdflib.term import URIRef
from tempfile import mkdtemp
from gstudio.models import *
def rdf_description(name, notation='xml' ):
"""
    Function takes the title of a node and an rdf notation.
"""
valid_formats = ["xml", "n3", "ntriples", "trix"]
default_graph_uri = "http://gstudio.gnowledge.org/rdfstore"
configString = "/var/tmp/rdfstore"
    # Get the IOMemory store plugin.
store = plugin.get('IOMemory', Store)('rdfstore')
# Open previously created store, or create it if it doesn't exist yet
graph = Graph(store="IOMemory",
identifier = URIRef(default_graph_uri))
path = mkdtemp()
rt = graph.open(path, create=False)
if rt == NO_STORE:
        # There is no underlying store yet, create it
graph.open(path, create=True)
else:
assert rt == VALID_STORE, "The underlying store is corrupt"
# Now we'll add some triples to the graph & commit the changes
rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
graph.bind("gstudio", "http://gnowledge.org/")
exclusion_fields = ["id", "rght", "node_ptr_id", "image", "lft", "_state", "_altnames_cache", "_tags_cache", "nid_ptr_id", "_mptt_cached_fields"]
node=NID.objects.get(title=name)
node_dict=node.__dict__
subject=str(node_dict['id'])
for key in node_dict:
if key not in exclusion_fields:
predicate=str(key)
pobject=str(node_dict[predicate])
graph.add((rdflib[subject], rdflib[predicate], Literal(pobject)))
graph.commit()
    print(graph.serialize(format=notation))
graph.close()
for node in NID.objects.all():
    rdf_description(node)
| gnowledge/ncert_nroer | gstudio/testloop.py | Python | agpl-3.0 | 1,908 |
import os
import pathlib2
import logging
import yaml
import sys
import networkx as nx
from collections import namedtuple
import argparse
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
DOCKER_PATH_ROOT = pathlib2.Path(TRAVIS_BUILD_DIR, "docker", "build")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__)
def build_graph(git_dir, roles_dirs, aws_play_dirs, docker_play_dirs):
"""
Builds a dependency graph that shows relationships between roles and playbooks.
An edge [A, B], where A and B are roles, signifies that A depends on B. An edge
[C, D], where C is a playbook and D is a role, signifies that C uses D.
Input:
git_dir: A path to the top-most directory in the local git repository tool is to be run in.
roles_dirs: A list of relative paths to directories in which Ansible roles reside.
aws_play_dirs: A list of relative paths to directories in which AWS Ansible playbooks reside.
docker_play_dirs: A list of relative paths to directories in which Docker Ansible playbooks reside.
"""
graph = nx.DiGraph()
_map_roles_to_roles(graph, roles_dirs, git_dir, "dependencies", "role", "role")
_map_plays_to_roles(graph, aws_play_dirs, git_dir, "roles", "aws_playbook", "role")
_map_plays_to_roles(graph, docker_play_dirs, git_dir, "roles", "docker_playbook", "role")
return graph
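# Sketch of the resulting graph (hypothetical role/playbook names): if role
# "edxapp" lists "nginx" in its meta dependencies and playbook "edxapp.yml"
# lists "edxapp" under roles, the graph contains the edges
#   (edxapp, role) -> (nginx, role)
#   (edxapp, aws_playbook) -> (edxapp, role)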
def _map_roles_to_roles(graph, dirs, git_dir, key, type_1, type_2):
"""
Maps roles to the roles that they depend on.
Input:
graph: A networkx digraph that is used to map Ansible dependencies.
dirs: A list of relative paths to directories in which Ansible roles reside.
git_dir: A path to the top-most directory in the local git repository tool is to be run in.
key: The key in a role yaml file in dirs that maps to relevant role data. In this case, key is
"dependencies", because a role's dependent roles is of interest.
type_1: Given edges A-B, the type of node A.
type_2: Given edges A-B, the type of node B.
Since this function maps roles to their dependent roles, both type_1 and type_2 are "role".
"""
Node = namedtuple('Node', ['name', 'type'])
# for each role directory
for d in dirs:
d = pathlib2.Path(git_dir, d)
# for all files/sub-directories in directory
for item in d.iterdir():
# attempts to find meta/*.yml file in item directory tree
roles = [f for f in item.glob("meta/*.yml")]
# if a meta/*.yml file(s) exists for a role
if roles:
# for each role
for role in roles:
yaml_file = _open_yaml_file(role)
# if not an empty yaml file and key in file
if yaml_file is not None and key in yaml_file:
# for each dependent role; yaml_file["dependencies"] returns list of
# dependent roles
for dependent in yaml_file[key]:
# get role name of each dependent role
name = _get_role_name(dependent)
# add node for type_1, typically role
node_1 = Node(item.name, type_1)
# add node for type_2, typically dependent role
node_2 = Node(name, type_2)
# add edge, typically role - dependent role
graph.add_edge(node_1, node_2)
def _map_plays_to_roles(graph, dirs, git_dir, key, type_1, type_2):
"""
Maps plays to the roles they use.
Input:
graph: A networkx digraph that is used to map Ansible dependencies.
dirs: A list of relative paths to directories in which Ansible playbooks reside.
git_dir: A path to the top-most directory in the local git repository tool is to be run in.
key: The key in a playbook yaml file in dirs that maps to relevant playbook data. In this case, key is
"roles", because the roles used by a playbook is of interest.
type_1: Given edges A-B, the type of node A.
type_2: Given edges A-B, the type of node B.
Since this function maps plays to the roles they use, both type_1 is a type of playbook and type_2 is "role".
"""
Node = namedtuple('Node', ['name', 'type'])
# for each play directory
for d in dirs:
d = pathlib2.Path(git_dir, d)
# for all files/sub-directories in directory
for item in d.iterdir():
# if item is a file ending in .yml
if item.match("*.yml"):
# open .yml file for playbook
yaml_file = _open_yaml_file(item)
# if not an empty yaml file
if yaml_file is not None:
# for each play in yaml file
for play in yaml_file:
# if specified key in yaml file (e.g. "roles")
if key in play:
# for each role
for role in play[key]:
# get role name
name = _get_role_name(role)
#add node for type_1, typically for playbook
node_1 = Node(item.stem, type_1)
# add node for type_2, typically for role
node_2 = Node(name, type_2)
# add edge, typically playbook - role it uses
graph.add_edge(node_1, node_2)
def _open_yaml_file(file_str):
"""
Opens yaml file.
Input:
file_str: The path to yaml file to be opened.
"""
with (file_str.open(mode='r')) as file:
try:
yaml_file = yaml.load(file)
return yaml_file
except yaml.YAMLError, exc:
LOGGER.error("error in configuration file: %s" % str(exc))
sys.exit(1)
def change_set_to_roles(files, git_dir, roles_dirs, playbooks_dirs, graph):
"""
Converts change set consisting of a number of files to the roles that they represent/contain.
Input:
files: A list of files modified by a commit range.
git_dir: A path to the top-most directory in the local git repository tool is to be run in.
roles_dirs: A list of relative paths to directories in which Ansible roles reside.
playbook_dirs: A list of relative paths to directories in which Ansible playbooks reside.
graph: A networkx digraph that is used to map Ansible dependencies.
"""
# set of roles
items = set()
# for all directories containing roles
for role_dir in roles_dirs:
role_dir_path = pathlib2.Path(git_dir, role_dir)
# get all files in the directories containing roles (i.e. all the roles in that directory)
        candidate_files = set(role_dir_path.glob("**/*"))
# for all the files in the change set
for f in files:
file_path = pathlib2.Path(git_dir, f)
# if the change set file is in the set of role files
if file_path in candidate_files:
# get name of role and add it to set of roles of the change set
items.add(_get_resource_name(file_path, "roles"))
# for all directories containing playbooks
for play_dir in playbooks_dirs:
play_dir_path = pathlib2.Path(git_dir, play_dir)
# get all files in directory containing playbook that end with yml extension
# (i.e. all playbooks in that directory)
        candidate_files = set(play_dir_path.glob("*.yml"))
        # for all files in the change set
for f in files:
file_path = pathlib2.Path(git_dir, f)
            # if the change set file is in the set of playbook files
if file_path in candidate_files:
# gets first level of children of playbook in graph, which represents
# all roles the playbook uses
descendants = nx.all_neighbors(graph, (file_path.stem, "aws_playbook"))
# adds all the roles that a playbook uses to set of roles of the change set
items |= {desc.name for desc in descendants}
return items
def _get_resource_name(path, kind):
"""
Gets name of resource from the filepath, which is the directory following occurence of kind.
Input:
path: A path to the resource (e.g. a role or a playbook)
kind: A description of the type of resource; this keyword precedes the name of a role or a playbook
in a file path and allows for the separation of its name;
e.g. for "configuration/playbooks/roles/discovery/...", kind = "roles" returns
"discovery" as the role name
"""
# get individual parts of a file path
dirs = path.parts
# type of resource is the next part of the file path after kind (e.g. after "roles" or "playbooks")
return dirs[dirs.index(kind)+1]
def get_dependencies(roles, graph):
"""
Determines all roles dependent on set of roles and returns set containing both.
Input:
roles: A set of roles.
graph: A networkx digraph that is used to map Ansible dependencies.
"""
items = set()
for role in roles:
# add the role itself
items.add(role)
# add all the roles that depend on the role
dependents = nx.descendants(graph, (role, "role"))
items |= {dependent.name for dependent in dependents}
return items
def get_docker_plays(roles, graph):
"""Gets all docker plays that contain at least role in common with roles."""
# dict to determine coverage of plays
coverage = dict.fromkeys(roles, False)
items = set()
docker_plays = (node.name for node in graph.nodes() if node.type == "docker_playbook")
for play in docker_plays:
# all roles that are used by play
roles_nodes = nx.all_neighbors(graph, (play, "docker_playbook"))
docker_roles = {role.name for role in roles_nodes}
# compares roles and docker roles
common_roles = roles & docker_roles
# if their intersection is non-empty, add the docker role
if common_roles:
items.add(play)
# each aws role that was in common is marked as being covered by a docker play
for role in common_roles:
coverage[role] = True
# check coverage of roles
for role in coverage:
if not coverage[role]:
LOGGER.warning("role '%s' is not covered." % role)
return items
def filter_docker_plays(plays, repo_path):
"""Filters out docker plays that do not have a Dockerfile."""
items = set()
for play in plays:
dockerfile = pathlib2.Path(DOCKER_PATH_ROOT, play, "Dockerfile")
if dockerfile.exists():
items.add(play)
else:
LOGGER.warning("covered playbook '%s' does not have Dockerfile." % play)
return items
def _get_role_name(role):
"""
Resolves a role name from either a simple declaration or a dictionary style declaration.
A simple declaration would look like:
- foo
A dictionary style declaration would look like:
- role: rbenv
rbenv_user: "{{ forum_user }}"
rbenv_dir: "{{ forum_app_dir }}"
rbenv_ruby_version: "{{ forum_ruby_version }}"
:param role:
:return:
"""
if isinstance(role, dict):
return role['role']
elif isinstance(role, basestring):
return role
else:
LOGGER.warning("role %s could not be resolved to a role name." % role)
return None
def arg_parse():
parser = argparse.ArgumentParser(description = 'Given a commit range, analyze Ansible dependencies between roles and playbooks '
'and output a list of Docker plays affected by this commit range via these dependencies.')
parser.add_argument('--verbose', help="set warnings to be displayed", action="store_true")
return parser.parse_args()
if __name__ == '__main__':
args = arg_parse()
# configure logging
logging.basicConfig()
if not args.verbose:
logging.disable(logging.WARNING)
# set of modified files in the commit range
change_set = set()
# read from standard in
for line in sys.stdin:
change_set.add(line.rstrip())
# configuration file is expected to be in the following format:
#
# roles_paths:
# - <all paths relative to configuration repository that contain Ansible roles>
# aws_plays_paths:
# - <all paths relative to configuration repository that contain aws Ansible playbooks>
# docker_plays_paths:
    # - <all paths relative to configuration repository that contain Docker Ansible playbooks>
# read config file
config = _open_yaml_file(CONFIG_FILE_PATH)
# build graph
graph = build_graph(TRAVIS_BUILD_DIR, config["roles_paths"], config["aws_plays_paths"], config["docker_plays_paths"])
# transforms list of roles and plays into list of original roles and the roles contained in the plays
roles = change_set_to_roles(change_set, TRAVIS_BUILD_DIR, config["roles_paths"], config["aws_plays_paths"], graph)
# expands roles set to include roles that are dependent on existing roles
dependent_roles = get_dependencies(roles, graph)
# determine which docker plays cover at least one role
docker_plays = get_docker_plays(dependent_roles, graph)
# filter out docker plays without a Dockerfile
docker_plays = filter_docker_plays(docker_plays, TRAVIS_BUILD_DIR)
# prints Docker plays
print " ".join(str(play) for play in docker_plays)
| karimdamak123/configuration2 | util/parsefiles.py | Python | agpl-3.0 | 13,776 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.campaigns.models.catalog_filters import (
CategoryFilter, ProductFilter, ProductTypeFilter
)
from ._base import BaseRuleModelForm
class ProductTypeFilterForm(BaseRuleModelForm):
class Meta(BaseRuleModelForm.Meta):
model = ProductTypeFilter
class ProductFilterForm(BaseRuleModelForm):
class Meta(BaseRuleModelForm.Meta):
model = ProductFilter
class CategoryFilterForm(BaseRuleModelForm):
class Meta(BaseRuleModelForm.Meta):
model = CategoryFilter
| suutari-ai/shoop | shuup/campaigns/admin_module/forms/_catalog_filters.py | Python | agpl-3.0 | 734 |
"""
ACE message types for the calendar_sync module.
"""
from openedx.core.djangoapps.ace_common.message import BaseMessageType
class CalendarSync(BaseMessageType):
def __init__(self, *args, **kwargs):
super(CalendarSync, self).__init__(*args, **kwargs)
self.options['transactional'] = True
| edx-solutions/edx-platform | openedx/features/calendar_sync/message_types.py | Python | agpl-3.0 | 315 |
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def exclude_tables_from_config(config_):
tables_ = config_.get("tables", None)
if tables_ is not None:
tables = tables_.split(",")
return tables
exclude_tables = exclude_tables_from_config(config.get_section('alembic:exclude'))
def include_object(object, name, type_, reflected, compare_to):
if type_ == "table" and name in exclude_tables:
return False
else:
return True
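# The excluded tables come from an "[alembic:exclude]" section of the alembic
# ini file; a minimal sketch (the table name is only an example):
#   [alembic:exclude]
#   tables = spatial_ref_sys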
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool
)
connection = engine.connect()
context.configure(connection=connection, target_metadata=target_metadata, include_object=include_object)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| xlqian/navitia | source/tyr/migrations/env.py | Python | agpl-3.0 | 3,771 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlXmlLibxml(PerlPackage):
"""This module is an interface to libxml2, providing XML and HTML parsers
with DOM, SAX and XMLReader interfaces, a large subset of DOM Layer 3
interface and a XML::XPath-like interface to XPath API of libxml2. The
module is split into several packages which are not described in this
section; unless stated otherwise, you only need to use XML::LibXML; in your
programs."""
homepage = "https://metacpan.org/pod/XML::LibXML"
url = "https://cpan.metacpan.org/authors/id/S/SH/SHLOMIF/XML-LibXML-2.0201.tar.gz"
version('2.0201', sha256='e008700732502b3f1f0890696ec6e2dc70abf526cd710efd9ab7675cae199bc2')
depends_on('libxml2')
depends_on('perl-xml-namespacesupport', type=('build', 'run'))
depends_on('perl-xml-sax', type=('build', 'run'))
depends_on('perl-xml-sax-base', type=('build', 'run'))
depends_on('perl-alien-libxml2', type='build')
| iulian787/spack | var/spack/repos/builtin/packages/perl-xml-libxml/package.py | Python | lgpl-2.1 | 1,155 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyUrllib3(PythonPackage):
"""HTTP library with thread-safe connection pooling, file post, and
more."""
homepage = "https://urllib3.readthedocs.io/"
url = "https://pypi.io/packages/source/u/urllib3/urllib3-1.25.6.tar.gz"
version('1.25.6', sha256='9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86')
version('1.25.3', sha256='dbe59173209418ae49d485b87d1681aefa36252ee85884c31346debd19463232')
version('1.21.1', sha256='b14486978518ca0901a76ba973d7821047409d7f726f22156b24e83fd71382a5')
version('1.20', sha256='97ef2b6e2878d84c0126b9f4e608e37a951ca7848e4855a7f7f4437d5c34a72f')
version('1.14', sha256='dd4fb13a4ce50b18338c7e4d665b21fd38632c5d4b1d9f1a1379276bd3c08d37')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-pytest', type='test')
depends_on('py-mock', type='test')
depends_on('py-tornado', type='test')
variant('secure', default=False, description='Add SSL/TLS support')
depends_on('[email protected]:', when='+secure')
depends_on('[email protected]:', when='+secure')
depends_on('py-idna@2:', when='+secure')
depends_on('py-certifi', when='+secure')
depends_on('py-ipaddress', when='+secure ^[email protected]:2.8')
variant('socks', default=False, description='SOCKS and HTTP proxy support')
depends_on('[email protected],1.5.8:1.999', when='+socks')
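    # Example spec (illustrative): `spack install py-urllib3 +secure +socks`
    # enables both optional variants and pulls in the extra dependencies above.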
| iulian787/spack | var/spack/repos/builtin/packages/py-urllib3/package.py | Python | lgpl-2.1 | 1,658 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyQtawesome(PythonPackage):
"""FontAwesome icons in PyQt and PySide applications"""
homepage = "https://github.com/spyder-ide/qtawesome"
url = "https://pypi.io/packages/source/Q/QtAwesome/QtAwesome-0.4.1.tar.gz"
version('0.4.1', 'bf93df612a31f3b501d751fc994c1b05')
version('0.3.3', '830677aa6ca4e7014e228147475183d3')
depends_on('py-setuptools', type='build')
depends_on('py-qtpy', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
| TheTimmy/spack | var/spack/repos/builtin/packages/py-qtawesome/package.py | Python | lgpl-2.1 | 1,759 |
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from androguard.decompiler.dad.basic_blocks import (Condition,
ShortCircuitBlock,
LoopBlock)
from androguard.decompiler.dad.graph import Graph
from androguard.decompiler.dad.node import Interval
from androguard.decompiler.dad.util import common_dom
logger = logging.getLogger('dad.control_flow')
def intervals(graph):
'''
Compute the intervals of the graph
Returns
interval_graph: a graph of the intervals of G
interv_heads: a dict of (header node, interval)
'''
interval_graph = Graph() # graph of intervals
heads = set([graph.entry]) # set of header nodes
interv_heads = {} # interv_heads[i] = interval of header i
processed = dict([(i, False) for i in graph])
edges = {}
while heads:
head = heads.pop()
if not processed[head]:
processed[head] = True
interv_heads[head] = Interval(head)
            # Check if there is a node which has all its predecessors in the
# current interval. If there is, add that node to the interval and
# repeat until all the possible nodes have been added.
change = True
while change:
change = False
for node in graph.rpo[1:]:
if all(p in interv_heads[head] for p in graph.preds(node)):
change |= interv_heads[head].add_node(node)
# At this stage, a node which is not in the interval, but has one
            # of its predecessors in it, is the header of another interval. So
# we add all such nodes to the header list.
for node in graph:
if node not in interv_heads[head] and node not in heads:
if any(p in interv_heads[head] for p in graph.preds(node)):
edges.setdefault(interv_heads[head], []).append(node)
heads.add(node)
interval_graph.add_node(interv_heads[head])
interv_heads[head].compute_end(graph)
# Edges is a mapping of 'Interval -> [header nodes of interval successors]'
for interval, heads in edges.items():
for head in heads:
interval_graph.add_edge(interval, interv_heads[head])
interval_graph.entry = graph.entry.interval
if graph.exit:
interval_graph.exit = graph.exit.interval
return interval_graph, interv_heads
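# Worked illustration (assumed CFG, not taken from the original code): for a
# graph entry -> cond, cond -> body, body -> cond, cond -> exit, the first
# pass yields I(entry) = {entry} and I(cond) = {cond, body, exit}, because
# body and exit have all their predecessors inside I(cond) while cond cannot
# join I(entry) since its predecessor body lies outside it. The graph of these
# two intervals collapses into a single interval on the next pass, which is
# the fixpoint that derived_sequence() below iterates towards.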
def derived_sequence(graph):
'''
Compute the derived sequence of the graph G
The intervals of G are collapsed into nodes, intervals of these nodes are
built, and the process is repeated iteratively until we obtain a single
node (if the graph is not irreducible)
'''
deriv_seq = [graph]
deriv_interv = []
single_node = False
while not single_node:
interv_graph, interv_heads = intervals(graph)
deriv_interv.append(interv_heads)
single_node = len(interv_graph) == 1
if not single_node:
deriv_seq.append(interv_graph)
graph = interv_graph
if 0:
graph.draw(graph.entry.name, 'tmp/dad/intervals/')
graph.compute_rpo()
return deriv_seq, deriv_interv
def mark_loop_rec(graph, node, s_num, e_num, interval, nodes_in_loop):
if node in nodes_in_loop:
return
nodes_in_loop.append(node)
for pred in graph.preds(node):
if s_num < pred.num <= e_num and pred in interval:
mark_loop_rec(graph, pred, s_num, e_num, interval, nodes_in_loop)
def mark_loop(graph, start, end, interval):
logger.debug('MARKLOOP : %s END : %s', start, end)
head = start.get_head()
latch = end.get_end()
nodes_in_loop = [head]
mark_loop_rec(graph, latch, head.num, latch.num, interval, nodes_in_loop)
head.startloop = True
head.latch = latch
return nodes_in_loop
def loop_type(start, end, nodes_in_loop):
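    """Classify the loop with header `start` and latch `end` as pre-tested
    (while-style), post-tested (do-while-style) or endless, depending on
    which of the two nodes is conditional and whether both branches of the
    conditional stay inside `nodes_in_loop`."""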
if end.type.is_cond:
if start.type.is_cond:
if start.true in nodes_in_loop and start.false in nodes_in_loop:
start.looptype.is_posttest = True
else:
start.looptype.is_pretest = True
else:
start.looptype.is_posttest = True
else:
if start.type.is_cond:
if start.true in nodes_in_loop and start.false in nodes_in_loop:
start.looptype.is_endless = True
else:
start.looptype.is_pretest = True
else:
start.looptype.is_endless = True
def loop_follow(start, end, nodes_in_loop):
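    """Compute the follow node of the loop headed by `start` (the node control
    reaches once the loop exits) and record it in the `follow['loop']` slot of
    `start` and of every node in `nodes_in_loop`."""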
follow = None
if start.looptype.is_pretest:
if start.true in nodes_in_loop:
follow = start.false
else:
follow = start.true
elif start.looptype.is_posttest:
if end.true in nodes_in_loop:
follow = end.false
else:
follow = end.true
else:
num_next = float('inf')
for node in nodes_in_loop:
if node.type.is_cond:
if (node.true.num < num_next
and node.true not in nodes_in_loop):
follow = node.true
num_next = follow.num
elif (node.false.num < num_next
and node.false not in nodes_in_loop):
follow = node.false
num_next = follow.num
start.follow['loop'] = follow
for node in nodes_in_loop:
node.follow['loop'] = follow
logger.debug('Start of loop %s', start)
logger.debug('Follow of loop: %s', start.follow['loop'])
def loop_struct(graphs_list, intervals_list):
first_graph = graphs_list[0]
for i, graph in enumerate(graphs_list):
interval = intervals_list[i]
for head in sorted(interval.keys(), key=lambda x: x.num):
loop_nodes = set()
for node in graph.preds(head):
if node.interval is head.interval:
lnodes = mark_loop(first_graph, head, node, head.interval)
loop_nodes.update(lnodes)
head.get_head().loop_nodes = loop_nodes
def if_struct(graph, idoms):
unresolved = set()
for node in graph.post_order():
if node.type.is_cond:
ldominates = []
for n, idom in idoms.iteritems():
if node is idom and len(graph.preds(n)) > 1:
ldominates.append(n)
if len(ldominates) > 0:
n = max(ldominates, key=lambda x: x.num)
node.follow['if'] = n
for x in unresolved.copy():
if node.num < x.num < n.num:
x.follow['if'] = n
unresolved.remove(x)
else:
unresolved.add(node)
return unresolved
def switch_struct(graph, idoms):
unresolved = set()
for node in graph.post_order():
if node.type.is_switch:
m = node
for suc in graph.sucs(node):
if idoms[suc] is not node:
m = common_dom(idoms, node, suc)
ldominates = []
for n, dom in idoms.iteritems():
if m is dom and len(graph.preds(n)) > 1:
ldominates.append(n)
if len(ldominates) > 0:
n = max(ldominates, key=lambda x: x.num)
node.follow['switch'] = n
for x in unresolved:
x.follow['switch'] = n
unresolved = set()
else:
unresolved.add(node)
node.order_cases()
def short_circuit_struct(graph, idom, node_map):
def MergeNodes(node1, node2, is_and, is_not):
lpreds = set()
ldests = set()
for node in (node1, node2):
lpreds.update(graph.preds(node))
ldests.update(graph.sucs(node))
graph.remove_node(node)
done.add(node)
lpreds.difference_update((node1, node2))
ldests.difference_update((node1, node2))
entry = graph.entry in (node1, node2)
new_name = '%s+%s' % (node1.name, node2.name)
condition = Condition(node1, node2, is_and, is_not)
new_node = ShortCircuitBlock(new_name, condition)
for old_n, new_n in node_map.iteritems():
if new_n in (node1, node2):
node_map[old_n] = new_node
node_map[node1] = new_node
node_map[node2] = new_node
idom[new_node] = idom[node1]
idom.pop(node1)
idom.pop(node2)
new_node.copy_from(node1)
graph.add_node(new_node)
for pred in lpreds:
pred.update_attribute_with(node_map)
graph.add_edge(node_map.get(pred, pred), new_node)
for dest in ldests:
graph.add_edge(new_node, node_map.get(dest, dest))
if entry:
graph.entry = new_node
return new_node
change = True
while change:
change = False
done = set()
for node in graph.post_order():
if node.type.is_cond and node not in done:
then = node.true
els = node.false
if node in (then, els):
continue
if then.type.is_cond and len(graph.preds(then)) == 1:
if then.false is els: # node && t
change = True
merged_node = MergeNodes(node, then, True, False)
merged_node.true = then.true
merged_node.false = els
elif then.true is els: # !node || t
change = True
merged_node = MergeNodes(node, then, False, True)
merged_node.true = els
merged_node.false = then.false
elif els.type.is_cond and len(graph.preds(els)) == 1:
if els.false is then: # !node && e
change = True
merged_node = MergeNodes(node, els, True, True)
merged_node.true = els.true
merged_node.false = then
elif els.true is then: # node || e
change = True
merged_node = MergeNodes(node, els, False, False)
merged_node.true = then
merged_node.false = els.false
done.add(node)
if change:
graph.reset_rpo()
def while_block_struct(graph, node_map):
change = False
for node in graph.rpo[:]:
if node.startloop:
change = True
new_node = LoopBlock(node.name, node)
node_map[node] = new_node
new_node.copy_from(node)
entry = node is graph.entry
lpreds = graph.preds(node)
lsuccs = graph.sucs(node)
for pred in lpreds:
graph.add_edge(node_map.get(pred, pred), new_node)
for suc in lsuccs:
graph.add_edge(new_node, node_map.get(suc, suc))
if entry:
graph.entry = new_node
if node.type.is_cond:
new_node.true = node.true
new_node.false = node.false
graph.add_node(new_node)
graph.remove_node(node)
if change:
graph.reset_rpo()
def update_dom(idoms, node_map):
for n, dom in idoms.iteritems():
idoms[n] = node_map.get(dom, dom)
def identify_structures(graph, idoms):
Gi, Li = derived_sequence(graph)
switch_struct(graph, idoms)
loop_struct(Gi, Li)
node_map = {}
short_circuit_struct(graph, idoms, node_map)
update_dom(idoms, node_map)
if_unresolved = if_struct(graph, idoms)
while_block_struct(graph, node_map)
update_dom(idoms, node_map)
loop_starts = []
for node in graph.rpo:
node.update_attribute_with(node_map)
if node.startloop:
loop_starts.append(node)
for node in loop_starts:
loop_type(node, node.latch, node.loop_nodes)
loop_follow(node, node.latch, node.loop_nodes)
for node in if_unresolved:
follows = [n for n in (node.follow['loop'],
node.follow['switch']) if n]
if len(follows) >= 1:
follow = min(follows, key=lambda x: x.num)
node.follow['if'] = follow
| flamableconcrete/androguard | androguard/decompiler/dad/control_flow.py | Python | lgpl-3.0 | 13,166 |
# -*- coding: utf-8 -*-
"""This file contains the airport plist plugin in Plaso."""
from plaso.events import plist_event
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
__author__ = 'Joaquin Moreno Garijo ([email protected])'
class AirportPlugin(interface.PlistPlugin):
"""Plist plugin that extracts WiFi information."""
NAME = u'airport'
DESCRIPTION = u'Parser for Airport plist files.'
PLIST_PATH = u'com.apple.airport.preferences.plist'
PLIST_KEYS = frozenset([u'RememberedNetworks'])
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant Airport entries.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
match: Optional dictionary containing keys extracted from PLIST_KEYS.
The default is None.
"""
if u'RememberedNetworks' not in match:
return
for wifi in match[u'RememberedNetworks']:
description = (
u'[WiFi] Connected to network: <{0:s}> using security {1:s}').format(
wifi.get(u'SSIDString', u'UNKNOWN_SSID'),
wifi.get(u'SecurityType', u'UNKNOWN_SECURITY_TYPE'))
event_object = plist_event.PlistEvent(
u'/RememberedNetworks', u'item', wifi.get(u'LastConnected', 0),
description)
parser_mediator.ProduceEvent(event_object)
plist.PlistParser.RegisterPlugin(AirportPlugin)
| ostree/plaso | plaso/parsers/plist_plugins/airport.py | Python | apache-2.0 | 1,452 |
toptenants_resource = {
"_links": {
"self": {
"href": "/usagedata/toptenants"
}
},
"_embedded": {
"tenants": [
]
},
}
tenant_resource = {
"ranking": 0,
"tenantId": 0,
"vmsActiveNum": 0,
"ramAllocatedTot": 0,
"vcpuAllocatedTot": 0,
"ramUsedPct": 0,
"cpuUsedPct": 0,
"tmpSumCpuPct": 0,
"tmpSumRamPct": 0,
"regions": [
]
} | attybro/FIWARELab-monitoringAPI | monitoringProxy/model/usagedata_resources.py | Python | apache-2.0 | 424 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Dilation operators"""
import tvm
from tvm import te
from .. import utils
from .. import tag
@te.tag_scope(tag=tag.INJECTIVE + ",dilate")
def dilate(data, strides, dilation_value=0.0, name="DilatedInput"):
"""Dilate data with given dilation value (0 by default).
Parameters
----------
data : tvm.te.Tensor
n-D, can be any layout.
strides : list / tuple of n ints
Dilation stride on each dimension, 1 means no dilation.
dilation_value : int/float, optional
Value used to dilate the input.
name : str, optional
The name prefix operators generated
Returns
-------
Output : tvm.te.Tensor
n-D, the same layout as data.
"""
n = len(data.shape)
if len(strides) != n:
raise ValueError("data dimension and strides size dismatch : %d vs %d" % (n, len(strides)))
ana = tvm.arith.Analyzer()
out_shape = tuple(ana.simplify((data.shape[i] - 1) * strides[i] + 1) for i in range(n))
def _dilate(*indices):
not_zero = []
index_tuple = []
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
for i in range(n):
if not utils.equal_const_int(strides[i], 1):
index_tuple.append(idxdiv(indices[i], strides[i]))
not_zero.append(idxmod(indices[i], strides[i]).equal(0))
else:
index_tuple.append(indices[i])
if not_zero:
not_zero = tvm.tir.all(*not_zero)
return tvm.tir.if_then_else(
not_zero, data(*index_tuple), tvm.tir.const(dilation_value, data.dtype)
)
return data(*index_tuple)
return te.compute(out_shape, _dilate, name=name)
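# Illustrative use (shapes assumed, not part of the original module): dilating
# a (2, 2) tensor with strides (2, 2) gives a (3, 3) result whose original
# values sit at the even indices and whose remaining elements equal
# dilation_value, e.g.
#   A = te.placeholder((2, 2), name="A")
#   B = dilate(A, (2, 2))  # B.shape == (3, 3)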
| dmlc/tvm | python/tvm/topi/nn/dilate.py | Python | apache-2.0 | 2,530 |
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
TABLES_012 = ['resource', 'sourceassoc', 'user',
'project', 'meter', 'source', 'alarm']
TABLES_027 = ['user', 'project', 'alarm']
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
for table_name in TABLES_027:
try:
sa.Table('dump027_' + table_name, meta, autoload=True)\
.drop(checkfirst=True)
except sa.exc.NoSuchTableError:
pass
for table_name in TABLES_012:
try:
sa.Table('dump_' + table_name, meta, autoload=True)\
.drop(checkfirst=True)
except sa.exc.NoSuchTableError:
pass
def downgrade(migrate_engine):
pass
| NeCTAR-RC/ceilometer | ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py | Python | apache-2.0 | 1,278 |
import orjson
from zerver.lib.send_email import FromAddress
from zerver.lib.test_classes import WebhookTestCase
from zerver.models import Recipient, get_realm, get_user_by_delivery_email
from zerver.webhooks.teamcity.view import MISCONFIGURED_PAYLOAD_TYPE_ERROR_MESSAGE
class TeamCityHookTests(WebhookTestCase):
STREAM_NAME = "teamcity"
URL_TEMPLATE = "/api/v1/external/teamcity?stream={stream}&api_key={api_key}"
TOPIC = "Project :: Compile"
WEBHOOK_DIR_NAME = "teamcity"
def test_teamcity_success(self) -> None:
expected_message = "Project :: Compile build 5535 - CL 123456 was successful! :thumbs_up: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
self.check_webhook("success", self.TOPIC, expected_message)
def test_teamcity_success_branch(self) -> None:
expected_message = "Project :: Compile build 5535 - CL 123456 was successful! :thumbs_up: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
expected_topic = "Project :: Compile (MyBranch)"
self.check_webhook("success_branch", expected_topic, expected_message)
def test_teamcity_broken(self) -> None:
expected_message = "Project :: Compile build 5535 - CL 123456 is broken with status Exit code 1 (new)! :thumbs_down: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
self.check_webhook("broken", self.TOPIC, expected_message)
def test_teamcity_failure(self) -> None:
expected_message = "Project :: Compile build 5535 - CL 123456 is still broken with status Exit code 1! :thumbs_down: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
self.check_webhook("failure", self.TOPIC, expected_message)
def test_teamcity_fixed(self) -> None:
expected_message = "Project :: Compile build 5535 - CL 123456 has been fixed! :thumbs_up: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
self.check_webhook("fixed", self.TOPIC, expected_message)
def test_teamcity_personal(self) -> None:
expected_message = "Your personal build for Project :: Compile build 5535 - CL 123456 is broken with status Exit code 1 (new)! :thumbs_down: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
payload = orjson.dumps(
orjson.loads(self.webhook_fixture_data(self.WEBHOOK_DIR_NAME, "personal"))
)
self.client_post(self.url, payload, content_type="application/json")
msg = self.get_last_message()
self.assertEqual(msg.content, expected_message)
self.assertEqual(msg.recipient.type, Recipient.PERSONAL)
def test_non_generic_payload_ignore_pm_notification(self) -> None:
expected_message = MISCONFIGURED_PAYLOAD_TYPE_ERROR_MESSAGE.format(
bot_name=get_user_by_delivery_email(
"[email protected]", get_realm("zulip")
).full_name,
support_email=FromAddress.SUPPORT,
).strip()
payload = self.get_body("slack_non_generic_payload")
self.client_post(self.url, payload, content_type="application/json")
msg = self.get_last_message()
self.assertEqual(msg.content, expected_message)
self.assertEqual(msg.recipient.type, Recipient.PERSONAL)
| eeshangarg/zulip | zerver/webhooks/teamcity/tests.py | Python | apache-2.0 | 4,058 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
IMPL = LazyPluggable('backend', sqlalchemy='senlin.db.sqlalchemy.api')
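# Illustrative sketch (not part of the upstream module): attribute lookups on
# IMPL are forwarded to the lazily imported backend, so senlin.db.sqlalchemy.api
# is only imported the first time it is actually used. The call below is
# hypothetical:
#
#     IMPL.cluster_get(context, cluster_id)   # resolved via LazyPluggable.__getattr__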
| openstack/senlin | senlin/db/utils.py | Python | apache-2.0 | 1,415 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Indicators")
from System import *
from QuantConnect import *
from QuantConnect.Indicators import *
from QuantConnect.Data import *
from QuantConnect.Data.Market import *
from QuantConnect.Data.Custom import *
from QuantConnect.Algorithm import *
### <summary>
### Basic template algorithm simply initializes the date range and cash. This is a skeleton
### framework you can use for designing an algorithm.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class IndicatorSuiteAlgorithm(QCAlgorithm):
'''Demonstration algorithm of popular indicators and plotting them.'''
def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''
self.symbol = "SPY"
self.customSymbol = "WIKI/FB"
self.price = None
self.SetStartDate(2013, 1, 1) #Set Start Date
self.SetEndDate(2014, 12, 31) #Set End Date
self.SetCash(25000) #Set Strategy Cash
# Find more symbols here: http://quantconnect.com/data
self.AddEquity(self.symbol, Resolution.Daily)
self.AddData(Quandl, self.customSymbol, Resolution.Daily)
        # Set up default indicators; these indicators are defined on the Value property of incoming data (except ATR and AROON, which use the full TradeBar object)
self.indicators = {
'BB' : self.BB(self.symbol, 20, 1, MovingAverageType.Simple, Resolution.Daily),
'RSI' : self.RSI(self.symbol, 14, MovingAverageType.Simple, Resolution.Daily),
'EMA' : self.EMA(self.symbol, 14, Resolution.Daily),
'SMA' : self.SMA(self.symbol, 14, Resolution.Daily),
'MACD' : self.MACD(self.symbol, 12, 26, 9, MovingAverageType.Simple, Resolution.Daily),
'MOM' : self.MOM(self.symbol, 20, Resolution.Daily),
'MOMP' : self.MOMP(self.symbol, 20, Resolution.Daily),
'STD' : self.STD(self.symbol, 20, Resolution.Daily),
# by default if the symbol is a tradebar type then it will be the min of the low property
'MIN' : self.MIN(self.symbol, 14, Resolution.Daily),
# by default if the symbol is a tradebar type then it will be the max of the high property
'MAX' : self.MAX(self.symbol, 14, Resolution.Daily),
'ATR' : self.ATR(self.symbol, 14, MovingAverageType.Simple, Resolution.Daily),
'AROON' : self.AROON(self.symbol, 20, Resolution.Daily)
}
# Here we're going to define indicators using 'selector' functions. These 'selector' functions will define what data gets sent into the indicator
# These functions have a signature like the following: decimal Selector(BaseData baseData), and can be defined like: baseData => baseData.Value
# We'll define these 'selector' functions to select the Low value
#
# For more information on 'anonymous functions' see: http:#en.wikipedia.org/wiki/Anonymous_function
# https:#msdn.microsoft.com/en-us/library/bb397687.aspx
#
self.selectorIndicators = {
'BB' : self.BB(self.symbol, 20, 1, MovingAverageType.Simple, Resolution.Daily, Field.Low),
'RSI' :self.RSI(self.symbol, 14, MovingAverageType.Simple, Resolution.Daily, Field.Low),
'EMA' :self.EMA(self.symbol, 14, Resolution.Daily, Field.Low),
'SMA' :self.SMA(self.symbol, 14, Resolution.Daily, Field.Low),
'MACD' : self.MACD(self.symbol, 12, 26, 9, MovingAverageType.Simple, Resolution.Daily, Field.Low),
'MOM' : self.MOM(self.symbol, 20, Resolution.Daily, Field.Low),
'MOMP' : self.MOMP(self.symbol, 20, Resolution.Daily, Field.Low),
'STD' : self.STD(self.symbol, 20, Resolution.Daily, Field.Low),
'MIN' : self.MIN(self.symbol, 14, Resolution.Daily, Field.High),
'MAX' : self.MAX(self.symbol, 14, Resolution.Daily, Field.Low),
# ATR and AROON are special in that they accept a TradeBar instance instead of a decimal, we could easily project and/or transform the input TradeBar
# before it gets sent to the ATR/AROON indicator, here we use a function that will multiply the input trade bar by a factor of two
'ATR' : self.ATR(self.symbol, 14, MovingAverageType.Simple, Resolution.Daily, Func[IBaseData, IBaseDataBar](self.selector_double_TradeBar)),
'AROON' : self.AROON(self.symbol, 20, Resolution.Daily, Func[IBaseData, IBaseDataBar](self.selector_double_TradeBar))
}
# Custom Data Indicator:
self.rsiCustom = self.RSI(self.customSymbol, 14, MovingAverageType.Simple, Resolution.Daily)
self.minCustom = self.MIN(self.customSymbol, 14, Resolution.Daily)
self.maxCustom = self.MAX(self.customSymbol, 14, Resolution.Daily)
        # in addition to defining indicators on a single security, you can also define 'composite' indicators.
        # these are indicators that require multiple inputs, the most common of which is a ratio.
        # suppose we seek the ratio of FB to SPY, we could write the following:
spyClose = Identity(self.symbol)
fbClose = Identity(self.customSymbol)
# this will create a new indicator whose value is FB/SPY
self.ratio = IndicatorExtensions.Over(fbClose, spyClose)
        # we can also easily plot our indicators each time they update using the PlotIndicator function
self.PlotIndicator("Ratio", self.ratio)
# The following methods will add multiple charts to the algorithm output.
        # Those chart names will be used later to plot different series in a particular chart.
# For more information on Lean Charting see: https://www.quantconnect.com/docs#Charting
Chart('BB')
Chart('STD')
Chart('ATR')
Chart('AROON')
Chart('MACD')
Chart('Averages')
        # Here we make use of the Schedule method to update the plots once per day at market close.
self.Schedule.On(self.DateRules.EveryDay(), self.TimeRules.BeforeMarketClose(self.symbol), self.update_plots)
def OnData(self, data):
'''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
Arguments:
data: Slice object keyed by symbol containing the stock data
'''
if (#not data.Bars.ContainsKey(self.symbol) or
not self.indicators['BB'].IsReady or
not self.indicators['RSI'].IsReady):
return
self.price = data[self.symbol].Close
if not self.Portfolio.HoldStock:
quantity = int(self.Portfolio.Cash / self.price)
self.Order(self.symbol, quantity)
self.Debug('Purchased SPY on ' + self.Time.strftime('%Y-%m-%d'))
def update_plots(self):
if not self.indicators['BB'].IsReady or not self.indicators['STD'].IsReady:
return
# Plots can also be created just with this one line command.
self.Plot('RSI', self.indicators['RSI'])
# Custom data indicator
self.Plot('RSI-FB', self.rsiCustom)
        # Here we make use of the charts declared in the Initialize method, plotting multiple series
# in each chart.
self.Plot('STD', 'STD', self.indicators['STD'].Current.Value)
self.Plot('BB', 'Price', self.price)
self.Plot('BB', 'BollingerUpperBand', self.indicators['BB'].UpperBand.Current.Value)
self.Plot('BB', 'BollingerMiddleBand', self.indicators['BB'].MiddleBand.Current.Value)
self.Plot('BB', 'BollingerLowerBand', self.indicators['BB'].LowerBand.Current.Value)
self.Plot('AROON', 'Aroon', self.indicators['AROON'].Current.Value)
self.Plot('AROON', 'AroonUp', self.indicators['AROON'].AroonUp.Current.Value)
self.Plot('AROON', 'AroonDown', self.indicators['AROON'].AroonDown.Current.Value)
# The following Plot method calls are commented out because of the 10 series limit for backtests
#self.Plot('ATR', 'ATR', self.indicators['ATR'].Current.Value)
#self.Plot('ATR', 'ATRDoubleBar', self.selectorIndicators['ATR'].Current.Value)
#self.Plot('Averages', 'SMA', self.indicators['SMA'].Current.Value)
#self.Plot('Averages', 'EMA', self.indicators['EMA'].Current.Value)
#self.Plot('MOM', self.indicators['MOM'].Current.Value)
#self.Plot('MOMP', self.indicators['MOMP'].Current.Value)
#self.Plot('MACD', 'MACD', self.indicators['MACD'].Current.Value)
#self.Plot('MACD', 'MACDSignal', self.indicators['MACD'].Signal.Current.Value)
def selector_double_TradeBar(self, bar):
trade_bar = TradeBar()
trade_bar.Close = 2 * bar.Close
trade_bar.DataType = bar.DataType
trade_bar.High = 2 * bar.High
trade_bar.Low = 2 * bar.Low
trade_bar.Open = 2 * bar.Open
trade_bar.Symbol = bar.Symbol
trade_bar.Time = bar.Time
trade_bar.Value = 2 * bar.Value
trade_bar.Period = bar.Period
        return trade_bar
| AnshulYADAV007/Lean | Algorithm.Python/IndicatorSuiteAlgorithm.py | Python | apache-2.0 | 10,805 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid.layers as layers
from paddle.fluid.contrib.decoder.beam_search_decoder import *
def seq_to_seq_net(embedding_dim, encoder_size, decoder_size, source_dict_dim,
target_dict_dim, is_generating, beam_size, max_length):
def encoder():
# Encoder implementation of RNN translation
src_word = layers.data(
name="src_word", shape=[1], dtype='int64', lod_level=1)
src_embedding = layers.embedding(
input=src_word,
size=[source_dict_dim, embedding_dim],
dtype='float32',
is_sparse=True)
fc1 = layers.fc(input=src_embedding, size=encoder_size * 4, act='tanh')
lstm_hidden0, lstm_0 = layers.dynamic_lstm(
input=fc1, size=encoder_size * 4)
encoder_out = layers.sequence_last_step(input=lstm_hidden0)
return encoder_out
def decoder_state_cell(context):
# Decoder state cell, specifies the hidden state variable and its updater
h = InitState(init=context, need_reorder=True)
state_cell = StateCell(
inputs={'x': None}, states={'h': h}, out_state='h')
@state_cell.state_updater
def updater(state_cell):
current_word = state_cell.get_input('x')
prev_h = state_cell.get_state('h')
            # make sure the LoD of h is inherited from prev_h
h = layers.fc(input=[prev_h, current_word],
size=decoder_size,
act='tanh')
state_cell.set_state('h', h)
return state_cell
def decoder_train(state_cell):
# Decoder for training implementation of RNN translation
trg_word = layers.data(
name="target_word", shape=[1], dtype='int64', lod_level=1)
trg_embedding = layers.embedding(
input=trg_word,
size=[target_dict_dim, embedding_dim],
dtype='float32',
is_sparse=True)
# A training decoder
decoder = TrainingDecoder(state_cell)
# Define the computation in each RNN step done by decoder
with decoder.block():
current_word = decoder.step_input(trg_embedding)
decoder.state_cell.compute_state(inputs={'x': current_word})
current_score = layers.fc(input=decoder.state_cell.get_state('h'),
size=target_dict_dim,
act='softmax')
decoder.state_cell.update_states()
decoder.output(current_score)
return decoder()
def decoder_infer(state_cell):
# Decoder for inference implementation
init_ids = layers.data(
name="init_ids", shape=[1], dtype="int64", lod_level=2)
init_scores = layers.data(
name="init_scores", shape=[1], dtype="float32", lod_level=2)
# A beam search decoder for inference
decoder = BeamSearchDecoder(
state_cell=state_cell,
init_ids=init_ids,
init_scores=init_scores,
target_dict_dim=target_dict_dim,
word_dim=embedding_dim,
input_var_dict={},
topk_size=50,
sparse_emb=True,
max_len=max_length,
beam_size=beam_size,
end_id=1,
name=None)
decoder.decode()
translation_ids, translation_scores = decoder()
return translation_ids, translation_scores
context = encoder()
state_cell = decoder_state_cell(context)
if not is_generating:
label = layers.data(
name="target_next_word", shape=[1], dtype='int64', lod_level=1)
rnn_out = decoder_train(state_cell)
cost = layers.cross_entropy(input=rnn_out, label=label)
avg_cost = layers.mean(x=cost)
feeding_list = ['src_word', 'target_word', 'target_next_word']
return avg_cost, feeding_list
else:
translation_ids, translation_scores = decoder_infer(state_cell)
feeding_list = ['src_word']
return translation_ids, translation_scores, feeding_list
| lcy-seso/models | fluid/neural_machine_translation/rnn_search/no_attention_model.py | Python | apache-2.0 | 4,829 |
# Zulip's OpenAPI-based API documentation system is documented at
# https://zulip.readthedocs.io/en/latest/documentation/api.html
#
# This file contains helper functions for generating cURL examples
# based on Zulip's OpenAPI definitions, as well as test setup and
# fetching of appropriate parameter values to use when running the
# cURL examples as part of the tools/test-api test suite.
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import (
do_add_linkifier,
do_add_reaction,
do_add_realm_playground,
do_create_user,
update_user_presence,
)
from zerver.lib.events import do_events_register
from zerver.lib.initial_password import initial_password
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.users import get_api_key
from zerver.models import Client, Message, UserGroup, UserPresence, get_realm, get_user
GENERATOR_FUNCTIONS: Dict[str, Callable[[], Dict[str, object]]] = {}
REGISTERED_GENERATOR_FUNCTIONS: Set[str] = set()
CALLED_GENERATOR_FUNCTIONS: Set[str] = set()
# This is a List rather than just a string in order to make it easier
# to write to it from another module.
AUTHENTICATION_LINE: List[str] = [""]
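# (Illustrative note: callers mutate the single element in place, e.g.
# AUTHENTICATION_LINE[0] = "email:api_key" as done in deactivate_own_user()
# below, so modules that imported the list still see the updated value.)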
helpers = ZulipTestCase()
def openapi_param_value_generator(
endpoints: List[str],
) -> Callable[[Callable[[], Dict[str, object]]], Callable[[], Dict[str, object]]]:
"""This decorator is used to register OpenAPI param value genarator functions
with endpoints. Example usage:
@openapi_param_value_generator(["/messages/render:post"])
def ...
"""
def wrapper(generator_func: Callable[[], Dict[str, object]]) -> Callable[[], Dict[str, object]]:
@wraps(generator_func)
def _record_calls_wrapper() -> Dict[str, object]:
CALLED_GENERATOR_FUNCTIONS.add(generator_func.__name__)
return generator_func()
REGISTERED_GENERATOR_FUNCTIONS.add(generator_func.__name__)
for endpoint in endpoints:
GENERATOR_FUNCTIONS[endpoint] = _record_calls_wrapper
return _record_calls_wrapper
return wrapper
def assert_all_helper_functions_called() -> None:
"""Throws an exception if any registered helpers were not called by tests"""
if REGISTERED_GENERATOR_FUNCTIONS == CALLED_GENERATOR_FUNCTIONS:
return
uncalled_functions = str(REGISTERED_GENERATOR_FUNCTIONS - CALLED_GENERATOR_FUNCTIONS)
raise Exception(f"Registered curl API generators were not called: {uncalled_functions}")
def patch_openapi_example_values(
entry: str,
params: List[Dict[str, Any]],
request_body: Optional[Dict[str, Any]] = None,
) -> Tuple[List[Dict[str, object]], Optional[Dict[str, object]]]:
if entry not in GENERATOR_FUNCTIONS:
return params, request_body
func = GENERATOR_FUNCTIONS[entry]
realm_example_values: Dict[str, object] = func()
for param in params:
param_name = param["name"]
if param_name in realm_example_values:
if "content" in param:
param["content"]["application/json"]["example"] = realm_example_values[param_name]
else:
param["example"] = realm_example_values[param_name]
if request_body is not None:
properties = request_body["content"]["multipart/form-data"]["schema"]["properties"]
for key, property in properties.items():
if key in realm_example_values:
property["example"] = realm_example_values[key]
return params, request_body
@openapi_param_value_generator(["/fetch_api_key:post"])
def fetch_api_key() -> Dict[str, object]:
email = helpers.example_email("iago")
password = initial_password(email)
return {
"username": email,
"password": password,
}
@openapi_param_value_generator(
[
"/messages/{message_id}:get",
"/messages/{message_id}/history:get",
"/messages/{message_id}:patch",
"/messages/{message_id}:delete",
]
)
def iago_message_id() -> Dict[str, object]:
return {
"message_id": helpers.send_stream_message(helpers.example_user("iago"), "Denmark"),
}
@openapi_param_value_generator(["/messages/{message_id}/reactions:delete"])
def add_emoji_to_message() -> Dict[str, object]:
user_profile = helpers.example_user("iago")
# from OpenAPI format data in zulip.yaml
message_id = 43
emoji_name = "octopus"
emoji_code = "1f419"
reaction_type = "unicode_emoji"
message = Message.objects.select_related().get(id=message_id)
do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)
return {}
@openapi_param_value_generator(["/messages/flags:post"])
def update_flags_message_ids() -> Dict[str, object]:
stream_name = "Venice"
helpers.subscribe(helpers.example_user("iago"), stream_name)
messages = []
for _ in range(3):
messages.append(helpers.send_stream_message(helpers.example_user("iago"), stream_name))
return {
"messages": messages,
}
@openapi_param_value_generator(["/mark_stream_as_read:post", "/users/me/{stream_id}/topics:get"])
def get_venice_stream_id() -> Dict[str, object]:
return {
"stream_id": helpers.get_stream_id("Venice"),
}
@openapi_param_value_generator(["/streams/{stream_id}:patch"])
def update_stream() -> Dict[str, object]:
stream = helpers.subscribe(helpers.example_user("iago"), "temp_stream 1")
return {
"stream_id": stream.id,
}
@openapi_param_value_generator(["/streams/{stream_id}:delete"])
def create_temp_stream_and_get_id() -> Dict[str, object]:
stream = helpers.subscribe(helpers.example_user("iago"), "temp_stream 2")
return {
"stream_id": stream.id,
}
@openapi_param_value_generator(["/mark_topic_as_read:post"])
def get_denmark_stream_id_and_topic() -> Dict[str, object]:
stream_name = "Denmark"
topic_name = "Tivoli Gardens"
helpers.subscribe(helpers.example_user("iago"), stream_name)
helpers.send_stream_message(helpers.example_user("hamlet"), stream_name, topic_name=topic_name)
return {
"stream_id": helpers.get_stream_id(stream_name),
"topic_name": topic_name,
}
@openapi_param_value_generator(["/users/me/subscriptions/properties:post"])
def update_subscription_data() -> Dict[str, object]:
profile = helpers.example_user("iago")
helpers.subscribe(profile, "Verona")
helpers.subscribe(profile, "social")
return {
"subscription_data": [
{"stream_id": helpers.get_stream_id("Verona"), "property": "pin_to_top", "value": True},
{"stream_id": helpers.get_stream_id("social"), "property": "color", "value": "#f00f00"},
],
}
@openapi_param_value_generator(["/users/me/subscriptions:delete"])
def delete_subscription_data() -> Dict[str, object]:
iago = helpers.example_user("iago")
zoe = helpers.example_user("ZOE")
helpers.subscribe(iago, "Verona")
helpers.subscribe(iago, "social")
helpers.subscribe(zoe, "Verona")
helpers.subscribe(zoe, "social")
return {}
@openapi_param_value_generator(["/events:get"])
def get_events() -> Dict[str, object]:
profile = helpers.example_user("iago")
helpers.subscribe(profile, "Verona")
client = Client.objects.create(name="curl-test-client-1")
response = do_events_register(profile, client, event_types=["message", "realm_emoji"])
helpers.send_stream_message(helpers.example_user("hamlet"), "Verona")
return {
"queue_id": response["queue_id"],
"last_event_id": response["last_event_id"],
}
@openapi_param_value_generator(["/events:delete"])
def delete_event_queue() -> Dict[str, object]:
profile = helpers.example_user("iago")
client = Client.objects.create(name="curl-test-client-2")
response = do_events_register(profile, client, event_types=["message"])
return {
"queue_id": response["queue_id"],
"last_event_id": response["last_event_id"],
}
@openapi_param_value_generator(["/users/{user_id_or_email}/presence:get"])
def get_user_presence() -> Dict[str, object]:
iago = helpers.example_user("iago")
client = Client.objects.create(name="curl-test-client-3")
update_user_presence(iago, client, timezone_now(), UserPresence.ACTIVE, False)
return {}
@openapi_param_value_generator(["/users:post"])
def create_user() -> Dict[str, object]:
return {
"email": helpers.nonreg_email("test"),
}
@openapi_param_value_generator(["/user_groups/create:post"])
def create_user_group_data() -> Dict[str, object]:
return {
"members": [helpers.example_user("hamlet").id, helpers.example_user("othello").id],
}
@openapi_param_value_generator(
["/user_groups/{user_group_id}:patch", "/user_groups/{user_group_id}:delete"]
)
def get_temp_user_group_id() -> Dict[str, object]:
user_group, _ = UserGroup.objects.get_or_create(name="temp", realm=get_realm("zulip"))
return {
"user_group_id": user_group.id,
}
@openapi_param_value_generator(["/realm/filters/{filter_id}:delete"])
def remove_realm_filters() -> Dict[str, object]:
filter_id = do_add_linkifier(
get_realm("zulip"), "#(?P<id>[0-9]{2,8})", "https://github.com/zulip/zulip/pull/%(id)s"
)
return {
"filter_id": filter_id,
}
@openapi_param_value_generator(["/realm/emoji/{emoji_name}:post", "/user_uploads:post"])
def upload_custom_emoji() -> Dict[str, object]:
return {
"filename": "zerver/tests/images/animated_img.gif",
}
@openapi_param_value_generator(["/realm/playgrounds:post"])
def add_realm_playground() -> Dict[str, object]:
return {
"name": "Python2 playground",
"pygments_language": "Python2",
"url_prefix": "https://python2.example.com",
}
@openapi_param_value_generator(["/realm/playgrounds/{playground_id}:delete"])
def remove_realm_playground() -> Dict[str, object]:
playground_info = dict(
name="Python playground",
pygments_language="Python",
url_prefix="https://python.example.com",
)
playground_id = do_add_realm_playground(get_realm("zulip"), **playground_info)
return {
"playground_id": playground_id,
}
@openapi_param_value_generator(["/users/{user_id}:delete"])
def deactivate_user() -> Dict[str, object]:
user_profile = do_create_user(
email="[email protected]",
password=None,
full_name="test_user",
realm=get_realm("zulip"),
acting_user=None,
)
return {"user_id": user_profile.id}
@openapi_param_value_generator(["/users/me:delete"])
def deactivate_own_user() -> Dict[str, object]:
test_user_email = "[email protected]"
deactivate_test_user = do_create_user(
test_user_email,
"secret",
get_realm("zulip"),
"Mr. Delete",
role=200,
acting_user=None,
)
realm = get_realm("zulip")
test_user = get_user(test_user_email, realm)
test_user_api_key = get_api_key(test_user)
# change authentication line to allow test_client to delete itself.
AUTHENTICATION_LINE[0] = f"{deactivate_test_user.email}:{test_user_api_key}"
return {}
| punchagan/zulip | zerver/openapi/curl_param_value_generators.py | Python | apache-2.0 | 11,278 |
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from geecheck_tests import common
class TestVersion(unittest.TestCase):
@unittest.skipUnless(common.IsFusionInstalled(), 'Fusion is not installed')
def testFusionVersion(self):
"""Check if Fusion release is the latest available."""
latest_version = common.GetLatestVersion()
fusion_version = common.GetFusionVersion()
error_msg = ('Running Fusion version %s. Upgrade to version %s.' %
(fusion_version, latest_version))
self.assertEqual(fusion_version, latest_version, msg=error_msg)
print ('Currently running the latest version of Fusion (%s).' %
fusion_version)
@unittest.skipUnless(common.IsGeeServerInstalled(),
'GEE Server is not installed')
def testGeeServerVersion(self):
"""Check if GEE Server release is the latest available."""
latest_version = common.GetLatestVersion()
gee_server_version = common.GetGeeServerVersion()
error_msg = ('Running GEE Server version %s. Upgrade to (%s).' %
(gee_server_version, latest_version))
self.assertEqual(gee_server_version, latest_version, msg=error_msg)
print ('Currently running the latest version of GEE Server (%s).' %
gee_server_version)
@unittest.skipUnless(common.IsFusionInstalled(), 'Fusion is not installed')
@unittest.skipUnless(common.IsGeeServerInstalled(),
'GEE Server is not installed')
def testFusionVersionsMatch(self):
"""Check Fusion and server versions are aligned."""
fusion_version = common.GetFusionVersion()
gee_server_version = common.GetGeeServerVersion()
error_msg = ('Fusion and GEE Server versions DO NOT match. '
'Currently running Fusion v. %s and GEE Server v. %s.' %
(fusion_version, gee_server_version))
self.assertEqual(fusion_version, gee_server_version, msg=error_msg)
    print ('Fusion and GEE Server versions match. Current version is %s.' %
           fusion_version)
if __name__ == '__main__':
unittest.main()
| tst-ahernandez/earthenterprise | earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/geecheck_tests/user_tests/fusion_version_test.py | Python | apache-2.0 | 2,649 |
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import webob
from nova import flags
from nova import test
from nova.api.openstack import accounts
from nova.auth.manager import User
from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
FLAGS.verbose = True
def fake_init(self):
self.manager = fakes.FakeAuthManager()
def fake_admin_check(self, req):
return True
class AccountsTest(test.TestCase):
def setUp(self):
super(AccountsTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(accounts.Controller, '__init__',
fake_init)
self.stubs.Set(accounts.Controller, '_check_admin',
fake_admin_check)
fakes.FakeAuthManager.clear_fakes()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
self.allow_admin = FLAGS.allow_admin_api
FLAGS.allow_admin_api = True
fakemgr = fakes.FakeAuthManager()
joeuser = User('id1', 'guy1', 'acc1', 'secret1', False)
superuser = User('id2', 'guy2', 'acc2', 'secret2', True)
fakemgr.add_user(joeuser)
fakemgr.add_user(superuser)
fakemgr.create_project('test1', joeuser)
fakemgr.create_project('test2', superuser)
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
super(AccountsTest, self).tearDown()
def test_get_account(self):
req = webob.Request.blank('/v1.0/accounts/test1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res_dict['account']['id'], 'test1')
self.assertEqual(res_dict['account']['name'], 'test1')
self.assertEqual(res_dict['account']['manager'], 'id1')
self.assertEqual(res.status_int, 200)
def test_account_delete(self):
req = webob.Request.blank('/v1.0/accounts/test1')
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
self.assertTrue('test1' not in fakes.FakeAuthManager.projects)
self.assertEqual(res.status_int, 200)
def test_account_create(self):
body = dict(account=dict(description='test account',
manager='id1'))
req = webob.Request.blank('/v1.0/accounts/newacct')
req.headers["Content-Type"] = "application/json"
req.method = 'PUT'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['account']['id'], 'newacct')
self.assertEqual(res_dict['account']['name'], 'newacct')
self.assertEqual(res_dict['account']['description'], 'test account')
self.assertEqual(res_dict['account']['manager'], 'id1')
self.assertTrue('newacct' in
fakes.FakeAuthManager.projects)
self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 3)
def test_account_update(self):
body = dict(account=dict(description='test account',
manager='id2'))
req = webob.Request.blank('/v1.0/accounts/test1')
req.headers["Content-Type"] = "application/json"
req.method = 'PUT'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['account']['id'], 'test1')
self.assertEqual(res_dict['account']['name'], 'test1')
self.assertEqual(res_dict['account']['description'], 'test account')
self.assertEqual(res_dict['account']['manager'], 'id2')
self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 2)
| superstack/nova | nova/tests/api/openstack/test_accounts.py | Python | apache-2.0 | 4,537 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
class SumReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keep_dims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.reduce_sum(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.arange(1, 6).reshape([5]).astype(np.float32)
self._compareAll(np_arr, [0])
def testFloatReduce2D(self):
# Create a 2D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [0, 1])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
np_arr = np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
def testFloatReduce5D(self):
# Create a 5D array of floats and reduce across some dimensions
np_arr = np.arange(0, 840).reshape([2, 3, 5, 7, 4]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
self._compareAll(np_arr, [1, 2, 3, 4])
self._compareAll(np_arr, [0, 1, 2, 3, 4])
# Simple tests for various types.
def testDoubleReduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.float64)
self._compare(np_arr, [], False)
self._compare(np_arr, [0], False)
def testInt32Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.int32)
self._compare(np_arr, [], False)
self._compare(np_arr, [0], False)
def testComplex64Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.complex64)
self._compare(np_arr, [], False)
self._compare(np_arr, [0], False)
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = tf.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [-1])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [0, 2])
# Int64??
def _compareGradient(self, shape, sum_shape, reduction_axes):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(shape, sum_shape, reduction_axes[0])
x = np.arange(1.0, 49.0).reshape(shape).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_sum(t, reduction_axes)
jacob_t, jacob_n = tf.test.compute_gradient(t,
shape,
su,
sum_shape,
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
self._compareGradient([2, 3, 4, 2], [2, 2], [1, 2])
def testGradient2(self):
self._compareGradient([2, 3, 4, 2], [2, 4, 2], [1])
def testGradient3(self):
self._compareGradient([2, 3, 4, 2], [2, 3, 2], [2])
def testGradient4(self):
self._compareGradient([2, 3, 4, 2], [], None)
class MeanReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims):
np_sum = x
count = 1
for ra in reduction_axes[::-1]:
np_sum = np.sum(np_sum, axis=ra, keepdims=keep_dims)
count *= x.shape[ra]
np_ans = np_sum / count
with self.test_session():
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_mean(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False)
self._compare(x, reduction_axes, True)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_mean(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_mean(t, [0, 1, 2, 3])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_mean(t, [])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
class ProdReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims):
np_ans = x
if reduction_axes is None:
np_ans = np.prod(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.prod(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session():
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_prod(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False)
self._compare(x, reduction_axes, True)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
# NOTE(kearnes): divide by 20 so product is a reasonable size
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32) / 20.
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_prod(t, [])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_prod(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_prod(t, [0, 1, 2, 3])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
# NOTE(kearnes): the current gradient calculation gives NaNs for 0 inputs
x = np.arange(0.0, 48.0).reshape(s).astype(np.float32) / 20.
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_prod(t, [])
jacob_t, _ = tf.test.compute_gradient(t,
s,
su,
[2, 3, 4, 2],
x_init_value=x,
delta=1)
with self.assertRaisesOpError("Tensor had NaN values"):
tf.check_numerics(jacob_t, message="_ProdGrad NaN test").op.run()
class MinReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_min(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [1])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t)
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
class MaxReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_max(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [1])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t)
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
class AllReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_all(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
class AnyReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.any(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.any(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_any(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testPartialShapes(self):
# Input shape is unknown.
c_unknown = tf.placeholder(tf.float32)
s_unknown = tf.reduce_sum(c_unknown, [1, 2])
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
# Input shape only has known rank.
c_known_rank = tf.placeholder(tf.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(ndims=3))
s_known_rank = tf.reduce_sum(c_known_rank, [1, 2], keep_dims=True)
self.assertEqual(3, s_known_rank.get_shape().ndims)
# Reduction indices are unknown.
unknown_indices = tf.placeholder(tf.int32)
c_unknown_indices = tf.constant([[10.0], [20.0]])
s_unknown_indices = tf.reduce_sum(c_unknown_indices, unknown_indices,
keep_dims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = tf.reduce_sum(c_unknown_indices, unknown_indices,
keep_dims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().ndims)
if __name__ == "__main__":
tf.test.main()
| YanTangZhai/tf | tensorflow/python/kernel_tests/reduction_ops_test.py | Python | apache-2.0 | 24,486 |
import sys, json
import random, os, subprocess
from twisted.internet import reactor
from twisted.web import server, resource
from twisted.web.static import File
from twisted.python import log
from datetime import datetime
import urllib, urllib2
import logging
import re
from sensei_client import *
PARSER_AGENT_PORT = 18888
client = SenseiClient("localhost",8080,'sensei')
#
# Main server resource
#
class Root(resource.Resource):
def render_GET(self, request):
"""
        GET response method for the root resource
        (localhost:18888)
"""
return 'Welcome to the REST API'
def getChild(self, name, request):
"""
        We override the getChild method so that we can handle invalid
requests
"""
print "root getchild"
request.setHeader("Access-Control-Allow-Origin", "*")
request.setHeader("Access-Control-Allow-Methods", "GET, POST")
request.setHeader("Access-Control-Allow-Headers", "Origin, X-Requested-With, Accept")
if name == '':
return self
else:
if name in VIEWS.keys():
return VIEWS.get(name)#resource.Resource.getChild(self, name, request)
else:
return PageNotFoundError()
class PageNotFoundError(resource.Resource):
def render_GET(self, request):
return 'Page Not Found!'
class ParseBQL(resource.Resource):
def getChild(self, name, request):
"""
        We override the getChild method so that we can handle invalid
requests
"""
print "root getchild"
request.setHeader("Access-Control-Allow-Origin", "*")
request.setHeader("Access-Control-Allow-Methods", "GET, POST")
request.setHeader("Access-Control-Allow-Headers", "Origin, X-Requested-With, Accept")
def render_OPTIONS(self,request):
# request.setHeader("Access-Control-Allow-Origin", "*")
# request.setHeader("Access-Control-Allow-Methods", "GET, POST")
# request.setHeader("Access-Control-Allow-Headers", "Origin, X-Requested-With, Accept")
print "parse render options"
return "ok"
def render_GET(self, request):
"""Start a Sensei store."""
try:
info = request.args["info"][0]
info = json.loads(info.encode('utf-8'))
print ">>> info = ", info
variables = re.findall(r"\$[a-zA-Z0-9]+", info["bql"])
variables = list(set(variables))
info["auxParams"] = [ {"name": var[1:]} for var in variables ]
stmt = info["bql"]
req = SenseiRequest(stmt)
res = client.doQuery(req)
print "numhits: %d" % res.numHits
result = json.dumps(res.jsonMap)
print result
return json.dumps(
{
"ok": True,
"result": res.jsonMap
})
except ParseException as err:
print err
return json.dumps(
{
"ok": False,
"error": "Parsing error at location %s: %s" % (err.loc, err.msg)
})
except Exception as err:
print err
return "Error"
def render_POST(self, request):
return self.render_GET(request)
#to make the process of adding new views less static
VIEWS = {
"parse": ParseBQL()
}
if __name__ == '__main__':
params = {}
# params["info"] = """{"name": "nus_member", "description": "xxx xxxx", "urn": "urn:feed:nus:member:exp:a:$memberId", 'bql': 'select * from cars where memberId in ("$memberId")'}"""
params["info"] = """{"name": "nus_member", "description": "xxx xxxx"}"""
print urllib.urlencode(params)
root = Root()
#for viewName, className in VIEWS.items():
#add the view to the web service
# root.putChild(viewName, className)
log.startLogging(sys.stdout)
log.msg('Starting parser agent: %s' %str(datetime.now()))
server = server.Site(root)
reactor.listenTCP(PARSER_AGENT_PORT, server)
reactor.run()
| DataDog/sensei | clients/python/sensei/sensei_ql_proxy.py | Python | apache-2.0 | 3,753 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from eventlet import corolocal
# Thread local storage.
_th_loc_storage = threading.local()
def _get_greenlet_local_storage():
greenlet_id = corolocal.get_ident()
greenlet_locals = getattr(_th_loc_storage, "greenlet_locals", None)
if not greenlet_locals:
greenlet_locals = {}
_th_loc_storage.greenlet_locals = greenlet_locals
if greenlet_id in greenlet_locals:
return greenlet_locals[greenlet_id]
else:
return None
def has_thread_local(var_name):
gl_storage = _get_greenlet_local_storage()
return gl_storage and var_name in gl_storage
def get_thread_local(var_name):
if not has_thread_local(var_name):
return None
return _get_greenlet_local_storage()[var_name]
def set_thread_local(var_name, val):
if not val and has_thread_local(var_name):
gl_storage = _get_greenlet_local_storage()
# Delete variable from greenlet local storage.
if gl_storage:
del gl_storage[var_name]
# Delete the entire greenlet local storage from thread local storage.
        # Note: an emptied dict is falsy, so test for None explicitly here,
        # otherwise the cleanup below would never run.
        if gl_storage is not None and len(gl_storage) == 0:
del _th_loc_storage.greenlet_locals[corolocal.get_ident()]
if val:
gl_storage = _get_greenlet_local_storage()
if not gl_storage:
gl_storage =\
_th_loc_storage.greenlet_locals[corolocal.get_ident()] = {}
gl_storage[var_name] = val
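# Illustrative usage of the helpers above (a sketch, not part of the original
# module): values stored with set_thread_local() are scoped to the current
# greenlet, and resetting a variable to a falsy value removes it again.
#
#     set_thread_local('auth_context', {'user': 'admin'})
#     assert has_thread_local('auth_context')
#     ctx = get_thread_local('auth_context')
#     set_thread_local('auth_context', None)  # deletes the entry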
def log_exec(logger, level=logging.INFO):
"""Decorator for logging function execution.
By default, target function execution is logged with INFO level.
"""
def _decorator(func):
def _logged(*args, **kw):
params_repr = "[args=%s, kw=%s]" % (str(args), str(kw)) \
if len(args) > 0 or len(kw) > 0 else ""
func_repr = "Called method [name=%s, doc='%s', params=%s]" % \
(func.__name__, func.__doc__, params_repr)
logger.log(level, func_repr)
return func(*args, **kw)
_logged.__doc__ = func.__doc__
return _logged
return _decorator
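# Illustrative usage of log_exec (a sketch added for clarity, not part of the
# original module). The decorator logs every call, including its arguments,
# at the requested level before delegating to the wrapped function.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    _example_log = logging.getLogger(__name__)
    @log_exec(_example_log, level=logging.DEBUG)
    def _add(a, b):
        """Add two numbers."""
        return a + b
    # Logs: Called method [name=_add, doc='Add two numbers.',
    # params=[args=(1,), kw={'b': 2}]]
    _add(1, b=2)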
| dmitryilyin/mistral | mistral/utils/__init__.py | Python | apache-2.0 | 2,765 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the distributed values library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import os
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.saved_model.model_utils import mode_keys
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.tracking import util as trackable_utils
from tensorflow.python.util import nest
class DistributedValuesTest(test.TestCase):
def testGetEager(self):
with ops.device("/device:CPU:0"):
one = constant_op.constant(1)
two = constant_op.constant(2)
device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
v = values.DistributedValues(device_map, (one, two))
self.assertEqual(two, v.get("/device:GPU:0"))
self.assertEqual(one, v.get())
with self.assertRaises(ValueError):
self.assertIsNone(v.get("/device:GPU:2"))
def testGetGraph(self):
with context.graph_mode(), \
ops.Graph().as_default(), \
ops.device("/device:CPU:0"):
one = constant_op.constant(1)
two = constant_op.constant(2)
device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
v = values.DistributedValues(device_map, (one, two))
self.assertEqual(two, v.get("/device:GPU:0"))
self.assertEqual(one, v.get())
with self.assertRaises(ValueError):
self.assertIsNone(v.get("/device:GPU:2"))
def testCanonicalization(self):
canonical_cpu = ("/job:localhost/replica:0/task:0/device:CPU:0",)
v = values.DistributedValues(values.SingleDeviceMap(""), (42,))
self.assertEqual(canonical_cpu, v.devices)
v = values.DistributedValues(values.SingleDeviceMap("/device:CPU:0"), (42,))
self.assertEqual(canonical_cpu, v.devices)
v = values.DistributedValues(values.SingleDeviceMap("/cpu:0"), (42,))
self.assertEqual(canonical_cpu, v.devices)
v = values.DistributedValues(values.SingleDeviceMap("/CPU:0"), (42,))
self.assertEqual(canonical_cpu, v.devices)
def testIsTensorLike(self):
with context.graph_mode(), \
ops.Graph().as_default(), \
ops.device("/device:CPU:0"):
one = constant_op.constant(1)
two = constant_op.constant(2)
device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
v = values.DistributedValues(device_map, (one, two))
self.assertEqual(two, v.get("/device:GPU:0"))
self.assertEqual(one, v.get())
self.assertTrue(v.is_tensor_like)
self.assertTrue(tensor_util.is_tensor(v))
def testIsTensorLikeWithAConstant(self):
with context.graph_mode(), \
ops.Graph().as_default(), \
ops.device("/device:CPU:0"):
one = constant_op.constant(1)
two = 2.0
device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
v = values.DistributedValues(device_map, (one, two))
self.assertEqual(two, v.get("/device:GPU:0"))
self.assertEqual(one, v.get())
self.assertFalse(v.is_tensor_like)
self.assertFalse(tensor_util.is_tensor(v))
class DistributedDelegateTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testGetAttr(self):
with ops.device("/device:CPU:0"):
class Foo(object):
def __init__(self, x):
self.x = x
device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
v = values.DistributedDelegate(device_map, (Foo(7), Foo(8)))
self.assertEqual(7, v.x)
with self.assertRaises(AttributeError):
_ = v.y
@test_util.run_in_graph_and_eager_modes
def testOperatorOverride(self):
with ops.device("/device:CPU:0"):
device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
v = values.DistributedDelegate(device_map, (7, 8))
# v should act like int(7).
self.assertEqual(8, v + 1)
self.assertEqual(10, 3 + v)
self.assertEqual(14, v + v)
self.assertEqual(5, v - 2)
self.assertEqual(6, 13 - v)
self.assertEqual(0, v - v)
self.assertEqual(14, v * 2)
self.assertEqual(21, 3 * v)
self.assertEqual(49, v * v)
self.assertEqual(3.5, v / 2)
self.assertEqual(1.5, 10.5 / v)
self.assertEqual(3, v // 2)
self.assertEqual(2, 15 // v)
self.assertEqual(1, v % 2)
self.assertEqual(2, 16 % v)
self.assertTrue(v < 12)
self.assertTrue(v <= 12)
self.assertFalse(v > 12)
self.assertFalse(v >= 12)
self.assertFalse(12 < v)
self.assertFalse(12 <= v)
self.assertTrue(12 > v)
self.assertTrue(12 >= v)
self.assertEqual(3, v & 3)
self.assertEqual(3, 11 & v)
self.assertEqual(15, v | 8)
self.assertEqual(23, 16 | v)
self.assertEqual(4, v ^ 3)
self.assertEqual(12, 11 ^ v)
self.assertEqual(343, pow(v, 3))
self.assertEqual(3, pow(v, 3, 10))
self.assertEqual(128, pow(2, v))
self.assertEqual(-7, -v)
self.assertEqual(~7, ~v)
self.assertEqual(7, abs(v))
with self.assertRaises(TypeError):
_ = v[2]
def _device_str(d):
return "/device:GPU:" + str(d)
def _nested_value(d):
return ("a" + d, ["b" + d, {"c": "d" + d, "e": "f" + d}, "g" + d], "h" + d)
def _make_mirrored_val(init_val=5.0):
v = []
devices = ["/device:GPU:0", "/device:CPU:0"]
for d, _ in zip(devices, ["v", "v/replica"]):
with ops.device(d):
v.append(constant_op.constant(init_val))
device_map = values.ReplicaDeviceMap(devices)
mirrored = values.Mirrored(device_map, v)
return mirrored
def _make_mirrored():
v = []
devices = ["/device:GPU:0", "/device:CPU:0"]
for d, n, init in zip(devices, ["v", "v/replica"], [1., 2.]):
with ops.device(d):
v.append(variable_scope.get_variable(
name=n, initializer=init, use_resource=True))
device_map = values.ReplicaDeviceMap(devices)
mirrored = values.MirroredVariable(None, device_map, v,
variable_scope.VariableAggregation.SUM)
return v, device_map, mirrored
class RegroupAndSelectDeviceTest(test.TestCase):
def _is_per_replica(self, result, expected, klass=values.PerReplica):
self.assertIsInstance(result, klass)
# We canonicalize the devices to match the device strings returned
# by PerReplica, which also does device string canonicalization.
devices = [device_util.canonicalize(_device_str(i))
for i in range(len(expected))]
self.assertEqual(set(devices), set(result.devices))
for i, d in enumerate(devices):
self.assertEqual(expected[i], result.get(d))
self.assertEqual(expected[i], result.get(_device_str(i)))
def testNested(self):
device_map = values.ReplicaDeviceMap((_device_str(0), _device_str(1)))
result = values.regroup(device_map,
(_nested_value("1"), _nested_value("2")))
self.assertIsInstance(result, tuple)
self.assertEqual(3, len(result))
self._is_per_replica(result[0], ["a1", "a2"])
self._is_per_replica(result[2], ["h1", "h2"])
self.assertIsInstance(result[1], list)
self.assertEqual(3, len(result[1]))
self._is_per_replica(result[1][0], ["b1", "b2"])
self._is_per_replica(result[1][2], ["g1", "g2"])
self.assertIsInstance(result[1][1], dict)
self.assertEqual(set(["c", "e"]), set(result[1][1].keys()))
self._is_per_replica(result[1][1]["c"], ["d1", "d2"])
self._is_per_replica(result[1][1]["e"], ["f1", "f2"])
# Also test that we can undo the merge using select_replica()
self.assertEqual(_nested_value("1"),
values.select_replica(0, result))
self.assertEqual(_nested_value("2"),
values.select_replica(1, result))
# select_device_mirrored() should fail due to non-mirrored values
with self.assertRaises(TypeError):
values.select_device_mirrored(_device_str(0), result)
with self.assertRaises(TypeError):
values.select_device_mirrored(_device_str(1), result)
def testWrapClass(self):
# Normally a mirrored value would be the same across devices, but
# for a test it is convenient to be able to tell the values apart.
device_map = values.ReplicaDeviceMap((_device_str(0), _device_str(1)))
result = values.regroup(device_map,
(_nested_value("1"), _nested_value("2")),
values.Mirrored)
self.assertIsInstance(result, tuple)
self.assertEqual(3, len(result))
self._is_per_replica(result[0], ["a1", "a2"], values.Mirrored)
self._is_per_replica(result[2], ["h1", "h2"], values.Mirrored)
self.assertIsInstance(result[1], list)
self.assertEqual(3, len(result[1]))
self._is_per_replica(result[1][0], ["b1", "b2"], values.Mirrored)
self._is_per_replica(result[1][2], ["g1", "g2"], values.Mirrored)
self.assertIsInstance(result[1][1], dict)
self.assertEqual(set(["c", "e"]), set(result[1][1].keys()))
self._is_per_replica(result[1][1]["c"], ["d1", "d2"], values.Mirrored)
self._is_per_replica(result[1][1]["e"], ["f1", "f2"], values.Mirrored)
# Also test that we can undo the merge using select_replica()
self.assertEqual(_nested_value("1"),
values.select_replica(0, result))
self.assertEqual(_nested_value("2"),
values.select_replica(1, result))
# Values are marked as mirrored, so select_device_mirrored() is allowed.
self.assertEqual(_nested_value("1"),
values.select_device_mirrored(_device_str(0), result))
self.assertEqual(_nested_value("2"),
values.select_device_mirrored(_device_str(1), result))
def testWrapAListOfTwoTuples(self):
device_map = values.ReplicaDeviceMap((_device_str(0), _device_str(1)))
result = values.regroup(device_map, [("1", "2"), ("3", "4")])
self.assertIsInstance(result, tuple)
self.assertEqual(2, len(result))
self._is_per_replica(result[0], ("1", "3"), values.PerReplica)
self._is_per_replica(result[1], ("2", "4"), values.PerReplica)
def testMirroredContainer(self):
if context.num_gpus() < 1 and context.executing_eagerly():
self.skipTest("A GPU is not available for this test in eager mode.")
v, device_map, mirrored = _make_mirrored()
result = values.regroup(device_map, v)
self.assertIs(mirrored, result)
def testSameId(self):
foo = object()
device_map = values.ReplicaDeviceMap((_device_str(0), _device_str(1)))
result = values.regroup(device_map, (("a", foo), ("b", foo)))
self.assertIsInstance(result, tuple)
self.assertEqual(2, len(result))
self._is_per_replica(result[0], ["a", "b"])
self.assertIs(foo, result[1])
# Test select_replica(), should undo the merge done by regroup().
result_0 = values.select_replica(0, result)
self.assertIsInstance(result_0, tuple)
self.assertEqual(2, len(result_0))
self.assertEqual("a", result_0[0])
self.assertIs(foo, result_0[1])
result_1 = values.select_replica(1, result)
self.assertIsInstance(result_1, tuple)
self.assertEqual(2, len(result_1))
self.assertEqual("b", result_1[0])
self.assertIs(foo, result_1[1])
def testOneDevice(self):
device_map = values.ReplicaDeviceMap((_device_str(0),))
result = values.regroup(device_map, (_nested_value("1"),))
# On one device regroup() and select_replica() are basically identity.
self.assertEqual(_nested_value("1"), result)
self.assertEqual(_nested_value("1"),
values.select_replica(0, result))
# The one exception has to do with MirroredVariables.
d = "/device:CPU:0"
with ops.device(d):
v = variable_scope.get_variable(
name="v", initializer=1., use_resource=True)
device_map = values.ReplicaDeviceMap((d,))
mirrored = values.MirroredVariable(None, device_map, (v,),
variable_scope.VariableAggregation.SUM)
result = values.regroup(device_map, (v,))
self.assertIs(mirrored, result)
def testNamedTuple(self):
# We include toy implementations of Scaffold and EstimatorSpec to
# avoid a dependency on Estimator here.
class Scaffold(object):
pass
class EstimatorSpec(collections.namedtuple(
"EstimatorSpec", ["mode", "loss", "train_op", "scaffold"])):
def __new__(cls, mode, loss, train_op, scaffold=None):
return super(EstimatorSpec, cls).__new__(
cls, mode=mode, loss=loss, train_op=train_op,
scaffold=scaffold or Scaffold())
with context.graph_mode(), ops.Graph().as_default():
devices = []
created_estimator_specs = []
for device_id in range(3):
spec = EstimatorSpec(
mode=mode_keys.EstimatorModeKeys.TRAIN,
loss=constant_op.constant(device_id / 2),
train_op=array_ops.identity(constant_op.constant(device_id)))
devices.append(_device_str(device_id))
created_estimator_specs.append(spec)
device_map = values.ReplicaDeviceMap(devices)
merged_estimator_spec = values.regroup(
device_map, created_estimator_specs)
self.assertIsInstance(merged_estimator_spec, EstimatorSpec)
self.assertEqual(mode_keys.EstimatorModeKeys.TRAIN,
merged_estimator_spec.mode)
for device_id in range(3):
d = _device_str(device_id)
self.assertEqual(created_estimator_specs[device_id].loss,
merged_estimator_spec.loss.get(d))
self.assertEqual(created_estimator_specs[device_id].train_op,
merged_estimator_spec.train_op.get(d))
# Scaffold is populated by `EstimatorSpec.__new__`.
self.assertEqual(created_estimator_specs[device_id].scaffold,
merged_estimator_spec.scaffold.get(d))
self.assertIsInstance(created_estimator_specs[device_id].scaffold,
Scaffold)
# Also test that we can undo the merge using select_replica()
self.assertEqual(created_estimator_specs[device_id],
values.select_replica(device_id,
merged_estimator_spec))
class MirroredVariableTest(test.TestCase, parameterized.TestCase):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
@test_util.run_in_graph_and_eager_modes(config=config)
def testProperties(self):
if context.num_gpus() < 1 and context.executing_eagerly():
self.skipTest("A GPU is not available for this test in eager mode.")
v, _, mirrored = _make_mirrored()
self.assertEqual(v[0].name, mirrored.name)
self.assertEqual(v[0].dtype, mirrored.dtype)
self.assertEqual(v[0].shape, mirrored.shape)
@test_util.run_in_graph_and_eager_modes(config=config)
def testVariableOnAnotherDevice(self):
v = variable_scope.get_variable(
name="v", initializer=[1.], use_resource=True)
device_map = values.ReplicaDeviceMap(("/job:foo/device:CPU:0",))
mirrored = values.MirroredVariable(None, device_map, (v,),
variable_scope.VariableAggregation.MEAN)
self.assertEqual(v.name, mirrored.name)
self.assertEqual(v.dtype, mirrored.dtype)
self.assertEqual(v.shape, mirrored.shape)
def _assign_mirrored(self, devices, v, new):
for d, var, n in zip(devices, v, new):
with ops.device(d):
self.evaluate(var.assign(n))
def _save_return_saver(self, sess, var):
saver = saver_lib.Saver(var_list=[var])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
return saver.save(sess, prefix), saver
def _save(self, sess, var):
save_path, _ = self._save_return_saver(sess, var)
return save_path
@test_util.run_in_graph_and_eager_modes(config=config)
def testSaveAndRestoreMirroredOneGraph(self):
if context.num_gpus() < 1 and context.executing_eagerly():
# Graph mode can work without GPU because the Placer "moves" the
# variable to a CPU. In other words, if there is no GPU available, but
# user requested to create a variable on GPU, Placer will ignore the
# user request and assign the VarHandleOp to CPU. This requires
# soft_placement, which is on by default.
self.skipTest("A GPU is not available for this test in eager mode.")
with self.cached_session(config=self.config) as sess:
v, device_map, mirrored = _make_mirrored()
devices = device_map.all_devices
# Overwrite the initial values.
self._assign_mirrored(devices, v, [3., 4.])
# Saves the current value of v[0], 3.
save_path, saver = self._save_return_saver(sess, mirrored)
# Change the values between save and restore.
self._assign_mirrored(devices, v, [5., 6.])
# Restores the saved value of 3. to both variables.
saver.restore(sess, save_path)
self.assertEqual([3., 3.], self.evaluate([v[0], v[1]]))
def _save_mirrored(self):
"""Save variables with mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
v, device_map, mirrored = _make_mirrored()
devices = device_map.all_devices
# Overwrite the initial values.
self._assign_mirrored(devices, v, [3., 4.])
# Saves the current value of v[0], 3.
save_path = self._save(sess, mirrored)
# Change the values between save and restore.
self._assign_mirrored(devices, v, [5., 6.])
return save_path
def _save_normal(self):
"""Save variables without mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
var = variable_scope.get_variable(
name="v", initializer=1., use_resource=True)
# Overwrite the initial value.
self.evaluate(var.assign(3.))
# Saves the current value of var, 3.
save_path = self._save(sess, var)
# Change the values between save and restore.
self.evaluate(var.assign(5.))
return save_path
def _restore_normal(self, save_path):
"""Restore to variables without mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
var = variable_scope.get_variable(
name="v", initializer=7., use_resource=True)
# Overwrite the initial value.
self.evaluate(var.assign(8.))
# Restores the saved value of 3. to `var`.
saver = saver_lib.Saver(var_list=[var])
saver.restore(sess, save_path)
self.assertEqual(3., self.evaluate(var))
def _restore_mirrored(self, save_path):
"""Restore to variables with mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
v, device_map, mirrored = _make_mirrored()
devices = device_map.all_devices
# Overwrite the initial values.
self._assign_mirrored(devices, v, [7., 8.])
# Restores the saved value of 3. to both variables.
saver = saver_lib.Saver(var_list=[mirrored])
saver.restore(sess, save_path)
self.assertEqual([3., 3.], self.evaluate([v[0], v[1]]))
@test_util.run_in_graph_and_eager_modes(config=config)
def testSaveMirroredRestoreMirrored(self):
if context.num_gpus() < 1 and context.executing_eagerly():
# Graph mode can work without GPU because the Placer "moves" the
# variable to a CPU. In other words, if there is no GPU available, but
# user requested to create a variable on GPU, Placer will ignore the
# user request and assign the VarHandleOp to CPU. This requires
# soft_placement, which is on by default.
self.skipTest("A GPU is not available for this test in eager mode.")
save_path = self._save_mirrored()
self._restore_mirrored(save_path)
@test_util.run_in_graph_and_eager_modes(config=config)
def testSaveMirroredRestoreNormal(self):
if context.num_gpus() < 1 and context.executing_eagerly():
# Graph mode can work without GPU because the Placer "moves" the
# variable to a CPU. In other words, if there is no GPU available, but
# user requested to create a variable on GPU, Placer will ignore the
# user request and assign the VarHandleOp to CPU. This requires
# soft_placement, which is on by default.
self.skipTest("A GPU is not available for this test in eager mode.")
save_path = self._save_mirrored()
self._restore_normal(save_path)
@test_util.run_in_graph_and_eager_modes(config=config)
def testSaveNormalRestoreMirrored(self):
if context.num_gpus() < 1 and context.executing_eagerly():
# Graph mode can work without GPU because the Placer "moves" the
# variable to a CPU. In other words, if there is no GPU available, but
# user requested to create a variable on GPU, Placer will ignore the
# user request and assign the VarHandleOp to CPU. This requires
# soft_placement, which is on by default.
self.skipTest("A GPU is not available for this test in eager mode.")
save_path = self._save_normal()
self._restore_mirrored(save_path)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_one_gpu,
],
mode=["graph"]))
def testFetchAMirroredVariable(self, distribution):
with self.session(graph=ops.Graph()) as sess, distribution.scope():
with ops.device("/device:GPU:0"):
v = variable_scope.get_variable(
name="v", initializer=1., use_resource=True)
mirrored = values.MirroredVariable(
distribution, values.ReplicaDeviceMap(("/device:GPU:0",)), (v,),
variable_scope.VariableAggregation.MEAN)
sess.run(variables_lib.global_variables_initializer())
sess.run({"complicated": mirrored})
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
],
mode=["graph", "eager"]))
def testAssignOutOfScope_mirrored(self, distribution):
with distribution.scope():
mirrored = variables_lib.Variable(1.)
if not isinstance(mirrored, values.MirroredVariable):
self.assertIsInstance(mirrored, values.TPUMirroredVariable)
self.evaluate(mirrored.assign(3.))
self.assertEqual(self.evaluate(mirrored.read_value()), 3.)
for component in mirrored.values:
self.assertEqual(self.evaluate(component.read_value()), 3.)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.central_storage_strategy_with_two_gpus
],
mode=["graph", "eager"]))
def testAssignOutOfScope_aggregating(self, distribution):
with distribution.scope():
aggregating = variables_lib.Variable(1.)
self.assertIsInstance(aggregating, values.AggregatingVariable)
self.evaluate(aggregating.assign(3.))
self.assertEqual(self.evaluate(aggregating.read_value()), 3.)
self.assertEqual(self.evaluate(aggregating._v.read_value()), 3.)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
],
mode=["graph", "eager"]))
def testExtendsVariable(self, distribution):
with distribution.scope():
v = variables_lib.Variable(1.)
self.assertIsInstance(v, variables_lib.Variable)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
],
mode=["graph", "eager"]))
def testCheckpointing(self, distribution):
with distribution.scope():
v = variables_lib.Variable(constant_op.constant([1., 2., 3., 4]))
self.evaluate(v.initializer)
before_save = self.evaluate(v.read_value())
# Save random weights into checkpoint.
checkpoint = trackable_utils.Checkpoint(v=v)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
with self.test_session():
save_path = checkpoint.save(prefix)
# Assign inverted value.
self.evaluate(v.assign(constant_op.constant([4., 3., 2., 1.])))
after_assign = self.evaluate(v.read_value())
self.assertNotAllClose(before_save, after_assign)
# Restore from the checkpoint.
with self.test_session():
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
after_restore = self.evaluate(v)
self.assertAllClose(before_save, after_restore)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
],
mode=["graph"]))
def testTraceback(self, distribution):
with distribution.scope():
variable_scope.get_variable(
name="testVar", initializer=1., use_resource=True)
with self.assertRaisesRegex(
ValueError, "Variable testVar already exists"):
variable_scope.get_variable(
name="testVar", initializer=1., use_resource=True)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
],
mode=["eager"]))
def testInitializedToSameValueInsideEagerRun(self, distribution):
v = [None]
@def_function.function
def step():
def f():
if v[0] is None:
v[0] = variables_lib.Variable(random_ops.random_normal([]))
distribution.experimental_run_v2(f)
context.set_global_seed(None)
step()
vals = self.evaluate(v[0].values)
self.assertAllEqual(vals[0], vals[1])
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
],
mode=["graph", "eager"]))
def testSelectReplica(self, distribution):
with distribution.scope():
v = variables_lib.Variable(1.)
self.assertIs(v, values.select_replica(0, v))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
],
mode=["graph", "eager"]))
def testModAfterAssign(self, distribution):
with distribution.scope():
v = variables_lib.Variable(0)
def replica_fn():
def merge_fn(_):
return math_ops.mod(v.assign_add(1), 2)
return distribution_strategy_context.get_replica_context().merge_call(
merge_fn)
@def_function.function
def foo():
distribution.experimental_run_v2(replica_fn)
foo()
_TPU_STRATEGIES = (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)
def _make_replica_local(method, strategy=None):
if strategy is None:
devices = ("/device:GPU:0", "/device:CPU:0")
else:
devices = strategy.extended.worker_devices
device_map = values.ReplicaDeviceMap(devices)
v = []
for d, n, init in zip(devices, ["v", "v/replica"], [1., 2.]):
with ops.device(d):
v.append(variable_scope.get_variable(
name=n, initializer=init, use_resource=True))
if (strategy is not None) and isinstance(strategy, _TPU_STRATEGIES):
var_cls = values.TPUSyncOnReadVariable
else:
var_cls = values.SyncOnReadVariable
replica_local = var_cls(strategy, device_map, v, method)
return v, replica_local
class SyncOnReadVariablePropertiesTest(test.TestCase):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
@test_util.run_in_graph_and_eager_modes(config=config)
def testProperties(self):
if context.num_gpus() < 1 and context.executing_eagerly():
self.skipTest("A GPU is not available for this test in eager mode.")
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM)
self.assertEqual(v[0].name, replica_local.name)
self.assertEqual(v[0].dtype, replica_local.dtype)
self.assertEqual(v[0].shape, replica_local.shape)
self.assertEqual(variable_scope.VariableAggregation.SUM,
replica_local.aggregation)
@test_util.run_in_graph_and_eager_modes(config=config)
def testVariableOnAnotherDevice(self):
v = variable_scope.get_variable(
name="v", initializer=[1.], use_resource=True)
device_map = values.ReplicaDeviceMap(("/job:foo/device:CPU:0",))
replica_local = values.SyncOnReadVariable(
None, device_map, (v,), variable_scope.VariableAggregation.MEAN)
self.assertEqual(v.name, replica_local.name)
self.assertEqual(v.dtype, replica_local.dtype)
self.assertEqual(v.shape, replica_local.shape)
self.assertEqual(variable_scope.VariableAggregation.MEAN,
replica_local.aggregation)
def testTensorConversion(self):
with context.graph_mode():
_, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM)
converted = ops.convert_to_tensor(replica_local, as_ref=False)
self.assertIsInstance(converted, ops.Tensor)
self.assertEqual(converted.dtype, replica_local.dtype)
converted = ops.convert_to_tensor(replica_local, as_ref=True)
      # Resource variables are converted to tensors as well when as_ref is True.
self.assertIsInstance(converted, ops.Tensor)
self.assertEqual(converted.dtype, replica_local.dtype)
@test_util.run_v2_only
def testCanPassToDefFun(self):
@def_function.function
def add1(x):
return x + 1
v = variable_scope.get_variable(
name="v", initializer=[1.], use_resource=True)
device_map = values.ReplicaDeviceMap(("/job:foo/device:CPU:0",))
replica_local = values.SyncOnReadVariable(
None, device_map, (v,), variable_scope.VariableAggregation.MEAN)
self.assertEqual(2., self.evaluate(add1(replica_local)))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
],
mode=["graph", "eager"]))
class SyncOnReadVariableTest(test.TestCase, parameterized.TestCase):
def _assign_replica_local(self, v, new):
for var, n in zip(v, new):
with ops.device(var.device):
self.evaluate(var.assign(n))
def _save_return_saver(self, sess, var):
saver = saver_lib.Saver(var_list=[var])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
return saver.save(sess, prefix), saver
def _save(self, sess, var):
save_path, _ = self._save_return_saver(sess, var)
return save_path
def testSaveAndRestoreReplicaLocalSumOneGraph(self, distribution):
with self.cached_session() as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [3., 4.])
with distribution.scope():
# Saves the current value of v[0] + v[1], 7.
save_path, saver = self._save_return_saver(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
# Restores the saved value of 7. which gets divided equally
# between the variables.
saver.restore(sess, save_path)
self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))
def testSaveAndRestoreReplicaLocalMeanOneGraph(self, distribution):
if context.num_gpus() < 1 and context.executing_eagerly():
self.skipTest("A GPU is not available for this test in eager mode.")
with self.cached_session() as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.MEAN, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [3., 4.])
with distribution.scope():
# Saves the current value of (v[0] + v[1])/2, 3.5.
save_path, saver = self._save_return_saver(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
# Restores the saved value of 3.5 to both variables.
saver.restore(sess, save_path)
self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))
def _save_replica_local_mean(self, distribution):
"""Save variables with mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.MEAN, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [3., 4.])
with distribution.scope():
# Saves the current value of (v[0] + v[1])/2, 3.5
save_path = self._save(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
return save_path
def _save_replica_local_sum(self, distribution):
"""Save variables with mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [1.5, 2.])
with distribution.scope():
# Saves the current value of v[0] + v[1], 3.5
save_path = self._save(sess, replica_local)
# Change the values between save and restore.
self._assign_replica_local(v, [5., 6.])
return save_path
def _save_normal(self):
"""Save variables without mirroring, returns save_path."""
with self.session(graph=ops.Graph()) as sess:
var = variable_scope.get_variable(
name="v", initializer=1., use_resource=True)
# Overwrite the initial value.
self.evaluate(var.assign(3.5))
# Saves the current value of var, 3.5.
save_path = self._save(sess, var)
# Change the values between save and restore.
self.evaluate(var.assign(5.))
return save_path
def _restore_normal(self, save_path):
"""Restore to variables without mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
var = variable_scope.get_variable(
name="v", initializer=7., use_resource=True)
# Overwrite the initial value.
self.evaluate(var.assign(8.))
# Restores the saved value of 3.5 to `var`.
saver = saver_lib.Saver(var_list=[var])
saver.restore(sess, save_path)
self.assertEqual(3.5, self.evaluate(var))
def _restore_replica_local_mean(self, save_path, distribution):
"""Restore to variables with mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.MEAN, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [7., 8.])
with distribution.scope():
# Restores the saved value of 3.5 to both variables.
saver = saver_lib.Saver(var_list=[replica_local])
saver.restore(sess, save_path)
self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))
def _restore_replica_local_sum(self, save_path, distribution):
"""Restore to variables with mirroring in a fresh graph."""
with self.session(graph=ops.Graph()) as sess:
v, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM, distribution)
# Overwrite the initial values.
self._assign_replica_local(v, [7., 8.])
with distribution.scope():
# Restores the saved value of 3.5 to both variables.
saver = saver_lib.Saver(var_list=[replica_local])
saver.restore(sess, save_path)
self.assertEqual([1.75, 1.75], self.evaluate([v[0], v[1]]))
def testSaveReplicaLocalRestoreReplicaLocalMean(self, distribution):
save_path = self._save_replica_local_mean(distribution)
self._restore_replica_local_mean(save_path, distribution)
def testSaveReplicaLocalRestoreReplicaLocalSum(self, distribution):
save_path = self._save_replica_local_sum(distribution)
self._restore_replica_local_sum(save_path, distribution)
def testSaveReplicaLocalMeanRestoreNormal(self, distribution):
save_path = self._save_replica_local_mean(distribution)
self._restore_normal(save_path)
def testSaveReplicaLocalSumRestoreNormal(self, distribution):
save_path = self._save_replica_local_sum(distribution)
self._restore_normal(save_path)
def testSaveNormalRestoreReplicaLocalMean(self, distribution):
save_path = self._save_normal()
self._restore_replica_local_mean(save_path, distribution)
def testSaveNormalRestoreReplicaLocalSum(self, distribution):
save_path = self._save_normal()
self._restore_replica_local_sum(save_path, distribution)
def testAssign(self, distribution):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
return distribution.experimental_local_results(
distribution.experimental_run_v2(update_fn))
updates = [("assign", 1.), ("assign_add", 1.), ("assign_sub", -1.)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = ( # VariableAggregation.SUM in cross-replica mode is tested below
[x for x in itertools.product(updates, aggregations, [True, False])
if not(x[1] == variables_lib.VariableAggregation.SUM and x[2])])
for update, aggregation, cross_replica in options:
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
def testAssignDtypeConversion(self, distribution):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
return distribution.experimental_local_results(
distribution.experimental_run_v2(update_fn))
updates = [("assign", 1), ("assign_add", 1), ("assign_sub", -1)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = ( # VariableAggregation.SUM in cross-replica mode is tested below
[x for x in itertools.product(updates, aggregations, [True, False])
if not(x[1] == variables_lib.VariableAggregation.SUM and x[2])])
for update, aggregation, cross_replica in options:
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
def testAssignWithAggregationSum(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(v.assign(1. * distribution.num_replicas_in_sync))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
def testAssignAddSubWithAggregationSum(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(variables_lib.global_variables_initializer())
with self.assertRaisesRegex(
ValueError, "SyncOnReadVariable does not support "):
self.evaluate(v.assign_add(1.))
with self.assertRaisesRegex(
ValueError, "SyncOnReadVariable does not support "):
self.evaluate(v.assign_sub(1.))
def testReadValueInReplicaContext(self, distribution):
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
results = self.evaluate(distribution.experimental_local_results(
distribution.experimental_run_v2(v.read_value)))
for component, value in zip(v._values, results):
self.assertAllEqual(self.evaluate(component.read_value()), value)
def testReadValueInCrossReplicaContext(self, distribution):
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
if isinstance(distribution, _TPU_STRATEGIES):
resolver = tpu_cluster_resolver.TPUClusterResolver('')
tpu_strategy_util.initialize_tpu_system(resolver)
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
def assign(v=v):
ctx = distribution_strategy_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return v.assign(math_ops.cast(replica_id, dtypes.float32))
self.evaluate(distribution.experimental_local_results(
distribution.experimental_run_v2(assign)))
result = self.evaluate(v.read_value())
num_replicas = distribution.num_replicas_in_sync
sum_of_replica_values = num_replicas * (num_replicas - 1) / 2.
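      # Each replica assigned its own replica id (0 .. num_replicas - 1), so
      # the summed value is 0 + 1 + ... + (num_replicas - 1) = n * (n - 1) / 2.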
if aggregation == variables_lib.VariableAggregation.SUM:
expected = sum_of_replica_values
elif aggregation == variables_lib.VariableAggregation.MEAN:
expected = sum_of_replica_values / num_replicas
else:
expected = 0
self.assertEqual(expected, result, aggregation)
def testReadValueWithAggregationNoneInCrossReplicaContext(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.NONE)
self.evaluate(variables_lib.global_variables_initializer())
with self.assertRaisesRegex(
ValueError, "Could not convert from .* VariableAggregation\\.NONE"):
self.evaluate(v.read_value())
def testInitializedToSameValueInsideEagerRun(self, distribution):
if not context.executing_eagerly(): self.skipTest("eager only")
v = [None]
@def_function.function
def step():
def f():
if v[0] is None:
v[0] = variables_lib.Variable(
random_ops.random_normal([]),
synchronization=variables_lib.VariableSynchronization.ON_READ)
distribution.experimental_run_v2(f)
context.set_global_seed(None)
step()
vals = self.evaluate(v[0].values)
self.assertAllEqual(vals[0], vals[1])
class MirroredTest(test.TestCase):
def testAddOp(self):
if context.num_gpus() < 1:
self.skipTest("A GPU is not available for this test.")
mirrored_val = _make_mirrored_val(init_val=3.)
self.assertEqual(self.evaluate(constant_op.constant(6.)),
self.evaluate(mirrored_val + mirrored_val))
self.assertEqual(self.evaluate(constant_op.constant(4.)),
self.evaluate(mirrored_val + 1))
self.assertEqual(self.evaluate(mirrored_val + 1),
self.evaluate(math_ops.add(mirrored_val, 1)))
self.assertEqual(type(mirrored_val + 1),
type(math_ops.add(mirrored_val, 1)))
class PerReplicaTest(test.TestCase, parameterized.TestCase):
def testTypeSpec(self):
device_map = values.SingleDeviceMap("CPU")
vals = (constant_op.constant(1.),)
per_replica = values.PerReplica(device_map, vals)
spec = per_replica._type_spec
self.assertEqual(spec._value_specs,
(tensor_spec.TensorSpec([], dtypes.float32),))
self.assertEqual(spec._device_map, per_replica.device_map)
self.assertEqual(spec._logical_device, per_replica.logical_device)
def testTypeSpecRoundTrip(self):
device_map = values.SingleDeviceMap("CPU")
vals = (constant_op.constant(1.),)
per_replica = values.PerReplica(device_map, vals)
spec = per_replica._type_spec
tensor_list = spec._to_components(per_replica)
reconstructed = spec._from_components(tensor_list)
self.assertEqual(per_replica.device_map, reconstructed.device_map)
self.assertEqual(per_replica.logical_device, reconstructed.logical_device)
self.assertAllEqual(per_replica.values, reconstructed.values)
def testTypeSpecNest(self):
device_map = values.ReplicaDeviceMap(["CPU:0", "CPU:1"])
vals = (constant_op.constant(1.), constant_op.constant([5., 6.0]),)
per_replica = values.PerReplica(device_map, vals)
    # Note: nest.map_structure exercises nest.flatten and
    # nest.pack_sequence_as.
result = nest.map_structure(lambda t: t + 10, per_replica,
expand_composites=True)
self.assertEqual(per_replica.device_map, result.device_map)
self.assertEqual(per_replica.logical_device, result.logical_device)
self.assertLen(result.values, 2)
self.assertAllEqual(result.values[0], 11.)
self.assertAllEqual(result.values[1], [15., 16.0])
@test_util.run_in_graph_and_eager_modes
def testIsGraphTensor(self):
per_replica = values.PerReplica(values.SingleDeviceMap("CPU"),
(constant_op.constant(1.),))
for t in nest.flatten(per_replica, expand_composites=True):
self.assertEqual(hasattr(t, "graph"), not context.executing_eagerly())
def testDoesNotTriggerFunctionTracing(self):
traces = []
@def_function.function
def f(x):
traces.append(None) # Only happens on trace.
return x
per_replica = values.PerReplica(
values.SingleDeviceMap("CPU"), (constant_op.constant(1.),))
# Trace once.
f(per_replica)
self.assertNotEmpty(traces)
del traces[:]
per_replica_spec = per_replica._type_spec
for _ in range(5):
vals = per_replica_spec._to_components(per_replica)
vals = [v * 2 for v in vals]
per_replica = per_replica_spec._from_components(vals)
output = f(per_replica)
self.assertIsInstance(output, values.PerReplica)
self.assertAllEqual(output._values, per_replica._values)
self.assertAllEqual(output._device_map, per_replica._device_map)
self.assertAllEqual(output._logical_device, per_replica._logical_device)
self.assertEmpty(traces) # Make sure we're not re-tracing `f`.
def testFunctionCanReturnPerReplica(self):
f = def_function.function(lambda x: x)
x = values.PerReplica(
values.SingleDeviceMap("CPU"), (constant_op.constant(1.),))
y = f(x)
self.assertIsNot(x, y)
nest.map_structure(self.assertAllEqual, x, y, expand_composites=True)
self.assertEqual(x._type_spec, y._type_spec)
@test_util.run_in_graph_and_eager_modes
def testCondWithTensorValues(self):
device_map = values.SingleDeviceMap("CPU")
per_replica_1 = values.PerReplica(device_map, (constant_op.constant("a"),))
per_replica_2 = values.PerReplica(device_map,
(constant_op.constant(["b", "c"]),))
condition = array_ops.placeholder_with_default(True, [])
result = control_flow_ops.cond(
condition, lambda: per_replica_1, lambda: per_replica_2)
self.assertEqual(per_replica_1.device_map, result.device_map)
self.assertEqual(per_replica_1.logical_device, result.logical_device)
self.assertLen(result.values, 1)
self.assertAllEqual(result.values[0], "a")
@test_util.run_in_graph_and_eager_modes
def testCondWithValuesConvertibleToTensor(self):
device_map = values.SingleDeviceMap("CPU")
per_replica_1 = values.PerReplica(device_map, ("a",))
per_replica_2 = values.PerReplica(device_map, ("b",))
condition = array_ops.placeholder_with_default(True, [])
result = control_flow_ops.cond(
condition, lambda: per_replica_1, lambda: per_replica_2)
self.assertEqual(per_replica_1.device_map, result.device_map)
self.assertEqual(per_replica_1.logical_device, result.logical_device)
self.assertLen(result.values, 1)
self.assertAllEqual(result.values[0], "a")
@test_util.build_as_function_and_v1_graph
def testCondWithValuesNotConvertibleToTensor(self):
device_map = values.SingleDeviceMap("CPU")
per_replica_1 = values.PerReplica(device_map, (set(["a"]),))
per_replica_2 = values.PerReplica(device_map, (set(["b", "c"]),))
condition = array_ops.placeholder(dtypes.bool, [])
with self.assertRaisesRegex(TypeError, "Could not build a TypeSpec for"):
control_flow_ops.cond(
condition, lambda: per_replica_1, lambda: per_replica_2)
class WorkerDeviceMapTest(test.TestCase, parameterized.TestCase):
class ReplicaContext(object):
def __init__(self, replica_id_in_sync_group):
self.replica_id_in_sync_group = replica_id_in_sync_group
def testBasic(self):
devices = [
"/job:worker/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:2/device:CPU:0"
]
device_map = values.WorkerDeviceMap(devices, 1)
self.assertAllEqual(devices, device_map.all_devices)
# pylint:disable=pointless-statement
with self.assertRaisesWithPredicateMatch(
ValueError, "`WorkerDeviceMap` is not indexed by replicas"):
device_map.devices_by_replica
self.assertEqual(1, device_map.num_logical_devices)
self.assertEqual(2, device_map.num_replicas_in_graph)
self.assertEqual(0, device_map.logical_device_from_values(["a", "b"]))
self.assertAllEqual(devices, device_map.logical_to_actual_devices(0))
replica_context = WorkerDeviceMapTest.ReplicaContext(1)
self.assertEqual(
"b", device_map.select_for_current_replica(["a", "b"], replica_context))
with self.assertRaisesWithPredicateMatch(
ValueError, "`WorkerDeviceMap` not indexed by replicas"):
device_map.replica_for_device(devices[1])
self.assertEqual("b", device_map.select_for_device(["a", "b"], devices[1]))
with self.assertRaisesWithPredicateMatch(
ValueError, "WorkerDeviceMap not indexed by replicas"):
device_map.is_device_in_replica(devices[1], 1)
self.assertEqual(
"WorkerDeviceMap(('/job:worker/replica:0/task:0/device:CPU:0', "
"'/job:worker/replica:0/task:2/device:CPU:0'), "
"num_replicas_per_worker=1)", repr(device_map))
def testMultipleReplicasPerWorker(self):
devices = [
"/job:worker/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:2/device:CPU:0"
]
device_map = values.WorkerDeviceMap(devices, 2)
replica_context = WorkerDeviceMapTest.ReplicaContext(3)
self.assertEqual(
"b", device_map.select_for_current_replica(["a", "b"], replica_context))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
],
mode=["graph", "eager"]))
def testExperimentalLocalResultsOrder(self, distribution):
# Create 2 devices in the device map, where the alphabetical order and the
# actual order of devices are different.
device_map = values.ReplicaDeviceMap(["CPU:2", "CPU:10"])
vals = (
constant_op.constant(1.),
constant_op.constant([5., 6.0]),
)
per_replica = values.PerReplica(device_map, vals)
results = self.evaluate(
distribution.experimental_local_results(per_replica))
    # We expect the outputs to be in the same order as the inputs.
self.assertLen(results, 2)
self.assertAllEqual(1.0, results[0])
self.assertAllEqual([5., 6.], results[1])
if __name__ == "__main__":
test.main()
| ppwwyyxx/tensorflow | tensorflow/python/distribute/values_test.py | Python | apache-2.0 | 56,467 |
from __future__ import absolute_import
from django.conf import settings
from django.core.mail import EmailMessage
from typing import Any, Mapping, Optional, Text
from zerver.lib.actions import internal_send_message
from zerver.lib.send_email import FromAddress
from zerver.lib.redis_utils import get_redis_client
from zerver.models import get_realm, get_system_bot, \
UserProfile, Realm
import time
client = get_redis_client()
def has_enough_time_expired_since_last_message(sender_email, min_delay):
# type: (Text, float) -> bool
# This function returns a boolean, but it also has the side effect
# of noting that a new message was received.
key = 'zilencer:feedback:%s' % (sender_email,)
t = int(time.time())
last_time = client.getset(key, t)
if last_time is None:
return True
delay = t - int(last_time)
return delay > min_delay
def get_ticket_number():
# type: () -> int
num_file = '/var/tmp/.feedback-bot-ticket-number'
try:
ticket_number = int(open(num_file).read()) + 1
except Exception:
ticket_number = 1
open(num_file, 'w').write('%d' % (ticket_number,))
return ticket_number
def deliver_feedback_by_zulip(message):
# type: (Mapping[str, Any]) -> None
subject = "%s" % (message["sender_email"],)
if len(subject) > 60:
subject = subject[:57].rstrip() + "..."
content = u''
sender_email = message['sender_email']
# We generate ticket numbers if it's been more than a few minutes
# since their last message. This avoids some noise when people use
# enter-send.
need_ticket = has_enough_time_expired_since_last_message(sender_email, 180)
if need_ticket:
ticket_number = get_ticket_number()
content += '\n~~~'
content += '\nticket Z%03d (@support please ack)' % (ticket_number,)
content += '\nsender: %s' % (message['sender_full_name'],)
content += '\nemail: %s' % (sender_email,)
if 'sender_realm_str' in message:
content += '\nrealm: %s' % (message['sender_realm_str'],)
content += '\n~~~'
content += '\n\n'
content += message['content']
user_profile = get_system_bot(settings.FEEDBACK_BOT)
internal_send_message(user_profile.realm, settings.FEEDBACK_BOT,
"stream", settings.FEEDBACK_STREAM, subject, content)
def handle_feedback(event):
# type: (Mapping[str, Any]) -> None
if not settings.ENABLE_FEEDBACK:
return
if settings.FEEDBACK_EMAIL is not None:
to_email = settings.FEEDBACK_EMAIL
subject = "Zulip feedback from %s" % (event["sender_email"],)
content = event["content"]
from_email = '"%s" <%s>' % (event["sender_full_name"], FromAddress.SUPPORT)
headers = {'Reply-To': '"%s" <%s>' % (event["sender_full_name"], event["sender_email"])}
msg = EmailMessage(subject, content, from_email, [to_email], headers=headers)
msg.send()
if settings.FEEDBACK_STREAM is not None:
deliver_feedback_by_zulip(event)
| vaidap/zulip | zerver/lib/feedback.py | Python | apache-2.0 | 3,063 |
#!/bin/env python
import os
import re
import sys
# Pattern used to split hyphen-separated file names into tokens.
ws = re.compile(r'-')
# list.txt holds one hyphen-separated file name per line.
f = open("list.txt")
names = f.readlines()
f.close()
for name in names:
    # Strip the trailing newline, then CamelCase each hyphen-separated token
    # and copy the file under the new name.
    name = name[0:-1]
    newname = ""
for token in ws.split(name):
newname += token[0].upper()
newname += token[1:]
cmd = "cp %s %s" % (name,newname)
print cmd
os.system(cmd)
| wh81752/flaka | opt/rename.py | Python | apache-2.0 | 351 |
# Copyright (c) 2007-2019 UShareSoft, All rights reserved
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from texttable import Texttable
from ussclicore.utils import generics_utils
def scan_status(scan):
if (scan.status.complete and not scan.status.error and not scan.status.cancelled):
return "Done"
elif(not scan.status.complete and not scan.status.error and not scan.status.cancelled):
return str(scan.status.percentage)+"%"
else:
return "Error"
def scan_table(scanInstances, scan = None):
table = Texttable(800)
table.set_cols_dtype(["t", "t", "t", "t", "t"])
table.set_cols_align(["c", "l", "c", "c", "c"])
table.header(["Id", "Name", "Status", "Distribution", "With overlay"])
if scan:
table.add_row([scan.dbId, "\t"+scan.name, scan_status(scan), "", ""])
return table
for myScannedInstance in scanInstances:
withOverlayStr = ''
if myScannedInstance.overlayIncluded:
withOverlayStr = 'X'
table.add_row([myScannedInstance.dbId, myScannedInstance.name, "", myScannedInstance.distribution.name + " "+ myScannedInstance.distribution.version + " " + myScannedInstance.distribution.arch, withOverlayStr])
scans = generics_utils.order_list_object_by(myScannedInstance.scans.scan, "name")
for lscan in scans:
table.add_row([lscan.dbId, "\t"+lscan.name, scan_status(lscan), "", ""])
return table
| emuus/hammr | hammr/utils/scan_utils.py | Python | apache-2.0 | 1,969 |
"""Test the Z-Wave over MQTT config flow."""
from homeassistant import config_entries, setup
from homeassistant.components.ozw.config_flow import TITLE
from homeassistant.components.ozw.const import DOMAIN
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_user_create_entry(hass):
"""Test the user step creates an entry."""
hass.config.components.add("mqtt")
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.ozw.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.ozw.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == TITLE
assert result2["data"] == {}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_mqtt_not_setup(hass):
"""Test that mqtt is required."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "abort"
assert result["reason"] == "mqtt_required"
async def test_one_instance_allowed(hass):
"""Test that only one instance is allowed."""
entry = MockConfigEntry(domain=DOMAIN, data={}, title=TITLE)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
| sdague/home-assistant | tests/components/ozw/test_config_flow.py | Python | apache-2.0 | 1,943 |
from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Dict, List
from .template_parser import (
tokenize,
Token,
is_django_block_tag,
)
from six.moves import range
import os
def pretty_print_html(html, num_spaces=4):
# type: (str, int) -> str
# We use 1-based indexing for both rows and columns.
tokens = tokenize(html)
lines = html.split('\n')
# We will keep a stack of "start" tags so that we know
# when HTML ranges end. Note that some start tags won't
# be blocks from an indentation standpoint.
stack = [] # type: List[Dict[str, Any]]
# Seed our stack with a pseudo entry to make depth calculations
# easier.
info = dict(
block=False,
depth=-1,
line=-1,
token_kind='html_start',
tag='html',
extra_indent=0) # type: Dict[str, Any]
stack.append(info)
# Our main job is to figure out offsets that we use to nudge lines
# over by.
offsets = {} # type: Dict[int, int]
# Loop through our start/end tokens, and calculate offsets. As
# we proceed, we will push/pop info dictionaries on/off a stack.
for token in tokens:
if token.kind in ('html_start', 'handlebars_start',
'html_singleton', 'django_start') and stack[-1]['tag'] != 'pre':
# An HTML start tag should only cause a new indent if we
# are on a new line.
if (token.tag not in ('extends', 'include', 'else', 'elif') and
(is_django_block_tag(token.tag) or
token.kind != 'django_start')):
is_block = token.line > stack[-1]['line']
if is_block:
if (((token.kind == 'handlebars_start' and
stack[-1]['token_kind'] == 'handlebars_start') or
(token.kind == 'django_start' and
stack[-1]['token_kind'] == 'django_start')) and
not stack[-1]['indenting']):
info = stack.pop()
info['depth'] = info['depth'] + 1
info['indenting'] = True
info['adjust_offset_until'] = token.line
stack.append(info)
new_depth = stack[-1]['depth'] + 1
extra_indent = stack[-1]['extra_indent']
line = lines[token.line - 1]
adjustment = len(line)-len(line.lstrip()) + 1
offset = (1 + extra_indent + new_depth * num_spaces) - adjustment
info = dict(
block=True,
depth=new_depth,
actual_depth=new_depth,
line=token.line,
tag=token.tag,
token_kind=token.kind,
line_span=token.line_span,
offset=offset,
extra_indent=token.col - adjustment + extra_indent,
extra_indent_prev=extra_indent,
adjustment=adjustment,
indenting=True,
adjust_offset_until=token.line
)
if token.kind in ('handlebars_start', 'django_start'):
info.update(dict(depth=new_depth - 1, indenting=False))
else:
info = dict(
block=False,
depth=stack[-1]['depth'],
actual_depth=stack[-1]['depth'],
line=token.line,
tag=token.tag,
token_kind=token.kind,
extra_indent=stack[-1]['extra_indent']
)
stack.append(info)
elif token.kind in ('html_end', 'handlebars_end',
'html_singleton_end', 'django_end') and (stack[-1]['tag'] != 'pre' or token.tag == 'pre'):
info = stack.pop()
if info['block']:
# We are at the end of an indentation block. We
# assume the whole block was formatted ok before, just
# possibly at an indentation that we don't like, so we
# nudge over all lines in the block by the same offset.
start_line = info['line']
end_line = token.line
if token.tag == 'pre':
offsets[start_line] = 0
offsets[end_line] = 0
else:
offsets[start_line] = info['offset']
line = lines[token.line - 1]
adjustment = len(line)-len(line.lstrip()) + 1
if adjustment == token.col:
offsets[end_line] = (info['offset'] +
info['adjustment'] -
adjustment +
info['extra_indent'] -
info['extra_indent_prev'])
elif (start_line + info['line_span'] - 1 == end_line and
info['line_span'] > 2 and token.kind != 'html_singleton_end'):
offsets[end_line] = (1 + info['extra_indent'] + (info['depth'] + 1) * num_spaces) - adjustment
elif token.line != info['line']:
offsets[end_line] = info['offset']
if token.tag != 'pre' and token.kind != 'html_singleton_end' and token.tag != 'script':
for line_num in range(start_line + 1, end_line):
# Be careful not to override offsets that happened
# deeper in the HTML within our block.
if line_num not in offsets:
line = lines[line_num - 1]
new_depth = info['depth'] + 1
if (line.lstrip().startswith('{{else}}') or
line.lstrip().startswith('{% else %}') or
line.lstrip().startswith('{% elif')):
new_depth = info['actual_depth']
extra_indent = info['extra_indent']
adjustment = len(line)-len(line.lstrip()) + 1
offset = (1 + extra_indent + new_depth * num_spaces) - adjustment
offsets[line_num] = offset
elif (token.kind in ('handlebars_end', 'django_end') and
info['indenting'] and
line_num < info['adjust_offset_until']):
offsets[line_num] += num_spaces
elif token.tag != 'pre':
for line_num in range(start_line + 1, end_line):
if line_num not in offsets:
offsets[line_num] = info['offset']
else:
for line_num in range(start_line + 1, end_line):
if line_num not in offsets:
offsets[line_num] = 0
# Now that we have all of our offsets calculated, we can just
# join all our lines together, fixing up offsets as needed.
formatted_lines = []
for i, line in enumerate(html.split('\n')):
row = i + 1
offset = offsets.get(row, 0)
pretty_line = line
if line.strip() == '':
pretty_line = ''
else:
if offset > 0:
pretty_line = (' ' * offset) + pretty_line
elif offset < 0:
pretty_line = pretty_line[-1 * offset:]
assert line.strip() == pretty_line.strip()
formatted_lines.append(pretty_line)
return '\n'.join(formatted_lines)
def validate_indent_html(fn):
# type: (str) -> int
file = open(fn)
html = file.read()
phtml = pretty_print_html(html)
file.close()
if not html.split('\n') == phtml.split('\n'):
temp_file = open('/var/tmp/pretty_html.txt', 'w')
temp_file.write(phtml)
temp_file.close()
print('Invalid Indentation detected in file: %s\nDiff for the file against expected indented file:' % (fn))
os.system('diff %s %s' % (fn, '/var/tmp/pretty_html.txt'))
return 0
return 1
| christi3k/zulip | tools/lib/pretty_print.py | Python | apache-2.0 | 8,553 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks FilterDataset input pipeline op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.data.python.ops import optimization
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class FilterBenchmark(test.Benchmark):
# This benchmark compares the performance of pipeline with multiple chained
# filter with and without filter fusion.
def benchmarkFilters(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmarkFilters(chain_length, False)
self._benchmarkFilters(chain_length, True)
def _benchmarkFilters(self, chain_length, optimize_dataset):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(5).repeat(None)
for _ in range(chain_length):
dataset = dataset.filter(lambda x: math_ops.greater_equal(x - 5, 0))
if optimize_dataset:
dataset = dataset.apply(optimization.optimize(["filter_fusion"]))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(10):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
opt_mark = "opt" if optimize_dataset else "no-opt"
print("Filter dataset {} chain length: {} Median wall time: {}".format(
opt_mark, chain_length, median_wall_time))
self.report_benchmark(
iters=1000,
wall_time=median_wall_time,
name="benchmark_filter_dataset_chain_latency_{}_{}".format(
opt_mark, chain_length))
if __name__ == "__main__":
test.main()
| kobejean/tensorflow | tensorflow/contrib/data/python/kernel_tests/filter_dataset_op_test.py | Python | apache-2.0 | 2,828 |
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011,2012 Akira YOSHIYAMA <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This source code is based ./auth_token.py and ./ec2_token.py.
# See them for their copyright.
"""
-------------------
S3 Token Middleware
-------------------
s3token middleware is for authentication with s3api + keystone.
This middleware:
* Gets a request from the s3api middleware with an S3 Authorization
access key.
* Validates s3 token with Keystone.
* Transforms the account name to AUTH_%(tenant_name).
* Optionally can retrieve and cache secret from keystone
to validate signature locally
.. note::
If upgrading from swift3, the ``auth_version`` config option has been
removed, and the ``auth_uri`` option now includes the Keystone API
version. If you previously had a configuration like
.. code-block:: ini
[filter:s3token]
use = egg:swift3#s3token
auth_uri = https://keystonehost:35357
auth_version = 3
you should now use
.. code-block:: ini
[filter:s3token]
use = egg:swift#s3token
auth_uri = https://keystonehost:35357/v3
"""
import base64
import json
from keystoneclient.v3 import client as keystone_client
from keystoneauth1 import session as keystone_session
from keystoneauth1 import loading as keystone_loading
import requests
import six
from six.moves import urllib
from swift.common.swob import Request, HTTPBadRequest, HTTPUnauthorized, \
HTTPException
from swift.common.utils import config_true_value, split_path, get_logger, \
cache_from_env, append_underscore
from swift.common.wsgi import ConfigFileError
PROTOCOL_NAME = 'S3 Token Authentication'
# Headers to purge if they came from (or may have come from) the client
KEYSTONE_AUTH_HEADERS = (
'X-Identity-Status', 'X-Service-Identity-Status',
'X-Domain-Id', 'X-Service-Domain-Id',
'X-Domain-Name', 'X-Service-Domain-Name',
'X-Project-Id', 'X-Service-Project-Id',
'X-Project-Name', 'X-Service-Project-Name',
'X-Project-Domain-Id', 'X-Service-Project-Domain-Id',
'X-Project-Domain-Name', 'X-Service-Project-Domain-Name',
'X-User-Id', 'X-Service-User-Id',
'X-User-Name', 'X-Service-User-Name',
'X-User-Domain-Id', 'X-Service-User-Domain-Id',
'X-User-Domain-Name', 'X-Service-User-Domain-Name',
'X-Roles', 'X-Service-Roles',
'X-Is-Admin-Project',
'X-Service-Catalog',
# Deprecated headers, too...
'X-Tenant-Id',
'X-Tenant-Name',
'X-Tenant',
'X-User',
'X-Role',
)
def parse_v2_response(token):
access_info = token['access']
headers = {
'X-Identity-Status': 'Confirmed',
'X-Roles': ','.join(r['name']
for r in access_info['user']['roles']),
'X-User-Id': access_info['user']['id'],
'X-User-Name': access_info['user']['name'],
'X-Tenant-Id': access_info['token']['tenant']['id'],
'X-Tenant-Name': access_info['token']['tenant']['name'],
'X-Project-Id': access_info['token']['tenant']['id'],
'X-Project-Name': access_info['token']['tenant']['name'],
}
return headers, access_info['token']['tenant']
def parse_v3_response(token):
token = token['token']
headers = {
'X-Identity-Status': 'Confirmed',
'X-Roles': ','.join(r['name']
for r in token['roles']),
'X-User-Id': token['user']['id'],
'X-User-Name': token['user']['name'],
'X-User-Domain-Id': token['user']['domain']['id'],
'X-User-Domain-Name': token['user']['domain']['name'],
'X-Tenant-Id': token['project']['id'],
'X-Tenant-Name': token['project']['name'],
'X-Project-Id': token['project']['id'],
'X-Project-Name': token['project']['name'],
'X-Project-Domain-Id': token['project']['domain']['id'],
'X-Project-Domain-Name': token['project']['domain']['name'],
}
return headers, token['project']
class S3Token(object):
"""Middleware that handles S3 authentication."""
def __init__(self, app, conf):
"""Common initialization code."""
self._app = app
self._logger = get_logger(
conf, log_route=conf.get('log_name', 's3token'))
self._logger.debug('Starting the %s component', PROTOCOL_NAME)
self._timeout = float(conf.get('http_timeout', '10.0'))
if not (0 < self._timeout <= 60):
raise ValueError('http_timeout must be between 0 and 60 seconds')
self._reseller_prefix = append_underscore(
conf.get('reseller_prefix', 'AUTH'))
self._delay_auth_decision = config_true_value(
conf.get('delay_auth_decision'))
# where to find the auth service (we use this to validate tokens)
self._request_uri = conf.get('auth_uri', '').rstrip('/') + '/s3tokens'
parsed = urllib.parse.urlsplit(self._request_uri)
if not parsed.scheme or not parsed.hostname:
raise ConfigFileError(
'Invalid auth_uri; must include scheme and host')
if parsed.scheme not in ('http', 'https'):
raise ConfigFileError(
'Invalid auth_uri; scheme must be http or https')
if parsed.query or parsed.fragment or '@' in parsed.netloc:
raise ConfigFileError('Invalid auth_uri; must not include '
'username, query, or fragment')
# SSL
insecure = config_true_value(conf.get('insecure'))
cert_file = conf.get('certfile')
key_file = conf.get('keyfile')
if insecure:
self._verify = False
elif cert_file and key_file:
self._verify = (cert_file, key_file)
elif cert_file:
self._verify = cert_file
else:
self._verify = None
self._secret_cache_duration = int(conf.get('secret_cache_duration', 0))
if self._secret_cache_duration < 0:
raise ValueError('secret_cache_duration must be non-negative')
if self._secret_cache_duration:
try:
auth_plugin = keystone_loading.get_plugin_loader(
conf.get('auth_type', 'password'))
available_auth_options = auth_plugin.get_options()
auth_options = {}
for option in available_auth_options:
name = option.name.replace('-', '_')
value = conf.get(name)
if value:
auth_options[name] = value
auth = auth_plugin.load_from_options(**auth_options)
session = keystone_session.Session(auth=auth)
self.keystoneclient = keystone_client.Client(
session=session,
region_name=conf.get('region_name'))
self._logger.info("Caching s3tokens for %s seconds",
self._secret_cache_duration)
except Exception:
self._logger.warning("Unable to load keystone auth_plugin. "
"Secret caching will be unavailable.",
exc_info=True)
self.keystoneclient = None
self._secret_cache_duration = 0
def _deny_request(self, code):
error_cls, message = {
'AccessDenied': (HTTPUnauthorized, 'Access denied'),
'InvalidURI': (HTTPBadRequest,
'Could not parse the specified URI'),
}[code]
resp = error_cls(content_type='text/xml')
error_msg = ('<?xml version="1.0" encoding="UTF-8"?>\r\n'
'<Error>\r\n <Code>%s</Code>\r\n '
'<Message>%s</Message>\r\n</Error>\r\n' %
(code, message))
if six.PY3:
error_msg = error_msg.encode()
resp.body = error_msg
return resp
def _json_request(self, creds_json):
headers = {'Content-Type': 'application/json'}
try:
response = requests.post(self._request_uri,
headers=headers, data=creds_json,
verify=self._verify,
timeout=self._timeout)
except requests.exceptions.RequestException as e:
self._logger.info('HTTP connection exception: %s', e)
raise self._deny_request('InvalidURI')
if response.status_code < 200 or response.status_code >= 300:
self._logger.debug('Keystone reply error: status=%s reason=%s',
response.status_code, response.reason)
raise self._deny_request('AccessDenied')
return response
def __call__(self, environ, start_response):
"""Handle incoming request. authenticate and send downstream."""
req = Request(environ)
self._logger.debug('Calling S3Token middleware.')
# Always drop auth headers if we're first in the pipeline
if 'keystone.token_info' not in req.environ:
req.headers.update({h: None for h in KEYSTONE_AUTH_HEADERS})
try:
parts = split_path(urllib.parse.unquote(req.path), 1, 4, True)
version, account, container, obj = parts
except ValueError:
msg = 'Not a path query: %s, skipping.' % req.path
self._logger.debug(msg)
return self._app(environ, start_response)
# Read request signature and access id.
s3_auth_details = req.environ.get('s3api.auth_details')
if not s3_auth_details:
msg = 'No authorization details from s3api. skipping.'
self._logger.debug(msg)
return self._app(environ, start_response)
access = s3_auth_details['access_key']
if isinstance(access, six.binary_type):
access = access.decode('utf-8')
signature = s3_auth_details['signature']
if isinstance(signature, six.binary_type):
signature = signature.decode('utf-8')
string_to_sign = s3_auth_details['string_to_sign']
if isinstance(string_to_sign, six.text_type):
string_to_sign = string_to_sign.encode('utf-8')
token = base64.urlsafe_b64encode(string_to_sign)
if isinstance(token, six.binary_type):
token = token.decode('ascii')
# NOTE(chmou): This is to handle the special case with nova
# when we have the option s3_affix_tenant. We will force it to
# connect to another account than the one
# authenticated. Before people start getting worried about
# security, I should point that we are connecting with
# username/token specified by the user but instead of
# connecting to its own account we will force it to go to an
# another account. In a normal scenario if that user don't
# have the reseller right it will just fail but since the
# reseller account can connect to every account it is allowed
# by the swift_auth middleware.
force_tenant = None
if ':' in access:
access, force_tenant = access.split(':')
# Authenticate request.
creds = {'credentials': {'access': access,
'token': token,
'signature': signature}}
memcache_client = None
memcache_token_key = 's3secret/%s' % access
if self._secret_cache_duration > 0:
memcache_client = cache_from_env(environ)
cached_auth_data = None
if memcache_client:
cached_auth_data = memcache_client.get(memcache_token_key)
if cached_auth_data:
if len(cached_auth_data) == 4:
# Old versions of swift may have cached token, too,
# but we don't need it
headers, _token, tenant, secret = cached_auth_data
else:
headers, tenant, secret = cached_auth_data
if s3_auth_details['check_signature'](secret):
self._logger.debug("Cached creds valid")
else:
self._logger.debug("Cached creds invalid")
cached_auth_data = None
if not cached_auth_data:
creds_json = json.dumps(creds)
self._logger.debug('Connecting to Keystone sending this JSON: %s',
creds_json)
# NOTE(vish): We could save a call to keystone by having
# keystone return token, tenant, user, and roles
# from this call.
#
# NOTE(chmou): We still have the same problem we would need to
# change token_auth to detect if we already
# identified and not doing a second query and just
# pass it through to swiftauth in this case.
try:
# NB: requests.Response, not swob.Response
resp = self._json_request(creds_json)
except HTTPException as e_resp:
if self._delay_auth_decision:
msg = ('Received error, deferring rejection based on '
'error: %s')
self._logger.debug(msg, e_resp.status)
return self._app(environ, start_response)
else:
msg = 'Received error, rejecting request with error: %s'
self._logger.debug(msg, e_resp.status)
# NB: swob.Response, not requests.Response
return e_resp(environ, start_response)
self._logger.debug('Keystone Reply: Status: %d, Output: %s',
resp.status_code, resp.content)
try:
token = resp.json()
if 'access' in token:
headers, tenant = parse_v2_response(token)
elif 'token' in token:
headers, tenant = parse_v3_response(token)
else:
raise ValueError
if memcache_client:
user_id = headers.get('X-User-Id')
if not user_id:
raise ValueError
try:
cred_ref = self.keystoneclient.ec2.get(
user_id=user_id,
access=access)
memcache_client.set(
memcache_token_key,
(headers, tenant, cred_ref.secret),
time=self._secret_cache_duration)
self._logger.debug("Cached keystone credentials")
except Exception:
self._logger.warning("Unable to cache secret",
exc_info=True)
# Populate the environment similar to auth_token,
# so we don't have to contact Keystone again.
#
# Note that although the strings are unicode following json
# deserialization, Swift's HeaderEnvironProxy handles ensuring
# they're stored as native strings
req.environ['keystone.token_info'] = token
except (ValueError, KeyError, TypeError):
if self._delay_auth_decision:
error = ('Error on keystone reply: %d %s - '
'deferring rejection downstream')
self._logger.debug(error, resp.status_code, resp.content)
return self._app(environ, start_response)
else:
error = ('Error on keystone reply: %d %s - '
'rejecting request')
self._logger.debug(error, resp.status_code, resp.content)
return self._deny_request('InvalidURI')(
environ, start_response)
req.headers.update(headers)
tenant_to_connect = force_tenant or tenant['id']
if six.PY2 and isinstance(tenant_to_connect, six.text_type):
tenant_to_connect = tenant_to_connect.encode('utf-8')
self._logger.debug('Connecting with tenant: %s', tenant_to_connect)
new_tenant_name = '%s%s' % (self._reseller_prefix, tenant_to_connect)
environ['PATH_INFO'] = environ['PATH_INFO'].replace(account,
new_tenant_name)
return self._app(environ, start_response)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return S3Token(app, conf)
return auth_filter
| openstack/swift | swift/common/middleware/s3api/s3token.py | Python | apache-2.0 | 17,603 |
"""
Helper module that will enable lazy imports of Cocoa wrapper items.
This should improve startup times and memory usage, at the cost
of not being able to use 'from Cocoa import *'
"""
__all__ = ('ObjCLazyModule',)
import sys
import re
import struct
from objc import lookUpClass, getClassList, nosuchclass_error, loadBundle
import objc
ModuleType = type(sys)
def _loadBundle(frameworkName, frameworkIdentifier, frameworkPath):
if frameworkIdentifier is None:
bundle = loadBundle(
frameworkName,
{},
bundle_path=frameworkPath,
scan_classes=False)
else:
try:
bundle = loadBundle(
frameworkName,
{},
bundle_identifier=frameworkIdentifier,
scan_classes=False)
except ImportError:
bundle = loadBundle(
frameworkName,
{},
bundle_path=frameworkPath,
scan_classes=False)
return bundle
class GetAttrMap (object):
__slots__ = ('_container',)
def __init__(self, container):
self._container = container
def __getitem__(self, key):
try:
return getattr(self._container, key)
except AttributeError:
raise KeyError(key)
class ObjCLazyModule (ModuleType):
# Define slots for all attributes, that way they don't end up it __dict__.
__slots__ = (
'_ObjCLazyModule__bundle', '_ObjCLazyModule__enummap', '_ObjCLazyModule__funcmap',
'_ObjCLazyModule__parents', '_ObjCLazyModule__varmap', '_ObjCLazyModule__inlinelist',
'_ObjCLazyModule__aliases',
)
def __init__(self, name, frameworkIdentifier, frameworkPath, metadict, inline_list=None, initialdict={}, parents=()):
super(ObjCLazyModule, self).__init__(name)
if frameworkIdentifier is not None or frameworkPath is not None:
self.__bundle = self.__dict__['__bundle__'] = _loadBundle(name, frameworkIdentifier, frameworkPath)
pfx = name + '.'
for nm in sys.modules:
if nm.startswith(pfx):
rest = nm[len(pfx):]
if '.' in rest: continue
if sys.modules[nm] is not None:
self.__dict__[rest] = sys.modules[nm]
self.__dict__.update(initialdict)
self.__dict__.update(metadict.get('misc', {}))
self.__parents = parents
self.__varmap = metadict.get('constants')
self.__varmap_dct = metadict.get('constants_dict', {})
self.__enummap = metadict.get('enums')
self.__funcmap = metadict.get('functions')
self.__aliases = metadict.get('aliases')
self.__inlinelist = inline_list
self.__expressions = metadict.get('expressions')
self.__expressions_mapping = GetAttrMap(self)
self.__load_cftypes(metadict.get('cftypes'))
if metadict.get('protocols') is not None:
self.__dict__['protocols'] = ModuleType('%s.protocols'%(name,))
self.__dict__['protocols'].__dict__.update(
metadict['protocols'])
for p in objc.protocolsForProcess():
setattr(self.__dict__['protocols'], p.__name__, p)
def __dir__(self):
return self.__all__
def __getattr__(self, name):
if name == "__all__":
# Load everything immediately
value = self.__calc_all()
self.__dict__[name] = value
return value
# First try parent module, as we had done
# 'from parents import *'
for p in self.__parents:
try:
value = getattr(p, name)
except AttributeError:
pass
else:
self.__dict__[name] = value
return value
# Check if the name is a constant from
# the metadata files
try:
value = self.__get_constant(name)
except AttributeError:
pass
else:
self.__dict__[name] = value
return value
# Then check if the name is class
try:
value = lookUpClass(name)
except nosuchclass_error:
pass
else:
self.__dict__[name] = value
return value
# Finally give up and raise AttributeError
raise AttributeError(name)
def __calc_all(self):
all = set()
# Ensure that all dynamic entries get loaded
if self.__varmap_dct:
for nm in self.__varmap_dct:
try:
getattr(self, nm)
except AttributeError:
pass
if self.__varmap:
for nm in re.findall(r"\$([A-Z0-9a-z_]*)(?:@[^$]*)?(?=\$)", self.__varmap):
try:
getattr(self, nm)
except AttributeError:
pass
if self.__enummap:
for nm in re.findall(r"\$([A-Z0-9a-z_]*)@[^$]*(?=\$)", self.__enummap):
try:
getattr(self, nm)
except AttributeError:
pass
if self.__funcmap:
for nm in self.__funcmap:
try:
getattr(self, nm)
except AttributeError:
pass
if self.__expressions:
for nm in self.__expressions:
try:
getattr(self, nm)
except AttributeError:
pass
if self.__aliases:
for nm in self.__aliases:
try:
getattr(self, nm)
except AttributeError:
pass
# Add all names that are already in our __dict__
all.update(self.__dict__)
# Merge __all__of parents ('from parent import *')
for p in self.__parents:
all.update(getattr(p, '__all__', ()))
# Add all class names
all.update(cls.__name__ for cls in getClassList())
        return [ v for v in all if not v.startswith('_') ]
def __get_constant(self, name):
# FIXME: Loading variables and functions requires too much
# code at the moment, the objc API can be adjusted for
# this later on.
if self.__varmap_dct:
if name in self.__varmap_dct:
tp = self.__varmap_dct[name]
return objc._loadConstant(name, tp, False)
if self.__varmap:
m = re.search(r"\$%s(@[^$]*)?\$"%(name,), self.__varmap)
if m is not None:
tp = m.group(1)
if tp is None:
tp = '@'
else:
tp = tp[1:]
d = {}
if tp.startswith('='):
tp = tp[1:]
magic = True
else:
magic = False
#try:
return objc._loadConstant(name, tp, magic)
#except Exception as exc:
# print "LOAD %r %r %r -> raise %s"%(name, tp, magic, exc)
# raise
if self.__enummap:
m = re.search(r"\$%s@([^$]*)\$"%(name,), self.__enummap)
if m is not None:
val = m.group(1)
if val.startswith("'"):
if isinstance(val, bytes):
# Python 2.x
val, = struct.unpack('>l', val[1:-1])
else:
# Python 3.x
val, = struct.unpack('>l', val[1:-1].encode('latin1'))
elif '.' in val:
val = float(val)
else:
val = int(val)
return val
if self.__funcmap:
if name in self.__funcmap:
info = self.__funcmap[name]
func_list = [ (name,) + info ]
d = {}
objc.loadBundleFunctions(self.__bundle, d, func_list)
if name in d:
return d[name]
if self.__inlinelist is not None:
try:
objc.loadFunctionList(
self.__inlinelist, d, func_list, skip_undefined=False)
except objc.error:
pass
else:
if name in d:
return d[name]
if self.__expressions:
if name in self.__expressions:
info = self.__expressions[name]
try:
return eval(info, {}, self.__expressions_mapping)
except NameError:
pass
if self.__aliases:
if name in self.__aliases:
alias = self.__aliases[name]
if alias == 'ULONG_MAX':
return (sys.maxsize * 2) + 1
elif alias == 'LONG_MAX':
return sys.maxsize
elif alias == 'LONG_MIN':
return -sys.maxsize-1
return getattr(self, alias)
raise AttributeError(name)
def __load_cftypes(self, cftypes):
if not cftypes: return
for name, type, gettypeid_func, tollfree in cftypes:
if tollfree:
for nm in tollfree.split(','):
try:
objc.lookUpClass(nm)
except objc.error:
pass
else:
tollfree = nm
break
try:
v = objc.registerCFSignature(name, type, None, tollfree)
if v is not None:
self.__dict__[name] = v
continue
except objc.nosuchclass_error:
pass
try:
func = getattr(self, gettypeid_func)
except AttributeError:
# GetTypeID function not found, this is either
# a CFType that isn't present on the current
# platform, or a CFType without a public GetTypeID
# function. Proxy using the generic CFType
if tollfree is None:
v = objc.registerCFSignature(name, type, None, 'NSCFType')
if v is not None:
self.__dict__[name] = v
continue
v = objc.registerCFSignature(name, type, func())
if v is not None:
self.__dict__[name] = v
| albertz/music-player | mac/pyobjc-core/Lib/objc/_lazyimport.py | Python | bsd-2-clause | 10,756 |
# this: every_hour.py
# by: Poul Staugaard (poul(dot)staugaard(at)gmail...)
# URL: http://code.google.com/p/giewiki
# ver.: 1.13
import cgi
import codecs
import datetime
import difflib
import glob
import hashlib
import logging
import os
import re
import urllib
import urlparse
import uuid
import xml.dom.minidom
from new import instance, classobj
from os import path
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import mail
from google.appengine.api import namespace_manager
from giewikidb import Tiddler,SiteInfo,ShadowTiddler,EditLock,Page,PageTemplate,DeletionLog,Comment,Include,Note,Message,Group,GroupMember,UrlImport,UploadedFile,UserProfile,PenName,SubDomain,LogEntry,CronJob
from giewikidb import truncateModel, truncateAllData, HasGroupAccess, ReadAccessToPage, AccessToPage, IsSoleOwner, Upgrade, CopyIntoNamespace, dropCronJob
class EveryHour(webapp.RequestHandler):
def get(self):
for cj in CronJob.all():
if cj.when < datetime.datetime.now():
tdlr = Tiddler.all().filter('id',cj.tiddler).filter('current',True).get()
if tdlr is None:
logging.warning("Tiddler not found")
else:
if cj.action == 'promote':
logging.info("CJob:promote " + cj.tiddler)
if hasattr(tdlr,'deprecated'):
delattr(tdlr,'deprecated')
tdlr.tags = tdlr.tags.replace('@promote@','@promoted@')
tdlr.put()
dts = Tiddler.all().filter('page', tdlr.page).filter('title','DefaultTiddlers').filter('current', True).get()
if dts is None:
logging.warning("DefaultTiddlers not found for page " + tdlr.page)
else:
dtparts = dts.text.split('\n')
dtparts.insert(cj.position,'[[' + tdlr.title + ']]')
dts.text = '\n'.join(dtparts)
dts.put()
logging.info("Tiddler " + tdlr.title + " added to DefaultTiddlers")
if cj.action == 'announce':
logging.info("CJob/announce " + cj.tiddler)
if hasattr(tdlr,'deprecated'):
delattr(tdlr,'deprecated')
tdlr.tags = tdlr.tags.replace('@announce@','@announced@')
tdlr.put()
dts = Tiddler.all().filter('page', tdlr.page).filter('title','MainMenu').filter('current', True).get()
if dts is None:
logging.warning("MainMenu not found for page " + tdlr.page)
else:
dtparts = dts.text.split('\n')
dtparts.insert(cj.position,'[[' + tdlr.title + ']]')
dts.text = '\n'.join(dtparts)
dts.put()
logging.info("Tiddler " + tdlr.title + " added to MainMenu")
if cj.action == 'demote' or cj.action == 'deprecate':
logging.info("CJob:demote " + cj.tiddler)
dts = Tiddler.all().filter('page', tdlr.page).filter('title','DefaultTiddlers').filter('current', True).get()
if not dts is None:
ss = '[[' + tdlr.title + ']]\n'
dts.text = dts.text.replace(ss,'')
dts.put()
dts = Tiddler.all().filter('page', tdlr.page).filter('title','MainMenu').filter('current', True).get()
if not dts is None:
ss = '[[' + tdlr.title + ']]\n'
dts.text = dts.text.replace(ss,'')
dts.put()
if cj.action == 'deprecate':
logging.info("CJob:deprecate " + cj.tiddler)
setattr(tdlr,'deprecated',True)
tdlr.put()
if cj.action == 'revert':
logging.info("CJob: revert " + str(cj.tiddler) + " to V#" + str(cj.position))
rvn = cj.position if cj.position > 0 else tdlr.version - 1
tdlrvr = Tiddler.all().filter('id',cj.tiddler).filter('version',rvn).get()
if tdlrvr is None:
logging.warning("Version " + str(rvn) + " of tiddler " + tdlr.page + "#" + tdlr.title + " not found!")
else:
tdlr.current = False
tdlr.put()
tdlrvr.current = True
tdlrvr.vercnt = tdlr.vercnt
tdlrvr.reverted = datetime.datetime.now()
tdlrvr.reverted_by = None
tdlrvr.put()
cj.delete()
application = webapp.WSGIApplication( [('/every_1_hours', EveryHour)], debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| wangjun/giewiki | every_hour.py | Python | bsd-2-clause | 4,271 |
from lino.api import dd
class Persons(dd.Table):
model = 'app.Person'
column_names = 'name *'
detail_layout = """
id name
owned_places managed_places
VisitsByPerson
MealsByPerson
"""
class Places(dd.Table):
model = 'app.Place'
detail_layout = """
id name ceo
owners
VisitsByPlace
"""
class Restaurants(dd.Table):
model = 'app.Restaurant'
detail_layout = """
id place serves_hot_dogs
cooks
MealsByRestaurant
"""
class VisitsByPlace(dd.Table):
model = 'app.Visit'
master_key = 'place'
column_names = 'person purpose'
class VisitsByPerson(dd.Table):
model = 'app.Visit'
master_key = 'person'
column_names = 'place purpose'
class MealsByRestaurant(dd.Table):
model = 'app.Meal'
master_key = 'restaurant'
column_names = 'person what'
class MealsByPerson(dd.Table):
model = 'app.Meal'
master_key = 'person'
column_names = 'restaurant what'
| lino-framework/book | lino_book/projects/nomti/app/desktop.py | Python | bsd-2-clause | 979 |
"""Test module for wiring with invalid type of marker for attribute injection."""
from dependency_injector.wiring import Closing
service = Closing["service"]
| rmk135/dependency_injector | tests/unit/samples/wiringstringids/module_invalid_attr_injection.py | Python | bsd-3-clause | 161 |
from datadog import initialize, api
options = {
'api_key': '9775a026f1ca7d1c6c5af9d94d9595a4',
'app_key': '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff'
}
initialize(**options)
# Get a downtime
api.Downtime.get(2910)
| jhotta/documentation | code_snippets/api-monitor-get-downtime.py | Python | bsd-3-clause | 224 |
import os
import cStringIO as StringIO
git_binary = "git"
verbose_mode = False
try:
from subprocess import Popen, PIPE
def run_cmd(cmd):
p = Popen(cmd, shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
close_fds=True)
return p.stdin, p.stdout, p.stderr
except ImportError:
def run_cmd(cmd):
        return os.popen3(cmd)
class GitData (object):
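    # GitData runs a single git command ("git <command_string>") with the working
    # directory set to `location`, and exposes the child process through a
    # file-like interface: read() streams its stdout, write()/flush() feed its
    # stdin, and the command is only started on the first read or write.
    #
    # Illustrative usage (repository path and subcommand are hypothetical):
    #   data = GitData('/path/to/repo.git', 'cat-file commit HEAD')
    #   header = data.read()
    #   data.close()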
def __init__(self, location, command_string):
self._cmd = "%s %s" % (git_binary, command_string)
self._location = location
self._data = None
def open(self):
if verbose_mode:
print " >> %s" % (self._cmd)
cwd = os.getcwd()
os.chdir(self._location)
self._in, self._data, self._err = run_cmd(self._cmd)
self._read = 0
os.chdir(cwd)
def tell(self):
return self._read
def read(self, l=-1):
if self._data is None:
self.open()
data = self._data.read(l)
self._read += len(data)
return data
def write(self, data):
if self._data is None:
self.open()
self._in.write(data)
def flush(self):
if self._data is not None:
self._in.flush()
def close_stdin(self):
if self._data is not None:
self._in.close()
def close(self):
if self._data is not None:
self._in.close()
self._data.close()
self._err.close()
self._data = None
def reopen(self):
self.close()
self.open()
class FakeData (object):
def __init__(self, data):
self._data = data
self._string = None
def open(self):
self._string = StringIO.StringIO(self._data)
def read(self, l=-1):
if self._string is None:
self.open()
return self._string.read(l)
def close(self):
if self._string is not None:
self._string.close()
self._string = None
def reopen(self):
self.close()
self.open()
| slonopotamus/git_svn_server | GitSvnServer/vcs/git/data.py | Python | bsd-3-clause | 2,086 |
# -*- coding: utf-8 -*-
from functools import update_wrapper
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from cms import constants
from cms.utils.compat.urls import urljoin
__all__ = ['get_cms_setting']
class VERIFIED: pass # need a unique identifier for CMS_LANGUAGES
def default(name):
def decorator(wrapped):
def wrapper():
if hasattr(settings, name):
return getattr(settings, name)
return wrapped()
update_wrapper(wrapper, wrapped)
        return wrapper
return decorator
DEFAULTS = {
'TEMPLATE_INHERITANCE': True,
'PLACEHOLDER_CONF': {},
'PERMISSION': False,
# Whether to use raw ID lookups for users when PERMISSION is True
'RAW_ID_USERS': False,
'PUBLIC_FOR': 'all',
'CONTENT_CACHE_DURATION': 60,
'APPHOOKS': [],
'TOOLBARS': [],
'SITE_CHOICES_CACHE_KEY': 'CMS:site_choices',
'PAGE_CHOICES_CACHE_KEY': 'CMS:page_choices',
'MEDIA_PATH': 'cms/',
'PAGE_MEDIA_PATH': 'cms_page_media/',
'TITLE_CHARACTER': '+',
'PAGE_CACHE': True,
'PLACEHOLDER_CACHE': True,
'PLUGIN_CACHE': True,
'CACHE_PREFIX': 'cms-',
'PLUGIN_PROCESSORS': [],
'PLUGIN_CONTEXT_PROCESSORS': [],
'UNIHANDECODE_VERSION': None,
'UNIHANDECODE_DECODERS': ['ja', 'zh', 'kr', 'vn', 'diacritic'],
'UNIHANDECODE_DEFAULT_DECODER': 'diacritic',
'MAX_PAGE_PUBLISH_REVERSIONS': 10,
'MAX_PAGE_HISTORY_REVERSIONS': 15,
'TOOLBAR_URL__EDIT_ON': 'edit',
'TOOLBAR_URL__EDIT_OFF': 'edit_off',
'TOOLBAR_URL__BUILD': 'build',
'ADMIN_NAMESPACE': 'admin',
}
def get_cache_durations():
return {
'menus': getattr(settings, 'MENU_CACHE_DURATION', 60 * 60),
'content': get_cms_setting('CONTENT_CACHE_DURATION'),
'permissions': 60 * 60,
}
@default('CMS_MEDIA_ROOT')
def get_media_root():
return os.path.join(settings.MEDIA_ROOT, get_cms_setting('MEDIA_PATH'))
@default('CMS_MEDIA_URL')
def get_media_url():
return urljoin(settings.MEDIA_URL, get_cms_setting('MEDIA_PATH'))
@default('CMS_TOOLBAR_URL__EDIT_ON')
def get_toolbar_url__edit_on():
return get_cms_setting('TOOLBAR_URL__EDIT_ON')
@default('CMS_TOOLBAR_URL__EDIT_OFF')
def get_toolbar_url__edit_off():
return get_cms_setting('TOOLBAR_URL__EDIT_OFF')
@default('CMS_TOOLBAR_URL__BUILD')
def get_toolbar_url__build():
return get_cms_setting('TOOLBAR_URL__BUILD')
def get_templates():
from cms.utils.django_load import load_from_file
if getattr(settings, 'CMS_TEMPLATES_DIR', False):
tpldir = getattr(settings, 'CMS_TEMPLATES_DIR', False)
        # CMS_TEMPLATES_DIR can either be a string pointing to the templates directory
# or a dictionary holding 'site: template dir' entries
if isinstance(tpldir, dict):
tpldir = tpldir[settings.SITE_ID]
        # We must extract the relative path of CMS_TEMPLATES_DIR to the nearest
        # valid templates directory. Here we mimic what the filesystem and
# app_directories template loaders do
prefix = ''
# Relative to TEMPLATE_DIRS for filesystem loader
for basedir in settings.TEMPLATE_DIRS:
if tpldir.find(basedir) == 0:
prefix = tpldir.replace(basedir + os.sep, '')
break
# Relative to 'templates' directory that app_directory scans
if not prefix:
components = tpldir.split(os.sep)
try:
prefix = os.path.join(*components[components.index('templates') + 1:])
except ValueError:
# If templates is not found we use the directory name as prefix
# and hope for the best
prefix = os.path.basename(tpldir)
config_path = os.path.join(tpldir, '__init__.py')
# Try to load templates list and names from the template module
# If module file is not present skip configuration and just dump the filenames as templates
if config_path:
template_module = load_from_file(config_path)
templates = [(os.path.join(prefix, data[0].strip()), data[1]) for data in template_module.TEMPLATES.items()]
else:
templates = list((os.path.join(prefix, tpl), tpl) for tpl in os.listdir(tpldir))
else:
templates = list(getattr(settings, 'CMS_TEMPLATES', []))
if get_cms_setting('TEMPLATE_INHERITANCE'):
templates.append((constants.TEMPLATE_INHERITANCE_MAGIC, _(constants.TEMPLATE_INHERITANCE_LABEL)))
return templates
def _ensure_languages_settings(languages):
valid_language_keys = ['code', 'name', 'fallbacks', 'hide_untranslated', 'redirect_on_fallback', 'public']
required_language_keys = ['code', 'name']
simple_defaults = ['public', 'redirect_on_fallback', 'hide_untranslated']
if not isinstance(languages, dict):
raise ImproperlyConfigured(
"CMS_LANGUAGES must be a dictionary with site IDs and 'default'"
" as keys. Please check the format.")
defaults = languages.pop('default', {})
default_fallbacks = defaults.get('fallbacks')
needs_fallbacks = []
for key in defaults:
if key not in valid_language_keys:
raise ImproperlyConfigured("CMS_LANGUAGES has an invalid property in the default properties: %s" % key)
for key in simple_defaults:
if key not in defaults:
defaults[key] = True
for site, language_list in languages.items():
if not isinstance(site, six.integer_types):
raise ImproperlyConfigured(
"CMS_LANGUAGES can only be filled with integers (site IDs) and 'default'"
" for default values. %s is not a valid key." % site)
for language_object in language_list:
for required_key in required_language_keys:
if required_key not in language_object:
raise ImproperlyConfigured("CMS_LANGUAGES has a language which is missing the required key %r "
"in site %r" % (key, site))
language_code = language_object['code']
for key in language_object:
if key not in valid_language_keys:
raise ImproperlyConfigured(
"CMS_LANGUAGES has invalid key %r in language %r in site %r" % (key, language_code, site)
)
if 'fallbacks' not in language_object:
if default_fallbacks:
language_object['fallbacks'] = default_fallbacks
else:
needs_fallbacks.append((site, language_object))
for key in simple_defaults:
if key not in language_object:
language_object[key] = defaults[key]
site_fallbacks = {}
for site, language_object in needs_fallbacks:
if site not in site_fallbacks:
site_fallbacks[site] = [lang['code'] for lang in languages[site] if lang['public']]
language_object['fallbacks'] = [lang_code for lang_code in site_fallbacks[site] if
lang_code != language_object['code']]
languages['default'] = defaults
languages[VERIFIED] = True # this will be busted by SettingsOverride and cause a re-check
return languages
def get_languages():
if not isinstance(settings.SITE_ID, six.integer_types):
raise ImproperlyConfigured(
"SITE_ID must be an integer"
)
if not settings.USE_I18N:
return _ensure_languages_settings(
{settings.SITE_ID: [{'code': settings.LANGUAGE_CODE, 'name': settings.LANGUAGE_CODE}]})
if settings.LANGUAGE_CODE not in dict(settings.LANGUAGES):
raise ImproperlyConfigured(
'LANGUAGE_CODE "%s" must have a matching entry in LANGUAGES' % settings.LANGUAGE_CODE
)
languages = getattr(settings, 'CMS_LANGUAGES', {
settings.SITE_ID: [{'code': code, 'name': _(name)} for code, name in settings.LANGUAGES]
})
if VERIFIED in languages:
return languages
return _ensure_languages_settings(languages)
def get_unihandecode_host():
host = getattr(settings, 'CMS_UNIHANDECODE_HOST', None)
if not host:
return host
if host.endswith('/'):
return host
else:
return host + '/'
COMPLEX = {
'CACHE_DURATIONS': get_cache_durations,
'MEDIA_ROOT': get_media_root,
'MEDIA_URL': get_media_url,
# complex because not prefixed by CMS_
'TEMPLATES': get_templates,
'LANGUAGES': get_languages,
'UNIHANDECODE_HOST': get_unihandecode_host,
'CMS_TOOLBAR_URL__EDIT_ON': get_toolbar_url__edit_on,
'CMS_TOOLBAR_URL__EDIT_OFF': get_toolbar_url__edit_off,
'CMS_TOOLBAR_URL__BUILD': get_toolbar_url__build,
}
def get_cms_setting(name):
if name in COMPLEX:
return COMPLEX[name]()
else:
return getattr(settings, 'CMS_%s' % name, DEFAULTS[name])
def get_site_id(site):
from django.contrib.sites.models import Site
if isinstance(site, Site):
return site.id
try:
return int(site)
except (TypeError, ValueError):
pass
return settings.SITE_ID
| samirasnoun/django_cms_gallery_image | cms/utils/conf.py | Python | bsd-3-clause | 9,303 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Apple's SunSpider JavaScript benchmark."""
import collections
import json
import os
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class SunSpiderMeasurement(page_measurement.PageMeasurement):
def CreatePageSet(self, _, options):
return page_set.PageSet.FromDict({
'serving_dirs': ['../../../chrome/test/data/sunspider/'],
'pages': [
{ 'url': 'file:///../../../chrome/test/data/sunspider/'
'sunspider-1.0/driver.html' }
]
}, os.path.abspath(__file__))
def MeasurePage(self, _, tab, results):
js_is_done = """
window.location.pathname.indexOf('results.html') >= 0"""
def _IsDone():
return tab.EvaluateJavaScript(js_is_done)
util.WaitFor(_IsDone, 300, poll_interval=5)
js_get_results = 'JSON.stringify(output);'
js_results = json.loads(tab.EvaluateJavaScript(js_get_results))
r = collections.defaultdict(list)
totals = []
# js_results is: [{'foo': v1, 'bar': v2},
# {'foo': v3, 'bar': v4},
# ...]
for result in js_results:
total = 0
for key, value in result.iteritems():
r[key].append(value)
total += value
totals.append(total)
for key, values in r.iteritems():
results.Add(key, 'ms', values, data_type='unimportant')
results.Add('Total', 'ms', totals)
| jing-bao/pa-chromium | tools/perf/perf_tools/sunspider.py | Python | bsd-3-clause | 1,597 |
# -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
{% if cookiecutter.use_mailhog == 'y' and cookiecutter.use_docker == 'y' %}
- Use mailhog for emails
{% else %}
- Use console backend for emails
{% endif %}
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
{% if cookiecutter.use_mailhog == 'y' and cookiecutter.use_docker == 'y' %}
EMAIL_HOST = env("EMAIL_HOST", default='mailhog')
{% else %}
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
{% endif %}
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
{% if cookiecutter.use_celery == 'y' %}
########## CELERY
# In development, all tasks will be executed locally by blocking until the task returns
CELERY_ALWAYS_EAGER = True
########## END CELERY
{% endif %}
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| schacki/cookiecutter-django | {{cookiecutter.project_slug}}/config/settings/local.py | Python | bsd-3-clause | 2,678 |
# (C) Datadog, Inc. 2014-2016
# (C) Cory Watson <[email protected]> 2015-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
from urlparse import urlparse
import re
# 3rd party
import requests
# project
from checks import AgentCheck
DEFAULT_MAX_METRICS = 350
PATH = "path"
ALIAS = "alias"
TYPE = "type"
TAGS = "tags"
GAUGE = "gauge"
RATE = "rate"
COUNTER = "counter"
DEFAULT_TYPE = GAUGE
SUPPORTED_TYPES = {
GAUGE: AgentCheck.gauge,
RATE: AgentCheck.rate,
COUNTER: AgentCheck.increment,
}
DEFAULT_METRIC_NAMESPACE = "go_expvar"
# See http://golang.org/pkg/runtime/#MemStats
DEFAULT_GAUGE_MEMSTAT_METRICS = [
# General statistics
"Alloc", "TotalAlloc",
# Main allocation heap statistics
"HeapAlloc", "HeapSys", "HeapIdle", "HeapInuse",
"HeapReleased", "HeapObjects",
]
DEFAULT_RATE_MEMSTAT_METRICS = [
# General statistics
"Lookups", "Mallocs", "Frees",
# Garbage collector statistics
"PauseTotalNs", "NumGC",
]
DEFAULT_METRICS = [{PATH: "memstats/%s" % path, TYPE: GAUGE} for path in DEFAULT_GAUGE_MEMSTAT_METRICS] +\
[{PATH: "memstats/%s" % path, TYPE: RATE} for path in DEFAULT_RATE_MEMSTAT_METRICS]
GO_EXPVAR_URL_PATH = "/debug/vars"
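# Illustrative instance configuration (YAML) for this check; the URL, tags and
# aliases are hypothetical, while the keys mirror what _load()/parse_expvar_data()
# read from each instance:
#
#   instances:
#     - expvar_url: http://localhost:8080/debug/vars
#       namespace: my_service            # metric prefix, defaults to "go_expvar"
#       max_returned_metrics: 350
#       tags:
#         - "service:my_service"
#       metrics:
#         - path: memstats/PauseTotalNs
#           type: rate                   # gauge (default), rate or counter
#           alias: my_service.gc.pause_total_ns
#         - path: memstats/BySize/.*/Mallocs   # path segments may be regexes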
class GoExpvar(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self._last_gc_count = defaultdict(int)
def _get_data(self, url, instance):
ssl_params = {
'ssl': instance.get('ssl'),
'ssl_keyfile': instance.get('ssl_keyfile'),
'ssl_certfile': instance.get('ssl_certfile'),
'ssl_verify': instance.get('ssl_verify'),
}
for key, param in ssl_params.items():
if param is None:
del ssl_params[key]
# Load SSL configuration, if available.
# ssl_verify can be a bool or a string (http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification)
if isinstance(ssl_params.get('ssl_verify'), bool) or isinstance(ssl_params.get('ssl_verify'), basestring):
verify = ssl_params.get('ssl_verify')
else:
verify = None
if ssl_params.get('ssl_certfile') and ssl_params.get('ssl_keyfile'):
cert = (ssl_params.get('ssl_certfile'), ssl_params.get('ssl_keyfile'))
elif ssl_params.get('ssl_certfile'):
cert = ssl_params.get('ssl_certfile')
else:
cert = None
resp = requests.get(
url,
timeout=10,
verify=verify,
cert=cert
)
resp.raise_for_status()
return resp.json()
def _load(self, instance):
url = instance.get('expvar_url')
if not url:
raise Exception('GoExpvar instance missing "expvar_url" value.')
parsed_url = urlparse(url)
# if no path is specified we use the default one
if not parsed_url.path:
url = parsed_url._replace(path=GO_EXPVAR_URL_PATH).geturl()
tags = instance.get('tags', [])
tags.append("expvar_url:%s" % url)
data = self._get_data(url, instance)
metrics = DEFAULT_METRICS + instance.get("metrics", [])
max_metrics = instance.get("max_returned_metrics", DEFAULT_MAX_METRICS)
namespace = instance.get('namespace', DEFAULT_METRIC_NAMESPACE)
return data, tags, metrics, max_metrics, url, namespace
def get_gc_collection_histogram(self, data, tags, url, namespace):
num_gc = data.get("memstats", {}).get("NumGC")
pause_hist = data.get("memstats", {}).get("PauseNs")
last_gc_count = self._last_gc_count[url]
if last_gc_count == num_gc:
# No GC has run. Do nothing
return
start = last_gc_count % 256
end = (num_gc + 255) % 256 + 1
if start < end:
values = pause_hist[start:end]
else:
values = pause_hist[start:] + pause_hist[:end]
self._last_gc_count[url] = num_gc
for value in values:
self.histogram(
self.normalize("memstats.PauseNs", namespace, fix_case=True),
value, tags=tags)
def check(self, instance):
data, tags, metrics, max_metrics, url, namespace = self._load(instance)
self.get_gc_collection_histogram(data, tags, url, namespace)
self.parse_expvar_data(data, tags, metrics, max_metrics, namespace)
def parse_expvar_data(self, data, tags, metrics, max_metrics, namespace):
'''
Report all the metrics based on the configuration in instance
If a metric is not well configured or is not present in the payload,
continue processing metrics but log the information to the info page
'''
count = 0
for metric in metrics:
path = metric.get(PATH)
metric_type = metric.get(TYPE, DEFAULT_TYPE)
metric_tags = list(metric.get(TAGS, []))
metric_tags += tags
alias = metric.get(ALIAS)
if not path:
self.warning("Metric %s has no path" % metric)
continue
if metric_type not in SUPPORTED_TYPES:
self.warning("Metric type %s not supported for this check" % metric_type)
continue
keys = path.split("/")
values = self.deep_get(data, keys)
if len(values) == 0:
self.warning("No results matching path %s" % path)
continue
tag_by_path = alias is not None
for traversed_path, value in values:
actual_path = ".".join(traversed_path)
path_tag = ["path:%s" % actual_path] if tag_by_path else []
metric_name = alias or self.normalize(actual_path, namespace, fix_case=True)
try:
float(value)
except ValueError:
self.log.warning("Unreportable value for path %s: %s" % (path, value))
continue
if count >= max_metrics:
self.warning("Reporting more metrics than the allowed maximum. "
"Please contact [email protected] for more information.")
return
SUPPORTED_TYPES[metric_type](self, metric_name, value, metric_tags + path_tag)
count += 1
def deep_get(self, content, keys, traversed_path=None):
'''
        Allows retrieving content nested several layers deep inside a dict/list
Examples: -content: {
"key1": {
"key2" : [
{
"name" : "object1",
"value" : 42
},
{
"name" : "object2",
"value" : 72
}
]
}
}
-keys: ["key1", "key2", "1", "value"] would return [(["key1", "key2", "1", "value"], 72)]
-keys: ["key1", "key2", "1", "*"] would return [(["key1", "key2", "1", "value"], 72), (["key1", "key2", "1", "name"], "object2")]
-keys: ["key1", "key2", "*", "value"] would return [(["key1", "key2", "1", "value"], 72), (["key1", "key2", "0", "value"], 42)]
'''
if traversed_path is None:
traversed_path = []
if keys == []:
return [(traversed_path, content)]
key = keys[0]
regex = "".join(["^", key, "$"])
try:
key_rex = re.compile(regex)
except Exception:
self.warning("Cannot compile regex: %s" % regex)
return []
results = []
for new_key, new_content in self.items(content):
if key_rex.match(new_key):
results.extend(self.deep_get(new_content, keys[1:], traversed_path + [str(new_key)]))
return results
def items(self, object):
if isinstance(object, list):
for new_key, new_content in enumerate(object):
yield str(new_key), new_content
elif isinstance(object, dict):
for new_key, new_content in object.iteritems():
yield str(new_key), new_content
else:
self.log.warning("Could not parse this object, check the json"
"served by the expvar")
| varlib1/servermall | go_expvar/check.py | Python | bsd-3-clause | 8,833 |
# Copyright (c) 2017,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===============================
Sigma to Pressure Interpolation
===============================
By using `metpy.calc.log_interp`, data with sigma as the vertical coordinate can be
interpolated to isobaric coordinates.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from netCDF4 import Dataset, num2date
from metpy.cbook import get_test_data
from metpy.interpolate import log_interpolate_1d
from metpy.plots import add_metpy_logo, add_timestamp
from metpy.units import units
######################################
# **Data**
#
# The data for this example comes from the outer domain of a WRF-ARW model forecast
# initialized at 1200 UTC on 03 June 1980. Model data courtesy Matthew Wilson, Valparaiso
# University Department of Geography and Meteorology.
data = Dataset(get_test_data('wrf_example.nc', False))
lat = data.variables['lat'][:]
lon = data.variables['lon'][:]
time = data.variables['time']
vtimes = num2date(time[:], time.units)
temperature = units.Quantity(data.variables['temperature'][:], 'degC')
pressure = units.Quantity(data.variables['pressure'][:], 'Pa')
hgt = units.Quantity(data.variables['height'][:], 'meter')
####################################
# Array of desired pressure levels
plevs = [700.] * units.hPa
#####################################
# **Interpolate The Data**
#
# Now that the data is ready, we can interpolate to the new isobaric levels. The data is
# interpolated from the irregular pressure values for each sigma level to the new input
# mandatory isobaric levels. `mpcalc.log_interp` will interpolate over a specified dimension
# with the `axis` argument. In this case, `axis=1` will correspond to interpolation on the
# vertical axis. The interpolated data is output in a list, so we will pull out each
# variable for plotting.
height, temp = log_interpolate_1d(plevs, pressure, hgt, temperature, axis=1)
####################################
# **Plotting the Data for 700 hPa.**
# Set up our projection
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
# Set the forecast hour
FH = 1
# Create the figure and grid for subplots
fig = plt.figure(figsize=(17, 12))
add_metpy_logo(fig, 470, 320, size='large')
# Plot 700 hPa
ax = plt.subplot(111, projection=crs)
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES, linewidth=0.5)
# Plot the heights
cs = ax.contour(lon, lat, height[FH, 0, :, :], transform=ccrs.PlateCarree(),
colors='k', linewidths=1.0, linestyles='solid')
cs.clabel(fontsize=10, inline=1, inline_spacing=7, fmt='%i', rightside_up=True,
use_clabeltext=True)
# Contour the temperature
cf = ax.contourf(lon, lat, temp[FH, 0, :, :], range(-20, 20, 1), cmap=plt.cm.RdBu_r,
transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Celsius', size='x-large')
ax.set_extent([-106.5, -90.4, 34.5, 46.75], crs=ccrs.PlateCarree())
# Make the axis title
ax.set_title(f'{plevs[0]:~.0f} Heights (m) and Temperature (C)', loc='center', fontsize=10)
# Set the figure title
fig.suptitle(f'WRF-ARW Forecast VALID: {vtimes[FH]} UTC', fontsize=14)
add_timestamp(ax, vtimes[FH], y=0.02, high_contrast=True)
plt.show()
| metpy/MetPy | v1.1/_downloads/0f93e682cc461be360e2fd037bf1fb7e/sigma_to_pressure_interpolation.py | Python | bsd-3-clause | 3,493 |
#
# cocos2d
# http://cocos2d.org
#
from __future__ import division, print_function, unicode_literals
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from cocos.director import director
from cocos.layer import Layer, ColorLayer
from cocos.scene import Scene
from cocos.scenes.transitions import *
from cocos.actions import *
from cocos.sprite import Sprite
import pyglet
from pyglet import gl, font
from pyglet.window import key
class ControlLayer(Layer):
is_event_handler = True #: enable pyglet's events
def __init__( self ):
super(ControlLayer, self).__init__()
self.text_title = pyglet.text.Label("Transition Demos",
font_size=32,
x=5,
y=director.get_window_size()[1],
anchor_x=font.Text.LEFT,
anchor_y=font.Text.TOP )
self.text_subtitle = pyglet.text.Label( transition_list[current_transition].__name__,
font_size=18,
multiline=True,
width=600,
x=5,
y=director.get_window_size()[1] - 80,
anchor_x=font.Text.LEFT,
anchor_y=font.Text.TOP )
self.text_help = pyglet.text.Label("Press LEFT / RIGHT for prev/next test, ENTER to restart test",
font_size=16,
x=director.get_window_size()[0] // 2,
y=20,
anchor_x=font.Text.CENTER,
anchor_y=font.Text.CENTER)
def draw( self ):
self.text_title.draw()
self.text_subtitle.draw()
self.text_help.draw()
def on_key_press( self, k , m ):
global current_transition, control_p
if k == key.LEFT:
current_transition = (current_transition-1)%len(transition_list)
elif k == key.RIGHT:
current_transition = (current_transition+1)%len(transition_list)
elif k == key.ENTER:
director.replace( transition_list[current_transition](
(control_list[(control_p+1)%len(control_list)] ),
1.25)
)
control_p = (control_p + 1) % len(control_list)
return True
if k in (key.LEFT, key.RIGHT ):
self.text_subtitle.text = transition_list[current_transition].__name__
class GrossiniLayer(Layer):
def __init__( self ):
super( GrossiniLayer, self ).__init__()
g = Sprite( 'grossini.png')
g.position = (320,240)
rot = RotateBy( 360, 4 )
g.do( Repeat( rot + Reverse(rot) ) )
self.add( g )
class GrossiniLayer2(Layer):
def __init__( self ):
super( GrossiniLayer2, self ).__init__()
rot = Rotate( 360, 5 )
g1 = Sprite( 'grossinis_sister1.png' )
g1.position = (490,240)
g2 = Sprite( 'grossinis_sister2.png' )
g2.position = (140,240)
g1.do( Repeat( rot + Reverse(rot) ) )
g2.do( Repeat( rot + Reverse(rot) ) )
self.add( g1 )
self.add( g2 )
if __name__ == "__main__":
director.init(resizable=True)
# director.window.set_fullscreen(True)
transition_list = [
# ActionTransitions
RotoZoomTransition,
JumpZoomTransition,
SplitColsTransition,
SplitRowsTransition,
MoveInLTransition,
MoveInRTransition,
MoveInBTransition,
MoveInTTransition,
SlideInLTransition,
SlideInRTransition,
SlideInBTransition,
SlideInTTransition,
FlipX3DTransition,
FlipY3DTransition,
FlipAngular3DTransition,
ShuffleTransition,
ShrinkGrowTransition,
CornerMoveTransition,
EnvelopeTransition,
FadeTRTransition,
FadeBLTransition,
FadeUpTransition,
FadeDownTransition,
TurnOffTilesTransition,
FadeTransition,
ZoomTransition,
]
current_transition = 0
g = GrossiniLayer()
g2 = GrossiniLayer2()
c2 = ColorLayer(128,16,16,255)
c1 = ColorLayer(0,255,255,255)
control1 = ControlLayer()
control2 = ControlLayer()
    controlScene1 = Scene( c2, g, control1 )
controlScene2 = Scene( c1, g2, control2 )
control_p = 0
control_list = [controlScene1, controlScene2]
director.run( controlScene1 )
| Alwnikrotikz/los-cocos | samples/demo_transitions.py | Python | bsd-3-clause | 4,303 |
class PlasmaException(Exception):
pass
class IncompleteAtomicData(PlasmaException):
def __init__(self, atomic_data_name):
message = ('The current plasma calculation requires {0}, '
'which is not provided by the given atomic data'.format(
atomic_data_name))
super(PlasmaException, self).__init__(message)
class PlasmaMissingModule(PlasmaException):
pass
class PlasmaIsolatedModule(PlasmaException):
pass
class NotInitializedModule(PlasmaException):
pass
class PlasmaIonizationError(PlasmaException):
pass
class PlasmaConfigContradiction(PlasmaException):
    pass
| rajul/tardis | tardis/plasma/exceptions.py | Python | bsd-3-clause | 640 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for mojo
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import os.path
import re
# NOTE: The EDK allows all external paths, so doesn't need a whitelist.
_PACKAGE_WHITELISTED_EXTERNAL_PATHS = {
"SDK": ["//build/module_args/mojo.gni",
"//build/module_args/dart.gni",
"//testing/gtest",
"//third_party/cython",
"//third_party/khronos"],
"services": ["//build/module_args/mojo.gni",
"//testing/gtest"],
}
# These files are not part of the exported package.
_PACKAGE_IGNORED_BUILD_FILES = {
"SDK": {},
"EDK": {},
"services": {"mojo/services/BUILD.gn"},
}
_PACKAGE_PATH_PREFIXES = {"SDK": "mojo/public/",
"EDK": "mojo/edk/",
"services": "mojo/services"}
# TODO(etiennej): python_binary_source_set added due to crbug.com/443147
_PACKAGE_SOURCE_SET_TYPES = {"SDK": ["mojo_sdk_source_set",
"python_binary_source_set"],
"EDK": ["mojo_edk_source_set"],
"services": ["mojo_sdk_source_set"]}
_ILLEGAL_EXTERNAL_PATH_WARNING_MESSAGE = \
"Found disallowed external paths within SDK buildfiles."
_ILLEGAL_SERVICES_ABSOLUTE_PATH_WARNING_MESSAGE = \
"Found references to services' public buildfiles via absolute paths " \
"within services' public buildfiles."
_ILLEGAL_EDK_ABSOLUTE_PATH_WARNING_MESSAGE = \
"Found references to the EDK via absolute paths within EDK buildfiles."
_ILLEGAL_SDK_ABSOLUTE_PATH_WARNING_MESSAGE_TEMPLATE = \
"Found references to the SDK via absolute paths within %s buildfiles."
_ILLEGAL_SDK_ABSOLUTE_PATH_WARNING_MESSAGES = {
"SDK": _ILLEGAL_SDK_ABSOLUTE_PATH_WARNING_MESSAGE_TEMPLATE % "SDK",
"EDK": _ILLEGAL_SDK_ABSOLUTE_PATH_WARNING_MESSAGE_TEMPLATE % "EDK",
"services": _ILLEGAL_SDK_ABSOLUTE_PATH_WARNING_MESSAGE_TEMPLATE
% "services' public",
}
_INCORRECT_SOURCE_SET_TYPE_WARNING_MESSAGE_TEMPLATE = \
"All source sets in %s must be constructed via %s."
_INCORRECT_SOURCE_SET_TYPE_WARNING_MESSAGES = {
"SDK": _INCORRECT_SOURCE_SET_TYPE_WARNING_MESSAGE_TEMPLATE
% ("the SDK", _PACKAGE_SOURCE_SET_TYPES["SDK"]),
"EDK": _INCORRECT_SOURCE_SET_TYPE_WARNING_MESSAGE_TEMPLATE
% ("the EDK", _PACKAGE_SOURCE_SET_TYPES["EDK"]),
"services": _INCORRECT_SOURCE_SET_TYPE_WARNING_MESSAGE_TEMPLATE
% ("services' client libs", _PACKAGE_SOURCE_SET_TYPES["services"]),
}
def _IsBuildFileWithinPackage(f, package):
"""Returns whether |f| specifies a GN build file within |package|."""
assert package in _PACKAGE_PATH_PREFIXES
package_path_prefix = _PACKAGE_PATH_PREFIXES[package]
if not f.LocalPath().startswith(package_path_prefix):
return False
if (not f.LocalPath().endswith("/BUILD.gn") and
not f.LocalPath().endswith(".gni")):
return False
if f.LocalPath() in _PACKAGE_IGNORED_BUILD_FILES[package]:
return False
return True
def _AffectedBuildFilesWithinPackage(input_api, package):
"""Returns all the affected build files within |package|."""
return [f for f in input_api.AffectedFiles()
if _IsBuildFileWithinPackage(f, package)]
def _FindIllegalAbsolutePathsInBuildFiles(input_api, package):
"""Finds illegal absolute paths within the build files in
|input_api.AffectedFiles()| that are within |package|.
An illegal absolute path within the SDK or a service's SDK is one that is to
the SDK itself or a non-whitelisted external path. An illegal absolute path
within the EDK is one that is to the SDK or the EDK.
Returns any such references in a list of (file_path, line_number,
referenced_path) tuples."""
illegal_references = []
for f in _AffectedBuildFilesWithinPackage(input_api, package):
for line_num, line in f.ChangedContents():
# Determine if this is a reference to an absolute path.
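      # e.g. a dependency such as "//mojo/public/cpp/bindings" inside a
      # deps list (path shown for illustration only).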
m = re.search(r'"(//[^"]*)"', line)
if not m:
continue
referenced_path = m.group(1)
if not referenced_path.startswith("//mojo"):
# In the EDK, all external absolute paths are allowed.
if package == "EDK":
continue
# Determine if this is a whitelisted external path.
if referenced_path in _PACKAGE_WHITELISTED_EXTERNAL_PATHS[package]:
continue
illegal_references.append((f.LocalPath(), line_num, referenced_path))
return illegal_references
def _PathReferenceInBuildFileWarningItem(build_file, line_num, referenced_path):
"""Returns a string expressing a warning item that |referenced_path| is
referenced at |line_num| in |build_file|."""
return "%s, line %d (%s)" % (build_file, line_num, referenced_path)
def _IncorrectSourceSetTypeWarningItem(build_file, line_num):
"""Returns a string expressing that the error occurs at |line_num| in
|build_file|."""
return "%s, line %d" % (build_file, line_num)
def _CheckNoIllegalAbsolutePathsInBuildFiles(input_api, output_api, package):
"""Makes sure that the BUILD.gn files within |package| do not reference the
SDK/EDK via absolute paths, and do not reference disallowed external
dependencies."""
sdk_references = []
edk_references = []
external_deps_references = []
services_references = []
# Categorize any illegal references.
illegal_references = _FindIllegalAbsolutePathsInBuildFiles(input_api, package)
for build_file, line_num, referenced_path in illegal_references:
reference_string = _PathReferenceInBuildFileWarningItem(build_file,
line_num,
referenced_path)
if referenced_path.startswith("//mojo/public"):
sdk_references.append(reference_string)
elif package == "SDK":
external_deps_references.append(reference_string)
elif package == "services":
if referenced_path.startswith("//mojo/services"):
services_references.append(reference_string)
else:
external_deps_references.append(reference_string)
elif referenced_path.startswith("//mojo/edk"):
edk_references.append(reference_string)
# Package up categorized illegal references into results.
results = []
if sdk_references:
results.extend([output_api.PresubmitError(
_ILLEGAL_SDK_ABSOLUTE_PATH_WARNING_MESSAGES[package],
items=sdk_references)])
if external_deps_references:
assert package == "SDK" or package == "services"
results.extend([output_api.PresubmitError(
_ILLEGAL_EXTERNAL_PATH_WARNING_MESSAGE,
items=external_deps_references)])
if services_references:
assert package == "services"
results.extend([output_api.PresubmitError(
_ILLEGAL_SERVICES_ABSOLUTE_PATH_WARNING_MESSAGE,
items=services_references)])
if edk_references:
assert package == "EDK"
results.extend([output_api.PresubmitError(
_ILLEGAL_EDK_ABSOLUTE_PATH_WARNING_MESSAGE,
items=edk_references)])
return results
def _CheckSourceSetsAreOfCorrectType(input_api, output_api, package):
"""Makes sure that the BUILD.gn files always use the correct wrapper type for
|package|, which can be one of ["SDK", "EDK"], to construct source_set
targets."""
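  # For example (target name hypothetical), an SDK buildfile should declare
  #   mojo_sdk_source_set("foo") { ... }
  # rather than a plain source_set("foo") { ... }.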
assert package in _PACKAGE_SOURCE_SET_TYPES
required_source_set_type = _PACKAGE_SOURCE_SET_TYPES[package]
problems = []
for f in _AffectedBuildFilesWithinPackage(input_api, package):
for line_num, line in f.ChangedContents():
m = re.search(r"[a-z_]*source_set\(", line)
if not m:
continue
source_set_type = m.group(0)[:-1]
if source_set_type in required_source_set_type:
continue
problems.append(_IncorrectSourceSetTypeWarningItem(f.LocalPath(),
line_num))
if not problems:
return []
return [output_api.PresubmitError(
_INCORRECT_SOURCE_SET_TYPE_WARNING_MESSAGES[package],
items=problems)]
def _CheckChangePylintsClean(input_api, output_api):
# Additional python module paths (we're in src/mojo/); not everyone needs
# them, but it's easiest to add them to everyone's path.
# For ply and jinja2:
third_party_path = os.path.join(
input_api.PresubmitLocalPath(), "..", "third_party")
# For the bindings generator:
mojo_public_bindings_pylib_path = os.path.join(
input_api.PresubmitLocalPath(), "public", "tools", "bindings", "pylib")
# For the python bindings:
mojo_python_bindings_path = os.path.join(
input_api.PresubmitLocalPath(), "public", "python")
# For the python bindings tests:
mojo_python_bindings_tests_path = os.path.join(
input_api.PresubmitLocalPath(), "python", "tests")
# For the roll tools scripts:
mojo_roll_tools_path = os.path.join(
input_api.PresubmitLocalPath(), "tools", "roll")
# For all mojo/tools scripts:
mopy_path = os.path.join(input_api.PresubmitLocalPath(), "tools")
# For all mojo/devtools scripts:
devtools_path = os.path.join(input_api.PresubmitLocalPath(), "devtools")
# TODO(vtl): Don't lint these files until the (many) problems are fixed
# (possibly by deleting/rewriting some files).
temporary_black_list = (
r".*\bpublic[\\\/]tools[\\\/]bindings[\\\/]pylib[\\\/]mojom[\\\/]"
r"generate[\\\/].+\.py$",
r".*\bpublic[\\\/]tools[\\\/]bindings[\\\/]generators[\\\/].+\.py$")
black_list = input_api.DEFAULT_BLACK_LIST + temporary_black_list + (
# Imported from Android tools, we might want not to fix the warnings
# raised for it to make it easier to compare the code with the original.
r".*\bdevtools[\\\/]common[\\\/]android_stack_parser[\\\/].+\.py$",)
results = []
pylint_extra_paths = [
third_party_path,
mojo_public_bindings_pylib_path,
mojo_python_bindings_path,
mojo_python_bindings_tests_path,
mojo_roll_tools_path,
mopy_path,
devtools_path
]
results.extend(input_api.canned_checks.RunPylint(
input_api, output_api, extra_paths_list=pylint_extra_paths,
black_list=black_list))
return results
def _BuildFileChecks(input_api, output_api):
"""Performs checks on SDK, EDK, and services' public buildfiles."""
results = []
for package in ["SDK", "EDK", "services"]:
results.extend(_CheckNoIllegalAbsolutePathsInBuildFiles(input_api,
output_api,
package))
results.extend(_CheckSourceSetsAreOfCorrectType(input_api,
output_api,
package))
return results
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(_BuildFileChecks(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangePylintsClean(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
| zhangxq5012/sky_engine | mojo/PRESUBMIT.py | Python | bsd-3-clause | 11,467 |
from energenie import switch_off
switch_off()
| rjw57/energenie | examples/simple/off.py | Python | bsd-3-clause | 47 |
"""
WSGI config for {{ cookiecutter.project_name }} project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
{% if cookiecutter.use_newrelic == 'y' -%}
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
import newrelic.agent
newrelic.agent.initialize()
{%- endif %}
from django.core.wsgi import get_wsgi_application
{% if cookiecutter.use_sentry == 'y' -%}
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
{%- endif %}
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
{% if cookiecutter.use_sentry == 'y' -%}
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
{%- endif %}
{% if cookiecutter.use_newrelic == 'y' -%}
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = newrelic.agent.WSGIApplicationWrapper(application)
{%- endif %}
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| aeikenberry/cookiecutter-django-rest-babel | {{cookiecutter.project_slug}}/config/wsgi.py | Python | bsd-3-clause | 2,232 |
# -*- coding: utf-8 -*-
import os
import sys
try:
from gluon import current
except ImportError:
print >> sys.stderr, """
The installed version of Web2py is too old -- it does not define current.
Please upgrade Web2py to a more recent version.
"""
# Version of 000_config.py
# Increment this if the user should update their running instance
VERSION = 1
#def update_check(environment, template="default"):
def update_check(settings):
"""
Check whether the dependencies are sufficient to run Eden
@ToDo: Load deployment_settings so that we can configure the update_check
- need to rework so that 000_config.py is parsed 1st
@param settings: the deployment_settings
"""
# Get Web2py environment into our globals.
#globals().update(**environment)
request = current.request
# Fatal errors
errors = []
# Non-fatal warnings
warnings = []
# -------------------------------------------------------------------------
# Check Python libraries
# Get mandatory global dependencies
app_path = request.folder
gr_path = os.path.join(app_path, "requirements.txt")
or_path = os.path.join(app_path, "optional_requirements.txt")
global_dep = parse_requirements({}, gr_path)
optional_dep = parse_requirements({}, or_path)
templates = settings.get_template()
location = settings.get_template_location()
if not isinstance(templates, (tuple, list)):
templates = (templates,)
template_dep = {}
template_optional_dep = {}
for template in templates:
tr_path = os.path.join(app_path, location, "templates", template, "requirements.txt")
tor_path = os.path.join(app_path, location, "templates", template, "optional_requirements.txt")
parse_requirements(template_dep, tr_path)
parse_requirements(template_optional_dep, tor_path)
# Remove optional dependencies which are already accounted for in template dependencies
unique = set(optional_dep.keys()).difference(set(template_dep.keys()))
for dependency in optional_dep.keys():
if dependency not in unique:
del optional_dep[dependency]
# Override optional dependency messages from template
unique = set(optional_dep.keys()).difference(set(template_optional_dep.keys()))
for dependency in optional_dep.keys():
if dependency not in unique:
del optional_dep[dependency]
errors, warnings = s3_check_python_lib(global_dep, template_dep, template_optional_dep, optional_dep)
# @ToDo: Move these to Template
# for now this is done in s3db.climate_first_run()
if settings.has_module("climate"):
if settings.get_database_type() != "postgres":
errors.append("Climate unresolved dependency: PostgreSQL required")
try:
import rpy2
except ImportError:
errors.append("Climate unresolved dependency: RPy2 required")
try:
from Scientific.IO import NetCDF
except ImportError:
warnings.append("Climate unresolved dependency: NetCDF required if you want to import readings")
try:
from scipy import stats
except ImportError:
warnings.append("Climate unresolved dependency: SciPy required if you want to generate graphs on the map")
# -------------------------------------------------------------------------
# Check Web2Py version
#
# Currently, the minimum usable Web2py is determined by whether the
# Scheduler is available
web2py_minimum_version = "Version 2.4.7-stable+timestamp.2013.05.27.11.49.44"
# Offset of datetime in return value of parse_version.
datetime_index = 4
web2py_version_ok = True
try:
from gluon.fileutils import parse_version
except ImportError:
web2py_version_ok = False
if web2py_version_ok:
try:
web2py_minimum_parsed = parse_version(web2py_minimum_version)
web2py_minimum_datetime = web2py_minimum_parsed[datetime_index]
web2py_installed_version = request.global_settings.web2py_version
if isinstance(web2py_installed_version, str):
# Post 2.4.2, request.global_settings.web2py_version is unparsed
web2py_installed_parsed = parse_version(web2py_installed_version)
web2py_installed_datetime = web2py_installed_parsed[datetime_index]
else:
# 2.4.2 & earlier style
web2py_installed_datetime = web2py_installed_version[datetime_index]
web2py_version_ok = web2py_installed_datetime >= web2py_minimum_datetime
except:
# Will get AttributeError if Web2py's parse_version is too old for
# its current version format, which changed in 2.3.2.
web2py_version_ok = False
if not web2py_version_ok:
warnings.append(
"The installed version of Web2py is too old to support the current version of Sahana Eden."
"\nPlease upgrade Web2py to at least version: %s" % \
web2py_minimum_version)
# -------------------------------------------------------------------------
# Create required directories if needed
databases_dir = os.path.join(app_path, "databases")
try:
os.stat(databases_dir)
except OSError:
# not found, create it
os.mkdir(databases_dir)
# -------------------------------------------------------------------------
# Copy in Templates
# - 000_config.py (machine-specific settings)
# - rest are run in-place
#
template_folder = os.path.join(app_path, "modules", "templates")
template_files = {
# source : destination
"000_config.py" : os.path.join("models", "000_config.py"),
}
copied_from_template = []
for t in template_files:
src_path = os.path.join(template_folder, t)
dst_path = os.path.join(app_path, template_files[t])
try:
os.stat(dst_path)
except OSError:
# Not found, copy from template
if t == "000_config.py":
input = open(src_path)
output = open(dst_path, "w")
for line in input:
if "akeytochange" in line:
# Generate a random hmac_key to secure the passwords in case
# the database is compromised
import uuid
hmac_key = uuid.uuid4()
line = 'settings.auth.hmac_key = "%s"' % hmac_key
output.write(line)
output.close()
input.close()
else:
import shutil
shutil.copy(src_path, dst_path)
copied_from_template.append(template_files[t])
# @ToDo: WebSetup
# http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/WebSetup
#if not os.path.exists("%s/applications/websetup" % os.getcwd()):
# # @ToDo: Check Permissions
# # Copy files into this folder (@ToDo: Pythonise)
# cp -r private/websetup "%s/applications" % os.getcwd()
# Launch WebSetup
#redirect(URL(a="websetup", c="default", f="index",
# vars=dict(appname=request.application,
# firstTime="True")))
else:
# Found the file in the destination
# Check if it has been edited
import re
edited_pattern = r"FINISHED_EDITING_\w*\s*=\s*(True|False)"
edited_matcher = re.compile(edited_pattern).match
has_edited = False
with open(dst_path) as f:
for line in f:
edited_result = edited_matcher(line)
if edited_result:
has_edited = True
edited = edited_result.group(1)
break
if has_edited and (edited != "True"):
errors.append("Please edit %s before starting the system." % t)
# Check if it's up to date (i.e. a critical update requirement)
version_pattern = r"VERSION =\s*([0-9]+)"
version_matcher = re.compile(version_pattern).match
has_version = False
with open(dst_path) as f:
for line in f:
version_result = version_matcher(line)
if version_result:
has_version = True
version = version_result.group(1)
break
if not has_version:
error = "Your %s is using settings from the old templates system. Please switch to the new templates system: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates" % t
errors.append(error)
elif int(version) != VERSION:
error = "Your %s is using settings from template version %s. Please update with new settings from template version %s before starting the system." % \
(t, version, VERSION)
errors.append(error)
if copied_from_template:
errors.append(
"The following files were copied from templates and should be edited: %s" %
", ".join(copied_from_template))
return {"error_messages": errors, "warning_messages": warnings}
# -------------------------------------------------------------------------
def parse_requirements(output, filepath):
"""
"""
try:
with open(filepath) as filehandle:
dependencies = filehandle.read().splitlines()
msg = ""
for dependency in dependencies:
if dependency[0] == "#":
# either a normal comment or custom message
                if dependency[:9] == "# Warning" or dependency[:8] == "# Error:":
msg = dependency.split(":", 1)[1]
else:
import re
# Check if the module name is different from the package name
if "#" in dependency:
dep = dependency.split("#", 1)[1]
output[dep] = msg
else:
pattern = re.compile(r'([A-Za-z0-9_-]+)')
try:
dep = pattern.match(dependency).group(1)
output[dep] = msg
except AttributeError:
# Invalid dependency syntax
pass
msg = ""
except IOError:
# No override for Template
pass
return output
# -------------------------------------------------------------------------
def s3_check_python_lib(global_mandatory, template_mandatory, template_optional, global_optional):
"""
checks for optional as well as mandatory python libraries
"""
errors = []
warnings = []
for dependency, err in global_mandatory.iteritems():
try:
if "from" in dependency:
exec dependency
else:
exec "import %s" % dependency
except ImportError:
if err:
errors.append(err)
else:
errors.append("S3 unresolved dependency: %s required for Sahana to run" % dependency)
for dependency, err in template_mandatory.iteritems():
try:
if "from" in dependency:
exec dependency
else:
exec "import %s" % dependency
except ImportError:
if err:
errors.append(err)
else:
errors.append("Unresolved template dependency: %s required" % dependency)
for dependency, warn in template_optional.iteritems():
try:
if "from" in dependency:
exec dependency
else:
exec "import %s" % dependency
except ImportError:
if warn:
warnings.append(warn)
else:
warnings.append("Unresolved optional dependency: %s required" % dependency)
for dependency, warn in global_optional.iteritems():
try:
if "from" in dependency:
exec dependency
else:
exec "import %s" % dependency
except ImportError:
if warn:
warnings.append(warn)
else:
warnings.append("Unresolved optional dependency: %s required" % dependency)
return errors, warnings
# END =========================================================================
| sahana/Turkey | modules/s3_update_check.py | Python | mit | 13,160 |
__title__ = 'pif.exceptions'
__author__ = 'Artur Barseghyan'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('InvalidRegistryItemType',)
class InvalidRegistryItemType(ValueError):
"""
Raised when an attempt is made to register an item in the registry which does not have a proper type.
""" | djabber/Dashboard | bottle/dash/local/lib/pif-0.7/src/pif/exceptions.py | Python | mit | 352 |
"""
Logger for the spectroscopic toolkit packge
Goal: Use decorators to log each command in full
Author: Adam Ginsburg
Created: 03/17/2011
"""
import os
import time
class Logger(object):
"""
Logger object. Should be initiated on import.
"""
def __init__(self, filename):
"""
Open a log file for writing
"""
newfilename = filename
ii=0
while os.path.exists(filename):
newfilename = filename+".%3i" % ii
ii += 1
if newfilename != filename:
os.rename(filename,newfilename)
self.outfile = open(filename,'w')
print >>self.outfile,"Began logging at %s" % (time.strftime("%M/%d/%Y %H:%M:%S",time.localtime()))
def __call__(self, function):
"""
"""
def wrapper(*args, **kwargs):
modname = function.__module__
fname = function.__name__
print >>self.outfile,"%s.%s" % (modname,fname) +\
"("+"".join([a.__name__ for a in args]) + \
"".join(["%s=%s" % (k,v) for (k,v) in kwargs])+ ")"
return function
return wrapper
def close(self):
self.outfile.close()
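# Intended use (sketch only, names hypothetical): create one module-level
# Logger and decorate commands with it so every call is written to the log:
#
#   log = Logger("pyspeckit.log")
#
#   @log
#   def smooth(spectrum, width=5):
#       ...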
| keflavich/pyspeckit-obsolete | pyspeckit/spectrum/logger.py | Python | mit | 1,211 |
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2011 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Utility functions used in the autocomplete package.
"""
import functools
import time
import weakref
def keep(f):
"""Returns a decorator that remembers its return value for some time."""
_delay = 5.0 # sec
_cache = weakref.WeakKeyDictionary()
@functools.wraps(f)
def decorator(self, *args):
try:
result = _cache[self]
except KeyError:
pass
else:
t, ret = result
if (time.time() - t) < _delay:
return ret
ret = f(self, *args)
_cache[self] = (time.time(), ret)
return ret
return decorator
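# Minimal usage sketch (hypothetical names): cache an expensive per-instance
# lookup for the ~5 second window defined by _delay above:
#
#   class Completer(object):
#       @keep
#       def words(self):
#           return build_word_list(self)
#
# Repeated calls within _delay reuse the cached result; the WeakKeyDictionary
# lets each cache entry disappear together with its instance.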
# helper functions for displaying data from models
def command(item):
"""Prepends '\\' to item."""
return '\\' + item
def variable(item):
"""Appends ' = ' to item."""
return item + " = "
def cmd_or_var(item):
"""Appends ' = ' to item if it does not start with '\\'."""
return item if item.startswith('\\') else item + " = "
def make_cmds(words):
"""Returns generator prepending '\\' to every word."""
return ('\\' + w for w in words)
| dliessi/frescobaldi | frescobaldi_app/autocomplete/util.py | Python | gpl-2.0 | 1,994 |
from django.db import models
import useraccounts
from librehatti.catalog.models import Product
from librehatti.catalog.models import ModeOfPayment
from librehatti.catalog.models import Surcharge
from django.contrib.auth.models import User
from librehatti.config import _BUYER
from librehatti.config import _DELIVERY_ADDRESS
from librehatti.config import _IS_DEBIT
from librehatti.config import _PURCHASED_ITEMS
from librehatti.config import _QTY
from librehatti.config import _REFERENCE
from librehatti.config import _REFERENCE_DATE
from librehatti.voucher.models import FinancialSession
from django.core.urlresolvers import reverse
class NoteLine(models.Model):
note = models.CharField(max_length=400)
is_permanent = models.BooleanField(default=False)
def __unicode__(self):
return '%s' % (self.note)
class Meta:
verbose_name_plural = "Quoted Order Note"
class QuotedOrder(models.Model):
buyer = models.ForeignKey(User,verbose_name=_BUYER)
is_debit = models.BooleanField(default=False, verbose_name=_IS_DEBIT)
reference = models.CharField(max_length=200, verbose_name=_REFERENCE)
reference_date = models.DateField(verbose_name=_REFERENCE_DATE)
delivery_address = models.CharField(max_length=500, blank=True,\
null=True, verbose_name=_DELIVERY_ADDRESS)
organisation = models.ForeignKey('useraccounts.AdminOrganisations',default=1)
date_time = models.DateField(auto_now_add=True)
total_discount = models.IntegerField(default=0)
cheque_dd_number = models.CharField(max_length=50, blank=True)
cheque_dd_date = models.DateField(max_length=50, blank=True, null=True)
is_active = models.BooleanField(default=True)
def __unicode__(self):
return '%s' % (self.id)
class QuotedItem(models.Model):
quoted_order = models.ForeignKey(QuotedOrder)
price_per_unit = models.IntegerField()
qty = models.IntegerField(verbose_name=_QTY)
price = models.IntegerField()
item = models.ForeignKey(Product)
def save(self, *args, **kwargs):
if self.quoted_order:
self.price = self.price_per_unit * self.qty
super(QuotedItem,self).save(*args, **kwargs)
def __unicode__(self):
return '%s' % (self.item) + ' - ' '%s' % (self.quoted_order)
class QuotedOrderofSession(models.Model):
quoted_order = models.ForeignKey(QuotedOrder)
quoted_order_session = models.IntegerField()
session = models.ForeignKey(FinancialSession)
class QuotedTaxesApplied(models.Model):
quoted_order = models.ForeignKey(QuotedOrder)
surcharge = models.ForeignKey(Surcharge)
tax = models.IntegerField()
def __unicode__(self):
return "%s" % (self.surcharge)
class QuotedBill(models.Model):
quoted_order = models.ForeignKey(QuotedOrder)
delivery_charges = models.IntegerField()
total_cost = models.IntegerField()
totalplusdelivery = models.IntegerField()
total_tax = models.IntegerField()
grand_total = models.IntegerField()
amount_received = models.IntegerField()
class QuotedOrderNote(models.Model):
quoted_order = models.ForeignKey(QuotedOrder)
    note = models.CharField(max_length=400)
| sofathitesh/TCCReports | src/librehatti/bills/models.py | Python | gpl-2.0 | 3,184 |
#!/Library/Frameworks/Python.framework/Versions/2.5/Resources/Python.app/Contents/MacOS/Python
import pkg_resources
pkg_resources.require("TurboGears")
from turbogears import update_config, start_server
import cherrypy, os, time
cherrypy.lowercase_api = True
from os.path import *
import sys
import datetime
from datetime import timedelta
import os, glob, time
LOCAL_DIR = os.path.dirname(os.path.join(os.getcwd(),"uploads/"))
SESSION_PREFIX = 'session-'
LOCK = 'Store'
def Delete_dirs(data):
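    # Drop session files older than two days, then remove any upload
    # directories left behind by sessions that no longer exist.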
sessionfiles = [fname for fname in os.listdir(LOCAL_DIR)
if (fname.startswith(SESSION_PREFIX) and not fname.endswith(LOCK))]
now = datetime.datetime.now()
now.timetuple()
for sfile in sessionfiles:
for file in glob.glob(LOCAL_DIR+"/"+sfile):
stats = os.stat(file)
dtfile = datetime.datetime.fromtimestamp(stats[8])
t = now-timedelta(days=2)
if t > dtfile:
os.remove(os.path.join((LOCAL_DIR), sfile))
for fname in os.listdir(LOCAL_DIR):
if (not fname.startswith(SESSION_PREFIX) and not fname.endswith(LOCK)):
for sname in sessionfiles:
if not fname.endswith(sname.split('-')[1]):
for aname in os.listdir(LOCAL_DIR+'/'+fname):
os.remove(os.path.join((LOCAL_DIR+'/'+fname), aname))
os.rmdir(os.path.join(LOCAL_DIR, fname))
# first look on the command line for a desired config file,
# if it's not on the command line, then
# look for setup.py in this directory. If it's not there, this script is
# probably installed
if len(sys.argv) > 1:
update_config(configfile=sys.argv[1],
modulename="validator.config")
elif exists(join(dirname(__file__), "setup.py")):
update_config(configfile="dev.cfg",modulename="validator.config")
else:
update_config(configfile="prod.cfg",modulename="validator.config")
if not os.path.isdir(LOCAL_DIR):
try:
os.mkdir(LOCAL_DIR)
except IOError:
print "IOError: %s could not be created" % LOCAL_DIR
from validator.controllers import Root
from cherrypy.filters import sessionfilter
cherrypy.root = Root()
cherrypy.config.update({
'server.log_to_screen': True,
'server.environment': 'production',
'session_filter.on': True,
'session_filter.storage_type' : 'file',
'session_filter.storage_path' : LOCAL_DIR,
'session_filter.timeout': 60,
'session_filter.clean_up_delay': 60,
'session_filter.on_create_session': Delete_dirs,
})
start_server(Root())
| joshmoore/openmicroscopy | components/validator/WebApp/start-validator.py | Python | gpl-2.0 | 2,476 |
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import rhnpush_config
import utils
import sys
import os
class ConfManager:
def __init__(self, optionparser, store_true_list):
sysdir = '/etc/sysconfig/rhn'
homedir = utils.get_home_dir()
default = 'rhnpushrc'
regular = '.rhnpushrc'
deffile = os.path.join(sysdir, default)
regfile = os.path.join(homedir, regular)
cwdfile = os.path.join(os.getcwd(), regular)
self.cfgFileList = [deffile, regfile, cwdfile]
self.defaultconfig = rhnpush_config.rhnpushConfigParser(ensure_consistency=True)
# Get a reference to the object containing command-line options
self.cmdconfig = optionparser
self.store_true_list = store_true_list
# Change the files options of the self.userconfig
# Change the exclude options of the self.userconfig
def _files_to_list(self):
# Change the files options to lists.
if (self.defaultconfig.__dict__.has_key('files') and
not isinstance(self.defaultconfig.files, type([]))):
self.defaultconfig.files = [x.strip() for x in
self.defaultconfig.files.split(',')]
# Change the exclude options to list.
if (self.defaultconfig.__dict__.has_key('exclude') and
not isinstance(self.defaultconfig.__dict__['exclude'], type([]))):
self.defaultconfig.exclude = [x.strip() for x in
self.defaultconfig.exclude.split(',')]
def get_config(self):
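        # Configuration files are read in order: /etc/sysconfig/rhn/rhnpushrc,
        # ~/.rhnpushrc, then ./.rhnpushrc; command-line options are applied
        # last and take precedence over the file-based settings.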
for f in self.cfgFileList:
if os.access(f, os.F_OK):
if not os.access(f, os.R_OK):
print "rhnpush does not have read permission on %s" % f
sys.exit(1)
config2 = rhnpush_config.rhnpushConfigParser(f)
self.defaultconfig, config2 = utils.make_common_attr_equal(self.defaultconfig, config2)
self._files_to_list()
# Change the channel string into a list of strings.
# pylint: disable=E1103
if not self.defaultconfig.channel:
# if no channel then make it null array instead of
# an empty string array from of size 1 [''] .
self.defaultconfig.channel = []
else:
self.defaultconfig.channel = [x.strip() for x in
self.defaultconfig.channel.split(',')]
# Get the command line arguments. These take precedence over the other settings
argoptions, files = self.cmdconfig.parse_args()
# Makes self.defaultconfig compatible with argoptions by changing all '0' value attributes to None.
_zero_to_none(self.defaultconfig, self.store_true_list)
# If verbose isn't set at the command-line, it automatically gets set to zero. If it's at zero, change it to
# None so the settings in the config files take precedence.
if argoptions.verbose == 0:
argoptions.verbose = None
# Orgid, count, cache_lifetime, and verbose all need to be integers, just like in argoptions.
if self.defaultconfig.orgid:
self.defaultconfig.orgid = int(self.defaultconfig.orgid)
if self.defaultconfig.count:
self.defaultconfig.count = int(self.defaultconfig.count)
if self.defaultconfig.cache_lifetime:
self.defaultconfig.cache_lifetime = int(self.defaultconfig.cache_lifetime)
if self.defaultconfig.verbose:
self.defaultconfig.verbose = int(self.defaultconfig.verbose)
if self.defaultconfig.timeout:
self.defaultconfig.timeout = int(self.defaultconfig.timeout)
# Copy the settings in argoptions into self.defaultconfig.
self.defaultconfig, argoptions = utils.make_common_attr_equal(self.defaultconfig, argoptions)
# Make sure files is in the correct format.
if self.defaultconfig.files != files:
self.defaultconfig.files = files
return self.defaultconfig
# Changes every option in config that is also in store_true_list that is set to '0' to None
def _zero_to_none(config, store_true_list):
for opt in config.keys():
for cmd in store_true_list:
if str(opt) == cmd and config.__dict__[opt] == '0':
config.__dict__[opt] = None
| xkollar/spacewalk | client/tools/rhnpush/rhnpush_confmanager.py | Python | gpl-2.0 | 4,950 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptAction.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2014'
__copyright__ = '(C) 201, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtWidgets import QFileDialog, QMessageBox
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QSettings, QFileInfo
from processing.script.ScriptAlgorithm import ScriptAlgorithm
from processing.gui.ToolboxAction import ToolboxAction
from processing.script.WrongScriptException import WrongScriptException
from processing.script.ScriptUtils import ScriptUtils
from processing.core.alglist import algList
pluginPath = os.path.split(os.path.dirname(__file__))[0]
class AddScriptFromFileAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Add script from file')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'script.png'))
def execute(self):
settings = QSettings()
lastDir = settings.value('Processing/lastScriptsDir', '')
filename, selected_filter = QFileDialog.getOpenFileName(self.toolbox,
self.tr('Script files', 'AddScriptFromFileAction'), lastDir,
self.tr('Script files (*.py *.PY)', 'AddScriptFromFileAction'))
if filename:
try:
settings.setValue('Processing/lastScriptsDir',
QFileInfo(filename).absoluteDir().absolutePath())
script = ScriptAlgorithm(filename)
except WrongScriptException:
QMessageBox.warning(self.toolbox,
self.tr('Error reading script', 'AddScriptFromFileAction'),
self.tr('The selected file does not contain a valid script', 'AddScriptFromFileAction'))
return
destFilename = os.path.join(ScriptUtils.scriptsFolders()[0], os.path.basename(filename))
with open(destFilename, 'w') as f:
f.write(script.script)
algList.reloadProvider('script')
| wonder-sk/QGIS | python/plugins/processing/script/AddScriptFromFileAction.py | Python | gpl-2.0 | 3,158 |
#
# Karaka Skype-XMPP Gateway: Python package metadata
# <http://www.vipadia.com/products/karaka.html>
#
# Copyright (C) 2008-2009 Vipadia Limited
# Richard Mortier <[email protected]>
# Neil Stratford <[email protected]>
#
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License version
## 2 as published by the Free Software Foundation.
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License version 2 for more details.
## You should have received a copy of the GNU General Public License
## version 2 along with this program; if not, write to the Free
## Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
## MA 02110-1301, USA.
| gaka13/karaka | karaka/api/__init__.py | Python | gpl-2.0 | 896 |