repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
hieupham007/Titanium_Mobile | support/android/deltafy.py | 38 | 6586 | #!/usr/bin/env python
# deltafy
# - a simple library that keeps track of modified/created/removed files and directories in a file tree
#
# Author: Marshall Culpepper
# Licensed under the Apache Public License v2 (see LICENSE.txt)
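#
# Rough usage sketch (mirrors the __main__ watcher loop at the bottom of this file;
# the directory path below is only an illustrative placeholder):
#
#   deltafy = Deltafy('/path/to/tree')
#   deltafy.scan()              # first scan records baseline timestamps
#   deltas = deltafy.scan()     # later scans return a DeltaList of changes
#   for delta in deltas:
#       print str(delta)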
import os, sys, platform, sqlite3, time, stat
from datetime import datetime, timedelta
class DeltaList(list):
def has_path(self, path):
for delta in self:
if delta.get_path() == path: return True
return False
def is_updated(self, path):
for delta in self:
if delta.get_path() == path:
return delta.get_status() == Delta.MODIFIED or \
delta.get_status() == Delta.CREATED
return False
class Delta:
CREATED = 0
MODIFIED = 1
DELETED = 2
def __init__(self, path, timestamp, status):
self.path = path
self.timestamp = timestamp
self.status = status
def __str__(self):
return "%s [%s] @ %s" % (self.get_status_str(), self.get_path(), self.get_timestamp())
def get_path(self):
return self.path
def get_status(self):
return self.status
def get_status_str(self):
if self.status == self.CREATED: return "CREATED"
elif self.status == self.MODIFIED: return "MODIFIED"
else: return "DELETED"
def get_timestamp(self):
return self.timestamp
home = os.path.expanduser('~')
if platform.system() == 'Windows':
home = os.environ['USERPROFILE']
class Deltafy:
db_home = os.path.join(home, '.deltafy')
db_path = os.path.join(db_home, 'deltas')
@classmethod
def get_database_path(cls):
return cls.db_path
@classmethod
def set_database_path(cls, path):
cls.db_path = path
@classmethod
def get_modified_datetime(cls, path):
return datetime.fromtimestamp(os.stat(path).st_mtime)
@classmethod
def compare_datetime(cls, dt1, dt2, mindelta=None):
delta = dt1 - dt2
if mindelta is None:
mindelta = timedelta(microseconds=0)
if delta < mindelta: return -1
elif delta > mindelta: return 1
else: return 0
@classmethod
def compare_paths(cls, path1, path2, mindelta=None):
time1 = datetime.fromtimestamp(os.stat(path1).st_mtime)
time2 = datetime.fromtimestamp(os.stat(path2).st_mtime)
return cls.compare_datetime(time1, time2, mindelta)
@classmethod
def needs_update(cls, src_path, dest_path, mindelta=None):
"checks if dest_path needs to be updated by src_path with a default minimum delta of 1 second"
if mindelta is None:
mindelta = timedelta(seconds=1)
return not os.path.exists(dest_path) or \
(os.path.exists(src_path) and \
Deltafy.compare_paths(src_path, dest_path, mindelta) > 0)
@classmethod
def needs_update_timestamp(cls, src_path, dest_ts, mindelta=None):
"checks if dest_ts needs to be updated by src_path with a default minimum delta of 1 second"
return os.path.exists(src_path) and \
cls.compare_datetime(cls.get_modified_datetime(src_path), dest_ts, mindelta) > 0
def __init__(self, dir, include_callback=None):
self.dir = dir
self.include_callback = include_callback
if not os.path.exists(self.db_home):
os.makedirs(self.db_home)
self.conn = sqlite3.connect(self.db_path, detect_types=sqlite3.PARSE_DECLTYPES)
self.conn.execute('create table if not exists timestamps (path text, modified timestamp)')
def clear_state(self):
self.conn.execute('delete from timestamps')
self.conn.commit()
def get_timestamp(self, path):
c = self.conn.cursor()
c.execute('select modified from timestamps where path = ?', (path,))
row = c.fetchone()
timestamp = None
if row is not None and len(row) == 1:
timestamp = row[0]
c.close()
return timestamp
def insert_timestamp(self, path, path_stat):
timestamp = datetime.fromtimestamp(path_stat.st_mtime)
self.conn.execute('insert into timestamps(path, modified) values (?, ?)', (path, timestamp))
self.conn.commit()
return timestamp
def update_timestamp(self, path, timestamp):
self.conn.execute('update timestamps set modified = ? where path = ?', (timestamp, path))
self.conn.commit()
def delete_timestamp(self, path):
self.conn.execute('delete from timestamps where path = ?', (path,))
self.conn.commit()
def get_paths(self):
c = self.conn.cursor()
c.execute('select path from timestamps')
rows = c.fetchall()
paths = [row[0] for row in rows]
c.close()
return paths
def check_delta(self, path, path_stat):
timestamp = self.get_timestamp(path)
modified_time = datetime.fromtimestamp(path_stat.st_mtime)
if timestamp is None:
timestamp = self.insert_timestamp(path, path_stat)
return Delta(path, timestamp, Delta.CREATED)
elif modified_time - timestamp >= timedelta(seconds=1):
# this needs to be a little fuzzy.
# windows loses a few microseconds in precision
self.update_timestamp(path, modified_time)
return Delta(path, modified_time, Delta.MODIFIED)
return None
def scan(self):
deltas = DeltaList()
# first pass against the filesystem
self.scan_path(self.dir, deltas)
# second pass check again paths in db
# to find deleted paths in the filesystem
for path in self.get_paths():
if path.startswith(self.dir):
include_path = True
if self.include_callback is not None:
include_path = self.include_callback(path, True)
if not include_path:
continue
if not os.path.exists(path):
self.delete_timestamp(path)
deltas.append(Delta(path, 0, Delta.DELETED))
return deltas
def scan_single_file(self, file):
return self.check_delta(file, os.stat(file))
def scan_path(self, path, deltas):
for file in os.listdir(path):
absolute_path = os.path.join(path, file)
# reduce to just one stat, major speed up in windows
path_stat = os.stat(absolute_path)
if stat.S_ISDIR(path_stat.st_mode):
include_dir = True
if self.include_callback is not None:
include_dir = self.include_callback(absolute_path, False)
if not include_dir:
continue
self.scan_path(absolute_path, deltas)
else:
include_file = True
if self.include_callback is not None:
include_file = self.include_callback(absolute_path, True)
if not include_file:
continue
file_delta = self.check_delta(absolute_path, path_stat)
if file_delta is not None:
deltas.append(file_delta)
if __name__ == "__main__":
if len(sys.argv) == 1:
print "Usage: %s <dir>" % sys.argv[0]
sys.exit(-1)
deltafy = Deltafy(sys.argv[1])
sys.stdout.write("Initial scan...")
deltafy.scan()
print "done\nScanning for changes (Ctrl+C to stop)..."
while True:
try:
time.sleep(1)
deltas = deltafy.scan()
for delta in deltas:
print str(delta)
except KeyboardInterrupt:
print "Killed."
break
| apache-2.0 | -6,760,348,379,931,444,000 | 28.401786 | 102 | 0.69587 | false |
fw1121/luigi | test/email_test.py | 27 | 4841 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
import mock
from helpers import with_config
from luigi import notifications
from luigi.scheduler import CentralPlannerScheduler
from luigi.worker import Worker
from luigi import six
import luigi
class TestEmail(unittest.TestCase):
def testEmailNoPrefix(self):
self.assertEqual("subject", notifications._prefix('subject'))
@with_config({"core": {"email-prefix": "[prefix]"}})
def testEmailPrefix(self):
self.assertEqual("[prefix] subject", notifications._prefix('subject'))
class TestException(Exception):
pass
class TestTask(luigi.Task):
foo = luigi.Parameter()
bar = luigi.Parameter()
class FailSchedulingTask(TestTask):
def requires(self):
raise TestException('Oops!')
def run(self):
pass
def complete(self):
return False
class FailRunTask(TestTask):
def run(self):
raise TestException('Oops!')
def complete(self):
return False
class ExceptionFormatTest(unittest.TestCase):
def setUp(self):
self.sch = CentralPlannerScheduler()
self.w = Worker(scheduler=self.sch)
def tear_down(self):
self.w.stop()
def test_fail_run(self):
task = FailRunTask(foo='foo', bar='bar')
self._run_task(task)
def test_fail_run_html(self):
task = FailRunTask(foo='foo', bar='bar')
self._run_task_html(task)
def test_fail_schedule(self):
task = FailSchedulingTask(foo='foo', bar='bar')
self._run_task(task)
def test_fail_schedule_html(self):
task = FailSchedulingTask(foo='foo', bar='bar')
self._run_task_html(task)
@with_config({'core': {'error-email': '[email protected]',
'email-prefix': '[TEST] '}})
@mock.patch('luigi.notifications.send_error_email')
def _run_task(self, task, mock_send):
self.w.add(task)
self.w.run()
self.assertEqual(mock_send.call_count, 1)
args, kwargs = mock_send.call_args
self._check_subject(args[0], task)
self._check_body(args[1], task, html=False)
@with_config({'core': {'error-email': '[email protected]',
'email-prefix': '[TEST] ',
'email-type': 'html'}})
@mock.patch('luigi.notifications.send_error_email')
def _run_task_html(self, task, mock_send):
self.w.add(task)
self.w.run()
self.assertEqual(mock_send.call_count, 1)
args, kwargs = mock_send.call_args
self._check_subject(args[0], task)
self._check_body(args[1], task, html=True)
def _check_subject(self, subject, task):
self.assertIn(task.task_id, subject)
def _check_body(self, body, task, html=False):
if html:
self.assertIn('<th>name</th><td>{}</td>'.format(task.task_family), body)
self.assertIn('<div class="highlight"', body)
self.assertIn('Oops!', body)
for param, value in task.param_kwargs.items():
self.assertIn('<th>{}</th><td>{}</td>'.format(param, value), body)
else:
self.assertIn('Name: {}\n'.format(task.task_family), body)
self.assertIn('Parameters:\n', body)
self.assertIn('TestException: Oops!', body)
for param, value in task.param_kwargs.items():
self.assertIn('{}: {}\n'.format(param, value), body)
@with_config({"core": {"error-email": "[email protected]"}})
def testEmailRecipients(self):
six.assertCountEqual(self, notifications._email_recipients(), ["[email protected]"])
six.assertCountEqual(self, notifications._email_recipients("[email protected]"), ["[email protected]", "[email protected]"])
six.assertCountEqual(self, notifications._email_recipients(["[email protected]", "[email protected]"]),
["[email protected]", "[email protected]", "[email protected]"])
@with_config({"core": {}}, replace_sections=True)
def testEmailRecipientsNoConfig(self):
six.assertCountEqual(self, notifications._email_recipients(), [])
six.assertCountEqual(self, notifications._email_recipients("[email protected]"), ["[email protected]"])
six.assertCountEqual(self, notifications._email_recipients(["[email protected]", "[email protected]"]),
["[email protected]", "[email protected]"])
| apache-2.0 | -7,390,409,214,534,662,000 | 31.709459 | 96 | 0.618467 | false |
mancoast/CPythonPyc_test | cpython/250_fork_wait.py | 19 | 2257 | """This test case provides support for checking forking and wait behavior.
To test different wait behavior, override the wait_impl method.
We want fork1() semantics -- only the forking thread survives in the
child after a fork().
On some systems (e.g. Solaris without posix threads) we find that all
active threads survive in the child after a fork(); this is an error.
While BeOS doesn't officially support fork and native threading in
the same application, the present example should work just fine. DC
"""
import os, sys, time, thread, unittest
from test.test_support import TestSkipped
LONGSLEEP = 2
SHORTSLEEP = 0.5
NUM_THREADS = 4
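
# The docstring above notes that different wait behavior is tested by overriding
# wait_impl.  A hypothetical subclass (illustrative only, not part of the original
# test) could look like:
#
#   class ForkWaitAnyChild(ForkWait):
#       def wait_impl(self, cpid):
#           spid, status = os.waitpid(0, 0)   # block until any child exits
#           self.assertEquals(spid, cpid)
#           self.assertEquals(status, 0)
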
class ForkWait(unittest.TestCase):
    def setUp(self):
        self.alive = {}
        self.stop = 0

    def f(self, id):
        while not self.stop:
            self.alive[id] = os.getpid()
            try:
                time.sleep(SHORTSLEEP)
            except IOError:
                pass

    def wait_impl(self, cpid):
        for i in range(10):
            # waitpid() shouldn't hang, but some of the buildbots seem to hang
            # in the forking tests.  This is an attempt to fix the problem.
            spid, status = os.waitpid(cpid, os.WNOHANG)
            if spid == cpid:
                break
            time.sleep(2 * SHORTSLEEP)

        self.assertEquals(spid, cpid)
        self.assertEquals(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))

    def test_wait(self):
        for i in range(NUM_THREADS):
            thread.start_new(self.f, (i,))

        time.sleep(LONGSLEEP)

        a = self.alive.keys()
        a.sort()
        self.assertEquals(a, range(NUM_THREADS))

        prefork_lives = self.alive.copy()

        if sys.platform in ['unixware7']:
            cpid = os.fork1()
        else:
            cpid = os.fork()

        if cpid == 0:
            # Child
            time.sleep(LONGSLEEP)
            n = 0
            for key in self.alive:
                if self.alive[key] != prefork_lives[key]:
                    n += 1
            os._exit(n)
        else:
            # Parent
            self.wait_impl(cpid)
            # Tell threads to die
            self.stop = 1
            time.sleep(2*SHORTSLEEP) # Wait for threads to die
| gpl-3.0 | -1,821,554,376,100,620,800 | 27.935897 | 88 | 0.569783 | false |
doncatnip/require.cython | setup.py | 1 | 1970 | # build script for 'require.cython'
# - a cython extension for 'require', a Validation library
# ( script stolen from http://wiki.cython.org/PackageHierarchy )
import sys, os, stat, commands
from distutils.core import setup
from distutils.extension import Extension
#from setuptools import setup, Extension
include_dirs = []
# we'd better have Cython installed, or it's a no-go
try:
    from Cython.Distutils import build_ext
except:
    print "You don't seem to have Cython installed. Please get a"
    print "copy from www.cython.org and install it"
    sys.exit(1)
# scan the 'require' directory for extension files, converting
# them to extension names in dotted notation
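# e.g. (illustrative): "src/require/cython/validator/base.pyx" -> "require.cython.validator.base"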
def scandir(dir, files=[], currentPath=None):
    if currentPath is None:
        currentPath = []
    for file in os.listdir(dir):
        path = os.path.join(dir,file)
        if os.path.isfile(path) and path.endswith(".pyx"):
            files.append("%s.%s" % ('.'.join(currentPath),file[:-4]))
        elif os.path.isdir(path):
            currentPath.append( file )
            scandir(path, files, currentPath)
            currentPath.pop()
    return files
# generate an Extension object from its dotted name
def makeExtension(dir, extName):
    extPath = "%s.pyx" % extName.replace('.',os.path.sep)
    return Extension(
        extName,
        [os.path.join(dir,extPath)],
        include_dirs = include_dirs+[ "."], # adding the '.' to include_dirs is CRUCIAL!!
        extra_compile_args = [ "-Wall"],
        extra_link_args = ['-g'],
        )
# get the list of extensions
extNames = scandir("src")
# and build up the set of Extension objects
extensions = [makeExtension("src",name) for name in extNames]
# finally, we can pass all this to distutils
setup\
( name="require.cython"
, packages=["require", "require.cython", "require.cython.validator"]
, package_dir={'':'src'}
, ext_modules=extensions
, cmdclass = {'build_ext': build_ext}
)
| unlicense | -7,895,806,552,033,004,000 | 29.78125 | 91 | 0.655838 | false |
mrknow/filmkodi | plugin.video.xbmcfilm/resources/lib/mrknow_pCommon.py | 2 | 15323 | # -*- coding: utf-8 -*-
'''
method getURLRequestData(params):
params['use_host'] - True or False. If false the method can use global HOST
params['host'] - Use when params['outside_host'] is setting on True. Enter a own host
params['use_cookie'] - True, or False. Enable using cookie
params['cookiefile'] - Set cookie file
params['save_cookie'] - True, or False. Save cookie to file
params['load_cookie'] - True, or False. Load cookie
params['url'] - Url address
params['use_post'] - True, or False. Use post method.
post_data - Post data
params['return_data'] - True, or False. Return response read data.
params['read_data'] - True, or False. Use when params['return_data'] is False.
If you want to get data from url use this method (for default host):
data = { 'url': <your url>, 'use_host': False, use_cookie': False, 'use_post': False, 'return_data': True }
response = self.getURLRequestData(data)
If you want to get XML, or JSON data then:
data = { 'url': <your url>, 'use_host': False, use_cookie': False, 'use_post': False, 'return_data': False }
response = self.getURLRequestData(data)
If you want to get data with different user-agent then:
data = { 'url': <your url>, 'use_host': True, 'host': <your own user-agent define>, use_cookie': False, 'use_post': False, 'return_data': True }
response = self.getURLRequestData(data)
If you want to save cookie file:
data = { 'url': <your url>, 'use_host': True, 'host': <your own user-agent define>, 'use_cookie': True, 'load_cookie': False, 'save_cookie': True, 'cookiefile': <path to cookie file>, 'use_post': True, 'return_data': True }
response = self.getURLRequestData(data, post_data)
If you want to load cookie file:
data = { 'url': <your url>, 'use_host': True, 'host': <your own user-agent define>, 'use_cookie': True, 'load_cookie': True, 'save_cookie': False, 'cookiefile': <path to cookie file>, 'use_post': True, 'return_data': True }
response = self.getURLRequestData(data, post_data)
If you want to load cookie file without post:
data = { 'url': <your url>, 'use_host': True, 'host': <your own user-agent define>, 'use_cookie': True, 'load_cookie': True, 'save_cookie': False, 'cookiefile': <path to cookie file>, 'use_post': False, 'return_data': True }
response = self.getURLRequestData(data)
and etc...
'''
import re, os, sys, cookielib, random
import urllib, urllib2, re, sys, math
#import elementtree.ElementTree as ET
import xbmcaddon, xbmc, xbmcgui
try:
import simplejson as json
except ImportError:
import json
class StopDownloading(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
import mrknow_pLog
log = mrknow_pLog.pLog()
scriptID = sys.modules[ "__main__" ].scriptID
scriptname = "Polish Live TV"
ptv = xbmcaddon.Addon(scriptID)
dbg = ptv.getSetting('default_debug')
HOST_TABLE = { 100: 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0',
101: 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.97 Safari/537.11',
102: 'Opera/9.80 (Windows NT 6.1; WOW64) Presto/2.12.388 Version/12.11',
103: 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:14.0) Gecko/20100101 Firefox/14.0.1',
104: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20121213 Firefox/19.0',
105: 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:17.0) Gecko/20100101 Firefox/17.0',
106: 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.95 Safari/537.11',
107: 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
108: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/536.26.17 (KHTML, like Gecko) Version/6.0.2 Safari/536.26.17',
109: 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',
110: 'Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.01',
111: 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
112: 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
113: 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)',
}
HOST = 'Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0'
#Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Firefox/31.0
HISTORYFILE = xbmc.translatePath(ptv.getAddonInfo('profile') + "history.xml")
#cj = cookielib.LWPCookieJar()
cj = cookielib.MozillaCookieJar()
class common:
HOST = HOST
HEADER = None
def __init__(self):
pass
def html_special_chars(self,txt):
txt = txt.replace('#038;','')
txt = txt.replace('"','"')
txt = txt.replace(''','\'')
txt = txt.replace('”','"')
txt = txt.replace('„','"')
txt = txt.replace('–','-')
txt = txt.replace('"','"')
txt = txt.replace('ó','ó')
txt = txt.replace('\u0105','ą').replace('\u0104','Ą')
txt = txt.replace('\u0107','ć').replace('\u0106','Ć')
txt = txt.replace('\u0119','ę').replace('\u0118','Ę')
txt = txt.replace('\u0142','ł').replace('\u0141','Ł')
txt = txt.replace('\u0144','ń').replace('\u0144','Ń')
txt = txt.replace('\u00f3','ó').replace('\u00d3','Ó')
txt = txt.replace('\u015b','ś').replace('\u015a','Ś')
txt = txt.replace('\u017a','ź').replace('\u0179','Ź')
txt = txt.replace('\u017c','ż').replace('\u017b','Ż')
txt = txt.replace('×','x')
return txt
def getCookieItem(self, cookiefile, item):
ret = ''
cj = cookielib.MozillaCookieJar()
cj.load(cookiefile, ignore_discard = True)
for cookie in cj:
if cookie.name == item: ret = cookie.value
return ret
#item = {'name': 'xxx', 'value': 'yyy', 'domain': 'zzz'}
def addCookieItem(self, cookiefile, item, load_cookie=True):
if load_cookie==True and os.path.isfile(cookiefile):
cj.load(cookiefile, ignore_discard = True)
c = cookielib.Cookie(0, item['name'], item['value'], None, False, item['domain'], False, False, '/', True, False, None, True, None, None, {})
cj.set_cookie(c)
cj.save(cookiefile, ignore_discard = True)
def getURLRequestData(self, params = {}, post_data = None):
def urlOpen(req, customOpeners):
if len(customOpeners) > 0:
opener = urllib2.build_opener( *customOpeners )
response = opener.open(req)
else:
response = urllib2.urlopen(req)
return response
cj = cookielib.MozillaCookieJar()
response = None
req = None
out_data = None
opener = None
if 'host' in params:
host = params['host']
else:
host = self.HOST
if 'header' in params:
headers = params['header']
elif None != self.HEADER:
headers = self.HEADER
else:
headers = { 'User-Agent' : host }
if dbg == 'true':
log.info('pCommon - getURLRequestData() -> params: ' + str(params))
log.info('pCommon - getURLRequestData() -> params: ' + str(headers))
customOpeners = []
#cookie support
if 'use_cookie' not in params and 'cookiefile' in params and ('load_cookie' in params or 'save_cookie' in params):
params['use_cookie'] = True
if params.get('use_cookie', False):
customOpeners.append( urllib2.HTTPCookieProcessor(cj) )
if params.get('load_cookie', True):
cj.load(params['cookiefile'], ignore_discard = True)
if None != post_data:
if dbg == 'true': log.info('pCommon - getURLRequestData() -> post data: ' + str(post_data))
if params.get('raw_post_data', False):
dataPost = post_data
else:
dataPost = urllib.urlencode(post_data)
req = urllib2.Request(params['url'], dataPost, headers)
else:
req = urllib2.Request(params['url'], None, headers)
if not params.get('return_data', False):
out_data = urlOpen(req, customOpeners)
else:
gzip_encoding = False
try:
response = urlOpen(req, customOpeners)
if response.info().get('Content-Encoding') == 'gzip':
gzip_encoding = True
data = response.read()
response.close()
except urllib2.HTTPError, e:
if e.code == 404:
if dbg == 'true': log.info('pCommon - getURLRequestData() -> !!!!!!!! 404 - page not found handled')
if e.fp.info().get('Content-Encoding') == 'gzip':
gzip_encoding = True
data = e.fp.read()
#e.msg
#e.headers
else:
#printExc()
raise
try:
if gzip_encoding:
if dbg == 'true': log.info('pCommon - getURLRequestData() -> Content-Encoding == gzip')
buf = StringIO(data)
f = gzip.GzipFile(fileobj=buf)
out_data = f.read()
else:
out_data = data
except:
out_data = data
if params.get('use_cookie', False) and params.get('save_cookie', False):
cj.save(params['cookiefile'], ignore_discard = True)
return out_data
def makeABCList(self):
strTab = []
strTab.append('0 - 9');
for i in range(65,91):
strTab.append(str(unichr(i)))
return strTab
def getItemByChar(self, char, tab):
strTab = []
char = char[0]
for i in range(len(tab)):
if ord(char) >= 65:
if tab[i][0].upper() == char:
strTab.append(tab[i])
else:
if ord(tab[i][0]) >= 48 and ord(tab[i][0]) <= 57:
strTab.append(tab[i])
return strTab
def isNumeric(self,s):
try:
float(s)
return True
except ValueError:
return False
def checkDir(self, path):
if not os.path.isdir(self.encoded_item(path)):
os.mkdir(self.encoded_item(path))
def encoded_item(self,v):
if isinstance(v, unicode):
v = v.encode('utf8')
elif isinstance(v, str):
# Must be encoded in UTF-8
v.decode('utf8')
return v
def getRandomHost(self):
host_id = random.choice(HOST_TABLE.keys())
log.info("host ID: " + str(host_id))
host = HOST_TABLE[host_id]
return host
def LOAD_AND_PLAY_VIDEO(self, url, title, player = True):
if url == '':
d = xbmcgui.Dialog()
d.ok('Nie znaleziono streamingu', 'Może to chwilowa awaria.', 'Spróbuj ponownie za jakiś czas')
return False
thumbnail = xbmc.getInfoImage("ListItem.Thumb")
liz=xbmcgui.ListItem(title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail)
liz.setInfo( type="Video", infoLabels={ "Title": title } )
try:
if player != True:
print "custom player pCommon"
xbmcPlayer = player
else:
print "default player pCommon"
xbmcPlayer = xbmc.Player()
xbmcPlayer.play(url, liz)
except:
d = xbmcgui.Dialog()
d.ok('Błąd przy przetwarzaniu, lub wyczerpany limit czasowy oglądania.', 'Zarejestruj się i opłać abonament.', 'Aby oglądać za darmo spróbuj ponownie za jakiś czas')
return False
return True
def formatDialogMsg(self, msg):
valTab = []
LENGTH = 56
item = msg.split(' ');
valTab.append('')
valTab.append('')
valTab.append('')
if len(msg) <= LENGTH or len(item)==1:
valTab[0] = msg
else:
isFull = [False, False]
for i in item:
if isFull[0] == False and isFull[1] == False:
if len(valTab[0] + ' ' + i) <= LENGTH:
s = valTab[0] + ' ' + i
valTab[0] = s.strip()
else:
isFull[0] = True
if isFull[0]:
if len(valTab[1] + ' ' + i) <= LENGTH:
s = valTab[1] + ' ' + i
valTab[1] = s.strip()
else:
isFull[1] = True
if isFull[1]:
if len(valTab[2] + ' ' + i) <= LENGTH:
s = valTab[2] + ' ' + i
valTab[2] = s.strip()
else:
break
return valTab
class history:
def __init__(self):
pass
def readHistoryFile(self):
file = open(HISTORYFILE, 'r')
root = ET.parse(file).getroot()
file.close()
return root
def writeHistoryFile(self, root):
file = open(HISTORYFILE, 'w')
ET.ElementTree(root).write(file)
file.close()
def loadHistoryFile(self, service):
if not os.path.isfile(HISTORYFILE):
self.makeHistoryFile(service)
history = self.parseHistoryFile(service)
return history
def addHistoryItem(self, service, item):
if not os.path.isfile(HISTORYFILE):
self.makeHistoryFile(service)
strTab = []
root = self.readHistoryFile()
#check if item already exists
exists = False
for node in root.getiterator(service):
for child in node.getchildren():
if child.text != None:
strTab.append(child.text)
else:
strTab.append('')
if child.text == item:
exists = True
if not exists:
print "tab: " + str(strTab)
i=0
for node in root.getiterator(service):
for child in node.getchildren():
if i==0: child.text = item
else: child.text = strTab[i-1]
i = i + 1
self.writeHistoryFile(root)
def clearHistoryItems(self, service):
root = self.readHistoryFile()
for node in root.getiterator(service):
for child in node.getchildren():
child.text = ''
self.writeHistoryFile(root)
def parseHistoryFile(self, service):
strTab = []
root = self.readHistoryFile()
serviceList = root.findall(service)
if len(serviceList) == 0:
child = ET.Element(service)
root.append(child)
for i in range(5):
item = ET.Element('search')
child.append(item)
self.writeHistoryFile(root)
for node in root.getiterator(service):
for child in node.getchildren():
if child.text != None:
strTab.append(child.text)
else:
strTab.append('')
return strTab
def makeHistoryFile(self, service):
root = ET.Element('history')
child = ET.Element(service)
root.append(child)
for i in range(5):
item = ET.Element('search')
child.append(item)
self.writeHistoryFile(root)
class Chars:
def __init__(self):
pass
def setCHARS(self):
return CHARS
def replaceString(self, array, string):
out = string
for i in range(len(array)):
out = string.replace(array[i][0], array[i][1])
string = out
return out
def replaceChars(self, string):
out = string
for i in range(len(CHARS)):
out = string.replace(CHARS[i][0], CHARS[i][1])
string = out
return out
| apache-2.0 | 5,302,844,915,283,300,000 | 33.201342 | 225 | 0.577577 | false |
fishin4guitars/DB_Final_Project_Website | website/applicant/models.py | 1 | 1350 | from django.db import models
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from common.models import HasAContactModel
# Create your models here.
# 'create table Applicant
class Applicant(models.Model):
    # ORM automatically creates integer primary key 'id'
    # fk_user INT NOT NULL UNIQUE REFERENCES User.id
    fk_user = models.ForeignKey(User, unique = True)

    class Meta:
        db_table = 'Applicant'

# create table WatchedJob
class WatchedJob(models.Model):
    # ORM automatically creates integer primary key 'id'
    # watcher
    # fk_applicant_id integer NOT NULL REFERENCES Applicant.id
    fk_applicant = models.ForeignKey('Applicant')
    # watchee
    # fk_job_id integer NOT NULL REFERENCES Job.id
    fk_job = models.ForeignKey('job.Job')

    class Meta:
        # UNIQUE (fk_applicant, fk_job)
        unique_together = ('fk_applicant', 'fk_job')
        # table name
        db_table = 'WatchedJob'

# create table WatchedEvent
class WatchedEvent(models.Model):
    # ORM automatically creates integer primary key 'id'
    # fk_applicant_id integer NOT NULL REFERENCES Applicant.id
    fk_applicant = models.ForeignKey('Applicant')
    # fk_event_id integer NOT NULL REFERENCES Event.id
    fk_event = models.ForeignKey('event.Event')

    class Meta:
        # UNIQUE (fk_applicant, fk_event)
        unique_together = ('fk_applicant', 'fk_event')
        db_table = 'WatchedEvent'
| bsd-2-clause | -7,221,635,600,733,417,000 | 31.926829 | 59 | 0.751852 | false |
living180/git-cola | test/diffparse_test.py | 3 | 12144 | from __future__ import absolute_import, division, unicode_literals
import unittest
from cola import core
from cola import diffparse
from . import helper
class ParseDiffTestCase(unittest.TestCase):
def test_diff(self):
fixture_path = helper.fixture('diff.txt')
parser = diffparse.DiffParser('cola/diffparse.py', core.read(fixture_path))
hunks = parser.hunks
self.assertEqual(len(hunks), 3)
self.assertEqual(hunks[0].first_line_idx, 0)
self.assertEqual(len(hunks[0].lines), 23)
self.assertEqual(
hunks[0].lines[0], '@@ -6,10 +6,21 @@ from cola import gitcmds\n'
)
self.assertEqual(hunks[0].lines[1], ' from cola import gitcfg\n')
self.assertEqual(hunks[0].lines[2], ' \n')
self.assertEqual(hunks[0].lines[3], ' \n')
self.assertEqual(hunks[0].lines[4], '+class DiffSource(object):\n')
self.assertEqual(
hunks[0].lines[-1],
r" self._header_start_re = re.compile('^@@ -(\d+)"
r" \+(\d+),(\d+) @@.*')"
'\n',
)
self.assertEqual(hunks[1].first_line_idx, 23)
self.assertEqual(len(hunks[1].lines), 18)
self.assertEqual(
hunks[1].lines[0], '@@ -29,13 +40,11 @@ class DiffParser(object):\n'
)
self.assertEqual(hunks[1].lines[1], ' self.diff_sel = []\n')
self.assertEqual(hunks[1].lines[2], ' self.selected = []\n')
self.assertEqual(hunks[1].lines[3], ' self.filename = filename\n')
self.assertEqual(
hunks[1].lines[4],
'+ self.diff_source = diff_source or DiffSource()\n',
)
self.assertEqual(hunks[1].lines[-1], ' self.header = header\n')
self.assertEqual(hunks[2].first_line_idx, 41)
self.assertEqual(len(hunks[2].lines), 16)
self.assertEqual(
hunks[2].lines[0], '@@ -43,11 +52,10 @@ class DiffParser(object):\n'
)
self.assertEqual(
hunks[2].lines[-1],
' """Writes a new diff corresponding to the user\'s'
' selection."""\n',
)
def test_diff_at_start(self):
fixture_path = helper.fixture('diff-start.txt')
parser = diffparse.DiffParser('foo bar/a', core.read(fixture_path))
hunks = parser.hunks
self.assertEqual(hunks[0].lines[0], '@@ -1 +1,4 @@\n')
self.assertEqual(hunks[-1].lines[-1], '+c\n')
self.assertEqual(hunks[0].old_start, 1)
self.assertEqual(hunks[0].old_count, 1)
self.assertEqual(hunks[0].new_start, 1)
self.assertEqual(hunks[0].new_count, 4)
self.assertEqual(
parser.generate_patch(1, 3),
'--- a/foo bar/a\n'
'+++ b/foo bar/a\n'
'@@ -1 +1,3 @@\n'
' bar\n'
'+a\n'
'+b\n',
)
self.assertEqual(
parser.generate_patch(0, 4),
'--- a/foo bar/a\n'
'+++ b/foo bar/a\n'
'@@ -1 +1,4 @@\n'
' bar\n'
'+a\n'
'+b\n'
'+c\n',
)
def test_diff_at_end(self):
fixture_path = helper.fixture('diff-end.txt')
parser = diffparse.DiffParser('rijndael.js', core.read(fixture_path))
hunks = parser.hunks
self.assertEqual(hunks[0].lines[0], '@@ -1,39 +1 @@\n')
self.assertEqual(
hunks[-1].lines[-1],
"+module.exports = require('./build/Release/rijndael');\n",
)
self.assertEqual(hunks[0].old_start, 1)
self.assertEqual(hunks[0].old_count, 39)
self.assertEqual(hunks[0].new_start, 1)
self.assertEqual(hunks[0].new_count, 1)
def test_diff_that_empties_file(self):
fixture_path = helper.fixture('diff-empty.txt')
parser = diffparse.DiffParser('filename', core.read(fixture_path))
hunks = parser.hunks
self.assertEqual(hunks[0].lines[0], '@@ -1,2 +0,0 @@\n')
self.assertEqual(hunks[-1].lines[-1], '-second\n')
self.assertEqual(hunks[0].old_start, 1)
self.assertEqual(hunks[0].old_count, 2)
self.assertEqual(hunks[0].new_start, 0)
self.assertEqual(hunks[0].new_count, 0)
self.assertEqual(
parser.generate_patch(1, 1),
'--- a/filename\n'
'+++ b/filename\n'
'@@ -1,2 +1 @@\n'
'-first\n'
' second\n',
)
self.assertEqual(
parser.generate_patch(0, 2),
'--- a/filename\n'
'+++ b/filename\n'
'@@ -1,2 +0,0 @@\n'
'-first\n'
'-second\n',
)
def test_diff_file_removal(self):
diff_text = """\
deleted file mode 100755
@@ -1,1 +0,0 @@
-#!/bin/sh
"""
parser = diffparse.DiffParser('deleted.txt', diff_text)
self.assertEqual(1, len(parser.hunks))
# Selecting the first two lines generate no diff
expect = None
actual = parser.generate_patch(0, 1)
self.assertEqual(expect, actual)
# Selecting the last line should generate a line removal
expect = """\
--- a/deleted.txt
+++ b/deleted.txt
@@ -1 +0,0 @@
-#!/bin/sh
"""
actual = parser.generate_patch(1, 2)
self.assertEqual(expect, actual)
# All three lines should map to the same hunk diff
actual = parser.generate_hunk_patch(0)
self.assertEqual(expect, actual)
actual = parser.generate_hunk_patch(1)
self.assertEqual(expect, actual)
actual = parser.generate_hunk_patch(2)
self.assertEqual(expect, actual)
class DiffLinesTestCase(unittest.TestCase):
def setUp(self):
self.parser = diffparse.DiffLines()
fixture_path = helper.fixture('diff.txt')
self.text = core.read(fixture_path)
def test_basic_diff_line_count(self):
"""Verify the basic line counts"""
lines = self.parser.parse(self.text)
expect = len(self.text.splitlines())
actual = len(lines)
self.assertEqual(expect, actual)
def test_diff_line_count_ranges(self):
parser = self.parser
lines = parser.parse(self.text)
# Diff header
line = 0
count = 1
self.assertEqual(lines[line][0], parser.DASH)
self.assertEqual(lines[line][1], parser.DASH)
line += count
# 3 lines of context
count = 3
current_old = 6
current_new = 6
for i in range(count):
self.assertEqual(lines[line + i][0], current_old + i)
self.assertEqual(lines[line + i][1], current_new + i)
line += count
current_old += count
current_new += count
# 10 lines of new text
count = 10
for i in range(count):
self.assertEqual(lines[line + i][0], parser.EMPTY)
self.assertEqual(lines[line + i][1], current_new + i)
line += count
current_new += count
# 3 more lines of context
count = 3
for i in range(count):
self.assertEqual(lines[line + i][0], current_old + i)
self.assertEqual(lines[line + i][1], current_new + i)
line += count
current_new += count
current_old += count
# 1 line of removal
count = 1
for i in range(count):
self.assertEqual(lines[line + i][0], current_old + i)
self.assertEqual(lines[line + i][1], parser.EMPTY)
line += count
current_old += count
# 2 lines of addition
count = 2
for i in range(count):
self.assertEqual(lines[line + i][0], parser.EMPTY)
self.assertEqual(lines[line + i][1], current_new + i)
line += count
current_new += count
# 3 more lines of context
count = 3
for i in range(count):
self.assertEqual(lines[line + i][0], current_old + i)
self.assertEqual(lines[line + i][1], current_new + i)
line += count
current_new += count
current_old += count
# 1 line of header
count = 1
for i in range(count):
self.assertEqual(lines[line + i][0], parser.DASH)
self.assertEqual(lines[line + i][1], parser.DASH)
line += count
# 3 more lines of context
current_old = 29
current_new = 40
count = 3
for i in range(count):
self.assertEqual(lines[line + i][0], current_old + i)
self.assertEqual(lines[line + i][1], current_new + i)
line += count
current_new += count
current_old += count
expect_max_old = 54
self.assertEqual(expect_max_old, parser.old.max_value)
expect_max_new = 62
self.assertEqual(expect_max_new, parser.new.max_value)
self.assertEqual(parser.digits(), 2)
def test_diff_line_for_merge(self):
"""Verify the basic line counts"""
text = """@@@ -1,23 -1,33 +1,75 @@@
++<<<<<<< upstream
+
+Ok
"""
parser = self.parser
lines = parser.parse(text)
self.assertEqual(len(lines), 4)
self.assertEqual(len(lines[0]), 3)
self.assertEqual(len(lines[1]), 3)
self.assertEqual(len(lines[2]), 3)
self.assertEqual(len(lines[3]), 3)
self.assertEqual(lines[0][0], parser.DASH)
self.assertEqual(lines[0][1], parser.DASH)
self.assertEqual(lines[0][2], parser.DASH)
self.assertEqual(lines[1][0], parser.EMPTY)
self.assertEqual(lines[1][1], parser.EMPTY)
self.assertEqual(lines[1][2], 1)
self.assertEqual(lines[2][0], 1)
self.assertEqual(lines[2][1], parser.EMPTY)
self.assertEqual(lines[2][2], 2)
self.assertEqual(lines[3][0], 2)
self.assertEqual(lines[3][1], parser.EMPTY)
self.assertEqual(lines[3][2], 3)
class FormatDiffLinesTestCase(unittest.TestCase):
def test_format_basic(self):
fmt = diffparse.FormatDigits()
fmt.set_digits(2)
expect = '01 99'
actual = fmt.value(1, 99)
self.assertEqual(expect, actual)
def test_format_reuse(self):
fmt = diffparse.FormatDigits()
fmt.set_digits(3)
expect = '001 099'
actual = fmt.value(1, 99)
self.assertEqual(expect, actual)
fmt.set_digits(4)
expect = '0001 0099'
actual = fmt.value(1, 99)
self.assertEqual(expect, actual)
def test_format_special_values(self):
fmt = diffparse.FormatDigits(dash='-')
fmt.set_digits(3)
expect = ' 099'
actual = fmt.value(fmt.EMPTY, 99)
self.assertEqual(expect, actual)
expect = '001 '
actual = fmt.value(1, fmt.EMPTY)
self.assertEqual(expect, actual)
expect = ' '
actual = fmt.value(fmt.EMPTY, fmt.EMPTY)
self.assertEqual(expect, actual)
expect = '--- 001'
actual = fmt.value(fmt.DASH, 1)
self.assertEqual(expect, actual)
expect = '099 ---'
actual = fmt.value(99, fmt.DASH)
self.assertEqual(expect, actual)
expect = '--- ---'
actual = fmt.value(fmt.DASH, fmt.DASH)
self.assertEqual(expect, actual)
expect = ' ---'
actual = fmt.value(fmt.EMPTY, fmt.DASH)
self.assertEqual(expect, actual)
expect = '--- '
actual = fmt.value(fmt.DASH, fmt.EMPTY)
self.assertEqual(expect, actual)
class ParseRangeStrTestCase(unittest.TestCase):
def test_parse_range_str(self):
start, count = diffparse.parse_range_str('1,2')
self.assertEqual(start, 1)
self.assertEqual(count, 2)
def test_parse_range_str_single_line(self):
start, count = diffparse.parse_range_str('2')
self.assertEqual(start, 2)
self.assertEqual(count, 1)
def test_parse_range_str_empty(self):
start, count = diffparse.parse_range_str('0,0')
self.assertEqual(start, 0)
self.assertEqual(count, 0)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -693,057,719,999,351,800 | 30.957895 | 83 | 0.548748 | false |
synweap15/pyload | module/plugins/hoster/UnibytesCom.py | 9 | 2452 | # -*- coding: utf-8 -*-
import pycurl
import re
import urlparse
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class UnibytesCom(SimpleHoster):
    __name__ = "UnibytesCom"
    __type__ = "hoster"
    __version__ = "0.14"
    __status__ = "testing"

    __pattern__ = r'https?://(?:www\.)?unibytes\.com/[\w .-]{11}B'
    __config__ = [("use_premium", "bool", "Use premium account if available", True)]

    __description__ = """UniBytes.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]

    HOSTER_DOMAIN = "unibytes.com"

    INFO_PATTERN = r'<span[^>]*?id="fileName".*?>(?P<N>[^>]+)</span>\s*\((?P<S>\d.*?)\)'
    WAIT_PATTERN = r'Wait for <span id="slowRest">(\d+)</span> sec'
    LINK_FREE_PATTERN = r'<a href="(.+?)">Download</a>'

    def handle_free(self, pyfile):
        domain = "http://www.%s/" % self.HOSTER_DOMAIN
        action, post_data = self.parse_html_form('id="startForm"')

        self.req.http.c.setopt(pycurl.FOLLOWLOCATION, 0)

        for _i in xrange(8):
            self.log_debug(action, post_data)
            self.html = self.load(urlparse.urljoin(domain, action), post=post_data)

            m = re.search(r'location:\s*(\S+)', self.req.http.header, re.I)
            if m:
                self.link = m.group(1)
                break

            if '>Somebody else is already downloading using your IP-address<' in self.html:
                self.wait(10 * 60, True)
                self.retry()

            if post_data['step'] == "last":
                m = re.search(self.LINK_FREE_PATTERN, self.html)
                if m:
                    self.link = m.group(1)
                    self.captcha.correct()
                    break
                else:
                    self.captcha.invalid()

            last_step = post_data['step']
            action, post_data = self.parse_html_form('id="stepForm"')

            if last_step == "timer":
                m = re.search(self.WAIT_PATTERN, self.html)
                self.wait(m.group(1) if m else 60, False)
            elif last_step in ("captcha", "last"):
                post_data['captcha'] = self.captcha.decrypt(urlparse.urljoin(domain, "captcha.jpg"))
            else:
                self.fail(_("No valid captcha code entered"))

        self.req.http.c.setopt(pycurl.FOLLOWLOCATION, 1)
getInfo = create_getInfo(UnibytesCom)
| gpl-3.0 | -8,157,262,197,723,866,000 | 31.263158 | 100 | 0.533034 | false |
matbu/ansible-modules-extras | cloud/cloudstack/cs_ip_address.py | 48 | 7197 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Darren Worrall <[email protected]>
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_ip_address
short_description: Manages public IP address associations on Apache CloudStack based clouds.
description:
- Acquires and associates a public IP to an account or project. Due to API
limitations this is not an idempotent call, so be sure to only
conditionally call this when C(state=present)
version_added: '2.0'
author:
- "Darren Worrall (@dazworrall)"
- "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address.
- Required if C(state=absent)
required: false
default: null
domain:
description:
- Domain the IP address is related to.
required: false
default: null
network:
description:
- Network the IP address is related to.
required: false
default: null
vpc:
description:
- VPC the IP address is related to.
required: false
default: null
version_added: "2.2"
account:
description:
- Account the IP address is related to.
required: false
default: null
project:
description:
- Name of the project the IP address is related to.
required: false
default: null
zone:
description:
- Name of the zone in which the IP address is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Associate an IP address conditonally
- local_action:
module: cs_ip_address
network: My Network
register: ip_address
when: instance.public_ip is undefined
# Disassociate an IP address
- local_action:
module: cs_ip_address
ip_address: 1.2.3.4
state: absent
'''
RETURN = '''
---
id:
description: UUID of the Public IP address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
zone:
description: Name of zone the IP address is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the IP address is related to.
returned: success
type: string
sample: Production
account:
description: Account the IP address is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the IP address is related to.
returned: success
type: string
sample: example domain
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackIPAddress(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackIPAddress, self).__init__(module)
self.returns = {
'ipaddress': 'ip_address',
}
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
args = {
'ipaddress': self.module.params.get('ip_address'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'vpcid': self.get_vpc(key='id'),
}
ip_addresses = self.cs.listPublicIpAddresses(**args)
if ip_addresses:
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def associate_ip_address(self):
self.result['changed'] = True
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'networkid': self.get_network(key='id'),
'zoneid': self.get_zone(key='id'),
}
ip_address = None
if not self.module.check_mode:
res = self.cs.associateIpAddress(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
ip_address = self.poll_job(res, 'ipaddress')
return ip_address
def disassociate_ip_address(self):
ip_address = self.get_ip_address()
if not ip_address:
return None
if ip_address['isstaticnat']:
self.module.fail_json(msg="IP address is allocated via static nat")
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.disassociateIpAddress(id=ip_address['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'ipaddress')
return ip_address
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=False),
state = dict(choices=['present', 'absent'], default='present'),
vpc = dict(default=None),
network = dict(default=None),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
required_if=[
('state', 'absent', ['ip_address']),
],
supports_check_mode=True
)
try:
acs_ip_address = AnsibleCloudStackIPAddress(module)
state = module.params.get('state')
if state in ['absent']:
ip_address = acs_ip_address.disassociate_ip_address()
else:
ip_address = acs_ip_address.associate_ip_address()
result = acs_ip_address.get_result(ip_address)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | -7,037,430,460,957,615,000 | 28.487705 | 92 | 0.629326 | false |
jonzobrist/Percona-Server-5.1 | kewpie/percona_tests/innodbCrash/innodbCrash3_test.py | 1 | 2378 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from percona_tests.innodbCrash.innodbCrashTestCase import innodbCrashTestCase
from percona_tests.innodbCrash import suite_config
server_requirements = suite_config.server_requirements
server_requests = suite_config.server_requests
servers = suite_config.servers
test_executor = suite_config.test_executor
class basicTest(innodbCrashTestCase):
    def test_crash(self):
        """
        self.logging = test_executor.logging
        self.servers = servers
        self.master_server = servers[0]
        self.slave_server = servers[1]
        self.randgen_threads = suite_config.randgen_threads
        self.randgen_queries_per_thread = suite_config.randgen_queries_per_thread
        self.crashes = suite_config.crashes
        """
        self.initialize(test_executor, servers, suite_config)

        # create our table
        self.test_bed_cmd = "./gendata.pl --spec=conf/percona/percona_no_blob.zz "
        self.create_test_bed()

        # Our randgen load-generation command (transactional grammar)
        self.test_seq = [ "./gentest.pl"
                        , "--grammar=conf/percona/trx_randDebugCrash.yy"
                        , "--queries=%d" %(self.randgen_queries_per_thread)
                        , "--threads=%d" %(self.randgen_threads)
                        , "--sqltrace"
                        , "--debug"
                        , "--seed=%s" %(self.randgen_seed)
                        ]
        self.test_seq = " ".join(self.test_seq)
        self.execute_crash_test()
| bsd-3-clause | -9,067,557,016,154,109,000 | 40 | 82 | 0.656013 | false |
wilkerwma/codeschool | old/cs_questions/migrations/old/0004_auto_20160501_2349.py | 3 | 1047 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-02 02:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('cs_core', '0001_initial'),
        ('cs_questions', '0003_remove_codingioresponse_feedback'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='codingioactivity',
            name='answer_key',
        ),
        migrations.AddField(
            model_name='codingioactivity',
            name='language',
            field=models.ForeignKey(default='python', on_delete=django.db.models.deletion.CASCADE, to='cs_core.ProgrammingLanguage'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='codingioresponse',
            name='question_fallback',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cs_questions.CodingIoQuestion'),
        ),
    ]
| gpl-3.0 | -4,886,820,328,519,006,000 | 31.71875 | 140 | 0.623687 | false |
Caoimhinmg/PmagPy | programs/pt_rot.py | 1 | 4981 | #!/usr/bin/env python
# define some variables
from __future__ import print_function
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
    matplotlib.use("TKAgg")
import pmagpy.pmag as pmag
import pmagpy.frp as frp
def main():
"""
NAME
pt_rot.py
DESCRIPTION
rotates pt according to specified age and plate
SYNTAX
pt_rot.py [command line options]
OPTIONS
-h prints help and quits
-f file with lon lat plate age Dplate as space delimited input
Dplate is the destination plate coordinates desires
- default is "fixed south africa"
Dplate should be one of: [nwaf, neaf,saf,aus, eur, ind, sam, ant, grn, nam]
-ff file Efile, file has lat lon data file and Efile has sequential rotation poles: Elat Elon Omega
-F OFILE, output pmag_results formatted file with rotated points stored in vgp_lon, vgp_lat
default is to print out rotated lon, lat to standard output
"""
dir_path='.'
PTS=[]
ResRecs=[]
ofile=""
Dplates=['nwaf', 'neaf','saf','aus', 'eur', 'ind', 'sam', 'ant', 'grn', 'nam']
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind = sys.argv.index('-F')
ofile=dir_path+'/'+sys.argv[ind+1]
if '-f' in sys.argv:
ind = sys.argv.index('-f')
file=dir_path+'/'+sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
elif '-ff' in sys.argv:
ind = sys.argv.index('-ff')
file=dir_path+'/'+sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
Efile=dir_path+'/'+sys.argv[ind+2]
f=open(Efile,'r')
edata=f.readlines()
Poles=[]
for p in edata:
rec=p.split()
pole=[float(rec[0]),float(rec[1]),float(rec[2])] # pole is lat/lon/omega
Poles.append(pole)
else:
data=sys.stdin.readlines()
for line in data:
PtRec={}
rec=line.split()
PtRec['site_lon']=rec[0]
PtRec['site_lat']=rec[1]
if '-ff' in sys.argv:
pt_lat,pt_lon=float(rec[1]),float(rec[0])
for pole in Poles:
ptrot= pmag.PTrot(pole,[pt_lat],[pt_lon])
pt_lat=ptrot[0][0]
pt_lon=ptrot[1][0]
if ofile=="":
print(ptrot[1][0], ptrot[0][0])
else:
ResRec={'vgp_lat': '%7.1f'%(ptrot[0][0]),'vgp_lon':'%7.1f'%( ptrot[1][0])}
ResRecs.append(ResRec)
else:
PtRec['cont']=rec[2]
if PtRec['cont']=='af':PtRec['cont']='saf' # use fixed south africa
PtRec['age']=rec[3]
if len(rec)>4:
PtRec['dcont']=rec[4]
PTS.append(PtRec)
if '-ff' not in sys.argv:
for pt in PTS:
pole='not specified'
pt_lat=float(pt['site_lat'])
pt_lon=float(pt['site_lon'])
age=float(pt['age'])
ptrot=[[pt_lat],[pt_lon]]
if pt['cont']=='ib':
pole=frp.get_pole(pt['cont'],age)
ptrot= pmag.PTrot(pole,[pt_lat],[pt_lon])
pt_lat=ptrot[0][0]
pt_lon=ptrot[1][0]
pt['cont']='eur'
if pt['cont']!='saf':
pole1=frp.get_pole(pt['cont'],age)
ptrot= pmag.PTrot(pole1,[pt_lat],[pt_lon])
if 'dcont' in list(pt.keys()):
pt_lat=ptrot[0][0]
pt_lon=ptrot[1][0]
pole=frp.get_pole(pt['dcont'],age)
pole[2]=-pole[2]
ptrot= pmag.PTrot(pole,[pt_lat],[pt_lon])
if ofile=="":
print(ptrot[1][0], ptrot[0][0])
else:
ResRec={'vgp_lat': '%7.1f'%(ptrot[0][0]),'vgp_lon':'%7.1f'%( ptrot[1][0])}
ResRecs.append(ResRec)
else:
if 'dcont' in list(pt.keys()):
pole=frp.get_pole(pt['dcont'],age)
pole[2]=-pole[2]
ptrot= pmag.PTrot(pole,[pt_lat],[pt_lon])
print(ptrot)
if ofile=="":
print(ptrot[1][0], ptrot[0][0])
else:
ResRec={'vgp_lat': '%7.1f'%(ptrot[0][0]),'vgp_lon':'%7.1f'%( ptrot[1][0])}
ResRecs.append(ResRec)
else:
if ofile=="":
print(ptrot[1][0], ptrot[0][0])
else:
ResRec={'vgp_lat': '%7.1f'%(ptrot[0][0]),'vgp_lon':'%7.1f'%( ptrot[1][0])}
ResRecs.append(ResRec)
if len(ResRecs)>0:
pmag.magic_write(ofile,ResRecs,'pmag_results')
if __name__ == "__main__":
main()
| bsd-3-clause | 8,358,615,214,262,717,000 | 34.834532 | 111 | 0.469384 | false |
pblottiere/QGIS | tests/src/python/test_qgslayoutpagecollection.py | 30 | 40291 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutPageCollection
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '18/07/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.PyQt import sip
from qgis.core import (QgsUnitTypes,
QgsLayout,
QgsLayoutItemPage,
QgsLayoutSize,
QgsLayoutPoint,
QgsLayoutObject,
QgsProject,
QgsMargins,
QgsProperty,
QgsLayoutGuide,
QgsLayoutMeasurement,
QgsLayoutPageCollection,
QgsSimpleFillSymbolLayer,
QgsLayoutItemShape,
QgsFillSymbol,
QgsReadWriteContext)
from qgis.PyQt.QtCore import Qt, QCoreApplication, QEvent, QPointF, QRectF
from qgis.PyQt.QtTest import QSignalSpy
from qgis.PyQt.QtXml import QDomDocument
from qgis.testing import start_app, unittest
start_app()
class TestQgsLayoutPageCollection(unittest.TestCase):
def testLayout(self):
# test that layouts have a collection
p = QgsProject()
l = QgsLayout(p)
self.assertTrue(l.pageCollection())
self.assertEqual(l.pageCollection().layout(), l)
def testSymbol(self):
"""
Test setting a page symbol for the collection
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
self.assertTrue(collection.pageStyleSymbol())
fill = QgsSimpleFillSymbolLayer()
fill_symbol = QgsFillSymbol()
fill_symbol.changeSymbolLayer(0, fill)
fill.setColor(Qt.green)
fill.setStrokeColor(Qt.red)
fill.setStrokeWidth(6)
collection.setPageStyleSymbol(fill_symbol)
self.assertEqual(collection.pageStyleSymbol().symbolLayer(0).color().name(), '#00ff00')
self.assertEqual(collection.pageStyleSymbol().symbolLayer(0).strokeColor().name(), '#ff0000')
def testPages(self):
"""
Test adding/retrieving/deleting pages from the collection
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
self.assertEqual(collection.pageCount(), 0)
self.assertFalse(collection.pages())
self.assertFalse(collection.page(-1))
self.assertFalse(collection.page(0))
self.assertFalse(collection.page(1))
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
self.assertEqual(collection.pageNumber(page), -1)
collection.addPage(page)
self.assertTrue(page in l.items())
self.assertEqual(collection.pageCount(), 1)
self.assertEqual(collection.pages(), [page])
self.assertFalse(collection.page(-1))
self.assertEqual(collection.page(0), page)
self.assertFalse(collection.page(1))
self.assertEqual(collection.pageNumber(page), 0)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(collection.pages(), [page, page2])
self.assertFalse(collection.page(-1))
self.assertEqual(collection.page(0), page)
self.assertEqual(collection.page(1), page2)
self.assertEqual(collection.pageNumber(page2), 1)
# insert a page
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A3')
collection.insertPage(page3, 1)
self.assertTrue(page3 in l.items())
self.assertEqual(collection.pageCount(), 3)
self.assertEqual(collection.pages(), [page, page3, page2])
self.assertEqual(collection.page(0), page)
self.assertEqual(collection.page(1), page3)
self.assertEqual(collection.page(2), page2)
self.assertEqual(collection.pageNumber(page3), 1)
# delete page
collection.deletePage(-1)
self.assertEqual(collection.pageCount(), 3)
self.assertEqual(collection.pages(), [page, page3, page2])
collection.deletePage(100)
self.assertEqual(collection.pageCount(), 3)
self.assertEqual(collection.pages(), [page, page3, page2])
collection.deletePage(1)
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(collection.pages(), [page, page2])
# make sure page was deleted
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertTrue(sip.isdeleted(page3))
del l
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertTrue(sip.isdeleted(page))
self.assertTrue(sip.isdeleted(page2))
def testDeletePages(self):
"""
Test deleting pages from the collection
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
page_about_to_be_removed_spy = QSignalSpy(collection.pageAboutToBeRemoved)
# delete page
collection.deletePage(None)
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(len(page_about_to_be_removed_spy), 0)
page3 = QgsLayoutItemPage(l)
# try deleting a page not in collection
collection.deletePage(page3)
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertFalse(sip.isdeleted(page3))
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(len(page_about_to_be_removed_spy), 0)
self.assertEqual(l.layoutBounds(ignorePages=False), QRectF(0.0, 0.0, 210.0, 517.0))
collection.deletePage(page)
self.assertEqual(collection.pageCount(), 1)
self.assertEqual(l.layoutBounds(ignorePages=False), QRectF(0.0, 0.0, 148.0, 210.0))
self.assertFalse(page in collection.pages())
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertTrue(sip.isdeleted(page))
self.assertEqual(len(page_about_to_be_removed_spy), 1)
self.assertEqual(page_about_to_be_removed_spy[-1][0], 0)
collection.deletePage(page2)
self.assertEqual(collection.pageCount(), 0)
self.assertFalse(collection.pages())
self.assertEqual(l.layoutBounds(ignorePages=False), QRectF())
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertTrue(sip.isdeleted(page2))
self.assertEqual(len(page_about_to_be_removed_spy), 2)
self.assertEqual(page_about_to_be_removed_spy[-1][0], 0)
def testClear(self):
"""
Test clearing the collection
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
collection.clear()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
page_about_to_be_removed_spy = QSignalSpy(collection.pageAboutToBeRemoved)
# clear
collection.clear()
self.assertEqual(collection.pageCount(), 0)
self.assertEqual(len(page_about_to_be_removed_spy), 2)
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertTrue(sip.isdeleted(page))
self.assertTrue(sip.isdeleted(page2))
def testExtendByNewPage(self):
"""
Test extend by adding new page
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# no existing page to extend
self.assertIsNone(collection.extendByNewPage())
self.assertEqual(collection.pageCount(), 0)
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize(QgsLayoutSize(10, 10))
collection.addPage(page)
self.assertEqual(collection.pageCount(), 1)
new_page = collection.extendByNewPage()
self.assertIsNotNone(new_page)
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(new_page.sizeWithUnits(), page.sizeWithUnits())
new_page.setPageSize(QgsLayoutSize(20, 20))
new_page2 = collection.extendByNewPage()
self.assertIsNotNone(new_page2)
self.assertEqual(collection.pageCount(), 3)
self.assertEqual(new_page2.sizeWithUnits(), new_page.sizeWithUnits())
def testMaxPageWidthAndSize(self):
"""
Test calculating maximum page width and size
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertEqual(collection.maximumPageWidth(), 210.0)
self.assertEqual(collection.maximumPageSize().width(), 210.0)
self.assertEqual(collection.maximumPageSize().height(), 297.0)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A3')
collection.addPage(page2)
self.assertEqual(collection.maximumPageWidth(), 297.0)
self.assertEqual(collection.maximumPageSize().width(), 297.0)
self.assertEqual(collection.maximumPageSize().height(), 420.0)
# add a page with other units
page3 = QgsLayoutItemPage(l)
page3.setPageSize(QgsLayoutSize(100, 100, QgsUnitTypes.LayoutMeters))
collection.addPage(page3)
self.assertEqual(collection.maximumPageWidth(), 100000.0)
self.assertEqual(collection.maximumPageSize().width(), 100000.0)
self.assertEqual(collection.maximumPageSize().height(), 100000.0)
def testUniformPageSizes(self):
"""
Test detection of uniform page sizes
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
self.assertTrue(collection.hasUniformPageSizes())
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertTrue(collection.hasUniformPageSizes())
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize(QgsLayoutSize(21.0, 29.7, QgsUnitTypes.LayoutCentimeters))
collection.addPage(page2)
self.assertTrue(collection.hasUniformPageSizes())
# add a page with other units
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A5')
collection.addPage(page3)
self.assertFalse(collection.hasUniformPageSizes())
def testReflow(self):
"""
Test reflowing pages
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
# should be positioned at origin
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
# second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 307)
# third page, slotted in middle
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A3')
collection.insertPage(page3, 1)
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 737)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 307)
page.setPageSize(QgsLayoutSize(100, 120))
# no update until reflow is called
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 737)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 307)
collection.reflow()
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 560)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 130)
def testInsertPageWithItems(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
# item on pages
shape1 = QgsLayoutItemShape(l)
shape1.attemptResize(QgsLayoutSize(90, 50))
shape1.attemptMove(QgsLayoutPoint(90, 50), page=0)
l.addLayoutItem(shape1)
shape2 = QgsLayoutItemShape(l)
shape2.attemptResize(QgsLayoutSize(110, 50))
shape2.attemptMove(QgsLayoutPoint(100, 150), page=1)
l.addLayoutItem(shape2)
self.assertEqual(shape1.page(), 0)
self.assertEqual(shape2.page(), 1)
# third page, slotted in middle
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A3')
collection.insertPage(page3, 0)
# check item position
self.assertEqual(shape1.page(), 1)
self.assertEqual(shape1.pagePositionWithUnits(), QgsLayoutPoint(90, 50))
self.assertEqual(shape2.page(), 2)
self.assertEqual(shape2.pagePositionWithUnits(), QgsLayoutPoint(100, 150))
def testDeletePageWithItems(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A4')
collection.addPage(page2)
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A4')
collection.addPage(page3)
# item on pages
shape1 = QgsLayoutItemShape(l)
shape1.attemptResize(QgsLayoutSize(90, 50))
shape1.attemptMove(QgsLayoutPoint(90, 50), page=0)
l.addLayoutItem(shape1)
shape2 = QgsLayoutItemShape(l)
shape2.attemptResize(QgsLayoutSize(110, 50))
shape2.attemptMove(QgsLayoutPoint(100, 150), page=2)
l.addLayoutItem(shape2)
self.assertEqual(shape1.page(), 0)
self.assertEqual(shape2.page(), 2)
collection.deletePage(1)
# check item position
self.assertEqual(shape1.page(), 0)
self.assertEqual(shape1.pagePositionWithUnits(), QgsLayoutPoint(90, 50))
self.assertEqual(shape2.page(), 1)
self.assertEqual(shape2.pagePositionWithUnits(), QgsLayoutPoint(100, 150))
def testDeletePageWithItems2(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A4')
collection.addPage(page2)
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A4')
collection.addPage(page3)
# item on pages
shape1 = QgsLayoutItemShape(l)
shape1.attemptResize(QgsLayoutSize(90, 50))
shape1.attemptMove(QgsLayoutPoint(90, 50), page=0)
l.addLayoutItem(shape1)
shape2 = QgsLayoutItemShape(l)
shape2.attemptResize(QgsLayoutSize(110, 50))
shape2.attemptMove(QgsLayoutPoint(100, 150), page=2)
l.addLayoutItem(shape2)
self.assertEqual(shape1.page(), 0)
self.assertEqual(shape2.page(), 2)
collection.deletePage(page2)
# check item position
self.assertEqual(shape1.page(), 0)
self.assertEqual(shape1.pagePositionWithUnits(), QgsLayoutPoint(90, 50))
self.assertEqual(shape2.page(), 1)
self.assertEqual(shape2.pagePositionWithUnits(), QgsLayoutPoint(100, 150))
def testDataDefinedSize(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add some pages
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A5')
collection.addPage(page3)
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 307)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 527)
page.dataDefinedProperties().setProperty(QgsLayoutObject.ItemHeight, QgsProperty.fromExpression('50*3'))
page.refresh()
collection.reflow()
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 160)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 380)
page2.dataDefinedProperties().setProperty(QgsLayoutObject.ItemHeight, QgsProperty.fromExpression('50-20'))
page2.refresh()
collection.reflow()
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 160)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 200)
def testPositionOnPage(self):
"""
Test pageNumberForPoint and positionOnPage
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, -100)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, -1)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 1)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 270)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 1270)), 0)
self.assertEqual(collection.positionOnPage(QPointF(-100, -100)), QPointF(-100, -100))
self.assertEqual(collection.positionOnPage(QPointF(-100, -1)), QPointF(-100, -1))
self.assertEqual(collection.positionOnPage(QPointF(-100, 1)), QPointF(-100, 1))
self.assertEqual(collection.positionOnPage(QPointF(-100, 270)), QPointF(-100, 270))
self.assertEqual(collection.positionOnPage(QPointF(-100, 1270)), QPointF(-100, 973))
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, -100)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, -1)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 1)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 270)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 370)), 1)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 1270)), 1)
self.assertEqual(collection.positionOnPage(QPointF(-100, -100)), QPointF(-100, -100))
self.assertEqual(collection.positionOnPage(QPointF(-100, -1)), QPointF(-100, -1))
self.assertEqual(collection.positionOnPage(QPointF(-100, 1)), QPointF(-100, 1))
self.assertEqual(collection.positionOnPage(QPointF(-100, 270)), QPointF(-100, 270))
self.assertEqual(collection.positionOnPage(QPointF(-100, 370)), QPointF(-100, 63))
self.assertEqual(collection.positionOnPage(QPointF(-100, 1270)), QPointF(-100, 753))
def testPredictionPageNumberForPoint(self):
"""
Test predictPageNumberForPoint
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# no crash if no pages
self.assertEqual(collection.predictPageNumberForPoint(QPointF(1, 1)), 0)
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize(QgsLayoutSize(100, 100))
collection.addPage(page)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -100)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -1)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 20)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 120)), 1)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 230)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 350)), 3)
page2 = QgsLayoutItemPage(l)
page2.setPageSize(QgsLayoutSize(100, 50))
collection.addPage(page2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -100)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -1)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 20)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 120)), 1)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 230)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 280)), 3)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 340)), 4)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 370)), 5)
page3 = QgsLayoutItemPage(l)
page3.setPageSize(QgsLayoutSize(100, 200))
collection.addPage(page3)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -100)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -1)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 20)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 120)), 1)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 230)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 280)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 340)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 370)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 470)), 3)
def testPageAtPoint(self):
"""
Test pageAtPoint
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
self.assertFalse(collection.pageAtPoint(QPointF(0, 0)))
self.assertFalse(collection.pageAtPoint(QPointF(10, 10)))
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertFalse(collection.pageAtPoint(QPointF(10, -1)))
self.assertEqual(collection.pageAtPoint(QPointF(1, 1)), page)
self.assertEqual(collection.pageAtPoint(QPointF(10, 10)), page)
self.assertFalse(collection.pageAtPoint(QPointF(-10, 10)))
self.assertFalse(collection.pageAtPoint(QPointF(1000, 10)))
self.assertFalse(collection.pageAtPoint(QPointF(10, -10)))
self.assertFalse(collection.pageAtPoint(QPointF(10, 1000)))
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertEqual(collection.pageAtPoint(QPointF(1, 1)), page)
self.assertEqual(collection.pageAtPoint(QPointF(10, 10)), page)
self.assertFalse(collection.pageAtPoint(QPointF(-10, 10)))
self.assertFalse(collection.pageAtPoint(QPointF(1000, 10)))
self.assertFalse(collection.pageAtPoint(QPointF(10, -10)))
self.assertEqual(collection.pageAtPoint(QPointF(10, 330)), page2)
self.assertEqual(collection.pageAtPoint(QPointF(10, 500)), page2)
self.assertFalse(collection.pageAtPoint(QPointF(10, 600)))
def testPagePositionToLayout(self):
"""
Test pagePositionToLayoutPosition
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# invalid pages
self.assertEqual(collection.pagePositionToLayoutPosition(-1, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(100, QgsLayoutPoint(1, 1)), QPointF(1, 1))
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
# invalid pages
self.assertEqual(collection.pagePositionToLayoutPosition(-1, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(1, QgsLayoutPoint(1, 1)), QPointF(1, 1))
# valid page
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(5, 6)), QPointF(5, 6))
self.assertEqual(
collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters)),
QPointF(50, 60))
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
# invalid pages
self.assertEqual(collection.pagePositionToLayoutPosition(-1, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(3, QgsLayoutPoint(1, 1)), QPointF(1, 1))
# valid pages
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(5, 6)), QPointF(5, 6))
self.assertEqual(
collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters)),
QPointF(50, 60))
self.assertEqual(collection.pagePositionToLayoutPosition(1, QgsLayoutPoint(1, 1)), QPointF(1, 308.0))
self.assertEqual(collection.pagePositionToLayoutPosition(1, QgsLayoutPoint(5, 6)), QPointF(5, 313.0))
self.assertEqual(
collection.pagePositionToLayoutPosition(1, QgsLayoutPoint(0.5, 0.6, QgsUnitTypes.LayoutCentimeters)),
QPointF(5, 313.0))
def testPagePositionToAbsolute(self):
"""
Test pagePositionToAbsolute
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# invalid pages
self.assertEqual(collection.pagePositionToAbsolute(-1, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(100, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
# invalid pages
self.assertEqual(collection.pagePositionToAbsolute(-1, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(1, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
# valid page
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(5, 6)), QgsLayoutPoint(5, 6))
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters)),
QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters))
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
# invalid pages
self.assertEqual(collection.pagePositionToAbsolute(-1, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(3, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
# valid pages
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(5, 6)), QgsLayoutPoint(5, 6))
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters)),
QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(collection.pagePositionToAbsolute(1, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 308.0))
self.assertEqual(collection.pagePositionToAbsolute(1, QgsLayoutPoint(5, 6)), QgsLayoutPoint(5, 313.0))
self.assertEqual(collection.pagePositionToAbsolute(1, QgsLayoutPoint(0.5, 0.6, QgsUnitTypes.LayoutCentimeters)),
QgsLayoutPoint(0.5, 31.3, QgsUnitTypes.LayoutCentimeters))
def testVisiblePages(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
self.assertFalse(collection.visiblePages(QRectF(0, 0, 10, 10)))
self.assertFalse(collection.visiblePageNumbers(QRectF(0, 0, 10, 10)))
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertFalse(collection.visiblePages(QRectF(-10, -10, 5, 5)))
self.assertFalse(collection.visiblePageNumbers(QRectF(-10, -10, 5, 5)))
self.assertEqual(collection.visiblePages(QRectF(-10, -10, 15, 15)), [page])
self.assertEqual(collection.visiblePageNumbers(QRectF(-10, -10, 15, 15)), [0])
self.assertEqual(collection.visiblePages(QRectF(200, 200, 115, 115)), [page])
self.assertEqual(collection.visiblePageNumbers(QRectF(200, 200, 115, 115)), [0])
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertFalse(collection.visiblePages(QRectF(-10, -10, 5, 5)))
self.assertFalse(collection.visiblePageNumbers(QRectF(-10, -10, 5, 5)))
self.assertEqual(collection.visiblePages(QRectF(-10, -10, 15, 15)), [page])
self.assertEqual(collection.visiblePageNumbers(QRectF(-10, -10, 15, 15)), [0])
self.assertEqual(collection.visiblePages(QRectF(200, 200, 115, 115)), [page])
self.assertEqual(collection.visiblePageNumbers(QRectF(200, 200, 115, 115)), [0])
self.assertEqual(collection.visiblePages(QRectF(200, 200, 115, 615)), [page])
self.assertEqual(collection.visiblePageNumbers(QRectF(200, 200, 115, 115)), [0])
self.assertEqual(collection.visiblePages(QRectF(100, 200, 115, 615)), [page, page2])
self.assertEqual(collection.visiblePageNumbers(QRectF(100, 200, 115, 115)), [0, 1])
self.assertEqual(collection.visiblePages(QRectF(100, 310, 115, 615)), [page2])
self.assertEqual(collection.visiblePageNumbers(QRectF(100, 310, 115, 115)), [1])
def testTakePage(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add some pages
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertEqual(collection.pageCount(), 2)
self.assertFalse(collection.takePage(None))
self.assertEqual(collection.takePage(page), page)
self.assertFalse(sip.isdeleted(page))
self.assertEqual(collection.pageCount(), 1)
self.assertEqual(collection.pages(), [page2])
self.assertEqual(collection.page(0), page2)
self.assertEqual(collection.takePage(page2), page2)
self.assertFalse(sip.isdeleted(page2))
self.assertEqual(collection.pageCount(), 0)
self.assertEqual(collection.pages(), [])
self.assertFalse(collection.page(0))
def testReadWriteXml(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
fill = QgsSimpleFillSymbolLayer()
fill_symbol = QgsFillSymbol()
fill_symbol.changeSymbolLayer(0, fill)
fill.setColor(Qt.green)
fill.setStrokeColor(Qt.red)
fill.setStrokeWidth(6)
collection.setPageStyleSymbol(fill_symbol)
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
self.assertEqual(collection.pageNumber(page), -1)
collection.addPage(page)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
self.assertTrue(collection.writeXml(elem, doc, QgsReadWriteContext()))
l2 = QgsLayout(p)
collection2 = l2.pageCollection()
self.assertTrue(collection2.readXml(elem.firstChildElement(), doc, QgsReadWriteContext()))
self.assertEqual(collection2.pageCount(), 2)
self.assertEqual(collection2.page(0).pageSize().width(), 210)
self.assertEqual(collection2.page(0).pageSize().height(), 297)
self.assertEqual(collection2.page(1).pageSize().width(), 148)
self.assertEqual(collection2.page(1).pageSize().height(), 210)
self.assertEqual(collection2.pageStyleSymbol().symbolLayer(0).color().name(), '#00ff00')
self.assertEqual(collection2.pageStyleSymbol().symbolLayer(0).strokeColor().name(), '#ff0000')
def testUndoRedo(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertEqual(collection.pageCount(), 1)
l.undoStack().stack().undo()
self.assertEqual(collection.pageCount(), 0)
l.undoStack().stack().redo()
self.assertEqual(collection.pageCount(), 1)
# make sure page is accessible
self.assertEqual(collection.page(0).pageSize().width(), 210)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
# delete page
collection.deletePage(collection.page(0))
self.assertEqual(collection.pageCount(), 1)
l.undoStack().stack().undo()
self.assertEqual(collection.pageCount(), 2)
# make sure pages are accessible
self.assertEqual(collection.page(0).pageSize().width(), 210)
self.assertEqual(collection.page(1).pageSize().width(), 148)
l.undoStack().stack().undo()
self.assertEqual(collection.pageCount(), 1)
l.undoStack().stack().undo()
self.assertEqual(collection.pageCount(), 0)
l.undoStack().stack().redo()
self.assertEqual(collection.pageCount(), 1)
self.assertEqual(collection.page(0).pageSize().width(), 210)
l.undoStack().stack().redo()
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(collection.page(0).pageSize().width(), 210)
self.assertEqual(collection.page(1).pageSize().width(), 148)
l.undoStack().stack().redo()
self.assertEqual(collection.pageCount(), 1)
self.assertEqual(collection.page(0).pageSize().width(), 148)
def testResizeToContents(self):
p = QgsProject()
l = QgsLayout(p)
# no items -- no crash!
l.pageCollection().resizeToContents(QgsMargins(1, 2, 3, 4), QgsUnitTypes.LayoutCentimeters)
page = QgsLayoutItemPage(l)
page.setPageSize("A5", QgsLayoutItemPage.Landscape)
l.pageCollection().addPage(page)
# no items, no change
l.pageCollection().resizeToContents(QgsMargins(1, 2, 3, 4), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(l.pageCollection().pageCount(), 1)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().width(), 210.0, 2)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().height(), 148.0, 2)
p = QgsProject()
l = QgsLayout(p)
shape1 = QgsLayoutItemShape(l)
shape1.attemptResize(QgsLayoutSize(90, 50))
shape1.attemptMove(QgsLayoutPoint(90, 50))
shape1.setItemRotation(45, False)
l.addLayoutItem(shape1)
shape2 = QgsLayoutItemShape(l)
shape2.attemptResize(QgsLayoutSize(110, 50))
shape2.attemptMove(QgsLayoutPoint(100, 150), True, False, 0)
l.addLayoutItem(shape2)
shape3 = QgsLayoutItemShape(l)
l.addLayoutItem(shape3)
shape3.attemptResize(QgsLayoutSize(50, 100))
shape3.attemptMove(QgsLayoutPoint(210, 250), True, False, 0)
shape4 = QgsLayoutItemShape(l)
l.addLayoutItem(shape4)
shape4.attemptResize(QgsLayoutSize(50, 30))
shape4.attemptMove(QgsLayoutPoint(10, 340), True, False, 0)
shape4.setVisibility(False)
# resize with no existing pages
l.pageCollection().resizeToContents(QgsMargins(1, 2, 3, 4), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(l.pageCollection().pageCount(), 1)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().width(), 290.3, 2)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().height(), 380.36, 2)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertAlmostEqual(shape1.positionWithUnits().x(), 90.15, 2)
self.assertAlmostEqual(shape1.positionWithUnits().y(), 20.21, 2)
self.assertAlmostEqual(shape2.positionWithUnits().x(), 100.15, 2)
self.assertAlmostEqual(shape2.positionWithUnits().y(), 120.21, 2)
self.assertAlmostEqual(shape3.positionWithUnits().x(), 210.15, 2)
self.assertAlmostEqual(shape3.positionWithUnits().y(), 220.21, 2)
self.assertAlmostEqual(shape4.positionWithUnits().x(), 10.15, 2)
self.assertAlmostEqual(shape4.positionWithUnits().y(), 310.21, 2)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize("A4", QgsLayoutItemPage.Landscape)
l.pageCollection().addPage(page2)
# add some guides
g1 = QgsLayoutGuide(Qt.Horizontal, QgsLayoutMeasurement(2.5, QgsUnitTypes.LayoutCentimeters),
l.pageCollection().page(0))
l.guides().addGuide(g1)
g2 = QgsLayoutGuide(Qt.Vertical, QgsLayoutMeasurement(4.5, QgsUnitTypes.LayoutCentimeters),
l.pageCollection().page(0))
l.guides().addGuide(g2)
# second page should be removed
l.pageCollection().resizeToContents(QgsMargins(0, 0, 0, 0), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(l.pageCollection().pageCount(), 1)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().width(), 250.3, 2)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().height(), 320.36, 2)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertAlmostEqual(g1.position().length(), 0.5, 2)
self.assertAlmostEqual(g2.position().length(), 3.5, 2)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -338,861,353,723,335,200 | 39.780364 | 120 | 0.649128 | false |
linea-it/dri | api/userquery/create_table_as.py | 1 | 7499 | from __future__ import absolute_import, unicode_literals
import logging
import os
import traceback
from urllib.parse import urljoin
from coadd.models import Release
from common.notify import Notify
from django.conf import settings
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.utils import timezone
from lib.sqlalchemy_wrapper import DBBase
from product.models import Product
from userquery.models import Job, Table
from .email import Email
from .target_viewer import TargetViewer
class CreateTableAs:
def __init__(self, job_id, user_id, table_name, table_display_name, release_id, release_name, associate_target_viewer, task_id, schema=None):
# Get an instance of a logger
self.logger = logging.getLogger('userquery')
self.user_id = user_id
self.job_id = job_id
self.table_name = table_name
self.table_display_name = table_display_name
self.release_id = release_id
self.release_name = release_name
self.associate_target_viewer = associate_target_viewer
self.schema = schema
self.task_id = task_id
self.rows_count = 0
self.table = None
        # Flag indicating whether the table was created in this run; it prevents pre-existing tables from being dropped.
self.table_created = False
def do_all(self, ):
self.logger.info("Starting User Query Job ID: [%s]" % self.job_id)
        # Retrieve the user information.
self.user = User.objects.get(pk=self.user_id)
        # Retrieve the job information.
self.job = Job.objects.get(pk=self.job_id)
        # Retrieve the release information.
self.release = Release.objects.get(pk=self.release_id)
self.logger.debug("Release: %s" % self.release)
        # Set the job status to Running.
self._update_job_status_before_table_creation()
        # Send the "start" notification e-mail to the user.
self._notify_by_email_start()
try:
            # Create the table, fill it with data and create the indexes.
self._create_table_by_job_id()
            # Register the table.
            # TODO: add a relation between the table and the job that created it.
self.table = Table(table_name=self.table_name,
display_name=self.job.display_name,
owner=self.job.owner,
schema=self.schema,
release=self.release,
tbl_num_objects=self.rows_count)
self.table.save()
            # Register the new table as a product in the Target Viewer.
self._associate_target_viewer()
            # Set the job status to Done.
self.job.job_status = 'ok'
self.job.save()
            # Notify the user that the job finished successfully.
self._notify_by_email_finish()
except Exception as e:
            # Set the job status to Error.
self.job.job_status = 'er'
self.job.error = str(e)
self.job.save()
            # Notify the user that the job finished with an error.
self._notify_user_by_email_failure(e)
finally:
            # Store the datetime at which the job finished.
self.job.end_date_time = timezone.now()
self.job.save()
self.logger.info("Job completed Job ID: [%s] Job Status: [%s]" % (self.job.pk, self.job.job_status))
def _update_job_status_before_table_creation(self):
self.job.job_status = 'rn'
self.job.job_id = self.task_id
self.job.save()
self.logger.info("Changed Job status to Running")
def _create_table_by_job_id(self):
self.logger.info("Creating the table")
try:
db = DBBase('catalog')
            # Attach the userquery logger to the database wrapper, so the
            # queries issued by this instance are logged to the userquery log.
db.setLogger(self.logger)
            # If no schema was given for the table creation, use the connection schema.
if self.schema is None:
self.schema = db.schema
self.logger.debug("Schema: %s" % self.schema)
self.logger.debug("Tablename: %s" % self.table_name)
            # Check whether the table already exists.
if db.table_exists(self.table_name, self.schema):
raise Exception("This %s table already exists." % self.table_name)
            # Create the table.
db.create_table_raw_sql(self.table_name, self.job.sql_sentence, schema=self.schema,
timeout=self.job.timeout)
            # Flag indicating that the table was created by this job; from here on it must be dropped if an error occurs.
self.table_created = True
            # Create the primary key column.
db.create_auto_increment_column(self.table_name, 'meta_id', schema=self.schema)
            # Total number of rows added to the table.
self.rows_count = db.get_count(self.table_name, schema=self.schema)
self.logger.debug("Rows Count: %s" % self.rows_count)
self.logger.info("Table Created successfully.")
except Exception as e:
trace = traceback.format_exc()
self.logger.error(trace)
self.logger.error("Table creation failed: %s" % e)
            # If the table was created before the error, it must be dropped.
            self.logger.info("Checking if the table was created; if it has been, it will be dropped.")
if self.table_created and db.table_exists(self.table_name, schema=self.schema):
self.logger.info("Trying to drop the table.")
try:
db.drop_table(self.table_name, schema=self.schema)
self.logger.info("Table successfully droped")
                except Exception as drop_error:
                    # Do not rebind ``e`` here: under Python 3 that would delete
                    # the original exception before the re-raise below.
                    self.logger.error("Failed to drop the table. %s" % drop_error)
            raise e
def _associate_target_viewer(self):
if self.associate_target_viewer:
TargetViewer.register(user=self.user, table_pk=self.table.pk, release_name=self.release_name)
def _notify_by_email_start(self):
Email().send({
"email": self.user.email,
"template": "job_notification_start.html",
"subject": "The creation of your table is being processed",
"username": self.user.username,
"id_job": self.job.pk,
"table_name": self.table_name,
"table_display_name": self.table_display_name
})
def _notify_by_email_finish(self):
Email().send({
"email": self.user.email,
"template": "job_notification_finish.html",
"subject": "The table creation is finished",
"username": self.user.username,
"id_job": self.job.pk,
"table_name": self.table_name,
"table_display_name": self.table_display_name
})
def _notify_user_by_email_failure(self, error_message):
Email().send({
"email": self.user.email,
"template": "job_notification_error.html",
"subject": "The table creation failed",
"username": self.user.username,
"table_name": self.table_name,
"error_message": error_message
})
| gpl-3.0 | -139,530,181,073,171,940 | 36.777778 | 145 | 0.593984 | false |
DirectXMan12/oslo.reports | oslo_reports/tests/test_guru_meditation_report.py | 2 | 8214 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import datetime
import os
import re
import signal
import sys
# needed to get greenthreads
import fixtures
import greenlet
import mock
from oslotest import base
import six
from oslo_reports import guru_meditation_report as gmr
from oslo_reports.models import with_default_views as mwdv
class FakeVersionObj(object):
def vendor_string(self):
return 'Cheese Shoppe'
def product_string(self):
return 'Sharp Cheddar'
def version_string_with_package(self):
return '1.0.0'
def skip_body_lines(start_line, report_lines):
curr_line = start_line
while (len(report_lines[curr_line]) == 0
or report_lines[curr_line][0] != '='):
curr_line += 1
return curr_line
class TestGuruMeditationReport(base.BaseTestCase):
def setUp(self):
super(TestGuruMeditationReport, self).setUp()
self.curr_g = greenlet.getcurrent()
self.report = gmr.TextGuruMeditation(FakeVersionObj())
self.old_stderr = None
def test_basic_report(self):
report_lines = self.report.run().split('\n')
target_str_header = ['========================================================================', # noqa
'==== Guru Meditation ====', # noqa
'========================================================================', # noqa
'||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||', # noqa
'',
'',
'========================================================================', # noqa
'==== Package ====', # noqa
'========================================================================', # noqa
'product = Sharp Cheddar',
'vendor = Cheese Shoppe',
'version = 1.0.0',
'========================================================================', # noqa
'==== Threads ====', # noqa
'========================================================================'] # noqa
# first the header and version info...
self.assertEqual(target_str_header,
report_lines[0:len(target_str_header)])
# followed by at least one thread...
# NOTE(zqfan): add an optional '-' because sys._current_frames()
        # may return a negative thread id on 32-bit operating systems.
self.assertTrue(re.match(r'------(\s+)Thread #-?\d+\1\s?------',
report_lines[len(target_str_header)]))
self.assertEqual('', report_lines[len(target_str_header) + 1])
# followed by more thread stuff stuff...
curr_line = skip_body_lines(len(target_str_header) + 2, report_lines)
# followed by at least one green thread
target_str_gt = ['========================================================================', # noqa
'==== Green Threads ====', # noqa
'========================================================================', # noqa
'------ Green Thread ------', # noqa
'']
end_bound = curr_line + len(target_str_gt)
self.assertEqual(target_str_gt,
report_lines[curr_line:end_bound])
# followed by some more green thread stuff
curr_line = skip_body_lines(curr_line + len(target_str_gt),
report_lines)
# followed by the processes header
target_str_p_head = ['========================================================================', # noqa
'==== Processes ====', # noqa
'========================================================================'] # noqa
end_bound = curr_line + len(target_str_p_head)
self.assertEqual(target_str_p_head,
report_lines[curr_line:end_bound])
curr_line += len(target_str_p_head)
# followed by at least one process
self.assertTrue(re.match("Process \d+ \(under \d+\)",
report_lines[curr_line]))
# followed by some more process stuff
curr_line = skip_body_lines(curr_line + 1, report_lines)
# followed finally by the configuration
target_str_config = ['========================================================================', # noqa
'==== Configuration ====', # noqa
'========================================================================', # noqa
'']
end_bound = curr_line + len(target_str_config)
self.assertEqual(target_str_config,
report_lines[curr_line:end_bound])
def test_reg_persistent_section(self):
def fake_gen():
fake_data = {'cheddar': ['sharp', 'mild'],
'swiss': ['with holes', 'with lots of holes'],
'american': ['orange', 'yellow']}
return mwdv.ModelWithDefaultViews(data=fake_data)
gmr.TextGuruMeditation.register_section('Cheese Types', fake_gen)
report_lines = self.report.run()
target_lst = ['========================================================================', # noqa
'==== Cheese Types ====', # noqa
'========================================================================', # noqa
'american = ',
' orange',
' yellow',
'cheddar = ',
' mild',
' sharp',
'swiss = ',
' with holes',
' with lots of holes']
target_str = '\n'.join(target_lst)
self.assertIn(target_str, report_lines)
def test_register_autorun(self):
gmr.TextGuruMeditation.setup_autorun(FakeVersionObj())
self.old_stderr = sys.stderr
sys.stderr = six.StringIO()
os.kill(os.getpid(), signal.SIGUSR1)
self.assertIn('Guru Meditation', sys.stderr.getvalue())
@mock.patch('oslo_utils.timeutils.utcnow',
return_value=datetime.datetime(2014, 1, 1, 12, 0, 0))
def test_register_autorun_log_dir(self, mock_strtime):
log_dir = self.useFixture(fixtures.TempDir()).path
gmr.TextGuruMeditation.setup_autorun(
FakeVersionObj(), "fake-service", log_dir)
os.kill(os.getpid(), signal.SIGUSR1)
with open(os.path.join(
log_dir, "fake-service_gurumeditation_20140101120000")) as df:
self.assertIn('Guru Meditation', df.read())
def tearDown(self):
super(TestGuruMeditationReport, self).tearDown()
if self.old_stderr is not None:
sys.stderr = self.old_stderr
| apache-2.0 | -705,975,913,952,806,100 | 43.16129 | 112 | 0.425128 | false |
jimsimon/sky_engine | build/download_gold_plugin.py | 33 | 1402 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to download LLVM gold plugin from google storage."""
import json
import os
import shutil
import subprocess
import sys
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.insert(0, os.path.join(CHROME_SRC, 'tools'))
import find_depot_tools
DEPOT_PATH = find_depot_tools.add_depot_tools_to_path()
GSUTIL_PATH = os.path.join(DEPOT_PATH, 'gsutil.py')
LLVM_BUILD_PATH = os.path.join(CHROME_SRC, 'third_party', 'llvm-build',
'Release+Asserts')
CLANG_UPDATE_PY = os.path.join(CHROME_SRC, 'tools', 'clang', 'scripts',
'update.py')
CLANG_REVISION = os.popen(CLANG_UPDATE_PY + ' --print-revision').read().rstrip()
CLANG_BUCKET = 'gs://chromium-browser-clang/Linux_x64'
def main():
targz_name = 'llvmgold-%s.tgz' % CLANG_REVISION
remote_path = '%s/%s' % (CLANG_BUCKET, targz_name)
os.chdir(LLVM_BUILD_PATH)
subprocess.check_call(['python', GSUTIL_PATH,
'cp', remote_path, targz_name])
subprocess.check_call(['tar', 'xzf', targz_name])
os.remove(targz_name)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -2,349,984,222,188,039,000 | 30.155556 | 80 | 0.661912 | false |
EricNeedham/assignment-1 | venv/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/pymssql.py | 22 | 2968 | # mssql/pymssql.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
:connectstring: mssql+pymssql://<username>:<password>@<freetds_name>?\
charset=utf8
:url: http://pymssql.org/
pymssql is a Python module that provides a Python DBAPI interface around
`FreeTDS <http://www.freetds.org/>`_. Compatible builds are available for
Linux, MacOSX and Windows platforms.
"""
from .base import MSDialect
from ... import types as sqltypes, util, processors
import re
class _MSNumeric_pymssql(sqltypes.Numeric):
def result_processor(self, dialect, type_):
if not self.asdecimal:
return processors.to_float
else:
return sqltypes.Numeric.result_processor(self, dialect, type_)
class MSDialect_pymssql(MSDialect):
supports_sane_rowcount = False
driver = 'pymssql'
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric: _MSNumeric_pymssql,
sqltypes.Float: sqltypes.Float,
}
)
@classmethod
def dbapi(cls):
module = __import__('pymssql')
        # pymssql doesn't have a Binary method. We use string.
# TODO: monkeypatching here is less than ideal
module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (1, ):
util.warn("The pymssql dialect expects at least "
"the 1.0 series of the pymssql DBAPI.")
return module
def __init__(self, **params):
super(MSDialect_pymssql, self).__init__(**params)
self.use_scope_identity = True
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version")
m = re.match(
r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
if m:
return tuple(int(x) for x in m.group(1, 2, 3, 4))
else:
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
opts.update(url.query)
port = opts.pop('port', None)
if port and 'host' in opts:
opts['host'] = "%s:%s" % (opts['host'], port)
return [[], opts]
def is_disconnect(self, e, connection, cursor):
for msg in (
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed"
):
if msg in str(e):
return True
else:
return False
dialect = MSDialect_pymssql
| mit | -647,246,458,945,361,900 | 30.574468 | 74 | 0.598383 | false |
Micronaet/micronaet-mx8 | mx_partner_zone/zone.py | 1 | 2583 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOrder(orm.Model):
""" Model name: Sale Order
"""
_inherit = 'sale.order'
def _get_zone_from_partner(self, cr, uid, ids, context=None):
        ''' Store trigger: return the sale orders to recompute when the
            zone of one of the given partners changes.
        '''
sale_pool = self.pool['sale.order']
order_ids = sale_pool.search(cr, uid, [
('partner_id', 'in', ids)], context=context)
return order_ids
def _get_zone_from_order(self, cr, uid, ids, context=None):
        ''' Store trigger: return the sale orders to recompute when the
            order itself (its partner_id) changes.
        '''
return ids
_columns = {
'zone_id': fields.related(
'partner_id', 'zone_id', type='many2one',
relation='res.partner.zone',
store={
'sale.order': (_get_zone_from_order, [
'partner_id'], 10),
'res.partner': (_get_zone_from_partner, [
'zone_id'], 10),
}, string='Zone',
)
}
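    # Added note: the ``store`` triggers above follow the usual OpenERP
    # convention 'model.name': (function_returning_ids_to_update,
    # [trigger_fields], priority) -- i.e. the stored zone_id on the order is
    # recomputed when either the order's partner_id or the partner's zone_id
    # changes.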
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,427,575,919,849,841,700 | 34.875 | 79 | 0.603175 | false |
Perferom/android_external_chromium_org | tools/telemetry/telemetry/page/extensions_profile_creator.py | 24 | 8171 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import platform
import shutil
import socket
import sys
import tempfile
import time
import urllib2
import zipfile
from telemetry.core import util
from telemetry.page import page_set
from telemetry.page import profile_creator
def _ExternalExtensionsPath():
"""Returns the OS-dependent path at which to install the extension deployment
files"""
if platform.system() == 'Darwin':
return os.path.join('/Library', 'Application Support', 'Google', 'Chrome',
'External Extensions')
elif platform.system() == 'Linux':
return os.path.join('/opt', 'google', 'chrome', 'extensions' )
else:
raise NotImplementedError('Extension install on %s is not yet supported' %
platform.system())
def _DownloadExtension(extension_id, output_dir):
"""Download an extension to disk.
Args:
extension_id: the extension id.
output_dir: Directory to download into.
Returns:
Extension file downloaded."""
extension_download_path = os.path.join(output_dir, "%s.crx" % extension_id)
extension_url = (
"https://clients2.google.com/service/update2/crx?response=redirect"
"&x=id%%3D%s%%26lang%%3Den-US%%26uc" % extension_id)
response = urllib2.urlopen(extension_url)
assert(response.getcode() == 200)
with open(extension_download_path, "w") as f:
f.write(response.read())
return extension_download_path
def _GetExtensionInfoFromCRX(crx_path):
"""Parse an extension archive and return information.
Note:
The extension name returned by this function may not be valid
    (e.g. in the case of a localized extension name). Its use is just
meant to be informational.
Args:
crx_path: path to crx archive to look at.
Returns:
Tuple consisting of:
(crx_version, extension_name)"""
crx_zip = zipfile.ZipFile(crx_path)
manifest_contents = crx_zip.read('manifest.json')
decoded_manifest = json.loads(manifest_contents)
crx_version = decoded_manifest['version']
extension_name = decoded_manifest['name']
return (crx_version, extension_name)
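# Illustrative call (added; the path and returned values are made up):
#   _GetExtensionInfoFromCRX('/tmp/abcd.crx') -> ('1.2.3', 'Some Extension')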
class ExtensionsProfileCreator(profile_creator.ProfileCreator):
"""Virtual base class for profile creators that install extensions.
Extensions are installed using the mechanism described in
https://developer.chrome.com/extensions/external_extensions.html .
Subclasses are meant to be run interactively.
"""
def __init__(self):
super(ExtensionsProfileCreator, self).__init__()
typical_25 = os.path.join(util.GetBaseDir(), 'page_sets', 'typical_25.json')
self._page_set = page_set.PageSet.FromFile(typical_25)
# Directory into which the output profile is written.
self._output_profile_path = None
# List of extensions to install.
self._extensions_to_install = []
# Theme to install (if any).
self._theme_to_install = None
# Directory to download extension files into.
self._extension_download_dir = None
# Have the extensions been installed yet?
self._extensions_installed = False
# List of files to delete after run.
self._files_to_cleanup = []
def _PrepareExtensionInstallFiles(self):
"""Download extension archives and create extension install files."""
extensions_to_install = self._extensions_to_install
if self._theme_to_install:
extensions_to_install = extensions_to_install + [self._theme_to_install]
num_extensions = len(extensions_to_install)
if not num_extensions:
raise ValueError("No extensions or themes to install:",
extensions_to_install)
# Create external extensions path if it doesn't exist already.
external_extensions_dir = _ExternalExtensionsPath()
if not os.path.isdir(external_extensions_dir):
os.makedirs(external_extensions_dir)
self._extension_download_dir = tempfile.mkdtemp()
for i in xrange(num_extensions):
extension_id = extensions_to_install[i]
logging.info("Downloading %s - %d/%d" % (
extension_id, (i + 1), num_extensions))
extension_path = _DownloadExtension(extension_id,
self._extension_download_dir)
(version, name) = _GetExtensionInfoFromCRX(extension_path)
extension_info = {'external_crx' : extension_path,
'external_version' : version,
'_comment' : name}
extension_json_path = os.path.join(external_extensions_dir,
"%s.json" % extension_id)
with open(extension_json_path, 'w') as f:
f.write(json.dumps(extension_info))
self._files_to_cleanup.append(extension_json_path)
def _CleanupExtensionInstallFiles(self):
"""Cleanup stray files before exiting."""
logging.info("Cleaning up stray files")
for filename in self._files_to_cleanup:
os.remove(filename)
if self._extension_download_dir:
# Simple sanity check to lessen the impact of a stray rmtree().
if len(self._extension_download_dir.split(os.sep)) < 3:
raise Exception("Path too shallow: %s" % self._extension_download_dir)
shutil.rmtree(self._extension_download_dir)
self._extension_download_dir = None
def CustomizeBrowserOptions(self, options):
self._output_profile_path = options.output_profile_path
def WillRunTest(self):
"""Run before browser starts.
Download extensions and write installation files."""
# Running this script on a corporate network or other managed environment
# could potentially alter the profile contents.
hostname = socket.gethostname()
if hostname.endswith('corp.google.com'):
raise Exception("It appears you are connected to a corporate network "
"(hostname=%s). This script needs to be run off the corp "
"network." % hostname)
prompt = ("\n!!!This script must be run on a fresh OS installation, "
"disconnected from any corporate network. Are you sure you want to "
"continue? (y/N) ")
if (raw_input(prompt).lower() != 'y'):
sys.exit(-1)
self._PrepareExtensionInstallFiles()
def DidRunTest(self, browser, results):
"""Run before exit."""
# Do some basic sanity checks to make sure the profile is complete.
installed_extensions = browser.extensions.keys()
if not len(installed_extensions) == len(self._extensions_to_install):
# Diagnosing errors:
# Too many extensions: Managed environment may be installing additional
# extensions.
raise Exception("Unexpected number of extensions installed in browser",
installed_extensions)
# Check that files on this list exist and have content.
expected_files = [
os.path.join('Default', 'Network Action Predictor')]
for filename in expected_files:
filename = os.path.join(self._output_profile_path, filename)
if not os.path.getsize(filename) > 0:
raise Exception("Profile not complete: %s is zero length." % filename)
self._CleanupExtensionInstallFiles()
def CanRunForPage(self, page):
# No matter how many pages in the pageset, just perform two test iterations.
return page.page_set.pages.index(page) < 2
def MeasurePage(self, _, tab, results):
# Profile setup works in 2 phases:
# Phase 1: When the first page is loaded: we wait for a timeout to allow
# all extensions to install and to prime safe browsing and other
# caches. Extensions may open tabs as part of the install process.
# Phase 2: When the second page loads, page_runner closes all tabs -
# we are left with one open tab, wait for that to finish loading.
# Sleep for a bit to allow safe browsing and other data to load +
# extensions to install.
if not self._extensions_installed:
sleep_seconds = 5 * 60
logging.info("Sleeping for %d seconds." % sleep_seconds)
time.sleep(sleep_seconds)
self._extensions_installed = True
else:
# Phase 2: Wait for tab to finish loading.
for i in xrange(len(tab.browser.tabs)):
t = tab.browser.tabs[i]
t.WaitForDocumentReadyStateToBeComplete()
| bsd-3-clause | 1,752,775,316,776,101,400 | 35.972851 | 80 | 0.693795 | false |
n37r06u3/webpymail | imaplib2/sexp.py | 7 | 4061 | # -*- coding: utf-8 -*-
# imaplib2 python module, meant to be a replacement to the python default
# imaplib module
# Copyright (C) 2008 Helder Guerreiro
## This file is part of imaplib2.
##
## imaplib2 is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## imaplib2 is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with imaplib2. If not, see <http://www.gnu.org/licenses/>.
#
# Helder Guerreiro <[email protected]>
#
# $Id$
#
# Global imports
import re, string
# Regexp
literal_re = re.compile(r'^{(\d+)}\r\n')
simple_re = re.compile(r'^([^ ()]+)')
quoted_re = re.compile(r'^"((?:[^"\\]|\\")*?)"')
# Errors
class SError(Exception): pass
def scan_sexp(text):
'''S-Expression scanner.
    This is a non-recursive version. It relies on the fact that Python lists
    are assigned by reference to assemble the nested s-exp structure.
@param text: text to be scanned.
@type text: s-exp string
@return result: s-exp in a python list.
'''
# Initialization
pos = 0
    length = len(text)
current = ''
result = []
cur_result = result
level = [ cur_result ]
# Scanner
    while pos < length:
# Quoted literal:
if text[pos] == '"':
quoted = quoted_re.match(text[pos:])
if quoted:
cur_result.append( quoted.groups()[0] )
pos += quoted.end() - 1
# Numbered literal:
elif text[pos] == '{':
lit = literal_re.match(text[pos:])
if lit:
start = pos+lit.end()
end = pos+lit.end()+int(lit.groups()[0])
pos = end - 1
cur_result.append( text[ start:end ] )
# Simple literal
elif text[pos] not in '() ':
simple = simple_re.match(text[pos:])
if simple:
tmp = simple.groups()[0]
if tmp.isdigit():
tmp = int(tmp)
elif tmp == 'NIL':
tmp = None
cur_result.append( tmp )
pos += simple.end() - 1
# Level handling, if we find a '(' we must add another list, if we
# find a ')' we must return to the previous list.
elif text[pos] == '(':
cur_result.append([])
cur_result = cur_result[-1]
level.append(cur_result)
elif text[pos] == ')':
try:
cur_result = level[-2]
del level[-1]
except IndexError:
raise SError('Unexpected parenthesis at pos %d' % pos)
pos += 1
return result
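# Illustrative example (added note, not part of the original module): the
# shape of the value scan_sexp() returns for a small input, assuming the
# regexps defined above.
#
#   >>> scan_sexp('(A "b c" 12 (D NIL))')
#   [['A', 'b c', 12, ['D', None]]]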
if __name__ == '__main__':
from time import time
count = 1000
text = '(A NIL {5}\r\n12345 (D E))(F G)'
text = '266 FETCH (FLAGS (\Seen) UID 31608 INTERNALDATE "30-Jan-2008 02:48:01 +0000" RFC822.SIZE 4509 ENVELOPE ("Tue, 29 Jan 2008 14:00:24 +0000" "Aprenda as tXcnicas e os truques da cozinha mais doce..." (("Ediclube" NIL "ediclube" "sigmathis.info")) (("Ediclube" NIL "ediclube" "sigmathis.info")) ((NIL NIL "ediclube" "sigmathis.info")) ((NIL NIL "helder" "example.com")) NIL NIL NIL "<[email protected]>"))'
#text = 'AA 12341 NIL (A NIL "asdasd fffff\\"sdasd" {%d}\r\n%s (D E))(F G)' % ( count, '#' * count)
#text = 'A B (C NIL (D E))(F G)'
itx = 300
rit = xrange(itx)
print 'Test to the s-exp parser:'
print
print 'Non Recursive (%d times):' % itx
a = time()
for i in rit:
scan_sexp(text)
b = time()
print 1000 * (b-a) / itx, 'ms/iter'
print itx, ' --> ', 1000 * (b-a) , 'ms'
print
print scan_sexp(text)
| gpl-3.0 | -6,480,336,893,990,935,000 | 30 | 451 | 0.563901 | false |
nsu/contact-cement | cement/cement/settings.py | 1 | 2079 | """
Django settings for cement project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(=d2w9_ojm%uba)j++)5hr2w70$w^&@n=4$%ipb3*2mx_+ndux'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'contact',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cement.urls'
WSGI_APPLICATION = 'cement.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'cement',
'USER': 'cement',
'PASSWORD': 'cement',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| mit | -4,598,070,259,126,308,400 | 23.174419 | 71 | 0.70178 | false |
yfried/ansible | lib/ansible/modules/cloud/amazon/lambda.py | 12 | 22603 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda
short_description: Manage AWS Lambda functions
description:
- Allows for the management of Lambda functions.
version_added: '2.2'
requirements: [ boto3 ]
options:
name:
description:
- The name you want to assign to the function you are uploading. Cannot be changed.
required: true
state:
description:
- Create or delete Lambda function
default: present
choices: [ 'present', 'absent' ]
runtime:
description:
- The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs.
Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7
- Required when C(state=present)
role:
description:
- The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
resources. You may use the bare ARN if the role belongs to the same AWS account.
- Required when C(state=present)
handler:
description:
- The function within your code that Lambda calls to begin execution
zip_file:
description:
- A .zip file containing your deployment package
- If C(state=present) then either zip_file or s3_bucket must be present.
aliases: [ 'src' ]
s3_bucket:
description:
- Amazon S3 bucket name where the .zip file containing your deployment package is stored
- If C(state=present) then either zip_file or s3_bucket must be present.
- s3_bucket and s3_key are required together
s3_key:
description:
- The Amazon S3 object (the deployment package) key name you want to upload
- s3_bucket and s3_key are required together
s3_object_version:
description:
- The Amazon S3 object (the deployment package) version you want to upload.
description:
description:
- A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
timeout:
description:
- The function maximum execution time in seconds after which Lambda should terminate the function.
default: 3
memory_size:
description:
- The amount of memory, in MB, your Lambda function is given
default: 128
vpc_subnet_ids:
description:
- List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run
the function in a VPC.
vpc_security_group_ids:
description:
- List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used.
environment_variables:
description:
- A dictionary of environment variables the Lambda function is given.
aliases: [ 'environment' ]
version_added: "2.3"
dead_letter_arn:
description:
- The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
version_added: "2.3"
tags:
description:
- tag dict to apply to the function (requires botocore 1.5.40 or above)
version_added: "2.5"
author:
- 'Steyn Huizinga (@steynovich)'
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create Lambda functions
- name: looped creation
lambda:
name: '{{ item.name }}'
state: present
zip_file: '{{ item.zip_file }}'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
vpc_subnet_ids:
- subnet-123abcde
- subnet-edcba321
vpc_security_group_ids:
- sg-123abcde
- sg-edcba321
environment_variables: '{{ item.env_vars }}'
tags:
key1: 'value1'
loop:
- name: HelloWorld
zip_file: hello-code.zip
env_vars:
key1: "first"
key2: "second"
- name: ByeBye
zip_file: bye-code.zip
env_vars:
key1: "1"
key2: "2"
# To remove previously added tags, pass an empty dict
- name: remove tags
lambda:
name: 'Lambda function'
state: present
zip_file: 'code.zip'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
tags: {}
# Basic Lambda function deletion
- name: Delete Lambda functions HelloWorld and ByeBye
lambda:
name: '{{ item }}'
state: absent
loop:
- HelloWorld
- ByeBye
'''
RETURN = '''
code:
description: the lambda function location returned by get_function in boto3
returned: success
type: dict
sample:
{
'location': 'a presigned S3 URL',
'repository_type': 'S3',
}
configuration:
description: the lambda function metadata returned by get_function in boto3
returned: success
type: dict
sample:
{
'code_sha256': 'SHA256 hash',
'code_size': 123,
'description': 'My function',
'environment': {
'variables': {
'key': 'value'
}
},
'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1',
'function_name': 'myFunction',
'handler': 'index.handler',
'last_modified': '2017-08-01T00:00:00.000+0000',
'memory_size': 128,
'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution',
'runtime': 'nodejs6.10',
'timeout': 3,
'version': '1',
'vpc_config': {
'security_group_ids': [],
'subnet_ids': []
}
}
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import compare_aws_tags
import base64
import hashlib
import re
import traceback
try:
from botocore.exceptions import ClientError, BotoCoreError, ValidationError, ParamValidationError
except ImportError:
pass # protected by AnsibleAWSModule
def get_account_id(module, region=None, endpoint=None, **aws_connect_kwargs):
"""return the account id we are currently working on
    get_account_id tries to find out the account that we are working
    on. It's not guaranteed that this will be easy so we try in
    several different ways. Giving either IAM or STS privileges to
the account should be enough to permit this.
"""
account_id = None
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts',
region=region, endpoint=endpoint, **aws_connect_kwargs)
account_id = sts_client.get_caller_identity().get('Account')
except ClientError:
try:
iam_client = boto3_conn(module, conn_type='client', resource='iam',
region=region, endpoint=endpoint, **aws_connect_kwargs)
account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
except ClientError as e:
if (e.response['Error']['Code'] == 'AccessDenied'):
except_msg = to_native(e.message)
                account_id = re.search(r"arn:aws:iam::([0-9]{12,32}):\w+/", except_msg).group(1)
if account_id is None:
module.fail_json_aws(e, msg="getting account information")
except Exception as e:
module.fail_json_aws(e, msg="getting account information")
return account_id
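# Minimal usage sketch (added note, mirroring how main() below uses this
# helper; not a new API): the account id is only needed to expand a bare IAM
# role name into a full ARN.
#
#   account_id = get_account_id(module, region=region, **aws_connect_kwargs)
#   role_arn = 'arn:aws:iam::{0}:role/{1}'.format(account_id, role)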
def get_current_function(connection, function_name, qualifier=None):
try:
if qualifier is not None:
return connection.get_function(FunctionName=function_name, Qualifier=qualifier)
return connection.get_function(FunctionName=function_name)
except ClientError as e:
try:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return None
except (KeyError, AttributeError):
pass
raise e
def sha256sum(filename):
hasher = hashlib.sha256()
with open(filename, 'rb') as f:
hasher.update(f.read())
code_hash = hasher.digest()
code_b64 = base64.b64encode(code_hash)
hex_digest = code_b64.decode('utf-8')
return hex_digest
def set_tag(client, module, tags, function):
if not hasattr(client, "list_tags"):
module.fail_json(msg="Using tags requires botocore 1.5.40 or above")
changed = False
arn = function['Configuration']['FunctionArn']
try:
current_tags = client.list_tags(Resource=arn).get('Tags', {})
except ClientError as e:
module.fail_json(msg="Unable to list tags: {0}".format(to_native(e)),
exception=traceback.format_exc())
tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True)
try:
if tags_to_remove:
client.untag_resource(
Resource=arn,
TagKeys=tags_to_remove
)
changed = True
if tags_to_add:
client.tag_resource(
Resource=arn,
Tags=tags_to_add
)
changed = True
except ClientError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc())
return changed
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
runtime=dict(),
role=dict(),
handler=dict(),
zip_file=dict(aliases=['src']),
s3_bucket=dict(),
s3_key=dict(),
s3_object_version=dict(),
description=dict(default=''),
timeout=dict(type='int', default=3),
memory_size=dict(type='int', default=128),
vpc_subnet_ids=dict(type='list'),
vpc_security_group_ids=dict(type='list'),
environment_variables=dict(type='dict'),
dead_letter_arn=dict(),
tags=dict(type='dict'),
)
mutually_exclusive = [['zip_file', 's3_key'],
['zip_file', 's3_bucket'],
['zip_file', 's3_object_version']]
required_together = [['s3_key', 's3_bucket'],
['vpc_subnet_ids', 'vpc_security_group_ids']]
required_if = [['state', 'present', ['runtime', 'handler', 'role']]]
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
required_if=required_if)
name = module.params.get('name')
state = module.params.get('state').lower()
runtime = module.params.get('runtime')
role = module.params.get('role')
handler = module.params.get('handler')
s3_bucket = module.params.get('s3_bucket')
s3_key = module.params.get('s3_key')
s3_object_version = module.params.get('s3_object_version')
zip_file = module.params.get('zip_file')
description = module.params.get('description')
timeout = module.params.get('timeout')
memory_size = module.params.get('memory_size')
vpc_subnet_ids = module.params.get('vpc_subnet_ids')
vpc_security_group_ids = module.params.get('vpc_security_group_ids')
environment_variables = module.params.get('environment_variables')
dead_letter_arn = module.params.get('dead_letter_arn')
tags = module.params.get('tags')
check_mode = module.check_mode
changed = False
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
try:
client = boto3_conn(module, conn_type='client', resource='lambda',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (ClientError, ValidationError) as e:
module.fail_json_aws(e, msg="Trying to connect to AWS")
if state == 'present':
if role.startswith('arn:aws:iam'):
role_arn = role
else:
# get account ID and assemble ARN
account_id = get_account_id(module, region=region, endpoint=ec2_url, **aws_connect_kwargs)
role_arn = 'arn:aws:iam::{0}:role/{1}'.format(account_id, role)
# Get function configuration if present, False otherwise
current_function = get_current_function(client, name)
# Update existing Lambda function
if state == 'present' and current_function:
# Get current state
current_config = current_function['Configuration']
current_version = None
# Update function configuration
func_kwargs = {'FunctionName': name}
# Update configuration if needed
if role_arn and current_config['Role'] != role_arn:
func_kwargs.update({'Role': role_arn})
if handler and current_config['Handler'] != handler:
func_kwargs.update({'Handler': handler})
if description and current_config['Description'] != description:
func_kwargs.update({'Description': description})
if timeout and current_config['Timeout'] != timeout:
func_kwargs.update({'Timeout': timeout})
if memory_size and current_config['MemorySize'] != memory_size:
func_kwargs.update({'MemorySize': memory_size})
if (environment_variables is not None) and (current_config.get(
'Environment', {}).get('Variables', {}) != environment_variables):
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn is not None:
if current_config.get('DeadLetterConfig'):
if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
else:
if dead_letter_arn != "":
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# Check for unsupported mutation
if current_config['Runtime'] != runtime:
module.fail_json(msg='Cannot change runtime. Please recreate the function')
# If VPC configuration is desired
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
if 'VpcConfig' in current_config:
# Compare VPC config with current config
current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']
subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
new_vpc_config = {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}
func_kwargs.update({'VpcConfig': new_vpc_config})
else:
# No VPC configuration is desired, assure VPC config is empty when present in current config
if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
# Upload new configuration if configuration has changed
if len(func_kwargs) > 1:
try:
if not check_mode:
response = client.update_function_configuration(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to update lambda configuration")
# Update code configuration
code_kwargs = {'FunctionName': name, 'Publish': True}
# Update S3 location
if s3_bucket and s3_key:
# If function is stored on S3 always update
code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})
# If S3 Object Version is given
if s3_object_version:
code_kwargs.update({'S3ObjectVersion': s3_object_version})
# Compare local checksum, update remote code when different
elif zip_file:
local_checksum = sha256sum(zip_file)
remote_checksum = current_config['CodeSha256']
# Only upload new code when local code is different compared to the remote code
if local_checksum != remote_checksum:
try:
with open(zip_file, 'rb') as f:
encoded_zip = f.read()
code_kwargs.update({'ZipFile': encoded_zip})
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
# Tag Function
if tags is not None:
if set_tag(client, module, tags, current_function):
changed = True
# Upload new code if needed (e.g. code checksum has changed)
if len(code_kwargs) > 2:
try:
if not check_mode:
response = client.update_function_code(**code_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to upload new code")
# Describe function code and configuration
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after updating')
# We're done
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
# Function doesn't exists, create new Lambda function
elif state == 'present':
if s3_bucket and s3_key:
# If function is stored on S3
code = {'S3Bucket': s3_bucket,
'S3Key': s3_key}
if s3_object_version:
code.update({'S3ObjectVersion': s3_object_version})
elif zip_file:
# If function is stored in local zipfile
try:
with open(zip_file, 'rb') as f:
zip_content = f.read()
code = {'ZipFile': zip_content}
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
else:
module.fail_json(msg='Either S3 object or path to zipfile required')
func_kwargs = {'FunctionName': name,
'Publish': True,
'Runtime': runtime,
'Role': role_arn,
'Code': code,
'Timeout': timeout,
'MemorySize': memory_size,
}
if description is not None:
func_kwargs.update({'Description': description})
if handler is not None:
func_kwargs.update({'Handler': handler})
if environment_variables:
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# If VPC configuration is given
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}})
# Finally try to create function
current_version = None
try:
if not check_mode:
response = client.create_function(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to create function")
# Tag Function
if tags is not None:
if set_tag(client, module, tags, get_current_function(client, name)):
changed = True
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after creating')
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
# Delete existing Lambda function
if state == 'absent' and current_function:
try:
if not check_mode:
client.delete_function(FunctionName=name)
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to delete Lambda function")
module.exit_json(changed=changed)
# Function already absent, do nothing
elif state == 'absent':
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,696,605,933,043,684,000 | 37.116358 | 152 | 0.610671 | false |
deepsrijit1105/edx-platform | common/lib/xmodule/xmodule/modulestore/inheritance.py | 4 | 14930 | """
Support for inheritance of fields down an XBlock hierarchy.
"""
from __future__ import absolute_import
from django.conf import settings
from xmodule.partitions.partitions import UserPartition
from xblock.fields import Scope, Boolean, String, Float, XBlockMixin, Dict, Integer, List
from xblock.runtime import KeyValueStore, KvsFieldData
from xmodule.fields import Date, Timedelta
from ..course_metadata_utils import DEFAULT_START_DATE
# Make '_' a no-op so we can scrape strings
# Using lambda instead of `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class UserPartitionList(List):
"""Special List class for listing UserPartitions"""
def from_json(self, values):
return [UserPartition.from_json(v) for v in values]
def to_json(self, values):
return [user_partition.to_json()
for user_partition in values]
class InheritanceMixin(XBlockMixin):
"""Field definitions for inheritable fields."""
graded = Boolean(
help="Whether this module contributes to the final course grade",
scope=Scope.settings,
default=False,
)
start = Date(
help="Start time when this module is visible",
default=DEFAULT_START_DATE,
scope=Scope.settings
)
due = Date(
display_name=_("Due Date"),
help=_("Enter the default date by which problems are due."),
scope=Scope.settings,
)
visible_to_staff_only = Boolean(
help=_("If true, can be seen only by course staff, regardless of start date."),
default=False,
scope=Scope.settings,
)
course_edit_method = String(
display_name=_("Course Editor"),
help=_("Enter the method by which this course is edited (\"XML\" or \"Studio\")."),
default="Studio",
scope=Scope.settings,
deprecated=True # Deprecated because user would not change away from Studio within Studio.
)
giturl = String(
display_name=_("GIT URL"),
help=_("Enter the URL for the course data GIT repository."),
scope=Scope.settings
)
xqa_key = String(
display_name=_("XQA Key"),
help=_("This setting is not currently supported."), scope=Scope.settings,
deprecated=True
)
annotation_storage_url = String(
help=_("Enter the location of the annotation storage server. The textannotation, videoannotation, and imageannotation advanced modules require this setting."),
scope=Scope.settings,
default="http://your_annotation_storage.com",
display_name=_("URL for Annotation Storage")
)
annotation_token_secret = String(
help=_("Enter the secret string for annotation storage. The textannotation, videoannotation, and imageannotation advanced modules require this string."),
scope=Scope.settings,
default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
display_name=_("Secret Token String for Annotation")
)
graceperiod = Timedelta(
help="Amount of time after the due date that submissions will be accepted",
scope=Scope.settings,
)
group_access = Dict(
help=_("Enter the ids for the content groups this problem belongs to."),
scope=Scope.settings,
)
showanswer = String(
display_name=_("Show Answer"),
help=_(
# Translators: DO NOT translate the words in quotes here, they are
# specific words for the acceptable values.
'Specify when the Show Answer button appears for each problem. '
'Valid values are "always", "answered", "attempted", "closed", '
'"finished", "past_due", "correct_or_past_due", and "never".'
),
scope=Scope.settings,
default="finished",
)
rerandomize = String(
display_name=_("Randomization"),
help=_(
# Translators: DO NOT translate the words in quotes here, they are
# specific words for the acceptable values.
'Specify the default for how often variable values in a problem are randomized. '
'This setting should be set to "never" unless you plan to provide a Python '
'script to identify and randomize values in most of the problems in your course. '
'Valid values are "always", "onreset", "never", and "per_student".'
),
scope=Scope.settings,
default="never",
)
days_early_for_beta = Float(
display_name=_("Days Early for Beta Users"),
help=_("Enter the number of days before the start date that beta users can access the course."),
scope=Scope.settings,
default=None,
)
static_asset_path = String(
display_name=_("Static Asset Path"),
help=_("Enter the path to use for files on the Files & Uploads page. This value overrides the Studio default, c4x://."),
scope=Scope.settings,
default='',
)
text_customization = Dict(
display_name=_("Text Customization"),
help=_("Enter string customization substitutions for particular locations."),
scope=Scope.settings,
)
use_latex_compiler = Boolean(
display_name=_("Enable LaTeX Compiler"),
help=_("Enter true or false. If true, you can use the LaTeX templates for HTML components and advanced Problem components."),
default=False,
scope=Scope.settings
)
max_attempts = Integer(
display_name=_("Maximum Attempts"),
help=_("Enter the maximum number of times a student can try to answer problems. By default, Maximum Attempts is set to null, meaning that students have an unlimited number of attempts for problems. You can override this course-wide setting for individual problems. However, if the course-wide setting is a specific number, you cannot set the Maximum Attempts for individual problems to unlimited."),
values={"min": 0}, scope=Scope.settings
)
matlab_api_key = String(
display_name=_("Matlab API key"),
help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
"This key is granted for exclusive use in this course for the specified duration. "
"Do not share the API key with other courses. Notify MathWorks immediately "
"if you believe the key is exposed or compromised. To obtain a key for your course, "
"or to report an issue, please contact [email protected]"),
scope=Scope.settings
)
# This is should be scoped to content, but since it's defined in the policy
# file, it is currently scoped to settings.
user_partitions = UserPartitionList(
display_name=_("Group Configurations"),
help=_("Enter the configurations that govern how students are grouped together."),
default=[],
scope=Scope.settings
)
video_speed_optimizations = Boolean(
display_name=_("Enable video caching system"),
help=_("Enter true or false. If true, video caching will be used for HTML5 videos."),
default=True,
scope=Scope.settings
)
video_bumper = Dict(
display_name=_("Video Pre-Roll"),
help=_(
"Identify a video, 5-10 seconds in length, to play before course videos. Enter the video ID from "
"the Video Uploads page and one or more transcript files in the following format: {format}. "
"For example, an entry for a video with two transcripts looks like this: {example}"
).format(
format='{"video_id": "ID", "transcripts": {"language": "/static/filename.srt"}}',
example=(
'{'
'"video_id": "77cef264-d6f5-4cf2-ad9d-0178ab8c77be", '
'"transcripts": {"en": "/static/DemoX-D01_1.srt", "uk": "/static/DemoX-D01_1_uk.srt"}'
'}'
),
),
scope=Scope.settings
)
reset_key = "DEFAULT_SHOW_RESET_BUTTON"
default_reset_button = getattr(settings, reset_key) if hasattr(settings, reset_key) else False
show_reset_button = Boolean(
display_name=_("Show Reset Button for Problems"),
help=_(
"Enter true or false. If true, problems in the course default to always displaying a 'Reset' button. "
"You can override this in each problem's settings. All existing problems are affected when "
"this course-wide setting is changed."
),
scope=Scope.settings,
default=default_reset_button
)
edxnotes = Boolean(
display_name=_("Enable Student Notes"),
help=_("Enter true or false. If true, students can use the Student Notes feature."),
default=False,
scope=Scope.settings
)
edxnotes_visibility = Boolean(
display_name="Student Notes Visibility",
help=_("Indicates whether Student Notes are visible in the course. "
"Students can also show or hide their notes in the courseware."),
default=True,
scope=Scope.user_info
)
in_entrance_exam = Boolean(
display_name=_("Tag this module as part of an Entrance Exam section"),
help=_("Enter true or false. If true, answer submissions for problem modules will be "
"considered in the Entrance Exam scoring/gating algorithm."),
scope=Scope.settings,
default=False
)
self_paced = Boolean(
display_name=_('Self Paced'),
help=_(
'Set this to "true" to mark this course as self-paced. Self-paced courses do not have '
'due dates for assignments, and students can progress through the course at any rate before '
'the course ends.'
),
default=False,
scope=Scope.settings
)
def compute_inherited_metadata(descriptor):
"""Given a descriptor, traverse all of its descendants and do metadata
inheritance. Should be called on a CourseDescriptor after importing a
course.
NOTE: This means that there is no such thing as lazy loading at the
moment--this accesses all the children."""
if descriptor.has_children:
parent_metadata = descriptor.xblock_kvs.inherited_settings.copy()
# add any of descriptor's explicitly set fields to the inheriting list
for field in InheritanceMixin.fields.values():
if field.is_set_on(descriptor):
# inherited_settings values are json repr
parent_metadata[field.name] = field.read_json(descriptor)
for child in descriptor.get_children():
inherit_metadata(child, parent_metadata)
compute_inherited_metadata(child)
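# Illustrative call sequence (added note; `course_descriptor` is a hypothetical
# variable standing for a CourseDescriptor loaded from the modulestore):
#
#   compute_inherited_metadata(course_descriptor)
#   # afterwards, children return inherited values (e.g. a course-wide
#   # `graceperiod`) for any inheritable field they do not set explicitly.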
def inherit_metadata(descriptor, inherited_data):
"""
Updates this module with metadata inherited from a containing module.
Only metadata specified in self.inheritable_metadata will
be inherited
`inherited_data`: A dictionary mapping field names to the values that
they should inherit
"""
try:
descriptor.xblock_kvs.inherited_settings = inherited_data
except AttributeError: # the kvs doesn't have inherited_settings probably b/c it's an error module
pass
def own_metadata(module):
"""
Return a JSON-friendly dictionary that contains only non-inherited field
keys, mapped to their serialized values
"""
return module.get_explicitly_set_fields_by_scope(Scope.settings)
class InheritingFieldData(KvsFieldData):
"""A `FieldData` implementation that can inherit value from parents to children."""
def __init__(self, inheritable_names, **kwargs):
"""
`inheritable_names` is a list of names that can be inherited from
parents.
"""
super(InheritingFieldData, self).__init__(**kwargs)
self.inheritable_names = set(inheritable_names)
def has_default_value(self, name):
"""
Return whether or not the field `name` has a default value
"""
has_default_value = getattr(self._kvs, 'has_default_value', False)
if callable(has_default_value):
return has_default_value(name)
return has_default_value
def default(self, block, name):
"""
The default for an inheritable name is found on a parent.
"""
if name in self.inheritable_names:
# Walk up the content tree to find the first ancestor
# that this field is set on. Use the field from the current
# block so that if it has a different default than the root
# node of the tree, the block's default will be used.
field = block.fields[name]
ancestor = block.get_parent()
            # If the block's parent is of type 'library_content', bypass
            # inheritance and use the kvs' default instead of reusing the
            # parent's value, since '_copy_from_templates' puts fields
            # into defaults.
if ancestor and \
ancestor.location.category == 'library_content' and \
self.has_default_value(name):
return super(InheritingFieldData, self).default(block, name)
while ancestor is not None:
if field.is_set_on(ancestor):
return field.read_json(ancestor)
else:
ancestor = ancestor.get_parent()
return super(InheritingFieldData, self).default(block, name)
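# Example of the inheritance lookup above (added note; the block names are
# hypothetical): if a course block explicitly sets `due` and a descendant
# problem block does not, reading the problem's `due` walks up the parents
# until the course is reached and its value is returned; if no ancestor sets
# the field, the field's own default applies.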
def inheriting_field_data(kvs):
"""Create an InheritanceFieldData that inherits the names in InheritanceMixin."""
return InheritingFieldData(
inheritable_names=InheritanceMixin.fields.keys(),
kvs=kvs,
)
class InheritanceKeyValueStore(KeyValueStore):
"""
Common superclass for kvs's which know about inheritance of settings. Offers simple
dict-based storage of fields and lookup of inherited values.
Note: inherited_settings is a dict of key to json values (internal xblock field repr)
"""
def __init__(self, initial_values=None, inherited_settings=None):
super(InheritanceKeyValueStore, self).__init__()
self.inherited_settings = inherited_settings or {}
self._fields = initial_values or {}
def get(self, key):
return self._fields[key.field_name]
def set(self, key, value):
# xml backed courses are read-only, but they do have some computed fields
self._fields[key.field_name] = value
def delete(self, key):
del self._fields[key.field_name]
def has(self, key):
return key.field_name in self._fields
def default(self, key):
"""
Check to see if the default should be from inheritance. If not
inheriting, this will raise KeyError which will cause the caller to use
the field's global default.
"""
return self.inherited_settings[key.field_name]
| agpl-3.0 | -8,668,647,946,764,020,000 | 40.129477 | 407 | 0.641862 | false |
zstackio/zstack-woodpecker | integrationtest/vm/virtualrouter/pf/test_udp.py | 4 | 2441 | '''
Test Port Forwarding with UDP connection
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as zstack_vm_header
import zstackwoodpecker.zstack_test.zstack_test_port_forwarding as zstack_pf_header
import apibinding.inventory as inventory
import os
test_stub = test_lib.lib_get_test_stub()
PfRule = test_state.PfRule
Port = test_state.Port
test_obj_dict = test_state.TestStateDict()
def test():
'''
    The PF test needs at least 3 VRs to exist. Besides the PF_VM's VR,
    another 2 VR VMs are needed. The 1st VR's public IP address will be set
    as allowedCidr. The 1st VR VM should be able to access PF_VM; the 2nd VR
    VM should not be able to access PF_VM.
    '''
pf_vm = test_stub.create_dnat_vm()
test_obj_dict.add_vm(pf_vm)
l3_name = os.environ.get('l3VlanNetworkName1')
vr1 = test_stub.create_vr_vm(test_obj_dict, l3_name)
l3_name = os.environ.get('l3NoVlanNetworkName1')
vr2 = test_stub.create_vr_vm(test_obj_dict, l3_name)
vr1_pub_ip = test_lib.lib_find_vr_pub_ip(vr1)
vr2_pub_ip = test_lib.lib_find_vr_pub_ip(vr2)
pf_vm.check()
vm_nic = pf_vm.vm.vmNics[0]
vm_nic_uuid = vm_nic.uuid
pri_l3_uuid = vm_nic.l3NetworkUuid
vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0]
vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr)
l3_uuid = vr_pub_nic.l3NetworkUuid
vip = test_stub.create_vip('pf_udp_test', l3_uuid)
test_obj_dict.add_vip(vip)
vip_uuid = vip.get_vip().uuid
pf_creation_opt = PfRule.generate_pf_rule_option(vr1_pub_ip, protocol=inventory.UDP, vip_target_rule=Port.rule1_ports, private_target_rule=Port.rule1_ports, vip_uuid=vip_uuid, vm_nic_uuid=vm_nic_uuid)
test_pf = zstack_pf_header.ZstackTestPortForwarding()
test_pf.set_creation_option(pf_creation_opt)
test_pf.create(pf_vm)
vip.attach_pf(test_pf)
pf_vm.check()
vip.check()
test_pf.delete()
pf_vm.destroy()
test_obj_dict.rm_vm(pf_vm)
vip.delete()
test_obj_dict.rm_vip(vip)
test_util.test_pass("Test Port Forwarding UDP Rule Successfully")
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 | -7,657,247,217,824,791,000 | 31.986486 | 204 | 0.697665 | false |
tytso/compute-image-packages | gcimagebundle/gcimagebundlelib/imagebundle.py | 1 | 9515 | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool to bundle root filesystem to a tarball.
Creates a tar bundle and a Manifest, which can be uploaded to image store.
"""
import logging
from optparse import OptionParser
import os
import shutil
import subprocess
import tempfile
import time
from gcimagebundlelib import block_disk
from gcimagebundlelib import exclude_spec
from gcimagebundlelib import platform_factory
from gcimagebundlelib import utils
def SetupArgsParser():
"""Sets up the command line flags."""
parser = OptionParser()
parser.add_option('-d', '--disk', dest='disk',
default='/dev/sda',
help='Disk to bundle.')
parser.add_option('-r', '--root', dest='root_directory',
default='/', metavar='ROOT',
help='Root of the file system to bundle.'
' Recursively bundles all sub directories.')
parser.add_option('-e', '--excludes', dest='excludes',
help='Comma separated list of sub directories to exclude.'
' The defaults are platform specific.')
parser.add_option('-o', '--output_directory', dest='output_directory',
default='/tmp/', metavar='DIR',
help='Output directory for image.')
parser.add_option('--output_file_name', dest='output_file_name',
default=None, metavar='FILENAME',
help=('Output filename for the image. Default is a digest'
' of the image bytes.'))
parser.add_option('--include_mounts', dest='include_mounts',
help='Don\'t ignore mounted filesystems under ROOT.',
action='store_true', default=False)
parser.add_option('-v', '--version',
action='store_true', dest='display_version', default=False,
help='Print the tool version.')
parser.add_option('--loglevel', dest='log_level',
help='Debug logging level.', default='INFO',
                    choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])
parser.add_option('--log_file', dest='log_file',
help='Output file for log messages.')
parser.add_option('-k', '--key', dest='key', default='nebula',
help='Public key used for signing the image.')
parser.add_option('--nocleanup', dest='cleanup',
action='store_false', default=True,
help=' Do not clean up temporary and log files.')
#TODO(user): Get dehumanize.
parser.add_option('--fssize', dest='fs_size', default=10*1024*1024*1024,
type='int', help='File system size in bytes')
parser.add_option('-b', '--bucket', dest='bucket',
help='Destination storage bucket')
parser.add_option('-f', '--filesystem', dest='file_system',
default=None,
help='File system type for the image.')
parser.add_option('--skip_disk_space_check', dest='skip_disk_space_check',
default=False, action='store_true',
help='Skip the disk space requirement check.')
return parser
def VerifyArgs(parser, options):
"""Verifies that commandline flags are consistent."""
if not options.output_directory:
parser.error('output bundle directory must be specified.')
if not os.path.exists(options.output_directory):
parser.error('output bundle directory does not exist.')
# TODO(user): add more verification as needed
def EnsureSuperUser():
"""Ensures that current user has super user privileges."""
if os.getuid() != 0:
logging.warning('Tool must be run as root.')
exit(-1)
def GetLogLevel(options):
"""Log Level string to logging.LogLevel mapping."""
level = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
if options.log_level in level:
return level[options.log_level]
  print 'Invalid logging level. Defaulting to INFO.'
return logging.INFO
def SetupLogging(options, log_dir='/tmp'):
"""Set up logging.
All messages above INFO level are also logged to console.
Args:
options: collection of command line options.
log_dir: directory used to generate log files.
"""
if options.log_file:
logfile = options.log_file
else:
logfile = tempfile.mktemp(dir=log_dir, prefix='bundle_log_')
print 'Starting logging in %s' % logfile
logging.basicConfig(filename=logfile,
level=GetLogLevel(options),
format='%(asctime)s %(levelname)s:%(name)s:%(message)s')
# Use GMT timestamp in logging.
logging.Formatter.converter=time.gmtime
console = logging.StreamHandler()
console.setLevel(GetLogLevel(options))
logging.getLogger().addHandler(console)
def PrintVersionInfo():
#TODO: Should read from the VERSION file instead.
print 'version 1.2.8'
def GetTargetFilesystem(options, guest_platform):
if options.file_system:
return options.file_system
else:
return guest_platform.GetPreferredFilesystemType()
def main():
parser = SetupArgsParser()
(options, _) = parser.parse_args()
if options.display_version:
PrintVersionInfo()
return 0
EnsureSuperUser()
VerifyArgs(parser, options)
scratch_dir = tempfile.mkdtemp(dir=options.output_directory)
SetupLogging(options, scratch_dir)
try:
guest_platform = platform_factory.PlatformFactory(
options.root_directory).GetPlatform()
except platform_factory.UnknownPlatformException:
logging.critical('Platform is not supported.'
' Platform rules can be added to platform_factory.py.')
return -1
temp_file_name = tempfile.mktemp(dir=scratch_dir, suffix='.tar.gz')
file_system = GetTargetFilesystem(options, guest_platform)
logging.info('File System: %s', file_system)
logging.info('Disk Size: %s bytes', options.fs_size)
bundle = block_disk.RootFsRaw(
options.fs_size, file_system, options.skip_disk_space_check)
bundle.SetTarfile(temp_file_name)
if options.disk:
readlink_command = ['readlink', '-f', options.disk]
final_path = utils.RunCommand(readlink_command).strip()
logging.info('Resolved %s to %s', options.disk, final_path)
bundle.AddDisk(final_path)
# TODO(user): Find the location where the first partition of the disk
# is mounted and add it as the source instead of relying on the source
# param flag
bundle.AddSource(options.root_directory)
bundle.SetKey(options.key)
bundle.SetScratchDirectory(scratch_dir)
# Merge platform specific exclude list, mounts points
# and user specified excludes
excludes = guest_platform.GetExcludeList()
if options.excludes:
excludes.extend([exclude_spec.ExcludeSpec(x) for x in
options.excludes.split(',')])
logging.info('exclude list: %s', ' '.join([x.GetSpec() for x in excludes]))
bundle.AppendExcludes(excludes)
if not options.include_mounts:
mount_points = utils.GetMounts(options.root_directory)
logging.info('ignoring mounts %s', ' '.join(mount_points))
bundle.AppendExcludes([exclude_spec.ExcludeSpec(x, preserve_dir=True) for x
in utils.GetMounts(options.root_directory)])
bundle.SetPlatform(guest_platform)
# Verify that bundle attributes are correct and create tar bundle.
bundle.Verify()
(fs_size, digest) = bundle.Bundleup()
if not digest:
logging.critical('Could not get digest for the bundle.'
' The bundle may not be created correctly')
return -1
if fs_size > options.fs_size:
logging.critical('Size of tar %d exceeds the file system size %d.', fs_size,
options.fs_size)
return -1
if options.output_file_name:
output_file = os.path.join(
options.output_directory, options.output_file_name)
else:
output_file = os.path.join(
options.output_directory, '%s.image.tar.gz' % digest)
os.rename(temp_file_name, output_file)
logging.info('Created tar.gz file at %s' % output_file)
if options.bucket:
bucket = options.bucket
if bucket.startswith('gs://'):
output_bucket = '%s/%s' % (
bucket, os.path.basename(output_file))
else:
output_bucket = 'gs://%s/%s' % (
bucket, os.path.basename(output_file))
# TODO: Consider using boto library directly.
cmd = ['gsutil', 'cp', output_file, output_bucket]
retcode = subprocess.call(cmd)
if retcode != 0:
logging.critical('Failed to copy image to bucket. '
'gsutil returned %d. To retry, run the command: %s',
retcode, ' '.join(cmd))
return -1
logging.info('Uploaded image to %s', output_bucket)
# If we've uploaded, then we can remove the local file.
os.remove(output_file)
if options.cleanup:
shutil.rmtree(scratch_dir)
| apache-2.0 | 4,274,021,336,817,056,000 | 36.46063 | 80 | 0.653284 | false |
vanhonit/xmario_center | softwarecenter/ui/gtk3/panes/viewswitcher.py | 4 | 11293 | # Copyright (C) 2009 Canonical
#
# Authors:
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gtk, GObject
import logging
from gettext import gettext as _
from softwarecenter.backend import get_install_backend
from softwarecenter.enums import ViewPages
from softwarecenter.backend.channel import (get_channels_manager,
AllInstalledChannel,
AllAvailableChannel)
from softwarecenter.ui.gtk3.widgets.buttons import (SectionSelector,
ChannelSelector)
from softwarecenter.ui.gtk3.em import StockEms
from softwarecenter.ui.gtk3.widgets.symbolic_icons import (
SymbolicIcon, PendingSymbolicIcon)
LOG = logging.getLogger(__name__)
_last_button = None
class ViewSwitcher(Gtk.Box):
ICON_SIZE = Gtk.IconSize.LARGE_TOOLBAR
def __init__(self, view_manager, datadir, db, cache, icons):
# boring stuff
self.view_manager = view_manager
def on_view_changed(widget, view_id):
self.view_buttons[view_id].set_active(True)
self.view_manager.connect('view-changed', on_view_changed)
self.channel_manager = get_channels_manager(db)
# backend sig handlers ...
self.backend = get_install_backend()
self.backend.connect("transactions-changed",
self.on_transaction_changed)
self.backend.connect("transaction-finished",
self.on_transaction_finished)
self.backend.connect("channels-changed",
self.on_channels_changed)
# widgetry
Gtk.Box.__init__(self)
self.set_orientation(Gtk.Orientation.HORIZONTAL)
# Gui stuff
self.view_buttons = {}
self.selectors = {}
self._prev_view = None # track the previous active section
self._prev_item = None # track the previous active menu-item
self._handlers = []
# order is important here!
# first, the availablepane items
icon = SymbolicIcon("available")
self.append_section_with_channel_sel(
ViewPages.AVAILABLE,
_("All Software"),
icon,
self.on_get_available_channels)
# the installedpane items
icon = SymbolicIcon("installed")
self.append_section_with_channel_sel(
ViewPages.INSTALLED,
_("Installed"),
icon,
self.on_get_installed_channels)
# the historypane item
icon = SymbolicIcon("history")
self.append_section(ViewPages.HISTORY, _("History"), icon)
# the pendingpane
icon = PendingSymbolicIcon("pending")
self.append_section(ViewPages.PENDING, _("Progress"), icon)
# set sensible atk name
atk_desc = self.get_accessible()
atk_desc.set_name(_("Software sources"))
def on_transaction_changed(self, backend, total_transactions):
LOG.debug("on_transactions_changed '%s'" % total_transactions)
pending = len(total_transactions)
self.notify_icon_of_pending_count(pending)
if pending > 0:
self.start_icon_animation()
pending_btn = self.view_buttons[ViewPages.PENDING]
if not pending_btn.get_visible():
pending_btn.set_visible(True)
else:
self.stop_icon_animation()
pending_btn = self.view_buttons[ViewPages.PENDING]
from softwarecenter.ui.gtk3.session.viewmanager import (
get_viewmanager,
)
vm = get_viewmanager()
if vm.get_active_view() == 'view-page-pending':
vm.nav_back()
vm.clear_forward_history()
pending_btn.set_visible(False)
def start_icon_animation(self):
self.view_buttons[ViewPages.PENDING].image.start()
def stop_icon_animation(self):
self.view_buttons[ViewPages.PENDING].image.stop()
def notify_icon_of_pending_count(self, count):
image = self.view_buttons[ViewPages.PENDING].image
image.set_transaction_count(count)
def on_transaction_finished(self, backend, result):
if result.success:
self.on_channels_changed()
def on_section_sel_clicked(self, button, event, view_id):
# mvo: this check causes bug LP: #828675
#if self._prev_view is view_id:
# return True
vm = self.view_manager
def config_view():
# set active pane
pane = vm.set_active_view(view_id)
# configure DisplayState
state = pane.state.copy()
if view_id == ViewPages.INSTALLED:
state.channel = AllInstalledChannel()
else:
state.channel = AllAvailableChannel()
# decide which page we want to display
if hasattr(pane, "Pages"):
page = pane.Pages.HOME
else:
page = None
# request page change
vm.display_page(pane, page, state)
return False
self._prev_view = view_id
GObject.idle_add(config_view)
def on_get_available_channels(self, popup):
return self.build_channel_list(popup, ViewPages.AVAILABLE)
def on_get_installed_channels(self, popup):
return self.build_channel_list(popup, ViewPages.INSTALLED)
def on_channels_changed(self, backend=None, res=None):
for view_id, sel in self.selectors.items():
# setting popup to None will cause a rebuild of the popup
# menu the next time the selector is clicked
sel.popup = None
def append_section(self, view_id, label, icon):
btn = SectionSelector(label, icon, self.ICON_SIZE)
self.view_buttons[view_id] = btn
self.pack_start(btn, False, False, 0)
global _last_button
if _last_button is not None:
btn.join_group(_last_button)
_last_button = btn
# this must go last otherwise as the buttons are added
# to the group, toggled & clicked gets emitted... causing
# all panes to fully initialise on USC startup, which is
# undesirable!
btn.connect("button-release-event", self.on_section_sel_clicked,
view_id)
return btn
def append_channel_selector(self, section_btn, view_id, build_func):
sel = ChannelSelector(section_btn)
self.selectors[view_id] = sel
sel.set_build_func(build_func)
self.pack_start(sel, False, False, 0)
return sel
def append_section_with_channel_sel(self, view_id, label, icon,
build_func):
btn = self.append_section(view_id, label, icon)
btn.draw_hint_has_channel_selector = True
sel = self.append_channel_selector(btn, view_id, build_func)
return btn, sel
def build_channel_list(self, popup, view_id):
# clean up old signal handlers
for sig in self._handlers:
GObject.source_remove(sig)
if view_id == ViewPages.AVAILABLE:
channels = self.channel_manager.channels
elif view_id == ViewPages.INSTALLED:
channels = self.channel_manager.channels_installed_only
else:
channels = self.channel_manager.channels
for i, channel in enumerate(channels):
# only calling it with a explicit new() makes it a really
# empty one, otherwise the following error is raised:
# """Attempting to add a widget with type GtkBox to a
# GtkCheckMenuItem, but as a GtkBin subclass a
# GtkCheckMenuItem can only contain one widget at a time;
# it already contains a widget of type GtkAccelLabel """
item = Gtk.MenuItem.new()
label = Gtk.Label.new(channel.display_name)
image = Gtk.Image.new_from_icon_name(channel.icon,
Gtk.IconSize.MENU)
box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, StockEms.SMALL)
box.pack_start(image, False, False, 0)
box.pack_start(label, False, False, 0)
item.add(box)
item.show_all()
self._handlers.append(
item.connect(
"button-release-event",
self.on_channel_selected,
channel,
view_id
)
)
popup.attach(item, 0, 1, i, i + 1)
def on_channel_selected(self, item, event, channel, view_id):
vm = self.view_manager
def config_view():
# set active pane
pane = vm.set_active_view(view_id)
# configure DisplayState
state = pane.state.copy()
state.category = None
state.subcategory = None
state.channel = channel
# decide which page we want to display
if hasattr(pane, "Pages"):
if channel.origin == "all":
page = pane.Pages.HOME
else:
page = pane.Pages.LIST
else:
page = None
# request page change
vm.display_page(pane, page, state)
return False
GObject.idle_add(config_view)
def get_test_window_viewswitcher():
from softwarecenter.testutils import (get_test_db,
get_test_datadir,
get_test_gtk3_viewmanager,
get_test_pkg_info,
get_test_gtk3_icon_cache,
)
cache = get_test_pkg_info()
db = get_test_db()
icons = get_test_gtk3_icon_cache()
datadir = get_test_datadir()
manager = get_test_gtk3_viewmanager()
view = ViewSwitcher(manager, datadir, db, cache, icons)
scroll = Gtk.ScrolledWindow()
box = Gtk.VBox()
box.pack_start(scroll, True, True, 0)
win = Gtk.Window()
scroll.add_with_viewport(view)
win.add(box)
win.set_size_request(400, 200)
win.connect("destroy", Gtk.main_quit)
win.show_all()
return win
if __name__ == "__main__":
import softwarecenter.paths
logging.basicConfig(level=logging.DEBUG)
softwarecenter.paths.datadir = "./data"
win = get_test_window_viewswitcher()
Gtk.main()
| gpl-3.0 | -903,609,816,859,898,000 | 34.624606 | 79 | 0.578677 | false |
crazy-cat/incubator-mxnet | python/mxnet/__init__.py | 8 | 2281 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""MXNet: a concise, fast and flexible framework for deep learning."""
from __future__ import absolute_import
from .context import Context, current_context, cpu, gpu
from .base import MXNetError
from . import base
from . import contrib
from . import ndarray
from . import ndarray as nd
from . import name
# use mx.sym as short for symbol
from . import symbol as sym
from . import symbol
from . import symbol_doc
from . import io
from . import recordio
from . import operator
# use mx.rnd as short for mx.random
from . import random as rnd
from . import random
from . import optimizer
from . import model
from . import notebook
from . import initializer
# use mx.init as short for mx.initializer
from . import initializer as init
from . import visualization
# use viz as short for mx.visualization
from . import visualization as viz
from . import callback
# from . import misc
from . import lr_scheduler
# use mx.kv as short for kvstore
from . import kvstore as kv
from . import kvstore_server
# Runtime compile module
from . import rtc
# Attribute scope to add attributes to symbolic graphs
from .attribute import AttrScope
from . import monitor
from . import monitor as mon
from . import torch
from . import torch as th
from . import profiler
from . import log
from . import module
from . import module as mod
from . import image
from . import image as img
from . import test_utils
from . import rnn
from . import gluon
__version__ = base.__version__
| apache-2.0 | 7,940,631,855,476,645,000 | 26.817073 | 70 | 0.759316 | false |
kernsuite-debian/lofar | CEP/GSM/bremen/stress/generator.py | 1 | 1742 | #!/usr/bin/env python3
import math
import sys
from os import path
from numpy import random
from tests.testlib import write_parset
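# lookup table: band/frequency index -> observing frequency in Hz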
FREQUENCY = {
1: 30000000,
2: 34000000,
3: 38000000,
4: 42000000,
5: 120000000,
6: 130000000,
7: 140000000,
8: 150000000,
9: 160000000,
10: 170000000,
11: 325000000,
12: 352000000,
13: 640000000,
14: 850000000,
15: 1400000000,
16: 2300000000,
17: 4800000000,
18: 8500000000,
19: 33000000,
20: 39000000,
21: 45000000,
22: 51000000,
23: 57000000,
24: 63000000,
25: 69000000,
26: 75000000,
}
def generate_field(ra, decl, radius, size):
for _ in range(size):
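        # taking the sqrt of the uniform deviate spreads the points uniformly
        # over the disc area instead of clustering them toward the centre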
rr = radius * math.sqrt(random.random())
alpha = math.pi * 2 * random.random()
ra_ = rr * math.cos(alpha) + ra
decl_ = rr * math.sin(alpha) + decl
yield ra_, decl_, random.random()
def generate_field_file(filename, ra, decl, radius, size):
f = open(filename, 'w')
f.write('# RA DEC Total_flux e_Total_flux\n\n')
for z in generate_field(ra, decl, radius, size):
f.write('%s %s %s %s\n' % (z[0], z[1], z[2], 0.01))
f.close()
def generate_field_parset(filename, ra, decl, radius, size,
frequency=8):
generate_field_file(filename, ra, decl, radius, size)
parsetname = path.basename(filename)
parsetname = parsetname[:parsetname.index('.')] + '.parset'
write_parset(parsetname, filename, FREQUENCY[frequency], ra, decl, radius)
if __name__ == '__main__':
generate_field_parset(sys.argv[1],
float(sys.argv[2]),
float(sys.argv[3]),
float(sys.argv[4]),
int(sys.argv[5]))
| gpl-3.0 | 156,863,460,824,143,230 | 25.393939 | 78 | 0.579219 | false |
pjknkda/sockjs-tornado | sockjs/tornado/transports/htmlfile.py | 10 | 2418 | # -*- coding: utf-8 -*-
"""
sockjs.tornado.transports.htmlfile
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
HtmlFile transport implementation.
"""
from tornado.web import asynchronous
from sockjs.tornado import proto
from sockjs.tornado.transports import streamingbase
# HTMLFILE template
HTMLFILE_HEAD = r'''
<!doctype html>
<html><head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
</head><body><h2>Don't panic!</h2>
<script>
document.domain = document.domain;
var c = parent.%s;
c.start();
function p(d) {c.message(d);};
window.onload = function() {c.stop();};
</script>
'''.strip()
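# pad the header to roughly 1KB -- some browsers (notably IE) will not start
# processing the streamed <script> chunks until enough data has arrived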
HTMLFILE_HEAD += ' ' * (1024 - len(HTMLFILE_HEAD) + 14)
HTMLFILE_HEAD += '\r\n\r\n'
class HtmlFileTransport(streamingbase.StreamingTransportBase):
name = 'htmlfile'
def initialize(self, server):
super(HtmlFileTransport, self).initialize(server)
@asynchronous
def get(self, session_id):
# Start response
self.preflight()
self.handle_session_cookie()
self.disable_cache()
self.set_header('Content-Type', 'text/html; charset=UTF-8')
# Grab callback parameter
callback = self.get_argument('c', None)
if not callback:
self.write('"callback" parameter required')
self.set_status(500)
self.finish()
return
# TODO: Fix me - use parameter
self.write(HTMLFILE_HEAD % callback)
self.flush()
# Now try to attach to session
if not self._attach_session(session_id):
self.finish()
return
# Flush any pending messages
if self.session:
self.session.flush()
def send_pack(self, message, binary=False):
if binary:
raise Exception('binary not supported for HtmlFileTransport')
# TODO: Just do escaping
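        # each message is delivered as a <script> chunk that calls p() inside
        # the hidden iframe, which forwards it to the parent page callback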
msg = '<script>\np(%s);\n</script>\r\n' % proto.json_encode(message)
self.active = False
try:
self.notify_sent(len(message))
self.write(msg)
self.flush(callback=self.send_complete)
except IOError:
# If connection dropped, make sure we close offending session instead
# of propagating error all way up.
self.session.delayed_close()
self._detach()
| mit | 382,968,946,760,560,300 | 27.116279 | 81 | 0.596774 | false |
adaussy/eclipse-monkey-revival | plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_fileio.py | 4 | 14207 | # Adapted from test_file.py by Daniel Stutzbach
from __future__ import unicode_literals
import sys
import os
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.test_support import (TESTFN, check_warnings, run_unittest,
make_bad_fd, is_jython)
from test.test_support import py3k_bytes as bytes
from test.script_helper import run_python
from _io import FileIO as _FileIO
"""
XXX: ignoring ValueError on Jython for now as the ValueError/IOError thing is
too mixed up right now. Needs investigation especially in Jython3 -- we
should get this cleaned up if possible.
"""
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = _FileIO(TESTFN, 'w')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
@unittest.skipIf(is_jython, "FIXME: not working in Jython")
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write(bytes(range(10)))
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testSeekTell(self):
self.f.write(bytes(range(20)))
self.assertEqual(self.f.tell(), 20)
self.f.seek(0)
self.assertEqual(self.f.tell(), 0)
self.f.seek(10)
self.assertEqual(self.f.tell(), 10)
self.f.seek(5, 1)
self.assertEqual(self.f.tell(), 15)
self.f.seek(-5, 1)
self.assertEqual(self.f.tell(), 10)
self.f.seek(-5, 2)
self.assertEqual(self.f.tell(), 15)
def testAttributes(self):
# verify expected attributes exist
f = self.f
self.assertEqual(f.mode, "wb")
self.assertEqual(f.closed, False)
# verify the attributes are readonly
for attr in 'mode', 'closed':
self.assertRaises((AttributeError, TypeError),
setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write(b"\x01\x02")
self.f.close()
a = array(b'b', b'x'*10)
self.f = _FileIO(TESTFN, 'r')
n = self.f.readinto(a)
self.assertEqual(array(b'b', [1, 2]), a[:n])
def test_none_args(self):
self.f.write(b"hi\nbye\nabc")
self.f.close()
self.f = _FileIO(TESTFN, 'r')
self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
self.f.seek(0)
self.assertEqual(self.f.readline(None), b"hi\n")
self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])
def testRepr(self):
self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode='%s'>"
% (self.f.name, self.f.mode))
del self.f.name
self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode='%s'>"
% (self.f.fileno(), self.f.mode))
self.f.close()
self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")
def testErrors(self):
f = self.f
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
#self.assertEqual(f.name, TESTFN)
self.assertRaises(ValueError, f.read, 10) # Open for writing
f.close()
self.assertTrue(f.closed)
f = self.f = _FileIO(TESTFN, 'r')
self.assertRaises(TypeError, f.readinto, "")
self.assertTrue(not f.closed)
f.close()
self.assertTrue(f.closed)
# These methods all accept a call with 0 arguments
methods = ['fileno', 'isatty', 'read',
'tell', 'truncate', 'seekable',
'readable', 'writable']
if sys.platform.startswith('atheos'):
methods.remove('truncate')
self.f.close()
self.assertTrue(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method)
# These other methods should be tested using a specific call
# in case the test for number of arguments comes first.
b = bytearray()
self.assertRaises(ValueError, self.f.readinto, b )
self.assertRaises(ValueError, self.f.seek, 0)
self.assertRaises(ValueError, self.f.write, b )
def testOpendir(self):
# Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied"
# Unix calls dircheck() and returns "[Errno 21]: Is a directory"
try:
_FileIO('.', 'r')
except IOError as e:
self.assertNotEqual(e.errno, 0)
self.assertEqual(e.filename, ".")
else:
self.fail("Should have raised IOError")
#A set of functions testing that we get expected behaviour if someone has
#manually closed the internal file descriptor. First, a decorator:
def ClosedFD(func):
@wraps(func)
def wrapper(self):
#forcibly close the fd before invoking the problem function
f = self.f
os.close(f.fileno())
try:
func(self, f)
except ValueError:
if not is_jython:
self.fail("ValueError only on Jython")
finally:
try:
self.f.close()
except IOError:
pass
except ValueError:
if not is_jython:
self.fail("ValueError only on Jython")
return wrapper
def ClosedFDRaises(func):
@wraps(func)
def wrapper(self):
#forcibly close the fd before invoking the problem function
f = self.f
os.close(f.fileno())
try:
func(self, f)
except IOError as e:
self.assertEqual(e.errno, errno.EBADF)
except ValueError as e:
if not is_jython:
self.fail("ValueError only on Jython")
else:
self.fail("Should have raised IOError")
finally:
try:
self.f.close()
except IOError:
pass
except ValueError:
if not is_jython:
self.fail("ValueError only on Jython")
return wrapper
@ClosedFDRaises
def testErrnoOnClose(self, f):
f.close()
@ClosedFDRaises
def testErrnoOnClosedWrite(self, f):
f.write('a')
@ClosedFDRaises
def testErrnoOnClosedSeek(self, f):
f.seek(0)
@ClosedFDRaises
def testErrnoOnClosedTell(self, f):
f.tell()
@ClosedFDRaises
def testErrnoOnClosedTruncate(self, f):
f.truncate(0)
@ClosedFD
def testErrnoOnClosedSeekable(self, f):
f.seekable()
@ClosedFD
def testErrnoOnClosedReadable(self, f):
f.readable()
@ClosedFD
def testErrnoOnClosedWritable(self, f):
f.writable()
@ClosedFD
def testErrnoOnClosedFileno(self, f):
f.fileno()
@ClosedFD
def testErrnoOnClosedIsatty(self, f):
self.assertEqual(f.isatty(), False)
def ReopenForRead(self):
try:
self.f.close()
except IOError:
pass
self.f = _FileIO(TESTFN, 'r')
os.close(self.f.fileno())
return self.f
@ClosedFDRaises
def testErrnoOnClosedRead(self, f):
f = self.ReopenForRead()
f.read(1)
@ClosedFDRaises
def testErrnoOnClosedReadall(self, f):
f = self.ReopenForRead()
f.readall()
@ClosedFDRaises
def testErrnoOnClosedReadinto(self, f):
f = self.ReopenForRead()
a = array(b'b', b'x'*10)
f.readinto(a)
class OtherFileTests(unittest.TestCase):
# file tests for which a test file is not created but cleaned up
# This introduced by Jython, to prevent the cascade of errors when
# a test exits leaving an open file. Also a CPython problem.
def setUp(self):
self.f = None
def tearDown(self):
if self.f:
self.f.close()
if os.path.exists(TESTFN):
os.remove(TESTFN)
def testAbles(self):
f = self.f = _FileIO(TESTFN, "w")
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
f.close()
f = self.f = _FileIO(TESTFN, "r")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
f.close()
f = self.f = _FileIO(TESTFN, "a+")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.assertEqual(f.isatty(), False)
f.close()
if sys.platform != "win32":
try:
f = self.f = _FileIO("/dev/tty", "a")
except EnvironmentError:
# When run in a cron job there just aren't any
# ttys, so skip the test. This also handles other
# OS'es that don't support /dev/tty.
pass
else:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
if sys.platform != "darwin" and \
'bsd' not in sys.platform and \
not sys.platform.startswith('sunos'):
# Somehow /dev/tty appears seekable on some BSDs
self.assertEqual(f.seekable(), False)
self.assertEqual(f.isatty(), True)
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+", "rw", "rt"):
try:
f = self.f = _FileIO(TESTFN, mode)
except ValueError:
pass
else:
self.fail('%r is an invalid file mode' % mode)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = self.f = _FileIO(str(TESTFN), "w")
def testBytesOpen(self):
# Opening a bytes filename
try:
fn = TESTFN.encode("ascii")
except UnicodeEncodeError:
# Skip test
return
f = self.f = _FileIO(fn, "w")
f.write(b"abc")
f.close()
with open(TESTFN, "rb") as f:
self.f = f
self.assertEqual(f.read(), b"abc")
def testInvalidFd(self):
if is_jython:
self.assertRaises(TypeError, _FileIO, -10) # file descriptor not int in Jython
else:
self.assertRaises(ValueError, _FileIO, -10)
self.assertRaises(OSError, _FileIO, make_bad_fd())
if sys.platform == 'win32':
import msvcrt
self.assertRaises(IOError, msvcrt.get_osfhandle, make_bad_fd())
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = self.f = _FileIO(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testTruncate(self):
f = self.f = _FileIO(TESTFN, 'w')
f.write(bytes(bytearray(range(10))))
self.assertEqual(f.tell(), 10)
f.truncate(5)
self.assertEqual(f.tell(), 10)
self.assertEqual(f.seek(0, os.SEEK_END), 5)
f.truncate(15)
self.assertEqual(f.tell(), 5)
#XXX: next assert not working in Jython:
#self.assertEqual(f.seek(0, os.SEEK_END), 15)
f.close()
def testTruncateOnWindows(self):
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = self.f = _FileIO(TESTFN, 'w')
f.write(bytes(range(11)))
f.close()
f = self.f = _FileIO(TESTFN,'r+')
data = f.read(5)
if data != bytes(range(5)):
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
# Test for bug 801631
bug801631()
def testAppend(self):
f = self.f = open(TESTFN, 'wb')
f.write(b'spam')
f.close()
f = self.f = open(TESTFN, 'ab')
f.write(b'eggs')
f.close()
f = self.f = open(TESTFN, 'rb')
d = f.read()
f.close()
self.assertEqual(d, b'spameggs')
def testInvalidInit(self):
self.assertRaises(TypeError, _FileIO, "1", 0, 0)
def testWarnings(self):
with check_warnings(quiet=True) as w:
self.assertEqual(w.warnings, [])
self.assertRaises(TypeError, _FileIO, [])
self.assertEqual(w.warnings, [])
self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
self.assertEqual(w.warnings, [])
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
| epl-1.0 | 8,063,707,896,594,834,000 | 31.069977 | 91 | 0.548673 | false |
klahnakoski/ActiveData | vendor/jx_python/namespace/rename.py | 1 | 4633 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
from copy import copy
from jx_base.dimensions import Dimension
from jx_base.utils import is_variable_name
from jx_base.expressions import QueryOp
from jx_base.language import is_op
from jx_python.namespace import Namespace, convert_list
from mo_dots import Data, coalesce, is_data, is_list, listwrap, set_default, unwraplist, to_data, is_many, dict_to_data, list_to_data
from mo_future import is_text
from mo_logs import Log
from mo_math import is_number
from mo_times.dates import Date
class Rename(Namespace):
def __init__(self, dimensions, source):
"""
EXPECTING A LIST OF {"name":name, "value":value} OBJECTS TO PERFORM A MAPPING
"""
dimensions = to_data(dimensions)
if is_data(dimensions) and dimensions.name == None:
# CONVERT TO A REAL DIMENSION DEFINITION
dimensions = {"name": ".", "type": "set", "edges":[{"name": k, "field": v} for k, v in dimensions.items()]}
self.dimensions = Dimension(dimensions, None, source)
def convert(self, expr):
"""
EXPAND INSTANCES OF name TO value
"""
if expr is True or expr == None or expr is False:
return expr
elif is_number(expr):
return expr
elif expr == ".":
return "."
elif is_variable_name(expr):
return coalesce(self.dimensions[expr], expr)
elif is_text(expr):
Log.error("{{name|quote}} is not a valid variable name", name=expr)
elif isinstance(expr, Date):
return expr
elif is_op(expr, QueryOp):
return self._convert_query(expr)
elif is_data(expr):
if expr["from"]:
return self._convert_query(expr)
elif len(expr) >= 2:
#ASSUME WE HAVE A NAMED STRUCTURE, NOT AN EXPRESSION
return dict_to_data({name: self.convert(value) for name, value in expr.leaves()})
else:
# ASSUME SINGLE-CLAUSE EXPRESSION
k, v = expr.items()[0]
return converter_map.get(k, self._convert_bop)(self, k, v)
elif is_many(expr):
return list_to_data([self.convert(value) for value in expr])
else:
return expr
def _convert_query(self, query):
output = QueryOp(None)
output.select = self._convert_clause(query.select)
output.where = self.convert(query.where)
output.frum = self._convert_from(query.frum)
output.edges = convert_list(self._convert_edge, query.edges)
output.window = convert_list(self._convert_window, query.window)
output.sort = self._convert_clause(query.sort)
output.format = query.format
return output
def _convert_bop(self, op, term):
if is_list(term):
return {op: list(map(self.convert, term))}
return {op: {self.convert(var): val for var, val in term.items()}}
def _convert_many(self, k, v):
return {k: list(map(self.convert, v))}
def _convert_from(self, frum):
if is_data(frum):
return Data(name=self.convert(frum.name))
else:
return self.convert(frum)
def _convert_edge(self, edge):
dim = self.dimensions[edge.value]
if not dim:
return edge
if len(listwrap(dim.fields)) == 1:
#TODO: CHECK IF EDGE DOMAIN AND DIMENSION DOMAIN CONFLICT
new_edge = set_default({"value": unwraplist(dim.fields)}, edge)
return new_edge
edge = copy(edge)
edge.value = None
edge.domain = dim.getDomain()
return edge
def _convert_clause(self, clause):
"""
JSON QUERY EXPRESSIONS HAVE MANY CLAUSES WITH SIMILAR COLUMN DELCARATIONS
"""
clause = to_data(clause)
if clause == None:
return None
elif is_data(clause):
return set_default({"value": self.convert(clause.value)}, clause)
else:
return [set_default({"value": self.convert(c.value)}, c) for c in clause]
converter_map = {
"and": Rename._convert_many,
"or": Rename._convert_many,
"not": Rename.convert,
"missing": Rename.convert,
"exists": Rename.convert
}
| mpl-2.0 | -3,161,715,251,237,288,400 | 32.572464 | 119 | 0.600259 | false |
patrickm/chromium.src | tools/metrics/histograms/find_unmapped_histograms.py | 12 | 7448 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Scans the Chromium source for histograms that are absent from histograms.xml.
This is a heuristic scan, so a clean run of this script does not guarantee that
all histograms in the Chromium source are properly mapped. Notably, field
trials are entirely ignored by this script.
"""
import commands
import extract_histograms
import logging
import optparse
import os
import re
import sys
ADJACENT_C_STRING_REGEX = re.compile(r"""
(" # Opening quotation mark
[^"]*) # Literal string contents
" # Closing quotation mark
\s* # Any number of spaces
" # Another opening quotation mark
""", re.VERBOSE)
CONSTANT_REGEX = re.compile(r"""
(\w*::)? # Optional namespace
k[A-Z] # Match a constant identifier: 'k' followed by an uppercase letter
\w* # Match the rest of the constant identifier
$ # Make sure there's only the identifier, nothing else
""", re.VERBOSE)
HISTOGRAM_REGEX = re.compile(r"""
UMA_HISTOGRAM # Match the shared prefix for standard UMA histogram macros
\w* # Match the rest of the macro name, e.g. '_ENUMERATION'
\( # Match the opening parenthesis for the macro
\s* # Match any whitespace -- especially, any newlines
([^,]*) # Capture the first parameter to the macro
, # Match the comma that delineates the first parameter
""", re.VERBOSE)
class DirectoryNotFoundException(Exception):
"""Base class to distinguish locally defined exceptions from standard ones."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def findDefaultRoot():
"""Find the root of the chromium repo, in case the script is run from the
histograms dir.
Returns:
string: path to the src dir of the repo.
Raises:
DirectoryNotFoundException if the target directory cannot be found.
"""
path = os.getcwd()
while path:
head, tail = os.path.split(path)
if tail == 'src':
return path
if path == head:
break
path = head
raise DirectoryNotFoundException('Could not find src/ dir')
def collapseAdjacentCStrings(string):
"""Collapses any adjacent C strings into a single string.
Useful to re-combine strings that were split across multiple lines to satisfy
the 80-col restriction.
Args:
string: The string to recombine, e.g. '"Foo"\n "bar"'
Returns:
The collapsed string, e.g. "Foobar" for an input of '"Foo"\n "bar"'
"""
while True:
collapsed = ADJACENT_C_STRING_REGEX.sub(r'\1', string, count=1)
if collapsed == string:
return collapsed
string = collapsed
def logNonLiteralHistogram(filename, histogram):
"""Logs a statement warning about a non-literal histogram name found in the
Chromium source.
Filters out known acceptable exceptions.
Args:
filename: The filename for the file containing the histogram, e.g.
'chrome/browser/memory_details.cc'
histogram: The expression that evaluates to the name of the histogram, e.g.
'"FakeHistogram" + variant'
Returns:
None
"""
# Ignore histogram macros, which typically contain backslashes so that they
# can be formatted across lines.
if '\\' in histogram:
return
# Field trials are unique within a session, so are effectively constants.
if histogram.startswith('base::FieldTrial::MakeName'):
return
# Ignore histogram names that have been pulled out into C++ constants.
if CONSTANT_REGEX.match(histogram):
return
# TODO(isherman): This is still a little noisy... needs further filtering to
# reduce the noise.
logging.warning('%s contains non-literal histogram name <%s>', filename,
histogram)
def readChromiumHistograms():
"""Searches the Chromium source for all histogram names.
Also prints warnings for any invocations of the UMA_HISTOGRAM_* macros with
names that might vary during a single run of the app.
Returns:
    A set containing any found literal histogram names.
"""
logging.info('Scanning Chromium source for histograms...')
# Use git grep to find all invocations of the UMA_HISTOGRAM_* macros.
# Examples:
# 'path/to/foo.cc:420: UMA_HISTOGRAM_COUNTS_100("FooGroup.FooName",'
# 'path/to/bar.cc:632: UMA_HISTOGRAM_ENUMERATION('
locations = commands.getoutput('git gs UMA_HISTOGRAM').split('\n')
filenames = set([location.split(':')[0] for location in locations])
histograms = set()
for filename in filenames:
contents = ''
with open(filename, 'r') as f:
contents = f.read()
matches = set(HISTOGRAM_REGEX.findall(contents))
for histogram in matches:
histogram = collapseAdjacentCStrings(histogram)
# Must begin and end with a quotation mark.
if histogram[0] != '"' or histogram[-1] != '"':
logNonLiteralHistogram(filename, histogram)
continue
# Must not include any quotation marks other than at the beginning or end.
histogram_stripped = histogram.strip('"')
if '"' in histogram_stripped:
logNonLiteralHistogram(filename, histogram)
continue
histograms.add(histogram_stripped)
return histograms
def readXmlHistograms(histograms_file_location):
"""Parses all histogram names from histograms.xml.
Returns:
    A set containing the parsed histogram names.
"""
logging.info('Reading histograms from %s...' % histograms_file_location)
histograms = extract_histograms.ExtractHistograms(histograms_file_location)
return set(extract_histograms.ExtractNames(histograms))
def main():
# Find default paths.
default_root = findDefaultRoot()
default_histograms_path = os.path.join(
default_root, 'tools/metrics/histograms/histograms.xml')
# Parse command line options
parser = optparse.OptionParser()
parser.add_option(
'--root-directory', dest='root_directory', default=default_root,
help='scan within DIRECTORY for histograms [optional, defaults to "%s"]' %
default_root,
metavar='DIRECTORY')
parser.add_option(
'--histograms-file', dest='histograms_file_location',
default=default_histograms_path,
help='read histogram definitions from FILE (relative to --root-directory) '
'[optional, defaults to "%s"]' % default_histograms_path,
metavar='FILE')
(options, args) = parser.parse_args()
if args:
parser.print_help()
sys.exit(1)
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
try:
os.chdir(options.root_directory)
except EnvironmentError as e:
logging.error("Could not change to root directory: %s", e)
sys.exit(1)
chromium_histograms = readChromiumHistograms()
xml_histograms = readXmlHistograms(options.histograms_file_location)
unmapped_histograms = sorted(chromium_histograms - xml_histograms)
if len(unmapped_histograms):
logging.info('')
logging.info('')
logging.info('Histograms in Chromium but not in %s:' %
options.histograms_file_location)
logging.info('-------------------------------------------------')
for histogram in unmapped_histograms:
logging.info(' %s', histogram)
else:
logging.info('Success! No unmapped histograms found.')
if __name__ == '__main__':
main()
| bsd-3-clause | 5,174,157,654,525,293,000 | 30.965665 | 80 | 0.68072 | false |
SSJohns/osf.io | website/addons/box/views.py | 10 | 1237 | """Views for the node settings page."""
# -*- coding: utf-8 -*-
from flask import request
from website.addons.base import generic_views
from website.addons.box.serializer import BoxSerializer
from website.project.decorators import must_have_addon, must_be_addon_authorizer
SHORT_NAME = 'box'
FULL_NAME = 'Box'
box_account_list = generic_views.account_list(
SHORT_NAME,
BoxSerializer
)
box_import_auth = generic_views.import_auth(
SHORT_NAME,
BoxSerializer
)
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def box_folder_list(node_addon, **kwargs):
""" Returns all the subsequent folders under the folder id passed.
"""
folder_id = request.args.get('folder_id')
return node_addon.get_folders(folder_id=folder_id)
box_get_config = generic_views.get_config(
SHORT_NAME,
BoxSerializer
)
def _set_folder(node_addon, folder, auth):
uid = folder['id']
node_addon.set_folder(uid, auth=auth)
node_addon.save()
box_set_config = generic_views.set_config(
SHORT_NAME,
FULL_NAME,
BoxSerializer,
_set_folder
)
box_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
box_root_folder = generic_views.root_folder(
SHORT_NAME
)
| apache-2.0 | 6,605,838,628,226,291,000 | 21.907407 | 80 | 0.70574 | false |
PrefPy/prefpy | prefpy/stats.py | 2 | 4120 | # Filename: statistics.py
# Author: Peter Piech
# Date: 10/4/2015
# Description: Module for computing statistical results
###################### NOTICE: ######################
# The current names of the functions are misleading
# and represent an incorrect understanding of the
# statistical measures. All functions below operate
# on a single data point and can be later aggregated
# using a whole set of data points in the appropriate
# fashion. The names of the functions are embedded
# in many files within this package and will be
# renamed correctly in the future once time allows
# for this massive endeavor.
#####################################################
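# For example (hypothetical usage), the per-data-point errors returned by the
# functions below can be aggregated afterwards:
#   per_point = [mse(ground_truth, est) for est in estimators]
#   overall = sum(per_point) / len(per_point)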
import numpy as np
def mse(mean, estimator):
"""
Description:
Calculates the Mean Squared Error (MSE) of
an estimation on flat numpy ndarrays.
Parameters:
mean: actual value (numpy ndarray)
estimator: estimated value of the mean (numpy ndarray)
"""
return np.mean((np.asarray(estimator) - np.asarray(mean)) ** 2, axis=0)
def sse(mean, estimator):
"""
Description:
Calculates the Sum of Squared Errors (SSE) of
an estimation on flat numpy ndarrays.
Parameters:
mean: actual value (numpy ndarray)
estimator: estimated value of the mean (numpy ndarray)
"""
return np.sum((np.asarray(estimator) - np.asarray(mean)) ** 2, axis=0)
#deprecated
def mix2PL_mse(mean, estimator, m):
"""
Description:
Calculates the Mean Squared Error (MSE) of an
estimator of a mixture of 2 Plackett-Luce models,
on flat numpy ndarrays, where the first element is
the mixing proportion of the first model defined
as the minimum MSE over the inverse permutations of
the estimator.
Parameters:
mean: actual value (numpy ndarray)
estimator: estimated value of the mean (numpy ndarray)
m: number of alternatives in each of the two models
"""
mse1 = mse(mean, estimator)
estimator = np.hstack((1 - estimator[0], estimator[m+1:], estimator[1:m+1]))
mse2 = mse(mean, estimator)
return min(mse1, mse2)
#deprecated
def mix2PL_sse(mean, estimator, m):
"""
Description:
Calculates the Sum of Squared Errors (SSE) of an
estimator of a mixture of 2 Plackett-Luce models,
on flat numpy ndarrays, where the first element is
the mixing proportion of the first model defined
as the minimum SSE over the inverse permutations of
the estimator.
Parameters:
mean: actual value (numpy ndarray)
estimator: estimated value of the mean (numpy ndarray)
m: number of alternatives in each of the two models
"""
sse1 = sse(mean, estimator)
estimator = np.hstack((1 - estimator[0], estimator[m+1:], estimator[1:m+1]))
sse2 = sse(mean, estimator)
return min(sse1, sse2)
#deprecated
def mix2PL_wsse(mean, estimator, m):
"""
Description:
Calculates the weighted Sum of Squared Errors (WSSE)
of an estimator of a mixture of 2 Plackett-Luce models,
on flat numpy ndarrays, where the first element is
the mixing proportion of the first model defined
as the minimum WSSE over the inverse permutations of
the estimator.
Parameters:
mean: actual value (numpy ndarray)
estimator: estimated value of the mean (numpy ndarray)
m: number of alternatives in each of the two models
"""
def wsse(mean1, est1, m1):
return (((est1[0] - mean1[0])**2) +
(mean1[0]*np.sum((np.asarray(est1[1:m1+1]) - np.asarray(mean1[1:m1+1]))**2)) +
((1 - mean1[0]) * np.sum((np.asarray(est1[m1+1:]) - np.asarray(mean1[m1+1:]))**2))
)
wsse1 = wsse(mean, estimator, m)
estimator = np.hstack((1 - estimator[0], estimator[m+1:], estimator[1:m+1]))
wsse2 = wsse(mean, estimator, m)
return min(wsse1, wsse2)
| gpl-3.0 | -7,313,998,905,999,094,000 | 36.504673 | 98 | 0.615777 | false |
iulian787/spack | var/spack/repos/builtin/packages/plumed/package.py | 3 | 8042 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import os.path

from spack import *
class Plumed(AutotoolsPackage):
"""PLUMED is an open source library for free energy calculations in
molecular systems which works together with some of the most popular
molecular dynamics engines.
Free energy calculations can be performed as a function of many order
parameters with a particular focus on biological problems, using state
of the art methods such as metadynamics, umbrella sampling and
Jarzynski-equation based steered MD.
The software, written in C++, can be easily interfaced with both fortran
and C/C++ codes.
"""
homepage = 'http://www.plumed.org/'
url = 'https://github.com/plumed/plumed2/archive/v2.5.3.tar.gz'
git = 'https://github.com/plumed/plumed2.git'
version('master', branch='master')
version('2.6.1', sha256='c1b3c397b2d971140aa240dde50e48a04ce78e3dedb02b6dca80fa53f8026e4e')
version('2.6.0', sha256='3d57ae460607a49547ef38a52c4ac93493a3966857c352280a9c05f5dcdb1820')
version('2.5.5', preferred=True, sha256='70faa9ff1938e286dc388cb793b39840953e5646855b684f48df1bc864b737e8')
version('2.5.4', sha256='a1647e598191f261e75d06351e607475d395af481315052a4c28563ac9989a7f')
version('2.5.3', sha256='543288be667dc4201fc461ecd2dd4878ddfbeac682d0c021c99ea8e501c7c9dc')
version('2.5.2', sha256='85d10cc46e2e37c7719cf51c0931278f56c2c8f8a9d86188b2bf97c2535a2ab4')
version('2.5.1', sha256='de309980dcfd6f6e0e70e138856f4bd9eb4d8a513906a5e6389f18a5af7f2eba')
version('2.5.0', sha256='53e08187ec9f8af2326fa84407e34644a7c51d2af93034309fb70675eee5e4f7')
version('2.4.6', sha256='c22ad19f5cd36ce9fe4ba0b53158fc2a3d985c48fc04606e3f3b3e835b994cb3')
version('2.4.4', sha256='1e5c24109314481fad404da97d61c7339b219e27e120c9c80bacc79c9f6a51a8')
version('2.4.2', sha256='528ce57f1f5330480bcd403140166a4580efd2acaea39c85dfeca5e2cd649321')
version('2.4.1', sha256='f00410ebdd739c2ddf55fcd714ff4bd88a1029e02d2fc9cea0b5fca34e0fc4eb')
version('2.3.5', sha256='a6a66ca4582c1aecc6138c96be015e13cd06a718e8446b2f13e610fe34602e4f')
version('2.3.3', sha256='ac058ff529f207d5b4169fb5a87bdb3c77307dfef1ac543ad8b6c74c5de7fc91')
version('2.3.0', sha256='b1c8a54a313a0569e27e36420770074f35406453f73de70e55c424652abeddf1')
version('2.2.4', sha256='d7a1dba34a7fe03f23e8d39ab6e15b230c4851373fdceb3602e2de26ea53ce37')
version('2.2.3', sha256='2db19c5f6a2918833941d0bf47b5431d0865529d786df797ccc966d763ed7b0c')
# Variants. PLUMED by default builds a number of optional modules.
# The ones listed here are not built by default for various reasons,
# such as stability, lack of testing, or lack of demand.
# FIXME: This needs to be an optional
variant(
'optional_modules',
default='all',
values=lambda x: True,
description='String that is used to build optional modules'
)
variant('shared', default=True, description='Builds shared libraries')
variant('mpi', default=True, description='Activates MPI support')
variant('gsl', default=True, description='Activates GSL support')
# Dependencies. LAPACK and BLAS are recommended but not essential.
depends_on('zlib')
depends_on('blas')
depends_on('lapack')
# For libmatheval support through the 'function' module
# which is enabled by default (or when optional_modules=all)
depends_on('libmatheval', when='@:2.4.99')
depends_on('mpi', when='+mpi')
depends_on('gsl', when='+gsl')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('py-cython', type='build', when='@2.5:')
force_autoreconf = True
parallel = False
def apply_patch(self, other):
# The name of MD engines differ slightly from the ones used in Spack
format_strings = collections.defaultdict(
lambda: '{0.name}-{0.version}'
)
format_strings['espresso'] = 'q{0.name}-{0.version}'
format_strings['amber'] = '{0.name}{0.version}'
get_md = lambda x: format_strings[x.name].format(x)
# Get available patches
plumed_patch = Executable(
os.path.join(self.spec.prefix.bin, 'plumed-patch')
)
out = plumed_patch('-q', '-l', output=str)
available = out.split(':')[-1].split()
# Check that `other` is among the patchable applications
if get_md(other) not in available:
msg = '{0.name}@{0.version} is not among the MD engine'
msg += ' that can be patched by {1.name}@{1.version}.\n'
msg += 'Supported engines are:\n'
for x in available:
msg += x + '\n'
raise RuntimeError(msg.format(other, self.spec))
# Call plumed-patch to patch executables
target = format_strings[other.name].format(other)
plumed_patch('-p', '-e', target)
def setup_dependent_package(self, module, dependent_spec):
# Make plumed visible from dependent packages
module.plumed = dependent_spec['plumed'].command
@property
def plumed_inc(self):
return os.path.join(
self.prefix.lib, 'plumed', 'src', 'lib', 'Plumed.inc'
)
@run_before('autoreconf')
def filter_gslcblas(self):
# This part is needed to avoid linking with gsl cblas
# interface which will mask the cblas interface
# provided by optimized libraries due to linking order
filter_file('-lgslcblas', '', 'configure.ac')
def configure_args(self):
spec = self.spec
# From plumed docs :
# Also consider that this is different with respect to what some other
# configure script does in that variables such as MPICXX are
# completely ignored here. In case you work on a machine where CXX is
# set to a serial compiler and MPICXX to a MPI compiler, to compile
# with MPI you should use:
#
# > ./configure CXX="$MPICXX"
# The configure.ac script may detect the wrong linker for
# LD_RO which causes issues at link time. Here we work around
# the issue saying we have no LD_RO executable.
configure_opts = ['--disable-ld-r']
# If using MPI then ensure the correct compiler wrapper is used.
if '+mpi' in spec:
configure_opts.extend([
'--enable-mpi',
'CXX={0}'.format(spec['mpi'].mpicxx)
])
# If the MPI dependency is provided by the intel-mpi package then
# the following additional argument is required to allow it to
# build.
if 'intel-mpi' in spec:
configure_opts.extend([
'STATIC_LIBS=-mt_mpi'
])
# Set flags to help find gsl
if '+gsl' in self.spec:
gsl_libs = self.spec['gsl'].libs
blas_libs = self.spec['blas'].libs
configure_opts.append('LDFLAGS={0}'.format(
(gsl_libs + blas_libs).ld_flags
))
# Additional arguments
configure_opts.extend([
'--enable-shared={0}'.format('yes' if '+shared' in spec else 'no'),
'--enable-gsl={0}'.format('yes' if '+gsl' in spec else 'no')
])
# Construct list of optional modules
# If we have specified any optional modules then add the argument to
# enable or disable them.
optional_modules = self.spec.variants['optional_modules'].value
if optional_modules:
# From 'configure --help' @2.3:
# all/none/reset or : separated list such as
# +crystallization:-bias default: reset
configure_opts.append(
'--enable-modules={0}'.format(optional_modules)
)
return configure_opts
| lgpl-2.1 | 5,425,242,061,569,583,000 | 41.776596 | 111 | 0.660159 | false |
somini/gpodder | src/gpodder/gtkui/base.py | 3 | 4742 | # -*- coding: utf-8 -*-
"""
UI Base Module for GtkBuilder
Based on SimpleGladeApp.py Copyright (C) 2004 Sandino Flores Moreno
"""
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import os
import sys
import re
import tokenize
import gtk
class GtkBuilderWidget(object):
def __init__(self, ui_folders, textdomain, **kwargs):
"""
Loads the UI file from the specified folder (with translations
from the textdomain) and initializes attributes.
ui_folders:
List of folders with GtkBuilder .ui files in search order
textdomain:
The textdomain to be used for translating strings
**kwargs:
Keyword arguments will be set as attributes to this window
"""
for key, value in kwargs.items():
setattr(self, key, value)
self.builder = gtk.Builder()
self.builder.set_translation_domain(textdomain)
#print >>sys.stderr, 'Creating new from file', self.__class__.__name__
ui_file = '%s.ui' % self.__class__.__name__.lower()
# Search for the UI file in the UI folders, stop after first match
for ui_folder in ui_folders:
filename = os.path.join(ui_folder, ui_file)
if os.path.exists(filename):
self.builder.add_from_file(filename)
break
self.builder.connect_signals(self)
self.set_attributes()
self.new()
def set_attributes(self):
"""
Convert widget names to attributes of this object.
It means a widget named vbox-dialog in GtkBuilder
        is referred to using self.vbox_dialog in the code.
"""
for widget in self.builder.get_objects():
# Just to be safe - every widget from the builder is buildable
if not isinstance(widget, gtk.Buildable):
continue
# The following call looks ugly, but see Gnome bug 591085
widget_name = gtk.Buildable.get_name(widget)
widget_api_name = '_'.join(re.findall(tokenize.Name, widget_name))
if hasattr(self, widget_api_name):
                raise AttributeError("instance %s already has an attribute %s" % (self, widget_api_name))
else:
setattr(self, widget_api_name, widget)
@property
def main_window(self):
"""Returns the main window of this GtkBuilderWidget"""
return getattr(self, self.__class__.__name__)
def new(self):
"""
Method called when the user interface is loaded and ready to be used.
        At this moment, the widgets are loaded and can be referred to as self.widget_name
"""
pass
def main(self):
"""
Starts the main loop of processing events.
The default implementation calls gtk.main()
        Useful for applications that need a non-GTK main loop.
        For example, applications based on gstreamer need to override
        this method with gst.main()
Do not directly call this method in your programs.
Use the method run() instead.
"""
gtk.main()
def quit(self):
"""
Quit processing events.
The default implementation calls gtk.main_quit()
        Useful for applications that need a non-GTK main loop.
        For example, applications based on gstreamer need to override
        this method with gst.main_quit()
"""
gtk.main_quit()
def run(self):
"""
Starts the main loop of processing events checking for Control-C.
        The default implementation checks whether a Control-C is pressed,
then calls on_keyboard_interrupt().
Use this method for starting programs.
"""
try:
self.main()
except KeyboardInterrupt:
self.on_keyboard_interrupt()
def on_keyboard_interrupt(self):
"""
This method is called by the default implementation of run()
after a program is finished by pressing Control-C.
"""
pass
| gpl-3.0 | 7,934,617,221,064,061,000 | 31.479452 | 104 | 0.62927 | false |
IONISx/edx-platform | common/test/acceptance/tests/video/test_video_module.py | 1 | 43968 | # -*- coding: utf-8 -*-
"""
Acceptance tests for Video.
"""
from nose.plugins.attrib import attr
from unittest import skipIf, skip
from ..helpers import UniqueCourseTest, is_youtube_available, YouTubeStubConfig
from ...pages.lms.video.video import VideoPage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import skip_if_browser
from flaky import flaky
VIDEO_SOURCE_PORT = 8777
HTML5_SOURCES = [
'http://localhost:{0}/gizmo.mp4'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.webm'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.ogv'.format(VIDEO_SOURCE_PORT),
]
HTML5_SOURCES_INCORRECT = [
'http://localhost:{0}/gizmo.mp99'.format(VIDEO_SOURCE_PORT),
]
@attr('shard_4')
@skipIf(is_youtube_available() is False, 'YouTube is not available!')
class VideoBaseTest(UniqueCourseTest):
"""
Base class for tests of the Video Player
Sets up the course and provides helper functions for the Video tests.
"""
def setUp(self):
"""
Initialization of pages and course fixture for video tests
"""
super(VideoBaseTest, self).setUp()
self.video = VideoPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.course_nav = CourseNavPage(self.browser)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id)
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.metadata = None
self.assets = []
self.verticals = None
self.youtube_configuration = {}
self.user_info = {}
# reset youtube stub server
self.addCleanup(YouTubeStubConfig.reset)
def navigate_to_video(self):
""" Prepare the course and get to the video and render it """
self._install_course_fixture()
self._navigate_to_courseware_video_and_render()
def navigate_to_video_no_render(self):
"""
Prepare the course and get to the video unit
        but do not wait for it to render, because
        there has been an error.
"""
self._install_course_fixture()
self._navigate_to_courseware_video_no_render()
def _install_course_fixture(self):
""" Install the course fixture that has been defined """
if self.assets:
self.course_fixture.add_asset(self.assets)
chapter_sequential = XBlockFixtureDesc('sequential', 'Test Section')
chapter_sequential.add_children(*self._add_course_verticals())
chapter = XBlockFixtureDesc('chapter', 'Test Chapter').add_children(chapter_sequential)
self.course_fixture.add_children(chapter)
self.course_fixture.install()
if len(self.youtube_configuration) > 0:
YouTubeStubConfig.configure(self.youtube_configuration)
def _add_course_verticals(self):
"""
Create XBlockFixtureDesc verticals
:return: a list of XBlockFixtureDesc
"""
xblock_verticals = []
_verticals = self.verticals
# Video tests require at least one vertical with a single video.
if not _verticals:
_verticals = [[{'display_name': 'Video', 'metadata': self.metadata}]]
for vertical_index, vertical in enumerate(_verticals):
xblock_verticals.append(self._create_single_vertical(vertical, vertical_index))
return xblock_verticals
def _create_single_vertical(self, vertical, vertical_index):
"""
Create a single course vertical of type XBlockFixtureDesc with category `vertical`.
A single course vertical can contain single or multiple video modules.
:param vertical: vertical data list
:param vertical_index: index for the vertical display name
:return: XBlockFixtureDesc
"""
xblock_course_vertical = XBlockFixtureDesc('vertical', 'Test Vertical-{0}'.format(vertical_index))
for video in vertical:
xblock_course_vertical.add_children(
XBlockFixtureDesc('video', video['display_name'], metadata=video.get('metadata')))
return xblock_course_vertical
def _navigate_to_courseware_video(self):
""" Register for the course and navigate to the video unit """
self.auth_page.visit()
self.user_info = self.auth_page.user_info
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
def _navigate_to_courseware_video_and_render(self):
""" Wait for the video player to render """
self._navigate_to_courseware_video()
self.video.wait_for_video_player_render()
def _navigate_to_courseware_video_no_render(self):
""" Wait for the video Xmodule but not for rendering """
self._navigate_to_courseware_video()
self.video.wait_for_video_class()
def metadata_for_mode(self, player_mode, additional_data=None):
"""
Create a dictionary for video player configuration according to `player_mode`
:param player_mode (str): Video player mode
:param additional_data (dict): Optional additional metadata.
:return: dict
"""
metadata = {}
if player_mode == 'html5':
metadata.update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES
})
if player_mode == 'youtube_html5':
metadata.update({
'html5_sources': HTML5_SOURCES,
})
if player_mode == 'youtube_html5_unsupported_video':
metadata.update({
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'html5_unsupported_video':
metadata.update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES_INCORRECT
})
if additional_data:
metadata.update(additional_data)
return metadata
def go_to_sequential_position(self, position):
"""
Navigate to sequential specified by `video_display_name`
"""
self.course_nav.go_to_sequential_position(position)
self.video.wait_for_video_player_render()
class YouTubeVideoTest(VideoBaseTest):
""" Test YouTube Video Player """
def setUp(self):
super(YouTubeVideoTest, self).setUp()
def test_youtube_video_rendering_wo_html5_sources(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode without HTML5 sources
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
"""
self.navigate_to_video()
# Verify that video has rendered in "Youtube" mode
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_cc_button_wo_english_transcript(self):
"""
Scenario: CC button works correctly w/o english transcript in Youtube mode
Given the course has a Video component in "Youtube" mode
And I have defined a non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct text in the captions
"""
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.append('chinese_transcripts.srt')
self.navigate_to_video()
self.video.show_captions()
# Verify that we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_cc_button_transcripts_and_sub_fields_empty(self):
"""
Scenario: CC button works correctly if transcripts and sub fields are empty,
but transcript file exists in assets (Youtube mode of Video component)
Given the course has a Video component in "Youtube" mode
And I have uploaded a .srt.sjson file to assets
Then I see the correct english text in the captions
"""
self._install_course_fixture()
self.course_fixture.add_asset(['subs__Szu8hVDvgc.srt.sjson'])
self.course_fixture._upload_assets()
self._navigate_to_courseware_video_and_render()
self.video.show_captions()
# Verify that we see "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
def test_cc_button_hidden_no_translations(self):
"""
Scenario: CC button is hidden if no translations
Given the course has a Video component in "Youtube" mode
Then the "CC" button is hidden
"""
self.navigate_to_video()
self.assertFalse(self.video.is_button_shown('CC'))
def test_fullscreen_video_alignment_with_transcript_hidden(self):
"""
Scenario: Video is aligned with transcript hidden in fullscreen mode
Given the course has a Video component in "Youtube" mode
When I view the video at fullscreen
Then the video with the transcript hidden is aligned correctly
"""
self.navigate_to_video()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly without enabled transcript
self.assertTrue(self.video.is_aligned(False))
def test_download_button_wo_english_transcript(self):
"""
Scenario: Download button works correctly w/o english transcript in YouTube mode
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I can download the transcript in "srt" format
"""
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
self.assets.append('chinese_transcripts.srt')
# go to video
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs__Szu8hVDvgc.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': '_Szu8hVDvgc'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
# check if we can download transcript in "srt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Welcome to edX.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_fullscreen_video_alignment_on_transcript_toggle(self):
"""
Scenario: Video is aligned correctly on transcript toggle in fullscreen mode
Given the course has a Video component in "Youtube" mode
And I have uploaded a .srt.sjson file to assets
And I have defined subtitles for the video
When I view the video at fullscreen
Then the video with the transcript enabled is aligned correctly
And the video with the transcript hidden is aligned correctly
"""
self.assets.append('subs__Szu8hVDvgc.srt.sjson')
data = {'sub': '_Szu8hVDvgc'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly with enabled transcript
self.assertTrue(self.video.is_aligned(True))
# click video button "CC"
self.video.click_player_button('CC')
# check if video aligned correctly without enabled transcript
self.assertTrue(self.video.is_aligned(False))
def test_video_rendering_with_default_response_time(self):
"""
Scenario: Video is rendered in Youtube mode when the YouTube Server responds quickly
            Given the YouTube server response time is less than 1.5 seconds
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "Youtube" mode
"""
# configure youtube server
self.youtube_configuration['time_to_response'] = 0.4
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_video_rendering_wo_default_response_time(self):
"""
Scenario: Video is rendered in HTML5 when the YouTube Server responds slowly
Given the YouTube server response time is greater than 1.5 seconds
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
"""
# configure youtube server
self.youtube_configuration['time_to_response'] = 2.0
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
def test_video_with_youtube_blocked_with_default_response_time(self):
"""
Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked
Given the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
And only one video has rendered
"""
# configure youtube server
self.youtube_configuration.update({
'youtube_api_blocked': True,
})
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# The video should only be loaded once
self.assertEqual(len(self.video.q(css='video')), 1)
def test_video_with_youtube_blocked_delayed_response_time(self):
"""
Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked
Given the YouTube server response time is greater than 1.5 seconds
And the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
And only one video has rendered
"""
# configure youtube server
self.youtube_configuration.update({
'time_to_response': 2.0,
'youtube_api_blocked': True,
})
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# The video should only be loaded once
self.assertEqual(len(self.video.q(css='video')), 1)
def test_html5_video_rendered_with_youtube_captions(self):
"""
        Scenario: User should see Youtube captions when there are no transcripts
            available for HTML5 mode
Given that I have uploaded a .srt.sjson file to assets for Youtube mode
And the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
And Video component rendered in HTML5 mode
And Html5 mode video has no transcripts
When I see the captions for HTML5 mode video
Then I should see the Youtube captions
"""
self.assets.append('subs__Szu8hVDvgc.srt.sjson')
# configure youtube server
self.youtube_configuration.update({
'time_to_response': 2.0,
'youtube_api_blocked': True,
})
data = {'sub': '_Szu8hVDvgc'}
self.metadata = self.metadata_for_mode('youtube_html5', additional_data=data)
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# check if caption button is visible
self.assertTrue(self.video.is_button_shown('CC'))
self._verify_caption_text('Welcome to edX.')
def test_download_transcript_button_works_correctly(self):
"""
Scenario: Download Transcript button works correctly
Given the course has Video components A and B in "Youtube" mode
And Video component C in "HTML5" mode
And I have defined downloadable transcripts for the videos
Then I can download a transcript for Video A in "srt" format
And I can download a transcript for Video A in "txt" format
And I can download a transcript for Video B in "txt" format
And the Download Transcript menu does not exist for Video C
"""
data_a = {'sub': '_Szu8hVDvgc', 'download_track': True}
youtube_a_metadata = self.metadata_for_mode('youtube', additional_data=data_a)
self.assets.append('subs__Szu8hVDvgc.srt.sjson')
data_b = {'youtube_id_1_0': 'b7xgknqkQk8', 'sub': 'b7xgknqkQk8', 'download_track': True}
youtube_b_metadata = self.metadata_for_mode('youtube', additional_data=data_b)
self.assets.append('subs_b7xgknqkQk8.srt.sjson')
data_c = {'track': 'http://example.org/', 'download_track': True}
html5_c_metadata = self.metadata_for_mode('html5', additional_data=data_c)
self.verticals = [
[{'display_name': 'A', 'metadata': youtube_a_metadata}],
[{'display_name': 'B', 'metadata': youtube_b_metadata}],
[{'display_name': 'C', 'metadata': html5_c_metadata}]
]
# open the section with videos (open video "A")
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "00:00:00,260"
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', '00:00:00,260'))
# select the transcript format "txt"
self.assertTrue(self.video.select_transcript_format('txt'))
# check if we can download transcript in "txt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('txt', 'Welcome to edX.'))
# open video "B"
self.course_nav.go_to_sequential('B')
# check if we can download transcript in "txt" format that has text "Equal transcripts"
self.assertTrue(self.video.downloaded_transcript_contains_text('txt', 'Equal transcripts'))
# open video "C"
self.course_nav.go_to_sequential('C')
# menu "download_transcript" doesn't exist
self.assertFalse(self.video.is_menu_present('download_transcript'))
def _verify_caption_text(self, text):
self.video._wait_for(
lambda: (text in self.video.captions_text),
u'Captions contain "{}" text'.format(text),
timeout=5
)
def test_video_language_menu_working(self):
"""
Scenario: Language menu works correctly in Video component
Given the course has a Video component in "Youtube" mode
And I have defined multiple language transcripts for the videos
And I make sure captions are closed
And I see video menu "language" with correct items
And I select language with code "zh"
Then I see "好 各位同学" text in the captions
And I select language with code "en"
Then I see "Welcome to edX." text in the captions
"""
self.assets.extend(['chinese_transcripts.srt', 'subs__Szu8hVDvgc.srt.sjson'])
data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': '_Szu8hVDvgc'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.hide_captions()
correct_languages = {'en': 'English', 'zh': 'Chinese'}
self.assertEqual(self.video.caption_languages, correct_languages)
self.video.select_language('zh')
unicode_text = "好 各位同学".decode('utf-8')
self._verify_caption_text(unicode_text)
self.video.select_language('en')
self._verify_caption_text('Welcome to edX.')
def test_multiple_videos_in_sequentials_load_and_work(self):
"""
Scenario: Multiple videos in sequentials all load and work, switching between sequentials
Given it has videos "A,B" in "Youtube" mode in position "1" of sequential
And videos "E,F" in "Youtube" mode in position "2" of sequential
"""
self.verticals = [
[{'display_name': 'A'}, {'display_name': 'B'}], [{'display_name': 'C'}, {'display_name': 'D'}]
]
tab1_video_names = ['A', 'B']
tab2_video_names = ['C', 'D']
def execute_video_steps(video_names):
"""
            Play and then pause each of the given videos, checking the player state.
"""
for video_name in video_names:
self.video.use_video(video_name)
self.video.click_player_button('play')
self.assertIn(self.video.state, ['playing', 'buffering'])
self.video.click_player_button('pause')
# go to video
self.navigate_to_video()
execute_video_steps(tab1_video_names)
# go to second sequential position
self.go_to_sequential_position(2)
execute_video_steps(tab2_video_names)
# go back to first sequential position
        # We are again playing tab 1 videos to ensure that switching didn't break any video functionality.
self.go_to_sequential_position(1)
execute_video_steps(tab1_video_names)
def test_video_component_stores_speed_correctly_for_multiple_videos(self):
"""
Scenario: Video component stores speed correctly when each video is in separate sequential
Given I have a video "A" in "Youtube" mode in position "1" of sequential
And a video "B" in "Youtube" mode in position "2" of sequential
And a video "C" in "HTML5" mode in position "3" of sequential
"""
self.verticals = [
[{'display_name': 'A'}], [{'display_name': 'B'}],
[{'display_name': 'C', 'metadata': self.metadata_for_mode('html5')}]
]
self.navigate_to_video()
# select the "2.0" speed on video "A"
self.course_nav.go_to_sequential('A')
self.video.speed = '2.0'
# select the "0.50" speed on video "B"
self.course_nav.go_to_sequential('B')
self.video.speed = '0.50'
# open video "C"
self.course_nav.go_to_sequential('C')
# Since the playback speed was set to .5 in "B", this video will also be impacted
# because a playback speed has never explicitly been set for it. However, this video
# does not have a .5 playback option, so the closest possible (.75) should be selected.
self.video.verify_speed_changed('0.75x')
# open video "A"
self.course_nav.go_to_sequential('A')
# Video "A" should still play at speed 2.0 because it was explicitly set to that.
self.assertEqual(self.video.speed, '2.0x')
# reload the page
self.video.reload_page()
# open video "A"
self.course_nav.go_to_sequential('A')
# check if video "A" should start playing at speed "2.0"
self.assertEqual(self.video.speed, '2.0x')
# select the "1.0" speed on video "A"
self.video.speed = '1.0'
# open video "B"
self.course_nav.go_to_sequential('B')
# Video "B" should still play at speed .5 because it was explicitly set to that.
self.assertEqual(self.video.speed, '0.50x')
# open video "C"
self.course_nav.go_to_sequential('C')
# The change of speed for Video "A" should impact Video "C" because it still has
# not been explicitly set to a speed.
self.video.verify_speed_changed('1.0x')
def test_video_has_correct_transcript(self):
"""
Scenario: Youtube video has correct transcript if fields for other speeds are filled
Given it has a video in "Youtube" mode
And I have uploaded multiple transcripts
And I make sure captions are opened
Then I see "Welcome to edX." text in the captions
And I select the "1.50" speed
And I reload the page with video
Then I see "Welcome to edX." text in the captions
And I see duration "1:56"
"""
self.assets.extend(['subs__Szu8hVDvgc.srt.sjson', 'subs_b7xgknqkQk8.srt.sjson'])
data = {'sub': '_Szu8hVDvgc', 'youtube_id_1_5': 'b7xgknqkQk8'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_captions()
self.assertIn('Welcome to edX.', self.video.captions_text)
self.video.speed = '1.50'
self.video.reload_page()
self.assertIn('Welcome to edX.', self.video.captions_text)
        self.assertEqual(self.video.duration, '1:56')
def test_video_position_stored_correctly_wo_seek(self):
"""
Scenario: Video component stores position correctly when page is reloaded
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
        And I click video button "play"
        Then I wait until video reaches position "0:05"
        And I click video button "pause"
        And I reload the page with video
        And I click video button "play"
        And I click video button "pause"
        Then the video slider should be equal to or greater than "0:05"
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for_position('0:05')
self.video.click_player_button('pause')
self.video.reload_page()
self.video.click_player_button('play')
self.video.click_player_button('pause')
self.assertGreaterEqual(self.video.seconds, 5)
@skip("Intermittently fails 03 June 2014")
def test_video_position_stored_correctly_with_seek(self):
"""
        Scenario: Video component stores position correctly when page is reloaded after seek
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
        And I click video button "play"
        And I click video button "pause"
        Then I seek video to "0:10" position
        And I click video button "play"
        And I click video button "pause"
        And I reload the page with video
        Then the video slider should be equal to or greater than "0:10"
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.video.seek('0:10')
self.video.click_player_button('pause')
self.video.reload_page()
self.video.click_player_button('play')
self.video.click_player_button('pause')
self.assertGreaterEqual(self.video.seconds, 10)
def test_simplified_and_traditional_chinese_transcripts(self):
"""
Scenario: Simplified and Traditional Chinese transcripts work as expected in Youtube mode
Given the course has a Video component in "Youtube" mode
And I have defined a Simplified Chinese transcript for the video
And I have defined a Traditional Chinese transcript for the video
Then I see the correct subtitle language options in cc menu
Then I see the correct text in the captions for Simplified and Traditional Chinese transcripts
And I can download the transcripts for Simplified and Traditional Chinese
And video subtitle menu has 'zh_HANS', 'zh_HANT' translations for 'Simplified Chinese'
and 'Traditional Chinese' respectively
"""
data = {
'download_track': True,
'transcripts': {'zh_HANS': 'simplified_chinese.srt', 'zh_HANT': 'traditional_chinese.srt'}
}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.extend(['simplified_chinese.srt', 'traditional_chinese.srt'])
self.navigate_to_video()
langs = {'zh_HANS': '在线学习是革', 'zh_HANT': '在線學習是革'}
for lang_code, text in langs.items():
self.assertTrue(self.video.select_language(lang_code))
unicode_text = text.decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
self.assertEqual(self.video.caption_languages, {'zh_HANS': 'Simplified Chinese', 'zh_HANT': 'Traditional Chinese'})
def test_video_bumper_render(self):
"""
Scenario: Multiple videos with bumper in sequentials all load and work, switching between sequentials
Given it has videos "A,B" in "Youtube" and "HTML5" modes in position "1" of sequential
And video "C" in "Youtube" mode in position "2" of sequential
When I open sequential position "1"
Then I see video "B" has a poster
When I click on it
Then I see video bumper is playing
When I skip the bumper
Then I see the main video
When I click on video "A"
Then the main video starts playing
When I open sequential position "2"
And click on the poster
Then the main video starts playing
        Then I see that the main video starts playing once I go back to position "1" of sequential
When I reload the page
Then I see that the main video starts playing when I click on the poster
"""
additional_data = {
u'video_bumper': {
u'value': {
"transcripts": {},
"video_id": "video_001"
}
}
}
self.verticals = [
[{'display_name': 'A'}, {'display_name': 'B', 'metadata': self.metadata_for_mode('html5')}],
[{'display_name': 'C'}]
]
tab1_video_names = ['A', 'B']
tab2_video_names = ['C']
def execute_video_steps(video_names):
"""
            Check each given video's poster, click it, and verify that playback starts.
"""
for video_name in video_names:
self.video.use_video(video_name)
self.assertTrue(self.video.is_poster_shown)
self.video.click_on_poster()
self.video.wait_for_video_player_render(autoplay=True)
self.assertIn(self.video.state, ['playing', 'buffering', 'finished'])
self.course_fixture.add_advanced_settings(additional_data)
self.navigate_to_video_no_render()
self.video.use_video('B')
self.assertTrue(self.video.is_poster_shown)
self.video.click_on_poster()
self.video.wait_for_video_bumper_render()
self.assertIn(self.video.state, ['playing', 'buffering', 'finished'])
self.video.click_player_button('skip_bumper')
        # No autoplay here; the video may be too small, so the player may stay paused.
self.video.wait_for_video_player_render()
self.assertIn(self.video.state, ['playing', 'buffering', 'finished'])
self.video.use_video('A')
execute_video_steps(['A'])
# go to second sequential position
self.course_nav.go_to_sequential_position(2)
execute_video_steps(tab2_video_names)
# go back to first sequential position
        # We are again playing tab 1 videos to ensure that switching didn't break any video functionality.
self.course_nav.go_to_sequential_position(1)
execute_video_steps(tab1_video_names)
self.video.browser.refresh()
execute_video_steps(tab1_video_names)
class YouTubeHtml5VideoTest(VideoBaseTest):
""" Test YouTube HTML5 Video Player """
def setUp(self):
super(YouTubeHtml5VideoTest, self).setUp()
@flaky # TODO fix this, see TNL-1642
def test_youtube_video_rendering_with_unsupported_sources(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode
        with HTML5 sources that aren't supported by the browser
Given the course has a Video component in "Youtube_HTML5_Unsupported_Video" mode
Then the video has rendered in "Youtube" mode
"""
self.metadata = self.metadata_for_mode('youtube_html5_unsupported_video')
self.navigate_to_video()
# Verify that the video has rendered in "Youtube" mode
self.assertTrue(self.video.is_video_rendered('youtube'))
class Html5VideoTest(VideoBaseTest):
""" Test HTML5 Video Player """
def setUp(self):
super(Html5VideoTest, self).setUp()
def test_autoplay_disabled_for_video_component(self):
"""
Scenario: Autoplay is disabled by default for a Video component
Given the course has a Video component in "HTML5" mode
When I view the Video component
Then it does not have autoplay enabled
"""
self.metadata = self.metadata_for_mode('html5')
self.navigate_to_video()
# Verify that the video has autoplay mode disabled
self.assertFalse(self.video.is_autoplay_enabled)
def test_html5_video_rendering_with_unsupported_sources(self):
"""
Scenario: LMS displays an error message for HTML5 sources that are not supported by browser
Given the course has a Video component in "HTML5_Unsupported_Video" mode
When I view the Video component
        Then an error message is shown
And the error message has the correct text
"""
self.metadata = self.metadata_for_mode('html5_unsupported_video')
self.navigate_to_video_no_render()
# Verify that error message is shown
self.assertTrue(self.video.is_error_message_shown)
# Verify that error message has correct text
correct_error_message_text = 'No playable video sources found.'
self.assertIn(correct_error_message_text, self.video.error_message_text)
# Verify that spinner is not shown
self.assertFalse(self.video.is_spinner_shown)
def test_download_button_wo_english_transcript(self):
"""
Scenario: Download button works correctly w/o english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a downloadable non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
self.assets.append('chinese_transcripts.srt')
# go to video
self.navigate_to_video()
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs__Szu8hVDvgc.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': '_Szu8hVDvgc'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
# check if we can download transcript in "srt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Welcome to edX.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# Then I can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_full_screen_video_alignment_with_transcript_visible(self):
"""
Scenario: Video is aligned correctly with transcript enabled in fullscreen mode
Given the course has a Video component in "HTML5" mode
And I have uploaded a .srt.sjson file to assets
And I have defined subtitles for the video
When I show the captions
And I view the video at fullscreen
Then the video with the transcript enabled is aligned correctly
"""
self.assets.append('subs__Szu8hVDvgc.srt.sjson')
data = {'sub': '_Szu8hVDvgc'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly with enabled transcript
self.assertTrue(self.video.is_aligned(True))
def test_cc_button_with_english_transcript(self):
"""
Scenario: CC button works correctly with only english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined english subtitles for the video
And I have uploaded an english transcript file to assets
Then I see the correct text in the captions
"""
self.assets.append('subs__Szu8hVDvgc.srt.sjson')
data = {'sub': '_Szu8hVDvgc'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# check if we see "Welcome to edX." text in the captions
self.assertIn("Welcome to edX.", self.video.captions_text)
def test_cc_button_wo_english_transcript(self):
"""
Scenario: CC button works correctly w/o english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct text in the captions
"""
self.assets.append('chinese_transcripts.srt')
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_video_rendering(self):
"""
Scenario: Video component is fully rendered in the LMS in HTML5 mode
Given the course has a Video component in "HTML5" mode
Then the video has rendered in "HTML5" mode
And video sources are correct
"""
self.metadata = self.metadata_for_mode('html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
self.assertTrue(all([source in HTML5_SOURCES for source in self.video.sources]))
class YouTubeQualityTest(VideoBaseTest):
""" Test YouTube Video Quality Button """
def setUp(self):
super(YouTubeQualityTest, self).setUp()
@skip_if_browser('firefox')
def test_quality_button_visibility(self):
"""
Scenario: Quality button appears on play.
Given the course has a Video component in "Youtube" mode
Then I see video button "quality" is hidden
And I click video button "play"
Then I see video button "quality" is visible
"""
self.navigate_to_video()
self.assertFalse(self.video.is_quality_button_visible)
self.video.click_player_button('play')
self.assertTrue(self.video.is_quality_button_visible)
@skip_if_browser('firefox')
def test_quality_button_works_correctly(self):
"""
Scenario: Quality button works correctly.
Given the course has a Video component in "Youtube" mode
And I click video button "play"
And I see video button "quality" is inactive
And I click video button "quality"
Then I see video button "quality" is active
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.assertFalse(self.video.is_quality_button_active)
self.video.click_player_button('quality')
self.assertTrue(self.video.is_quality_button_active)
| agpl-3.0 | 5,575,694,197,653,081,000 | 38.731153 | 123 | 0.638305 | false |
piraz/podship | podship/test/services/account.py | 3 | 1299 | #!/usr/bin/env python
#
# Copyright 2015 Flavio Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4:
from __future__ import (absolute_import, division, print_function,
with_statement)
from podship.services.account import AccountService
import unittest
class AccountServiceTestCase(unittest.TestCase):
""" Case that covers the account service.
"""
def setUp(self):
self.service = AccountService(self)
def test_is_account_service(self):
""" Dummy test that verifies if the service is an account service
"""
# TODO: This test must go, it was designed to get travis build working
# properly.
self.assertEqual(self.service.__class__, AccountService)
| apache-2.0 | 3,502,722,712,378,938,400 | 33.184211 | 78 | 0.712856 | false |
freedomtan/tensorflow | tensorflow/compiler/mlir/tfr/examples/pad/ops_defs.py | 3 | 5752 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the mirror pad and mirror pad grad."""
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=missing-function-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
from tensorflow.compiler.mlir.tfr.python import composite
from tensorflow.compiler.mlir.tfr.python.op_reg_gen import gen_register_op
from tensorflow.compiler.mlir.tfr.python.tfr_gen import tfr_gen_from_module
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
Composite = composite.Composite
FLAGS = flags.FLAGS
flags.DEFINE_string(
'output', None,
    'Path to write the generated register op file and MLIR file.')
flags.DEFINE_bool('gen_register_op', True,
'Generate register op cc file or tfr mlir file.')
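# Example invocation (paths and flag values below are illustrative only):
#   python ops_defs.py --output=/tmp/new_mirror_pad_op.cc --gen_register_op
#   python ops_defs.py --output=/tmp/new_mirror_pad.mlir --nogen_register_op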
@Composite(
'NewMirrorPad',
inputs=['input_: T', 'paddings: Tpaddings'],
attrs=['mode: {"REFLECT", "SYMMETRIC"}'],
derived_attrs=['T: type', 'Tpaddings: {int32, int64} = DT_INT32'],
outputs=['output: T'])
def _composite_mirror_pad(input_, paddings, mode):
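  # Rough shape of the composite: for every axis the input is reversed along
  # that axis, the requested number of leading/trailing slices is split off to
  # serve as the left/right padding, and the padding is concatenated around the
  # original tensor. REFLECT and SYMMETRIC only differ in which end of the
  # reversed tensor the padding slices are taken from.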
shape = input_.shape.as_list()
for i in range(len(shape)):
rdims = tf.raw_ops.OneHot(
indices=i, depth=len(shape), on_value=True, off_value=False, axis=-1)
rarray = tf.raw_ops.Reverse(tensor=input_, dims=rdims)
left_padding_size = tf.raw_ops.GatherNd(params=paddings, indices=[i, 0])
right_padding_size = tf.raw_ops.GatherNd(params=paddings, indices=[i, 1])
if mode == 'REFLECT':
left_padding, _ = tf.raw_ops.SplitV(
value=rarray,
size_splits=[left_padding_size, -1],
axis=i,
num_split=2)
_, right_padding = tf.raw_ops.SplitV(
value=rarray,
size_splits=[-1, right_padding_size],
axis=i,
num_split=2)
else:
_, left_padding = tf.raw_ops.SplitV(
value=rarray,
size_splits=[-1, left_padding_size],
axis=i,
num_split=2)
right_padding, _ = tf.raw_ops.SplitV(
value=rarray,
size_splits=[right_padding_size, -1],
axis=i,
num_split=2)
input_ = tf.raw_ops.Concat(
concat_dim=i, values=[left_padding, input_, right_padding])
return input_
@tf.RegisterGradient('NewMirrorPad')
def _mirror_pad_grad(op, grad):
mode = op.get_attr('mode')
return [gen_array_ops.mirror_pad_grad(grad, op.inputs[1], mode=mode), None]
@Composite(
'NewMirrorPadGrad',
inputs=['input_: T', 'paddings: Tpaddings'],
attrs=['mode: {"REFLECT", "SYMMETRIC"}'],
derived_attrs=['T: type', 'Tpaddings: {int32, int64} = DT_INT32'],
outputs=['output: T'])
def _composite_mirror_pad_grad(input_, paddings, mode):
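  # Gradient sketch: along each axis the incoming (padded) gradient is split
  # into left padding, core and right padding; the padding pieces are reversed,
  # padded with zeros up to the core shape, and summed back into the core so
  # each mirrored element's gradient flows to its source position.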
shape = input_.shape.as_list()
for i in range(len(shape)):
rdims = tf.raw_ops.OneHot(
indices=i, depth=len(shape), on_value=True, off_value=False, axis=-1)
left_padding_size = tf.raw_ops.GatherNd(params=paddings, indices=[i, 0])
right_padding_size = tf.raw_ops.GatherNd(params=paddings, indices=[i, 1])
left_padding, core, right_padding = tf.raw_ops.SplitV(
value=input_,
size_splits=[left_padding_size, -1, right_padding_size],
axis=i,
num_split=3)
reversed_left_padding = tf.raw_ops.Reverse(tensor=left_padding, dims=rdims)
reversed_right_padding = tf.raw_ops.Reverse(
tensor=right_padding, dims=rdims)
zero_like = tf.raw_ops.ZerosLike(x=core)
left_offset, _ = tf.raw_ops.SplitV(
value=zero_like,
size_splits=[-1, left_padding_size],
axis=i,
num_split=2)
right_offset, _ = tf.raw_ops.SplitV(
value=zero_like,
size_splits=[-1, right_padding_size],
axis=i,
num_split=2)
if mode == 'REFLECT':
from_left_padding = tf.raw_ops.Concat(
concat_dim=i, values=[left_offset, reversed_left_padding])
from_right_padding = tf.raw_ops.Concat(
concat_dim=i, values=[reversed_right_padding, right_offset])
else:
from_left_padding = tf.raw_ops.Concat(
concat_dim=i, values=[reversed_left_padding, left_offset])
from_right_padding = tf.raw_ops.Concat(
concat_dim=i, values=[right_offset, reversed_right_padding])
input_ = tf.raw_ops.AddN(
inputs=[from_left_padding, core, from_right_padding])
return input_
@tf.RegisterGradient('NewMirrorPadGrad')
def _mirror_pad_grad_grad(op, grad):
mode = op.get_attr('mode')
return [gen_array_ops.mirror_pad(grad, op.inputs[1], mode=mode), None]
def main(_):
if FLAGS.gen_register_op:
assert FLAGS.output.endswith('.cc')
generated_code = gen_register_op(sys.modules[__name__], '_composite_')
else:
assert FLAGS.output.endswith('.mlir')
generated_code = tfr_gen_from_module(sys.modules[__name__], '_composite_')
dirname = os.path.dirname(FLAGS.output)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(FLAGS.output, 'w') as f:
f.write(generated_code)
if __name__ == '__main__':
app.run(main=main)
| apache-2.0 | -8,191,528,790,846,239,000 | 33.238095 | 79 | 0.656641 | false |
brad999/nikita | client/modules/text.py | 2 | 2449 | """
Texting module
Name: text.py
Description: Sends text message to designated recipient.
Responds to "text" or "tell"
Dependencies: Gmail, Contacts list
Author: Brad Ahlers (github - brad999)
"""
import re
import yaml
from client import nikitapath, app_utils
WORDS = ["TEXT", "TELL"]
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user
"""
# determine recipient name and message
# !! add logic to check if name was provided but no message
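    # e.g. "text alice pick up milk" matches with name "alice" and
    # message "pick up milk" (the name here is illustrative only)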
if re.search("(text|tell) (\w+) (.*)", text, re.IGNORECASE):
x = re.search("(text|tell) (\w+) (.*)", text, re.IGNORECASE)
name = x.group(2)
message = x.group(3)
else:
mic.say('A', "Who would you like to text?")
name = mic.activeListen()
mic.say('A', "What would you like to tell " + name + "?")
message = mic.activeListen()
# check for recipient number in contacts.yml
f = open(nikitapath.data('text', 'CONTACTS.yml'))
contacts = yaml.safe_load(f)
recipientNumber = str(contacts[name.lower()])
f.close()
if recipientNumber:
# check for a message
if message:
# format message properly
message = app_utils.convertPunctuation(message.lower())
# confirm message and recipient before sending
mic.say('A', "Are you sure you would like to tell " +
name + ", " + message + "?")
if app_utils.YesOrNo(mic.activeListen()):
# send text message
app_utils.sendTextMsg(profile, recipientNumber, message)
mic.say('A', "Message has been sent to " + name + ".")
else:
mic.say('A', "Message was not sent.")
else:
mic.say('A', "I'm sorry. I didn't understand that message")
else:
mic.say('A', "I'm sorry. I could not find " +
name + " in my address book.")
def isValid(text):
"""
Returns True if the input is related to texting.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\b(text|tell(?!me))\b', text, re.IGNORECASE))
| mit | 3,660,254,986,958,460,000 | 31.653333 | 73 | 0.576562 | false |
vijayendrabvs/ssl-neutron | neutron/tests/unit/test_linux_interface.py | 4 | 22521 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.common import config
from neutron.agent.linux import dhcp
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.extensions.flavor import (FLAVOR_NETWORK)
from neutron.openstack.common import uuidutils
from neutron.tests import base
class BaseChild(interface.LinuxInterfaceDriver):
def plug(*args):
pass
def unplug(*args):
pass
class FakeNetwork:
id = '12345678-1234-5678-90ab-ba0987654321'
class FakeSubnet:
cidr = '192.168.1.1/24'
class FakeAllocation:
subnet = FakeSubnet()
ip_address = '192.168.1.2'
ip_version = 4
class FakePort:
id = 'abcdef01-1234-5678-90ab-ba0987654321'
fixed_ips = [FakeAllocation]
device_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
network = FakeNetwork()
network_id = network.id
class TestBase(base.BaseTestCase):
def setUp(self):
super(TestBase, self).setUp()
self.conf = config.setup_conf()
self.conf.register_opts(interface.OPTS)
config.register_root_helper(self.conf)
self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice')
self.ip_dev = self.ip_dev_p.start()
self.addCleanup(self.ip_dev_p.stop)
self.ip_p = mock.patch.object(ip_lib, 'IPWrapper')
self.ip = self.ip_p.start()
self.addCleanup(self.ip_p.stop)
self.device_exists_p = mock.patch.object(ip_lib, 'device_exists')
self.device_exists = self.device_exists_p.start()
self.addCleanup(self.device_exists_p.stop)
class TestABCDriver(TestBase):
def test_get_device_name(self):
bc = BaseChild(self.conf)
device_name = bc.get_device_name(FakePort())
self.assertEqual('tapabcdef01-12', device_name)
def test_l3_init(self):
addresses = [dict(ip_version=4, scope='global',
dynamic=False, cidr='172.16.77.240/24')]
self.ip_dev().addr.list = mock.Mock(return_value=addresses)
bc = BaseChild(self.conf)
ns = '12345678-1234-5678-90ab-ba0987654321'
bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns)
self.ip_dev.assert_has_calls(
[mock.call('tap0', 'sudo', namespace=ns),
mock.call().addr.list(scope='global', filters=['permanent']),
mock.call().addr.add(4, '192.168.1.2/24', '192.168.1.255'),
mock.call().addr.delete(4, '172.16.77.240/24')])
def test_l3_init_with_preserve(self):
addresses = [dict(ip_version=4, scope='global',
dynamic=False, cidr='192.168.1.3/32')]
self.ip_dev().addr.list = mock.Mock(return_value=addresses)
bc = BaseChild(self.conf)
ns = '12345678-1234-5678-90ab-ba0987654321'
bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns,
preserve_ips=['192.168.1.3/32'])
self.ip_dev.assert_has_calls(
[mock.call('tap0', 'sudo', namespace=ns),
mock.call().addr.list(scope='global', filters=['permanent']),
mock.call().addr.add(4, '192.168.1.2/24', '192.168.1.255')])
self.assertFalse(self.ip_dev().addr.delete.called)
class TestOVSInterfaceDriver(TestBase):
def test_get_device_name(self):
br = interface.OVSInterfaceDriver(self.conf)
device_name = br.get_device_name(FakePort())
self.assertEqual('tapabcdef01-12', device_name)
def test_plug_no_ns(self):
self._test_plug()
def test_plug_with_ns(self):
self._test_plug(namespace='01234567-1234-1234-99')
def test_plug_alt_bridge(self):
self._test_plug(bridge='br-foo')
def test_plug_configured_bridge(self):
br = 'br-v'
self.conf.set_override('ovs_use_veth', False)
self.conf.set_override('ovs_integration_bridge', br)
self.assertEqual(self.conf.ovs_integration_bridge, br)
def device_exists(dev, root_helper=None, namespace=None):
return dev == br
ovs = interface.OVSInterfaceDriver(self.conf)
with mock.patch.object(ovs, '_ovs_add_port') as add_port:
self.device_exists.side_effect = device_exists
ovs.plug('01234567-1234-1234-99',
'port-1234',
'tap0',
'aa:bb:cc:dd:ee:ff',
bridge=None,
namespace=None)
add_port.assert_called_once_with('br-v',
'tap0',
'port-1234',
'aa:bb:cc:dd:ee:ff',
internal=True)
def _test_plug(self, additional_expectation=[], bridge=None,
namespace=None):
if not bridge:
bridge = 'br-int'
def device_exists(dev, root_helper=None, namespace=None):
return dev == bridge
vsctl_cmd = ['ovs-vsctl', '--', '--if-exists', 'del-port',
'tap0', '--', 'add-port',
bridge, 'tap0', '--', 'set', 'Interface', 'tap0',
'type=internal', '--', 'set', 'Interface', 'tap0',
'external-ids:iface-id=port-1234', '--', 'set',
'Interface', 'tap0',
'external-ids:iface-status=active', '--', 'set',
'Interface', 'tap0',
'external-ids:attached-mac=aa:bb:cc:dd:ee:ff']
with mock.patch.object(utils, 'execute') as execute:
ovs = interface.OVSInterfaceDriver(self.conf)
self.device_exists.side_effect = device_exists
ovs.plug('01234567-1234-1234-99',
'port-1234',
'tap0',
'aa:bb:cc:dd:ee:ff',
bridge=bridge,
namespace=namespace)
execute.assert_called_once_with(vsctl_cmd, 'sudo')
expected = [mock.call('sudo'),
mock.call().device('tap0'),
mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')]
expected.extend(additional_expectation)
if namespace:
expected.extend(
[mock.call().ensure_namespace(namespace),
mock.call().ensure_namespace().add_device_to_namespace(
mock.ANY)])
expected.extend([mock.call().device().link.set_up()])
self.ip.assert_has_calls(expected)
def test_mtu_int(self):
self.assertIsNone(self.conf.network_device_mtu)
self.conf.set_override('network_device_mtu', 9000)
self.assertEqual(self.conf.network_device_mtu, 9000)
def test_plug_mtu(self):
self.conf.set_override('network_device_mtu', 9000)
self._test_plug([mock.call().device().link.set_mtu(9000)])
def test_unplug(self, bridge=None):
if not bridge:
bridge = 'br-int'
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge') as ovs_br:
ovs = interface.OVSInterfaceDriver(self.conf)
ovs.unplug('tap0')
ovs_br.assert_has_calls([mock.call(bridge, 'sudo'),
mock.call().delete_port('tap0')])
class TestOVSInterfaceDriverWithVeth(TestOVSInterfaceDriver):
def setUp(self):
super(TestOVSInterfaceDriverWithVeth, self).setUp()
self.conf.set_override('ovs_use_veth', True)
def test_get_device_name(self):
br = interface.OVSInterfaceDriver(self.conf)
device_name = br.get_device_name(FakePort())
self.assertEqual('ns-abcdef01-12', device_name)
def test_plug_with_prefix(self):
self._test_plug(devname='qr-0', prefix='qr-')
def _test_plug(self, devname=None, bridge=None, namespace=None,
prefix=None, mtu=None):
if not devname:
devname = 'ns-0'
if not bridge:
bridge = 'br-int'
def device_exists(dev, root_helper=None, namespace=None):
return dev == bridge
ovs = interface.OVSInterfaceDriver(self.conf)
self.device_exists.side_effect = device_exists
root_dev = mock.Mock()
ns_dev = mock.Mock()
self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev))
expected = [mock.call('sudo'),
mock.call().add_veth('tap0', devname,
namespace2=namespace)]
vsctl_cmd = ['ovs-vsctl', '--', '--if-exists', 'del-port',
'tap0', '--', 'add-port',
bridge, 'tap0', '--', 'set', 'Interface', 'tap0',
'external-ids:iface-id=port-1234', '--', 'set',
'Interface', 'tap0',
'external-ids:iface-status=active', '--', 'set',
'Interface', 'tap0',
'external-ids:attached-mac=aa:bb:cc:dd:ee:ff']
with mock.patch.object(utils, 'execute') as execute:
ovs.plug('01234567-1234-1234-99',
'port-1234',
devname,
'aa:bb:cc:dd:ee:ff',
bridge=bridge,
namespace=namespace,
prefix=prefix)
execute.assert_called_once_with(vsctl_cmd, 'sudo')
ns_dev.assert_has_calls(
[mock.call.link.set_address('aa:bb:cc:dd:ee:ff')])
if mtu:
ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
self.ip.assert_has_calls(expected)
root_dev.assert_has_calls([mock.call.link.set_up()])
ns_dev.assert_has_calls([mock.call.link.set_up()])
def test_plug_mtu(self):
self.conf.set_override('network_device_mtu', 9000)
self._test_plug(mtu=9000)
def test_unplug(self, bridge=None):
if not bridge:
bridge = 'br-int'
with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge') as ovs_br:
ovs = interface.OVSInterfaceDriver(self.conf)
ovs.unplug('ns-0', bridge=bridge)
ovs_br.assert_has_calls([mock.call(bridge, 'sudo'),
mock.call().delete_port('tap0')])
self.ip_dev.assert_has_calls([mock.call('ns-0', 'sudo', None),
mock.call().link.delete()])
class TestBridgeInterfaceDriver(TestBase):
def test_get_device_name(self):
br = interface.BridgeInterfaceDriver(self.conf)
device_name = br.get_device_name(FakePort())
self.assertEqual('ns-abcdef01-12', device_name)
def test_plug_no_ns(self):
self._test_plug()
def test_plug_with_ns(self):
self._test_plug(namespace='01234567-1234-1234-99')
def _test_plug(self, namespace=None, mtu=None):
def device_exists(device, root_helper=None, namespace=None):
return device.startswith('brq')
root_veth = mock.Mock()
ns_veth = mock.Mock()
self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth))
self.device_exists.side_effect = device_exists
br = interface.BridgeInterfaceDriver(self.conf)
mac_address = 'aa:bb:cc:dd:ee:ff'
br.plug('01234567-1234-1234-99',
'port-1234',
'ns-0',
mac_address,
namespace=namespace)
ip_calls = [mock.call('sudo'),
mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)]
ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)])
if mtu:
ns_veth.assert_has_calls([mock.call.link.set_mtu(mtu)])
root_veth.assert_has_calls([mock.call.link.set_mtu(mtu)])
self.ip.assert_has_calls(ip_calls)
root_veth.assert_has_calls([mock.call.link.set_up()])
ns_veth.assert_has_calls([mock.call.link.set_up()])
def test_plug_dev_exists(self):
self.device_exists.return_value = True
with mock.patch('neutron.agent.linux.interface.LOG.info') as log:
br = interface.BridgeInterfaceDriver(self.conf)
br.plug('01234567-1234-1234-99',
'port-1234',
'tap0',
'aa:bb:cc:dd:ee:ff')
self.ip_dev.assert_has_calls([])
self.assertEqual(log.call_count, 1)
def test_plug_mtu(self):
self.device_exists.return_value = False
self.conf.set_override('network_device_mtu', 9000)
self._test_plug(mtu=9000)
def test_unplug_no_device(self):
self.device_exists.return_value = False
self.ip_dev().link.delete.side_effect = RuntimeError
with mock.patch('neutron.agent.linux.interface.LOG') as log:
br = interface.BridgeInterfaceDriver(self.conf)
br.unplug('tap0')
[mock.call(), mock.call('tap0', 'sudo'), mock.call().link.delete()]
self.assertEqual(log.error.call_count, 1)
def test_unplug(self):
self.device_exists.return_value = True
with mock.patch('neutron.agent.linux.interface.LOG.debug') as log:
br = interface.BridgeInterfaceDriver(self.conf)
br.unplug('tap0')
log.assert_called_once()
self.ip_dev.assert_has_calls([mock.call('tap0', 'sudo', None),
mock.call().link.delete()])
class TestMetaInterfaceDriver(TestBase):
def setUp(self):
super(TestMetaInterfaceDriver, self).setUp()
config.register_interface_driver_opts_helper(self.conf)
self.conf.register_opts(dhcp.OPTS)
self.client_cls_p = mock.patch('neutronclient.v2_0.client.Client')
client_cls = self.client_cls_p.start()
self.addCleanup(self.client_cls_p.stop)
self.client_inst = mock.Mock()
client_cls.return_value = self.client_inst
fake_network = {'network': {FLAVOR_NETWORK: 'fake1'}}
fake_port = {'ports':
[{'mac_address':
'aa:bb:cc:dd:ee:ffa', 'network_id': 'test'}]}
self.client_inst.list_ports.return_value = fake_port
self.client_inst.show_network.return_value = fake_network
self.conf.set_override('auth_url', 'http://localhost:35357/v2.0')
self.conf.set_override('auth_region', 'RegionOne')
self.conf.set_override('admin_user', 'neutron')
self.conf.set_override('admin_password', 'password')
self.conf.set_override('admin_tenant_name', 'service')
self.conf.set_override(
'meta_flavor_driver_mappings',
'fake1:neutron.agent.linux.interface.OVSInterfaceDriver,'
'fake2:neutron.agent.linux.interface.BridgeInterfaceDriver')
def test_get_driver_by_network_id(self):
meta_interface = interface.MetaInterfaceDriver(self.conf)
driver = meta_interface._get_driver_by_network_id('test')
self.assertIsInstance(driver, interface.OVSInterfaceDriver)
def test_set_device_plugin_tag(self):
meta_interface = interface.MetaInterfaceDriver(self.conf)
driver = meta_interface._get_driver_by_network_id('test')
meta_interface._set_device_plugin_tag(driver,
'tap0',
namespace=None)
expected = [mock.call('tap0', 'sudo', None),
mock.call().link.set_alias('fake1')]
self.ip_dev.assert_has_calls(expected)
namespace = '01234567-1234-1234-99'
meta_interface._set_device_plugin_tag(driver,
'tap1',
namespace=namespace)
expected = [mock.call('tap1', 'sudo', '01234567-1234-1234-99'),
mock.call().link.set_alias('fake1')]
self.ip_dev.assert_has_calls(expected)
def test_get_device_plugin_tag(self):
meta_interface = interface.MetaInterfaceDriver(self.conf)
self.ip_dev().link.alias = 'fake1'
plugin_tag0 = meta_interface._get_device_plugin_tag('tap0',
namespace=None)
expected = [mock.call('tap0', 'sudo', None)]
self.ip_dev.assert_has_calls(expected)
self.assertEqual('fake1', plugin_tag0)
namespace = '01234567-1234-1234-99'
expected = [mock.call('tap1', 'sudo', '01234567-1234-1234-99')]
plugin_tag1 = meta_interface._get_device_plugin_tag(
'tap1',
namespace=namespace)
self.ip_dev.assert_has_calls(expected)
self.assertEqual('fake1', plugin_tag1)
class TestIVSInterfaceDriver(TestBase):
def setUp(self):
super(TestIVSInterfaceDriver, self).setUp()
def test_get_device_name(self):
br = interface.IVSInterfaceDriver(self.conf)
device_name = br.get_device_name(FakePort())
self.assertEqual('ns-abcdef01-12', device_name)
def test_plug_with_prefix(self):
self._test_plug(devname='qr-0', prefix='qr-')
def _test_plug(self, devname=None, namespace=None,
prefix=None, mtu=None):
if not devname:
devname = 'ns-0'
def device_exists(dev, root_helper=None, namespace=None):
return dev == 'indigo'
ivs = interface.IVSInterfaceDriver(self.conf)
self.device_exists.side_effect = device_exists
root_dev = mock.Mock()
_ns_dev = mock.Mock()
ns_dev = mock.Mock()
self.ip().add_veth = mock.Mock(return_value=(root_dev, _ns_dev))
self.ip().device = mock.Mock(return_value=(ns_dev))
expected = [mock.call('sudo'), mock.call().add_veth('tap0', devname),
mock.call().device(devname)]
ivsctl_cmd = ['ivs-ctl', 'add-port', 'tap0']
with mock.patch.object(utils, 'execute') as execute:
ivs.plug('01234567-1234-1234-99',
'port-1234',
devname,
'aa:bb:cc:dd:ee:ff',
namespace=namespace,
prefix=prefix)
execute.assert_called_once_with(ivsctl_cmd, 'sudo')
ns_dev.assert_has_calls(
[mock.call.link.set_address('aa:bb:cc:dd:ee:ff')])
if mtu:
ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)])
if namespace:
expected.extend(
[mock.call().ensure_namespace(namespace),
mock.call().ensure_namespace().add_device_to_namespace(
mock.ANY)])
self.ip.assert_has_calls(expected)
root_dev.assert_has_calls([mock.call.link.set_up()])
ns_dev.assert_has_calls([mock.call.link.set_up()])
def test_plug_mtu(self):
self.conf.set_override('network_device_mtu', 9000)
self._test_plug(mtu=9000)
def test_plug_namespace(self):
self._test_plug(namespace='mynamespace')
def test_unplug(self):
ivs = interface.IVSInterfaceDriver(self.conf)
ivsctl_cmd = ['ivs-ctl', 'del-port', 'tap0']
with mock.patch.object(utils, 'execute') as execute:
ivs.unplug('ns-0')
execute.assert_called_once_with(ivsctl_cmd, 'sudo')
self.ip_dev.assert_has_calls([mock.call('ns-0', 'sudo', None),
mock.call().link.delete()])
class TestMidonetInterfaceDriver(TestBase):
def setUp(self):
self.conf = config.setup_conf()
self.conf.register_opts(interface.OPTS)
config.register_root_helper(self.conf)
self.device_exists_p = mock.patch.object(ip_lib, 'device_exists')
self.device_exists = self.device_exists_p.start()
self.addCleanup(mock.patch.stopall)
self.driver = interface.MidonetInterfaceDriver(self.conf)
self.network_id = uuidutils.generate_uuid()
self.port_id = uuidutils.generate_uuid()
self.device_name = "tap0"
self.mac_address = "aa:bb:cc:dd:ee:ff"
self.bridge = "br-test"
self.namespace = "ns-test"
super(TestMidonetInterfaceDriver, self).setUp()
def test_plug(self):
cmd = ['mm-ctl', '--bind-port', self.port_id, 'tap0']
self.device_exists.return_value = False
root_dev = mock.Mock()
ns_dev = mock.Mock()
self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev))
with mock.patch.object(utils, 'execute') as execute:
self.driver.plug(
self.network_id, self.port_id,
self.device_name, self.mac_address,
self.bridge, self.namespace)
execute.assert_called_once_with(cmd, 'sudo')
expected = [mock.call(), mock.call('sudo'),
mock.call().add_veth(self.device_name,
self.device_name,
namespace2=self.namespace),
mock.call().ensure_namespace(self.namespace),
mock.call().ensure_namespace().add_device_to_namespace(
mock.ANY)]
ns_dev.assert_has_calls(
[mock.call.link.set_address(self.mac_address)])
root_dev.assert_has_calls([mock.call.link.set_up()])
ns_dev.assert_has_calls([mock.call.link.set_up()])
self.ip.assert_has_calls(expected, True)
def test_unplug(self):
self.driver.unplug(self.device_name, self.bridge, self.namespace)
self.ip_dev.assert_has_calls([
mock.call(self.device_name, self.driver.root_helper,
self.namespace),
mock.call().link.delete()])
self.ip.assert_has_calls(mock.call().garbage_collect_namespace())
| apache-2.0 | -3,377,458,539,263,908,400 | 38.510526 | 79 | 0.57231 | false |
davidzchen/tensorflow | tensorflow/python/framework/test_util_test.py | 2 | 33890 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import unittest
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def test_assert_ops_in_graph(self):
with ops.Graph().as_default():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
@test_util.run_deprecated_v1
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegex(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegex(AssertionError, r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseList(self):
with self.assertRaisesRegex(AssertionError, r"not close dif"):
self.assertAllClose([0], [1])
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegex(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegex(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegex(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
@test_util.skip_if(True) # b/117665998
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([100] * 3, i)
self.assertAllEqual([120] * 3, k)
self.assertAllEqual([20] * 3, j)
with self.assertRaisesRegex(AssertionError, r"not equal lhs"):
self.assertAllEqual([0] * 3, k)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllEqual(self):
i = variables.Variable([100], dtype=dtypes.int32, name="i")
j = constant_op.constant([20], dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertNotAllEqual([100] * 3, i)
self.assertNotAllEqual([120] * 3, k)
self.assertNotAllEqual([20] * 3, j)
with self.assertRaisesRegex(
AssertionError, r"two values are equal at all elements.*extra message"):
self.assertNotAllEqual([120], k, msg="extra message")
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
# Test with arrays
self.assertNotAllClose([0.1], [0.2])
with self.assertRaises(AssertionError):
self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
self.assertNotAllClose([0.9, 1.0], x)
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllClose([110.0, 120.0, 130.0], z)
self.assertAllGreater(x, 95.0)
self.assertAllLess(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllEqual([110.0, 120.0, 130.0], z)
self.assertAllGreaterEqual(x, 95.0)
self.assertAllLessEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 95.0)
def testAssertAllInRangeWithNonNumericValuesFails(self):
s1 = constant_op.constant("Hello, ", name="s1")
c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
b = constant_op.constant([False, True], name="b")
with self.assertRaises(AssertionError):
self.assertAllInRange(s1, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(c, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
x_init = np.array([[10.0, 15.0]] * 12)
x = constant_op.constant(x_init, name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
x = constant_op.constant(
[[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
x = constant_op.constant([10.0, np.inf], name="x")
self.assertAllInRange(x, 10, np.inf)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
def testRandomSeed(self):
# Call setUp again for WithCApi case (since it makes a new default graph
# after setup).
# TODO(skyewm): remove this when C API is permanently enabled.
with context.eager_mode():
self.setUp()
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
a_rand = random_ops.random_normal([1])
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
b_rand = random_ops.random_normal([1])
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertAllEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_util.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_util.run_in_graph_and_eager_modes()(inc)
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(set(l), {
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
})
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegex(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
class Foo(object):
pass
del Foo # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
class ExampleTest(test_util.TensorFlowTestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_util.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[1:2], ["run_graph"])
self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
self.assertEqual(arg, True)
@combinations.generate(combinations.combine(arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_combinations(self, arg):
self.assertEqual(arg, True)
def test_build_as_function_and_v1_graph(self):
class GraphModeAndFunctionTest(parameterized.TestCase):
def __init__(inner_self): # pylint: disable=no-self-argument
super(GraphModeAndFunctionTest, inner_self).__init__()
inner_self.graph_mode_tested = False
inner_self.inside_function_tested = False
def runTest(self):
del self
@test_util.build_as_function_and_v1_graph
def test_modes(inner_self): # pylint: disable=no-self-argument
if ops.inside_function():
self.assertFalse(inner_self.inside_function_tested)
inner_self.inside_function_tested = True
else:
self.assertFalse(inner_self.graph_mode_tested)
inner_self.graph_mode_tested = True
test_object = GraphModeAndFunctionTest()
test_object.test_modes_v1_graph()
test_object.test_modes_function()
self.assertTrue(test_object.graph_mode_tested)
self.assertTrue(test_object.inside_function_tested)
def test_with_forward_compatibility_horizons(self):
tested_codepaths = set()
def some_function_with_forward_compat_behavior():
if compat.forward_compatible(2050, 1, 1):
tested_codepaths.add("future")
else:
tested_codepaths.add("present")
@test_util.with_forward_compatibility_horizons(None, [2051, 1, 1])
def some_test(self):
del self # unused
some_function_with_forward_compat_behavior()
some_test(None)
self.assertEqual(tested_codepaths, set(["present", "future"]))
class SkipTestTest(test_util.TensorFlowTestCase):
def _verify_test_in_set_up_or_tear_down(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError,
["foo bar", "test message"]):
raise ValueError("test message")
try:
with self.assertRaisesRegex(ValueError, "foo bar"):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("foo bar")
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
def setUp(self):
super(SkipTestTest, self).setUp()
self._verify_test_in_set_up_or_tear_down()
def tearDown(self):
super(SkipTestTest, self).tearDown()
self._verify_test_in_set_up_or_tear_down()
def test_skip_if_error_should_skip(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("test message")
def test_skip_if_error_should_skip_with_list(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError,
["foo bar", "test message"]):
raise ValueError("test message")
def test_skip_if_error_should_skip_without_expected_message(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError):
raise ValueError("test message")
def test_skip_if_error_should_skip_without_error_message(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError):
raise ValueError()
def test_skip_if_error_should_raise_message_mismatch(self):
try:
with self.assertRaisesRegex(ValueError, "foo bar"):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("foo bar")
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
def test_skip_if_error_should_raise_no_message(self):
try:
with self.assertRaisesRegex(ValueError, ""):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError()
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
def setUp(self):
pass # Intentionally does not call TensorFlowTestCase's super()
@test_util.run_in_graph_and_eager_modes
def test_no_variable_sharing(self):
variable_scope.get_variable(
name="step_size",
initializer=np.array(1e-5, np.float32),
use_resource=True,
trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
def test_no_reference_cycle_decorator(self):
class ReferenceCycleTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_garbage_created
def test_has_cycle(self):
a = []
a.append(a)
@test_util.assert_no_garbage_created
def test_has_no_cycle(self):
pass
with self.assertRaises(AssertionError):
ReferenceCycleTest().test_has_cycle()
ReferenceCycleTest().test_has_no_cycle()
@test_util.run_in_graph_and_eager_modes
def test_no_leaked_tensor_decorator(self):
class LeakedTensorTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_new_tensors
def test_has_leak(self):
self.a = constant_op.constant([3.], name="leak")
@test_util.assert_no_new_tensors
def test_has_no_leak(self):
constant_op.constant([3.], name="no-leak")
with self.assertRaisesRegex(AssertionError, "Tensors not deallocated"):
LeakedTensorTest().test_has_leak()
LeakedTensorTest().test_has_no_leak()
def test_no_new_objects_decorator(self):
class LeakedObjectTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
inner_self.accumulation = []
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_leak(self):
self.accumulation.append([1.])
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_no_leak(self):
self.not_accumulating = [1.]
with self.assertRaises(AssertionError):
LeakedObjectTest().test_has_leak()
LeakedObjectTest().test_has_no_leak()
class RunFunctionsEagerlyInV2Test(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.named_parameters(
[("_RunEagerly", True), ("_RunGraph", False)])
def test_run_functions_eagerly(self, run_eagerly): # pylint: disable=g-wrong-blank-lines
results = []
@def_function.function
def add_two(x):
for _ in range(5):
x += 2
results.append(x)
return x
with test_util.run_functions_eagerly(run_eagerly):
add_two(constant_op.constant(2.))
if context.executing_eagerly():
if run_eagerly:
        self.assertTrue(all(isinstance(t, ops.EagerTensor) for t in results))
else:
        self.assertTrue(all(isinstance(t, ops.Tensor) for t in results))
else:
      self.assertTrue(all(isinstance(t, ops.Tensor) for t in results))
if __name__ == "__main__":
googletest.main()
| apache-2.0 | 5,296,353,649,175,319,000 | 33.723361 | 91 | 0.653438 | false |
RaitoBezarius/crossbar | crossbar/controller/template.py | 3 | 11799 | #####################################################################################
#
# Copyright (C) Tavendo GmbH
#
# Unless a separate license agreement exists between you and Tavendo GmbH (e.g. you
# have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
import sys
import os
import shutil
import pkg_resources
import jinja2
__all__ = ('Templates',)
class Templates:
"""
Crossbar.io application templates.
"""
SKIP_FILES = ('.pyc', '.pyo', '.exe')
"""
File extensions of files to skip when instantiating an application template.
"""
TEMPLATES = [
{
"name": "default",
"help": "A WAMP router speaking WebSocket plus a static Web server.",
"basedir": "templates/default",
"params": {
}
},
{
"name": "hello:python",
"help": "A minimal Python WAMP application hosted in a router and a HTML5 client.",
"basedir": "templates/hello/python",
"params": {
"appname": "hello",
"realm": "realm1",
}
},
{
"name": "hello:nodejs",
"help": "A minimal NodeJS WAMP application hosted in a router and a HTML5 client.",
"get_started_hint": "Now install dependencies by doing 'npm install', start Crossbar using 'crossbar start' and open http://localhost:8080 in your browser.",
"basedir": "templates/hello/nodejs",
"params": {
"appname": "hello",
"realm": "realm1",
"url": "ws://127.0.0.1:8080/ws"
}
},
{
"name": "hello:browser",
"help": "A minimal JavaAScript WAMP application with two components running in the browser.",
"get_started_hint": "Start Crossbar using 'crossbar start' and open http://localhost:8080 in your browser.",
"basedir": "templates/hello/browser",
"params": {
"realm": "realm1",
}
},
{
"name": "hello:cpp",
"help": "A minimal C++11/AutobahnCpp WAMP application hosted in a router and a HTML5 client.",
"get_started_hint": "Now build the example by doing 'scons', start Crossbar using 'crossbar start' and open http://localhost:8080 in your browser.",
"basedir": "templates/hello/cpp",
"params": {
},
},
{
"name": "hello:csharp",
"help": "A minimal C#/WampSharp WAMP application hosted in a router and a HTML5 client.",
"get_started_hint": "Now build by opening 'src/Hello/Hello.sln' in Visual Studio, start Crossbar using 'crossbar start' and open http://localhost:8080 in your browser.",
"basedir": "templates/hello/csharp",
"params": {
},
"skip_jinja": []
},
{
"name": "hello:erlang",
"help": "A minimal Erlang/Erwa WAMP application hosted in a router and a HTML5 client.",
"get_started_hint": "Now build the Erlang/Erwa client by entering 'make', start Crossbar using 'crossbar start' and open http://localhost:8080 in your browser.",
"basedir": "templates/hello/erlang",
"params": {
},
# due to Erlang's common use of "{{" and "}}" in syntax, we reconfigure
# the escape characters used in Jinja templates
"jinja": {
"block_start_string": "@@",
"block_end_string": "@@",
"variable_start_string": "@=",
"variable_end_string": "=@",
"comment_start_string": "@#",
"comment_end_string": "#@",
},
# we need to skip binary files from being processed by Jinja
#
"skip_jinja": ["relx"]
},
{
"name": "hello:php",
"help": "A minimal PHP/Thruway WAMP application hosted in a router and a HTML5 client.",
"get_started_hint": "Now install dependencies for the PHP/Thruway client by entering 'make install', start Crossbar using 'crossbar start' and open http://localhost:8080 in your browser.",
"basedir": "templates/hello/php",
"params": {
},
},
{
"name": "hello:java",
"help": "A minimal Java/jawampa WAMP application hosted in a router and a HTML5 client.",
"get_started_hint": "Please follow the README.md to build the Java component first, then start Crossbar using 'crossbar start' and open http://localhost:8080 in your browser.",
"basedir": "templates/hello/java",
"params": {
},
},
{
"name": "hello:tessel",
"help": "A minimal JavaScript/wamp-tessel WAMP application running on a Tessel and with a HTML5 client.",
"get_started_hint": "Please follow the README.md to install npm dependencies, then start Crossbar using 'crossbar start', open http://localhost:8080 in your browser, and do 'tessel run tessel/hello.js'.",
"basedir": "templates/hello/tessel",
"params": {
},
},
]
"""
Application template definitions.
"""
def help(self):
"""
Print CLI help.
"""
print("\nAvailable Crossbar.io node templates:\n")
for t in self.TEMPLATES:
print(" {} {}".format(t['name'].ljust(16, ' '), t['help']))
print("")
def __contains__(self, template):
"""
Check if template exists.
:param template: The name of the application template to check.
:type template: str
"""
for t in self.TEMPLATES:
if t['name'] == template:
return True
return False
def __getitem__(self, template):
"""
Get template by name.
:param template: The name of the application template to get.
:type template: str
"""
for t in self.TEMPLATES:
if t['name'] == template:
return t
raise KeyError
def init(self, appdir, template, params=None, dryrun=False):
"""
        Instantiate an application template in the given directory.
:param appdir: The path of the directory to instantiate the application template in.
:type appdir: str
:param template: The name of the application template to instantiate.
:type template: str
:param dryrun: If `True`, only perform a dry run (don't actually do anything, only prepare).
:type dryrun: bool
"""
IS_WIN = sys.platform.startswith("win")
template = self.__getitem__(template)
basedir = pkg_resources.resource_filename("crossbar", template['basedir'])
if IS_WIN:
            basedir = basedir.replace('\\', '/')  # Jinja needs forward slashes even on Windows
print("Using template from '{}'".format(basedir))
appdir = os.path.abspath(appdir)
if 'jinja' in template:
kwargs = template['jinja']
else:
kwargs = {}
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(basedir),
keep_trailing_newline=True, **kwargs)
_params = template['params'].copy()
if params:
_params.update(params)
created = []
try:
for root, dirs, files in os.walk(basedir):
for d in dirs:
reldir = os.path.relpath(os.path.join(root, d), basedir)
if 'appname' in _params:
reldir = reldir.replace('appname', _params['appname'])
create_dir_path = os.path.join(appdir, reldir)
print("Creating directory {}".format(create_dir_path))
if not dryrun:
os.mkdir(create_dir_path)
created.append(('dir', create_dir_path))
for f in files:
if not f.endswith(Templates.SKIP_FILES):
src_file = os.path.abspath(os.path.join(root, f))
src_file_rel_path = os.path.relpath(src_file, basedir)
reldir = os.path.relpath(root, basedir)
if 'appname' in _params:
reldir = reldir.replace('appname', _params['appname'])
f = f.replace('appname', _params['appname'])
dst_dir_path = os.path.join(appdir, reldir)
dst_file = os.path.abspath(os.path.join(dst_dir_path, f))
print("Creating file {}".format(dst_file))
if not dryrun:
if f in template.get('skip_jinja', []):
shutil.copy(src_file, dst_file)
else:
with open(dst_file, 'wb') as dst_file_fd:
if IS_WIN:
                                        # Jinja needs forward slashes even on Windows
src_file_rel_path = src_file_rel_path.replace('\\', '/')
page = jinja_env.get_template(src_file_rel_path)
contents = page.render(**_params).encode('utf8')
dst_file_fd.write(contents)
created.append(('file', dst_file))
# force exception to test rollback
# a = 1/0
return template.get('get_started_hint', None)
except Exception as e:
print("Error encountered ({}) - rolling back".format(e))
for ptype, path in reversed(created):
if ptype == 'file':
try:
print("Removing file {}".format(path))
if not dryrun:
os.remove(path)
except:
print("Warning: could not remove file {}".format(path))
elif ptype == 'dir':
try:
print("Removing directory {}".format(path))
if not dryrun:
os.rmdir(path)
except:
print("Warning: could not remove directory {}".format(path))
else:
raise Exception("logic error")
raise e
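# A minimal usage sketch (illustrative only; the target directory and the
# chosen template name are hypothetical): check that a template exists in
# the registry, then instantiate it. With dryrun=True the steps are only
# printed and nothing is written to disk.
def _example_template_usage(appdir='./myapp', template='hello:python'):
    templates = Templates()
    if template not in templates:
        templates.help()
        return None
    return templates.init(appdir, template, dryrun=True)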
| agpl-3.0 | 8,186,858,726,359,370,000 | 38.199336 | 216 | 0.522756 | false |
gopythongo/aptly-api-client | aptly_api/tests/snapshots.py | 1 | 10499 | # -* encoding: utf-8 *-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from typing import Any
from unittest.case import TestCase
import iso8601
import pytz
import requests_mock
from aptly_api.base import AptlyAPIException
from aptly_api.parts.packages import Package
from aptly_api.parts.snapshots import SnapshotAPISection, Snapshot
@requests_mock.Mocker(kw='rmock')
class SnapshotAPISectionTests(TestCase):
def __init__(self, *args: Any) -> None:
super().__init__(*args)
self.sapi = SnapshotAPISection("http://test/")
self.maxDiff = None
def test_list(self, *, rmock: requests_mock.Mocker) -> None:
rmock.get("http://test/api/snapshots",
text='[{"Name":"stretch-security-1","CreatedAt":"2017-06-03T21:36:22.2692213Z",'
'"Description":"Snapshot from mirror [stretch-security]: '
'http://security.debian.org/debian-security/ stretch/updates"},'
'{"Name":"stretch-updates-1","CreatedAt":"2017-06-03T21:36:22.431767659Z",'
'"Description":"Snapshot from mirror [stretch-updates]: '
'http://ftp-stud.hs-esslingen.de/debian/ stretch-updates"}]')
self.assertSequenceEqual(
self.sapi.list(),
[
Snapshot(
name='stretch-security-1',
description='Snapshot from mirror [stretch-security]: http://security.debian.org/debian-security/ '
'stretch/updates',
created_at=iso8601.parse_date('2017-06-03T21:36:22.2692213Z')
),
Snapshot(
name='stretch-updates-1',
description='Snapshot from mirror [stretch-updates]: http://ftp-stud.hs-esslingen.de/debian/ '
'stretch-updates',
created_at=iso8601.parse_date('2017-06-03T21:36:22.431767659Z')
)
]
)
def test_list_invalid(self, *, rmock: requests_mock.Mocker) -> None:
with self.assertRaises(AptlyAPIException):
self.sapi.list("snoepsort")
def test_update_noparams(self, *, rmock: requests_mock.Mocker) -> None:
with self.assertRaises(AptlyAPIException):
self.sapi.update("test")
def test_create(self, *, rmock: requests_mock.Mocker) -> None:
rmock.post("http://test/api/repos/aptly-repo/snapshots",
text='{"Name":"aptly-repo-1","CreatedAt":"2017-06-03T23:43:40.275605639Z",'
'"Description":"Snapshot from local repo [aptly-repo]"}')
self.assertEqual(
self.sapi.create_from_repo("aptly-repo", "aptly-repo-1",
description='Snapshot from local repo [aptly-repo]'),
Snapshot(
name='aptly-repo-1',
description='Snapshot from local repo [aptly-repo]',
created_at=iso8601.parse_date('2017-06-03T23:43:40.275605639Z', default_timezone=pytz.UTC)
)
)
def test_list_packages(self, *, rmock: requests_mock.Mocker) -> None:
rmock.get("http://test/api/snapshots/aptly-repo-1/packages",
text='["Pall postgresql-9.6-postgis-scripts 2.3.2+dfsg-1~exp2.pgdg90+1 5f70af798690300d"]')
self.assertEqual(
self.sapi.list_packages("aptly-repo-1"),
[
Package(
key='Pall postgresql-9.6-postgis-scripts 2.3.2+dfsg-1~exp2.pgdg90+1 5f70af798690300d',
short_key=None,
files_hash=None,
fields=None
),
]
)
def test_list_packages_details(self, *, rmock: requests_mock.Mocker) -> None:
rmock.get("http://test/api/snapshots/aptly-repo-1/packages",
text='[{"Architecture":"all","Depends":"postgresql-9.6-postgis-2.3-scripts",'
'"Description":" transitional dummy package\\n This is a transitional dummy package. '
'It can safely be removed.\\n",'
'"Filename":"postgresql-9.6-postgis-scripts_2.3.2+dfsg-1~exp2.pgdg90+1_all.deb",'
'"FilesHash":"5f70af798690300d",'
'"Homepage":"http://postgis.net/",'
'"Installed-Size":"491",'
'"Key":"Pall postgresql-9.6-postgis-scripts 2.3.2+dfsg-1~exp2.pgdg90+1 5f70af798690300d",'
'"MD5sum":"56de7bac497e4ac34017f4d11e75fffb",'
'"Maintainer":"Debian GIS Project \[email protected]\u003e",'
'"Package":"postgresql-9.6-postgis-scripts",'
'"Priority":"extra",'
'"SHA1":"61bb9250e7a35be9b78808944e8cfbae1e70f67d",'
'"SHA256":"01c0c4645e9100f7ddb6d05a9d36ad3866ac8d2e412b7c04163a9e65397ce05e",'
'"Section":"oldlibs",'
'"ShortKey":"Pall postgresql-9.6-postgis-scripts 2.3.2+dfsg-1~exp2.pgdg90+1",'
'"Size":"468824","Source":"postgis","Version":"2.3.2+dfsg-1~exp2.pgdg90+1"}]')
parsed = self.sapi.list_packages("aptly-repo-1", query="Name (% postgresql-9.6.-postgis-sc*)", detailed=True,
with_deps=True)[0]
expected = Package(
key='Pall postgresql-9.6-postgis-scripts 2.3.2+dfsg-1~exp2.pgdg90+1 5f70af798690300d',
short_key='Pall postgresql-9.6-postgis-scripts 2.3.2+dfsg-1~exp2.pgdg90+1',
files_hash='5f70af798690300d',
fields={
'Maintainer': 'Debian GIS Project <[email protected]>',
'Size': '468824',
'MD5sum': '56de7bac497e4ac34017f4d11e75fffb',
'ShortKey': 'Pall postgresql-9.6-postgis-scripts 2.3.2+dfsg-1~exp2.pgdg90+1',
'FilesHash': '5f70af798690300d',
'Filename': 'postgresql-9.6-postgis-scripts_2.3.2+dfsg-1~exp2.pgdg90+1_all.deb',
'Section': 'oldlibs',
'Homepage': 'http://postgis.net/',
'Description': ' transitional dummy package\n This is a transitional dummy package. '
'It can safely be removed.\n',
'Architecture': 'all',
'Priority': 'extra',
'Source': 'postgis',
'SHA1': '61bb9250e7a35be9b78808944e8cfbae1e70f67d',
'Installed-Size': '491',
'Version': '2.3.2+dfsg-1~exp2.pgdg90+1',
'Depends': 'postgresql-9.6-postgis-2.3-scripts',
'Key': 'Pall postgresql-9.6-postgis-scripts 2.3.2+dfsg-1~exp2.pgdg90+1 5f70af798690300d',
'SHA256': '01c0c4645e9100f7ddb6d05a9d36ad3866ac8d2e412b7c04163a9e65397ce05e',
'Package': 'postgresql-9.6-postgis-scripts'
}
)
# mypy should detect this as ensuring that parsed.fields is not None, but it doesn't
self.assertIsNotNone(parsed.fields)
self.assertIsNotNone(expected.fields)
self.assertDictEqual(
parsed.fields if parsed.fields else {}, # make sure that mypy doesn't error on this being potentially None
expected.fields if expected.fields else {}, # this can't happen unless Package.__init__ is fubared
)
def test_show(self, *, rmock: requests_mock.Mocker) -> None:
rmock.get("http://test/api/snapshots/aptly-repo-1",
text='{"Name":"aptly-repo-1",'
'"CreatedAt":"2017-06-03T23:43:40.275605639Z",'
'"Description":"Snapshot from local repo [aptly-repo]"}')
self.assertEqual(
self.sapi.show("aptly-repo-1"),
Snapshot(
name='aptly-repo-1',
description='Snapshot from local repo [aptly-repo]',
created_at=iso8601.parse_date('2017-06-03T23:43:40.275605639Z', default_timezone=pytz.UTC)
)
)
def test_update(self, *, rmock: requests_mock.Mocker) -> None:
rmock.put("http://test/api/snapshots/aptly-repo-1",
text='{"Name":"aptly-repo-2","CreatedAt":"2017-06-03T23:43:40.275605639Z",'
'"Description":"test"}')
self.assertEqual(
self.sapi.update("aptly-repo-1", newname="aptly-repo-2", newdescription="test"),
Snapshot(
name='aptly-repo-2',
description='test',
created_at=iso8601.parse_date('2017-06-03T23:43:40.275605639Z', default_timezone=pytz.UTC)
)
)
def test_delete(self, *, rmock: requests_mock.Mocker) -> None:
rmock.delete("http://test/api/snapshots/aptly-repo-1",
text='{}')
self.sapi.delete("aptly-repo-1", force=True)
def test_diff(self, *, rmock: requests_mock.Mocker) -> None:
rmock.get("http://test/api/snapshots/aptly-repo-1/diff/aptly-repo-2",
text='[{"Left":null,"Right":"Pamd64 authserver 0.1.14~dev0-1 1cc572a93625a9c9"},'
'{"Left":"Pamd64 radicale 1.1.1 fbc974fa526f14e9","Right":null}]')
self.assertSequenceEqual(
self.sapi.diff("aptly-repo-1", "aptly-repo-2"),
[
{'Left': None, 'Right': 'Pamd64 authserver 0.1.14~dev0-1 1cc572a93625a9c9'},
{'Left': 'Pamd64 radicale 1.1.1 fbc974fa526f14e9', 'Right': None}
]
)
def test_create_from_packages(self, *, rmock: requests_mock.Mocker) -> None:
rmock.post("http://test/api/snapshots",
text='{"Name":"aptly-repo-2","CreatedAt":"2017-06-07T14:19:07.706408213Z","Description":"test"}')
self.assertEqual(
self.sapi.create_from_packages(
"aptly-repo-2",
description="test",
package_refs=["Pamd64 dirmngr 2.1.18-6 4c7412c5f0d7b30a"],
source_snapshots=["aptly-repo-1"]
),
Snapshot(
name='aptly-repo-2',
description='test',
created_at=iso8601.parse_date('2017-06-07T14:19:07.706408213Z', default_timezone=pytz.UTC)
)
)
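# A minimal, illustrative sketch of driving the same snapshot API against a
# live aptly instance rather than a mocked one. The URL, repository name and
# snapshot name below are hypothetical; the calls mirror those exercised by
# the tests above.
def _example_snapshot_workflow() -> Snapshot:
    snapshots = SnapshotAPISection("http://aptly.example.com/")
    for snap in snapshots.list():
        print(snap.name, snap.created_at)
    created = snapshots.create_from_repo(
        "myrepo", "myrepo-snapshot-1", description="nightly snapshot")
    return snapshots.show(created.name)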
| bsd-3-clause | -6,255,424,940,221,813,000 | 50.214634 | 119 | 0.556624 | false |
sahiljain/catapult | dashboard/dashboard/find_anomalies.py | 1 | 12890 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Processes tests and creates new Anomaly entities.
This module contains the ProcessTest function, which searches the recent
points in a test for potential regressions or improvements, and creates
new Anomaly entities.
"""
import logging
from google.appengine.ext import ndb
from dashboard import email_sheriff
from dashboard import find_change_points
from dashboard.common import utils
from dashboard.models import alert_group
from dashboard.models import anomaly
from dashboard.models import anomaly_config
from dashboard.models import graph_data
# Number of points to fetch and pass to FindChangePoints. A different number
# may be used if a test has a "max_window_size" anomaly config parameter.
DEFAULT_NUM_POINTS = 50
@ndb.synctasklet
def ProcessTests(test_keys):
"""Processes a list of tests to find new anoamlies.
Args:
test_keys: A list of TestMetadata ndb.Key's.
"""
yield ProcessTestsAsync(test_keys)
@ndb.tasklet
def ProcessTestsAsync(test_keys):
# Using a parallel yield here let's the tasklets for each _ProcessTest run
# in parallel.
yield [_ProcessTest(k) for k in test_keys]
@ndb.tasklet
def _ProcessTest(test_key):
"""Processes a test to find new anomalies.
Args:
test_key: The ndb.Key for a TestMetadata.
"""
test = yield test_key.get_async()
config = anomaly_config.GetAnomalyConfigDict(test)
max_num_rows = config.get('max_window_size', DEFAULT_NUM_POINTS)
rows = yield GetRowsToAnalyzeAsync(test, max_num_rows)
# If there were no rows fetched, then there's nothing to analyze.
if not rows:
# In some cases (e.g. if some points are deleted) it might be possible
# that last_alerted_revision is incorrect. In this case, reset it.
highest_rev = yield _HighestRevision(test_key)
if test.last_alerted_revision > highest_rev:
logging.error('last_alerted_revision %d is higher than highest rev %d '
'for test %s; setting last_alerted_revision to None.',
test.last_alerted_revision, highest_rev, test.test_path)
test.last_alerted_revision = None
yield test.put_async()
logging.error('No rows fetched for %s', test.test_path)
raise ndb.Return(None)
sheriff = yield _GetSheriffForTest(test)
if not sheriff:
logging.error('No sheriff for %s', test_key)
raise ndb.Return(None)
# Get anomalies and check if they happen in ref build also.
change_points = FindChangePointsForTest(rows, config)
change_points = yield _FilterAnomaliesFoundInRef(
change_points, test_key, len(rows))
anomalies = [_MakeAnomalyEntity(c, test, rows) for c in change_points]
# If no new anomalies were found, then we're done.
if not anomalies:
return
logging.info('Found at least one anomaly in: %s', test.test_path)
# Update the last_alerted_revision property of the test.
test.last_alerted_revision = anomalies[-1].end_revision
yield test.put_async()
yield alert_group.GroupAlertsAsync(
anomalies, utils.TestSuiteName(test.key), 'Anomaly')
# TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
# code will run serially.
# Email sheriff about any new regressions.
for anomaly_entity in anomalies:
if (anomaly_entity.bug_id is None and
not anomaly_entity.is_improvement and
not sheriff.summarize):
email_sheriff.EmailSheriff(sheriff, test, anomaly_entity)
yield ndb.put_multi_async(anomalies)
@ndb.synctasklet
def GetRowsToAnalyze(test, max_num_rows):
"""Gets the Row entities that we want to analyze.
Args:
test: The TestMetadata entity to get data for.
max_num_rows: The maximum number of points to get.
Returns:
A list of the latest Rows after the last alerted revision, ordered by
    revision. These rows are fetched with a projection query so they only
have the revision and value properties.
"""
result = yield GetRowsToAnalyzeAsync(test, max_num_rows)
raise ndb.Return(result)
@ndb.tasklet
def GetRowsToAnalyzeAsync(test, max_num_rows):
query = graph_data.Row.query(projection=['revision', 'value'])
query = query.filter(
graph_data.Row.parent_test == utils.OldStyleTestKey(test.key))
# The query is ordered in descending order by revision because we want
# to get the newest points.
query = query.filter(graph_data.Row.revision > test.last_alerted_revision)
query = query.order(-graph_data.Row.revision)
# However, we want to analyze them in ascending order.
rows = yield query.fetch_async(limit=max_num_rows)
raise ndb.Return(list(reversed(rows)))
@ndb.tasklet
def _HighestRevision(test_key):
"""Gets the revision number of the Row with the highest ID for a test."""
query = graph_data.Row.query(
graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
query = query.order(-graph_data.Row.revision)
highest_row_key = yield query.get_async(keys_only=True)
if highest_row_key:
raise ndb.Return(highest_row_key.id())
raise ndb.Return(None)
@ndb.tasklet
def _FilterAnomaliesFoundInRef(change_points, test_key, num_rows):
"""Filters out the anomalies that match the anomalies in ref build.
Background about ref build tests: Variation in test results can be caused
by changes in Chrome or changes in the test-running environment. The ref
build results are results from a reference (stable) version of Chrome run
in the same environment. If an anomaly happens in the ref build results at
the same time as an anomaly happened in the test build, that suggests that
the variation was caused by a change in the test-running environment, and
can be ignored.
Args:
change_points: ChangePoint objects returned by FindChangePoints.
test_key: ndb.Key of monitored TestMetadata.
num_rows: Number of Rows that were analyzed from the test. When fetching
the ref build Rows, we need not fetch more than |num_rows| rows.
Returns:
A copy of |change_points| possibly with some entries filtered out.
Any entries in |change_points| whose end revision matches that of
an anomaly found in the corresponding ref test will be filtered out.
"""
# Get anomalies for ref build.
ref_test = _CorrespondingRefTest(test_key)
if not ref_test:
raise ndb.Return(change_points[:])
ref_config = anomaly_config.GetAnomalyConfigDict(ref_test)
ref_rows = yield GetRowsToAnalyzeAsync(ref_test, num_rows)
ref_change_points = FindChangePointsForTest(ref_rows, ref_config)
if not ref_change_points:
raise ndb.Return(change_points[:])
change_points_filtered = []
test_path = utils.TestPath(test_key)
for c in change_points:
# Log information about what anomaly got filtered and what did not.
if not _IsAnomalyInRef(c, ref_change_points):
# TODO(qyearsley): Add test coverage. See catapult:#1346.
logging.info('Nothing was filtered out for test %s, and revision %s',
test_path, c.x_value)
change_points_filtered.append(c)
else:
logging.info('Filtering out anomaly for test %s, and revision %s',
test_path, c.x_value)
raise ndb.Return(change_points_filtered)
def _CorrespondingRefTest(test_key):
"""Returns the TestMetadata for the corresponding ref build trace, or None."""
test_path = utils.TestPath(test_key)
possible_ref_test_paths = [test_path + '_ref', test_path + '/ref']
for path in possible_ref_test_paths:
ref_test = utils.TestKey(path).get()
if ref_test:
return ref_test
return None
def _IsAnomalyInRef(change_point, ref_change_points):
"""Checks if anomalies are detected in both ref and non ref build.
Args:
change_point: A find_change_points.ChangePoint object to check.
ref_change_points: List of find_change_points.ChangePoint objects
found for a ref build series.
Returns:
True if there is a match found among the ref build series change points.
"""
for ref_change_point in ref_change_points:
if change_point.x_value == ref_change_point.x_value:
return True
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return False
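# A minimal illustration (not used by the production flow; the revision
# numbers are made up) of the ref-build filtering rule above: a change point
# is dropped when a ref-build change point ends at the same revision. The
# namedtuple is a stand-in for find_change_points.ChangePoint, of which only
# the x_value attribute is consulted here.
def _ExampleRefBuildFiltering():
  import collections
  FakeChangePoint = collections.namedtuple('FakeChangePoint', ['x_value'])
  change_points = [FakeChangePoint(x_value=12345),
                   FakeChangePoint(x_value=12400)]
  ref_change_points = [FakeChangePoint(x_value=12400)]
  kept = [c for c in change_points
          if not _IsAnomalyInRef(c, ref_change_points)]
  return kept  # Only the change point at revision 12345 survives.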
@ndb.tasklet
def _GetSheriffForTest(test):
"""Gets the Sheriff for a test, or None if no sheriff."""
if test.sheriff:
sheriff = yield test.sheriff.get_async()
raise ndb.Return(sheriff)
raise ndb.Return(None)
def _GetImmediatelyPreviousRevisionNumber(later_revision, rows):
"""Gets the revision number of the Row immediately before the given one.
Args:
later_revision: A revision number.
rows: List of Row entities in ascending order by revision.
Returns:
The revision number just before the given one.
"""
for row in reversed(rows):
if row.revision < later_revision:
return row.revision
# TODO(qyearsley): Add test coverage. See catapult:#1346.
assert False, 'No matching revision found in |rows|.'
def _GetRefBuildKeyForTest(test):
"""TestMetadata key of the reference build for the given test, if one exists.
Args:
test: the TestMetadata entity to get the ref build for.
Returns:
A TestMetadata key if found, or None if not.
"""
potential_path = '%s/ref' % test.test_path
potential_test = utils.TestKey(potential_path).get()
if potential_test:
return potential_test.key
potential_path = '%s_ref' % test.test_path
potential_test = utils.TestKey(potential_path).get()
if potential_test:
return potential_test.key
return None
def _GetDisplayRange(old_end, rows):
"""Get the revision range using a_display_rev, if applicable.
Args:
old_end: the x_value from the change_point
rows: List of Row entities in asscending order by revision.
Returns:
A end_rev, start_rev tuple with the correct revision.
"""
start_rev = end_rev = 0
for row in reversed(rows):
if (row.revision == old_end and
hasattr(row, 'r_commit_pos')):
end_rev = row.r_commit_pos
elif (row.revision < old_end and
hasattr(row, 'r_commit_pos')):
start_rev = row.r_commit_pos + 1
break
if not end_rev or not start_rev:
end_rev = start_rev = None
return start_rev, end_rev
def _MakeAnomalyEntity(change_point, test, rows):
"""Creates an Anomaly entity.
Args:
change_point: A find_change_points.ChangePoint object.
test: The TestMetadata entity that the anomalies were found on.
rows: List of Row entities that the anomalies were found on.
Returns:
An Anomaly entity, which is not yet put in the datastore.
"""
end_rev = change_point.x_value
start_rev = _GetImmediatelyPreviousRevisionNumber(end_rev, rows) + 1
display_start = display_end = None
if test.master_name == 'ClankInternal':
display_start, display_end = _GetDisplayRange(change_point.x_value, rows)
median_before = change_point.median_before
median_after = change_point.median_after
return anomaly.Anomaly(
start_revision=start_rev,
end_revision=end_rev,
median_before_anomaly=median_before,
median_after_anomaly=median_after,
segment_size_before=change_point.size_before,
segment_size_after=change_point.size_after,
window_end_revision=change_point.window_end,
std_dev_before_anomaly=change_point.std_dev_before,
t_statistic=change_point.t_statistic,
degrees_of_freedom=change_point.degrees_of_freedom,
p_value=change_point.p_value,
is_improvement=_IsImprovement(test, median_before, median_after),
ref_test=_GetRefBuildKeyForTest(test),
test=test.key,
sheriff=test.sheriff,
internal_only=test.internal_only,
units=test.units,
display_start=display_start,
display_end=display_end)
def FindChangePointsForTest(rows, config_dict):
"""Gets the anomaly data from the anomaly detection module.
Args:
rows: The Row entities to find anomalies for, sorted backwards by revision.
config_dict: Anomaly threshold parameters as a dictionary.
Returns:
A list of find_change_points.ChangePoint objects.
"""
data_series = [(row.revision, row.value) for row in rows]
return find_change_points.FindChangePoints(data_series, **config_dict)
def _IsImprovement(test, median_before, median_after):
"""Returns whether the alert is an improvement for the given test.
Args:
test: TestMetadata to get the improvement direction for.
median_before: The median of the segment immediately before the anomaly.
median_after: The median of the segment immediately after the anomaly.
Returns:
True if it is improvement anomaly, otherwise False.
"""
if (median_before < median_after and
test.improvement_direction == anomaly.UP):
return True
if (median_before >= median_after and
test.improvement_direction == anomaly.DOWN):
return True
return False
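# A small, illustrative sketch (hypothetical values; not called anywhere in
# the dashboard) of the improvement-direction rule above: with
# improvement_direction == anomaly.UP a rise in the median counts as an
# improvement, and with anomaly.DOWN a drop does.
def _ExampleImprovementDirection():
  import collections
  FakeTest = collections.namedtuple('FakeTest', ['improvement_direction'])
  up_test = FakeTest(improvement_direction=anomaly.UP)
  down_test = FakeTest(improvement_direction=anomaly.DOWN)
  assert _IsImprovement(up_test, 10.0, 12.0)
  assert not _IsImprovement(up_test, 12.0, 10.0)
  assert _IsImprovement(down_test, 12.0, 10.0)
  return True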
| bsd-3-clause | 8,405,810,916,422,869,000 | 33.837838 | 80 | 0.717921 | false |
ankur-gupta91/horizon-net-ip | openstack_dashboard/dashboards/project/access_and_security/tests.py | 5 | 11916 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy # noqa
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
import six
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security \
import api_access
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
INDEX_URL = reverse('horizon:project:access_and_security:index')
class AccessAndSecurityTests(test.TestCase):
def setUp(self):
super(AccessAndSecurityTests, self).setUp()
@test.create_stubs({api.network: ('floating_ip_supported',
'tenant_floating_ip_list',
'security_group_list',),
api.nova: ('keypair_list',
'server_list',),
api.base: ('is_service_enabled',),
quotas: ('tenant_quota_usages',)})
def _test_index(self, ec2_enabled):
keypairs = self.keypairs.list()
sec_groups = self.security_groups.list()
floating_ips = self.floating_ips.list()
quota_data = self.quota_usages.first()
quota_data['security_groups']['available'] = 10
api.nova.server_list(
IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
api.nova.keypair_list(
IsA(http.HttpRequest)) \
.AndReturn(keypairs)
api.network.floating_ip_supported(
IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.security_group_list(
IsA(http.HttpRequest)) \
.AndReturn(sec_groups)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)).MultipleTimes() \
.AndReturn(quota_data)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'network').MultipleTimes() \
.AndReturn(True)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'ec2').MultipleTimes() \
.AndReturn(ec2_enabled)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/access_and_security/index.html')
self.assertItemsEqual(res.context['keypairs_table'].data, keypairs)
self.assertItemsEqual(res.context['floating_ips_table'].data,
floating_ips)
# Security groups
sec_groups_from_ctx = res.context['security_groups_table'].data
        # Context data needs to contain all items from the test data.
self.assertItemsEqual(sec_groups_from_ctx,
sec_groups)
# Sec groups in context need to be sorted by their ``name`` attribute.
# This assertion is somewhat weak since it's only meaningful as long as
# the sec groups in the test data are *not* sorted by name (which is
# the case as of the time of this addition).
self.assertTrue(
all([sec_groups_from_ctx[i].name <= sec_groups_from_ctx[i + 1].name
for i in range(len(sec_groups_from_ctx) - 1)]))
if ec2_enabled:
self.assertTrue(any(map(
lambda x: isinstance(x, api_access.tables.DownloadEC2),
res.context['endpoints_table'].get_table_actions()
)))
else:
self.assertFalse(any(map(
lambda x: isinstance(x, api_access.tables.DownloadEC2),
res.context['endpoints_table'].get_table_actions()
)))
def test_index(self):
self._test_index(ec2_enabled=True)
def test_index_with_ec2_disabled(self):
self._test_index(ec2_enabled=False)
@test.create_stubs({api.network: ('floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_association(self):
servers = [api.nova.Server(s, self.request)
for s in self.servers.list()]
# Add duplicate instance name to test instance name with [ID]
# Change id and private IP
server3 = api.nova.Server(self.servers.first(), self.request)
server3.id = 101
server3.addresses = deepcopy(server3.addresses)
server3.addresses['private'][0]['addr'] = "10.0.0.5"
servers.append(server3)
targets = [api.nova.FloatingIpTarget(s) for s in servers]
api.network.tenant_floating_ip_list(
IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(
IsA(http.HttpRequest)) \
.AndReturn(targets)
self.mox.ReplayAll()
res = self.client.get(reverse("horizon:project:access_and_security:"
"floating_ips:associate"))
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, '<option value="1">server_1 (1)</option>')
self.assertContains(res, '<option value="101">server_1 (101)</option>')
self.assertContains(res, '<option value="2">server_2 (2)</option>')
class AccessAndSecurityNeutronProxyTests(AccessAndSecurityTests):
def setUp(self):
super(AccessAndSecurityNeutronProxyTests, self).setUp()
self.floating_ips = self.floating_ips_uuid
class SecurityGroupTabTests(test.TestCase):
def setUp(self):
super(SecurityGroupTabTests, self).setUp()
@test.create_stubs({api.network: ('floating_ip_supported',
'tenant_floating_ip_list',
'security_group_list',
'floating_ip_pools_list',),
api.nova: ('keypair_list',
'server_list',),
quotas: ('tenant_quota_usages',),
api.base: ('is_service_enabled',)})
def test_create_button_attributes(self):
keypairs = self.keypairs.list()
floating_ips = self.floating_ips.list()
floating_pools = self.pools.list()
sec_groups = self.security_groups.list()
quota_data = self.quota_usages.first()
quota_data['security_groups']['available'] = 10
api.network.floating_ip_supported(
IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.floating_ip_pools_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_pools)
api.network.security_group_list(
IsA(http.HttpRequest)) \
.AndReturn(sec_groups)
api.nova.keypair_list(
IsA(http.HttpRequest)) \
.AndReturn(keypairs)
api.nova.server_list(
IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)).MultipleTimes() \
.AndReturn(quota_data)
api.base.is_service_enabled(
IsA(http.HttpRequest), 'network').MultipleTimes() \
.AndReturn(True)
api.base.is_service_enabled(
IsA(http.HttpRequest), 'ec2').MultipleTimes() \
.AndReturn(False)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL +
"?tab=access_security_tabs__security_groups_tab")
security_groups = res.context['security_groups_table'].data
self.assertItemsEqual(security_groups, self.security_groups.list())
create_action = self.getAndAssertTableAction(res, 'security_groups',
'create')
self.assertEqual('Create Security Group',
six.text_type(create_action.verbose_name))
self.assertIsNone(create_action.policy_rules)
self.assertEqual(set(['ajax-modal']), set(create_action.classes))
url = 'horizon:project:access_and_security:security_groups:create'
self.assertEqual(url, create_action.url)
@test.create_stubs({api.network: ('floating_ip_supported',
'tenant_floating_ip_list',
'security_group_list',
'floating_ip_pools_list',),
api.nova: ('keypair_list',
'server_list',),
quotas: ('tenant_quota_usages',),
api.base: ('is_service_enabled',)})
def _test_create_button_disabled_when_quota_exceeded(self,
network_enabled):
keypairs = self.keypairs.list()
floating_ips = self.floating_ips.list()
floating_pools = self.pools.list()
sec_groups = self.security_groups.list()
quota_data = self.quota_usages.first()
quota_data['security_groups']['available'] = 0
api.network.floating_ip_supported(
IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.floating_ip_pools_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_pools)
api.network.security_group_list(
IsA(http.HttpRequest)) \
.AndReturn(sec_groups)
api.nova.keypair_list(
IsA(http.HttpRequest)) \
.AndReturn(keypairs)
api.nova.server_list(
IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)).MultipleTimes() \
.AndReturn(quota_data)
api.base.is_service_enabled(
IsA(http.HttpRequest), 'network').MultipleTimes() \
.AndReturn(network_enabled)
api.base.is_service_enabled(
IsA(http.HttpRequest), 'ec2').MultipleTimes() \
.AndReturn(False)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL +
"?tab=access_security_tabs__security_groups_tab")
security_groups = res.context['security_groups_table'].data
self.assertItemsEqual(security_groups, self.security_groups.list())
create_action = self.getAndAssertTableAction(res, 'security_groups',
'create')
self.assertTrue('disabled' in create_action.classes,
'The create button should be disabled')
def test_create_button_disabled_when_quota_exceeded_neutron_disabled(self):
self._test_create_button_disabled_when_quota_exceeded(False)
def test_create_button_disabled_when_quota_exceeded_neutron_enabled(self):
self._test_create_button_disabled_when_quota_exceeded(True)
| apache-2.0 | 169,505,416,809,867,870 | 39.948454 | 79 | 0.584508 | false |
rryan/django-cms | cms/south_migrations/0041_auto__add_usersettings.py | 48 | 16369 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserSettings'
db.create_table(u'cms_usersettings', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label])),
('language', self.gf('django.db.models.fields.CharField')(max_length=10)),
))
db.send_create_signal('cms', ['UserSettings'])
def backwards(self, orm):
# Deleting model 'UserSettings'
db.delete_table(u'cms_usersettings')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms'] | bsd-3-clause | -6,151,111,665,315,267,000 | 79.245098 | 187 | 0.555623 | false |
noelbk/neutron-juniper | neutron/services/loadbalancer/drivers/haproxy/cfg.py | 5 | 7262 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import itertools
from neutron.agent.linux import utils
from neutron.plugins.common import constants as qconstants
from neutron.services.loadbalancer import constants
PROTOCOL_MAP = {
constants.PROTOCOL_TCP: 'tcp',
constants.PROTOCOL_HTTP: 'http',
constants.PROTOCOL_HTTPS: 'tcp',
}
BALANCE_MAP = {
constants.LB_METHOD_ROUND_ROBIN: 'roundrobin',
constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
constants.LB_METHOD_SOURCE_IP: 'source'
}
STATS_MAP = {
constants.STATS_ACTIVE_CONNECTIONS: 'qcur',
constants.STATS_MAX_CONNECTIONS: 'qmax',
constants.STATS_CURRENT_SESSIONS: 'scur',
constants.STATS_MAX_SESSIONS: 'smax',
constants.STATS_TOTAL_SESSIONS: 'stot',
constants.STATS_IN_BYTES: 'bin',
constants.STATS_OUT_BYTES: 'bout',
constants.STATS_CONNECTION_ERRORS: 'econ',
constants.STATS_RESPONSE_ERRORS: 'eresp'
}
ACTIVE = qconstants.ACTIVE
INACTIVE = qconstants.INACTIVE
def save_config(conf_path, logical_config, socket_path=None,
user_group='nogroup'):
"""Convert a logical configuration to the HAProxy version."""
data = []
data.extend(_build_global(logical_config, socket_path=socket_path,
user_group=user_group))
data.extend(_build_defaults(logical_config))
data.extend(_build_frontend(logical_config))
data.extend(_build_backend(logical_config))
utils.replace_file(conf_path, '\n'.join(data))
def _build_global(config, socket_path=None, user_group='nogroup'):
opts = [
'daemon',
'user nobody',
'group %s' % user_group,
'log /dev/log local0',
'log /dev/log local1 notice'
]
if socket_path:
opts.append('stats socket %s mode 0666 level user' % socket_path)
return itertools.chain(['global'], ('\t' + o for o in opts))
def _build_defaults(config):
opts = [
'log global',
'retries 3',
'option redispatch',
'timeout connect 5000',
'timeout client 50000',
'timeout server 50000',
]
return itertools.chain(['defaults'], ('\t' + o for o in opts))
def _build_frontend(config):
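    # Emits a section of the form (illustrative):
    #   frontend <vip-id>
    #       option tcplog
    #       bind <vip-ip>:<protocol-port>
    #       mode tcp|http
    #       default_backend <pool-id>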
protocol = config['vip']['protocol']
opts = [
'option tcplog',
'bind %s:%d' % (
_get_first_ip_from_port(config['vip']['port']),
config['vip']['protocol_port']
),
'mode %s' % PROTOCOL_MAP[protocol],
'default_backend %s' % config['pool']['id'],
]
if config['vip']['connection_limit'] >= 0:
opts.append('maxconn %s' % config['vip']['connection_limit'])
if protocol == constants.PROTOCOL_HTTP:
opts.append('option forwardfor')
return itertools.chain(
['frontend %s' % config['vip']['id']],
('\t' + o for o in opts)
)
def _build_backend(config):
protocol = config['pool']['protocol']
lb_method = config['pool']['lb_method']
opts = [
'mode %s' % PROTOCOL_MAP[protocol],
'balance %s' % BALANCE_MAP.get(lb_method, 'roundrobin')
]
if protocol == constants.PROTOCOL_HTTP:
opts.append('option forwardfor')
# add the first health_monitor (if available)
server_addon, health_opts = _get_server_health_option(config)
opts.extend(health_opts)
# add session persistence (if available)
persist_opts = _get_session_persistence(config)
opts.extend(persist_opts)
# add the members
for member in config['members']:
if member['status'] in (ACTIVE, INACTIVE) and member['admin_state_up']:
server = (('server %(id)s %(address)s:%(protocol_port)s '
'weight %(weight)s') % member) + server_addon
if _has_http_cookie_persistence(config):
server += ' cookie %d' % config['members'].index(member)
opts.append(server)
return itertools.chain(
['backend %s' % config['pool']['id']],
('\t' + o for o in opts)
)
def _get_first_ip_from_port(port):
for fixed_ip in port['fixed_ips']:
return fixed_ip['ip_address']
def _get_server_health_option(config):
"""return the first active health option."""
for monitor in config['healthmonitors']:
# not checking the status of healthmonitor for two reasons:
# 1) status field is absent in HealthMonitor model
# 2) only active HealthMonitors are fetched with
# LoadBalancerCallbacks.get_logical_device
if monitor['admin_state_up']:
break
else:
return '', []
server_addon = ' check inter %(delay)ds fall %(max_retries)d' % monitor
opts = [
'timeout check %ds' % monitor['timeout']
]
if monitor['type'] in (constants.HEALTH_MONITOR_HTTP,
constants.HEALTH_MONITOR_HTTPS):
opts.append('option httpchk %(http_method)s %(url_path)s' % monitor)
opts.append(
'http-check expect rstatus %s' %
'|'.join(_expand_expected_codes(monitor['expected_codes']))
)
if monitor['type'] == constants.HEALTH_MONITOR_HTTPS:
opts.append('option ssl-hello-chk')
return server_addon, opts
def _get_session_persistence(config):
persistence = config['vip'].get('session_persistence')
if not persistence:
return []
opts = []
if persistence['type'] == constants.SESSION_PERSISTENCE_SOURCE_IP:
opts.append('stick-table type ip size 10k')
opts.append('stick on src')
elif persistence['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE:
opts.append('cookie SRV insert indirect nocache')
elif (persistence['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and
persistence.get('cookie_name')):
opts.append('appsession %s len 56 timeout 3h' %
persistence['cookie_name'])
return opts
def _has_http_cookie_persistence(config):
return (config['vip'].get('session_persistence') and
config['vip']['session_persistence']['type'] ==
constants.SESSION_PERSISTENCE_HTTP_COOKIE)
def _expand_expected_codes(codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
for code in codes.replace(',', ' ').split(' '):
code = code.strip()
if not code:
continue
elif '-' in code:
low, hi = code.split('-')[:2]
retval.update(str(i) for i in xrange(int(low), int(hi) + 1))
else:
retval.add(code)
return retval
| apache-2.0 | -2,912,930,414,748,392,000 | 30.167382 | 79 | 0.619389 | false |
devs1991/test_edx_docmode | common/djangoapps/course_modes/tests/test_views.py | 2 | 17431 | """
Tests for course_modes views.
"""
from datetime import datetime
import unittest
import decimal
import ddt
import freezegun
from mock import patch
from django.conf import settings
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from util.testing import UrlResetMixin
from embargo.test_utils import restrict_course
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory
from course_modes.tests.factories import CourseModeFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from student.models import CourseEnrollment
import lms.djangoapps.commerce.tests.test_utils as ecomm_test_utils
from course_modes.models import CourseMode, Mode
from openedx.core.djangoapps.theming.test_util import with_is_edx_domain
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CourseModeViewTest(UrlResetMixin, ModuleStoreTestCase):
@patch.dict(settings.FEATURES, {'MODE_CREATION_FOR_TESTING': True})
def setUp(self):
super(CourseModeViewTest, self).setUp('course_modes.urls')
self.course = CourseFactory.create()
self.user = UserFactory.create(username="Bob", email="[email protected]", password="edx")
self.client.login(username=self.user.username, password="edx")
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.data(
# is_active?, enrollment_mode, redirect?
(True, 'verified', True),
(True, 'honor', False),
(True, 'audit', False),
(False, 'verified', False),
(False, 'honor', False),
(False, 'audit', False),
(False, None, False),
)
@ddt.unpack
def test_redirect_to_dashboard(self, is_active, enrollment_mode, redirect):
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)
# Enroll the user in the test course
if enrollment_mode is not None:
CourseEnrollmentFactory(
is_active=is_active,
mode=enrollment_mode,
course_id=self.course.id,
user=self.user
)
# Configure whether we're upgrading or not
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
# Check whether we were correctly redirected
if redirect:
self.assertRedirects(response, reverse('dashboard'))
else:
self.assertEquals(response.status_code, 200)
def test_no_id_redirect(self):
# Create the course modes
CourseModeFactory.create(mode_slug=CourseMode.NO_ID_PROFESSIONAL_MODE, course_id=self.course.id, min_price=100)
# Enroll the user in the test course
CourseEnrollmentFactory(
is_active=False,
mode=CourseMode.NO_ID_PROFESSIONAL_MODE,
course_id=self.course.id,
user=self.user
)
# Configure whether we're upgrading or not
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
# Check whether we were correctly redirected
start_flow_url = reverse('verify_student_start_flow', args=[unicode(self.course.id)])
self.assertRedirects(response, start_flow_url)
def test_no_id_redirect_otto(self):
# Create the course modes
prof_course = CourseFactory.create()
CourseModeFactory(mode_slug=CourseMode.NO_ID_PROFESSIONAL_MODE, course_id=prof_course.id,
min_price=100, sku='TEST')
ecomm_test_utils.update_commerce_config(enabled=True)
# Enroll the user in the test course
CourseEnrollmentFactory(
is_active=False,
mode=CourseMode.NO_ID_PROFESSIONAL_MODE,
course_id=prof_course.id,
user=self.user
)
# Configure whether we're upgrading or not
url = reverse('course_modes_choose', args=[unicode(prof_course.id)])
response = self.client.get(url)
self.assertRedirects(response, 'http://testserver/test_basket/?sku=TEST', fetch_redirect_response=False)
ecomm_test_utils.update_commerce_config(enabled=False)
def test_no_enrollment(self):
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)
# User visits the track selection page directly without ever enrolling
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
@ddt.data(
'',
'1,,2',
'1, ,2',
'1, 2, 3'
)
def test_suggested_prices(self, price_list):
# Create the course modes
for mode in ('audit', 'honor'):
CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)
CourseModeFactory.create(
mode_slug='verified',
course_id=self.course.id,
suggested_prices=price_list
)
# Enroll the user in the test course to emulate
# automatic enrollment
CourseEnrollmentFactory(
is_active=True,
course_id=self.course.id,
user=self.user
)
# Verify that the prices render correctly
response = self.client.get(
reverse('course_modes_choose', args=[unicode(self.course.id)]),
follow=False,
)
self.assertEquals(response.status_code, 200)
# TODO: Fix it so that response.templates works w/ mako templates, and then assert
# that the right template rendered
@ddt.data(
(['honor', 'verified', 'credit'], True),
(['honor', 'verified'], False),
)
@ddt.unpack
def test_credit_upsell_message(self, available_modes, show_upsell):
# Create the course modes
for mode in available_modes:
CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)
# Check whether credit upsell is shown on the page
# This should *only* be shown when a credit mode is available
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
if show_upsell:
self.assertContains(response, "Credit")
else:
self.assertNotContains(response, "Credit")
@ddt.data('professional', 'no-id-professional')
def test_professional_enrollment(self, mode):
# The only course mode is professional ed
CourseModeFactory.create(mode_slug=mode, course_id=self.course.id, min_price=1)
# Go to the "choose your track" page
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(choose_track_url)
# Since the only available track is professional ed, expect that
# we're redirected immediately to the start of the payment flow.
start_flow_url = reverse('verify_student_start_flow', args=[unicode(self.course.id)])
self.assertRedirects(response, start_flow_url)
# Now enroll in the course
CourseEnrollmentFactory(
user=self.user,
is_active=True,
mode=mode,
course_id=unicode(self.course.id),
)
# Expect that this time we're redirected to the dashboard (since we're already registered)
response = self.client.get(choose_track_url)
self.assertRedirects(response, reverse('dashboard'))
# Mapping of course modes to the POST parameters sent
# when the user chooses that mode.
POST_PARAMS_FOR_COURSE_MODE = {
'audit': {},
'honor': {'honor_mode': True},
'verified': {'verified_mode': True, 'contribution': '1.23'},
'unsupported': {'unsupported_mode': True},
}
@ddt.data(
('honor', 'dashboard'),
('verified', 'start-flow'),
)
@ddt.unpack
def test_choose_mode_redirect(self, course_mode, expected_redirect):
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
min_price = 0 if mode in ["honor", "audit"] else 1
CourseModeFactory.create(mode_slug=mode, course_id=self.course.id, min_price=min_price)
# Choose the mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE[course_mode])
# Verify the redirect
if expected_redirect == 'dashboard':
redirect_url = reverse('dashboard')
elif expected_redirect == 'start-flow':
redirect_url = reverse(
'verify_student_start_flow',
kwargs={'course_id': unicode(self.course.id)}
)
else:
self.fail("Must provide a valid redirect URL name")
self.assertRedirects(response, redirect_url)
def test_remember_donation_for_course(self):
# Create the course modes
CourseModeFactory.create(mode_slug='honor', course_id=self.course.id)
CourseModeFactory.create(mode_slug='verified', course_id=self.course.id, min_price=1)
# Choose the mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE['verified'])
# Expect that the contribution amount is stored in the user's session
self.assertIn('donation_for_course', self.client.session)
self.assertIn(unicode(self.course.id), self.client.session['donation_for_course'])
actual_amount = self.client.session['donation_for_course'][unicode(self.course.id)]
expected_amount = decimal.Decimal(self.POST_PARAMS_FOR_COURSE_MODE['verified']['contribution'])
self.assertEqual(actual_amount, expected_amount)
def test_successful_default_enrollment(self):
# Create the course modes
for mode in (CourseMode.DEFAULT_MODE_SLUG, 'verified'):
CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)
# Enroll the user in the default mode (honor) to emulate
# automatic enrollment
params = {
'enrollment_action': 'enroll',
'course_id': unicode(self.course.id)
}
self.client.post(reverse('change_enrollment'), params)
# Explicitly select the honor mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE[CourseMode.DEFAULT_MODE_SLUG])
# Verify that the user's enrollment remains unchanged
mode, is_active = CourseEnrollment.enrollment_mode_for_user(self.user, self.course.id)
self.assertEqual(mode, CourseMode.DEFAULT_MODE_SLUG)
self.assertEqual(is_active, True)
def test_unsupported_enrollment_mode_failure(self):
# Create the supported course modes
for mode in ('honor', 'verified'):
CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)
# Choose an unsupported mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE['unsupported'])
self.assertEqual(400, response.status_code)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_default_mode_creation(self):
# Hit the mode creation endpoint with no querystring params, to create an honor mode
url = reverse('create_mode', args=[unicode(self.course.id)])
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
expected_mode = [Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None, None, None)]
course_mode = CourseMode.modes_for_course(self.course.id)
self.assertEquals(course_mode, expected_mode)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.data(
(u'verified', u'Verified Certificate', 10, '10,20,30', 'usd'),
(u'professional', u'Professional Education', 100, '100,200', 'usd'),
)
@ddt.unpack
def test_verified_mode_creation(self, mode_slug, mode_display_name, min_price, suggested_prices, currency):
parameters = {}
parameters['mode_slug'] = mode_slug
parameters['mode_display_name'] = mode_display_name
parameters['min_price'] = min_price
parameters['suggested_prices'] = suggested_prices
parameters['currency'] = currency
url = reverse('create_mode', args=[unicode(self.course.id)])
response = self.client.get(url, parameters)
self.assertEquals(response.status_code, 200)
expected_mode = [Mode(mode_slug, mode_display_name, min_price, suggested_prices, currency, None, None, None)]
course_mode = CourseMode.modes_for_course(self.course.id)
self.assertEquals(course_mode, expected_mode)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_multiple_mode_creation(self):
# Create an honor mode
base_url = reverse('create_mode', args=[unicode(self.course.id)])
self.client.get(base_url)
# Excluding the currency parameter implicitly tests the mode creation endpoint's ability to
# use default values when parameters are partially missing.
parameters = {}
parameters['mode_slug'] = u'verified'
parameters['mode_display_name'] = u'Verified Certificate'
parameters['min_price'] = 10
parameters['suggested_prices'] = '10,20'
# Create a verified mode
url = reverse('create_mode', args=[unicode(self.course.id)])
self.client.get(url, parameters)
honor_mode = Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None, None, None)
verified_mode = Mode(u'verified', u'Verified Certificate', 10, '10,20', 'usd', None, None, None)
expected_modes = [honor_mode, verified_mode]
course_modes = CourseMode.modes_for_course(self.course.id)
self.assertEquals(course_modes, expected_modes)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@with_is_edx_domain(True)
def test_hide_nav(self):
# Create the course modes
for mode in ["honor", "verified"]:
CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)
# Load the track selection page
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
# Verify that the header navigation links are hidden for the edx.org version
self.assertNotContains(response, "How it Works")
self.assertNotContains(response, "Find courses")
self.assertNotContains(response, "Schools & Partners")
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@freezegun.freeze_time('2015-01-02')
def test_course_closed(self):
for mode in ["honor", "verified"]:
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
self.course.enrollment_end = datetime(2015, 01, 01)
modulestore().update_item(self.course, self.user.id)
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
# URL-encoded version of 1/1/15, 12:00 AM
redirect_url = reverse('dashboard') + '?course_closed=1%2F1%2F15%2C+12%3A00+AM'
self.assertRedirects(response, redirect_url)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TrackSelectionEmbargoTest(UrlResetMixin, ModuleStoreTestCase):
"""Test embargo restrictions on the track selection page. """
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(TrackSelectionEmbargoTest, self).setUp('embargo')
# Create a course and course modes
self.course = CourseFactory.create()
CourseModeFactory.create(mode_slug='honor', course_id=self.course.id)
CourseModeFactory.create(mode_slug='verified', course_id=self.course.id, min_price=10)
# Create a user and log in
self.user = UserFactory.create(username="Bob", email="[email protected]", password="edx")
self.client.login(username=self.user.username, password="edx")
# Construct the URL for the track selection page
self.url = reverse('course_modes_choose', args=[unicode(self.course.id)])
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_embargo_restrict(self):
with restrict_course(self.course.id) as redirect_url:
response = self.client.get(self.url)
self.assertRedirects(response, redirect_url)
def test_embargo_allow(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
| agpl-3.0 | -2,856,916,298,906,904,600 | 41.103865 | 119 | 0.651426 | false |
z5tron/scrpmlsli | parse_item.py | 1 | 1756 | #!/usr/bin/python3
import aiohttp
import asyncio
import async_timeout
import os
import re
import sys
import json
from bs4 import BeautifulSoup
import requests
import time
from datetime import datetime
import random
import operator
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
async def download_coroutine(session, url, datadir):
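    # Stream the response body to <datadir>/<basename of url> in 1 KiB chunks;
    # the whole download is bounded by the 10-second async timeout.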
with async_timeout.timeout(10):
async with session.get(url) as response:
filename = os.path.join(datadir, os.path.basename(url))
with open(filename, 'wb') as f_handle:
while True:
chunk = await response.content.read(1024)
if not chunk:
break
f_handle.write(chunk)
return await response.release()
async def main(loop, urls, datadir):
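    # Download every image URL for one listing concurrently, sharing a single
    # client session across the tasks.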
async with aiohttp.ClientSession(loop=loop) as session:
tasks = [download_coroutine(session, url, datadir) for url in urls]
await asyncio.gather(*tasks)
if __name__ == '__main__':
for fpage in open('pages.txt', 'r').readlines():
fpage = fpage.strip()
datadir = fpage.replace("/index.html", "")
print(datadir)
text = open(fpage, 'r', errors='ignore').read()
soup = BeautifulSoup(text, 'html.parser')
pics = []
for i,p in enumerate(soup.select('#photo-carousel li img')):
if os.path.exists(datadir + '/' + os.path.basename(p['src'])): continue
print(" ", i, p['src'])
pics.append(p['src'])
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop, pics, datadir))
time.sleep(3)
| gpl-3.0 | -4,720,003,237,735,610,000 | 28.266667 | 108 | 0.602506 | false |
macropin/xml-models-redux | xml_models/xpath_twister.py | 2 | 7351 | """
Copyright 2009 Chris Tarttelin and Point2 Technologies
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of the FreeBSD Project.
"""
import unittest
from xml.dom import minidom
import xpath
class MultipleNodesReturnedException(Exception):
pass
lxml_available = False
try:
from lxml import etree, objectify
lxml_available = True
except:
pass
def find_unique(xml, expression, namespace=None):
if lxml_available:
return _lxml_xpath(xml, expression, namespace)
else:
return _pydom_xpath(xml, expression, namespace)
def find_all(xml, expression, namespace=None):
if lxml_available:
return _lxml_xpath_all(xml, expression, namespace)
else:
return _pydom_xpath_all(xml, expression, namespace)
def _lxml_xpath(xml_doc, expression, namespace):
if namespace:
find = etree.XPath(get_xpath(expression, namespace), namespaces={'x': namespace})
else:
find = etree.XPath(get_xpath(expression, namespace))
matches = find(xml_doc)
if len(matches) == 1:
matched = matches[0]
if type(matched) == type(''):
return unicode(matched).strip()
if isinstance(matched, etree._ElementStringResult):
return str(matched)
if isinstance(matched, etree._ElementUnicodeResult):
return unicode(matched)
if matched is None or matched == False:
return unicode(matched.text).strip()
if isinstance(matched, etree._Element):
if matched.text is not None:
return unicode(matched.text)
if len(matches) > 1:
raise MultipleNodesReturnedException
def _lxml_xpath_all(xml, expression, namespace):
if namespace:
find = etree.XPath(get_xpath(expression, namespace), namespaces={'x': namespace})
else:
find = etree.XPath(get_xpath(expression,namespace))
matches = find(xml)
return [etree.tostring(match) for match in matches]
def domify(xml):
if lxml_available:
return objectify.fromstring(xml)
else:
return minidom.parseString(xml)
def _pydom_xpath_all(xml, expression, namespace):
nodelist = xpath.find(expression, xml, default_namespace=namespace)
return [fragment.toxml() for fragment in nodelist]
def _pydom_xpath(xml, expression, namespace):
nodelist = xpath.find(expression, xml, default_namespace=namespace)
if len(nodelist) > 1:
raise MultipleNodesReturnedException
if len(nodelist) == 0:
return None
if nodelist[0].nodeType == minidom.Node.DOCUMENT_NODE:
node = nodelist[0].firstChild.firstChild
else:
node = nodelist[0].firstChild
if node == None:
return None
if node.nodeType == minidom.Node.TEXT_NODE:
return node.nodeValue
else:
return None
def get_xpath(xpath, namespace):
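    # Illustrative example: get_xpath('/foo/bar/@name', some_ns) returns
    # '/x:foo/x:bar/@name', matching the 'x' prefix bound in the lxml
    # namespaces={'x': namespace} maps above; without a namespace the
    # expression is returned unchanged.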
if namespace:
xpath_list = xpath.split('/')
xpath_with_ns = ""
for element in xpath_list:
if not element.startswith('@') and not element == '' :
xpath_with_ns += "/x:" + element
elif element == '':
pass
else:
xpath_with_ns += "/" + element
return xpath_with_ns
else:
return xpath
class XPathTest(unittest.TestCase):
def test_xpath_returns_expected_element_value(self):
#setup
xml = minidom.parseString("<foo><baz>dcba</baz><bar>abcd</bar></foo>")
#execute
val = _pydom_xpath(xml, "/foo/bar", None)
#assert
self.assertEquals("abcd", val)
def test_xpath_returns_expected_element_value_from_unicode_xml_fragment(self):
#setup
xml = minidom.parseString(u"<foo><baz>dcba</baz><bar>abcd\xe9</bar></foo>".encode('utf-8'))
#execute
val = _pydom_xpath(xml, "/foo/bar", None)
#assert
self.assertEquals(u"abcd\xe9", val)
def test_xpath_returns_expected_attribute_value(self):
#setup
xml = minidom.parseString('<foo><baz name="Arthur">dcba</baz><bar>abcd</bar></foo>')
#execute
val = _pydom_xpath(xml, "/foo/baz/@name", None)
#assert
self.assertEquals("Arthur", val)
def test_xpath_returns_expected_attribute_value_from_unicode_xml_fragment(self):
#setup
xml = minidom.parseString(u'<foo><baz name="Arthur\xe9">dcba</baz><bar>abcd</bar></foo>'.encode('utf-8'))
#execute
val = _pydom_xpath(xml, "/foo/baz/@name", None)
#assert
self.assertEquals(u"Arthur\xe9", val)
def test_lxml_returns_expected_element_value(self):
#setup
xml = objectify.fromstring('<foo><baz name="Arthur">dcba</baz><bar>abcd</bar></foo>')
#execute
val = _lxml_xpath(xml, "/foo/bar", None)
#assert
self.assertEquals("abcd", val)
def test_lxml_returns_expected_element_value_from_unicode_xml_fragment(self):
#setup
xml = objectify.fromstring(u'<foo><baz name="Arthur">dcba</baz><bar>abcd\xe9</bar></foo>'.encode('utf-8'))
#execute
val = _lxml_xpath(xml, "/foo/bar", None)
#assert
self.assertEquals(u"abcd\xe9", val)
def test_lxml_returns_expected_attribute_value(self):
#setup
xml = objectify.fromstring('<foo><baz name="Arthur">dcba</baz><bar>abcd</bar></foo>')
#execute
val = _lxml_xpath(xml, "/foo/baz/@name", None)
#assert
self.assertEquals("Arthur", val)
def test_lxml_returns_expected_attribute_value_from_unicode_xml_fragment(self):
#setup
xml = objectify.fromstring(u'<foo><baz name="Arthur\xe9">dcba</baz><bar>abcd</bar></foo>'.encode('utf-8'))
#execute
val = _lxml_xpath(xml, "/foo/baz/@name", None)
#assert
self.assertEquals(u"Arthur\xe9", val)
if __name__=='__main__':
unittest.main() | bsd-2-clause | -7,501,652,764,034,484,000 | 36.896907 | 114 | 0.646307 | false |
nckx/dstat | plugins/dstat_ib.py | 2 | 3149 | ### Author: Dmitry Fedin <[email protected]>
class dstat_plugin(dstat):
ibdirname = '/sys/class/infiniband'
"""
    Bytes received or sent through InfiniBand/RoCE interfaces
    Usage:
        dstat --ib -N <adapter name>:<port>,total
    default dstat --ib is the same as
        dstat --ib -N total
    example for a Mellanox adapter, transferring data via port 2:
        dstat --ib -Nmlx4_0:2
"""
def __init__(self):
self.nick = ('recv', 'send')
self.type = 'd'
self.cols = 2
self.width = 6
def discover(self, *objlist):
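        # Enumerate '<device>:<port>' names from /sys/class/infiniband/*/ports/*,
        # then append any explicitly requested entries from objlist.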
ret = []
for subdirname in os.listdir(self.ibdirname):
if not os.path.isdir(os.path.join(self.ibdirname,subdirname)) : continue
device_dir = os.path.join(self.ibdirname, subdirname, 'ports')
for subdirname2 in os.listdir(device_dir) :
if not os.path.isdir(os.path.join(device_dir,subdirname2)): continue
name = subdirname + ":" + subdirname2
ret.append(name)
ret.sort()
for item in objlist: ret.append(item)
return ret
def vars(self):
ret = []
if op.netlist:
varlist = op.netlist
elif not op.full:
varlist = ('total',)
else:
varlist = self.discover
varlist.sort()
for name in varlist:
if name in self.discover + ['total']:
ret.append(name)
if not ret:
raise Exception, "No suitable network interfaces found to monitor"
return ret
def name(self):
return ['ib/'+name for name in self.vars]
def extract(self):
self.set2['total'] = [0, 0]
ifaces = self.discover
for name in self.vars: self.set2[name] = [0, 0]
for name in ifaces:
l=name.split(':');
if len(l) < 2:
continue
rcv_counter_name=os.path.join('/sys/class/infiniband', l[0], 'ports', l[1], 'counters_ext/port_rcv_data_64')
xmit_counter_name=os.path.join('/sys/class/infiniband', l[0], 'ports', l[1], 'counters_ext/port_xmit_data_64')
rcv_lines = dopen(rcv_counter_name).readlines()
xmit_lines = dopen(xmit_counter_name).readlines()
if len(rcv_lines) < 1 or len(xmit_lines) < 1:
continue
rcv_value = long(rcv_lines[0])
xmit_value = long(xmit_lines[0])
if name in self.vars :
self.set2[name] = (rcv_value, xmit_value)
self.set2['total'] = ( self.set2['total'][0] + rcv_value, self.set2['total'][1] + xmit_value)
if update:
for name in self.set2.keys():
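                # port_rcv_data/port_xmit_data counters are in units of 4-byte
                # (32-bit) words, hence the * 4.0 to report bytes per second.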
self.val[name] = [
(self.set2[name][0] - self.set1[name][0]) * 4.0 / elapsed,
(self.set2[name][1] - self.set1[name][1]) * 4.0/ elapsed,
]
if self.val[name][0] < 0: self.val[name][0] += maxint + 1
if self.val[name][1] < 0: self.val[name][1] += maxint + 1
if step == op.delay:
self.set1.update(self.set2)
| gpl-2.0 | -4,872,774,046,081,488,000 | 36.939759 | 122 | 0.529057 | false |
t-cas/JumpSSH | tests/docker/image_restserver/app.py | 2 | 2021 | #!flask/bin/python
from functools import wraps
from flask import request, Response, Flask, make_response
import json
app = Flask(__name__)
def check_auth(username, password):
"""This function is called to check if a username password combination is valid."""
return username == 'admin' and password == 'secret'
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'}
)
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
@app.route('/')
def index():
return "Hello, World!"
@app.route('/json')
def json_body():
json_object = {"key1": "value1", "key2": ["value21", "value22"]}
response = make_response(json.dumps(json_object))
response.headers['Content-Type'] = 'application/json'
return response
@app.route('/authentication-required')
@requires_auth
def secret_page():
return "Authentication successful"
@app.route('/echo-parameters', methods=['GET', 'POST'])
def echo_parameters():
# use `flat=False` to have all values returned as lists for a given key.
return json.dumps(request.args.to_dict(flat=False))
@app.route('/echo-method', methods=['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'PATCH', 'DELETE'])
def echo_method():
response = make_response()
response.headers['Request-Method'] = request.method
return response
@app.route('/echo-headers', methods=['GET', 'POST'])
def echo_headers():
return json.dumps(dict(request.headers))
@app.route('/echo-body', methods=['POST'])
def echo_body():
return request.stream.read()
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
| mit | -776,990,306,693,598,600 | 25.592105 | 96 | 0.657595 | false |
iemejia/beam | sdks/python/apache_beam/runners/portability/artifact_service_test.py | 5 | 7263 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test cases for :module:`artifact_service_client`."""
# pytype: skip-file
import contextlib
import io
import threading
import unittest
from urllib.parse import quote
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners.portability import artifact_service
from apache_beam.utils import proto_utils
class InMemoryFileManager(object):
def __init__(self, contents=()):
self._contents = dict(contents)
def get(self, path):
return self._contents[path]
def file_reader(self, path):
return io.BytesIO(self._contents[path])
def file_writer(self, name):
path = 'prefix:' + name
@contextlib.contextmanager
def writable():
buffer = io.BytesIO()
yield buffer
buffer.seek(0)
self._contents[path] = buffer.read()
return writable(), path
class ArtifactServiceTest(unittest.TestCase):
def file_artifact(self, path):
return beam_runner_api_pb2.ArtifactInformation(
type_urn=common_urns.artifact_types.FILE.urn,
type_payload=beam_runner_api_pb2.ArtifactFilePayload(
path=path).SerializeToString())
def embedded_artifact(self, data, name=None):
return beam_runner_api_pb2.ArtifactInformation(
type_urn=common_urns.artifact_types.EMBEDDED.urn,
type_payload=beam_runner_api_pb2.EmbeddedFilePayload(
data=data).SerializeToString(),
role_urn=common_urns.artifact_roles.STAGING_TO.urn if name else None,
role_payload=beam_runner_api_pb2.ArtifactStagingToRolePayload(
staged_name=name).SerializeToString() if name else None)
def test_file_retrieval(self):
file_manager = InMemoryFileManager({
'path/to/a': b'a', 'path/to/b': b'b' * 37
})
retrieval_service = artifact_service.ArtifactRetrievalService(
file_manager.file_reader, chunk_size=10)
dep_a = self.file_artifact('path/to/a')
self.assertEqual(
retrieval_service.ResolveArtifacts(
beam_artifact_api_pb2.ResolveArtifactsRequest(artifacts=[dep_a])),
beam_artifact_api_pb2.ResolveArtifactsResponse(replacements=[dep_a]))
self.assertEqual(
list(
retrieval_service.GetArtifact(
beam_artifact_api_pb2.GetArtifactRequest(artifact=dep_a))),
[beam_artifact_api_pb2.GetArtifactResponse(data=b'a')])
self.assertEqual(
list(
retrieval_service.GetArtifact(
beam_artifact_api_pb2.GetArtifactRequest(
artifact=self.file_artifact('path/to/b')))),
[
beam_artifact_api_pb2.GetArtifactResponse(data=b'b' * 10),
beam_artifact_api_pb2.GetArtifactResponse(data=b'b' * 10),
beam_artifact_api_pb2.GetArtifactResponse(data=b'b' * 10),
beam_artifact_api_pb2.GetArtifactResponse(data=b'b' * 7)
])
def test_embedded_retrieval(self):
retrieval_service = artifact_service.ArtifactRetrievalService(None)
embedded_dep = self.embedded_artifact(b'some_data')
self.assertEqual(
list(
retrieval_service.GetArtifact(
beam_artifact_api_pb2.GetArtifactRequest(
artifact=embedded_dep))),
[beam_artifact_api_pb2.GetArtifactResponse(data=b'some_data')])
def test_url_retrieval(self):
retrieval_service = artifact_service.ArtifactRetrievalService(None)
url_dep = beam_runner_api_pb2.ArtifactInformation(
type_urn=common_urns.artifact_types.URL.urn,
type_payload=beam_runner_api_pb2.ArtifactUrlPayload(
url='file:' + quote(__file__)).SerializeToString())
content = b''.join([
r.data for r in retrieval_service.GetArtifact(
beam_artifact_api_pb2.GetArtifactRequest(artifact=url_dep))
])
with open(__file__, 'rb') as fin:
self.assertEqual(content, fin.read())
def test_push_artifacts(self):
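    # End-to-end staging flow: register unresolved deps with the staging
    # service, "push" resolved artifacts from a fake client in a background
    # thread, then check the staged FILE artifacts match what was offered.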
unresolved = beam_runner_api_pb2.ArtifactInformation(type_urn='unresolved')
resolved_a = self.embedded_artifact(data=b'a', name='a.txt')
resolved_b = self.embedded_artifact(data=b'bb', name='b.txt')
dep_big = self.embedded_artifact(data=b'big ' * 100, name='big.txt')
class TestArtifacts(object):
def ResolveArtifacts(self, request):
replacements = []
for artifact in request.artifacts:
if artifact.type_urn == 'unresolved':
replacements += [resolved_a, resolved_b]
else:
replacements.append(artifact)
return beam_artifact_api_pb2.ResolveArtifactsResponse(
replacements=replacements)
def GetArtifact(self, request):
if request.artifact.type_urn == common_urns.artifact_types.EMBEDDED.urn:
content = proto_utils.parse_Bytes(
request.artifact.type_payload,
beam_runner_api_pb2.EmbeddedFilePayload).data
for k in range(0, len(content), 13):
yield beam_artifact_api_pb2.GetArtifactResponse(
data=content[k:k + 13])
else:
raise NotImplementedError
file_manager = InMemoryFileManager()
server = artifact_service.ArtifactStagingService(file_manager.file_writer)
server.register_job('staging_token', {'env': [unresolved, dep_big]})
# "Push" artifacts as if from a client.
t = threading.Thread(
target=lambda: artifact_service.offer_artifacts(
server, TestArtifacts(), 'staging_token'))
t.daemon = True
t.start()
resolved_deps = server.resolved_deps('staging_token', timeout=5)['env']
expected = {
'a.txt': b'a',
'b.txt': b'bb',
'big.txt': b'big ' * 100,
}
for dep in resolved_deps:
self.assertEqual(dep.type_urn, common_urns.artifact_types.FILE.urn)
self.assertEqual(dep.role_urn, common_urns.artifact_roles.STAGING_TO.urn)
type_payload = proto_utils.parse_Bytes(
dep.type_payload, beam_runner_api_pb2.ArtifactFilePayload)
role_payload = proto_utils.parse_Bytes(
dep.role_payload, beam_runner_api_pb2.ArtifactStagingToRolePayload)
self.assertTrue(
type_payload.path.endswith(role_payload.staged_name),
type_payload.path)
self.assertEqual(
file_manager.get(type_payload.path),
expected.pop(role_payload.staged_name))
self.assertEqual(expected, {})
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -4,212,710,564,767,731,700 | 37.632979 | 80 | 0.673964 | false |
mrquim/mrquimrepo | repo/script.module.pycryptodome/lib/Crypto/Hash/MD5.py | 7 | 3084 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""MD5 cryptographic hash algorithm.
MD5 is specified in RFC1321_ and produces the 128-bit digest of a message.
>>> from Crypto.Hash import MD5
>>>
>>> h = MD5.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
MD5 stands for Message Digest version 5; it was invented by Rivest in 1991.
This algorithm is insecure. Do not use it for new designs.
.. _RFC1321: http://tools.ietf.org/html/rfc1321
"""
__all__ = ['new', 'block_size', 'digest_size']
from Crypto.Util.py3compat import *
def __make_constructor():
try:
# The md5 module is deprecated in Python 2.6, so use hashlib when possible.
from hashlib import md5 as _hash_new
except ImportError:
from md5 import new as _hash_new
h = _hash_new()
if hasattr(h, 'new') and hasattr(h, 'name') and hasattr(h, 'digest_size') and hasattr(h, 'block_size'):
# The module from stdlib has the API that we need. Just use it.
return _hash_new
else:
# Wrap the hash object in something that gives us the expected API.
_copy_sentinel = object()
class _MD5(object):
digest_size = 16
block_size = 64
oid = "1.2.840.113549.2.5"
def __init__(self, *args):
if args and args[0] is _copy_sentinel:
self._h = args[1]
else:
self._h = _hash_new(*args)
def copy(self):
return _MD5(_copy_sentinel, self._h.copy())
def update(self, *args):
f = self.update = self._h.update
f(*args)
def digest(self):
f = self.digest = self._h.digest
return f()
def hexdigest(self):
f = self.hexdigest = self._h.hexdigest
return f()
_MD5.new = _MD5
return _MD5
new = __make_constructor()
del __make_constructor
#: The size of the resulting hash in bytes.
digest_size = new().digest_size
#: The internal block size of the hash algorithm in bytes.
block_size = new().block_size
| gpl-2.0 | -3,931,963,354,330,889,000 | 34.860465 | 107 | 0.594358 | false |
jessekl/flixr | venv/lib/python2.7/site-packages/wtforms/ext/appengine/ndb.py | 51 | 17121 | """
Form generation utilities for App Engine's new ``ndb.Model`` class.
The goal of ``model_form()`` is to provide a clean, explicit and predictable
way to create forms based on ``ndb.Model`` classes. No juggling or black
magic should be necessary to generate a form for models, or to add custom
non-model related fields: ``model_form()`` simply generates a form class
that can be used as it is, or that can be extended directly or even be used
to create other forms using ``model_form()``.
Example usage:
.. code-block:: python
from google.appengine.ext import ndb
from wtforms.ext.appengine.ndb import model_form
# Define an example model and add a record.
class Contact(ndb.Model):
name = ndb.StringProperty(required=True)
city = ndb.StringProperty()
age = ndb.IntegerProperty(required=True)
is_admin = ndb.BooleanProperty(default=False)
new_entity = Contact(key_name='test', name='Test Name', age=17)
new_entity.put()
# Generate a form based on the model.
ContactForm = model_form(Contact)
# Get a form populated with entity data.
entity = Contact.get_by_key_name('test')
form = ContactForm(obj=entity)
Properties from the model can be excluded from the generated form, or it can
include just a set of properties. For example:
.. code-block:: python
# Generate a form based on the model, excluding 'city' and 'is_admin'.
ContactForm = model_form(Contact, exclude=('city', 'is_admin'))
# or...
# Generate a form based on the model, only including 'name' and 'age'.
ContactForm = model_form(Contact, only=('name', 'age'))
The form can be generated setting field arguments:
.. code-block:: python
ContactForm = model_form(Contact, only=('name', 'age'), field_args={
'name': {
'label': 'Full name',
'description': 'Your name',
},
'age': {
'label': 'Age',
'validators': [validators.NumberRange(min=14, max=99)],
}
})
The class returned by ``model_form()`` can be used as a base class for forms
mixing non-model fields and/or other model forms. For example:
.. code-block:: python
# Generate a form based on the model.
BaseContactForm = model_form(Contact)
# Generate a form based on other model.
ExtraContactForm = model_form(MyOtherModel)
class ContactForm(BaseContactForm):
# Add an extra, non-model related field.
subscribe_to_news = f.BooleanField()
# Add the other model form as a subform.
extra = f.FormField(ExtraContactForm)
The class returned by ``model_form()`` can also extend an existing form
class:
.. code-block:: python
class BaseContactForm(Form):
# Add an extra, non-model related field.
subscribe_to_news = f.BooleanField()
# Generate a form based on the model.
ContactForm = model_form(Contact, base_class=BaseContactForm)
"""
from wtforms import Form, validators, fields as f
from wtforms.compat import string_types
from wtforms.ext.appengine.fields import GeoPtPropertyField, KeyPropertyField, StringListPropertyField, IntegerListPropertyField
def get_TextField(kwargs):
"""
Returns a ``TextField``, applying the ``ndb.StringProperty`` length limit
of 500 bytes.
"""
kwargs['validators'].append(validators.length(max=500))
return f.TextField(**kwargs)
def get_IntegerField(kwargs):
"""
Returns an ``IntegerField``, applying the ``ndb.IntegerProperty`` range
limits.
"""
v = validators.NumberRange(min=-0x8000000000000000, max=0x7fffffffffffffff)
kwargs['validators'].append(v)
return f.IntegerField(**kwargs)
class ModelConverterBase(object):
def __init__(self, converters=None):
"""
Constructs the converter, setting the converter callables.
:param converters:
A dictionary of converter callables for each property type. The
callable must accept the arguments (model, prop, kwargs).
"""
self.converters = {}
for name in dir(self):
if not name.startswith('convert_'):
continue
self.converters[name[8:]] = getattr(self, name)
def convert(self, model, prop, field_args):
"""
Returns a form field for a single model property.
:param model:
The ``db.Model`` class that contains the property.
:param prop:
The model property: a ``db.Property`` instance.
:param field_args:
Optional keyword arguments to construct the field.
"""
prop_type_name = type(prop).__name__
#check for generic property
if(prop_type_name == "GenericProperty"):
#try to get type from field args
generic_type = field_args.get("type")
if generic_type:
prop_type_name = field_args.get("type")
            # if no type is found, the generic property falls back to the text field built in convert_GenericProperty
kwargs = {
'label': prop._code_name.replace('_', ' ').title(),
'default': prop._default,
'validators': [],
}
if field_args:
kwargs.update(field_args)
if prop._required and prop_type_name not in self.NO_AUTO_REQUIRED:
kwargs['validators'].append(validators.required())
if kwargs.get('choices', None):
# Use choices in a select field.
kwargs['choices'] = [(v, v) for v in kwargs.get('choices')]
return f.SelectField(**kwargs)
if prop._choices:
# Use choices in a select field.
kwargs['choices'] = [(v, v) for v in prop._choices]
return f.SelectField(**kwargs)
else:
converter = self.converters.get(prop_type_name, None)
if converter is not None:
return converter(model, prop, kwargs)
else:
return self.fallback_converter(model, prop, kwargs)
class ModelConverter(ModelConverterBase):
"""
Converts properties from a ``ndb.Model`` class to form fields.
Default conversions between properties and fields:
    +-------------------------+--------------------+-------------+---------------------------+
    | Property subclass       | Field subclass     | datatype    | notes                     |
    +=========================+====================+=============+===========================+
    | StringProperty          | TextField          | unicode     | repeated support          |
    +-------------------------+--------------------+-------------+---------------------------+
    | BooleanProperty         | BooleanField       | bool        |                           |
    +-------------------------+--------------------+-------------+---------------------------+
    | IntegerProperty         | IntegerField       | int or long | repeated support          |
    +-------------------------+--------------------+-------------+---------------------------+
    | FloatProperty           | FloatField         | float       |                           |
    +-------------------------+--------------------+-------------+---------------------------+
    | DateTimeProperty        | DateTimeField      | datetime    | skipped if auto_now[_add] |
    +-------------------------+--------------------+-------------+---------------------------+
    | DateProperty            | DateField          | date        | skipped if auto_now[_add] |
    +-------------------------+--------------------+-------------+---------------------------+
    | TimeProperty            | DateTimeField      | time        | skipped if auto_now[_add] |
    +-------------------------+--------------------+-------------+---------------------------+
    | TextProperty            | TextAreaField      | unicode     |                           |
    +-------------------------+--------------------+-------------+---------------------------+
    | GeoPtProperty           | GeoPtPropertyField | db.GeoPt    |                           |
    +-------------------------+--------------------+-------------+---------------------------+
    | KeyProperty             | KeyPropertyField   | ndb.Key     |                           |
    +-------------------------+--------------------+-------------+---------------------------+
    | BlobKeyProperty         | FileField          | ndb.BlobKey |                           |
    +-------------------------+--------------------+-------------+---------------------------+
    | UserProperty            | None               | users.User  | always skipped            |
    +-------------------------+--------------------+-------------+---------------------------+
    | StructuredProperty      | None               | ndb.Model   | always skipped            |
    +-------------------------+--------------------+-------------+---------------------------+
    | LocalStructuredProperty | None               | ndb.Model   | always skipped            |
    +-------------------------+--------------------+-------------+---------------------------+
    | JsonProperty            | None               | unicode     | always skipped            |
    +-------------------------+--------------------+-------------+---------------------------+
    | PickleProperty          | None               | bytedata    | always skipped            |
    +-------------------------+--------------------+-------------+---------------------------+
    | GenericProperty         | TextField          | generic     | type set via field_args   |
    +-------------------------+--------------------+-------------+---------------------------+
    | ComputedProperty        | None               |             | always skipped            |
    +-------------------------+--------------------+-------------+---------------------------+
"""
# Don't automatically add a required validator for these properties
NO_AUTO_REQUIRED = frozenset(['ListProperty', 'StringListProperty', 'BooleanProperty'])
def convert_StringProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.StringProperty``."""
if prop._repeated:
return StringListPropertyField(**kwargs)
kwargs['validators'].append(validators.length(max=500))
return get_TextField(kwargs)
def convert_BooleanProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.BooleanProperty``."""
return f.BooleanField(**kwargs)
def convert_IntegerProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.IntegerProperty``."""
if prop._repeated:
return IntegerListPropertyField(**kwargs)
return get_IntegerField(kwargs)
def convert_FloatProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.FloatProperty``."""
return f.FloatField(**kwargs)
def convert_DateTimeProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.DateTimeProperty``."""
if prop._auto_now or prop._auto_now_add:
return None
return f.DateTimeField(format='%Y-%m-%d %H:%M:%S', **kwargs)
def convert_DateProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.DateProperty``."""
if prop._auto_now or prop._auto_now_add:
return None
return f.DateField(format='%Y-%m-%d', **kwargs)
def convert_TimeProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.TimeProperty``."""
if prop._auto_now or prop._auto_now_add:
return None
return f.DateTimeField(format='%H:%M:%S', **kwargs)
def convert_RepeatedProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
return None
def convert_UserProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.UserProperty``."""
return None
def convert_StructuredProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
return None
def convert_LocalStructuredProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
return None
def convert_JsonProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
return None
def convert_PickleProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
return None
def convert_GenericProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
kwargs['validators'].append(validators.length(max=500))
return get_TextField(kwargs)
def convert_BlobKeyProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.BlobKeyProperty``."""
return f.FileField(**kwargs)
def convert_TextProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.TextProperty``."""
return f.TextAreaField(**kwargs)
def convert_ComputedProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ComputedProperty``."""
return None
def convert_GeoPtProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.GeoPtProperty``."""
return GeoPtPropertyField(**kwargs)
def convert_KeyProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.KeyProperty``."""
if 'reference_class' not in kwargs:
try:
reference_class = prop._kind
except AttributeError:
reference_class = prop._reference_class
if isinstance(reference_class, string_types):
# reference class is a string, try to retrieve the model object.
mod = __import__(model.__module__, None, None, [reference_class], 0)
reference_class = getattr(mod, reference_class)
kwargs['reference_class'] = reference_class
kwargs.setdefault('allow_blank', not prop._required)
return KeyPropertyField(**kwargs)
def model_fields(model, only=None, exclude=None, field_args=None,
converter=None):
"""
Extracts and returns a dictionary of form fields for a given
``db.Model`` class.
:param model:
The ``db.Model`` class to extract fields from.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to a keyword arguments
used to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
converter = converter or ModelConverter()
field_args = field_args or {}
# Get the field names we want to include or exclude, starting with the
# full list of model properties.
props = model._properties
field_names = list(x[0] for x in sorted(props.items(), key=lambda x: x[1]._creation_counter))
if only:
field_names = list(f for f in only if f in field_names)
elif exclude:
field_names = list(f for f in field_names if f not in exclude)
# Create all fields.
field_dict = {}
for name in field_names:
field = converter.convert(model, props[name], field_args.get(name))
if field is not None:
field_dict[name] = field
return field_dict
def model_form(model, base_class=Form, only=None, exclude=None, field_args=None,
converter=None):
"""
Creates and returns a dynamic ``wtforms.Form`` class for a given
``ndb.Model`` class. The form class can be used as it is or serve as a base
for extended form classes, which can then mix non-model related fields,
subforms with other model forms, among other possibilities.
:param model:
The ``ndb.Model`` class to generate a form for.
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments
used to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
# Extract the fields from the model.
field_dict = model_fields(model, only, exclude, field_args, converter)
# Return a dynamically created form class, extending from base_class and
# including the created fields as properties.
return type(model._get_kind() + 'Form', (base_class,), field_dict)
| mit | 8,825,862,294,843,290,000 | 39.95933 | 128 | 0.554874 | false |
wrightjb/bolt-planar | py.py | 4 | 2214 | #############################################################################
# Copyright (c) 2010 by Casey Duncan
# Portions copyright (c) 2009 The Super Effective Team
# (www.supereffective.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name(s) of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AS IS AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#############################################################################
"""Convenience namespace module for importing Python class implementations"""
__all__ = ('Vec2', 'Point', 'Vec2Array', 'Seq2',
'Affine', 'Line', 'Ray', 'LineSegment', 'BoundingBox', 'Polygon')
from planar.vector import Vec2, Vec2Array, Seq2
from planar.vector import Vec2 as Point
from planar.transform import Affine
from planar.line import Line, Ray, LineSegment
from planar.box import BoundingBox
from planar.polygon import Polygon
| bsd-3-clause | -1,469,202,583,659,859,000 | 54.35 | 78 | 0.71364 | false |
vvinuv/pymorph | pymorph/maskfunc_easy.py | 1 | 2102 | import os
import fitsio
import numpy as np
import pymconvolve
from mask_or_fit import *
class MaskFunc:
"""
    The class for making masks for GALFIT. It uses the masking conditions
from config.py. The output mask image will have the name
M_string(galid).fits
"""
def __init__(self, mimg, xcntr, ycntr, NXPTS, NYPTS, values):
self.mimg = mimg
self.xcntr = xcntr
self.ycntr = ycntr
self.NXPTS = NXPTS
self.NYPTS = NYPTS
self.values = values
def gmask(self, threshold, thresh_area, fstring, seg_fits, seg_cat,
avoidme=0, NoMask=False, seg_limit=1e-5, verbose=False):
target = GetSExObj(NXPTS=self.NXPTS, NYPTS=self.NYPTS,
values=self.values)
if verbose:
            print('Maskfunc: mimg ', self.mimg)
fseg = fitsio.FITS(seg_fits, 'r')
seg_mask = fseg[0].read()
fseg.close()
#print('1, seg_mask')
#print(seg_mask)
for line_j in open(seg_cat, 'r'):
if line_j[0] != '#': #line is not a comment
obj = []
for line in line_j.split():
obj.append(float(line))
neighbor = GetSExObj(NXPTS=self.NXPTS, NYPTS=self.NYPTS,
values=obj)
if target.get_mask(neighbor, threshold, thresh_area, avoidme) != 1:
#print('Yes', neighbor.sex_num)
seg_mask[np.where(seg_mask == neighbor.sex_num)] = 0
if NoMask:
seg_mask[np.where(seg_mask > 0)] = 0
else:
pass
boxcar = np.reshape(np.ones(3 * 3), (3, 3))
seg_mask = pymconvolve.Convolve(seg_mask, boxcar)
#print('2, seg_mask')
#print(seg_mask)
#sys.exit()
seg_mask[seg_mask > seg_limit] = 1
seg_mask[seg_mask != 1] = 0
#seg_mask = np.where(seg_mask > seg_limit, 1, 0)
fseg = fitsio.FITS(self.mimg, 'rw')
fseg.write(seg_mask, clobber=True)
fseg.close()
| gpl-2.0 | -3,370,562,929,278,238,000 | 27.794521 | 83 | 0.519981 | false |
d-quinn/bio_quinn2013 | single_alignment/pipeline-singleend_altfastas.py | 1 | 13742 | #!/usr/bin/python
## Global modules
import os
from os.path import join
import logging
import shutil
import pysam
## Local modules
import mglobals
import helpers
import snp2gene
import snps_combine  # referenced by combine_snps() below; assumed to ship alongside snp2gene in this repo
log = logging.getLogger('pipeline')
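# Each stage below follows the same pattern: helpers.log_func wraps the stage
# function (presumably adding entry/exit logging), and helpers.multiprocess(iterable)
# appears to run the inner *_call function once per item -- per sample, or per
# (sample, ref_fasta) pair -- in parallel. Both decorators live in the local
# helpers module.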
@helpers.log_func
def my_tophat():
if mglobals.original:
log.info('Aligning reads to the original reference fasta')
fastas = mglobals.original_fastas
else:
log.info('Aligning reads to alternate reference fastas')
fastas = mglobals.alternate_fastas
@helpers.multiprocess(zip(mglobals.samples_list, fastas))
def tophat_call(sample, ref_fasta):
if mglobals.original:
os.chdir(join(mglobals.original_path, sample))
else:
os.chdir(join(mglobals.alternate_path, sample))
ref_fasta_base = ref_fasta.split('.')[0]
mismatches = '2'
number_of_samples = len(mglobals.samples_list)
threads_per_sample = mglobals.cpu_count//number_of_samples
threads = str(threads_per_sample)
log.info('threads per sample ' + threads)
log.info('tophat: aligning sample {} with ref fasta {}'.format(sample, ref_fasta))
tophat_params = ['nice', '-n', '5',
'tophat',
'-p', threads,
'-G', mglobals.dros_gtf,
'--transcriptome-index=../transcriptome_data/known',
'-N', mismatches,
'--b2-L', '20',
'--b2-N', '1',
'--read-edit-dist', mismatches,
'-o', (sample + '_thout'),
'--no-novel-juncs',
ref_fasta_base,
join(mglobals.samples_path, (sample + '.fastq'))]
helpers.sub_call(tophat_params)
log.info('tophat: finished analyzing sample: {} with ref fasta: {}'.format(sample, ref_fasta))
# Copy transcriptome index to original_path and alternate_path
for path in [mglobals.original_path, mglobals.alternate_path]:
if not os.path.exists(join(path, 'transcriptome_data')):
log.info('Linking transcriptome data to ' + path)
os.symlink(mglobals.dros_gtf_index, join(path, 'transcriptome_data'))
tophat_call()
@helpers.log_func
def my_alignment_filter():
@helpers.multiprocess(mglobals.samples_list)
def filter_call(sample):
if mglobals.original:
os.chdir(join(mglobals.original_path, sample, (sample + '_thout')))
else:
os.chdir(join(mglobals.alternate_path, sample, (sample + '_thout')))
log.info('Filtering aligned reads for: ' + sample)
# Index each bamfile
if not os.path.exists('accepted_hits.bam.bai'):
pysam.index('accepted_hits.bam')
# Sort by NH flag
raw_reads = pysam.Samfile('accepted_hits.bam', 'rb')
filter_reads = pysam.Samfile('filter.bam', 'wb', template=raw_reads)
for read in raw_reads.fetch():
if ('NH', 1) in read.tags:
filter_reads.write(read)
raw_reads.close()
filter_reads.close()
pysam.index('filter.bam')
filter_call()
@helpers.log_func
def my_pileup(out_file_extension='.mpileup'):
if mglobals.original:
fastas = mglobals.original_fastas
else:
fastas = mglobals.alternate_fastas
@helpers.multiprocess(zip(mglobals.samples_list, fastas))
def pileup_call(sample, ref_fasta):
if mglobals.original:
os.chdir(join(mglobals.original_path, sample))
else:
os.chdir(join(mglobals.alternate_path, sample))
log.info('mpileup: creating .mpileup file for {} with ref fasta: {}'.format(sample, ref_fasta))
pileup_command = ['nice', '-n', '5',
'samtools', 'mpileup',
'-B',
'-d10000000',
'-f', ref_fasta,
join((sample + '_thout'), 'filter.bam')]
output_file = sample + out_file_extension
with open(output_file, 'w') as output_file:
helpers.sub_call(pileup_command, stdout=output_file)
log.info('mpileup: finished for {} with ref fasta: {}'.format(sample, ref_fasta))
pileup_call()
@helpers.log_func
def my_variant_calls(in_file_extension='.mpileup', out_file_extension='.vcf'):
'''
    Note: build_alternate_fastas() depends on out_file_extension being '.vcf'.
'''
@helpers.multiprocess(mglobals.samples_list)
def variant_calls_call(sample):
if mglobals.original:
os.chdir(join(mglobals.original_path, sample))
else:
os.chdir(join(mglobals.alternate_path, sample))
log.info('Varscan: creating csv for: ' + sample)
varscan_command = ['nice', '-n', '5',
'java', '-jar', mglobals.varscan_path,
'mpileup2snp',
(sample + in_file_extension),
'--min-coverage', '2',
'--min-avg-qual', '20',
'--strand-filter', '0',
'--p-value', '1',
'--min-var-freq', '1e-10',
'--output-vcf', '1',
]
output_file = sample + out_file_extension
with open(output_file, 'w') as out:
helpers.sub_call(varscan_command, stdout=out)
log.info('varscan finished for: ' + sample)
variant_calls_call()
@helpers.log_func
def cov_and_dgrp_filter(in_file_extension='.vcf', out_file_extension='_freeze2.vcf'):
@helpers.multiprocess(mglobals.samples_list)
def filter_call(sample):
if mglobals.original:
os.chdir(join(mglobals.original_path, sample))
else:
os.chdir(join(mglobals.alternate_path, sample))
log.info('Filtering {0} by coverage'.format(sample))
helpers.filter_vcf_by_coverage_cutoffs(vcf=(sample + in_file_extension),
cutoff_table=mglobals.coverage_cutoffs)
log.info('Filtering {0} according to SNP file: {1}'.format(sample, mglobals.current_snp_file))
dgrp_intersect_command = ['nice', '-n', '5',
'intersectBed',
'-a', (sample + '_covfil.vcf'), # the output of the helper
# function above.
'-b', mglobals.current_snp_file,
'-wa'
]
sample_dgrp_intersect = sample + out_file_extension
with open(sample_dgrp_intersect, 'w') as out:
helpers.sub_call(dgrp_intersect_command, stdout=out)
filter_call()
@helpers.log_func
def annotate_vcf(in_file_extension='_freeze2.vcf', out_file_extension='_geneannot.vcf'):
@helpers.multiprocess(mglobals.samples_list)
def annotate_call(sample):
if mglobals.original:
os.chdir(join(mglobals.original_path, sample))
else:
os.chdir(join(mglobals.alternate_path, sample))
log.info('Annotating ' + sample + in_file_extension + ' with ' + mglobals.current_genes_file)
gtf_intersect_command = ['nice', '-n', '5',
'intersectBed',
'-a', (sample + in_file_extension),
'-b', mglobals.current_genes_file,
'-wa',
'-wb'
]
sample_gtf_intersect = sample + out_file_extension
with open(sample_gtf_intersect, 'w') as out:
helpers.sub_call(gtf_intersect_command, stdout=out)
annotate_call()
@helpers.log_func
def build_alternate_fastas(in_file_extension='_geneannot.vcf'):
# If we are doing the original alignment, we can now build the alternate
# reference fastas for each sample
@helpers.multiprocess(mglobals.samples_list)
def build_fastas_call(sample):
os.chdir(join(mglobals.original_path, sample))
log.info('Beginning to build alternate fasta for: ' + sample)
fixed_vcf = sample + '_fix.vcf'
log.info('Removing duplicated annotations (per transcript annotations)')
helpers.remove_dups(input_f=(sample + in_file_extension),
output_f=(sample + '.temp'))
log.info('Removing duplicate alleles and adding header')
# The fact that the original vcf was named sample.vcf is hardcoded
# here. Be careful.
helpers.vcf_fix(template_f=(sample + '.vcf'),
input_f=(sample + '.temp'),
output_f=fixed_vcf)
# Delete temporary file
os.remove(sample + '.temp')
log.info('Creating alternate fasta')
new_fasta = sample + '_unfixed.fa'
helpers.sub_call(['nice', '-n', '5',
'java', '-Xmx2g', '-jar',
mglobals.gatk_path,
'-R', 'genome.fa',
'-T', 'FastaAlternateReferenceMaker',
'-o', new_fasta,
'--variant', fixed_vcf])
# Fix the fasta
log.info('Fixing gatk fasta')
# If you change this name, you need to change the alternate fastas list as well.
final_fasta = sample + '.fa'
helpers.fasta_fix(input_f=new_fasta, output_f=final_fasta)
# Delete the unfixed version
os.remove(new_fasta)
log.info('Moving new fasta to: ' + join(mglobals.alternate_path, sample))
shutil.move(final_fasta, join(mglobals.alternate_path, sample))
log.info('Indexing new fasta')
os.chdir(join(mglobals.alternate_path, sample))
helpers.sub_call(['bowtie2-build',
'-f', final_fasta,
sample])
build_fastas_call()
@helpers.log_func
def vcf_to_csv(in_file_extension='_geneannot.vcf',
out_file_extension='_INTER_py.csv'):
@helpers.multiprocess(mglobals.samples_list)
def vcf_to_csv_call(sample):
if mglobals.original:
os.chdir(join(mglobals.original_path, sample))
else:
os.chdir(join(mglobals.alternate_path, sample))
log.info('Converting vcf to csv for: ' + sample)
snp2gene.converter(input_f=(sample + in_file_extension),
output_f=(sample + out_file_extension))
vcf_to_csv_call()
@helpers.log_func
def combine_snps(in_file_extension='_INTER_py.csv'):
os.chdir(mglobals.samples_path)
@helpers.multiprocess(mglobals.samples_list)
def combine_snps_call(sample):
log.info('Combining SNPs for: ' + sample)
snps_combine.combine_SNPs(orig_f=join(mglobals.original_path, sample,
(sample + in_file_extension)),
new_f=join(mglobals.alternate_path, sample,
(sample + in_file_extension)),
orig_bam=join(mglobals.original_path, sample,
(sample + '_thout'), 'filter.bam'),
new_bam=join(mglobals.alternate_path, sample,
(sample + '_thout'), 'filter.bam'),
ref_vcf=join(mglobals.current_snp_file),
output_f=(sample + '_snps.csv'),
cutoff_table=mglobals.coverage_cutoffs)
mean_propR, snp_count = snps_combine.quick_mean_propR(sample + '_snps.csv')
log.info('Mean proportion reference for {} = {}'.format(sample, mean_propR))
log.info('\tNumber of snps = {}'.format(snp_count))
combine_snps_call()
@helpers.log_func
def csv_recalibrate(in_file_extension='_INTER_py.csv', out_file_extension='_genes.csv'):
os.chdir(mglobals.samples_path)
@helpers.multiprocess(mglobals.samples_list)
def csv_recalibrate_call(sample):
log.info('Combining genes for: ' + sample)
snp2gene.snp2gene(input_orig=join(mglobals.original_path, sample,
(sample + in_file_extension)),
input_new=join(mglobals.alternate_path, sample,
(sample + in_file_extension)),
output_f=(sample + out_file_extension),
orig_bam=join(mglobals.original_path, sample,
(sample + '_thout'), 'filter.bam'),
new_bam=join(mglobals.alternate_path, sample,
(sample + '_thout'), 'filter.bam'),
ref_vcf=mglobals.current_snp_file,
snp_stats_f=(sample + '_snp_stats.csv'),
cutoff_table=mglobals.coverage_cutoffs)
csv_recalibrate_call()
def main():
mglobals.original = True
my_tophat()
my_alignment_filter()
my_pileup()
my_variant_calls()
cov_and_dgrp_filter()
annotate_vcf()
# build_alternate_fastas()
vcf_to_csv()
mglobals.original = False
# my_tophat()
# my_alignment_filter()
# my_pileup()
# my_variant_calls()
# cov_and_dgrp_filter()
# annotate_vcf()
# vcf_to_csv()
# combine_snps()
# csv_recalibrate()
log.info('Pipeline completed successfully!')
if __name__ == '__main__':
main()
| mit | 5,876,809,104,163,541,000 | 36.342391 | 103 | 0.538641 | false |
lmaycotte/quark | quark/tests/plugin_modules/test_networks.py | 1 | 19262 | # Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import json
import mock
from neutron import context
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from quark.db import api as db_api
from quark.db import models
from quark import exceptions as q_exc
from quark import network_strategy
from quark import plugin_views
from quark.tests import test_quark_plugin
class TestQuarkGetNetworks(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, nets=None, subnets=None):
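        # Patch quark.db.api.network_find so it returns the supplied nets/subnets,
        # wrapped as model objects, for the duration of each test.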
net_mods = []
subnet_mods = []
for subnet in subnets:
subnet_mod = models.Subnet()
subnet_mod.update(subnet)
subnet_mods.append(subnet_mod)
if isinstance(nets, list):
for net in nets:
net_mod = models.Network()
net_mod.update(net)
net_mod["subnets"] = subnet_mods
net_mods.append(net_mod)
else:
if nets:
net_mods = nets.copy()
net_mods["subnets"] = subnet_mods
else:
net_mods = nets
db_mod = "quark.db.api"
with mock.patch("%s.network_find" % db_mod) as net_find:
net_find.return_value = net_mods
yield
def test_get_networks(self):
subnet = dict(id=1)
net = dict(id=1, tenant_id=self.context.tenant_id, name="public",
status="ACTIVE")
with self._stubs(nets=[net], subnets=[subnet]):
nets = self.plugin.get_networks(self.context, {})
for key in net.keys():
self.assertEqual(nets[0][key], net[key])
self.assertEqual(nets[0]["subnets"][0], 1)
def test_get_network(self):
subnet = dict(id=1)
net = dict(id=1, tenant_id=self.context.tenant_id, name="public",
status="ACTIVE")
expected = net.copy()
expected["admin_state_up"] = True
expected["shared"] = False
expected["status"] = "ACTIVE"
with self._stubs(nets=net, subnets=[subnet]):
res = self.plugin.get_network(self.context, 1)
for key in expected.keys():
self.assertEqual(res[key], expected[key])
self.assertEqual(res["subnets"][0], 1)
def test_get_network_no_network_fails(self):
with self._stubs(nets=None, subnets=[]):
with self.assertRaises(n_exc.NetworkNotFound):
self.plugin.get_network(self.context, 1)
class TestQuarkGetNetworksShared(test_quark_plugin.TestQuarkPlugin):
def setUp(self):
super(TestQuarkGetNetworksShared, self).setUp()
self.strategy = {"public_network":
{"bridge": "xenbr0",
"subnets": {"4": "public_v4",
"6": "public_v6"}}}
self.strategy_json = json.dumps(self.strategy)
self.old = plugin_views.STRATEGY
plugin_views.STRATEGY = network_strategy.JSONStrategy(
self.strategy_json)
cfg.CONF.set_override("default_net_strategy", self.strategy_json,
"QUARK")
def tearDown(self):
plugin_views.STRATEGY = self.old
@contextlib.contextmanager
def _stubs(self, nets=None, subnets=None):
net_mods = []
if isinstance(nets, list):
for net in nets:
subnet_mods = []
subnets = net.pop('subnets', [])
for subnet in subnets:
subnet_mod = models.Subnet()
subnet_mod.update(subnet)
subnet_mods.append(subnet_mod)
net_mod = models.Network()
net_mod.update(net)
net_mod["subnets"] = subnet_mods
net_mods.append(net_mod)
else:
if nets:
net_mods = nets.copy()
else:
net_mods = nets
db_mod = "quark.db.api"
db_api.STRATEGY = network_strategy.JSONStrategy(self.strategy_json)
network_strategy.STRATEGY = network_strategy.JSONStrategy(
self.strategy_json)
with mock.patch("%s._network_find" % db_mod) as net_find:
net_find.return_value = net_mods
yield net_find
def test_get_networks_shared(self):
net0 = dict(id='public_network', tenant_id=self.context.tenant_id,
name="mynet", status="ACTIVE", subnets=[dict(id=0)])
net1 = dict(id=1, tenant_id=self.context.tenant_id, name="mynet",
status="ACTIVE", subnets=[dict(id=1)])
with self._stubs(nets=[net0, net1]) as net_find:
ret = self.plugin.get_networks(self.context, None, None, None,
False, {"shared": [True]})
""" Includes regression for RM8483. """
for net in ret:
if net['shared']:
self.assertEqual(2, len(net['subnets']))
else:
self.assertEqual(1, len(net['subnets']))
net_find.assert_called_with(self.context, None, None, None, False,
None, join_subnets=True,
defaults=["public_network"],
provider_query=False)
def test_get_networks_shared_false(self):
net0 = dict(id='public_network', tenant_id=self.context.tenant_id,
name="mynet", status="ACTIVE", subnets=[dict(id=0)])
net1 = dict(id=1, tenant_id=self.context.tenant_id, name="mynet",
status="ACTIVE")
with self._stubs(nets=[net0, net1]) as net_find:
invert = db_api.INVERT_DEFAULTS
self.plugin.get_networks(self.context, None, None, None, False,
{"shared": [False]})
net_find.assert_called_with(self.context, None, None, None, False,
None, join_subnets=True,
defaults=[invert, "public_network"],
provider_query=False)
def test_get_networks_no_shared(self):
net0 = dict(id='public_network', tenant_id=self.context.tenant_id,
name="mynet", status="ACTIVE", subnets=[dict(id=0)])
net1 = dict(id=1, tenant_id=self.context.tenant_id, name="mynet",
status="ACTIVE")
with self._stubs(nets=[net0, net1]) as net_find:
self.plugin.get_networks(self.context, None, None, None, False)
net_find.assert_called_with(self.context, None, None, None,
False, None, join_subnets=True,
defaults=[], provider_query=False)
class TestQuarkGetNetworkCount(test_quark_plugin.TestQuarkPlugin):
def test_get_port_count(self):
"""This isn't really testable."""
with mock.patch("quark.db.api.network_count_all"):
self.plugin.get_networks_count(self.context, {})
class TestQuarkUpdateNetwork(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, net=None):
net_mod = net
if net:
net_mod = net.copy()
db_mod = "quark.db.api"
with contextlib.nested(
mock.patch("%s.network_find" % db_mod),
mock.patch("%s.network_update" % db_mod)
) as (net_find, net_update):
net_find.return_value = net_mod
net_update.return_value = net_mod
yield net_update
def test_update_network(self):
net = dict(id=1, tenant_id=self.context.tenant_id)
new_net = net.copy()
new_net["ipam_strategy"] = "BOTH_REQUIRED"
with self._stubs(net=net) as net_update:
self.plugin.update_network(self.context, 1, dict(network=new_net))
net_update.assert_called_once_with(
self.context, net, id=net["id"],
tenant_id=self.context.tenant_id)
def test_update_network_not_found_fails(self):
with self._stubs(net=None):
with self.assertRaises(n_exc.NetworkNotFound):
self.plugin.update_network(self.context, 1, None)
def test_update_network_admin_set_ipam_strategy(self):
net = dict(id=1, tenant_id=self.context.tenant_id)
new_net = net.copy()
new_net["ipam_strategy"] = "BOTH_REQUIRED"
admin_ctx = self.context.elevated()
with self._stubs(net=net) as net_update:
self.plugin.update_network(admin_ctx, 1, dict(network=new_net))
net_update.assert_called_once_with(
admin_ctx, net, ipam_strategy=new_net["ipam_strategy"],
id=net["id"], tenant_id=self.context.tenant_id)
class TestQuarkDeleteNetwork(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, net=None, ports=None, subnets=None):
subnets = subnets or []
net_mod = net
port_mods = []
subnet_mods = []
for port in ports:
port_model = models.Port()
port_model.update(port)
port_mods.append(port_model)
for subnet in subnets:
subnet_mod = models.Subnet()
subnet_mod.update(subnet)
subnet_mods.append(subnet_mod)
if net:
net_mod = models.Network()
net_mod.update(net)
net_mod.ports = port_mods
net_mod["subnets"] = subnet_mods
net_mod["network_plugin"] = "BASE"
db_mod = "quark.db.api"
with contextlib.nested(
mock.patch("%s.network_find" % db_mod),
mock.patch("%s.network_delete" % db_mod),
mock.patch("quark.drivers.base.BaseDriver.delete_network"),
mock.patch("%s.subnet_delete" % db_mod)
) as (net_find, net_delete, driver_net_delete, subnet_del):
net_find.return_value = net_mod
yield net_delete
def test_delete_network(self):
net = dict(id=1, tenant_id=self.context.tenant_id)
with self._stubs(net=net, ports=[]) as net_delete:
self.plugin.delete_network(self.context, 1)
self.assertTrue(net_delete.called)
def test_delete_network_with_ports_fails(self):
net = dict(id=1, tenant_id=self.context.tenant_id)
port = dict(id=2)
with self._stubs(net=net, ports=[port]):
with self.assertRaises(n_exc.NetworkInUse):
self.plugin.delete_network(self.context, 1)
def test_delete_network_not_found_fails(self):
with self._stubs(net=None, ports=[]):
with self.assertRaises(n_exc.NetworkNotFound):
self.plugin.delete_network(self.context, 1)
def test_delete_network_with_subnets_passes(self):
net = dict(id=1, tenant_id=self.context.tenant_id)
subnet = dict(id=1)
with self._stubs(net=net, ports=[], subnets=[subnet]) as net_delete:
self.plugin.delete_network(self.context, 1)
self.assertTrue(net_delete.called)
class TestQuarkCreateNetwork(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, net=None, subnet=None, ports=None, find_net=False):
net_mod = net
subnet_mod = None
if net:
net_mod = models.Network()
net_mod.update(net)
if subnet:
subnet_mod = models.Subnet()
subnet_mod.update(subnet)
found_net = None
if find_net:
found_net = models.Network()
db_mod = "quark.db.api"
with contextlib.nested(
mock.patch("%s.network_create" % db_mod),
mock.patch("%s.subnet_create" % db_mod),
mock.patch("quark.drivers.base.BaseDriver.create_network"),
mock.patch("%s.network_find" % db_mod)
) as (net_create, sub_create, driver_net_create, net_find):
net_create.return_value = net_mod
sub_create.return_value = subnet_mod
net_find.return_value = found_net
yield net_create
def test_create_network(self):
net = dict(id=1, name="public", admin_state_up=True,
tenant_id=0, network_plugin="BASE")
with self._stubs(net=net) as net_create:
net = self.plugin.create_network(self.context, dict(network=net))
self.assertTrue(net_create.called)
self.assertEqual(len(net.keys()), 7)
self.assertIsNotNone(net["id"])
self.assertEqual(net["name"], "public")
self.assertTrue(net["admin_state_up"])
self.assertEqual(net["status"], "ACTIVE")
self.assertEqual(net["subnets"], [])
self.assertEqual(net["shared"], False)
self.assertEqual(net["tenant_id"], 0)
def test_create_network_with_subnets(self):
subnet = dict(id=2, cidr="172.168.0.0/24", tenant_id=0)
net = dict(id=1, name="public", admin_state_up=True,
tenant_id=0)
with self._stubs(net=net, subnet=subnet) as net_create:
net.update(dict(subnets=[dict(subnet=subnet)]))
net = self.plugin.create_network(self.context, dict(network=net))
self.assertTrue(net_create.called)
self.assertEqual(len(net.keys()), 7)
self.assertIsNotNone(net["id"])
self.assertEqual(net["name"], "public")
self.assertTrue(net["admin_state_up"])
self.assertEqual(net["status"], "ACTIVE")
self.assertEqual(net["subnets"], [2])
self.assertEqual(net["shared"], False)
self.assertEqual(net["tenant_id"], 0)
def test_create_network_with_id(self):
net = dict(id="abcdef", name="public", admin_state_up=True,
tenant_id=0)
ctxt = context.Context('fake', 'fake', is_admin=True,
load_admin_roles=False)
with self._stubs(net=net):
res = self.plugin.create_network(ctxt, dict(network=net))
self.assertEqual(net["id"], res["id"])
def test_create_network_with_id_already_exists_raises(self):
net = dict(id="abcdef", name="public", admin_state_up=True,
tenant_id=0)
ctxt = context.Context('fake', 'fake', is_admin=True,
load_admin_roles=False)
with self._stubs(net=net, find_net=True):
with self.assertRaises(q_exc.NetworkAlreadyExists):
self.plugin.create_network(ctxt, dict(network=net))
def test_create_network_with_id_not_admin_ignores_id(self):
net = dict(id="abcdef", name="public", admin_state_up=True,
tenant_id=0)
with self._stubs(net=net):
res = self.plugin.create_network(self.context, dict(network=net))
self.assertNotEqual(net["id"], res["id"])
def test_create_network_with_ipam_strategy(self):
net = dict(id="abcdef", name="public", admin_state_up=True,
tenant_id=0, ipam_strategy="BOTH")
admin_context = self.context.elevated()
original = cfg.CONF.QUARK.show_ipam_strategy
cfg.CONF.set_override('show_ipam_strategy', True, "QUARK")
with self._stubs(net=net):
res = self.plugin.create_network(admin_context, dict(network=net))
self.assertEqual(res["ipam_strategy"], net["ipam_strategy"])
cfg.CONF.set_override('show_ipam_strategy', original, "QUARK")
def test_create_network_with_bad_ipam_strategy_raises(self):
net = dict(id="abcdef", name="public", admin_state_up=True,
tenant_id=0, ipam_strategy="BUSTED")
admin_context = self.context.elevated()
with self._stubs(net=net):
with self.assertRaises(q_exc.InvalidIpamStrategy):
self.plugin.create_network(admin_context, dict(network=net))
class TestQuarkDiagnoseNetworks(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, nets=None, subnets=None):
net_mods = []
subnet_mods = []
if subnets:
for subnet in subnets:
subnet_mod = models.Subnet()
subnet_mod.update(subnet)
subnet_mods.append(subnet_mod)
if nets:
for net in nets:
net_mod = models.Network()
net_mod["network_plugin"] = "BASE"
net_mod.update(net)
net_mod["subnets"] = subnet_mods
net_mods.append(net_mod)
else:
if nets:
net_mods = nets.copy()
net_mods["subnets"] = subnet_mods
else:
net_mods = nets
db_mod = "quark.db.api"
with mock.patch("%s.network_find" % db_mod) as net_find:
net_find.return_value = net_mods
yield
def test_diagnose_network_no_network_found(self):
with self._stubs():
with self.assertRaises(n_exc.NetworkNotFound):
self.plugin.diagnose_network(self.context.elevated(), "12345",
None)
def test_diagnose_network_not_authorized(self):
with self._stubs():
with self.assertRaises(n_exc.NotAuthorized):
self.plugin.diagnose_network(self.context, "12345",
None)
def test_diagnose_network_with_wildcard_and_no_networks(self):
db_mod = "quark.db.api"
with mock.patch("%s.network_find" % db_mod) as net_find:
net_find.return_value = []
actual = self.plugin.diagnose_network(self.context.elevated(),
"*", {})
expected = {'networks': []}
self.assertEqual(expected, actual)
def test_diagnose_network_with_wildcard_and_networks(self):
subnet = dict(id=1)
net = dict(id=1, tenant_id=self.context.tenant_id, name="public",
status="ACTIVE", network_plugin="BASE")
with self._stubs(nets=[net], subnets=[subnet]):
db_mod = "quark.db.api"
with mock.patch("%s.network_find" % db_mod) as net_find:
net_find.return_value = [net]
nets = self.plugin.diagnose_network(self.context.elevated(),
"*", {})
net.pop("network_plugin")
for key in net.keys():
self.assertEqual(nets['networks'][0][key], net[key])
| apache-2.0 | -7,518,295,207,149,516,000 | 39.982979 | 78 | 0.563285 | false |
eayunstack/fuel-web | nailgun/nailgun/test/unit/test_fault_tolerance.py | 6 | 1604 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.orchestrator.provisioning_serializers import \
ProvisioningSerializer
from nailgun.test import base
class TestFaultTolerance(base.BaseTestCase):
def test_generating_fault_tolerance_data(self):
cluster = self.env.create(
nodes_kwargs=[
{'roles': ['controller']},
{'roles': ['controller']},
{'roles': ['controller', 'cinder']},
{'roles': ['compute', 'cinder']},
{'roles': ['compute']},
{'roles': ['cinder']}])
cluster_db = self.db.query(Cluster).get(cluster['id'])
uids = [node.uid for node in cluster_db.nodes
if 'compute' in node.roles]
correct_res = [{'uids': uids, 'percentage': 2}]
res = ProvisioningSerializer.fault_tolerance(cluster_db,
cluster_db.nodes)
self.assertEqual(res, correct_res)
| apache-2.0 | -7,439,808,265,353,819,000 | 40.128205 | 78 | 0.619701 | false |
Grogdor/CouchPotatoServer | couchpotato/core/plugins/base.py | 6 | 15865 | import threading
from urllib import quote
from urlparse import urlparse
import glob
import inspect
import os.path
import re
import time
import traceback
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toSafeString, \
toUnicode, sp
from couchpotato.core.helpers.variable import getExt, md5, isLocalIP, scanForPassword, tryInt, getIdentifier, \
randomString
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import MaxRetryError
from tornado import template
from tornado.web import StaticFileHandler
log = CPLog(__name__)
class Plugin(object):
_class_name = None
_database = None
plugin_path = None
enabled_option = 'enabled'
auto_register_static = True
_needs_shutdown = False
_running = None
_locks = {}
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'
http_last_use = {}
http_last_use_queue = {}
http_time_between_calls = 0
http_failed_request = {}
http_failed_disabled = {}
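    # Rough subclass sketch (names and URL below are illustrative only, not taken
    # from this file):
    #
    #   class MyNotifier(Plugin):
    #       def notify(self, message):
    #           host = self.conf('host')  # read this plugin's setting
    #           return self.urlopen('http://%s/notify' % host,
    #                               data = {'msg': message})  # throttled request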
def __new__(cls, *args, **kwargs):
new_plugin = super(Plugin, cls).__new__(cls)
new_plugin.registerPlugin()
return new_plugin
def registerPlugin(self):
addEvent('app.do_shutdown', self.doShutdown)
addEvent('plugin.running', self.isRunning)
self._running = []
if self.auto_register_static:
self.registerStatic(inspect.getfile(self.__class__))
# Setup database
if self._database:
addEvent('database.setup', self.databaseSetup)
def databaseSetup(self):
for index_name in self._database:
klass = self._database[index_name]
fireEvent('database.setup_index', index_name, klass)
def conf(self, attr, value = None, default = None, section = None):
class_name = self.getName().lower().split(':')[0].lower()
return Env.setting(attr, section = section if section else class_name, value = value, default = default)
def deleteConf(self, attr):
return Env._settings.delete(attr, section = self.getName().lower().split(':')[0].lower())
def getName(self):
return self._class_name or self.__class__.__name__
def setName(self, name):
self._class_name = name
def renderTemplate(self, parent_file, templ, **params):
t = template.Template(open(os.path.join(os.path.dirname(parent_file), templ), 'r').read())
return t.generate(**params)
def registerStatic(self, plugin_file, add_to_head = True):
# Register plugin path
self.plugin_path = os.path.dirname(plugin_file)
static_folder = toUnicode(os.path.join(self.plugin_path, 'static'))
if not os.path.isdir(static_folder):
return
# Get plugin_name from PluginName
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
class_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
# View path
path = 'static/plugin/%s/' % class_name
# Add handler to Tornado
Env.get('app').add_handlers(".*$", [(Env.get('web_base') + path + '(.*)', StaticFileHandler, {'path': static_folder})])
# Register for HTML <HEAD>
if add_to_head:
for f in glob.glob(os.path.join(self.plugin_path, 'static', '*')):
ext = getExt(f)
if ext in ['js', 'css']:
fireEvent('register_%s' % ('script' if ext in 'js' else 'style'), path + os.path.basename(f), f)
def createFile(self, path, content, binary = False):
path = sp(path)
self.makeDir(os.path.dirname(path))
if os.path.exists(path):
log.debug('%s already exists, overwriting file with new version', path)
write_type = 'w+' if not binary else 'w+b'
# Stream file using response object
if isinstance(content, requests.models.Response):
# Write file to temp
with open('%s.tmp' % path, write_type) as f:
for chunk in content.iter_content(chunk_size = 1048576):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
# Rename to destination
os.rename('%s.tmp' % path, path)
else:
try:
f = open(path, write_type)
f.write(content)
f.close()
os.chmod(path, Env.getPermission('file'))
except:
log.error('Unable writing to file "%s": %s', (path, traceback.format_exc()))
if os.path.isfile(path):
os.remove(path)
def makeDir(self, path):
path = sp(path)
try:
if not os.path.isdir(path):
os.makedirs(path, Env.getPermission('folder'))
return True
except Exception as e:
log.error('Unable to create folder "%s": %s', (path, e))
return False
def deleteEmptyFolder(self, folder, show_error = True, only_clean = None):
folder = sp(folder)
for item in os.listdir(folder):
full_folder = sp(os.path.join(folder, item))
if not only_clean or (item in only_clean and os.path.isdir(full_folder)):
for subfolder, dirs, files in os.walk(full_folder, topdown = False):
try:
os.rmdir(subfolder)
except:
if show_error:
log.info2('Couldn\'t remove directory %s: %s', (subfolder, traceback.format_exc()))
try:
os.rmdir(folder)
except:
if show_error:
log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))
    # HTTP helpers: urlopen() throttles per-host calls when http_time_between_calls
    # is set, tracks failed requests per host, and temporarily (15 minutes) stops
    # calling a host after repeated failures or an HTTP 429 response.
def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True, stream = False):
url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
if not headers: headers = {}
if not data: data = {}
# Fill in some headers
parsed_url = urlparse(url)
host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else ''))
headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host))
headers['Host'] = headers.get('Host', None)
headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')
headers['Connection'] = headers.get('Connection', 'keep-alive')
headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')
r = Env.get('http_opener')
# Don't try for failed requests
if self.http_failed_disabled.get(host, 0) > 0:
if self.http_failed_disabled[host] > (time.time() - 900):
log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
if not show_error:
raise Exception('Disabled calls to %s for 15 minutes because so many failed requests' % host)
else:
return ''
else:
del self.http_failed_request[host]
del self.http_failed_disabled[host]
self.wait(host, url)
status_code = None
try:
kwargs = {
'headers': headers,
'data': data if len(data) > 0 else None,
'timeout': timeout,
'files': files,
                'verify': False,  # verify_ssl: disabled for now, as too many sites have wrongly implemented certificates
'stream': stream,
}
method = 'post' if len(data) > 0 or files else 'get'
log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data'))
response = r.request(method, url, **kwargs)
status_code = response.status_code
if response.status_code == requests.codes.ok:
data = response if stream else response.content
else:
response.raise_for_status()
self.http_failed_request[host] = 0
except (IOError, MaxRetryError, Timeout):
if show_error:
log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(0)))
# Save failed requests by hosts
try:
# To many requests
if status_code in [429]:
self.http_failed_request[host] = 1
self.http_failed_disabled[host] = time.time()
if not self.http_failed_request.get(host):
self.http_failed_request[host] = 1
else:
self.http_failed_request[host] += 1
# Disable temporarily
if self.http_failed_request[host] > 5 and not isLocalIP(host):
self.http_failed_disabled[host] = time.time()
except:
log.debug('Failed logging failed requests for %s: %s', (url, traceback.format_exc()))
raise
self.http_last_use[host] = time.time()
return data
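	# Illustrative usage of urlopen above (example added for clarity, not part of the
	# original plugin): it returns the response body, or the requests.Response object
	# itself when stream = True. The URL and header below are made up:
	#   html = self.urlopen('http://example.com/api', headers = {'X-Key': 'abc'})
	#   resp = self.urlopen('http://example.com/big.bin', stream = True)
	# Hosts with repeated failures are temporarily disabled, as implemented above.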
def wait(self, host = '', url = ''):
if self.http_time_between_calls == 0:
return
try:
if host not in self.http_last_use_queue:
self.http_last_use_queue[host] = []
self.http_last_use_queue[host].append(url)
while True and not self.shuttingDown():
wait = (self.http_last_use.get(host, 0) - time.time()) + self.http_time_between_calls
if self.http_last_use_queue[host][0] != url:
time.sleep(.1)
continue
if wait > 0:
log.debug('Waiting for %s, %d seconds', (self.getName(), max(1, wait)))
time.sleep(min(wait, 30))
else:
self.http_last_use_queue[host] = self.http_last_use_queue[host][1:]
self.http_last_use[host] = time.time()
break
except:
log.error('Failed handling waiting call: %s', traceback.format_exc())
time.sleep(self.http_time_between_calls)
def beforeCall(self, handler):
self.isRunning('%s.%s' % (self.getName(), handler.__name__))
def afterCall(self, handler):
self.isRunning('%s.%s' % (self.getName(), handler.__name__), False)
def doShutdown(self, *args, **kwargs):
self.shuttingDown(True)
return True
def shuttingDown(self, value = None):
if value is None:
return self._needs_shutdown
self._needs_shutdown = value
def isRunning(self, value = None, boolean = True):
if value is None:
return self._running
if boolean:
self._running.append(value)
else:
try:
self._running.remove(value)
except:
log.error("Something went wrong when finishing the plugin function. Could not find the 'is_running' key")
def getCache(self, cache_key, url = None, **kwargs):
use_cache = not len(kwargs.get('data', {})) > 0 and not kwargs.get('files')
if use_cache:
cache_key_md5 = md5(cache_key)
cache = Env.get('cache').get(cache_key_md5)
if cache:
if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
return cache
if url:
try:
cache_timeout = 300
if 'cache_timeout' in kwargs:
cache_timeout = kwargs.get('cache_timeout')
del kwargs['cache_timeout']
data = self.urlopen(url, **kwargs)
if data and cache_timeout > 0 and use_cache:
self.setCache(cache_key, data, timeout = cache_timeout)
return data
except:
if not kwargs.get('show_error', True):
raise
log.debug('Failed getting cache: %s', (traceback.format_exc(0)))
return ''
def setCache(self, cache_key, value, timeout = 300):
cache_key_md5 = md5(cache_key)
log.debug('Setting cache %s', cache_key)
Env.get('cache').set(cache_key_md5, value, timeout)
return value
def createNzbName(self, data, media, unique_tag = False):
release_name = data.get('name')
tag = self.cpTag(media, unique_tag = unique_tag)
		# Check if a password is embedded in the filename
name_password = scanForPassword(data.get('name'))
if name_password:
release_name, password = name_password
tag += '{{%s}}' % password
elif data.get('password'):
tag += '{{%s}}' % data.get('password')
max_length = 127 - len(tag) # Some filesystems don't support 128+ long filenames
return '%s%s' % (toSafeString(toUnicode(release_name)[:max_length]), tag)
def createFileName(self, data, filedata, media, unique_tag = False):
name = self.createNzbName(data, media, unique_tag = unique_tag)
if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
return '%s.%s' % (name, 'rar')
return '%s.%s' % (name, data.get('protocol'))
def cpTag(self, media, unique_tag = False):
tag = ''
if Env.setting('enabled', 'renamer') or unique_tag:
identifier = getIdentifier(media) or ''
unique_tag = ', ' + randomString() if unique_tag else ''
tag = '.cp('
tag += identifier
tag += ', ' if unique_tag and identifier else ''
tag += randomString() if unique_tag else ''
tag += ')'
return tag if len(tag) > 7 else ''
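	# Examples of the tag built by cpTag (illustrative only, assuming getIdentifier()
	# returns an IMDB id such as 'tt0133093'; not part of the original code):
	#   cpTag(media)                     -> '.cp(tt0133093)'
	#   cpTag(media, unique_tag = True)  -> '.cp(tt0133093, aB3xYz81Qw)'  # random suffix
	# With no identifier and no unique tag the result is '' because len('.cp()') <= 7.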
def checkFilesChanged(self, files, unchanged_for = 60):
now = time.time()
file_too_new = False
file_time = []
for cur_file in files:
# File got removed while checking
if not os.path.isfile(cur_file):
file_too_new = now
break
# File has changed in last 60 seconds
file_time = self.getFileTimes(cur_file)
for t in file_time:
if t > now - unchanged_for:
file_too_new = tryInt(time.time() - t)
break
if file_too_new:
break
if file_too_new:
try:
time_string = time.ctime(file_time[0])
except:
try:
time_string = time.ctime(file_time[1])
except:
time_string = 'unknown'
return file_too_new, time_string
return False, None
def getFileTimes(self, file_path):
return [os.path.getmtime(file_path), os.path.getctime(file_path) if os.name != 'posix' else 0]
def isDisabled(self):
return not self.isEnabled()
def isEnabled(self):
return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None
def acquireLock(self, key):
lock = self._locks.get(key)
if not lock:
self._locks[key] = threading.RLock()
log.debug('Acquiring lock: %s', key)
self._locks.get(key).acquire()
def releaseLock(self, key):
lock = self._locks.get(key)
if lock:
log.debug('Releasing lock: %s', key)
self._locks.get(key).release()
| gpl-3.0 | -6,757,331,334,861,494,000 | 33.56427 | 136 | 0.547305 | false |
evancich/apm_motor | modules/waf/playground/folder_hashing/fhash.py | 5 | 1430 | #! /usr/bin/env python
# encoding: utf-8
"""
Modification to handle folders as if they were files.
Usually, the target folders are created automatically (Node.find_or_declare)
for files that need them so this is not really necessary.
This modification incurs a performance penalty (computing hashes,
creating additional tasks, checking if the folders are there
vs just creating the folders if missing), and can conceal serious
errors (confusing files and folders for example).
The build order will not look at the parent folder relationships,
we will need a testcase for this (overriding the function
Task.set_file_constraints is trivial)
"""
import stat, os
from waflib import Utils, Task
from waflib.TaskGen import feature
def h_file(filename):
"""now folders can have a signature too"""
st = os.stat(filename)
if stat.S_ISDIR(st[stat.ST_MODE]):
return Utils.md5(filename).digest()
m = Utils.md5()
m.update(str(st.st_mtime))
m.update(str(st.st_size))
m.update(filename)
return m.digest()
Utils.h_file = h_file
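# Note added for clarity (not in the original file): the assignment above
# monkey-patches waf's Utils.h_file globally, so every node signature computed after
# this tool is loaded uses the directory-aware version. A directory's signature is
# derived from its path only, while a file's signature still mixes mtime, size and path.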
@feature('mkdir')
def make_target_folder(self):
"""code provided as an example"""
try:
node = self.target
except AttributeError:
raise self.bld.errors.WafError('Missing target attribute on task generator %r' % self)
self.create_task('mkdir', [], node)
class mkdir(Task.Task):
"""calling node.mkdir() will be more efficient than creating folders"""
def run(self):
self.outputs[0].mkdir()
| gpl-3.0 | 8,427,203,223,392,257,000 | 28.183673 | 88 | 0.746154 | false |
quarkonics/zstack-woodpecker | integrationtest/vm/virtualrouter/eip/test_2l3s_eip_pf.py | 2 | 4843 | '''
Test Description:
Will create 1 VM with 2 l3 networks. 1 vnic will be assigned eip, the other will be assigned with pf.
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.config_operations as conf_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as zstack_vm_header
import zstackwoodpecker.zstack_test.zstack_test_port_forwarding as zstack_pf_header
import apibinding.inventory as inventory
import os
_config_ = {
'timeout' : 1000,
'noparallel' : True
}
PfRule = test_state.PfRule
Port = test_state.Port
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
eip_snatInboundTraffic_default_value = None
pf_snatInboundTraffic_default_value = None
def test():
global eip_snatInboundTraffic_default_value
global pf_snatInboundTraffic_default_value
#enable snatInboundTraffic and save global config value
eip_snatInboundTraffic_default_value = \
conf_ops.change_global_config('eip', 'snatInboundTraffic', 'true')
pf_snatInboundTraffic_default_value = \
conf_ops.change_global_config('portForwarding', \
'snatInboundTraffic', 'true')
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
l3_net_list = [l3_net_uuid]
l3_name = os.environ.get('l3VlanDNATNetworkName')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
l3_net_list.append(l3_net_uuid)
vm = test_stub.create_vm(l3_net_list, image_uuid, '2_l3_pf_vm')
test_obj_dict.add_vm(vm)
l3_name = os.environ.get('l3NoVlanNetworkName1')
vr_l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vrs = test_lib.lib_find_vr_by_l3_uuid(vr_l3_uuid)
temp_vm1 = None
if not vrs:
        #create temp_vm1 for getting novlan's vr to test pf_vm port forwarding
temp_vm1 = test_stub.create_user_vlan_vm()
test_obj_dict.add_vm(temp_vm1)
vr1 = test_lib.lib_find_vr_by_vm(temp_vm1.vm)[0]
else:
vr1 = vrs[0]
#we do not need temp_vm1, since we just use their VRs.
if temp_vm1:
temp_vm1.destroy()
test_obj_dict.rm_vm(temp_vm1)
vr1_pub_ip = test_lib.lib_find_vr_pub_ip(vr1)
vm.check()
vm_nic1 = vm.vm.vmNics[0]
vm_nic1_uuid = vm_nic1.uuid
vm_nic2 = vm.vm.vmNics[1]
vm_nic2_uuid = vm_nic2.uuid
pri_l3_uuid = vm_nic1.l3NetworkUuid
vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0]
vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr)
l3_uuid = vr_pub_nic.l3NetworkUuid
vip1 = test_stub.create_vip('vip1_2l3s_vm_test', l3_uuid)
test_obj_dict.add_vip(vip1)
vip1_uuid = vip1.get_vip().uuid
vip2 = test_stub.create_vip('vip2_2l3s_vm_test', l3_uuid)
test_obj_dict.add_vip(vip2)
vip2_uuid = vip2.get_vip().uuid
eip1 = test_stub.create_eip('2l3 eip test1', vip_uuid=vip1.get_vip().uuid)
vip1.attach_eip(eip1)
vip1.check()
eip1.attach(vm_nic1_uuid, vm)
vip1.check()
pf_creation_opt1 = PfRule.generate_pf_rule_option(vr1_pub_ip, protocol=inventory.TCP, vip_target_rule=Port.rule3_ports, private_target_rule=Port.rule3_ports, vip_uuid=vip2_uuid, vm_nic_uuid=vm_nic2_uuid)
test_pf1 = zstack_pf_header.ZstackTestPortForwarding()
test_pf1.set_creation_option(pf_creation_opt1)
test_pf1.create(vm)
vip2.attach_pf(test_pf1)
vip2.check()
vm.stop()
vm.start()
vm.check()
vip1.check()
vip2.check()
vm.destroy()
vip1.check()
vip2.check()
vip1.delete()
test_obj_dict.rm_vip(vip1)
vip2.delete()
test_obj_dict.rm_vip(vip2)
conf_ops.change_global_config('eip', 'snatInboundTraffic', \
eip_snatInboundTraffic_default_value )
conf_ops.change_global_config('portForwarding', 'snatInboundTraffic', \
pf_snatInboundTraffic_default_value)
test_util.test_pass('Create 1 VM with 2 l3_network with 1 eip and 1 PF testing successfully.')
#Will be called only if exception happens in test().
def error_cleanup():
global eip_snatInboundTraffic_default_value
global pf_snatInboundTraffic_default_value
conf_ops.change_global_config('eip', 'snatInboundTraffic', \
eip_snatInboundTraffic_default_value )
conf_ops.change_global_config('portForwarding', 'snatInboundTraffic', \
pf_snatInboundTraffic_default_value)
test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 | 65,755,118,501,402,860 | 34.969466 | 207 | 0.666529 | false |
jblouse/word_search | tasks.py | 1 | 1048 | """Build automation driver using invoke"""
from invoke import task
SETUP_COMMAND = 'rm -rf venv && virtualenv venv && source venv/bin/activate'
PIP_COMMAND = 'venv/bin/pip install -r requirements.pip'
LINT_COMMAND = 'venv/bin/flake8 *.py && venv/bin/pep8 *.py ' \
'&& venv/bin/pylint *.py'
TEST_COMMAND = 'venv/bin/nosetests --with-coverage --cover-erase ' \
'--cover-package=word_search --cover-package=file_search ' \
'--cover-package=utilities --cover-package=constants' \
' && rm -rf .coverage _indices_ && find . -name "*.pyc" -delete'
@task
def pip(ctx):
"""Install pip requirements"""
ctx.run(PIP_COMMAND, hide=True)
@task
def setup(ctx):
"""Perform setup tasks"""
ctx.run(SETUP_COMMAND, hide=True)
pip(ctx)
@task
def lint(ctx):
"""Run lint check"""
ctx.run(LINT_COMMAND)
@task
def test(ctx):
"""Run tests with coverage"""
ctx.run(TEST_COMMAND)
@task(default=True)
def build(ctx):
"""Perform build"""
lint(ctx)
test(ctx)
| mit | 2,379,484,420,884,448,000 | 22.818182 | 79 | 0.614504 | false |
hickeroar/project-euler | 040/solution045.py | 1 | 1070 | """
Triangle, pentagonal, and hexagonal numbers are generated by the following formulae:
Triangle Tn=n(n+1)/2
Pentagonal Pn=n(3n-1)/2
Hexagonal Hn=n(2n-1)
It can be verified that T285 = P165 = H143 = 40755.
Find the next triangle number that is also pentagonal and hexagonal.
"""
from sys import exit
def calcTri(n):
return (n*(n+1))/2
pentNums = {1:1}
pentN = 1
maxPent = 1
def calcPent(n):
newPent = (n*((3*n)-1))/2
pentNums[newPent] = newPent
return newPent
def isPent(n):
global pentN, maxPent
while maxPent < n:
pentN += 1
maxPent = calcPent(pentN)
return n in pentNums
hexNums = {1:1}
hexN = 1
maxHex = 1
def calcHex(n):
newHex = n*((2*n)-1)
hexNums[newHex] = newHex
return newHex
def isHex(n):
global hexN, maxHex
while maxHex < n:
hexN += 1
maxHex = calcHex(hexN)
return n in hexNums
triN = 285
while True:
triN += 1
testTri = calcTri(triN)
if isPent(testTri):
if isHex(testTri):
print testTri
exit() | mit | 2,311,550,960,107,173,000 | 16.557377 | 84 | 0.604673 | false |
MarsBighead/mustang | Python/inheritance.py | 1 | 1202 | #!/usr/bin/python
class Person:
def __init__(self, firstName, lastName, idNumber):
self.firstName = firstName
self.lastName = lastName
self.idNumber = idNumber
def printPerson(self):
print "Name:", self.lastName + ",", self.firstName
print "ID:", self.idNumber
class Student(Person):
def __init__(self,firstName, lastName, idNum, scores ):
Person.__init__(self,firstName, lastName, idNum)
self.scores=scores
def calculate(self):
l=len(self.scores)
sum = 0
for s in self.scores:
sum += s
avg=sum/len(self.scores)
if avg>=90 and avg <=100 :
return 'O'
elif avg<90 and avg >=80:
return 'E'
elif avg<80 and avg >=70:
return 'A'
elif avg<70 and avg >=55:
return 'P'
elif avg<55 and avg >=40:
return 'D'
elif avg<40:
return 'T'
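        # Worked example (illustrative only, not in the original): scores [88, 90, 94]
        # give avg = 272/3 = 90 with Python 2 integer division, hence grade 'O';
        # scores [60, 61] give avg = 60, hence grade 'P'.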
line = raw_input().split()
firstName = line[0]
lastName = line[1]
idNum = line[2]
numScores = int(raw_input()) # not needed for Python
scores = map(int, raw_input().split())
s = Student(firstName, lastName, idNum, scores)
s.printPerson()
print "Grade:", s.calculate()
| mit | -1,258,043,017,852,399,600 | 25.130435 | 59 | 0.577371 | false |
FedericoCeratto/debian-py3status | py3status/modules/spaceapi.py | 1 | 2604 | # -*- coding: utf-8 -*-
"""
This module shows if your favorite hackerspace is open or not
Last modified: 2015-02-01
Author: @timmszigat
License: WTFPL http://www.wtfpl.net/txt/copying/
"""
import codecs
import datetime
import json
from time import time
import urllib.request
class Py3status:
"""
Configuration Parameters:
- cache_timeout: Set timeout between calls in seconds
- closed_color: color if space is closed
- closed_text: text if space is closed, strftime parameters will be translated
- open_color: color if space is open
    - open_text: text if space is open, strftime parameters will be translated
- url: URL to SpaceAPI json file of your space
"""
# available configuration parameters
cache_timeout = 60
closed_color = None
closed_text = 'closed since %H:%M'
open_color = None
open_text = 'open since %H:%M'
url = 'http://status.chaospott.de/status.json'
def check(self, i3s_output_list, i3s_config):
response = {
'cached_until': time() + self.cache_timeout
}
try:
# if color isn't set, set basic color schema
if not self.open_color:
self.open_color = i3s_config['color_good']
if not self.closed_color:
self.closed_color = ''
# grab json file
json_file=urllib.request.urlopen(self.url)
reader = codecs.getreader("utf-8")
data = json.load(reader(json_file))
json_file.close()
if(data['state']['open'] == True):
response['full_text'] = self.open_text
response['short_text'] = '%H:%M'
if self.open_color:
response['color'] = self.open_color
else:
response['full_text'] = self.closed_text
response['short_text'] = ''
if self.closed_color:
response['color'] = self.closed_color
# apply strftime to full and short text
dt = datetime.datetime.fromtimestamp(data['state']['lastchange'])
response['full_text'] = dt.strftime(response['full_text'])
response['short_text'] = dt.strftime(response['short_text'])
except:
            response['full_text'] = ''
return response
if __name__ == "__main__":
"""
Test this module by calling it directly.
"""
from time import sleep
x = Py3status()
while True:
print(x.check([], {'color_good': 'green'}))
sleep(1)
| bsd-2-clause | -5,483,487,193,983,475,000 | 30 | 86 | 0.565668 | false |
aristanetworks/EosSdk | examples/MplsTunnelLivenessAgent.py | 1 | 20866 | #!/usr/bin/env python
# Copyright (c) 2015 Arista Networks, Inc. All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
""" This program provides an agent that sends and responds to
health-check packets in order to determine the liveness of the
configured MPLS tunnels. """
import eossdk
import eossdk_utils
import functools
import json
import os
import pyinotify
import scapy
import scapy.fields
import scapy.layers.l2
import scapy.layers.inet
import scapy.packet
import scapy.route
import scapy.sendrecv
import socket
import struct
import sys
import time
# Requires: RPMs for scapy and EosSdk, as well as the eossdk_utils.py
# script (for debugging). Tunnel configuration is read from the JSON file
# MplsTunnelLivenessConfig.json (see process_config below).
# The main agent is located in the MplsTunnelLivenessAgent class below.
POLL_TIME = 1 # how often to send a liveness packet in seconds
TIMEOUT_TIME = 5 # seconds before a tunnel is declared dead
STARTUP_GRACEPERIOD = 0 # seconds after startup before we start checking a tunnel
# Make sure your IP tables are up to date on the switch:
# > sudo iptables -I INPUT -p UDP --dport 17171 -j ACCEPT
UDP_PORT = 17171
MAX_PKT_SIZE = 2048 # The maximum payload size of our packet
MAX_INT = 0xffffffff # The maximum size of a 4 byte unsigned int
class Message(object):
""" A Message is the payload of the health-check packets that this
agent sends out and receives. It consists of two parts. The first
   is a header that contains a number that identifies which tunnel
the sender sent this message out of. The header also contains a
numeric id of the packet, and finally, a number describing how many
'entries' are in the second part of the packet. This second part is
a list of 0 or more 'tunnel status entries'. Each entry contains a
numeric tunnel identifier and a boolean describing whether the
sending switch thinks that tunnel is alive or not."""
# Header consists of (version, pid, sender's tunnel key, msg id,
# num status entries), as integers, in little-endian:
header_format = '<IIIII'
header_len = struct.calcsize(header_format)
tunnel_entry_format = '<I?' # tunnel_key, bool
tunnel_entry_len = struct.calcsize(tunnel_entry_format)
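   # Illustrative wire layout (added note, not in the original file): the header is
   # five little-endian uint32s (20 bytes) followed by one 5-byte entry per tunnel
   # (uint32 key + 1-byte bool). For example,
   #   Message(pid=42, egress_tunnel_key=7, msg_id=3, tunnel_liveness={1: True})
   # serializes to 25 bytes:
   #   struct.pack('<IIIII', 1, 42, 7, 3, 1) + struct.pack('<I?', 1, True)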
def __init__(self, pid, egress_tunnel_key, msg_id, tunnel_liveness):
self.pid = pid
self.egress_tunnel_key = egress_tunnel_key
self.msg_id = msg_id
# Mapping from tunnel_key to boolean whether this is alive or not
self.tunnel_liveness = tunnel_liveness
def serialize(self):
# First put the length of this packet
ret = struct.pack(Message.header_format, 1, self.pid, self.egress_tunnel_key,
self.msg_id, len(self.tunnel_liveness))
for tunnel_key, is_alive in self.tunnel_liveness.iteritems():
ret += struct.pack(Message.tunnel_entry_format, tunnel_key, is_alive)
if len(ret) > MAX_PKT_SIZE:
assert False, "Packet %s too large to send!" % self.__str__()
return ret
def __str__(self):
return "Message(sender_pid=%d, egress_tunnel_key=%d, id=%d, %r)" % (
self.pid, self.egress_tunnel_key, self.msg_id, self.tunnel_liveness)
@staticmethod
def deserialize(buf):
""" Given a buffer, create and return a Message from the
buffer's contents. If the buffer does not contain a valid
Message, this returns None.
"""
if len(buf) < Message.header_len:
return None
version, pid, egress_tunnel_key, msg_id, num_entries = struct.unpack(
Message.header_format, buf[:Message.header_len])
if version != 1:
return None
msg_len = Message.header_len + Message.tunnel_entry_len * num_entries
if len(buf) < msg_len:
return None
liveness = {}
for i in xrange(Message.header_len, msg_len,
Message.tunnel_entry_len):
# Unpack each status entry reported in this packet
key, is_alive = struct.unpack(Message.tunnel_entry_format,
buf[i: i + Message.tunnel_entry_len])
liveness[key] = is_alive
return Message(pid, egress_tunnel_key, msg_id, liveness)
class EgressTunnel(object):
""" Contains the configuration and status of this switch's outgoing
tunnels. """
def __init__(self, label, nexthop_ip_addr):
# Configurable attributes
self.mpls_label = label
self.nexthop_ip = nexthop_ip_addr
# Dynamic attributes:
# The bridging MAC of the nexthop:
self.nexthop_eth_addr = None
# The interface the nexthop_eth_addr lives on:
self.egress_intf = None
# ... and the MAC address of that interface:
self.egress_intf_eth_addr = None
self.last_update_time = 0
self.is_alive = True
class RemoteTunnelStatus(object):
""" Tracks the status of a remote tunnel (a tunnel where the packet
sender is the remote switch). """
def __init__(self):
self.last_rx_msg_id = 0
self.last_update_time = time.time()
class RemoteSwitch(object):
""" This object stores the configuration for our outgoing tunnels to
this remote switch, as well as a status collection containing our view on
the liveness of that switch's tunnels to us. """
def __init__(self, dest_ip):
# Configuration
# The IP address of the remote switch
self.destination_ip = dest_ip
# The following dictionary keeps track of our outgoing tunnels
# to this switch. It is a mapping from integer tunnel_key to a
# EgressTunnel()
self.egress_tunnels = {}
# Status
self.last_tx_msg_id = 0
self.last_rx_msg_id = 0
self.pid = 0
# The `remote_tunnel_status` variable keeps track of whether their
# tunnels are alive or not. It is a mapping from an integer
# tunnel_key to a RemoteTunnelStatus() object. Note that these
      # keys correspond to the remote switch's tunnel collection, and
      # are not the same as the keys for the `tunnels` variable above.
self.remote_tunnel_status = {}
def liveness_dict(self, cur_time):
ret = {}
for key, tunnel_status in self.remote_tunnel_status.items():
time_delta = cur_time - tunnel_status.last_update_time
if time_delta > (TIMEOUT_TIME * 10):
# Stop sending tunnels that we haven't heard from in a
# really long time.
del self.remote_tunnel_status[key]
elif time_delta > TIMEOUT_TIME:
# Tunnel is dead!
ret[key] = False
else:
ret[key] = True
return ret
class MPLS(scapy.packet.Packet):
""" Create an MPLS header that can be used with scapy packets """
name = "MPLS"
fields_desc = [ scapy.fields.BitField("label", 9, 20),
scapy.fields.BitField("cos", 0, 3),
scapy.fields.BitField("s", 1, 1),
scapy.fields.ByteField("ttl", 0) ]
scapy.packet.bind_layers(scapy.layers.l2.Ether, MPLS, type=0x8847)
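# Added note (illustrative, not in the original file): bind_layers registers
# EtherType 0x8847, so scapy both dissects and builds MPLS-over-Ethernet frames.
# send_packet() below builds exactly this stack, e.g.
#   pkt = scapy.layers.l2.Ether() / MPLS(label=100, ttl=64) / scapy.layers.inet.IP(dst='10.0.0.1')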
class InotifyHandler(pyinotify.ProcessEvent):
""" A helper class handles inotify updates """
parent = None
def my_init(self, **kwargs):
self.parent = kwargs['parent']
def process_IN_MODIFY(self, event):
self.parent.process_config()
class MplsTunnelLivenessAgent(eossdk_utils.EosSdkAgent,
eossdk.AgentHandler,
eossdk.FdHandler,
eossdk.TimeoutHandler):
""" This agent is responsible for tracking the liveness of specified
MPLS tunnels. """
def __init__(self, sdk, config_file="MplsTunnelLivenessConfig.json"):
""" Create the agent. Requires an eossdk handle, as well as the
input configuration """
self.agent_mgr = sdk.get_agent_mgr()
self.eth_intf_mgr = sdk.get_eth_intf_mgr()
self.ip_intf_mgr = sdk.get_ip_intf_mgr()
self.mac_table_mgr = sdk.get_mac_table_mgr()
self.neighbor_table_mgr = sdk.get_neighbor_table_mgr()
self.tracer = eossdk.Tracer("MplsTunnelLivenessAgent")
eossdk_utils.EosSdkAgent.__init__(self)
eossdk.AgentHandler.__init__(self, self.agent_mgr)
eossdk.TimeoutHandler.__init__(self, sdk.get_timeout_mgr())
eossdk.FdHandler.__init__(self)
self.tracer.trace0("MPLS tunnel liveness agent constructed")
self.initialized = False
self.pid = os.getpid()
# The l3 interface we should grab our "SRC IP" from. Read from
# the config:
self.src_intf = None
self.src_ip = None # Resolved after reading from config
# A UDP socket that receives liveness packets from other
# agents. Created during on_initialized
self.rx_sock = None
# A mapping from remote switch IP to RemoteSwitch()
self.remote_switches = {}
self.config_file = config_file
self.wm = pyinotify.WatchManager()
handler = functools.partial(InotifyHandler, parent=self)
# pylint: disable-msg=E1101
self.wm.watch_transient_file(config_file, pyinotify.IN_MODIFY, handler)
# pylint: enable-msg=E1101
self.notifier = pyinotify.AsyncNotifier(self.wm,
InotifyHandler(parent=self))
self.notifier.coalesce_events(True)
self.inotify_fd = self.wm.get_fd()
self.watch_readable(self.inotify_fd, True)
# Read our initial configuration
self.process_config()
def on_initialized(self):
""" Update our configured egress tunnels. Start all tunnels as
alive, with a last_update_time of now + any grace
period. Calculate the output interfaces for each tunnel based
off of that tunnel's nexthop MAC address. """
self.initialized = True
self.tracer.trace2("Looking up the IP address for interface " + self.src_intf)
src_ips = self.ip_intf_mgr.ip_addrs(eossdk.IntfId(self.src_intf))
if not src_ips:
assert False, "No IP addresses assigned to %s" % self.src_intf
self.src_ip = src_ips[0].addr().to_string()
self.tracer.trace2("Using src IP address " + self.src_ip)
self.tracer.trace2("Create the socket that receives remote probes")
self.rx_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rx_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.rx_sock.bind((self.src_ip, UDP_PORT))
self.rx_sock.setblocking(0)
self.watch_readable(self.rx_sock.fileno(), True)
self.resolve_config()
def handle_tunnel_alive(self, dst_ip, tunnel_key, tunnel):
self.tracer.trace3("Tunnel %d to %s came back!" % (tunnel_key, dst_ip))
      # Do any other logic here (e.g. alert another agent that
# tunnel.mpls_label is usable again)
def handle_tunnel_death(self, dst_ip, tunnel_key, tunnel):
self.tracer.trace3("Tunnel %d to %s died!" % (tunnel_key, dst_ip))
      # Do any other logic here (e.g. alert another agent that
# tunnel.mpls_label is no longer a valid tunnel)
def on_timeout(self):
""" Time to send some packets to our neighbors! Our poller
fired, so we should send out our heartbeat packets. We also
check if we haven't heard about any of our tunnels recently, and
if so, mark them as dead. """
cur_time = time.time()
for host in self.remote_switches.itervalues():
liveness_dict = host.liveness_dict(cur_time)
host.last_tx_msg_id += 1
if host.last_tx_msg_id > MAX_INT:
host.last_tx_msg_id = 1
for key, tunnel in host.egress_tunnels.iteritems():
msg = Message(self.pid, key, host.last_tx_msg_id, liveness_dict)
self.send_packet(host.destination_ip, tunnel, msg)
if tunnel.is_alive and (
time.time() - tunnel.last_update_time > TIMEOUT_TIME):
# There has been no updates to this tunnel at all
# within our timeout period.
tunnel.is_alive = False
self.handle_tunnel_death(host.destination_ip, key, tunnel)
# Schedule us to be called again in the future
self.timeout_time_is(eossdk.now() + POLL_TIME)
def on_readable(self, fd):
""" We got a packet on our UDP port! Read the packet, update our
views of the remote tunnel's liveness, and then parse the
packet's payload to inspect what the remote packet thinks of
*our* tunnel liveness. If any liveness changed, then fire our
handlers. """
if fd == self.inotify_fd:
self.tracer.trace6("Inotify fd %d is readable" % self.inotify_fd)
self.notifier.handle_read()
return
if fd != self.rx_sock.fileno():
assert False, "Unknown socket became readable %d" % fd
data, addr = self.rx_sock.recvfrom(MAX_PKT_SIZE)
src_ip = addr[0]
self.tracer.trace6("Received message from %r" % src_ip)
if not data:
self.tracer.trace7("Received empty message, ignoring.")
return
msg = Message.deserialize(data)
if not msg:
self.tracer.trace7("Received invalid message, ignoring! "
"First 500 bytes of pkt: %r" % data[:500])
return
self.tracer.trace8("Got message %s" % str(msg))
if src_ip not in self.remote_switches:
self.tracer.trace7("Got packet from unknown host: %r" % src_ip)
return
remote_switch = self.remote_switches[src_ip]
remote_tunnel_status = remote_switch.remote_tunnel_status.setdefault(
msg.egress_tunnel_key, RemoteTunnelStatus())
if msg.pid != remote_switch.pid:
# This is the either the first message we've received from
# them, or their remote switch restarted. In any case, the
# msg IDs they are sending will have been reset.
remote_switch.pid = msg.pid
remote_switch.last_rx_msg_id = 0
remote_tunnel_status.last_rx_msg_id = 0
      # First, record that we got a packet from the sender's tunnel named
# in the packet.
if self.is_new_id(remote_tunnel_status.last_rx_msg_id, msg.msg_id):
# Do we care about packets coming in out of order?
remote_tunnel_status.last_update_time = time.time()
remote_tunnel_status.last_rx_msg_id = msg.msg_id
# Then inspect the body of the packet that tells me which of
# my tunnel statuses the remote switch has seen.
if not self.is_new_id(remote_switch.last_rx_msg_id, msg.msg_id):
         # We've already seen newer messages. Ignore this one.
self.tracer.trace7("Got old message with id: %d (currently at %d)"
% (msg.msg_id, remote_switch.last_rx_msg_id))
return
remote_switch.last_rx_msg_id = msg.msg_id
for tunnel_key, is_alive in msg.tunnel_liveness.iteritems():
if tunnel_key not in remote_switch.egress_tunnels:
# They are telling us about one of our egress tunnels that
# we have no record of...
self.tracer.trace0("Got tunnel status for an unknown key: %r" %
tunnel_key)
continue
tunnel = remote_switch.egress_tunnels[tunnel_key]
tunnel.last_update_time = time.time()
# Check if the remote switch thinks our egress tunnel is
# up or down. If it changed, call our handlers!
if tunnel.is_alive == is_alive:
self.tracer.trace9("No change to liveness for tunnel %d" % tunnel_key)
continue
elif is_alive:
tunnel.is_alive = True
self.handle_tunnel_alive(src_ip, tunnel_key, tunnel)
else:
tunnel.is_alive = False
self.handle_tunnel_death(src_ip, tunnel_key, tunnel)
def resolve_egress_tunnel(self, tunnel):
self.tracer.trace8("Resolve the nexthop IP %s to an ethernet address" %
tunnel.nexthop_ip)
neighbor_key = eossdk.NeighborKey(
eossdk.IpAddr(tunnel.nexthop_ip), eossdk.IntfId())
neighbor_entry = self.neighbor_table_mgr.neighbor_entry_status(neighbor_key)
if neighbor_entry == eossdk.NeighborEntry():
self.tracer.trace8("Checking static ARP entries")
neighbor_entry = self.neighbor_table_mgr.neighbor_entry(neighbor_key)
if neighbor_entry == eossdk.NeighborEntry():
self.tracer.trace0("IP address %r has no ARP entry" %
tunnel.nexthop_ip)
assert False, "Unlearned nexthop IP %s" % tunnel.nexthop_ip
nexthop_eth_addr = neighbor_entry.eth_addr()
self.tracer.trace5("IP %s lives on %s" %
(tunnel.nexthop_ip, nexthop_eth_addr.to_string()))
tunnel.nexthop_eth_addr = nexthop_eth_addr.to_string()
self.tracer.trace8("Now resolving that MAC entry to an interface.")
# TODO: Is this necessary if we send it out of the "fabric"
# interface?
vlan_id = 1
mac_entry = self.mac_table_mgr.mac_entry(vlan_id, nexthop_eth_addr)
if mac_entry.intf() == eossdk.IntfId():
self.tracer.trace0("Mac entry %r not on any interface" %
tunnel.nexthop_eth_addr)
assert False, "Unlearned nexthop MAC %s" % tunnel.nexthop_eth_addr
intf = mac_entry.intf().to_string()
# Convert the interface names to the kernel interface names
intf = intf.replace("Ethernet", "et")
intf = intf.replace("Port-Channel", "po")
self.tracer.trace5("MAC entry %s is learned on inteface %r" %
(tunnel.nexthop_eth_addr, intf))
tunnel.egress_intf = intf
self.tracer.trace8("Looking up that interface's MAC address")
egress_eth_addr = self.eth_intf_mgr.eth_addr(mac_entry.intf())
if egress_eth_addr == eossdk.EthAddr():
assert False, "Interface %s has no MAC address" % intf
self.tracer.trace5("Intf %s has MAC address %s" %
(intf, egress_eth_addr.to_string()))
tunnel.egress_intf_eth_addr = egress_eth_addr.to_string()
def send_packet(self, dst_ip, tunnel, msg):
""" Wrap `msg` in a UDP-over-MPLS packet, using `dst_ip` and the tunnel's
MPLS label, and send the packet out of the tunnel's egress interface."""
self.tracer.trace8("Sending message %s" % str(msg))
payload = msg.serialize()
pkt = scapy.layers.l2.Ether(src=tunnel.egress_intf_eth_addr,
dst=tunnel.nexthop_eth_addr)
pkt = (pkt / MPLS(label=tunnel.mpls_label, ttl=64) /
scapy.layers.inet.IP(src=self.src_ip,
dst=dst_ip) /
scapy.layers.inet.UDP(dport=UDP_PORT) /
(payload))
# In the real world we might make this non-blocking, but for now
      # we assume packets always get sent in one go. Also, it might be
# worth maintaining our own socket to the egress interface to
# save us the work of creating/tearing down raw sockets
# constantly.
scapy.sendrecv.sendp(pkt, iface=tunnel.egress_intf, verbose=0)
def process_config(self):
self.tracer.trace1("Processing configuration change on %s" %
self.config_file)
with open(self.config_file) as f:
cfg = json.loads(f.read())
if not self.initialized:
# Write the src_intf only once.
self.src_intf = cfg["src_intf"]
# Clear out the previous config:
self.remote_switches = {}
# And signify that we are a new process by changing our
# advertised pid. It would be preferable to just only update the
# newly configured tunnels, but that's more complicated for now.
self.pid -= 1
for rs in cfg["remote_switches"]:
dst_ip = rs["destination_ip"]
dst = RemoteSwitch(dst_ip)
for tunnel_key_str, tunnel_info in rs["tunnels"].iteritems():
tunnel_key = int(tunnel_key_str)
dst.egress_tunnels[tunnel_key] = EgressTunnel(
tunnel_info["label"], tunnel_info["nexthop_ip"])
self.remote_switches[dst_ip] = dst
if self.initialized:
self.resolve_config()
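      # Example MplsTunnelLivenessConfig.json consistent with the parsing above
      # (an illustration added here; the interface, addresses and labels are made up):
      #   {
      #     "src_intf": "Ethernet1",
      #     "remote_switches": [
      #       {"destination_ip": "10.0.0.2",
      #        "tunnels": {"1": {"label": 100, "nexthop_ip": "10.1.0.2"},
      #                    "2": {"label": 200, "nexthop_ip": "10.2.0.2"}}}
      #     ]
      #   }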
def resolve_config(self):
self.tracer.trace2("Resolving all of our configured tunnels")
for host in self.remote_switches.itervalues():
for tunnel in host.egress_tunnels.itervalues():
tunnel.last_update_time = time.time() + STARTUP_GRACEPERIOD
self.resolve_egress_tunnel(tunnel)
self.timeout_time_is(eossdk.now() + POLL_TIME)
def is_new_id(self, last_seen_id, new_id):
# Returns True if the new_id is larger than the last_seen_id, or
# the new_id has wrapped around.
return (last_seen_id < new_id) or ((last_seen_id - new_id) > (MAX_INT / 2))
def main(args):
sdk = eossdk.Sdk()
_ = MplsTunnelLivenessAgent(sdk)
sdk.main_loop(args)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause | -8,754,187,638,724,721,000 | 40.732 | 85 | 0.637544 | false |
hypermindr/barbante | barbante/recommendation/RecommenderHRRandom.py | 1 | 4448 | """ Hybrid Recommender HRRandom.
"""
import random
from barbante.recommendation.HybridRecommender import HybridRecommender
import barbante.utils.logging as barbante_logging
log = barbante_logging.get_logger(__name__)
class RecommenderHRRandom(HybridRecommender):
""" Hybrid Recommender HRRandom.
It merges different algorithms randomly, respecting the probability assigned to each algorithm
    and the relative orders of the recommendations produced by each strategy."""
"""
def __init__(self, session_context):
super().__init__(session_context)
def get_suffix(self):
""" See barbante.recommendation.Recommender.
"""
return "HRRandom"
def obtain_cdf(self, recommendations_by_algorithm, item_idx_by_algorithm):
""" Computes the cumulative distribution function associated to the algorithms
that are used by this recommender.
:param recommendations_by_algorithm: The set of algorithms to be sampled.
:param item_idx_by_algorithm: Points to the next item to be consumed from the list of contributions
of a given algorithm. When such index is equal to the size of the list, that algorithm has
nothing left to contribute.
:returns: A list with (Sum(p_i), alg) pairs, where Sum(p_i) is the cumulative
probability for algorithm *alg*.
"""
cdf = []
cumulative_prob = 0
for algorithm, prob in self.session_context.algorithm_weights[self.get_suffix()]:
if item_idx_by_algorithm[algorithm] < len(recommendations_by_algorithm.get(algorithm, {})):
cumulative_prob += prob
cdf += [(algorithm, cumulative_prob)]
# normalizes
cdf = [(cdf_item[0], cdf_item[1] / cumulative_prob) for cdf_item in cdf]
return cdf
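        # Illustrative example (not in the original code): with configured weights
        # [('alg_a', 0.5), ('alg_b', 0.3), ('alg_c', 0.2)] and all three algorithms
        # still holding unconsumed items, this returns
        # [('alg_a', 0.5), ('alg_b', 0.8), ('alg_c', 1.0)]. Exhausted algorithms are
        # skipped and the remaining probabilities are renormalized to sum to 1.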
@staticmethod
def choose_algorithm(cdf):
result = None
rand = random.random()
for algorithm, cumulative_prob in cdf:
if rand < cumulative_prob:
result = algorithm
break
return result
def merge_algorithm_contributions(self, sorted_scores_by_algorithm, n_recommendations):
""" See barbante.recommend.HybridRecommender.
"""
log.debug("Merging contributions...")
recommendations = []
recommendations_set = set() # avoids duplicates among different algorithms
contributions_by_algorithm = {alg: 0 for alg in self.algorithms} # for logging
item_idx_by_algorithm = {alg: 0 for alg in self.algorithms} # to keep track of traversal position
# Selects recommendations randomly, based on the probability distribution given by the algorithm weights.
cdf = self.obtain_cdf(sorted_scores_by_algorithm, item_idx_by_algorithm)
n_items_left_to_fill = n_recommendations - len(recommendations)
while n_items_left_to_fill > 0:
algorithm = self.choose_algorithm(cdf)
if algorithm is None:
break # all algorithm contributions have been exhausted
sorted_candidate_scores = sorted_scores_by_algorithm.get(algorithm)
if sorted_candidate_scores is None:
continue
while item_idx_by_algorithm[algorithm] < len(sorted_candidate_scores):
score, candidate = sorted_candidate_scores[item_idx_by_algorithm[algorithm]]
item_idx_by_algorithm[algorithm] += 1
if candidate not in recommendations_set:
recommendations_set.add(candidate)
contributions_by_algorithm[algorithm] += 1
# prepends the identification of the source algorithm in the score tuple
recommendations += [([algorithm] + score, candidate)]
break
updated_n_items_left_to_fill = n_recommendations - len(recommendations)
if updated_n_items_left_to_fill == n_items_left_to_fill:
# chosen algorithm has no more contributions to give -- let's update the cdf
cdf = self.obtain_cdf(sorted_scores_by_algorithm, item_idx_by_algorithm)
n_items_left_to_fill = updated_n_items_left_to_fill
for alg in self.algorithms:
log.info("Algorithm [%s] contributed [%d] items" % (alg, contributions_by_algorithm[alg]))
return recommendations
| mit | 4,428,037,169,724,290,600 | 41.769231 | 113 | 0.63759 | false |
frodo4fingers/gimod | gimod.py | 1 | 9198 | #!/usr/bin/env python
# encoding: UTF-8
try:
from PyQt5.QtWidgets import (QMainWindow, QApplication, QVBoxLayout,
QSizePolicy, QStatusBar, QTabWidget, QSplitter, QAction, QMessageBox,
QFileDialog, QMenu)
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon, QCursor
except ImportError:
from PyQt4.QtGui import (QMainWindow, QApplication, QVBoxLayout,
QSizePolicy, QStatusBar, QTabWidget, QSplitter, QAction, QMessageBox,
QIcon, QFileDialog, QMenu, QCursor)
from PyQt4.QtCore import Qt
import sys
import pygimli as pg
from pygimli.meshtools import createMesh, exportPLC, exportFenicsHDF5Mesh, readPLC
from pygimli.mplviewer import drawMeshBoundaries, drawMesh, drawPLC, drawModel
from core import Builder
# try:
from core.imagery import ImageTools
# opencv = True
# except ModuleNotFoundError:
# # set global flag
# opencv = False
from gui import InfoTree, MeshOptions, PlotWidget, PolyToolBar
class GIMod(QMainWindow):
"""The main class that holds all subclasses and design GIMod."""
def __init__(self, parent=None):
"""
Call parent class to receive full functionality, initialize the
layout of GIMod and connect all signals to their respective methods.
"""
super(GIMod, self).__init__(parent)
# self.cursor = QCursor()
self.initUI()
self.image_tools = ImageTools(self)
# when resizing the mainwindow
if self.toolBar.acn_gridToggle.isChecked():
self.resizeEvent(self.builder.grid.getCanvasHeight)
# menu actions
self.mb_aboutVerison.triggered.connect(self.aboutVersion)
self.mb_open_file.triggered.connect(self.openAnyFile)
self.mb_save_poly.triggered.connect(self.exportPoly)
self.mb_save_mesh.triggered.connect(self.exportMesh)
# connect the toolbar action signals to their methods if opencv is present
if self.image_tools.found_cv:
self.toolBar.acn_imageAsBackground.stateChanged.connect(
self.image_tools.imageryBackground)
self.toolBar.acn_imageThreshold1.valueChanged.connect(
self.image_tools.updateImagery)
self.toolBar.acn_imageThreshold2.valueChanged.connect(
self.image_tools.updateImagery)
self.toolBar.acn_imageDensity.valueChanged.connect(
self.image_tools.updateImagery)
self.toolBar.acn_imagePolys.valueChanged.connect(
self.image_tools.polysFromImage)
self.toolBar.acn_image.triggered.connect(self.image_tools.imagery)
else: # disable their use
self.toolBar.acn_imageThreshold1.setEnabled(False)
self.toolBar.acn_imageThreshold2.setEnabled(False)
self.toolBar.acn_imageDensity.setEnabled(False)
self.toolBar.acn_imagePolys.setEnabled(False)
self.toolBar.acn_polygonize.triggered.connect(self.builder.formPolygonFromFigure)
self.toolBar.acn_reset_figure.triggered.connect(self.builder.resetFigure)
self.toolBar.acn_world.triggered.connect(self.builder.formPolyWorld)
self.toolBar.acn_rectangle.triggered.connect(self.builder.formPolyRectangle)
self.toolBar.acn_circle.triggered.connect(self.builder.formPolyCircle)
self.toolBar.acn_line.triggered.connect(self.builder.formPolyLine)
self.toolBar.acn_polygon.triggered.connect(self.builder.formPolygon)
self.toolBar.acn_markerCheck.triggered.connect(self.builder.markersMove)
self.toolBar.acn_gridToggle.triggered.connect(self.builder.toggleGrid)
self.toolBar.acn_magnetizeGrid.triggered.connect(self.builder.toggleMagnetizedGrid)
self.toolBar.acn_magnetizePoly.triggered.connect(self.builder.magnetizePoly)
self.info_tree.btn_redraw.clicked.connect(self.info_tree.redrawTable)
self.info_tree.btn_undo.clicked.connect(self.builder.undoPoly)
self.info_tree.btn_redo.clicked.connect(self.builder.redoPoly)
def initUI(self):
"""Set the GUI together from the other widgets."""
        # instantiate the status bar to prompt some information of what is
# going on beneath the hood
self.statusbar = QStatusBar()
self.setStatusBar(self.statusbar)
        # instantiate the toolbar with the polytool functionality
self.toolBar = PolyToolBar(self)
self.addToolBar(self.toolBar)
self.menubar = self.menuBar()
# call the menu generation
self.menuBarItems()
menu_file = self.menubar.addMenu("&File")
# menu_file_open = QMenu("&Open", self)
# menu_file_open.addAction(self.mb_open_file)
menu_file_save = QMenu("&Save", self)
menu_file_save.addAction(self.mb_save_poly)
menu_file_save.addAction(self.mb_save_mesh)
menu_file.addAction(self.mb_open_file)
menu_file.addSeparator()
menu_file.addMenu(menu_file_save)
menu_about = self.menubar.addMenu("&About")
menu_about.addAction(self.mb_aboutVerison)
        # instantiate the plot widget where everything will be drawn
self.plotWidget = PlotWidget(self)
        # instantiate all the core functions
self.builder = Builder(self)
        # instantiate the info table for the polygons
self.info_tree = InfoTree(self)
        # instantiate the mesh options tab to adjust the mesh parameters
self.mesh_opt = MeshOptions(self)
tabBox = QTabWidget(self)
tabBox.setTabPosition(QTabWidget.West)
tabBox.addTab(self.info_tree, QIcon('icons/ic_info.svg'), "Polygons")
tabBox.addTab(self.mesh_opt, QIcon('icons/ic_mesh.svg'), "Mesh Options")
# tabBox.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
v_plotWidget = QVBoxLayout()
v_plotWidget.addWidget(self.plotWidget)
# tile the GUI in two resizable sides
splitter = QSplitter(Qt.Horizontal)
splitter.addWidget(tabBox)
splitter.addWidget(self.plotWidget)
self.setCentralWidget(splitter)
# self.setGeometry(1500, 100, 1000, 600)
# window name
self.setWindowTitle("GIMod")
self.setWindowIcon(QIcon('icons/logo.png'))
self.show()
def menuBarItems(self):
"""Create all entries visible in the menubar and its submenus."""
        # instantiate entries for "About"
self.mb_aboutVerison = QAction("Version", self)
        # instantiate entries for "File"
# action to save a poly figure
self.mb_save_poly = QAction(QIcon('icons/ic_save.svg'), '&Poly', self)
self.mb_save_poly.setStatusTip("Save the created polygon file to a plc")
self.mb_save_poly.setEnabled(False)
# action to save a mesh
self.mb_save_mesh = QAction(QIcon('icons/ic_save.svg'), '&Mesh', self)
self.mb_save_mesh.setStatusTip("Save the generated mesh file")
self.mb_save_mesh.setEnabled(False)
# action to open a file
self.mb_open_file = QAction(QIcon('icons/ic_open.svg'), "&Open File", self)
self.mb_open_file.setStatusTip("Open a file and lets see if GIMod can handle it")
self.mb_open_file.setEnabled(False)
def aboutVersion(self):
"""
Read the file where the version script puts the version number.
Todo
----
+ just generate it on the fly instead of an extra file?!
"""
with open('version.json') as v:
content = v.read()
QMessageBox.information(self, "About", content)
def exportPoly(self):
"""Export the poly figure."""
export_poly = QFileDialog.getSaveFileName(
self, caption='Save Poly Figure')[0]
# if export_poly:
if export_poly.endswith('.poly'):
exportPLC(self.builder.poly, export_poly)
else:
exportPLC(self.builder.poly, export_poly + '.poly')
def exportMesh(self):
"""
Export the final mesh.
Todo
----
+ implement submenus to mesh save for different formats
"""
filename = QFileDialog.getSaveFileName(
self, caption="Save Mesh")[0]
# if export_poly:
if filename.endswith(".bms"):
self.mesh_opt.mesh.save(filename)
# exportFenicsHDF5Mesh(self.mesh_opt.mesh, filename)
else:
self.mesh_opt.mesh.save(filename + '.bms')
# exportFenicsHDF5Mesh(self.mesh_opt.mesh, filename + ".bms")
def openAnyFile(self):
"""
        Open a Qt file dialog box and open a file.
Todo
----
+ open a poly
+ strip down the polyfile to fill the treewidget with editable info
+ open a picture
+ open a mesh
"""
to_open = QFileDialog.getOpenFileName(self, caption="Open File")[0]
if to_open:
self.builder.poly = readPLC(to_open)
self.builder.drawPoly(to_merge=False)
else:
pass
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setApplicationName("GIMod")
main = GIMod()
main.show()
sys.exit(app.exec_())
| gpl-3.0 | -489,390,433,122,109,600 | 36.696721 | 91 | 0.654164 | false |
SUSE/azure-sdk-for-python | azure-batch/azure/batch/models/start_task.py | 3 | 4412 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StartTask(Model):
"""A task which is run when a compute node joins a pool in the Azure Batch
service, or when the compute node is rebooted or reimaged.
:param command_line: The command line of the start task. The command line
does not run under a shell, and therefore cannot take advantage of shell
features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux.
:type command_line: str
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line.
:type resource_files: list of :class:`ResourceFile
<azure.batch.models.ResourceFile>`
:param environment_settings: A list of environment variable settings for
the start task.
:type environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param user_identity: The user identity under which the start task runs.
If omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: :class:`UserIdentity
<azure.batch.models.UserIdentity>`
:param max_task_retry_count: The maximum number of times the task may be
retried. The Batch service retries a task if its exit code is nonzero.
Note that this value specifically controls the number of retries. The
Batch service will try the task once, and may then retry up to this limit.
For example, if the maximum retry count is 3, Batch tries the task up to 4
times (one initial try and 3 retries). If the maximum retry count is 0,
the Batch service does not retry the task. If the maximum retry count is
-1, the Batch service retries the task without limit.
:type max_task_retry_count: int
:param wait_for_success: Whether the Batch service should wait for the
start task to complete successfully (that is, to exit with exit code 0)
before scheduling any tasks on the compute node. If true and the start
task fails on a compute node, the Batch service retries the start task up
to its maximum retry count (maxTaskRetryCount). If the task has still not
completed successfully after all retries, then the Batch service marks the
compute node unusable, and will not schedule tasks to it. This condition
can be detected via the node state and scheduling error detail. If false,
the Batch service will not wait for the start task to complete. In this
case, other tasks can start executing on the compute node while the start
task is still running; and even if the start task fails, new tasks will
continue to be scheduled on the node. The default is false.
:type wait_for_success: bool
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'command_line': {'key': 'commandLine', 'type': 'str'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
}
def __init__(self, command_line, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count=None, wait_for_success=None):
self.command_line = command_line
self.resource_files = resource_files
self.environment_settings = environment_settings
self.user_identity = user_identity
self.max_task_retry_count = max_task_retry_count
self.wait_for_success = wait_for_success
| mit | -3,775,787,359,515,068,000 | 53.469136 | 155 | 0.68563 | false |
Techblogogy/magic-mirror-base | server/cvison/store.py | 1 | 11783 | # from dbase.dbase import dbase as db
from dbase.dataset import Dataset
# from api_cal.weather import Weather
import random, json, requests
class Clothes(Dataset):
# self.d_codes = ["business-casual", "casual", "formal", "sportswear"]
def create_tables(self):
# Import constants
self.tag_limit = self._cfg.getint("DRESS CODE", "tag_limit")
self.site_url = self._cfg.get("DRESS CODE", "dresscode_url")
# Main Clothes Storage Table
self._db.qry("""
CREATE TABLE IF NOT EXISTS clothes (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
name TEXT,
thumbnail TEXT NOT NULL,
dresscode TEXT NOT NULL,
t_wears INT NOT NULL DEFAULT 0,
liked INT NOT NULL DEFAULT 0,
deleted INTEGER NOT NULL DEFAULT 0
)
""")
# Clothes tags table
self._db.qry("""
CREATE TABLE IF NOT EXISTS clothes_tags (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
c_id INTEGER NOT NULL,
tag TEXT NOT NULL
)
""")
# Clothes Metadata Table (add value when item is worn)
self._db.qry("""
CREATE TABLE IF NOT EXISTS clothes_meta (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
c_id INTEGER NOT NULL,
temperature INT NOT NULL,
t_time TEXT
)
""")
# Create indexes to speed up perfomance
def init_tables(self):
self._db.qry("CREATE INDEX IF NOT EXISTS code_dx ON clothes(dresscode)")
self._db.qry("CREATE INDEX IF NOT EXISTS wears_dx ON clothes(t_wears)")
self._db.qry("CREATE INDEX IF NOT EXISTS tag_dx ON clothes_tags(tag)")
self._db.qry("CREATE INDEX IF NOT EXISTS id_meta_dx ON clothes_meta(c_id)")
self._db.qry("CREATE INDEX IF NOT EXISTS id_tags_dx ON clothes_tags(c_id)")
# self.fill_tanya()
# Add clothing item
def add(self, dresscode, thumbnail, name=None):
# file = {'file': open(app_dir+'/cls/'+thumbnail, 'rb')}
# r = requests.post(self.site_url, files=file)
# cnt = json.loads(r.content)
# Temporal debug option
cnt = {'dress': [{"code":"casual"}]}
self._log.debug(cnt['dress'])
self._db.qry(
"INSERT INTO clothes(name, thumbnail, dresscode) VALUES (?, ?, ?)",
(name, thumbnail, cnt['dress'][0]['code'], )
)
return self._db.qry("SELECT * FROM clothes WHERE id=?", (self._db.last_id(), ) )
# Add Tags to items
def add_tags(self, c_id, tags):
# self._db.qry("DELETE FROM clothes_tags WHERE c_id=?", (c_id,))
# return "[]"
count = self._db.qry("""
SELECT COUNT(*) as cnt
FROM clothes_tags
WHERE c_id=?
""", (c_id,))[0]["cnt"]
#
# print "[TB count]: %d" % (count)
if count > self.tag_limit:
return "[]"
a_tags = tags.strip().split(",")
a_list = []
# print a_list
for a_tag in a_tags:
a_list.append( (c_id, a_tag, c_id, a_tag) )
self._db.qry_many("""
INSERT INTO clothes_tags(c_id, tag)
SELECT ?,?
WHERE NOT EXISTS(SELECT tag FROM clothes_tags WHERE c_id=? AND tag=?)
""", a_list)
return self._db.qry("SELECT * FROM clothes_tags WHERE c_id=?", (c_id,))
# Returns video id
def get_video(self, id):
self._log.debug("id is: %s", (id))
path = self._db.qry("SELECT thumbnail FROM clothes WHERE id=?", (id,))[0]['thumbnail']
path = path.split(".")
return path[0] + ".mp4"
def get_smart(self, query, lim, ofs):
d_codes = ["business-casual", "casual", "formal", "sportswear"]
# return self._db.qry("SELECT * FROM sqlite_master WHERE type = 'index';")
base_qry = """
SELECT
*,
(SELECT group_concat(tag, ', ') as tags
FROM clothes_tags
WHERE clothes_tags.c_id = clothes.id
GROUP BY c_id) as tags
FROM
(SELECT
c_id,
CASE
WHEN temp_group(temperature) = ? THEN 2
WHEN temp_group(temperature) < ? THEN 1
ELSE 0 END as temp_rank,
temp_group(temperature) as temp,
COUNT(temp_group(temperature)) as temp_count,
(SELECT MAX(t_time) FROM clothes_meta WHERE clothes_meta.c_id=cm.c_id ) as last_date
FROM clothes_meta as cm
GROUP BY c_id, temp
ORDER BY temp_rank DESC, temp DESC, temp_count DESC) as t_qry
JOIN clothes ON( clothes.id=t_qry.c_id )
WHERE deleted = 0 %s
ORDER BY liked DESC, temp_rank DESC, temp DESC, t_wears DESC, temp_count DESC
LIMIT ? OFFSET ?
"""
w_rng = self.weather.w_temp_range()[0]
w_temp = self._db._temp_group(w_rng)
self._log.debug("[DEBUG] Current temperatue: %d", (w_rng))
self._log.debug("[DEBUG] Temperature Range: %d", (w_temp))
# self._log.debug( self._db.qry(base_qry) )
#
# return ""
try:
d_codes.index(query)
return self._db.qry(base_qry % ("AND dresscode=?"), (w_temp, w_temp, query, lim, ofs*lim))
except ValueError:
return self._db.qry(base_qry % ("AND tags LIKE ?"), (w_temp, w_temp, "%"+query+"%", lim, ofs*lim))
except:
return {'error': "TOTAL ERROR"}
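        # Summary of the ranking above (comment added for clarity, not in the original
        # code): items are grouped by the temperature band (temp_group) they were worn
        # in, ranked 2 when that band equals today's band, 1 when it is a lower band,
        # 0 otherwise, then ordered by liked flag, band rank, total wears and how often
        # the item was worn in that band. `query` is matched against the four dress
        # codes first and, failing that, against the comma-joined tag list.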
# Get all items
def get_all(self):
return self._db.qry("""
SELECT
id, thumbnail, dresscode, t_wears,
(SELECT group_concat(tag, ', ') as tags
FROM clothes_tags
WHERE clothes_tags.c_id = clothes.id
GROUP BY c_id) as tags
FROM clothes
WHERE deleted=0
""")
# Get items in range
def get(self, lim, ofs):
return self._db.qry("""
SELECT id, thumbnail, dresscode, t_wears,
(SELECT group_concat(tag, ', ') as tags
FROM clothes_tags
WHERE clothes_tags.c_id = clothes.id
GROUP BY c_id) as tags
FROM clothes
WHERE deleted=0
ORDER BY id DESC
LIMIT ? OFFSET ?
""",
(lim, ofs*lim)
)
# Get page items
def page_count(self, pp):
all_items = self._db.qry("SELECT COUNT(*) as ct FROM clothes")[0]["ct"]
return all_items/pp
# Get item by id
def get_item(self, id):
return self._db.qry("""
SELECT
id, thumbnail, dresscode, t_wears,
(SELECT group_concat(tag, ', ') as tags
FROM clothes_tags
WHERE clothes_tags.c_id = clothes.id
GROUP BY c_id) as tags
FROM clothes
WHERE deleted=0 AND id=?
""",
(id,)
)
# Mark item as worn
def worn(self, id):
self._db.qry(
"UPDATE clothes SET t_wears=t_wears+1 WHERE id=?",
(id, )
)
self._db.qry(
"INSERT INTO clothes_meta (c_id, temperature, t_time) VALUES (?, ?, date('now'))",
(id, self.weather.w_current_temp(), )
)
return ""
# return self._db.qry("SELECT * FROM clothes_meta")
# return self.weather.w_current_temp()
# Get items meta
def get_meta(self):
return self._db.qry("""
SELECT * FROM clothes_meta
""")
def worn_tmp(self, c_id, w, dt):
self._db.qry(
"UPDATE clothes SET t_wears=t_wears+1 WHERE id=?",
(c_id, )
)
self._db.qry(
"INSERT INTO clothes_meta (c_id, temperature, t_time) VALUES (?, ?, ?)",
(c_id, w, dt,)
)
# Like item (ID of element, Like state (0) for no, (1) for yes)
def set_like(self, id, like):
self._db.qry(
"UPDATE clothes SET liked=? WHERE id=?",
(id, like, )
)
def delete(self, id):
self._db.qry("DELETE FROM clothes WHERE id=?", (id, ))
self._db.qry("DELETE FROM clothes_meta WHERE id=?", (id, ))
self._db.qry("DELETE FROM clothes_tags WHERE id=?", (id, ))
def fill_tanya(self):
d_tags = ["university", "meetups", "work", "whatelse", "favourite"]
# Clear out clothes table
self._db.qry("DELETE FROM clothes")
self._db.qry("VACUUM")
self._db.qry("DELETE FROM sqlite_sequence WHERE name='clothes'")
# Clear out clothes meta table
self._db.qry("DELETE FROM clothes_meta")
self._db.qry("VACUUM")
self._db.qry("DELETE FROM sqlite_sequence WHERE name='clothes_meta'")
# Clear out clothes tags table
self._db.qry("DELETE FROM clothes_tags")
self._db.qry("VACUUM")
self._db.qry("DELETE FROM sqlite_sequence WHERE name='clothes_tags'")
for i in range(1,12):
# print random.choice(d_codes)
self.add("casual", "thum%s.jpg"%str(i))
i_id = i #self._db.last_id()
self._log.debug(i_id)
# Randomly add tags
for t in range( 1, random.randint(1, 4) ):
self.add_tags(i_id, random.choice(d_tags) )
            # # 10% chance to set like
            # if random.random() <= 0.1:
            #     self.set_like(i_id, 1)
# Randomly wear items
for a in range(1,random.randint(2,40)):
self.worn_tmp(str(i_id), str(random.randint(-15,30)), "%s-%02d-%02d"%( str(random.randint(2013,2016)), random.randint(1,8), random.randint(1,30) ) )
# NOTE: Testing data fill
def fill_junk(self):
d_codes = ["business-casual", "casual", "formal", "sportswear"]
d_tags = ["clubwear", "meetups", "beach", "work", "time", "special", "bugs", "whatistag", "needhelp", "surprise", "nonono", "whatelse"]
# Clear out clothes table
self._db.qry("DELETE FROM clothes")
self._db.qry("VACUUM")
self._db.qry("DELETE FROM sqlite_sequence WHERE name='clothes'")
# Clear out clothes meta table
self._db.qry("DELETE FROM clothes_meta")
self._db.qry("VACUUM")
self._db.qry("DELETE FROM sqlite_sequence WHERE name='clothes_meta'")
# Clear out clothes tags table
self._db.qry("DELETE FROM clothes_tags")
self._db.qry("VACUUM")
self._db.qry("DELETE FROM sqlite_sequence WHERE name='clothes_tags'")
for i in range(1,100):
# print random.choice(d_codes)
self.add(random.choice(d_codes), "thum%s.jpg"%str(random.randint(1,13)))
i_id = self._db.last_id()
# Randomly add tags
for t in range( 1, random.randint(1, len(d_tags)+1 ) ):
self.add_tags(i_id, random.choice(d_tags) )
            # 10% chance to set like
            if random.random() <= 0.1:
                self.set_like(i_id, 1)
# Randomly wear items
for a in range(1,random.randint(2,40)):
self.worn_tmp(str(i_id), str(random.randint(-15,30)), "%s-%02d-%02d"%( str(random.randint(2013,2016)), random.randint(1,8), random.randint(1,30) ) )
return self.get_all()
# EDIT dresscode
def edit_dresscode(self, c_id, dresscode):
self._db.qry("UPDATE clothes SET dresscode=? WHERE id=?", (dresscode, c_id, ))
# return "[]"
| gpl-3.0 | -107,756,641,582,690,480 | 32.665714 | 164 | 0.51778 | false |
cfelton/rhea | rhea/build/boards/xilinx/_atlys.py | 1 | 1343 |
from rhea.build import FPGA
from rhea.build.toolflow import ISE
class Atlys(FPGA):
vendor = 'xilinx'
family = 'spartan6'
device = 'XC6SLX45'
package = 'CSG324'
speed = -3
_name = 'atlys'
default_clocks = {
'clock': dict(frequency=50e6, pins=('L15',)),
}
default_resets = {
'reset': dict(active=0, isasync=True, pins=('T15',)),
}
default_ports = {
'led': dict(pins=('U18', 'M14', 'N14', 'L14',
'M13', 'D4', 'P16', 'N12',)),
'btn': dict(pins=('N4', 'P4', 'P3', 'F6', 'F5',)),
'sw': dict(pins=('A10', 'D14', 'C14', 'P15',
'P12', 'R5', 'T5', 'E4',)),
# the port names are from the FPGA perspective, swapped
# from the net names used in the digilent reference manual
'uart_rx': dict(pins=('A16',)), # external transmit / internal rx
'uart_tx': dict(pins=('B16',)), # external receive / internal tx
'pmod': dict(pins=('T3', 'R3', 'P6', 'N5',
'V9', 'T9', 'V4', 'T4')),
# VHDCI expansion connector (diff pairs)
'iop': dict(pins=('U16', 'U15', 'U13', 'M11',)),
'ion': dict(pins=('V16', 'V15', 'V13', 'N11',))
}
def get_flow(self, top=None):
return ISE(brd=self, top=top) | mit | -5,544,081,105,533,911,000 | 28.866667 | 74 | 0.481757 | false |
sofianehaddad/gosa | pygosa/tests/polynomial_uc.py | 1 | 1636 | # -*- Python -*-
#
# @file polynomial_uc.py
# @brief Polynomial use case, probabilistic and functions models
#
# Copyright (C) 2017 Airbus-IMACS
#
# Written by Sofiane Haddad, [email protected]
# Nabil Rachdi, [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
import openturns as ot
def define_polynomial_function(beta=1):
"""
Model function of the use case
"""
# Problem parameters
dimension = 2
# Create the function
input_variables = ["xi1","xi2"]
formula = ["3.0 + " + "xi1 + " + str(beta) + " * xi2"]
polynomial_model = ot.SymbolicFunction(input_variables, formula)
polynomial_model.setName("Polynomial")
return polynomial_model
def define_polynomial_distribution():
"""
Probabilistic model
"""
# Create the input distribution
marginals = 2 * [ot.Normal(0, 1)]
distribution_polynomial = ot.ComposedDistribution(marginals)
return distribution_polynomial
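# Illustrative use of the two helpers above (a sketch; assumes OpenTURNS is
# available and that the caller wants model evaluations on a random sample):
#   model = define_polynomial_function(beta=2.0)
#   dist = define_polynomial_distribution()
#   sample = dist.getSample(100)   # 100 random (xi1, xi2) points
#   values = model(sample)         # evaluates 3 + xi1 + 2 * xi2 row by row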
| lgpl-3.0 | -4,907,977,554,803,348,000 | 33.083333 | 78 | 0.682763 | false |
PaddlePaddle/Paddle | python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py | 1 | 2795 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.optimizer import GradientMergeOptimizer as GM
from .meta_optimizer_base import MetaOptimizerBase
__all__ = []
class GradientMergeOptimizer(MetaOptimizerBase):
def __init__(self, optimizer):
super(GradientMergeOptimizer, self).__init__(optimizer)
self.inner_opt = optimizer
self.wrapped_opt = None
self.meta_optimizers_white_list = [
"AMPOptimizer",
"LarsOptimizer",
"LambOptimizer",
"GraphExecutionOptimizer",
"RecomputeOptimizer",
]
self.meta_optimizers_black_list = []
def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
user_defined_strategy):
super(GradientMergeOptimizer, self)._set_basic_info(
loss, role_maker, user_defined_optimizer, user_defined_strategy)
def _init_wrapped_opt(self):
config = self.user_defined_strategy.gradient_merge_configs
self.wrapped_opt = GM(self.inner_opt)
self.wrapped_opt._set_k_steps(
self.user_defined_strategy.gradient_merge_configs["k_steps"])
self.wrapped_opt._set_avg(
self.user_defined_strategy.gradient_merge_configs["avg"])
def _can_apply(self):
if not self.role_maker._is_collective:
return False
can_apply = (self.user_defined_strategy.gradient_merge == True) and \
self.user_defined_strategy.gradient_merge_configs["k_steps"] > 1
return can_apply
def _disable_strategy(self, dist_strategy):
dist_strategy.gradient_merge = False
dist_strategy.gradient_merge_configs = {}
def _enable_strategy(self, dist_strategy, context):
# we currently do not support auto-enable GradientMerge
return
def minimize_impl(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
self._init_wrapped_opt()
optimize_ops, params_grads = \
self.wrapped_opt.minimize(loss, startup_program,
parameter_list, no_grad_set)
return optimize_ops, params_grads
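# Illustrative fleet configuration that would route training through this meta
# optimizer (a sketch; the strategy fields mirror the checks in _can_apply and
# _init_wrapped_opt above, everything else about the training loop is assumed):
#   import paddle.distributed.fleet as fleet
#   strategy = fleet.DistributedStrategy()
#   strategy.gradient_merge = True
#   strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
#   fleet.init(is_collective=True, strategy=strategy)
#   optimizer = fleet.distributed_optimizer(inner_optimizer, strategy=strategy)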
| apache-2.0 | 5,555,346,110,310,294,000 | 37.819444 | 77 | 0.63864 | false |
vjpai/grpc | tools/run_tests/python_utils/filter_pull_request_tests.py | 5 | 7854 | #!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filter out tests based on file differences compared to merge target branch"""
from __future__ import print_function
import re
import six
import subprocess
class TestSuite:
"""
Contains label to identify job as belonging to this test suite and
triggers to identify if changed files are relevant
"""
def __init__(self, labels):
"""
Build TestSuite to group tests based on labeling
        :param labels: strings that should match a job's platform, config, language, or test group
"""
self.triggers = []
self.labels = labels
def add_trigger(self, trigger):
"""
        Add a regex to the list of triggers that determine if a changed file should run tests
        :param trigger: regex matching files relevant to these tests
"""
self.triggers.append(trigger)
# Create test suites
_CORE_TEST_SUITE = TestSuite(['c'])
_CPP_TEST_SUITE = TestSuite(['c++'])
_CSHARP_TEST_SUITE = TestSuite(['csharp'])
_NODE_TEST_SUITE = TestSuite(['grpc-node'])
_OBJC_TEST_SUITE = TestSuite(['objc'])
_PHP_TEST_SUITE = TestSuite(['php', 'php7'])
_PYTHON_TEST_SUITE = TestSuite(['python'])
_RUBY_TEST_SUITE = TestSuite(['ruby'])
_LINUX_TEST_SUITE = TestSuite(['linux'])
_WINDOWS_TEST_SUITE = TestSuite(['windows'])
_MACOS_TEST_SUITE = TestSuite(['macos'])
_ALL_TEST_SUITES = [
_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE, _NODE_TEST_SUITE,
_OBJC_TEST_SUITE, _PHP_TEST_SUITE, _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE,
_LINUX_TEST_SUITE, _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE
]
# Dictionary of allowlistable files where the key is a regex matching changed files
# and the value is a list of tests that should be run. An empty list means that
# the changed files should not trigger any tests. Any changed file that does not
# match any of these regexes will trigger all tests
# DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do)
_ALLOWLIST_DICT = {
'^doc/': [],
'^examples/': [],
'^include/grpc\+\+/': [_CPP_TEST_SUITE],
'^include/grpcpp/': [_CPP_TEST_SUITE],
'^summerofcode/': [],
'^src/cpp/': [_CPP_TEST_SUITE],
'^src/csharp/': [_CSHARP_TEST_SUITE],
'^src/objective\-c/': [_OBJC_TEST_SUITE],
'^src/php/': [_PHP_TEST_SUITE],
'^src/python/': [_PYTHON_TEST_SUITE],
'^src/ruby/': [_RUBY_TEST_SUITE],
'^templates/': [],
'^test/core/': [_CORE_TEST_SUITE, _CPP_TEST_SUITE],
'^test/cpp/': [_CPP_TEST_SUITE],
'^test/distrib/cpp/': [_CPP_TEST_SUITE],
'^test/distrib/csharp/': [_CSHARP_TEST_SUITE],
'^test/distrib/php/': [_PHP_TEST_SUITE],
'^test/distrib/python/': [_PYTHON_TEST_SUITE],
'^test/distrib/ruby/': [_RUBY_TEST_SUITE],
'^tools/run_tests/xds_k8s_test_driver/': [],
'^tools/internal_ci/linux/grpc_xds_k8s.*': [],
'^vsprojects/': [_WINDOWS_TEST_SUITE],
'composer\.json$': [_PHP_TEST_SUITE],
'config\.m4$': [_PHP_TEST_SUITE],
'CONTRIBUTING\.md$': [],
'Gemfile$': [_RUBY_TEST_SUITE],
'grpc\.def$': [_WINDOWS_TEST_SUITE],
'grpc\.gemspec$': [_RUBY_TEST_SUITE],
'gRPC\.podspec$': [_OBJC_TEST_SUITE],
'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
'gRPC\-ProtoRPC\.podspec$': [_OBJC_TEST_SUITE],
'gRPC\-RxLibrary\.podspec$': [_OBJC_TEST_SUITE],
'BUILDING\.md$': [],
'LICENSE$': [],
'MANIFEST\.md$': [],
'package\.json$': [_PHP_TEST_SUITE],
'package\.xml$': [_PHP_TEST_SUITE],
'PATENTS$': [],
'PYTHON\-MANIFEST\.in$': [_PYTHON_TEST_SUITE],
'README\.md$': [],
'requirements\.txt$': [_PYTHON_TEST_SUITE],
'setup\.cfg$': [_PYTHON_TEST_SUITE],
'setup\.py$': [_PYTHON_TEST_SUITE]
}
# Regex that combines all keys in _ALLOWLIST_DICT
_ALL_TRIGGERS = "(" + ")|(".join(_ALLOWLIST_DICT.keys()) + ")"
# Add all triggers to their respective test suites
for trigger, test_suites in six.iteritems(_ALLOWLIST_DICT):
for test_suite in test_suites:
test_suite.add_trigger(trigger)
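# For illustration: a changed file such as "src/python/grpcio/grpc/__init__.py"
# matches the '^src/python/' trigger above, so only the Python suite is treated
# as relevant, while a file matching none of the triggers (e.g. "BUILD") causes
# the full test matrix to run (see filter_tests below).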
def _get_changed_files(base_branch):
"""
Get list of changed files between current branch and base of target merge branch
"""
# Get file changes between branch and merge-base of specified branch
# Not combined to be Windows friendly
base_commit = subprocess.check_output(
["git", "merge-base", base_branch, "HEAD"]).decode("UTF-8").rstrip()
return subprocess.check_output(
["git", "diff", base_commit, "--name-only",
"HEAD"]).decode("UTF-8").splitlines()
def _can_skip_tests(file_names, triggers):
"""
Determines if tests are skippable based on if all files do not match list of regexes
:param file_names: list of changed files generated by _get_changed_files()
:param triggers: list of regexes matching file name that indicates tests should be run
:return: safe to skip tests
"""
for file_name in file_names:
if any(re.match(trigger, file_name) for trigger in triggers):
return False
return True
def _remove_irrelevant_tests(tests, skippable_labels):
"""
Filters out tests by config or language - will not remove sanitizer tests
:param tests: list of all tests generated by run_tests_matrix.py
:param skippable_labels: list of languages and platforms with skippable tests
:return: list of relevant tests
"""
# test.labels[0] is platform and test.labels[2] is language
# We skip a test if both are considered safe to skip
return [test for test in tests if test.labels[0] not in skippable_labels or \
test.labels[2] not in skippable_labels]
def affects_c_cpp(base_branch):
"""
Determines if a pull request's changes affect C/C++. This function exists because
there are pull request tests that only test C/C++ code
:param base_branch: branch that a pull request is requesting to merge into
:return: boolean indicating whether C/C++ changes are made in pull request
"""
changed_files = _get_changed_files(base_branch)
# Run all tests if any changed file is not in the allowlist dictionary
for changed_file in changed_files:
if not re.match(_ALL_TRIGGERS, changed_file):
return True
return not _can_skip_tests(
changed_files, _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers)
def filter_tests(tests, base_branch):
"""
Filters out tests that are safe to ignore
:param tests: list of all tests generated by run_tests_matrix.py
:return: list of relevant tests
"""
print(
'Finding file differences between gRPC %s branch and pull request...\n'
% base_branch)
changed_files = _get_changed_files(base_branch)
for changed_file in changed_files:
print(' %s' % changed_file)
print('')
# Run all tests if any changed file is not in the allowlist dictionary
for changed_file in changed_files:
if not re.match(_ALL_TRIGGERS, changed_file):
return (tests)
# Figure out which language and platform tests to run
skippable_labels = []
for test_suite in _ALL_TEST_SUITES:
if _can_skip_tests(changed_files, test_suite.triggers):
for label in test_suite.labels:
print(' %s tests safe to skip' % label)
skippable_labels.append(label)
tests = _remove_irrelevant_tests(tests, skippable_labels)
return tests
| apache-2.0 | 6,104,482,322,715,836,000 | 37.312195 | 94 | 0.662338 | false |
BinPy/BinPy | BinPy/analog/converters.py | 4 | 18201 | from BinPy import *
import math
class A2D(object):
"""
    This class is used to convert a single analog connector / Bus carrying an analog value
    to a 64 bit / 32 bit IEEE floating point equivalent representation.
    The output can also be 4 / 8 / 16 bit wide, based on the requirement.
TYPE OF ADC
===========
Successive Approximation Type ADC. No clock is needed.
Conversion happens with a minor delay.
At the end of the conversion the valid bit is set.
Refer: http://www.asdlib.org/onlineArticles/elabware/Scheeline_ADC/ADC_ADC_SucAprrox.html
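    For instance (an illustrative walk-through, not output of the code): with
    refp - refn = 5.0 V, scale = 1 and a 4 bit output, the successive trial
    steps are 2.5 V, 1.25 V, 0.625 V and 0.3125 V; a 3.3 V input keeps the
    2.5 V and 0.625 V steps and rejects the other two, giving the code 1010.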
IEEE 754 STANDARD
=================
This module also allows conversion to IEEE 754 Single / Double Precision Format.
The converted floating point value is a direct absolute representation of the analog input.
Refer: http://en.wikipedia.org/wiki/IEEE_floating_point
ATTRIBUTES / PARAMETERS :
========================
analog_input - Analog type connector
    digital_output      - Digital type Bus output
typ - 1 : 4 bit output
- 2 : 8 bit output
- 3 : 16 bit output
- 4 : 32 bit output [ IEEE-754 Floating Point Format ]
                        - 5 : 64 bit output [ IEEE-754 Floating Point Format ]
refp - Positive reference.
refn - Negative reference.
scale - scale factor to be multiplied with the voltage before conversion.
    valid               - 1-Bit digital Bus indicating the end of conversion.
USAGE
=====
>>> input_analog = Bus(Connector(voltage = 6.4))
>>> input_analog.set_type(analog = True)
>>> output_digital = Bus(16)
>>> VREF = Connector(voltage = 5.0)
>>> GND = Connector(voltage = 0)
>>> a2d_16bit = A2D(input_analog, output_digital, 3, VREF, GND, scale = 0.5)
>>> time.sleep(0.5) # To allow conversion to take place.
>>> print output_digital.get_logic_all(as_list = False)
0b1010001111010111
>>> input_analog[0].set_voltage(4.2)
>>> time.sleep(0.5) # To allow conversion to take place.
>>> print output_digital.get_logic_all(as_list = False)
0b0110101110000101
>>> ieee_64bit = Bus(64)
>>> a2d_IEEE64 = A2D(input_analog, ieee_64bit, 5)
>>> time.sleep(0.5) # To allow conversion to take place.
>>> print ieee_64bit.get_logic_all(as_list = False)
0b0100000000010000110011001100110011001100110011001100110011001101
http://babbage.cs.qc.cuny.edu/IEEE-754.old/Decimal.html
"""
def __init__(
self,
analog_input,
digital_output,
typ,
refp=None,
refn=None,
scale=1):
# Input signal attenuation factor
self.scale = float(scale)
self.typ = typ
self.valid = Bus(1)
self._history = None
self._enable_history = None
self.inputs = Bus(1)
self.inputs.set_type(analog=True)
self.enable = Bus(1)
self.enable[0].set_logic(1)
self.ref = Bus(2)
self.ref.set_type(analog=True)
if typ not in range(1, 6):
raise Exception("ERROR: Invalid output type")
self.outputs = Bus(2 ** (typ + 1))
if not isinstance(digital_output, Bus):
raise Exception(
"ERROR: Invalid ouput. Only Bus can serve as an output.")
if typ not in [4, 5]:
# Linking the reference inputs.
if (type(refp) not in [Bus, Connector]) or (
type(refn) not in [Bus, Connector]):
raise Exception("ERROR: Invalid reference inputs")
else:
self.set_ref(refp, refn)
self.set_inputs(analog_input)
self.set_outputs(digital_output)
# Update the values.
self.trigger()
@property
def possible_states(self):
return 2 ** (self.outputs.width)
@property
def resolution(self):
if self.typ in [1, 2, 3]:
vref = 0.0
vref = float(self.ref[0]) - float(self.ref[1])
return float(vref) / float(self.possible_states)
elif self.typ == 4:
return 1.0e-05
else:
return 1.0e-10 # approximate resolutions.
def set_inputs(self, analog_input):
"""
To 'link' the analog connector with the module's inputs.
"""
with AutoUpdater._lock:
if isinstance(
analog_input,
Bus) and analog_input.analog and (
analog_input.width == 1):
AutoUpdater.remove_link(
self.inputs) # Remove old links to the inputs
AutoUpdater.add_link(
analog_input,
self.inputs)
elif isinstance(analog_input, Connector) and (analog_input.analog):
AutoUpdater.remove_link(
self.inputs) # Remove old links to the inputs
AutoUpdater.add_link(
[analog_input],
self.inputs)
else:
raise Exception(
"ERROR: Invalid input. Only Analog Connnector / Bus can be linked to input")
def set_outputs(self, outputs):
"""
To link the output of the A2D instance with the external outputs
"""
with AutoUpdater._lock:
if not isinstance(outputs, Bus):
raise Exception(
"ERROR: Invalid output. Output must only be a Bus instance")
AutoUpdater.remove_link(
self.outputs) # Remove old links from the output
AutoUpdater.add_link(
self.outputs,
outputs,
bind_to=A2D.trigger,
params=[self])
def set_enable(self, enable):
"""
Link the external enable bus / connector to the A2D instance.
"""
with AutoUpdater._lock:
AutoUpdater.remove_link(enable)
if isinstance(enable, Bus) or isinstance(enable, Connector):
AutoUpdater.add_link(
enable,
self.enable)
else:
raise Exception(
"ERROR: Invalid input. Only Analog Connnector / Bus can be linked to input")
def set_ref(self, refp, refn):
"""
Sets the reference voltage levels. Accepts 1-Bit Bus / Connector.
"""
with AutoUpdater._lock:
AutoUpdater.remove_link(self.ref)
AutoUpdater.add_link(
refp,
self.ref[0])
AutoUpdater.add_link(
refn,
self.ref[1])
def set_valid(self, val):
self.valid[0].set_logic(bool(val))
def trigger(self):
with AutoUpdater._lock:
cur_inputs = float(self.inputs[0])
cur_ref = (float(self.ref[0]), float(self.ref[1]))
cur_enable = bool(self.enable[0])
# return if the input has not changed.
if not cur_enable:
self.set_valid(True)
return
ref = 0
self.set_valid(False)
ref = cur_ref[0] - cur_ref[1]
outp = ''
analog_val = (cur_inputs - cur_ref[1]) * float(self.scale)
cumulative_op = 0.0
if self.typ in range(1, 4):
# Successively approximate and set the bits accordingly.
for i in range(self.outputs.width):
ref_i = float(ref) / float(2 ** (i + 1))
if (float(cumulative_op) + ref_i) < analog_val:
cumulative_op += float(ref_i)
outp += '1'
else:
outp += '0'
with AutoUpdater._lock:
self.outputs.set_logic_all(outp)
self.set_valid(True)
return
if analog_val == 0.0:
with AutoUpdater._lock:
self.outputs.set_logic_all('0' * self.outputs.width)
self.set_valid(True)
return
if self.typ == 4:
len_mant = 23
len_exp = 8
excess = 127
elif self.typ == 5:
len_mant = 52
len_exp = 11
excess = 1023
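        # The block below hand-builds the IEEE-754 bit pattern (a sketch of the
        # standard encode procedure):
        #   s - sign bit, from the sign of the scaled analog value
        #   b - mantissa bits, obtained by repeatedly doubling the fractional
        #       part and recording the integer carry of each step
        #   e - exponent, taken from the distance between the binary point and
        #       the leading 1, stored with the excess (bias) added
        # The pieces are finally packed as sign | exponent | mantissa.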
s = '0' if analog_val >= 0 else '1'
analog = abs(analog_val)
frac = analog - math.floor(analog)
b = ''
i = 0
flag = 0
ttl = excess
while i <= len_mant + 1:
trial = frac * 2.0
trial_decimal = math.floor(trial)
trial_fraction = trial - trial_decimal
b += str(int(trial_decimal))
frac = trial_fraction
if (not flag) and (b[-1] == '1'):
flag = 1
i += flag
# Maximum iterations is limited by the no of binary shifts.
ttl -= 1
if ttl < 0: # Maximum possible no. of binary shifts
break
m = bin(int(math.floor(analog)))[2:] + '.' + ''.join(map(str, b))
e = m.find('.') - m.find('1')
e = bin(
(e +
excess) if e < 0 else (
e -
1 +
excess))[
2:].zfill(len_exp)
ieee = '0b' + s[:1] + e[:len_exp] + \
(m[m.find('1') + 1:]).replace('.', '')[:len_mant].ljust(len_mant, '0')
with AutoUpdater._lock:
self.outputs.set_logic_all(ieee)
self.set_valid(True)
def __del__(self):
try:
BinPyIndexer.unindex(self)
AutoUpdater.remove_link(self.inputs)
AutoUpdater.remove_link(self.outputs)
AutoUpdater.remove_link(self.enable)
except (AttributeError, KeyError, ValueError, TypeError) as e:
pass
class D2A(object):
"""
Digital To Analog Converter Block.
This Block is used to Convert 4 / 8 / 16 Bit wide digital representation to
its analog equivalent.
Conversion delay is very minimal.
IEEE 754 STANDARDS CONVERSION
=============================
It also has provision to convert from 32 Bit ( Single Precision ) or
64 Bit ( Double Precision ) IEEE 754 Format to its analog Equivalent.
ATTRIBUTES:
==========
    digital_inputs      - Digital type Bus input
typ - 1 : 4 bit input
- 2 : 8 bit input
- 3 : 16 bit input
- 4 : 32 bit input [ IEEE-754 Floating Point Format ]
                        - 5 : 64 bit input [ IEEE-754 Floating Point Format ]
refp - Positive reference.
refn - Negative reference.
gnd - Module reference.
scale - scale factor to be multiplied with the voltage before conversion.
USAGE:
=====
>>> output_analog = Bus(Connector(voltage = 6.4))
>>> output_analog.set_type(analog = True)
>>> input_digital = Bus(16)
>>> input_digital.set_logic_all('0110101110000101')
>>> VREF = Connector(voltage = 5.0)
>>> GND = Connector(voltage = 0)
>>> d2a_16bit = D2A(input_digital, output_analog, 3, VREF, GND, scale = 2)
>>> time.sleep(0.1) # To allow conversion to take place.
>>> print output_analog[0].get_voltage()
4.19998168945
>>> ieee_64bit = Bus(64)
>>> ieee_64bit.set_logic_all('0b0100000000011001100110011001100110011001100110011001100110011010')
>>> ieee_packed = Bus(1)
>>> ieee_packed.set_type(analog = True)
>>> d2a_ieee64 = D2A(ieee_64bit, ieee_packed, 5)
>>> time.sleep(0.1)
>>> print ieee_packed[0].get_voltage()
http://babbage.cs.qc.cuny.edu/IEEE-754.old/Decimal.html
"""
def __init__(
self,
digital_inputs,
analog_output,
typ,
refp=None,
refn=None,
scale=1):
if typ not in range(1, 6):
raise Exception("ERROR: Invalid output type")
# Input signal attenuation factor
self.scale = float(scale)
self.typ = typ
self.valid = Bus(Connector(1))
self._history = None
self._enable_history = None
self.outputs = Bus(1)
self.outputs.set_type(analog=True)
self.enable = Bus(1)
self.enable[0].set_logic(1)
self.ref = Bus(2) # ref+, ref-
self.ref.set_type(analog=True)
self.inputs = Bus(2 ** (typ + 1))
self.inputs.set_type(analog=False)
if not isinstance(digital_inputs, Bus):
raise Exception(
"ERROR: Invalid input. Only Bus can serve as an input.")
if typ not in [4, 5]:
# Linking the reference inputs.
if (type(refp) not in [Bus, Connector]) or (
type(refn) not in [Bus, Connector]):
raise Exception("ERROR: Invalid reference inputs")
else:
self.set_ref(refp, refn)
self.set_inputs(digital_inputs)
self.set_outputs(analog_output)
# Update the values.
self.trigger()
@property
def possible_states(self):
return 2 ** (self.inputs.width)
@property
def resolution(self):
if self.typ in [1, 2, 3]:
vref = 0.0
vref = float(self.ref[0]) - float(self.ref[1])
return float(vref) / float(self.possible_states)
elif self.typ == 4:
return 1.0e-05
else:
return 1.0e-10 # approximate resolutions.
def set_inputs(self, digital_inputs):
"""
To 'link' the Digital Bus with the module's inputs.
"""
with AutoUpdater._lock:
if isinstance(digital_inputs, Bus) and (not digital_inputs.analog) and (
digital_inputs.width == 2 ** (self.typ + 1)):
AutoUpdater.remove_link(
self.inputs) # Remove old links to the inputs
AutoUpdater.add_link(
digital_inputs,
self.inputs)
else:
raise Exception(
"ERROR: Invalid input. Only Digital Bus can be linked to input")
def set_outputs(self, analog_output):
"""
To link the output of the D2A instance with the external output
"""
with AutoUpdater._lock:
if isinstance(analog_output, Bus) and (analog_output.analog):
AutoUpdater.remove_link(
self.outputs) # Remove old links from the output
AutoUpdater.add_link(
self.outputs,
analog_output,
bind_to=D2A.trigger,
params=[self])
elif isinstance(analog_output, Connector) and (analog_output.analog):
AutoUpdater.remove_link(
self.outputs) # Remove old links from the outputs
AutoUpdater.add_link(
self.outputs,
[analog_output],
bind_to=D2A.trigger,
params=[self])
            else:
                raise Exception(
                    "ERROR: Invalid output. Output must only be an analog Bus / Connector instance")
def set_enable(self, enable):
"""
Link the external enable bus / connector to the A2D instance.
"""
with AutoUpdater._lock:
AutoUpdater.remove_link(enable)
if isinstance(enable, Bus):
AutoUpdater.add_link(
enable,
self.enable)
elif isinstance(enable, Connector):
AutoUpdater.add_link(
[enable],
self.enable)
else:
raise Exception(
"ERROR: Invalid input. Only Analog Connnector / Bus can be linked to input")
def set_ref(self, refp, refn):
"""
Sets the reference voltage levels
"""
with AutoUpdater._lock:
AutoUpdater.remove_link(self.ref)
AutoUpdater.add_link(
refp,
self.ref[0])
AutoUpdater.add_link(
refn,
self.ref[1])
def set_valid(self, val):
self.valid[0].set_logic(bool(val))
def trigger(self):
with AutoUpdater._lock:
cur_inputs = self.inputs.get_logic_all()
cur_inputsb = self.inputs.get_logic_all(as_list=False)
cur_enable = bool(self.enable[0])
cur_ref = (float(self.ref[0]), float(self.ref[1]))
cur_resl = self.resolution
if not cur_enable:
# The output is valid for the given inputs ( enable set to false )
self.set_valid(True)
return
self.set_valid(False)
ref = cur_ref[0] - cur_ref[1]
dig = cur_inputsb
if self.typ in range(1, 4):
analog = float(int(dig, 2)) * float(cur_resl)
result = (analog + float(cur_ref[1])) * float(self.scale)
with AutoUpdater._lock:
self.outputs[0].set_voltage(result)
self.set_valid(True)
return
dig = dig[2:]
if self.typ == 4:
len_mant = 23
len_exp = 8
excess = 127
elif self.typ == 5:
len_mant = 52
len_exp = 11
excess = 1023
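        # Reverse of the A2D packing (approximate IEEE-754 decode): the sign is
        # read from the first bit, the exponent field has its excess (bias)
        # removed, and the mantissa field is rescaled to a fraction before the
        # usual sign * 2**exponent * (1 + mantissa) reconstruction.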
s = 1 if dig[0] == '0' else -1
exp = 2 ** (int(dig[1:(len_exp + 1)], 2) - excess)
man = (float(1) / ((2 ** len_mant) - 1)) * \
float(int(dig[len_exp + 1:], 2))
analog = s * exp * (man + 1)
result = (analog + float(cur_ref[1])) * float(self.scale)
with AutoUpdater._lock:
self.outputs[0].set_voltage(result)
self.set_valid(True)
def __del__(self):
try:
BinPyIndexer.unindex(self)
AutoUpdater.remove_link(self.inputs)
AutoUpdater.remove_link(self.outputs)
AutoUpdater.remove_link(self.enable)
except (AttributeError, KeyError, ValueError, TypeError) as e:
pass
| bsd-3-clause | 7,206,825,919,337,837,000 | 29.641414 | 102 | 0.519807 | false |
wolffcm/voltdb | lib/python/voltcli/future.d/sql.py | 7 | 2978 | # This file is part of VoltDB.
# Copyright (C) 2008-2015 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
@VOLT.Command(
bundles = VOLT.ConnectionBundle(),
description = 'Run the interactive SQL interpreter.',
description2 = 'Optional arguments are executed as non-interactive queries.',
options = [
VOLT.EnumOption(None, '--format', 'format',
'output format', 'fixed', 'csv', 'tab',
default = 'fixed'),
VOLT.BooleanOption(None, '--clean', 'clean',
'clean output with no headings or counts',
default = False),
VOLT.BooleanOption(None, '--exception-stacks', 'exception_stacks',
'display exception stack traces',
default = False),
],
arguments = [
VOLT.StringArgument('query', min_count = 0, max_count = None,
help = 'One or more queries to execute non-interactively.')
],
)
def sql(runner):
args = []
if runner.opts.host:
if runner.opts.host.host:
args.append('--servers=%s' % runner.opts.host.host)
if runner.opts.host.port:
args.append('--port=%d' % runner.opts.host.port)
if runner.opts.username:
args.append('--user=%s' % runner.opts.username)
args.append('--password=%s' % runner.opts.password)
for query in runner.opts.query:
args.append('--query=%s' % query)
if runner.opts.format:
args.append('--output-format=%s' % runner.opts.format.lower())
if runner.opts.clean:
args.append('--output-skip-metadata')
if runner.opts.exception_stacks:
args.append('--debug')
runner.java_execute('org.voltdb.utils.SQLCommand', None, *args)
| agpl-3.0 | -4,831,763,133,827,403,000 | 44.121212 | 87 | 0.65413 | false |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/numpy/polynomial/hermite.py | 2 | 52601 | """
Objects for dealing with Hermite series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite series, including a `Hermite` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermdomain` -- Hermite series default domain, [-1,1].
- `hermzero` -- Hermite series that evaluates identically to 0.
- `hermone` -- Hermite series that evaluates identically to 1.
- `hermx` -- Hermite series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermadd` -- add two Hermite series.
- `hermsub` -- subtract one Hermite series from another.
- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
- `hermmul` -- multiply two Hermite series.
- `hermdiv` -- divide one Hermite series by another.
- `hermpow` -- raise a Hermite series to a positive integer power.
- `hermval` -- evaluate a Hermite series at given points.
- `hermval2d` -- evaluate a 2D Hermite series at given points.
- `hermval3d` -- evaluate a 3D Hermite series at given points.
- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product.
- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product.
Calculus
--------
- `hermder` -- differentiate a Hermite series.
- `hermint` -- integrate a Hermite series.
Misc Functions
--------------
- `hermfromroots` -- create a Hermite series with specified roots.
- `hermroots` -- find the roots of a Hermite series.
- `hermvander` -- Vandermonde-like matrix for Hermite polynomials.
- `hermvander2d` -- Vandermonde-like matrix for 2D power series.
- `hermvander3d` -- Vandermonde-like matrix for 3D power series.
- `hermgauss` -- Gauss-Hermite quadrature, points and weights.
- `hermweight` -- Hermite weight function.
- `hermcompanion` -- symmetrized companion matrix in Hermite form.
- `hermfit` -- least-squares fit returning a Hermite series.
- `hermtrim` -- trim leading coefficients from a Hermite series.
- `hermline` -- Hermite series of given straight line.
- `herm2poly` -- convert a Hermite series to a polynomial.
- `poly2herm` -- convert a polynomial to a Hermite series.
Classes
-------
- `Hermite` -- A Hermite series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd',
'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval',
'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots',
'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite',
'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d',
'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight']
hermtrim = pu.trimcoef
def poly2herm(pol):
"""
poly2herm(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herm2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import poly2herm
>>> poly2herm(np.arange(4))
array([1. , 2.75 , 0.5 , 0.375])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = hermadd(hermmulx(res), pol[i])
return res
def herm2poly(c):
"""
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herm
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import herm2poly
>>> herm2poly([ 1. , 2.75 , 0.5 , 0.375])
array([0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
if n == 2:
c[1] *= 2
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1*(2*(i - 1)))
c1 = polyadd(tmp, polymulx(c1)*2)
return polyadd(c0, polymulx(c1)*2)
#
# These are constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Hermite
hermdomain = np.array([-1, 1])
# Hermite coefficients representing zero.
hermzero = np.array([0])
# Hermite coefficients representing one.
hermone = np.array([1])
# Hermite coefficients representing the identity x.
hermx = np.array([0, 1/2])
def hermline(off, scl):
"""
Hermite series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Hermite series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.hermite import hermline, hermval
>>> hermval(0,hermline(3, 2))
3.0
>>> hermval(1,hermline(3, 2))
5.0
"""
if scl != 0:
return np.array([off, scl/2])
else:
return np.array([off])
def hermfromroots(roots):
"""
Generate a Hermite series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Hermite form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Hermite form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, chebfromroots, hermefromroots
Examples
--------
>>> from numpy.polynomial.hermite import hermfromroots, hermval
>>> coef = hermfromroots((-1, 0, 1))
>>> hermval((-1, 0, 1), coef)
array([0., 0., 0.])
>>> coef = hermfromroots((-1j, 1j))
>>> hermval((-1j, 1j), coef)
array([0.+0.j, 0.+0.j])
"""
return pu._fromroots(hermline, hermmul, roots)
def hermadd(c1, c2):
"""
Add one Hermite series to another.
Returns the sum of two Hermite series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Hermite series of their sum.
See Also
--------
hermsub, hermmulx, hermmul, hermdiv, hermpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Hermite series
is a Hermite series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite import hermadd
>>> hermadd([1, 2, 3], [1, 2, 3, 4])
array([2., 4., 6., 4.])
"""
return pu._add(c1, c2)
def hermsub(c1, c2):
"""
Subtract one Hermite series from another.
Returns the difference of two Hermite series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their difference.
See Also
--------
hermadd, hermmulx, hermmul, hermdiv, hermpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Hermite
series is a Hermite series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite import hermsub
>>> hermsub([1, 2, 3, 4], [1, 2, 3])
array([0., 0., 0., 4.])
"""
return pu._sub(c1, c2)
def hermmulx(c):
"""Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
See Also
--------
hermadd, hermsub, hermmul, hermdiv, hermpow
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
.. math::
xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.hermite import hermmulx
>>> hermmulx([1, 2, 3])
array([2. , 6.5, 1. , 1.5])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]/2
for i in range(1, len(c)):
prd[i + 1] = c[i]/2
prd[i - 1] += c[i]*i
return prd
def hermmul(c1, c2):
"""
Multiply one Hermite series by another.
Returns the product of two Hermite series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their product.
See Also
--------
hermadd, hermsub, hermmulx, hermdiv, hermpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Hermite polynomial basis set. Thus, to express
the product as a Hermite series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermmul
>>> hermmul([1, 2, 3], [0, 1, 2])
array([52., 29., 52., 7., 6.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1)))
c1 = hermadd(tmp, hermmulx(c1)*2)
return hermadd(c0, hermmulx(c1)*2)
def hermdiv(c1, c2):
"""
Divide one Hermite series by another.
Returns the quotient-with-remainder of two Hermite series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Hermite series coefficients representing the quotient and
remainder.
See Also
--------
hermadd, hermsub, hermmulx, hermmul, hermpow
Notes
-----
In general, the (polynomial) division of one Hermite series by another
results in quotient and remainder terms that are not in the Hermite
polynomial basis set. Thus, to express these results as a Hermite
series, it is necessary to "reproject" the results onto the Hermite
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermdiv
>>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2])
(array([1., 2., 3.]), array([0.]))
>>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2])
(array([1., 2., 3.]), array([2., 2.]))
>>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2])
(array([1., 2., 3.]), array([1., 1.]))
"""
return pu._div(hermmul, c1, c2)
def hermpow(c, pow, maxpower=16):
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermadd, hermsub, hermmulx, hermmul, hermdiv
Examples
--------
>>> from numpy.polynomial.hermite import hermpow
>>> hermpow([1, 2, 3], 2)
array([81., 52., 82., 12., 9.])
"""
return pu._pow(hermmul, c, pow, maxpower)
def hermder(c, m=1, scl=1, axis=0):
"""
Differentiate a Hermite series.
Returns the Hermite series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2``
while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) +
2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Hermite series coefficients. If `c` is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Hermite series of the derivative.
See Also
--------
hermint
Notes
-----
In general, the result of differentiating a Hermite series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.hermite import hermder
>>> hermder([ 1. , 0.5, 0.5, 0.5])
array([1., 2., 3.])
>>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2)
array([1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt = pu._deprecate_as_int(m, "the order of derivation")
iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 0, -1):
der[j - 1] = (2*j)*c[j]
c = der
c = np.moveaxis(c, 0, iaxis)
return c
def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite series.
Returns the Hermite series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
``np.ndim(scl) != 0``.
See Also
--------
hermder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermint
>>> hermint([1,2,3]) # integrate once, value 0 at 0.
array([1. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary
>>> hermint([1,2,3], k=1) # integrate once, value 1 at 0.
array([2. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1
array([-2. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1)
array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt = pu._deprecate_as_int(m, "the order of integration")
iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if np.ndim(lbnd) != 0:
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]/2
for j in range(1, n):
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[0] += k[i] - hermval(lbnd, tmp)
c = tmp
c = np.moveaxis(c, 0, iaxis)
return c
def hermval(x, c, tensor=True):
"""
Evaluate an Hermite series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
hermval2d, hermgrid2d, hermval3d, hermgrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.hermite import hermval
>>> coef = [1,2,3]
>>> hermval(1, coef)
11.0
>>> hermval([[1,2],[3,4]], coef)
array([[ 11., 51.],
[115., 203.]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
x2 = x*2
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - c1*(2*(nd - 1))
c1 = tmp + c1*x2
return c0 + c1*x2
def hermval2d(x, y, c):
"""
Evaluate a 2-D Hermite series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
hermval, hermgrid2d, hermval3d, hermgrid3d
Notes
-----
.. versionadded:: 1.7.0
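    Examples
    --------
    A small illustrative case (the values follow from the definition above,
    using ``H_0(x) = 1`` and ``H_1(x) = 2x``):
    >>> from numpy.polynomial.hermite import hermval2d
    >>> hermval2d([1, 2], [1, 2], [[1, 0], [0, 1]])
    array([ 5., 17.])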
"""
return pu._valnd(hermval, c, x, y)
def hermgrid2d(x, y, c):
"""
Evaluate a 2-D Hermite series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermval, hermval2d, hermval3d, hermgrid3d
Notes
-----
.. versionadded:: 1.7.0
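    Examples
    --------
    A small illustrative grid evaluation (hand-checked values; the array
    display may vary slightly with NumPy's print options):

    >>> from numpy.polynomial.hermite import hermgrid2d
    >>> c = [[1, 2], [3, 4]]
    >>> hermgrid2d([0, 1], [0, 1], c)    # 2x2 grid built from the 1-D points
    array([[ 1.,  5.],
           [ 7., 27.]])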
"""
return pu._gridnd(hermval, c, x, y)
def hermval3d(x, y, z, c):
"""
Evaluate a 3-D Hermite series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
hermval, hermval2d, hermgrid2d, hermgrid3d
Notes
-----
.. versionadded:: 1.7.0
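    Examples
    --------
    A minimal, hand-checked evaluation (illustrative values only):

    >>> import numpy as np
    >>> from numpy.polynomial.hermite import hermval3d
    >>> c = np.arange(8).reshape(2, 2, 2)
    >>> hermval3d(1, 1, 1, c)
    126.0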
"""
return pu._valnd(hermval, c, x, y, z)
def hermgrid3d(x, y, z, c):
"""
Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
hermval, hermval2d, hermgrid2d, hermval3d
Notes
-----
.. versionadded:: 1.7.0
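    Examples
    --------
    A shape-only sketch (the result spans the full Cartesian product of the
    three point sets):

    >>> import numpy as np
    >>> from numpy.polynomial.hermite import hermgrid3d
    >>> c = np.arange(8).reshape(2, 2, 2)
    >>> hermgrid3d([0, 1], [0, 1, 2], [0], c).shape
    (2, 3, 1)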
"""
return pu._gridnd(hermval, c, x, y, z)
def hermvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = H_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Hermite polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
``hermval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Hermite series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Hermite polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.hermite import hermvander
>>> x = np.array([-1, 0, 1])
>>> hermvander(x, 3)
    array([[ 1., -2.,  2.,  4.],
           [ 1.,  0., -2., -0.],
           [ 1.,  2.,  2., -4.]])
"""
ideg = pu._deprecate_as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
x2 = x*2
v[1] = x2
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
return np.moveaxis(v, 0, -1)
def hermvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Hermite polynomials.
If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Hermite
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
hermvander, hermvander3d, hermval2d, hermval3d
Notes
-----
.. versionadded:: 1.7.0
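    Examples
    --------
    An illustrative consistency check against `hermval2d` (deterministic toy
    inputs; only shapes and a boolean check are shown):

    >>> import numpy as np
    >>> from numpy.polynomial.hermite import hermvander2d, hermval2d
    >>> x = np.array([-1.0, 0.5, 2.0])
    >>> y = np.array([0.0, 1.0, -1.5])
    >>> V = hermvander2d(x, y, [1, 2])
    >>> V.shape
    (3, 6)
    >>> c = np.arange(6.0).reshape(2, 3)
    >>> np.allclose(np.dot(V, c.ravel()), hermval2d(x, y, c))
    True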
"""
return pu._vander2d(hermvander, x, y, deg)
def hermvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Hermite polynomials.
If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Hermite
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    hermvander, hermvander2d, hermval2d, hermval3d
Notes
-----
.. versionadded:: 1.7.0
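    Examples
    --------
    A shape-only sketch (order = 2*2*2 = 8 for degrees ``[1, 1, 1]``):

    >>> import numpy as np
    >>> from numpy.polynomial.hermite import hermvander3d
    >>> x = y = z = np.array([0.0, 1.0])
    >>> hermvander3d(x, y, z, [1, 1, 1]).shape
    (2, 8)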
"""
return pu._vander3d(hermvander, x, y, z, deg)
def hermfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Hermite series to data.
Return the coefficients of a Hermite series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Hermite coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
chebfit, legfit, lagfit, polyfit, hermefit
hermval : Evaluates a Hermite series.
hermvander : Vandermonde matrix of Hermite series.
hermweight : Hermite weight function
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Hermite series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Hermite series are probably most useful when the data can be
approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `hermweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
https://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
>>> from numpy.polynomial.hermite import hermfit, hermval
>>> x = np.linspace(-10, 10)
>>> err = np.random.randn(len(x))/10
>>> y = hermval(x, [1, 2, 3]) + err
>>> hermfit(x, y, 2)
array([1.0218, 1.9986, 2.9999]) # may vary
"""
return pu._fit(hermvander, x, y, deg, rcond, full, w)
def hermcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is an Hermite basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
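    Examples
    --------
    An illustrative use with ``H_2(x) = 4x^2 - 2``, whose roots are
    ``+-1/sqrt(2)`` (float formatting may vary):

    >>> import numpy as np
    >>> from numpy.polynomial.hermite import hermcompanion
    >>> m = hermcompanion([0, 0, 1])    # coefficients of H_2
    >>> np.linalg.eigvalsh(m)
    array([-0.70710678,  0.70710678])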
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-.5*c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1))))
scl = np.multiply.accumulate(scl)[::-1]
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(.5*np.arange(1, n))
bot[...] = top
mat[:, -1] -= scl*c[:-1]/(2.0*c[-1])
return mat
def hermroots(c):
"""
Compute the roots of a Hermite series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * H_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, chebroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Hermite series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.hermite import hermroots, hermfromroots
>>> coef = hermfromroots([-1, 0, 1])
>>> coef
array([0. , 0.25 , 0. , 0.125])
>>> hermroots(coef)
array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-.5*c[0]/c[1]])
# rotated companion matrix reduces error
m = hermcompanion(c)[::-1,::-1]
r = la.eigvals(m)
r.sort()
return r
def _normed_hermite_n(x, n):
"""
Evaluate a normalized Hermite polynomial.
Compute the value of the normalized Hermite polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized Hermite function to be evaluated.
Returns
-------
values : ndarray
        The values of the normalized Hermite polynomial at the points `x`,
        with the same shape as `x`.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard Hermite functions
overflow when n >= 207.
"""
if n == 0:
return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi)))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(2./nd)
nd = nd - 1.0
return c0 + c1*x*np.sqrt(2)
def hermgauss(deg):
"""
Gauss-Hermite quadrature.
Computes the sample points and weights for Gauss-Hermite quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
with the weight function :math:`f(x) = \\exp(-x^2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
    The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`H_n`, and then scaling the results to get
the right value when integrating 1.
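    Examples
    --------
    A small illustrative call (for ``deg=2`` the nodes are ``+-1/sqrt(2)``
    and both weights equal ``sqrt(pi)/2``; float formatting may vary):

    >>> from numpy.polynomial.hermite import hermgauss
    >>> x, w = hermgauss(2)
    >>> x
    array([-0.70710678,  0.70710678])
    >>> w
    array([0.88622693, 0.88622693])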
"""
ideg = pu._deprecate_as_int(deg, "deg")
if ideg <= 0:
raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1], dtype=np.float64)
m = hermcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = _normed_hermite_n(x, ideg)
df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg)
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = _normed_hermite_n(x, ideg - 1)
fm /= np.abs(fm).max()
w = 1/(fm * fm)
# for Hermite we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= np.sqrt(np.pi) / w.sum()
return x, w
def hermweight(x):
"""
Weight function of the Hermite polynomials.
The weight function is :math:`\\exp(-x^2)` and the interval of
    integration is :math:`[-\\inf, \\inf]`. The Hermite polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
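    Examples
    --------
    A few illustrative values (``exp(-x**2)`` evaluated at -1, 0 and 1;
    float formatting may vary):

    >>> import numpy as np
    >>> from numpy.polynomial.hermite import hermweight
    >>> hermweight(0.0)
    1.0
    >>> hermweight(np.array([-1.0, 0.0, 1.0]))
    array([0.36787944, 1.        , 0.36787944])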
"""
w = np.exp(-x**2)
return w
#
# Hermite series class
#
class Hermite(ABCPolyBase):
"""An Hermite series class.
The Hermite class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
        Hermite coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
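    Examples
    --------
    A brief illustrative use of the class interface (values hand-checked
    against `hermval`):

    >>> from numpy.polynomial import Hermite
    >>> p = Hermite([1, 2, 3])    # 1*H_0(x) + 2*H_1(x) + 3*H_2(x)
    >>> p(1.0)
    11.0
    >>> (p + p).coef
    array([2., 4., 6.])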
"""
# Virtual Functions
_add = staticmethod(hermadd)
_sub = staticmethod(hermsub)
_mul = staticmethod(hermmul)
_div = staticmethod(hermdiv)
_pow = staticmethod(hermpow)
_val = staticmethod(hermval)
_int = staticmethod(hermint)
_der = staticmethod(hermder)
_fit = staticmethod(hermfit)
_line = staticmethod(hermline)
_roots = staticmethod(hermroots)
_fromroots = staticmethod(hermfromroots)
# Virtual properties
nickname = 'herm'
domain = np.array(hermdomain)
window = np.array(hermdomain)
basis_name = 'H'
| apache-2.0 | 1,981,357,270,426,100,000 | 30.554289 | 87 | 0.604399 | false |
ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert | lib/flows/general/webplugin_test.py | 2 | 4080 | #!/usr/bin/env python
"""Test the filesystem related flows."""
import os
from grr.client import client_utils_linux
from grr.client import client_utils_osx
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
class TestChromePlugins(test_lib.FlowTestsBaseclass):
"""Test the chrome extension flow."""
def testGetExtension(self):
"""Test that finding the Chrome plugin works."""
# Set up client info
self.client = aff4.FACTORY.Open(self.client_id, mode="rw",
token=self.token)
self.client.Set(self.client.Schema.SYSTEM("Linux"))
user_list = self.client.Schema.USER()
for u in [rdfvalue.User(username="Foo",
full_name="FooFoo",
last_logon=150),
rdfvalue.User(username="test",
full_name="test user",
homedir="/home/test/",
last_logon=250)]:
user_list.Append(u)
self.client.AddAttribute(self.client.Schema.USER, user_list)
self.client.Close()
client_mock = action_mocks.ActionMock(
"ReadBuffer", "FingerprintFile", "TransferBuffer", "StatFile",
"ListDirectory", "HashBuffer", "Find")
# TODO(user): Find a way to do this on Windows.
# Mock the client to make it look like the root partition is mounted off the
# test image. This will force all flow access to come off the image.
def MockGetMountpoints():
return {
"/": (os.path.join(self.base_path, "test_img.dd"), "ext2")
}
orig_linux_mp = client_utils_linux.GetMountpoints
orig_osx_mp = client_utils_osx.GetMountpoints
client_utils_linux.GetMountpoints = MockGetMountpoints
client_utils_osx.GetMountpoints = MockGetMountpoints
try:
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper(
"ChromePlugins", client_mock, client_id=self.client_id,
username="test", download_files=True, output="analysis/plugins",
token=self.token, pathtype=rdfvalue.PathSpec.PathType.TSK):
pass
# Now check that the right files were downloaded.
fs_path = ("/home/test/.config/google-chrome/Default/Extensions/"
"nlbjncdgjeocebhnmkbbbdekmmmcbfjd/2.1.3_0")
# Check if the output VFile is created
output_path = self.client_id.Add("fs/tsk").Add(
"/".join([self.base_path.replace("\\", "/"),
"test_img.dd"])).Add(fs_path)
fd = aff4.FACTORY.Open(output_path, token=self.token)
children = list(fd.OpenChildren())
self.assertEqual(len(children), 3)
# Check for Analysis dir
output_path = self.client_id.Add(
"analysis/plugins/RSS Subscription Extension (by Google)/2.1.3")
fd = aff4.FACTORY.Open(output_path, token=self.token)
self.assertEqual(fd.Get(fd.Schema.NAME),
"RSS Subscription Extension (by Google)")
self.assertEqual(fd.Get(fd.Schema.VERSION),
"2.1.3")
self.assertEqual(fd.Get(fd.Schema.CHROMEID),
"nlbjncdgjeocebhnmkbbbdekmmmcbfjd")
self.assertEqual(fd.Get(fd.Schema.EXTENSIONDIR),
fs_path)
# check for file downloads
urns = [str(c.urn) for c in children
if str(c.urn).endswith("testfile.txt")]
self.assertEqual(len(urns), 1)
fd = aff4.FACTORY.Open(urns[0], token=self.token)
expect = "This should be downloaded automatically."
self.assertTrue(fd.Read(10000).startswith(expect))
self.assertEqual(fd.size, 41)
finally:
client_utils_linux.GetMountpoints = orig_linux_mp
client_utils_osx.GetMountpoints = orig_osx_mp
class FlowTestLoader(test_lib.GRRTestLoader):
base_class = TestChromePlugins
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv, testLoader=FlowTestLoader())
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 | 8,596,634,113,550,921,000 | 34.172414 | 80 | 0.628922 | false |
summanlp/gensim | gensim/summarization/keywords.py | 8 | 7843 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
from gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank
from gensim.summarization.textcleaner import clean_text_by_word as _clean_text_by_word
from gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word
from gensim.summarization.commons import build_graph as _build_graph
from gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes
from gensim.utils import to_unicode
from itertools import combinations as _combinations
from six.moves.queue import Queue as _Queue
from six.moves import xrange
from six import iteritems
WINDOW_SIZE = 2
"""
Check tags in http://www.clips.ua.ac.be/pages/mbsp-tags and use only first two letters
Example: filter for nouns and adjectives:
INCLUDING_FILTER = ['NN', 'JJ']
"""
INCLUDING_FILTER = ['NN', 'JJ']
EXCLUDING_FILTER = []
def _get_pos_filters():
return frozenset(INCLUDING_FILTER), frozenset(EXCLUDING_FILTER)
def _get_words_for_graph(tokens, pos_filter):
if pos_filter is None:
include_filters, exclude_filters = _get_pos_filters()
else:
include_filters = set(pos_filter)
exclude_filters = frozenset([])
if include_filters and exclude_filters:
raise ValueError("Can't use both include and exclude filters, should use only one")
result = []
for word, unit in iteritems(tokens):
if exclude_filters and unit.tag in exclude_filters:
continue
if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:
result.append(unit.token)
return result
def _get_first_window(split_text):
return split_text[:WINDOW_SIZE]
def _set_graph_edge(graph, tokens, word_a, word_b):
if word_a in tokens and word_b in tokens:
lemma_a = tokens[word_a].token
lemma_b = tokens[word_b].token
edge = (lemma_a, lemma_b)
if graph.has_node(lemma_a) and graph.has_node(lemma_b) and not graph.has_edge(edge):
graph.add_edge(edge)
def _process_first_window(graph, tokens, split_text):
first_window = _get_first_window(split_text)
for word_a, word_b in _combinations(first_window, 2):
_set_graph_edge(graph, tokens, word_a, word_b)
def _init_queue(split_text):
queue = _Queue()
first_window = _get_first_window(split_text)
for word in first_window[1:]:
queue.put(word)
return queue
def _process_word(graph, tokens, queue, word):
for word_to_compare in _queue_iterator(queue):
_set_graph_edge(graph, tokens, word, word_to_compare)
def _update_queue(queue, word):
queue.get()
queue.put(word)
assert queue.qsize() == (WINDOW_SIZE - 1)
def _process_text(graph, tokens, split_text):
queue = _init_queue(split_text)
for i in xrange(WINDOW_SIZE, len(split_text)):
word = split_text[i]
_process_word(graph, tokens, queue, word)
_update_queue(queue, word)
def _queue_iterator(queue):
iterations = queue.qsize()
for i in xrange(iterations):
var = queue.get()
yield var
queue.put(var)
def _set_graph_edges(graph, tokens, split_text):
_process_first_window(graph, tokens, split_text)
_process_text(graph, tokens, split_text)
def _extract_tokens(lemmas, scores, ratio, words):
lemmas.sort(key=lambda s: scores[s], reverse=True)
# If no "words" option is selected, the number of sentences is
# reduced by the provided ratio, else, the ratio is ignored.
length = len(lemmas) * ratio if words is None else words
return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]
def _lemmas_to_words(tokens):
lemma_to_word = {}
for word, unit in iteritems(tokens):
lemma = unit.token
if lemma in lemma_to_word:
lemma_to_word[lemma].append(word)
else:
lemma_to_word[lemma] = [word]
return lemma_to_word
def _get_keywords_with_score(extracted_lemmas, lemma_to_word):
"""
:param extracted_lemmas:list of tuples
:param lemma_to_word: dict of {lemma:list of words}
:return: dict of {keyword:score}
"""
keywords = {}
for score, lemma in extracted_lemmas:
keyword_list = lemma_to_word[lemma]
for keyword in keyword_list:
keywords[keyword] = score
return keywords
def _strip_word(word):
stripped_word_list = list(_tokenize_by_word(word))
return stripped_word_list[0] if stripped_word_list else ""
def _get_combined_keywords(_keywords, split_text):
"""
:param keywords:dict of keywords:scores
:param split_text: list of strings
:return: combined_keywords:list
"""
result = []
_keywords = _keywords.copy()
len_text = len(split_text)
for i in xrange(len_text):
word = _strip_word(split_text[i])
if word in _keywords:
combined_word = [word]
if i + 1 == len_text:
result.append(word) # appends last word if keyword and doesn't iterate
for j in xrange(i + 1, len_text):
other_word = _strip_word(split_text[j])
if other_word in _keywords and other_word == split_text[j] and other_word not in combined_word:
combined_word.append(other_word)
else:
for keyword in combined_word:
_keywords.pop(keyword)
result.append(" ".join(combined_word))
break
return result
def _get_average_score(concept, _keywords):
word_list = concept.split()
word_counter = 0
total = 0
for word in word_list:
total += _keywords[word]
word_counter += 1
return total / word_counter
def _format_results(_keywords, combined_keywords, split, scores):
"""
:param keywords:dict of keywords:scores
:param combined_keywords:list of word/s
"""
combined_keywords.sort(key=lambda w: _get_average_score(w, _keywords), reverse=True)
if scores:
return [(word, _get_average_score(word, _keywords)) for word in combined_keywords]
if split:
return combined_keywords
return "\n".join(combined_keywords)
def keywords(text, ratio=0.2, words=None, split=False, scores=False, pos_filter=['NN', 'JJ'], lemmatize=False, deacc=True):
# Gets a dict of word -> lemma
text = to_unicode(text)
tokens = _clean_text_by_word(text, deacc=deacc)
split_text = list(_tokenize_by_word(text))
# Creates the graph and adds the edges
graph = _build_graph(_get_words_for_graph(tokens, pos_filter))
_set_graph_edges(graph, tokens, split_text)
del split_text # It's no longer used
_remove_unreachable_nodes(graph)
# Ranks the tokens using the PageRank algorithm. Returns dict of lemma -> score
pagerank_scores = _pagerank(graph)
extracted_lemmas = _extract_tokens(graph.nodes(), pagerank_scores, ratio, words)
# The results can be polluted by many variations of the same word
if lemmatize:
lemmas_to_word = {}
for word, unit in iteritems(tokens):
lemmas_to_word[unit.token] = [word]
else:
lemmas_to_word = _lemmas_to_words(tokens)
keywords = _get_keywords_with_score(extracted_lemmas, lemmas_to_word)
    # text.split() to keep numbers and punctuation marks, so separated concepts are not combined
combined_keywords = _get_combined_keywords(keywords, text.split())
return _format_results(keywords, combined_keywords, split, scores)
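# A minimal usage sketch (illustrative input text; the exact keywords and
# scores returned depend on the text, the ratio/words arguments, the POS
# filter and the installed gensim version):
#
#     from gensim.summarization.keywords import keywords
#     text = ("Challenges in natural language processing frequently involve "
#             "speech recognition, natural language understanding and "
#             "natural language generation.")
#     print(keywords(text))               # newline-separated keyword strings
#     print(keywords(text, scores=True))  # [(keyword, score), ...] pairs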
def get_graph(text):
tokens = _clean_text_by_word(text)
split_text = list(_tokenize_by_word(text))
graph = _build_graph(_get_words_for_graph(tokens))
_set_graph_edges(graph, tokens, split_text)
return graph
| lgpl-2.1 | -8,331,178,064,665,565,000 | 31.543568 | 123 | 0.655234 | false |
wen-bo-yang/Paddle | paddle/gserver/tests/pyDataProvider.py | 13 | 4672 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import struct
import traceback
def header_creator():
ret = ""
ret += struct.pack('i', 3) # slot num
ret += struct.pack('i', 1) # sequence flag
ret += struct.pack('i', 0) # slot0 dense type
ret += struct.pack('i', 3) # slot0 dim
ret += struct.pack('i', 1) # slot1 sparse non value type
ret += struct.pack('i', 7) # slot1 dim
ret += struct.pack('i', 3) # slot2 index type
ret += struct.pack('i', 2) # slot2 dim
return ret
def dense_value_creator(sample_num):
ret = ""
ret += struct.pack('i', sample_num) # slot0 sample num
for i in range(sample_num): # slot0 value
ret += struct.pack('f', 1.0)
ret += struct.pack('f', 2.0)
ret += struct.pack('f', 3.0)
return ret
def sparse_value_creator(sample_num):
ret = ""
ret += struct.pack('i', sample_num) # slot1 sample num
for i in range(sample_num): # slot1 index
ret += struct.pack('i', i * 2)
ret += struct.pack('i', sample_num * 2) #slot1 length
for i in range(sample_num): # slot1 value
ret += struct.pack('i', 1)
ret += struct.pack('i', 2)
return ret
def index_value_creator(sample_num):
ret = ""
ret += struct.pack('i', sample_num) # slot2 sample num
for i in range(sample_num): # slot2 value
ret += struct.pack('i', 0)
return ret
def sequenceStartPositions_creator():
ret = ""
ret += struct.pack('i', 2) # slot0 sequence num
ret += struct.pack('i', 0) # slot0 sequence value1
ret += struct.pack('i', 1) # slot0 sequence value2
ret += struct.pack('i', 1) # slot1 sequence num
ret += struct.pack('i', 0) # slot1 sequence value1
ret += struct.pack('i', 2) # slot2 sequence num
ret += struct.pack('i', 0) # slot2 sequence value1
ret += struct.pack('i', 1) # slot2 sequence value2
return ret
def subSequenceStartPositions_creator():
ret = ""
ret += struct.pack('i', 3) # slot0 subsequence num
ret += struct.pack('i', 0) # slot0 subsequence value1
ret += struct.pack('i', 1) # slot0 subsequence value2
ret += struct.pack('i', 2) # slot0 subsequence value3
ret += struct.pack('i', 2) # slot1 subsequence num
ret += struct.pack('i', 0) # slot1 subsequence value1
ret += struct.pack('i', 1) # slot1 subsequence value2
ret += struct.pack('i', 3) # slot2 subsequence num
ret += struct.pack('i', 0) # slot2 subsequence value1
ret += struct.pack('i', 1) # slot2 subsequence value2
ret += struct.pack('i', 2) # slot2 subsequence value3
return ret
class SimpleDataProvider:
def __init__(self, *file_list):
self.file_list = file_list
def shuffle(self):
pass
def reset(self):
pass
def getHeader(self):
return header_creator()
def getNextBatch(self, batch_size):
ret = ""
ret += struct.pack('i', 2) # batch size
ret += dense_value_creator(2) # slot0
ret += sparse_value_creator(2) # slot1
ret += index_value_creator(2) # slot2
ret += sequenceStartPositions_creator()
return ret
class SimpleNestDataProvider:
def __init__(self, *file_list):
self.file_list = file_list
def shuffle(self):
pass
def reset(self):
pass
def getHeader(self):
return header_creator()
def getNextBatch(self, batch_size):
ret = ""
ret += struct.pack('i', 2) # batch size
ret += dense_value_creator(4) # slot0
ret += sparse_value_creator(4) # slot1
ret += index_value_creator(4) # slot2
ret += sequenceStartPositions_creator()
ret += subSequenceStartPositions_creator()
return ret
if __name__ == "__main__":
# test code
data_provider = SimpleDataProvider('./test_batch')
print len(data_provider.getHeader())
print len(data_provider.getNextBatch(2))
data_provider = SimpleNestDataProvider('./test_batch')
print len(data_provider.getHeader())
print len(data_provider.getNextBatch(2))
| apache-2.0 | -693,251,186,616,893,000 | 30.782313 | 74 | 0.614726 | false |
shahbazn/neutron | neutron/tests/unit/db/test_ipam_pluggable_backend.py | 16 | 23118 | # Copyright (c) 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
import webob.exc
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.db import ipam_backend_mixin
from neutron.db import ipam_pluggable_backend
from neutron.ipam import requests as ipam_req
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_base
class UseIpamMixin(object):
def setUp(self):
cfg.CONF.set_override("ipam_driver", 'internal')
super(UseIpamMixin, self).setUp()
class TestIpamHTTPResponse(UseIpamMixin, test_db_base.TestV2HTTPResponse):
pass
class TestIpamPorts(UseIpamMixin, test_db_base.TestPortsV2):
pass
class TestIpamNetworks(UseIpamMixin, test_db_base.TestNetworksV2):
pass
class TestIpamSubnets(UseIpamMixin, test_db_base.TestSubnetsV2):
pass
class TestIpamSubnetPool(UseIpamMixin, test_db_base.TestSubnetPoolsV2):
pass
class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
def setUp(self):
cfg.CONF.set_override("ipam_driver", 'internal')
super(TestDbBasePluginIpam, self).setUp()
self.tenant_id = uuidutils.generate_uuid()
self.subnet_id = uuidutils.generate_uuid()
def _prepare_mocks(self):
mocks = {
'driver': mock.Mock(),
'subnet': mock.Mock(),
'subnet_request': ipam_req.SpecificSubnetRequest(
self.tenant_id,
self.subnet_id,
'10.0.0.0/24',
'10.0.0.1',
[netaddr.IPRange('10.0.0.2', '10.0.0.254')]),
}
mocks['driver'].get_subnet.return_value = mocks['subnet']
mocks['driver'].allocate_subnet.return_value = mocks['subnet']
mocks['driver'].get_subnet_request_factory = (
ipam_req.SubnetRequestFactory)
mocks['driver'].get_address_request_factory = (
ipam_req.AddressRequestFactory)
mocks['subnet'].get_details.return_value = mocks['subnet_request']
return mocks
def _prepare_ipam(self):
mocks = self._prepare_mocks()
mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
return mocks
def _prepare_mocks_with_pool_mock(self, pool_mock):
mocks = self._prepare_mocks()
pool_mock.get_instance.return_value = mocks['driver']
return mocks
def _get_allocate_mock(self, auto_ip='10.0.0.2',
fail_ip='127.0.0.1',
error_message='SomeError'):
def allocate_mock(request):
if type(request) == ipam_req.SpecificAddressRequest:
if request.address == netaddr.IPAddress(fail_ip):
raise n_exc.InvalidInput(error_message=error_message)
else:
return str(request.address)
else:
return auto_ip
return allocate_mock
def _validate_allocate_calls(self, expected_calls, mocks):
self.assertTrue(mocks['subnet'].allocate.called)
actual_calls = mocks['subnet'].allocate.call_args_list
self.assertEqual(len(expected_calls), len(actual_calls))
i = 0
for call in expected_calls:
if call['ip_address']:
self.assertIsInstance(actual_calls[i][0][0],
ipam_req.SpecificAddressRequest)
self.assertEqual(netaddr.IPAddress(call['ip_address']),
actual_calls[i][0][0].address)
else:
self.assertIsInstance(actual_calls[i][0][0],
ipam_req.AnyAddressRequest)
i += 1
def _convert_to_ips(self, data):
ips = [{'ip_address': ip,
'subnet_id': data[ip][1],
'subnet_cidr': data[ip][0]} for ip in data]
return sorted(ips, key=lambda t: t['subnet_cidr'])
def _gen_subnet_id(self):
return uuidutils.generate_uuid()
def test_deallocate_single_ip(self):
mocks = self._prepare_ipam()
ip = '192.168.12.45'
data = {ip: ['192.168.12.0/24', self._gen_subnet_id()]}
ips = self._convert_to_ips(data)
mocks['ipam']._ipam_deallocate_ips(mock.ANY, mocks['driver'],
mock.ANY, ips)
mocks['driver'].get_subnet.assert_called_once_with(data[ip][1])
mocks['subnet'].deallocate.assert_called_once_with(ip)
def test_deallocate_multiple_ips(self):
mocks = self._prepare_ipam()
data = {'192.168.43.15': ['192.168.43.0/24', self._gen_subnet_id()],
'172.23.158.84': ['172.23.128.0/17', self._gen_subnet_id()],
'8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]}
ips = self._convert_to_ips(data)
mocks['ipam']._ipam_deallocate_ips(mock.ANY, mocks['driver'],
mock.ANY, ips)
get_calls = [mock.call(data[ip][1]) for ip in data]
mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True)
ip_calls = [mock.call(ip) for ip in data]
mocks['subnet'].deallocate.assert_has_calls(ip_calls, any_order=True)
def _single_ip_allocate_helper(self, mocks, ip, network, subnet):
ips = [{'subnet_cidr': network,
'subnet_id': subnet}]
if ip:
ips[0]['ip_address'] = ip
allocated_ips = mocks['ipam']._ipam_allocate_ips(
mock.ANY, mocks['driver'], mock.ANY, ips)
mocks['driver'].get_subnet.assert_called_once_with(subnet)
self.assertTrue(mocks['subnet'].allocate.called)
request = mocks['subnet'].allocate.call_args[0][0]
return {'ips': allocated_ips,
'request': request}
def test_allocate_single_fixed_ip(self):
mocks = self._prepare_ipam()
ip = '192.168.15.123'
mocks['subnet'].allocate.return_value = ip
results = self._single_ip_allocate_helper(mocks,
ip,
'192.168.15.0/24',
self._gen_subnet_id())
self.assertIsInstance(results['request'],
ipam_req.SpecificAddressRequest)
self.assertEqual(netaddr.IPAddress(ip), results['request'].address)
self.assertEqual(ip, results['ips'][0]['ip_address'],
'Should allocate the same ip as passed')
def test_allocate_single_any_ip(self):
mocks = self._prepare_ipam()
network = '192.168.15.0/24'
ip = '192.168.15.83'
mocks['subnet'].allocate.return_value = ip
results = self._single_ip_allocate_helper(mocks, '', network,
self._gen_subnet_id())
self.assertIsInstance(results['request'], ipam_req.AnyAddressRequest)
self.assertEqual(ip, results['ips'][0]['ip_address'])
def test_allocate_eui64_ip(self):
mocks = self._prepare_ipam()
ip = {'subnet_id': self._gen_subnet_id(),
'subnet_cidr': '2001:470:abcd::/64',
'mac': '6c:62:6d:de:cf:49',
'eui64_address': True}
eui64_ip = ipv6_utils.get_ipv6_addr_by_EUI64(ip['subnet_cidr'],
ip['mac'])
mocks['ipam']._ipam_allocate_ips(mock.ANY, mocks['driver'],
mock.ANY, [ip])
request = mocks['subnet'].allocate.call_args[0][0]
self.assertIsInstance(request, ipam_req.AutomaticAddressRequest)
self.assertEqual(eui64_ip, request.address)
def test_allocate_multiple_ips(self):
mocks = self._prepare_ipam()
data = {'': ['172.23.128.0/17', self._gen_subnet_id()],
'192.168.43.15': ['192.168.43.0/24', self._gen_subnet_id()],
'8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]}
ips = self._convert_to_ips(data)
mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
auto_ip='172.23.128.94')
mocks['ipam']._ipam_allocate_ips(
mock.ANY, mocks['driver'], mock.ANY, ips)
get_calls = [mock.call(data[ip][1]) for ip in data]
mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True)
self._validate_allocate_calls(ips, mocks)
def test_allocate_multiple_ips_with_exception(self):
mocks = self._prepare_ipam()
auto_ip = '172.23.128.94'
fail_ip = '192.168.43.15'
data = {'': ['172.23.128.0/17', self._gen_subnet_id()],
fail_ip: ['192.168.43.0/24', self._gen_subnet_id()],
'8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]}
ips = self._convert_to_ips(data)
mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
auto_ip=auto_ip, fail_ip=fail_ip)
# Exception should be raised on attempt to allocate second ip.
        # Revert action should be performed for the already allocated ips.
        # In this test case only one ip should be deallocated
        # and the original error should be reraised.
self.assertRaises(n_exc.InvalidInput,
mocks['ipam']._ipam_allocate_ips,
mock.ANY,
mocks['driver'],
mock.ANY,
ips)
# get_subnet should be called only for the first two networks
get_calls = [mock.call(data[ip][1]) for ip in ['', fail_ip]]
mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True)
# Allocate should be called for the first two ips only
self._validate_allocate_calls(ips[:-1], mocks)
# Deallocate should be called for the first ip only
mocks['subnet'].deallocate.assert_called_once_with(auto_ip)
@mock.patch('neutron.ipam.driver.Pool')
def test_create_subnet_over_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
cidr = '192.168.0.0/24'
allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
with self.subnet(allocation_pools=allocation_pools,
cidr=cidr):
pool_mock.get_instance.assert_called_once_with(None, mock.ANY)
self.assertTrue(mocks['driver'].allocate_subnet.called)
request = mocks['driver'].allocate_subnet.call_args[0][0]
self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
@mock.patch('neutron.ipam.driver.Pool')
def test_create_ipv6_pd_subnet_over_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
cfg.CONF.set_override('default_ipv6_subnet_pool',
constants.IPV6_PD_POOL_ID)
cidr = constants.PROVISIONAL_IPV6_PD_PREFIX
allocation_pools = [netaddr.IPRange('::2', '::ffff:ffff:ffff:ffff')]
with self.subnet(cidr=None, ip_version=6,
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC):
pool_mock.get_instance.assert_called_once_with(None, mock.ANY)
self.assertTrue(mocks['driver'].allocate_subnet.called)
request = mocks['driver'].allocate_subnet.call_args[0][0]
self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
self.assertEqual(allocation_pools, request.allocation_pools)
@mock.patch('neutron.ipam.driver.Pool')
def test_create_subnet_over_ipam_with_rollback(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
mocks['driver'].allocate_subnet.side_effect = ValueError
cidr = '10.0.2.0/24'
with self.network() as network:
self._create_subnet(self.fmt, network['network']['id'],
cidr, expected_res_status=500)
pool_mock.get_instance.assert_called_once_with(None, mock.ANY)
self.assertTrue(mocks['driver'].allocate_subnet.called)
request = mocks['driver'].allocate_subnet.call_args[0][0]
self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
# Verify no subnet was created for network
req = self.new_show_request('networks', network['network']['id'])
res = req.get_response(self.api)
net = self.deserialize(self.fmt, res)
self.assertEqual(0, len(net['network']['subnets']))
@mock.patch('neutron.ipam.driver.Pool')
def test_ipam_subnet_deallocated_if_create_fails(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
cidr = '10.0.2.0/24'
with mock.patch.object(
ipam_backend_mixin.IpamBackendMixin, '_save_subnet',
side_effect=ValueError), self.network() as network:
self._create_subnet(self.fmt, network['network']['id'],
cidr, expected_res_status=500)
pool_mock.get_instance.assert_any_call(None, mock.ANY)
self.assertEqual(2, pool_mock.get_instance.call_count)
self.assertTrue(mocks['driver'].allocate_subnet.called)
request = mocks['driver'].allocate_subnet.call_args[0][0]
self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
# Verify remove ipam subnet was called
mocks['driver'].remove_subnet.assert_called_once_with(
self.subnet_id)
@mock.patch('neutron.ipam.driver.Pool')
def test_update_subnet_over_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
with self.subnet(allocation_pools=allocation_pools,
cidr=cidr) as subnet:
data = {'subnet': {'allocation_pools': [
{'start': '10.0.0.10', 'end': '10.0.0.20'},
{'start': '10.0.0.30', 'end': '10.0.0.40'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(200, res.status_code)
pool_mock.get_instance.assert_any_call(None, mock.ANY)
self.assertEqual(2, pool_mock.get_instance.call_count)
self.assertTrue(mocks['driver'].update_subnet.called)
request = mocks['driver'].update_subnet.call_args[0][0]
self.assertIsInstance(request, ipam_req.SpecificSubnetRequest)
self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr)
ip_ranges = [netaddr.IPRange(p['start'],
p['end']) for p in data['subnet']['allocation_pools']]
self.assertEqual(ip_ranges, request.allocation_pools)
@mock.patch('neutron.ipam.driver.Pool')
def test_delete_subnet_over_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
pool_mock.get_instance.assert_any_call(None, mock.ANY)
self.assertEqual(2, pool_mock.get_instance.call_count)
mocks['driver'].remove_subnet.assert_called_once_with(
subnet['subnet']['id'])
@mock.patch('neutron.ipam.driver.Pool')
def test_delete_subnet_over_ipam_with_rollback(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
mocks['driver'].remove_subnet.side_effect = ValueError
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPServerError.code, res.status_int)
pool_mock.get_instance.assert_any_call(None, mock.ANY)
self.assertEqual(2, pool_mock.get_instance.call_count)
mocks['driver'].remove_subnet.assert_called_once_with(
subnet['subnet']['id'])
# Verify subnet was recreated after failed ipam call
subnet_req = self.new_show_request('subnets',
subnet['subnet']['id'])
raw_res = subnet_req.get_response(self.api)
sub_res = self.deserialize(self.fmt, raw_res)
self.assertIn(sub_res['subnet']['cidr'], cidr)
self.assertIn(sub_res['subnet']['gateway_ip'],
gateway_ip)
@mock.patch('neutron.ipam.driver.Pool')
def test_create_port_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
auto_ip = '10.0.0.2'
expected_calls = [{'ip_address': ''}]
mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
auto_ip=auto_ip)
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ips[0]['ip_address'], auto_ip)
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self._validate_allocate_calls(expected_calls, mocks)
@mock.patch('neutron.ipam.driver.Pool')
def test_create_port_ipam_with_rollback(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
mocks['subnet'].allocate.side_effect = ValueError
with self.network() as network:
with self.subnet(network=network):
net_id = network['network']['id']
data = {
'port': {'network_id': net_id,
'tenant_id': network['network']['tenant_id']}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPServerError.code,
res.status_int)
# verify no port left after failure
req = self.new_list_request('ports', self.fmt,
"network_id=%s" % net_id)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(0, len(res['ports']))
@mock.patch('neutron.ipam.driver.Pool')
def test_update_port_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
auto_ip = '10.0.0.2'
new_ip = '10.0.0.15'
expected_calls = [{'ip_address': ip} for ip in ['', new_ip]]
mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
auto_ip=auto_ip)
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ips[0]['ip_address'], auto_ip)
# Update port with another new ip
data = {"port": {"fixed_ips": [{
'subnet_id': subnet['subnet']['id'],
'ip_address': new_ip}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(new_ip, ips[0]['ip_address'])
# Allocate should be called for the first two networks
self._validate_allocate_calls(expected_calls, mocks)
# Deallocate should be called for the first ip only
mocks['subnet'].deallocate.assert_called_once_with(auto_ip)
@mock.patch('neutron.ipam.driver.Pool')
def test_delete_port_ipam(self, pool_mock):
mocks = self._prepare_mocks_with_pool_mock(pool_mock)
auto_ip = '10.0.0.2'
mocks['subnet'].allocate.side_effect = self._get_allocate_mock(
auto_ip=auto_ip)
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ips[0]['ip_address'], auto_ip)
req = self.new_delete_request('ports', port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
mocks['subnet'].deallocate.assert_called_once_with(auto_ip)
def test_recreate_port_ipam(self):
ip = '10.0.0.2'
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ips[0]['ip_address'], ip)
req = self.new_delete_request('ports', port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
with self.port(subnet=subnet, fixed_ips=ips) as port:
ips = port['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(ips[0]['ip_address'], ip)
| apache-2.0 | 4,042,543,583,361,711,600 | 44.240705 | 78 | 0.573579 | false |
ncoghlan/dnf | tests/conf/test_parser.py | 12 | 1349 | # Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import tests.support
import dnf.conf.parser
class SubstituteTest(tests.support.TestCase):
def test_read(self):
substs = {'lies' : 'fact'}
rawstr = '$Substitute some $lies.'
self.assertEqual(dnf.conf.parser.substitute(rawstr, substs),
'$Substitute some fact.')
| gpl-2.0 | 8,564,778,151,108,379,000 | 47.178571 | 77 | 0.737583 | false |
dkua/textcusec | textcusec/settings/prod.py | 1 | 4957 | """Production settings and globals."""
from os import environ
from memcacheify import memcacheify
from postgresify import postgresify
from S3 import CallingFormat
from common import *
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', '[email protected]')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
DATABASES = postgresify()
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = memcacheify()
########## END CACHE CONFIGURATION
########## CELERY CONFIGURATION
# See: http://docs.celeryproject.org/en/latest/configuration.html#broker-transport
BROKER_TRANSPORT = 'amqplib'
# Set this number to the amount of allowed concurrent connections on your AMQP
# provider, divided by the amount of active workers you have.
#
# For example, if you have the 'Little Lemur' CloudAMQP plan (their free tier),
# they allow 3 concurrent connections. So if you run a single worker, you'd
# want this number to be 3. If you had 3 workers running, you'd lower this
# number to 1, since 3 workers each maintaining one open connection = 3
# connections total.
#
# See: http://docs.celeryproject.org/en/latest/configuration.html#broker-pool-limit
BROKER_POOL_LIMIT = 3
# See: http://docs.celeryproject.org/en/latest/configuration.html#broker-connection-max-retries
BROKER_CONNECTION_MAX_RETRIES = 0
# See: http://docs.celeryproject.org/en/latest/configuration.html#broker-url
BROKER_URL = environ.get('RABBITMQ_URL') or environ.get('CLOUDAMQP_URL')
# See: http://docs.celeryproject.org/en/latest/configuration.html#celery-result-backend
CELERY_RESULT_BACKEND = 'amqp'
########## END CELERY CONFIGURATION
########## STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = environ.get('AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = environ.get('AWS_SECRET_ACCESS_KEY', '')
AWS_STORAGE_BUCKET_NAME = environ.get('AWS_STORAGE_BUCKET_NAME', '')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIRY,
AWS_EXPIRY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
########## END STORAGE CONFIGURATION
########## COMPRESSION CONFIGURATION
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
COMPRESS_OFFLINE = True
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = DEFAULT_FILE_STORAGE
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_CSS_FILTERS
COMPRESS_CSS_FILTERS += [
'compressor.filters.cssmin.CSSMinFilter',
]
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_JS_FILTERS
COMPRESS_JS_FILTERS += [
'compressor.filters.jsmin.JSMinFilter',
]
########## END COMPRESSION CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = environ.get('SECRET_KEY', SECRET_KEY)
########## END SECRET CONFIGURATION
########## ALLOWED HOSTS CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['.herokuapp.com']
########## END ALLOWED HOST CONFIGURATION
| mit | 4,069,435,689,542,618,600 | 35.448529 | 109 | 0.737946 | false |
abstract-open-solutions/account-financial-tools | account_check_deposit/__openerp__.py | 7 | 2370 | # -*- coding: utf-8 -*-
###############################################################################
#
# account_check_deposit for Odoo/OpenERP
# Copyright (C) 2012-2015 Akretion (http://www.akretion.com/)
# @author: Benoît GUILLOT <[email protected]>
# @author: Chafique DELLI <[email protected]>
# @author: Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Account Check Deposit',
'version': '8.0.0.1.0',
'category': 'Accounting & Finance',
'license': 'AGPL-3',
'summary': 'Manage deposit of checks to the bank',
'description': """
Account Check Deposit
=====================
This module allows you to easily manage check deposits: you can select all
the checks you received as payments and create a global deposit for the
selected checks.
A journal for received checks is automatically created.
On this journal, you must configure the default debit account and the default
credit account. You must also configure the check deposit account on the
company.
""",
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com/',
'depends': [
'account_accountant',
'report_webkit',
],
'data': [
'account_deposit_view.xml',
'account_move_line_view.xml',
'account_deposit_sequence.xml',
'company_view.xml',
'security/ir.model.access.csv',
'security/check_deposit_security.xml',
'account_data.xml',
'report.xml',
'report/report_checkdeposit.xml',
],
'installable': True,
'application': True,
}
| agpl-3.0 | -6,295,883,561,096,515,000 | 37.209677 | 79 | 0.633179 | false |
bionikspoon/pivotal-github-status | app/views.py | 1 | 2582 | from . import app
from .model import github, pivotal
from flask import request, abort
import requests
import re
PIVOTAL_ACCESS_TOKEN = app.config['PIVOTAL_ACCESS_TOKEN']
GITHUB_ACCESS_TOKEN = app.config['GITHUB_ACCESS_TOKEN']
BLACKLISTED_GITHUB_ACTIONS = ('labeled', 'unlabeled')
def log_and_abort(e):
app.logger.warning('%s. Authorization: %s', e.args[0],
e.request.headers.get('Authorization', 'null'))
return abort(e.response.status_code)
def get_story_ids(title):
_re_story_ids = re.compile(r'[^#]*#(\d+)(?=[,\]\s])')
return (match for match in _re_story_ids.findall(title))
def pull_requests_for_story(owner, repo, story_id):
try:
pull_requests = github.pull_requests(
owner, repo, access_token=GITHUB_ACCESS_TOKEN)
except requests.HTTPError as e:
return log_and_abort(e)
for pull_request in pull_requests:
if story_id not in pull_request['title']:
continue
yield pull_request
def set_pull_request_labels(pull_request, owner, repo):
story_ids = get_story_ids(pull_request['title'])
try:
labels = (story['current_state']
for story in pivotal.stories(
story_ids, access_token=PIVOTAL_ACCESS_TOKEN))
github.set_labels(
pull_request,
owner,
repo,
labels,
access_token=GITHUB_ACCESS_TOKEN)
except requests.HTTPError as e:
return log_and_abort(e)
@app.route('/')
def index():
return ('', 200)
@app.route('/github/<string:secret_key>', methods=['POST'])
def github_hook(secret_key):
if request.json['action'] in BLACKLISTED_GITHUB_ACTIONS:
app.logger.info('Ignoring %r event from github',
request.json['action'])
return ('', 200)
owner = request.json['repository']['owner']['login']
repo = request.json['repository']['name']
pull_request_number = request.json['number']
pull_request = github.pull_request(
owner, repo, pull_request_number, access_token=GITHUB_ACCESS_TOKEN)
set_pull_request_labels(pull_request, owner, repo)
return ('', 204)
@app.route(
'/pivotal/<string:owner>/<string:repo>/<string:secret_key>', # noqa E501
methods=['POST'])
def pivotal_hook(owner, repo, secret_key):
for change in request.json['changes']:
story_id = str(change['id'])
for pull_request in pull_requests_for_story(owner, repo, story_id):
set_pull_request_labels(pull_request, owner, repo)
return ('', 204)
| mit | -7,255,046,701,445,157,000 | 28.340909 | 77 | 0.622386 | false |
sahmed95/sympy | sympy/polys/tests/test_fields.py | 62 | 8769 | """Test sparse rational functions. """
from sympy.polys.fields import field, sfield, FracField
from sympy.polys.rings import ring
from sympy.polys.domains import ZZ, QQ
from sympy.polys.orderings import lex
from sympy.utilities.pytest import raises, XFAIL
from sympy.core import symbols, E
from sympy import sqrt, Rational, exp, log
def test_FracField___init__():
F1 = FracField("x,y", ZZ, lex)
F2 = FracField("x,y", ZZ, lex)
F3 = FracField("x,y,z", ZZ, lex)
assert F1.x == F1.gens[0]
assert F1.y == F1.gens[1]
assert F1.x == F2.x
assert F1.y == F2.y
assert F1.x != F3.x
assert F1.y != F3.y
def test_FracField___hash__():
F, x, y, z = field("x,y,z", QQ)
assert hash(F)
def test_FracField___eq__():
assert field("x,y,z", QQ)[0] == field("x,y,z", QQ)[0]
assert field("x,y,z", QQ)[0] is field("x,y,z", QQ)[0]
assert field("x,y,z", QQ)[0] != field("x,y,z", ZZ)[0]
assert field("x,y,z", QQ)[0] is not field("x,y,z", ZZ)[0]
assert field("x,y,z", ZZ)[0] != field("x,y,z", QQ)[0]
assert field("x,y,z", ZZ)[0] is not field("x,y,z", QQ)[0]
assert field("x,y,z", QQ)[0] != field("x,y", QQ)[0]
assert field("x,y,z", QQ)[0] is not field("x,y", QQ)[0]
assert field("x,y", QQ)[0] != field("x,y,z", QQ)[0]
assert field("x,y", QQ)[0] is not field("x,y,z", QQ)[0]
def test_sfield():
x = symbols("x")
F = FracField((E, exp(exp(x)), exp(x)), ZZ, lex)
e, exex, ex = F.gens
assert sfield(exp(x)*exp(exp(x) + 1 + log(exp(x) + 3)/2)**2/(exp(x) + 3)) \
== (F, e**2*exex**2*ex)
F = FracField((x, exp(1/x), log(x), x**QQ(1, 3)), ZZ, lex)
_, ex, lg, x3 = F.gens
assert sfield(((x-3)*log(x)+4*x**2)*exp(1/x+log(x)/3)/x**2) == \
(F, (4*F.x**2*ex + F.x*ex*lg - 3*ex*lg)/x3**5)
F = FracField((x, log(x), sqrt(x + log(x))), ZZ, lex)
_, lg, srt = F.gens
assert sfield((x + 1) / (x * (x + log(x))**QQ(3, 2)) - 1/(x * log(x)**2)) \
== (F, (F.x*lg**2 - F.x*srt + lg**2 - lg*srt)/
(F.x**2*lg**2*srt + F.x*lg**3*srt))
def test_FracElement___hash__():
F, x, y, z = field("x,y,z", QQ)
assert hash(x*y/z)
def test_FracElement_copy():
F, x, y, z = field("x,y,z", ZZ)
f = x*y/3*z
g = f.copy()
assert f == g
g.numer[(1, 1, 1)] = 7
assert f != g
def test_FracElement_as_expr():
F, x, y, z = field("x,y,z", ZZ)
f = (3*x**2*y - x*y*z)/(7*z**3 + 1)
X, Y, Z = F.symbols
g = (3*X**2*Y - X*Y*Z)/(7*Z**3 + 1)
assert f != g
assert f.as_expr() == g
X, Y, Z = symbols("x,y,z")
g = (3*X**2*Y - X*Y*Z)/(7*Z**3 + 1)
assert f != g
assert f.as_expr(X, Y, Z) == g
raises(ValueError, lambda: f.as_expr(X))
def test_FracElement_from_expr():
x, y, z = symbols("x,y,z")
F, X, Y, Z = field((x, y, z), ZZ)
f = F.from_expr(1)
assert f == 1 and isinstance(f, F.dtype)
f = F.from_expr(Rational(3, 7))
assert f == F(3)/7 and isinstance(f, F.dtype)
f = F.from_expr(x)
assert f == X and isinstance(f, F.dtype)
f = F.from_expr(Rational(3,7)*x)
assert f == 3*X/7 and isinstance(f, F.dtype)
f = F.from_expr(1/x)
assert f == 1/X and isinstance(f, F.dtype)
f = F.from_expr(x*y*z)
assert f == X*Y*Z and isinstance(f, F.dtype)
f = F.from_expr(x*y/z)
assert f == X*Y/Z and isinstance(f, F.dtype)
f = F.from_expr(x*y*z + x*y + x)
assert f == X*Y*Z + X*Y + X and isinstance(f, F.dtype)
f = F.from_expr((x*y*z + x*y + x)/(x*y + 7))
assert f == (X*Y*Z + X*Y + X)/(X*Y + 7) and isinstance(f, F.dtype)
f = F.from_expr(x**3*y*z + x**2*y**7 + 1)
assert f == X**3*Y*Z + X**2*Y**7 + 1 and isinstance(f, F.dtype)
raises(ValueError, lambda: F.from_expr(2**x))
raises(ValueError, lambda: F.from_expr(7*x + sqrt(2)))
def test_FracElement__lt_le_gt_ge__():
F, x, y = field("x,y", ZZ)
assert F(1) < 1/x < 1/x**2 < 1/x**3
assert F(1) <= 1/x <= 1/x**2 <= 1/x**3
assert -7/x < 1/x < 3/x < y/x < 1/x**2
assert -7/x <= 1/x <= 3/x <= y/x <= 1/x**2
assert 1/x**3 > 1/x**2 > 1/x > F(1)
assert 1/x**3 >= 1/x**2 >= 1/x >= F(1)
assert 1/x**2 > y/x > 3/x > 1/x > -7/x
assert 1/x**2 >= y/x >= 3/x >= 1/x >= -7/x
def test_FracElement___neg__():
F, x,y = field("x,y", QQ)
f = (7*x - 9)/y
g = (-7*x + 9)/y
assert -f == g
assert -g == f
def test_FracElement___add__():
F, x,y = field("x,y", QQ)
f, g = 1/x, 1/y
assert f + g == g + f == (x + y)/(x*y)
assert x + F.ring.gens[0] == F.ring.gens[0] + x == 2*x
F, x,y = field("x,y", ZZ)
assert x + 3 == 3 + x
assert x + QQ(3,7) == QQ(3,7) + x == (7*x + 3)/7
Fuv, u,v = field("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)
f = (u*v + x)/(y + u*v)
assert dict(f.numer) == {(1, 0, 0, 0): 1, (0, 0, 0, 0): u*v}
assert dict(f.denom) == {(0, 1, 0, 0): 1, (0, 0, 0, 0): u*v}
Ruv, u,v = ring("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Ruv)
f = (u*v + x)/(y + u*v)
assert dict(f.numer) == {(1, 0, 0, 0): 1, (0, 0, 0, 0): u*v}
assert dict(f.denom) == {(0, 1, 0, 0): 1, (0, 0, 0, 0): u*v}
def test_FracElement___sub__():
F, x,y = field("x,y", QQ)
f, g = 1/x, 1/y
assert f - g == (-x + y)/(x*y)
assert x - F.ring.gens[0] == F.ring.gens[0] - x == 0
F, x,y = field("x,y", ZZ)
assert x - 3 == -(3 - x)
assert x - QQ(3,7) == -(QQ(3,7) - x) == (7*x - 3)/7
Fuv, u,v = field("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)
f = (u*v - x)/(y - u*v)
assert dict(f.numer) == {(1, 0, 0, 0):-1, (0, 0, 0, 0): u*v}
assert dict(f.denom) == {(0, 1, 0, 0): 1, (0, 0, 0, 0):-u*v}
Ruv, u,v = ring("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Ruv)
f = (u*v - x)/(y - u*v)
assert dict(f.numer) == {(1, 0, 0, 0):-1, (0, 0, 0, 0): u*v}
assert dict(f.denom) == {(0, 1, 0, 0): 1, (0, 0, 0, 0):-u*v}
def test_FracElement___mul__():
F, x,y = field("x,y", QQ)
f, g = 1/x, 1/y
assert f*g == g*f == 1/(x*y)
assert x*F.ring.gens[0] == F.ring.gens[0]*x == x**2
F, x,y = field("x,y", ZZ)
assert x*3 == 3*x
assert x*QQ(3,7) == QQ(3,7)*x == 3*x/7
Fuv, u,v = field("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)
f = ((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1)
assert dict(f.numer) == {(1, 1, 0, 0): u + 1, (0, 0, 0, 0): 1}
assert dict(f.denom) == {(0, 0, 1, 0): v - 1, (0, 0, 0, 1): -u*v, (0, 0, 0, 0): -1}
Ruv, u,v = ring("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Ruv)
f = ((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1)
assert dict(f.numer) == {(1, 1, 0, 0): u + 1, (0, 0, 0, 0): 1}
assert dict(f.denom) == {(0, 0, 1, 0): v - 1, (0, 0, 0, 1): -u*v, (0, 0, 0, 0): -1}
def test_FracElement___div__():
F, x,y = field("x,y", QQ)
f, g = 1/x, 1/y
assert f/g == y/x
assert x/F.ring.gens[0] == F.ring.gens[0]/x == 1
F, x,y = field("x,y", ZZ)
assert x*3 == 3*x
assert x/QQ(3,7) == (QQ(3,7)/x)**-1 == 7*x/3
raises(ZeroDivisionError, lambda: x/0)
raises(ZeroDivisionError, lambda: 1/(x - x))
raises(ZeroDivisionError, lambda: x/(x - x))
Fuv, u,v = field("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)
f = (u*v)/(x*y)
assert dict(f.numer) == {(0, 0, 0, 0): u*v}
assert dict(f.denom) == {(1, 1, 0, 0): 1}
g = (x*y)/(u*v)
assert dict(g.numer) == {(1, 1, 0, 0): 1}
assert dict(g.denom) == {(0, 0, 0, 0): u*v}
Ruv, u,v = ring("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Ruv)
f = (u*v)/(x*y)
assert dict(f.numer) == {(0, 0, 0, 0): u*v}
assert dict(f.denom) == {(1, 1, 0, 0): 1}
g = (x*y)/(u*v)
assert dict(g.numer) == {(1, 1, 0, 0): 1}
assert dict(g.denom) == {(0, 0, 0, 0): u*v}
def test_FracElement___pow__():
F, x,y = field("x,y", QQ)
f, g = 1/x, 1/y
assert f**3 == 1/x**3
assert g**3 == 1/y**3
assert (f*g)**3 == 1/(x**3*y**3)
assert (f*g)**-3 == (x*y)**3
raises(ZeroDivisionError, lambda: (x - x)**-3)
def test_FracElement_diff():
F, x,y,z = field("x,y,z", ZZ)
assert ((x**2 + y)/(z + 1)).diff(x) == 2*x/(z + 1)
@XFAIL
def test_FracElement___call__():
F, x,y,z = field("x,y,z", ZZ)
f = (x**2 + 3*y)/z
r = f(1, 1, 1)
assert r == 4 and not isinstance(r, FracElement)
raises(ZeroDivisionError, lambda: f(1, 1, 0))
def test_FracElement_evaluate():
F, x,y,z = field("x,y,z", ZZ)
Fyz = field("y,z", ZZ)[0]
f = (x**2 + 3*y)/z
assert f.evaluate(x, 0) == 3*Fyz.y/Fyz.z
raises(ZeroDivisionError, lambda: f.evaluate(z, 0))
def test_FracElement_subs():
F, x,y,z = field("x,y,z", ZZ)
f = (x**2 + 3*y)/z
assert f.subs(x, 0) == 3*y/z
raises(ZeroDivisionError, lambda: f.subs(z, 0))
def test_FracElement_compose():
pass
| bsd-3-clause | -77,161,251,841,424,020 | 26.838095 | 87 | 0.47839 | false |
w1kke/pylearn2 | pylearn2/models/maxout.py | 10 | 58407 | """
MLP Layer objects related to the paper
Maxout Networks. Ian J. Goodfellow, David Warde-Farley, Mehdi Mirza, Aaron
Courville, and Yoshua Bengio. ICML 2013.
If you use this code in your research, please cite this paper.
The objects in this module are Layer objects for use with
pylearn2.models.mlp.MLP. You need to make an MLP object in
order for these to do anything. For an example of how to build
an MLP with maxout hidden layers, see pylearn2/scripts/papers/maxout.
Note that maxout is designed for use with dropout, so you probably should
use dropout in your MLP when using these layers. If not using dropout, it
is best to use only 2 pieces per unit.
Note to developers / maintainers: when making changes to this module,
ensure that the changes do not break the examples in
pylearn2/scripts/papers/maxout.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import functools
import logging
import numpy as np
import warnings
from theano.compat.six.moves import xrange
from theano.compat.six.moves import zip as izip
from theano.sandbox import cuda
from theano import tensor as T
from pylearn2.compat import OrderedDict
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.model_extensions.norm_constraint import MaxL2FilterNorm
from pylearn2.models.mlp import Layer
from pylearn2.models.model import Model
from pylearn2.space import Conv2DSpace
from pylearn2.space import VectorSpace
from pylearn2.utils import py_integer_types
from pylearn2.utils import sharedX
from pylearn2.linear.conv2d_c01b import setup_detector_layer_c01b
from pylearn2.linear import local_c01b
if cuda.cuda_available:
from pylearn2.sandbox.cuda_convnet.pool import max_pool_c01b
else:
max_pool_c01b = None
from pylearn2.sandbox.cuda_convnet import check_cuda
logger = logging.getLogger(__name__)
class Maxout(Layer):
"""
A hidden layer that does max pooling over groups of linear
units. If you use this code in a research project, please
cite
"Maxout Networks" Ian J. Goodfellow, David Warde-Farley,
Mehdi Mirza, Aaron Courville, and Yoshua Bengio. ICML 2013
Parameters
----------
layer_name : str
A name for this layer that will be prepended to monitoring channels
related to this layer. Each layer in an MLP must have a unique
name.
num_units : int
The number of maxout units to use in this layer.
num_pieces: int
The number of linear pieces to use in each maxout unit.
pool_stride : int, optional
The distance between the start of each max pooling region. Defaults
to num_pieces, which makes the pooling regions disjoint. If set to
a smaller number, can do overlapping pools.
randomize_pools : bool, optional
If True, does max pooling over randomized subsets of the linear
responses, rather than over sequential subsets.
irange : float, optional
If specified, initializes each weight randomly in
U(-irange, irange)
sparse_init : int, optional
if specified, irange must not be specified.
This is an integer specifying how many weights to make non-zero.
All non-zero weights will be initialized randomly in
N(0, sparse_stdev^2)
sparse_stdev : float, optional
        The standard deviation of the non-zero weights created by
        sparse_init.
include_prob : float, optional
probability of including a weight element in the set
of weights initialized to U(-irange, irange). If not included
a weight is initialized to 0. This defaults to 1.
init_bias : float or ndarray, optional
A value that can be broadcasted to a numpy vector.
All biases are initialized to this number.
W_lr_scale: float, optional
The learning rate on the weights for this layer is multiplied by
this scaling factor
b_lr_scale: float, optional
The learning rate on the biases for this layer is multiplied by
this scaling factor
max_col_norm: float, optional
The norm of each column of the weight matrix is constrained to
have at most this norm. If unspecified, no constraint. Constraint
is enforced by re-projection (if necessary) at the end of each
update.
max_row_norm: float, optional
Like max_col_norm, but applied to the rows.
mask_weights: ndarray, optional
A binary matrix multiplied by the weights after each update,
allowing you to restrict their connectivity.
min_zero: bool, optional
If true, includes a zero in the set we take a max over for each
maxout unit. This is equivalent to pooling over rectified
linear units.
"""
def __str__(self):
"""
Returns
-------
rval : str
A string representation of the object. In this case, just the
class name.
"""
return "Maxout"
def __init__(self,
layer_name,
num_units,
num_pieces,
pool_stride=None,
randomize_pools=False,
irange=None,
sparse_init=None,
sparse_stdev=1.,
include_prob=1.0,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
max_col_norm=None,
max_row_norm=None,
mask_weights=None,
min_zero=False):
super(Maxout, self).__init__()
detector_layer_dim = num_units * num_pieces
pool_size = num_pieces
if pool_stride is None:
pool_stride = pool_size
self.__dict__.update(locals())
del self.self
self.b = sharedX(np.zeros((self.detector_layer_dim,)) + init_bias,
name=(layer_name + '_b'))
if max_row_norm is not None:
raise NotImplementedError()
if max_col_norm is not None:
self.extensions.append(MaxL2FilterNorm(max_col_norm))
@functools.wraps(Model.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
def set_input_space(self, space):
"""
Tells the layer to use the specified input space.
This resets parameters! The weight matrix is initialized with the
size needed to receive input from this space.
Parameters
----------
space : Space
The Space that the input will lie in.
"""
self.input_space = space
if isinstance(space, VectorSpace):
self.requires_reformat = False
self.input_dim = space.dim
else:
self.requires_reformat = True
self.input_dim = space.get_total_dimension()
self.desired_space = VectorSpace(self.input_dim)
if not (0 == ((self.detector_layer_dim - self.pool_size) %
self.pool_stride)):
if self.pool_stride == self.pool_size:
raise ValueError("detector_layer_dim = %d, pool_size = %d. "
"Should be divisible but remainder is %d" %
(self.detector_layer_dim,
self.pool_size,
self.detector_layer_dim % self.pool_size))
raise ValueError()
self.h_space = VectorSpace(self.detector_layer_dim)
self.pool_layer_dim = ((self.detector_layer_dim - self.pool_size) /
self.pool_stride + 1)
self.output_space = VectorSpace(self.pool_layer_dim)
rng = self.mlp.rng
if self.irange is not None:
assert self.sparse_init is None
W = rng.uniform(-self.irange,
self.irange,
(self.input_dim, self.detector_layer_dim)) * \
(rng.uniform(0., 1., (self.input_dim, self.detector_layer_dim))
< self.include_prob)
else:
assert self.sparse_init is not None
W = np.zeros((self.input_dim, self.detector_layer_dim))
def mask_rejects(idx, i):
if self.mask_weights is None:
return False
return self.mask_weights[idx, i] == 0.
for i in xrange(self.detector_layer_dim):
assert self.sparse_init <= self.input_dim
for j in xrange(self.sparse_init):
idx = rng.randint(0, self.input_dim)
while W[idx, i] != 0 or mask_rejects(idx, i):
idx = rng.randint(0, self.input_dim)
W[idx, i] = rng.randn()
W *= self.sparse_stdev
W = sharedX(W)
W.name = self.layer_name + '_W'
self.transformer = MatrixMul(W)
W, = self.transformer.get_params()
assert W.name is not None
if not hasattr(self, 'randomize_pools'):
self.randomize_pools = False
if self.randomize_pools:
permute = np.zeros((self.detector_layer_dim,
self.detector_layer_dim))
for j in xrange(self.detector_layer_dim):
i = rng.randint(self.detector_layer_dim)
permute[i, j] = 1
self.permute = sharedX(permute)
if self.mask_weights is not None:
expected_shape = (self.input_dim, self.detector_layer_dim)
if expected_shape != self.mask_weights.shape:
raise ValueError("Expected mask with shape " +
str(expected_shape) +
" but got " +
str(self.mask_weights.shape))
self.mask = sharedX(self.mask_weights)
def _modify_updates(self, updates):
"""
Replaces the values in `updates` if needed to enforce the options set
in the __init__ method, including `mask_weights`
Parameters
----------
updates : OrderedDict
A dictionary mapping parameters (including parameters not
belonging to this model) to updated values of those parameters.
The dictionary passed in contains the updates proposed by the
learning algorithm. This function modifies the dictionary
directly. The modified version will be compiled and executed
by the learning algorithm.
"""
# Patch old pickle files
if not hasattr(self, 'mask_weights'):
self.mask_weights = None
if self.mask_weights is not None:
W, = self.transformer.get_params()
if W in updates:
updates[W] = updates[W] * self.mask
@functools.wraps(Model.get_params)
def get_params(self):
assert self.b.name is not None
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
assert self.b not in rval
rval.append(self.b)
return rval
@functools.wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@functools.wraps(Layer.get_l1_weight_decay)
def get_l1_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.abs_(W).sum()
@functools.wraps(Model.get_weights)
def get_weights(self):
if self.requires_reformat:
# This is not really an unimplemented case.
# We actually don't know how to format the weights
# in design space. We got the data in topo space
# and we don't have access to the dataset
raise NotImplementedError()
W, = self.transformer.get_params()
W = W.get_value()
if not hasattr(self, 'randomize_pools'):
self.randomize_pools = False
if self.randomize_pools:
warnings.warn("randomize_pools makes get_weights multiply by the "
"permutation matrix. If you call set_weights(W) and "
"then call get_weights(), the return value will "
"WP not W.")
P = self.permute.get_value()
return np.dot(W, P)
return W
@functools.wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@functools.wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@functools.wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@functools.wraps(Model.get_weights_format)
def get_weights_format(self):
return ('v', 'h')
@functools.wraps(Model.get_weights_view_shape)
def get_weights_view_shape(self):
total = self.detector_layer_dim
cols = self.pool_size
if cols == 1:
# Let the PatchViewer decide how to arrange the units
# when they're not pooled
raise NotImplementedError()
# When they are pooled, make each pooling unit have one row
rows = total // cols
if rows * cols < total:
rows = rows + 1
return rows, cols
@functools.wraps(Model.get_weights_topo)
def get_weights_topo(self):
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
W, = self.transformer.get_params()
assert self.input_space.num_channels in [1, 3]
viewer_space = Conv2DSpace(shape=self.input_space.shape,
num_channels=self.input_space.num_channels,
axes=('b', 0, 1, 'c'))
W = self.desired_space.format_as(W.T, viewer_space)
rval = W.eval()
return rval
@functools.wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
W, = self.transformer.get_params()
assert W.ndim == 2
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=1))
col_norms = T.sqrt(sq_W.sum(axis=0))
row_norms_min = row_norms.min()
row_norms_min.__doc__ = ("The smallest norm of any row of the "
"weight matrix W. This is a measure of the "
"least influence any visible unit has.")
rval = OrderedDict([('row_norms_min', row_norms_min),
('row_norms_mean', row_norms.mean()),
('row_norms_max', row_norms.max()),
('col_norms_min', col_norms.min()),
('col_norms_mean', col_norms.mean()),
('col_norms_max', col_norms.max()), ])
if (state is not None) or (state_below is not None):
if state is None:
state = self.fprop(state_below)
P = state
if self.pool_size == 1:
vars_and_prefixes = [(P, '')]
else:
vars_and_prefixes = [(P, 'p_')]
for var, prefix in vars_and_prefixes:
v_max = var.max(axis=0)
v_min = var.min(axis=0)
v_mean = var.mean(axis=0)
v_range = v_max - v_min
# max_x.mean_u is "the mean over *u*nits of the max over
# e*x*amples" The x and u are included in the name because
# otherwise its hard to remember which axis is which when
# reading the monitor I use inner.outer
# rather than outer_of_inner or
# something like that because I want mean_x.* to appear next to
# each other in the alphabetical list, as these are commonly
# plotted together
for key, val in [('max_x.max_u', v_max.max()),
('max_x.mean_u', v_max.mean()),
('max_x.min_u', v_max.min()),
('min_x.max_u', v_min.max()),
('min_x.mean_u', v_min.mean()),
('min_x.min_u', v_min.min()),
('range_x.max_u', v_range.max()),
('range_x.mean_u', v_range.mean()),
('range_x.min_u', v_range.min()),
('mean_x.max_u', v_mean.max()),
('mean_x.mean_u', v_mean.mean()),
('mean_x.min_u', v_mean.min())]:
rval[prefix + key] = val
return rval
@functools.wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
if self.requires_reformat:
state_below = self.input_space.format_as(state_below,
self.desired_space)
z = self.transformer.lmul(state_below) + self.b
if not hasattr(self, 'randomize_pools'):
self.randomize_pools = False
if not hasattr(self, 'pool_stride'):
self.pool_stride = self.pool_size
if self.randomize_pools:
z = T.dot(z, self.permute)
if not hasattr(self, 'min_zero'):
self.min_zero = False
if self.min_zero:
p = 0.
else:
p = None
last_start = self.detector_layer_dim - self.pool_size
for i in xrange(self.pool_size):
cur = z[:, i:last_start + i + 1:self.pool_stride]
if p is None:
p = cur
else:
p = T.maximum(cur, p)
p.name = self.layer_name + '_p_'
return p
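# Editor's note: the helper below is not part of the original pylearn2
# source.  It is a NumPy re-statement of the strided-slice pooling done in
# Maxout.fprop above, included only to illustrate how overlapping pools
# (pool_stride < pool_size) are formed; the argument names are hypothetical.
def _maxout_pool_sketch(z, pool_size, pool_stride):
    """
    Given a (batch, detector_layer_dim) array `z`, return the maxout
    activations obtained by taking the max over each window of `pool_size`
    consecutive linear responses, with windows starting every `pool_stride`
    columns.  Assumes (detector_layer_dim - pool_size) is divisible by
    pool_stride, as enforced in Maxout.set_input_space.
    """
    last_start = z.shape[1] - pool_size
    p = None
    for i in xrange(pool_size):
        # Slice i picks the i-th element of every pooling window.
        cur = z[:, i:last_start + i + 1:pool_stride]
        p = cur if p is None else np.maximum(cur, p)
    return p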
class MaxoutConvC01B(Layer):
"""
Maxout units arranged in a convolutional layer, with
spatial max pooling on top of the maxout. If you use this
code in a research project, please cite
"Maxout Networks" Ian J. Goodfellow, David Warde-Farley,
Mehdi Mirza, Aaron Courville, and Yoshua Bengio. ICML 2013
This uses the C01B ("channels", topological axis 0,
topological axis 1, "batch") format of tensors for input
and output.
The back-end is Alex Krizhevsky's cuda-convnet library,
so it is extremely fast, but requires a GPU.
Parameters
----------
num_channels : int
The number of output channels the layer should have.
Note that it must internally compute num_channels * num_pieces
convolution channels.
num_pieces : int
The number of linear pieces used to make each maxout unit.
kernel_shape : tuple
The shape of the convolution kernel.
pool_shape : tuple
The shape of the spatial max pooling. A two-tuple of ints.
This is redundant as cuda-convnet requires the pool shape to
be square.
pool_stride : tuple
The stride of the spatial max pooling. Also must be square.
layer_name : str
A name for this layer that will be prepended to
monitoring channels related to this layer.
irange : float, optional
if specified, initializes each weight randomly in
U(-irange, irange)
init_bias : float, optional
All biases are initialized to this number
W_lr_scale : float, optional
The learning rate on the weights for this layer is
multiplied by this scaling factor
b_lr_scale : float, optional
The learning rate on the biases for this layer is
multiplied by this scaling factor
pad : int, optional
The amount of zero-padding to implicitly add to the boundary of the
image when computing the convolution. Useful for making sure pixels
at the edge still get to influence multiple hidden units.
fix_pool_shape : bool, optional
If True, will modify self.pool_shape to avoid having
pool shape bigger than the entire detector layer.
If you have this on, you should probably also have
fix_pool_stride on, since the pool shape might shrink
smaller than the stride, even if the stride was initially
valid.
The "fix" parameters are useful for working with a hyperparameter
optimization package, which might often propose sets of
hyperparameters that are not feasible, but can easily be projected
back into the feasible set.
fix_pool_stride : bool, optional
        If True, will modify self.pool_stride to avoid having a stride
        larger than the pool shape.
fix_kernel_shape : bool, optional
if True, will modify self.kernel_shape to avoid having the kernel
shape bigger than the implicitly zero padded input layer
partial_sum : int, optional
a parameter that controls whether to prefer runtime savings
or memory savings when computing the gradient with respect to
the kernels. See pylearn2.sandbox.cuda_convnet.weight_acts.py
for details. The default is to prefer high speed.
Note that changing this setting may change the value of computed
results slightly due to different rounding error.
tied_b : bool, optional
If true, all biases in the same channel are constrained to be the
same as each other. Otherwise, each bias at each location is
learned independently.
max_kernel_norm : float, optional
If specified, each kernel is constrained to have at most this norm.
input_normalization : callable, optional
see output normalization
detector_normalization : callable, optional
see output normalization
min_zero : bool, optional
        If True, the output is clamped at zero, which is equivalent to
        including a zero in the max over each unit's pieces.
output_normalization : callable, optional
if specified, should be a callable object. the state of the
network is optionally replaced with normalization(state) at each
of the 3 points in processing:
- input: the input the layer receives can be normalized right
away
- detector: the maxout units can be normalized prior to the
spatial pooling
            - output: the output of the layer, after spatial pooling,
can be normalized as well
kernel_stride : tuple, optional
vertical and horizontal pixel stride between each detector.
"""
def __init__(self,
num_channels,
num_pieces,
kernel_shape,
pool_shape,
pool_stride,
layer_name,
irange=None,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
pad=0,
fix_pool_shape=False,
fix_pool_stride=False,
fix_kernel_shape=False,
partial_sum=1,
tied_b=False,
max_kernel_norm=None,
input_normalization=None,
detector_normalization=None,
min_zero=False,
output_normalization=None,
kernel_stride=(1, 1)):
check_cuda(str(type(self)))
super(MaxoutConvC01B, self).__init__()
detector_channels = num_channels * num_pieces
self.__dict__.update(locals())
del self.self
@functools.wraps(Model.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
def set_input_space(self, space):
"""
Tells the layer to use the specified input space.
This resets parameters! The kernel tensor is initialized with the
size needed to receive input from this space.
Parameters
----------
space : Space
The Space that the input will lie in.
"""
setup_detector_layer_c01b(layer=self,
input_space=space,
rng=self.mlp.rng)
rng = self.mlp.rng
detector_shape = self.detector_space.shape
def handle_pool_shape(idx):
if self.pool_shape[idx] < 1:
raise ValueError("bad pool shape: " + str(self.pool_shape))
if self.pool_shape[idx] > detector_shape[idx]:
if self.fix_pool_shape:
assert detector_shape[idx] > 0
self.pool_shape[idx] = detector_shape[idx]
else:
raise ValueError("Pool shape exceeds detector layer shape "
"on axis %d" % idx)
map(handle_pool_shape, [0, 1])
assert self.pool_shape[0] == self.pool_shape[1]
assert self.pool_stride[0] == self.pool_stride[1]
assert all(isinstance(elem, py_integer_types)
for elem in self.pool_stride)
if self.pool_stride[0] > self.pool_shape[0]:
if self.fix_pool_stride:
warnings.warn("Fixing the pool stride")
ps = self.pool_shape[0]
assert isinstance(ps, py_integer_types)
self.pool_stride = [ps, ps]
else:
raise ValueError("Stride too big.")
assert all(isinstance(elem, py_integer_types)
for elem in self.pool_stride)
dummy_detector = sharedX(self.detector_space.get_origin_batch(2)[0:16,
:,
:,
:])
dummy_p = max_pool_c01b(c01b=dummy_detector,
pool_shape=self.pool_shape,
pool_stride=self.pool_stride)
dummy_p = dummy_p.eval()
self.output_space = Conv2DSpace(shape=[dummy_p.shape[1],
dummy_p.shape[2]],
num_channels=self.num_channels,
axes=('c', 0, 1, 'b'))
logger.info('Output space: {0}'.format(self.output_space.shape))
def _modify_updates(self, updates):
"""
Replaces the values in `updates` if needed to enforce the options set
in the __init__ method, including `max_kernel_norm`.
Parameters
----------
updates : OrderedDict
A dictionary mapping parameters (including parameters not
belonging to this model) to updated values of those parameters.
The dictionary passed in contains the updates proposed by the
learning algorithm. This function modifies the dictionary
directly. The modified version will be compiled and executed
by the learning algorithm.
"""
if self.max_kernel_norm is not None:
W, = self.transformer.get_params()
if W in updates:
updated_W = updates[W]
row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(0, 1, 2)))
desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)
scales = desired_norms / (1e-7 + row_norms)
updates[W] = (updated_W * scales.dimshuffle('x', 'x', 'x', 0))
@functools.wraps(Model.get_params)
def get_params(self):
assert self.b.name is not None
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
assert self.b not in rval
rval.append(self.b)
return rval
@functools.wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@functools.wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@functools.wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@functools.wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@functools.wraps(Model.get_weights_topo)
def get_weights_topo(self):
return self.transformer.get_weights_topo()
@functools.wraps(Layer.fprop)
def fprop(self, state_below):
check_cuda(str(type(self)))
self.input_space.validate(state_below)
if not hasattr(self, 'input_normalization'):
self.input_normalization = None
if self.input_normalization:
state_below = self.input_normalization(state_below)
# Alex's code requires # input channels to be <= 3 or a multiple of 4
# so we add dummy channels if necessary
if not hasattr(self, 'dummy_channels'):
self.dummy_channels = 0
if self.dummy_channels > 0:
zeros = T.zeros_like(state_below[0:self.dummy_channels, :, :, :])
state_below = T.concatenate((state_below, zeros), axis=0)
z = self.transformer.lmul(state_below)
if not hasattr(self, 'tied_b'):
self.tied_b = False
if self.tied_b:
b = self.b.dimshuffle(0, 'x', 'x', 'x')
else:
b = self.b.dimshuffle(0, 1, 2, 'x')
z = z + b
if self.layer_name is not None:
z.name = self.layer_name + '_z'
self.detector_space.validate(z)
assert self.detector_space.num_channels % 16 == 0, (
'Wrong channels number: ' + str(self.detector_space.num_channels) +
'. The number of channels should be a multiple of 16. Note that '
'the number of channels is determined as: num_channels * '
'num_pieces')
if self.output_space.num_channels % 16 == 0:
# alex's max pool op only works when the number of channels
# is divisible by 16. we can only do the cross-channel pooling
# first if the cross-channel pooling preserves that property
if self.num_pieces != 1:
s = None
for i in xrange(self.num_pieces):
t = z[i::self.num_pieces, :, :, :]
if s is None:
s = t
else:
s = T.maximum(s, t)
z = s
if self.detector_normalization:
z = self.detector_normalization(z)
p = max_pool_c01b(c01b=z, pool_shape=self.pool_shape,
pool_stride=self.pool_stride)
else:
if self.detector_normalization is not None:
raise NotImplementedError("We can't normalize the detector "
"layer because the detector layer "
"never exists as a stage of "
"processing in this implementation.")
z = max_pool_c01b(c01b=z, pool_shape=self.pool_shape,
pool_stride=self.pool_stride)
if self.num_pieces != 1:
s = None
for i in xrange(self.num_pieces):
t = z[i::self.num_pieces, :, :, :]
if s is None:
s = t
else:
s = T.maximum(s, t)
z = s
p = z
self.output_space.validate(p)
if hasattr(self, 'min_zero') and self.min_zero:
p = p * (p > 0.)
if not hasattr(self, 'output_normalization'):
self.output_normalization = None
if self.output_normalization:
p = self.output_normalization(p)
return p
@functools.wraps(Model.get_weights_view_shape)
def get_weights_view_shape(self):
total = self.detector_channels
cols = self.num_pieces
if cols == 1:
# Let the PatchViewer decide how to arrange the units
# when they're not pooled
raise NotImplementedError()
# When they are pooled, make each pooling unit have one row
rows = total // cols
if rows * cols < total:
rows = rows + 1
return rows, cols
@functools.wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
W, = self.transformer.get_params()
assert W.ndim == 4
sq_W = T.sqr(W)
row_norms = T.sqrt(sq_W.sum(axis=(0, 1, 2)))
rval = OrderedDict([('kernel_norms_min', row_norms.min()),
('kernel_norms_mean', row_norms.mean()),
('kernel_norms_max', row_norms.max()), ])
if (state is not None) or (state_below is not None):
if state is None:
state = self.fprop(state_below)
P = state
vars_and_prefixes = [(P, '')]
for var, prefix in vars_and_prefixes:
assert var.ndim == 4
v_max = var.max(axis=(1, 2, 3))
v_min = var.min(axis=(1, 2, 3))
v_mean = var.mean(axis=(1, 2, 3))
v_range = v_max - v_min
# max_x.mean_u is "the mean over *u*nits of the max over
# e*x*amples" The x and u are included in the name because
# otherwise its hard to remember which axis is which when
# reading the monitor I use inner.outer rather than
# outer_of_inner or something like that because I want
# mean_x.* to appear next to each other in the
# alphabetical list, as these are commonly plotted
# together
for key, val in [('max_x.max_u', v_max.max()),
('max_x.mean_u', v_max.mean()),
('max_x.min_u', v_max.min()),
('min_x.max_u', v_min.max()),
('min_x.mean_u', v_min.mean()),
('min_x.min_u', v_min.min()),
('range_x.max_u', v_range.max()),
('range_x.mean_u', v_range.mean()),
('range_x.min_u', v_range.min()),
('mean_x.max_u', v_mean.max()),
('mean_x.mean_u', v_mean.mean()),
('mean_x.min_u', v_mean.min())]:
rval[prefix + key] = val
return rval
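# Editor's note: the helper below is not part of the original pylearn2
# source.  It is a small NumPy sketch of the C01B axis convention that
# MaxoutConvC01B (above) and MaxoutLocalC01B (below) use; the minibatch
# shape is hypothetical.
def _c01b_axes_sketch():
    """
    Show how a minibatch stored in the common ('b', 0, 1, 'c') layout maps
    onto the ('c', 0, 1, 'b') layout expected by these layers.  Inside an
    MLP the same conversion is expressed with two Conv2DSpace instances
    that differ only in their `axes` argument, via space.format_as().
    """
    batch = np.zeros((128, 32, 32, 3))          # ('b', 0, 1, 'c')
    c01b = np.transpose(batch, (3, 1, 2, 0))    # ('c', 0, 1, 'b')
    assert c01b.shape == (3, 32, 32, 128)
    return c01b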
class MaxoutLocalC01B(Layer):
"""
Maxout units arranged in a convolutional layer, with
spatial max pooling on top of the maxout. If you use this
code in a research project, please cite
"Maxout Networks" Ian J. Goodfellow, David Warde-Farley,
Mehdi Mirza, Aaron Courville, and Yoshua Bengio. ICML 2013
This uses the C01B ("channels", topological axis 0,
topological axis 1, "batch") format of tensors for input
and output.
Unlike MaxoutConvC01B, this class supports operation on CPU,
thanks to James Bergstra's TheanoLinear library, which
pylearn2 has forked. The GPU code is still based on Alex
    Krizhevsky's cuda_convnet library.
Parameters
----------
num_channels : int
The number of output channels the layer should have.
Note that it must internally compute num_channels * num_pieces
convolution channels.
num_pieces : int
The number of linear pieces used to make each maxout unit.
kernel_shape : tuple
The shape of the convolution kernel.
layer_name : str
A name for this layer that will be prepended to
monitoring channels related to this layer.
pool_shape : tuple, optional
The shape of the spatial max pooling. A two-tuple of ints.
This is redundant as cuda-convnet requires the pool shape to
be square.
Defaults to None, which means no spatial pooling
pool_stride : tuple, optional
The stride of the spatial max pooling. Also must be square.
Defaults to None, which means no spatial pooling.
irange : float, optional
if specified, initializes each weight randomly in
U(-irange, irange)
init_bias : float, optional
All biases are initialized to this number
W_lr_scale : float, optional
The learning rate on the weights for this layer is
multiplied by this scaling factor
b_lr_scale : float, optional
The learning rate on the biases for this layer is
multiplied by this scaling factor
pad : int, optional
The amount of zero-padding to implicitly add to the boundary of the
image when computing the convolution. Useful for making sure pixels
at the edge still get to influence multiple hidden units.
fix_pool_shape : bool, optional
If True, will modify self.pool_shape to avoid having
pool shape bigger than the entire detector layer.
If you have this on, you should probably also have
fix_pool_stride on, since the pool shape might shrink
smaller than the stride, even if the stride was initially
valid.
The "fix" parameters are useful for working with a hyperparameter
optimization package, which might often propose sets of
hyperparameters that are not feasible, but can easily be projected
back into the feasible set.
fix_pool_stride : bool, optional
        If True, will modify self.pool_stride to avoid having a stride
        larger than the pool shape.
fix_kernel_shape : bool, optional
if True, will modify self.kernel_shape to avoid
having the kernel shape bigger than the implicitly
zero padded input layer
partial_sum : int, optional
a parameter that controls whether to prefer runtime savings
or memory savings when computing the gradient with respect to
the kernels. See pylearn2.sandbox.cuda_convnet.weight_acts.py
for details. The default is to prefer high speed.
Note that changing this setting may change the value of computed
results slightly due to different rounding error.
tied_b : bool, optional
If true, all biases in the same channel are constrained to be the
same as each other. Otherwise, each bias at each location is
learned independently.
max_kernel_norm : float, optional
If specified, each kernel is constrained to have at most this norm.
input_normalization : callable
see output_normalization
detector_normalization : callable
see output_normalization
min_zero : bool, optional
        If True, the output is clamped at zero, which is equivalent to
        including a zero in the max over each unit's pieces.
output_normalization : callable
if specified, should be a callable object. the state of the network
is optionally replaced with normalization(state) at each of the 3
points in processing:
- input: the input the layer receives can be normalized right
away
- detector: the maxout units can be normalized prior to the
spatial pooling
            - output: the output of the layer, after spatial pooling, can be
normalized as well
kernel_stride : tuple, optional
Vertical and horizontal pixel stride between each detector.
input_groups : int, optional
WRITEME
"""
def __init__(self,
num_channels,
num_pieces,
kernel_shape,
layer_name,
pool_shape=None,
pool_stride=None,
irange=None,
init_bias=0.,
W_lr_scale=None,
b_lr_scale=None,
pad=0,
fix_pool_shape=False,
fix_pool_stride=False,
fix_kernel_shape=False,
partial_sum=1,
tied_b=False,
max_kernel_norm=None,
input_normalization=None,
detector_normalization=None,
min_zero=False,
output_normalization=None,
input_groups=1,
kernel_stride=(1, 1)):
assert (pool_shape is None) == (pool_stride is None)
detector_channels = num_channels * num_pieces
self.__dict__.update(locals())
del self.self
@functools.wraps(Model.get_lr_scalers)
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
W, = self.transformer.get_params()
rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
rval[self.b] = self.b_lr_scale
return rval
def set_input_space(self, space):
"""
Tells the layer to use the specified input space.
This resets parameters! The weight tensor is initialized with the
size needed to receive input from this space.
Parameters
----------
space : Space
The Space that the input will lie in.
"""
self.input_space = space
if not isinstance(self.input_space, Conv2DSpace):
raise TypeError("The input to a convolutional layer should be a "
"Conv2DSpace, but layer " + self.layer_name +
" got " + str(type(self.input_space)))
# note: I think the desired space thing is actually redundant,
# since LinearTransform will also dimshuffle the axes if needed
# It's not hurting anything to have it here but we could reduce
# code complexity by removing it
self.desired_space = Conv2DSpace(shape=space.shape,
channels=space.num_channels,
axes=('c', 0, 1, 'b'))
ch = self.desired_space.num_channels
rem = ch % 4
if ch > 3 and rem != 0:
self.dummy_channels = 4 - rem
else:
self.dummy_channels = 0
self.dummy_space = Conv2DSpace(shape=space.shape,
channels=(space.num_channels +
self.dummy_channels),
axes=('c', 0, 1, 'b'))
rng = self.mlp.rng
output_shape = \
[int(np.ceil((i_sh + 2. * self.pad - k_sh) / float(k_st))) + 1
for i_sh, k_sh, k_st in izip(self.input_space.shape,
self.kernel_shape,
self.kernel_stride)]
def handle_kernel_shape(idx):
if self.kernel_shape[idx] < 1:
raise ValueError("kernel must have strictly positive size on "
"all axes but has shape: " +
str(self.kernel_shape))
if output_shape[idx] <= 0:
if self.fix_kernel_shape:
self.kernel_shape[idx] = (self.input_space.shape[idx] +
2 * self.pad)
assert self.kernel_shape[idx] != 0
output_shape[idx] = 1
warnings.warn("Had to change the kernel shape to make "
"network feasible")
else:
raise ValueError("kernel too big for input (even with "
"zero padding)")
map(handle_kernel_shape, [0, 1])
self.detector_space = Conv2DSpace(shape=output_shape,
num_channels=self.detector_channels,
axes=('c', 0, 1, 'b'))
if self.pool_shape is not None:
def handle_pool_shape(idx):
if self.pool_shape[idx] < 1:
raise ValueError("bad pool shape: " + str(self.pool_shape))
if self.pool_shape[idx] > output_shape[idx]:
if self.fix_pool_shape:
assert output_shape[idx] > 0
self.pool_shape[idx] = output_shape[idx]
else:
raise ValueError("Pool shape exceeds detector layer "
"shape on axis %d" % idx)
map(handle_pool_shape, [0, 1])
assert self.pool_shape[0] == self.pool_shape[1]
assert self.pool_stride[0] == self.pool_stride[1]
assert all(isinstance(elem, py_integer_types)
for elem in self.pool_stride)
if self.pool_stride[0] > self.pool_shape[0]:
if self.fix_pool_stride:
warnings.warn("Fixing the pool stride")
ps = self.pool_shape[0]
assert isinstance(ps, py_integer_types)
self.pool_stride = [ps, ps]
else:
raise ValueError("Stride too big.")
assert all(isinstance(elem, py_integer_types)
for elem in self.pool_stride)
if self.irange is not None:
self.transformer = local_c01b.make_random_local(
input_groups=self.input_groups,
irange=self.irange,
input_axes=self.desired_space.axes,
image_shape=self.desired_space.shape,
output_axes=self.detector_space.axes,
input_channels=self.dummy_space.num_channels,
output_channels=self.detector_space.num_channels,
kernel_shape=self.kernel_shape,
kernel_stride=self.kernel_stride,
pad=self.pad,
partial_sum=self.partial_sum,
rng=rng)
W, = self.transformer.get_params()
W.name = 'W'
if self.tied_b:
self.b = sharedX(np.zeros((self.detector_space.num_channels)) +
self.init_bias)
else:
self.b = sharedX(self.detector_space.get_origin() + self.init_bias)
self.b.name = 'b'
logger.info('Input shape: {0}'.format(self.input_space.shape))
logger.info(self.layer_name +
' detector space: {0}'.format(self.detector_space.shape))
assert self.detector_space.num_channels >= 16
if self.pool_shape is None or np.prod(self.pool_shape) == 1:
self.output_space = Conv2DSpace(shape=self.detector_space.shape,
num_channels=self.num_channels,
axes=('c', 0, 1, 'b'))
elif max_pool_c01b is not None:
ds = self.detector_space
dummy_detector = sharedX(ds.get_origin_batch(2)[0:16, :, :, :])
dummy_p = max_pool_c01b(c01b=dummy_detector,
pool_shape=self.pool_shape,
pool_stride=self.pool_stride)
dummy_p = dummy_p.eval()
self.output_space = Conv2DSpace(shape=[dummy_p.shape[1],
dummy_p.shape[2]],
num_channels=self.num_channels,
axes=('c', 0, 1, 'b'))
else:
raise NotImplementedError("Pooling is not implemented for CPU")
logger.info('Output space: {0}'.format(self.output_space.shape))
def _modify_updates(self, updates):
"""
Replaces the values in `updates` if needed to enforce the options set
in the __init__ method, including `max_kernel_norm`.
Parameters
----------
updates : OrderedDict
A dictionary mapping parameters (including parameters not
belonging to this model) to updated values of those parameters.
The dictionary passed in contains the updates proposed by the
learning algorithm. This function modifies the dictionary
directly. The modified version will be compiled and executed
by the learning algorithm.
"""
if self.max_kernel_norm is not None:
W, = self.transformer.get_params()
if W in updates:
# TODO: push some of this into the transformer itself
updated_W = updates[W]
updated_norms = self.get_filter_norms(updated_W)
desired_norms = T.clip(updated_norms, 0, self.max_kernel_norm)
scales = desired_norms / (1e-7 + updated_norms)
updates[W] = (updated_W *
scales.dimshuffle(0, 1, 'x', 'x', 'x', 2, 3))
@functools.wraps(Model.get_params)
def get_params(self):
assert self.b.name is not None
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
assert self.b not in rval
rval.append(self.b)
return rval
@functools.wraps(Layer.get_weight_decay)
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
@functools.wraps(Layer.set_weights)
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
@functools.wraps(Layer.set_biases)
def set_biases(self, biases):
self.b.set_value(biases)
@functools.wraps(Layer.get_biases)
def get_biases(self):
return self.b.get_value()
@functools.wraps(Layer.get_weights_topo)
def get_weights_topo(self):
return self.transformer.get_weights_topo()
def get_filter_norms(self, W=None):
"""
Returns
-------
norms : theano 4 tensor
A theano expression for the norms of the different filters in
the layer.
TODO: explain significance of each of the 4 axes, and what
order they'll be in.
"""
# TODO: push this into the transformer class itself
if W is None:
W, = self.transformer.get_params()
assert W.ndim == 7
sq_W = T.sqr(W)
norms = T.sqrt(sq_W.sum(axis=(2, 3, 4)))
return norms
@functools.wraps(Layer.fprop)
def fprop(self, state_below):
self.input_space.validate(state_below)
state_below = self.input_space.format_as(state_below,
self.desired_space)
if not hasattr(self, 'input_normalization'):
self.input_normalization = None
if self.input_normalization:
state_below = self.input_normalization(state_below)
# Alex's code requires # input channels to be <= 3 or a multiple of 4
# so we add dummy channels if necessary
if not hasattr(self, 'dummy_channels'):
self.dummy_channels = 0
if self.dummy_channels > 0:
zeros = T.zeros_like(state_below[0:self.dummy_channels, :, :, :])
state_below = T.concatenate((state_below, zeros), axis=0)
z = self.transformer.lmul(state_below)
if not hasattr(self, 'tied_b'):
self.tied_b = False
if self.tied_b:
b = self.b.dimshuffle(0, 'x', 'x', 'x')
else:
b = self.b.dimshuffle(0, 1, 2, 'x')
z = z + b
if self.layer_name is not None:
z.name = self.layer_name + '_z'
self.detector_space.validate(z)
assert self.detector_space.num_channels % 16 == 0
if self.output_space.num_channels % 16 == 0:
# alex's max pool op only works when the number of channels
# is divisible by 16. we can only do the cross-channel pooling
# first if the cross-channel pooling preserves that property
if self.num_pieces != 1:
s = None
for i in xrange(self.num_pieces):
t = z[i::self.num_pieces, :, :, :]
if s is None:
s = t
else:
s = T.maximum(s, t)
z = s
if self.detector_normalization:
z = self.detector_normalization(z)
if self.pool_shape is None or np.prod(self.pool_shape) == 1:
p = z
else:
p = max_pool_c01b(c01b=z,
pool_shape=self.pool_shape,
pool_stride=self.pool_stride)
else:
if self.detector_normalization is not None:
raise NotImplementedError("We can't normalize the detector "
"layer because the detector layer "
"never exists as a stage of "
"processing in this "
"implementation.")
            if self.pool_shape is not None and np.prod(self.pool_shape) > 1:
z = max_pool_c01b(c01b=z,
pool_shape=self.pool_shape,
pool_stride=self.pool_stride)
if self.num_pieces != 1:
s = None
for i in xrange(self.num_pieces):
t = z[i::self.num_pieces, :, :, :]
if s is None:
s = t
else:
s = T.maximum(s, t)
z = s
p = z
self.output_space.validate(p)
if hasattr(self, 'min_zero') and self.min_zero:
p = p * (p > 0.)
if not hasattr(self, 'output_normalization'):
self.output_normalization = None
if self.output_normalization:
p = self.output_normalization(p)
return p
@functools.wraps(Model.get_weights_view_shape)
def get_weights_view_shape(self):
total = self.detector_channels
cols = self.num_pieces
if cols == 1:
# Let the PatchViewer decide how to arrange the units
# when they're not pooled
raise NotImplementedError()
# When they are pooled, make each pooling unit have one row
rows = total // cols
if rows * cols < total:
rows = rows + 1
return rows, cols
@functools.wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
filter_norms = self.get_filter_norms()
rval = OrderedDict([('filter_norms_min', filter_norms.min()),
('filter_norms_mean', filter_norms.mean()),
('filter_norms_max', filter_norms.max()), ])
if (state is not None) or (state_below is not None):
if state is None:
state = self.fprop(state_below)
P = state
vars_and_prefixes = [(P, '')]
for var, prefix in vars_and_prefixes:
assert var.ndim == 4
v_max = var.max(axis=(1, 2, 3))
v_min = var.min(axis=(1, 2, 3))
v_mean = var.mean(axis=(1, 2, 3))
v_range = v_max - v_min
# max_x.mean_u is "the mean over *u*nits of the max over
# e*x*amples" The x and u are included in the name because
                # otherwise it's hard to remember which axis is which when
                # reading the monitor. I use inner.outer rather than
# outer_of_inner or something like that because I want
# mean_x.* to appear next to each other in the
# alphabetical list, as these are commonly plotted
# together
for key, val in [('max_x.max_u', v_max.max()),
('max_x.mean_u', v_max.mean()),
('max_x.min_u', v_max.min()),
('min_x.max_u', v_min.max()),
('min_x.mean_u', v_min.mean()),
('min_x.min_u', v_min.min()),
('range_x.max_u', v_range.max()),
('range_x.mean_u', v_range.mean()),
('range_x.min_u', v_range.min()),
('mean_x.max_u', v_mean.max()),
('mean_x.mean_u', v_mean.mean()),
('mean_x.min_u', v_mean.min())]:
rval[prefix + key] = val
return rval
| bsd-3-clause | -5,272,755,779,270,390,000 | 37.680132 | 79 | 0.5455 | false |
cvmfs/cvmfs | cvmfs/shrinkwrap/scripts/spec_diff.py | 2 | 4059 | #
# This file is part of the CernVM File System.
#
import argparse
class TreeNode:
def __init__(self, mode):
self.children = {}
self.mode = mode
def getString(self, prefix, wildcard):
stringRes = ''
if self.mode == '!':
stringRes = '!' + prefix + '\n'
if not wildcard:
if self.mode == '^':
stringRes = '^'+prefix+'*\n'
elif self.mode == '*':
stringRes = prefix + '/*\n'
elif self.mode == '/':
if len(prefix) == 0:
stringRes = "^/\n"
stringRes = '^' + prefix + '\n'
for key, val in self.children.items():
stringRes+=val.getString(prefix+'/'+key, self.mode == '*')
return stringRes
def __str__(self):
return self.getString("", False)
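# Note added for clarity (inferred from TreeNode.getString above, not part of
# the original script). A node's mode controls how its path is written to the
# merged specification:
#   '!'  ->  "!<path>"    exclusion entry
#   '^'  ->  "^<path>*"   prefix entry with trailing wildcard
#   '*'  ->  "<path>/*"   directory-contents wildcard entry
#   '/'  ->  "^<path>"    plain directory entry
#   '_' and '-' are internal passthrough markers and emit nothing themselves.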
class DiffBuilder:
def __init__(self, args):
self.infiles = args.infiles
self.outfile = args.outfile
self.depth = args.depth
self.root = TreeNode('/')
def build_diff(self):
with open(self.infiles[0], 'r') as specFile0:
for curLine in specFile0:
(curLine, mode) = self.get_info(curLine)
path_parts = curLine.split('/')
curNode = self.add_node(path_parts, mode)
curNode.mode = self.calc_new_mode(curNode.mode, mode)
for curfile in self.infiles[1:]:
with open(curfile, 'r') as curSpecFile:
for curLine in curSpecFile:
(curLine, mode) = self.get_info(curLine)
path_parts = curLine.split('/')
if (mode == '!'):
curNode = self.add_node(path_parts, mode)
curNode.mode = self.calc_new_mode(curNode.mode, mode)
else:
curNode = self.root
passthrough = '-' if mode=='!' else '_'
curDepth = 0
mergable = True
for part in path_parts:
curDepth+=1
if not part in curNode.children\
and curDepth > self.depth\
and mergable:
print("Found mergable")
curNode.mode = self.calc_new_mode(curNode.mode, '*')
break
elif not part in curNode.children:
mergable = False
curNode.children[part] = TreeNode(passthrough)
curNode = curNode.children[part]
curNode.mode = self.calc_new_mode(curNode.mode, passthrough)
curNode.mode = self.calc_new_mode(curNode.mode, mode)
with open(self.outfile, "w") as specFile:
specFile.write(str(self.root))
def add_node(self, path_parts, mode):
curNode = self.root
passthrough = '-' if mode=='!' else '_'
for part in path_parts:
if not part in curNode.children:
curNode.children[part] = TreeNode(passthrough)
curNode = curNode.children[part]
curNode.mode = self.calc_new_mode(curNode.mode, passthrough)
return curNode
def calc_new_mode(self, old, update):
if update == '!':
return update
if old == '-':
return update
if update == '-':
return old
if old == '_':
return update
if old == '/' and update in ['^', '*']:
return update
if old == '^' and update == '*':
return update
return old
def get_info(self, curLine):
curLine = curLine.strip()
mode = curLine[0]
wildcard = False
if (curLine[-1] == '*'):
wildcard = True
curLine = curLine[:-1]
if (mode == '/'):
if (wildcard):
mode = '*'
curLine = curLine[1:]
else:
if not wildcard and mode=='^':
mode = '/'
curLine = curLine[2:]
return (curLine, mode)
def parse_args():
argparser = argparse.ArgumentParser()
argparser.add_argument("depth",
type=int,
help="The trace log file")
argparser.add_argument("infiles",
type=str,
nargs="+",
help="The trace log file")
argparser.add_argument("outfile",
type=str,
help="The output file")
return argparser.parse_args()
def main():
args = parse_args()
diffBuilder = DiffBuilder(args)
diffBuilder.build_diff()
if __name__ == "__main__":
main() | bsd-3-clause | 2,479,917,954,669,699,600 | 27 | 74 | 0.554816 | false |
landlab/landlab | landlab/plot/colors.py | 3 | 1405 | # -*- coding: utf-8 -*-
"""colors.py.
Created on Mon Jan 18 13:28:17 2016
@author: gtucker
"""
from matplotlib.colors import LinearSegmentedColormap
def water_colormap():
"""Return matplotlib colormap with 'water' theme."""
cdict = {
"red": ((0.0, 0.0, 169.0 / 255.0), (1.0, 38.0 / 255.0, 1.0)),
"green": ((0.0, 0.0, 222.0 / 255.0), (1.0, 39.0 / 255.0, 1.0)),
"blue": ((0.0, 0.0, 242.0 / 255.0), (1.0, 23.0 / 255.0, 1.0)),
}
return LinearSegmentedColormap("landlab_water", cdict)
def earth_colormap():
"""Return matplotlib colormap with 'earth' theme."""
cdict = {
"red": ((0.0, 0.0, 252.0 / 255.0), (1.0, 33.0 / 255.0, 1.0)),
"green": ((0.0, 0.0, 237.0 / 255.0), (1.0, 38.0 / 255.0, 1.0)),
"blue": ((0.0, 0.0, 179.0 / 255.0), (1.0, 24.0 / 255.0, 1.0)),
}
return LinearSegmentedColormap("landlab_earth", cdict)
def colormap(name):
"""Return named Landlab colormap as a matplotlib colormap.
Parameters
----------
name : str
Name of colormap
Currently available maps are:
'water': black to light blue
'earth': dark olive to light sand color
"""
colormap_fns = {"water": water_colormap(), "earth": earth_colormap()}
try:
return colormap_fns[name]
except KeyError:
print('Warning: colormap "' + name + '" does not exist')
return None
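# Illustrative usage sketch (added; not part of the original module). It shows
# how one of the named colormaps could be handed to matplotlib; the random
# grid below is hypothetical demo data.
if __name__ == "__main__":
    import numpy as np
    import matplotlib.pyplot as plt
    demo_grid = np.random.rand(10, 10)
    plt.imshow(demo_grid, cmap=colormap("water"))
    plt.colorbar()
    plt.show()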
| mit | 8,065,253,437,613,464,000 | 27.673469 | 73 | 0.554448 | false |
seung-lab/neuroglancer | python/tests/merge_tool_test.py | 3 | 2344 | # @license
# Copyright 2018 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for merge_tool.py"""
from __future__ import absolute_import
from neuroglancer.tool import merge_tool
def test_basic():
mask = merge_tool.BlockMask()
mask.add(0, (5, 3, 1))
assert mask.blocks == [{(5, 3, 1): 1}, {(2, 1, 0): 1}, {(1, 0, 0): 1}, {(0, 0, 0): 1}]
mask.add(0, (5, 3, 0))
assert mask.blocks == [
{
(5, 3, 0): 1,
(5, 3, 1): 1
},
{
(2, 1, 0): 2
},
{
(1, 0, 0): 2
},
{
(0, 0, 0): 2
},
]
mask.add(0, (5, 2, 1))
mask.add(0, (5, 2, 0))
mask.add(0, (4, 2, 1))
mask.add(0, (4, 2, 0))
mask.add(0, (4, 3, 1))
assert mask.blocks == [
{
(4, 2, 1): 1,
(4, 2, 0): 1,
(4, 3, 1): 1,
(5, 2, 0): 1,
(5, 2, 1): 1,
(5, 3, 0): 1,
(5, 3, 1): 1
},
{
(2, 1, 0): 7
},
{
(1, 0, 0): 7
},
{
(0, 0, 0): 7
},
]
mask.add(0, (4, 3, 0))
assert mask.blocks == [
{},
{
(2, 1, 0): 8
},
{
(1, 0, 0): 8
},
{
(0, 0, 0): 8
},
]
mask.remove(0, (4, 3, 0))
assert mask.blocks == [
{
(4, 2, 1): 1,
(4, 2, 0): 1,
(4, 3, 1): 1,
(5, 2, 0): 1,
(5, 2, 1): 1,
(5, 3, 0): 1,
(5, 3, 1): 1
},
{
(2, 1, 0): 7
},
{
(1, 0, 0): 7
},
{
(0, 0, 0): 7
},
]
mask.remove(1, (2, 1, 0))
assert mask.blocks == [{}, {}, {}, {}]
| apache-2.0 | -6,905,571,312,890,334,000 | 20.703704 | 90 | 0.385239 | false |
Itxaka/libcloud | libcloud/dns/types.py | 4 | 3008 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.types import LibcloudError
__all__ = [
'Provider',
'RecordType',
'ZoneError',
'ZoneDoesNotExistError',
'ZoneAlreadyExistsError',
'RecordError',
'RecordDoesNotExistError',
'RecordAlreadyExistsError'
]
class Provider(object):
DUMMY = 'dummy'
LINODE = 'linode'
RACKSPACE = 'rackspace'
ZERIGO = 'zerigo'
ROUTE53 = 'route53'
HOSTVIRTUAL = 'hostvirtual'
GANDI = 'gandi'
GOOGLE = 'google'
SOFTLAYER = 'softlayer'
# Deprecated
RACKSPACE_US = 'rackspace_us'
RACKSPACE_UK = 'rackspace_uk'
class RecordType(object):
"""
DNS record type.
"""
A = 'A'
AAAA = 'AAAA'
MX = 'MX'
NS = 'NS'
CNAME = 'CNAME'
DNAME = 'DNAME'
TXT = 'TXT'
PTR = 'PTR'
SOA = 'SOA'
SPF = 'SPF'
SRV = 'SRV'
PTR = 'PTR'
NAPTR = 'NAPTR'
REDIRECT = 'REDIRECT'
GEO = 'GEO'
URL = 'URL'
WKS = 'WKS'
LOC = 'LOC'
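# Note added for illustration (not part of the original module): DNS drivers
# refer to these constants when records are created, along the lines of
#   driver.create_record(name='www', zone=zone, type=RecordType.A,
#                        data='192.0.2.1')
# where ``driver`` and ``zone`` come from a provider-specific driver class.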
class ZoneError(LibcloudError):
error_type = 'ZoneError'
kwargs = ('zone_id', )
def __init__(self, value, driver, zone_id):
self.zone_id = zone_id
super(ZoneError, self).__init__(value=value, driver=driver)
def __str__(self):
return self.__repr__()
def __repr__(self):
return ('<%s in %s, zone_id=%s, value=%s>' %
(self.error_type, repr(self.driver),
self.zone_id, self.value))
class ZoneDoesNotExistError(ZoneError):
error_type = 'ZoneDoesNotExistError'
class ZoneAlreadyExistsError(ZoneError):
error_type = 'ZoneAlreadyExistsError'
class RecordError(LibcloudError):
error_type = 'RecordError'
def __init__(self, value, driver, record_id):
self.record_id = record_id
super(RecordError, self).__init__(value=value, driver=driver)
def __str__(self):
return self.__repr__()
def __repr__(self):
return ('<%s in %s, record_id=%s, value=%s>' %
(self.error_type, repr(self.driver),
self.record_id, self.value))
class RecordDoesNotExistError(RecordError):
error_type = 'RecordDoesNotExistError'
class RecordAlreadyExistsError(RecordError):
error_type = 'RecordAlreadyExistsError'
| apache-2.0 | -3,806,777,273,879,569,400 | 24.931034 | 74 | 0.637301 | false |
v4nz666/7drl2017 | RoguePy/UI/Colors.py | 1 | 7033 | from ..libtcod import libtcod
black = libtcod.black
darkest_grey = libtcod.darkest_grey
darker_grey = libtcod.darker_grey
dark_grey = libtcod.dark_grey
grey = libtcod.grey
light_grey = libtcod.light_grey
lighter_grey = libtcod.lighter_grey
lightest_grey = libtcod.lightest_grey
darkest_gray = libtcod.darkest_gray
darker_gray = libtcod.darker_gray
dark_gray = libtcod.dark_gray
gray = libtcod.gray
light_gray = libtcod.light_gray
lighter_gray = libtcod.lighter_gray
lightest_gray = libtcod.lightest_gray
white = libtcod.white
red = libtcod.red
flame = libtcod.flame
orange = libtcod.orange
amber = libtcod.amber
yellow = libtcod.yellow
lime = libtcod.lime
chartreuse = libtcod.chartreuse
green = libtcod.green
sea = libtcod.sea
turquoise = libtcod.turquoise
cyan = libtcod.cyan
sky = libtcod.sky
azure = libtcod.azure
blue = libtcod.blue
han = libtcod.han
sepia = libtcod.sepia
violet = libtcod.violet
purple = libtcod.purple
fuchsia = libtcod.fuchsia
magenta = libtcod.magenta
pink = libtcod.pink
crimson = libtcod.crimson
dark_red = libtcod.dark_red
dark_flame = libtcod.dark_flame
dark_orange = libtcod.dark_orange
dark_amber = libtcod.dark_amber
dark_yellow = libtcod.dark_yellow
dark_lime = libtcod.dark_lime
dark_chartreuse = libtcod.dark_chartreuse
dark_green = libtcod.dark_green
dark_sea = libtcod.dark_sea
dark_turquoise = libtcod.dark_turquoise
dark_cyan = libtcod.dark_cyan
dark_sky = libtcod.dark_sky
dark_azure = libtcod.dark_azure
dark_blue = libtcod.dark_blue
dark_han = libtcod.dark_han
dark_sepia = libtcod.dark_sepia
dark_violet = libtcod.dark_violet
dark_purple = libtcod.dark_purple
dark_fuchsia = libtcod.dark_fuchsia
dark_magenta = libtcod.dark_magenta
dark_pink = libtcod.dark_pink
dark_crimson = libtcod.dark_crimson
darker_red = libtcod.darker_red
darker_flame = libtcod.darker_flame
darker_orange = libtcod.darker_orange
darker_amber = libtcod.darker_amber
darker_yellow = libtcod.darker_yellow
darker_lime = libtcod.darker_lime
darker_chartreuse = libtcod.darker_chartreuse
darker_green = libtcod.darker_green
darker_sea = libtcod.darker_sea
darker_turquoise = libtcod.darker_turquoise
darker_cyan = libtcod.darker_cyan
darker_sky = libtcod.darker_sky
darker_azure = libtcod.darker_azure
darker_blue = libtcod.darker_blue
darker_han = libtcod.darker_han
darker_sepia = libtcod.darker_sepia
darker_violet = libtcod.darker_violet
darker_purple = libtcod.darker_purple
darker_fuchsia = libtcod.darker_fuchsia
darker_magenta = libtcod.darker_magenta
darker_pink = libtcod.darker_pink
darker_crimson = libtcod.darker_crimson
darkest_red = libtcod.darkest_red
darkest_flame = libtcod.darkest_flame
darkest_orange = libtcod.darkest_orange
darkest_amber = libtcod.darkest_amber
darkest_yellow = libtcod.darkest_yellow
darkest_lime = libtcod.darkest_lime
darkest_chartreuse = libtcod.darkest_chartreuse
darkest_green = libtcod.darkest_green
darkest_sea = libtcod.darkest_sea
darkest_turquoise = libtcod.darkest_turquoise
darkest_cyan = libtcod.darkest_cyan
darkest_sky = libtcod.darkest_sky
darkest_azure = libtcod.darkest_azure
darkest_blue = libtcod.darkest_blue
darkest_han = libtcod.darkest_han
darkest_sepia = libtcod.darkest_sepia
darkest_violet = libtcod.darkest_violet
darkest_purple = libtcod.darkest_purple
darkest_fuchsia = libtcod.darkest_fuchsia
darkest_magenta = libtcod.darkest_magenta
darkest_pink = libtcod.darkest_pink
darkest_crimson = libtcod.darkest_crimson
light_red = libtcod.light_red
light_flame = libtcod.light_flame
light_orange = libtcod.light_orange
light_amber = libtcod.light_amber
light_yellow = libtcod.light_yellow
light_lime = libtcod.light_lime
light_chartreuse = libtcod.light_chartreuse
light_green = libtcod.light_green
light_sea = libtcod.light_sea
light_turquoise = libtcod.light_turquoise
light_cyan = libtcod.light_cyan
light_sky = libtcod.light_sky
light_azure = libtcod.light_azure
light_blue = libtcod.light_blue
light_han = libtcod.light_han
light_sepia = libtcod.light_sepia
light_violet = libtcod.light_violet
light_purple = libtcod.light_purple
light_fuchsia = libtcod.light_fuchsia
light_magenta = libtcod.light_magenta
light_pink = libtcod.light_pink
light_crimson = libtcod.light_crimson
lighter_red = libtcod.lighter_red
lighter_flame = libtcod.lighter_flame
lighter_orange = libtcod.lighter_orange
lighter_amber = libtcod.lighter_amber
lighter_yellow = libtcod.lighter_yellow
lighter_lime = libtcod.lighter_lime
lighter_chartreuse = libtcod.lighter_chartreuse
lighter_green = libtcod.lighter_green
lighter_sea = libtcod.lighter_sea
lighter_turquoise = libtcod.lighter_turquoise
lighter_cyan = libtcod.lighter_cyan
lighter_sky = libtcod.lighter_sky
lighter_azure = libtcod.lighter_azure
lighter_blue = libtcod.lighter_blue
lighter_han = libtcod.lighter_han
lighter_sepia = libtcod.lighter_sepia
lighter_violet = libtcod.lighter_violet
lighter_purple = libtcod.lighter_purple
lighter_fuchsia = libtcod.lighter_fuchsia
lighter_magenta = libtcod.lighter_magenta
lighter_pink = libtcod.lighter_pink
lighter_crimson = libtcod.lighter_crimson
lightest_red = libtcod.lightest_red
lightest_flame = libtcod.lightest_flame
lightest_orange = libtcod.lightest_orange
lightest_amber = libtcod.lightest_amber
lightest_yellow = libtcod.lightest_yellow
lightest_lime = libtcod.lightest_lime
lightest_chartreuse = libtcod.lightest_chartreuse
lightest_green = libtcod.lightest_green
lightest_sea = libtcod.lightest_sea
lightest_turquoise = libtcod.lightest_turquoise
lightest_cyan = libtcod.lightest_cyan
lightest_sky = libtcod.lightest_sky
lightest_azure = libtcod.lightest_azure
lightest_blue = libtcod.lightest_blue
lightest_han = libtcod.lightest_han
lightest_sepia = libtcod.lightest_sepia
lightest_violet = libtcod.lightest_violet
lightest_purple = libtcod.lightest_purple
lightest_fuchsia = libtcod.lightest_fuchsia
lightest_magenta = libtcod.lightest_magenta
lightest_pink = libtcod.lightest_pink
lightest_crimson = libtcod.lightest_crimson
desaturated_red = libtcod.desaturated_red
desaturated_flame = libtcod.desaturated_flame
desaturated_orange = libtcod.desaturated_orange
desaturated_amber = libtcod.desaturated_amber
desaturated_yellow = libtcod.desaturated_yellow
desaturated_lime = libtcod.desaturated_lime
desaturated_chartreuse = libtcod.desaturated_chartreuse
desaturated_green = libtcod.desaturated_green
desaturated_sea = libtcod.desaturated_sea
desaturated_turquoise = libtcod.desaturated_turquoise
desaturated_cyan = libtcod.desaturated_cyan
desaturated_sky = libtcod.desaturated_sky
desaturated_azure = libtcod.desaturated_azure
desaturated_blue = libtcod.desaturated_blue
desaturated_han = libtcod.desaturated_han
desaturated_violet = libtcod.desaturated_violet
desaturated_purple = libtcod.desaturated_purple
desaturated_fuchsia = libtcod.desaturated_fuchsia
desaturated_magenta = libtcod.desaturated_magenta
desaturated_pink = libtcod.desaturated_pink
desaturated_crimson = libtcod.desaturated_crimson
brass = libtcod.brass
copper = libtcod.copper
gold = libtcod.gold
silver = libtcod.silver
celadon = libtcod.celadon
peach = libtcod.peach
| gpl-3.0 | 8,231,612,658,221,367,000 | 32.650718 | 55 | 0.810607 | false |
dongjoon-hyun/tensorflow | tensorflow/python/kernel_tests/broadcast_to_ops_test.py | 4 | 5339 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for broadcast_to ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test as test_lib
class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToBasic(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
with self.session(use_gpu=True):
x = np.array([1, 2, 3], dtype=dtype)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastToString(self):
with self.session(use_gpu=True):
x = np.array([b"1", b"2", b"3"])
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastToBool(self):
with self.session(use_gpu=True):
x = np.array([True, False, True], dtype=np.bool)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastToShape(self):
for input_dim in range(1, 6):
for output_dim in range(input_dim, 6):
with self.cached_session(use_gpu=True):
input_shape = [2] * input_dim
output_shape = [2] * output_dim
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastToScalar(self):
with self.session(use_gpu=True):
x = np.array(1, dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastScalarToNonScalar(self):
with self.session(use_gpu=True):
x = np.array(1.0, dtype=np.float)
v_tf = array_ops.broadcast_to(constant_op.constant(1.0), [2, 3, 4])
v_np = np.broadcast_to(x, [2, 3, 4])
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastToShapeTypeAndInference(self):
for dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=True):
x = np.array([1, 2, 3])
v_tf = array_ops.broadcast_to(
constant_op.constant(x),
constant_op.constant([3, 3], dtype=dtype))
shape = v_tf.get_shape().as_list()
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
# check shape inference when shape input is constant
self.assertAllEqual(shape, v_np.shape)
def testGradientForScalar(self):
x = constant_op.constant(1, dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [2, 4, 3])
out = 2 * v
with self.cached_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(), out,
out.get_shape())
self.assertLess(err, 1e-4)
def testGradientWithSameRank(self):
x = constant_op.constant(np.reshape(np.arange(6), (2, 1, 3)),
dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [2, 5, 3])
out = 2 * v
with self.cached_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
def testGradientWithIncreasingRank(self):
x = constant_op.constant([[1], [2]],
dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [5, 2, 3])
out = 2 * v
with self.cached_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
def testGradientWithBroadcastAllDimensions(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [5, 4, 6])
out = 2 * v
with self.cached_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
if __name__ == "__main__":
test_lib.main()
| apache-2.0 | -3,745,499,597,733,895,000 | 39.44697 | 80 | 0.613973 | false |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/MailChimp/Ping.py | 5 | 2761 | # -*- coding: utf-8 -*-
###############################################################################
#
# Ping
# Test connection to MailChimp services.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Ping(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Ping Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Ping, self).__init__(temboo_session, '/Library/MailChimp/Ping')
def new_input_set(self):
return PingInputSet()
def _make_result_set(self, result, path):
return PingResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return PingChoreographyExecution(session, exec_id, path)
class PingInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Ping
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp)
"""
super(PingInputSet, self)._set_input('APIKey', value)
class PingResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Ping Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((string) The response from Mailchimp.)
"""
return self._output.get('Response', None)
class PingChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return PingResultSet(response, path)
| gpl-2.0 | 7,038,981,433,311,374,000 | 32.670732 | 120 | 0.662441 | false |
flacjacket/sympy | sympy/printing/preview.py | 2 | 7063 | from __future__ import with_statement
import os
import time
import tempfile
from latex import latex
def preview(expr, output='png', viewer=None, euler=True, packages=(), **latex_settings):
r"""
View expression or LaTeX markup in PNG, DVI, PostScript or PDF form.
If the expr argument is an expression, it will be exported to LaTeX and
then compiled using available the TeX distribution. The first argument,
'expr', may also be a LaTeX string. The function will then run the
appropriate viewer for the given output format or use the user defined
one. By default png output is generated.
By default pretty Euler fonts are used for typesetting (they were used to
typeset the well known "Concrete Mathematics" book). For that to work, you
need the 'eulervm.sty' LaTeX style (in Debian/Ubuntu, install the
texlive-fonts-extra package). If you prefer default AMS fonts or your
    system lacks the 'eulervm' LaTeX package, then unset the 'euler' keyword
argument.
    To use viewer auto-detection, let's say for 'png' output, issue
>>> from sympy import symbols, preview, Symbol
>>> x, y = symbols("x,y")
>>> preview(x + y, output='png') # doctest: +SKIP
This will choose 'pyglet' by default. To select a different one, do
>>> preview(x + y, output='png', viewer='gimp') # doctest: +SKIP
The 'png' format is considered special. For all other formats the rules
are slightly different. As an example we will take 'dvi' output format. If
you would run
>>> preview(x + y, output='dvi') # doctest: +SKIP
    then 'preview' will look for available 'dvi' viewers on your system
(predefined in the function, so it will try evince, first, then kdvi and
xdvi). If nothing is found you will need to set the viewer explicitly.
>>> preview(x + y, output='dvi', viewer='superior-dvi-viewer') # doctest: +SKIP
This will skip auto-detection and will run user specified
    'superior-dvi-viewer'. If 'preview' fails to find it on your system it will
gracefully raise an exception. You may also enter 'file' for the viewer
argument. Doing so will cause this function to return a file object in
read-only mode.
Currently this depends on pexpect, which is not available for windows.
Additional keyword args will be passed to the latex call, e.g., the
symbol_names flag.
>>> phidd = Symbol('phidd')
>>> preview(phidd, symbol_names={phidd:r'\ddot{\varphi}'}) # doctest: +SKIP
"""
# we don't want to depend on anything not in the
# standard library with SymPy by default
import pexpect
special = [ 'pyglet' ]
if viewer is None:
if output == "png":
viewer = "pyglet"
else:
# sorted in order from most pretty to most ugly
# very discussable, but indeed 'gv' looks awful :)
candidates = {
"dvi" : [ "evince", "okular", "kdvi", "xdvi" ],
"ps" : [ "evince", "okular", "gsview", "gv" ],
"pdf" : [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
}
try:
for candidate in candidates[output]:
if pexpect.which(candidate):
viewer = candidate
break
else:
raise SystemError("No viewers found for '%s' output format." % output)
except KeyError:
raise SystemError("Invalid output format: %s" % output)
else:
if viewer not in special and not pexpect.which(viewer):
raise SystemError("Unrecognized viewer: %s" % viewer)
actual_packages = packages + ("amsmath", "amsfonts")
if euler:
actual_packages += ("euler",)
package_includes = "\n".join(["\\usepackage{%s}" % p
for p in actual_packages])
format = r"""\documentclass[12pt]{article}
%s
\begin{document}
\pagestyle{empty}
%s
\vfill
\end{document}
""" % (package_includes, "%s")
if isinstance(expr, str):
latex_string = expr
else:
latex_string = latex(expr, mode='inline', **latex_settings)
tmp = tempfile.mktemp()
with open(tmp + ".tex", "w") as tex:
tex.write(format % latex_string)
cwd = os.getcwd()
os.chdir(tempfile.gettempdir())
if os.system("latex -halt-on-error %s.tex" % tmp) != 0:
raise SystemError("Failed to generate DVI output.")
os.remove(tmp + ".tex")
os.remove(tmp + ".aux")
os.remove(tmp + ".log")
if output != "dvi":
command = {
"ps" : "dvips -o %s.ps %s.dvi",
"pdf" : "dvipdf %s.dvi %s.pdf",
"png" : "dvipng -T tight -z 9 " + \
"--truecolor -o %s.png %s.dvi",
}
try:
if os.system(command[output] % (tmp, tmp)) != 0:
raise SystemError("Failed to generate '%s' output." % output)
else:
os.remove(tmp + ".dvi")
except KeyError:
raise SystemError("Invalid output format: %s" % output)
src = "%s.%s" % (tmp, output)
src_file = None
if viewer == "file":
src_file = open(src, 'rb')
elif viewer == "pyglet":
try:
from pyglet import window, image, gl
from pyglet.window import key
except ImportError:
raise ImportError("pyglet is required for plotting.\n visit http://www.pyglet.org/")
if output == "png":
from pyglet.image.codecs.png import PNGImageDecoder
img = image.load(src, decoder=PNGImageDecoder())
else:
raise SystemError("pyglet preview works only for 'png' files.")
offset = 25
win = window.Window(
width = img.width + 2*offset,
height = img.height + 2*offset,
caption = "sympy",
resizable = False
)
win.set_vsync(False)
try:
def on_close():
win.has_exit = True
win.on_close = on_close
def on_key_press(symbol, modifiers):
if symbol in [key.Q, key.ESCAPE]:
on_close()
win.on_key_press = on_key_press
def on_expose():
gl.glClearColor(1.0, 1.0, 1.0, 1.0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
img.blit(
(win.width - img.width) / 2,
(win.height - img.height) / 2
)
win.on_expose = on_expose
while not win.has_exit:
win.dispatch_events()
win.flip()
except KeyboardInterrupt:
pass
win.close()
else:
os.system("%s %s &> /dev/null &" % (viewer, src))
time.sleep(2) # wait for the viewer to read data
os.remove(src)
os.chdir(cwd)
if src_file is not None:
return src_file
| bsd-3-clause | -9,208,264,140,758,000,000 | 31.851163 | 96 | 0.560951 | false |
socialplanning/opencore | opencore/listen/mailinglist.py | 1 | 3103 | from Products.CMFDefault.DublinCore import DefaultDublinCoreImpl
from Products.listen.content.mailinglist import MailingList
from fieldproperty import ListNameFieldProperty
from interfaces import IOpenMailingList
from opencore.configuration import PROJECTNAME
from opencore.listen.interfaces import ISyncWithProjectMembership
from zope.interface import implements
from zope.component import getMultiAdapter
from zope.component import queryMultiAdapter
PKG_NAME = 'listen'
factory_type_information = ( {
'id' : 'Open Mailing List',
'icon' : 'mailboxer_icon.png',
'meta_type' : 'OpenMailingList',
'description' : "A mailing list manages user subscriptions and "\
"processes incoming mail",
'product' : PROJECTNAME,
'factory' : 'addOpenMailingList',
'immediate_view' : 'edit',
'aliases' : {'(Default)' :'mailinglist_view',
'index.html' :'mailinglist_view',
'view' :'mailinglist_view',
'sharing' :'folder_localrole_form',
'subscribers' :'@@editAllSubscribers',
'edit' :'@@edit'},
'actions' : (
),
'filter_content_types' : True,
'allowed_content_types' : (),
'global_allow' : True,
'allow_discussion' : False,
},
)
fti = factory_type_information[0].copy()
def addOpenMailingList(self, id, title=u''):
""" Add an OpenMailingList """
o = OpenMailingList(id, title)
self._setObject(id, o)
class OpenMailingList(MailingList, DefaultDublinCoreImpl):
"""
Some OpenPlans specific tweaks to listen mailing lists.
"""
implements(IOpenMailingList)
portal_type = "Open Mailing List"
meta_type = "OpenMailingList"
creator = ""
mailto = ListNameFieldProperty(IOpenMailingList['mailto'])
# this overrides MailBoxer's limit of 10 emails in 10 minutes
# so now, up to 100 emails are allowed in 10 minutes before the
# sender is disabled
senderlimit = 100
@property
def sync_project_membership(self):
return ISyncWithProjectMembership.providedBy(self)
def manage_event(self, event_codes, headers):
""" Handle event conditions passed up from smtp2zope.
Primarily this method will be called by XMLRPC from smtp2zope.
        Copied from MailBoxer, but instead of acquiring the mail template it
        looks up a per-event-code template view, falling back to a default view.
"""
for code in event_codes:
from_ = headers.get('from')
if from_ is None:
continue
view = queryMultiAdapter((self, self.REQUEST), name='event_template_sender_%d' % code)
if view is None:
view = getMultiAdapter((self, self.REQUEST), name='event_template_sender_default')
msg = view(code, headers)
returnpath = self.getValueFor('returnpath') or self.manager_email
self._send_msgs([from_], msg, returnpath)
| gpl-3.0 | -3,168,084,858,627,626,500 | 36.385542 | 98 | 0.620367 | false |
yuyu2172/chainercv | examples/fcis/mxnet2npz.py | 3 | 12879 | import argparse
import chainer
import mxnet as mx
from chainercv.experimental.links import FCISResNet101
def main():
parser = argparse.ArgumentParser(
description='Script to convert mxnet params to chainer npz')
parser.add_argument(
'mxnet_param_file', metavar='mxnet-param-file',
        help='MXNet param file, e.g. fcis_coco-0000.params')
parser.add_argument('--process', action='store_true')
parser.add_argument(
'--dataset', choices=('sbd', 'coco'), type=str, default='sbd')
parser.add_argument(
'--out', '-o', type=str, default=None)
args = parser.parse_args()
if args.dataset == 'sbd':
model = FCISResNet101(
n_fg_class=20,
pretrained_model=None)
elif args.dataset == 'coco':
model = FCISResNet101(
n_fg_class=80,
pretrained_model=None,
anchor_scales=[4, 8, 16, 32],
proposal_creator_params={
'nms_thresh': 0.7,
'n_train_pre_nms': 6000,
'n_train_post_nms': 300,
'n_test_pre_nms': 6000,
'n_test_post_nms': 300,
'force_cpu_nms': False,
'min_size': 2})
params = mx.nd.load(args.mxnet_param_file)
print('mxnet param is loaded: {}'.format(args.mxnet_param_file))
print('start conversion')
if args.process:
tests = [k for k in params.keys() if k.endswith('_test')]
for test in tests:
params[test.replace('_test', '')] = params.pop(test)
model = convert(model, params)
print('finish conversion')
if args.out is None:
out = 'fcis_resnet101_{}_converted.npz'.format(args.dataset)
print('saving to {}'.format(out))
chainer.serializers.save_npz(out, model)
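# Example invocation (illustrative; the param file name comes from the help
# text above, and --process strips the '*_test' parameter suffixes before
# conversion):
#   python mxnet2npz.py fcis_coco-0000.params --dataset coco --process
# Without --out this writes fcis_resnet101_coco_converted.npz.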
def convert(model, params):
finished_keys = []
for key, value in params.items():
value = value.asnumpy()
param_type, param_name = key.split(':')
if param_type == 'arg':
if param_name.endswith('_test'):
continue
elif param_name.startswith('rpn'):
if param_name == 'rpn_bbox_pred_bias':
value = value.reshape((-1, 4))
value = value[:, [1, 0, 3, 2]]
value = value.reshape(-1)
assert model.rpn.loc.b.shape == value.shape
model.rpn.loc.b.array[:] = value
finished_keys.append(key)
elif param_name == 'rpn_bbox_pred_weight':
value = value.reshape((-1, 4, 512, 1, 1))
value = value[:, [1, 0, 3, 2]]
value = value.reshape((-1, 512, 1, 1))
assert model.rpn.loc.W.shape == value.shape
model.rpn.loc.W.array[:] = value
finished_keys.append(key)
elif param_name == 'rpn_cls_score_bias':
value = value.reshape((2, -1))
value = value.transpose((1, 0))
value = value.reshape(-1)
assert model.rpn.score.b.shape == value.shape
model.rpn.score.b.array[:] = value
finished_keys.append(key)
elif param_name == 'rpn_cls_score_weight':
value = value.reshape((2, -1, 512, 1, 1))
value = value.transpose((1, 0, 2, 3, 4))
value = value.reshape((-1, 512, 1, 1))
assert model.rpn.score.W.shape == value.shape
model.rpn.score.W.array[:] = value
finished_keys.append(key)
elif param_name == 'rpn_conv_3x3_bias':
assert model.rpn.conv1.b.shape == value.shape
model.rpn.conv1.b.array[:] = value
finished_keys.append(key)
elif param_name == 'rpn_conv_3x3_weight':
assert model.rpn.conv1.W.shape == value.shape
model.rpn.conv1.W.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('conv1'):
if param_name == 'conv1_weight':
assert model.extractor.conv1.conv.W.shape \
== value.shape
model.extractor.conv1.conv.W.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('bn_conv1'):
if param_name == 'bn_conv1_beta':
assert model.extractor.conv1.bn.beta.shape \
== value.shape
model.extractor.conv1.bn.beta.array[:] = value
finished_keys.append(key)
elif param_name == 'bn_conv1_gamma':
assert model.extractor.conv1.bn.gamma.shape \
== value.shape
model.extractor.conv1.bn.gamma.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('fcis'):
if param_name == 'fcis_bbox_bias':
value = value.reshape((2, 4, 7 * 7))
value = value[:, [1, 0, 3, 2]]
value = value.reshape(392)
assert model.head.ag_loc.b.shape == value.shape
model.head.ag_loc.b.array[:] = value
finished_keys.append(key)
elif param_name == 'fcis_bbox_weight':
value = value.reshape((2, 4, 7 * 7, 1024, 1, 1))
value = value[:, [1, 0, 3, 2]]
value = value.reshape((392, 1024, 1, 1))
assert model.head.ag_loc.W.shape == value.shape
model.head.ag_loc.W.array[:] = value
finished_keys.append(key)
elif param_name == 'fcis_cls_seg_bias':
assert model.head.cls_seg.b.shape == value.shape
model.head.cls_seg.b.array[:] = value
finished_keys.append(key)
elif param_name == 'fcis_cls_seg_weight':
assert model.head.cls_seg.W.shape == value.shape
model.head.cls_seg.W.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('conv_new_1'):
if param_name == 'conv_new_1_bias':
assert model.head.conv1.b.shape == value.shape
model.head.conv1.b.array[:] = value
finished_keys.append(key)
elif param_name == 'conv_new_1_weight':
assert model.head.conv1.W.shape == value.shape
model.head.conv1.W.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('res'):
block_name, branch_name, prm_name = param_name.split('_')
resblock_name = block_name[:4]
resblock = getattr(model.extractor, resblock_name)
if block_name[4:] == 'a':
blck_name = block_name[4:]
elif block_name[4:] == 'b':
blck_name = 'b1'
elif block_name[4:].startswith('b'):
blck_name = block_name[4:]
elif block_name[4:] == 'c':
blck_name = 'b2'
block = getattr(resblock, blck_name)
if branch_name == 'branch1':
conv_bn_name = 'residual_conv'
elif branch_name == 'branch2a':
conv_bn_name = 'conv1'
elif branch_name == 'branch2b':
conv_bn_name = 'conv2'
elif branch_name == 'branch2c':
conv_bn_name = 'conv3'
conv_bn = getattr(block, conv_bn_name)
if prm_name == 'weight':
assert conv_bn.conv.W.shape == value.shape
conv_bn.conv.W.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('bn'):
block_name, branch_name, prm_name = param_name.split('_')
resblock_name = 'res{}'.format(block_name[2])
resblock = getattr(model.extractor, resblock_name)
if block_name[3:] == 'a':
blck_name = block_name[3:]
elif block_name[3:] == 'b':
blck_name = 'b1'
elif block_name[3:].startswith('b'):
blck_name = block_name[3:]
elif block_name[3:] == 'c':
blck_name = 'b2'
block = getattr(resblock, blck_name)
if branch_name == 'branch1':
conv_bn_name = 'residual_conv'
elif branch_name == 'branch2a':
conv_bn_name = 'conv1'
elif branch_name == 'branch2b':
conv_bn_name = 'conv2'
elif branch_name == 'branch2c':
conv_bn_name = 'conv3'
conv_bn = getattr(block, conv_bn_name)
if prm_name == 'beta':
assert conv_bn.bn.beta.shape == value.shape
conv_bn.bn.beta.array[:] = value
finished_keys.append(key)
elif prm_name == 'gamma':
assert conv_bn.bn.gamma.shape == value.shape
conv_bn.bn.gamma.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
else:
print('param: {} is not converted'.format(key))
elif param_type == 'aux':
if param_name.endswith('_test'):
continue
elif param_name.startswith('bn_conv1'):
if param_name == 'bn_conv1_moving_mean':
assert model.extractor.conv1.bn.avg_mean.shape \
== value.shape
model.extractor.conv1.bn.avg_mean[:] = value
finished_keys.append(key)
elif param_name == 'bn_conv1_moving_var':
assert model.extractor.conv1.bn.avg_var.shape \
== value.shape
model.extractor.conv1.bn.avg_var[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('bn'):
block_name, branch_name, _, prm_name = \
param_name.split('_')
resblock_name = 'res{}'.format(block_name[2])
resblock = getattr(model.extractor, resblock_name)
if block_name[3:] == 'a':
blck_name = block_name[3:]
elif block_name[3:] == 'b':
blck_name = 'b1'
elif block_name[3:].startswith('b'):
blck_name = block_name[3:]
elif block_name[3:] == 'c':
blck_name = 'b2'
block = getattr(resblock, blck_name)
if branch_name == 'branch1':
conv_bn_name = 'residual_conv'
elif branch_name == 'branch2a':
conv_bn_name = 'conv1'
elif branch_name == 'branch2b':
conv_bn_name = 'conv2'
elif branch_name == 'branch2c':
conv_bn_name = 'conv3'
conv_bn = getattr(block, conv_bn_name)
if prm_name == 'mean':
assert conv_bn.bn.avg_mean.shape == value.shape
conv_bn.bn.avg_mean[:] = value
finished_keys.append(key)
elif prm_name == 'var':
assert conv_bn.bn.avg_var.shape == value.shape
conv_bn.bn.avg_var[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
else:
print('param: {} is not converted'.format(key))
else:
print('param: {} is not converted'.format(key))
return model
if __name__ == '__main__':
main()
| mit | 4,281,009,090,641,691,000 | 44.031469 | 73 | 0.466962 | false |