repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666 chars-1.03M) | license (string, 15 classes) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
daveti/sarp | run/perf.py | 1 | 1108 | #!/usr/bin/python
# Perf.py
# Accept a file (time.perf) with a number on each line
# Compute the min/avg/max/mdev, then print the result
# Nov 10, 2013
# [email protected]
# http://davejingtian.org
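# Example usage (hypothetical "time.perf" holding one measurement per line):
#   $ cat time.perf
#   1.2
#   3.4
#   $ ./perf.py time.perf
#   time in ms min/avg/max/mdev
#   1.200000/2.300000/3.400000/1.100000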
import sys
def readPerfFile(filePath):
'''
Read a perf file into a list
'''
data = []
    # A context manager guarantees the file is closed even if a line
    # fails to parse as a float.
    with open(filePath, 'r') as fnObj:
        for line in fnObj:
            data.append(float(line.strip()))
    return data
def computeMdev(data, avg):
'''
Compute the mean deviation given the list and the average value
'''
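    # Mean absolute deviation: (1/N) * sum_i |x_i - avg|.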
num = len(data)
    total = 0.0
    for d in data:
        total += abs(d - avg)
    return total / num
def main():
'''
Main method
'''
if len(sys.argv) != 2:
print('Error: invalid number of parameters')
return(1)
# Read the file
data = readPerfFile(sys.argv[1])
# Find the min
minV = min(data)
# Compute the avg
avgV = sum(data) / float(len(data))
# Find the max
maxV = max(data)
# Compute the mdev
mdevV = computeMdev(data, avgV)
# Output
print('time in ms min/avg/max/mdev')
print('%f/%f/%f/%f' %(minV,avgV,maxV,mdevV))
if __name__ == '__main__':
main()
| gpl-2.0 | -8,832,709,386,577,751,000 | 15.057971 | 64 | 0.630866 | false |
tanmaykm/JuliaBox | engine/src/juliabox/plugins/api_admin/api.py | 8 | 6105 | __author__ = 'tan'
import os
from juliabox.handlers import JBPluginHandler, JBPluginUI
from juliabox.jbox_util import JBoxCfg
from juliabox.db import JBoxUserV2, JBoxAPISpec, JBoxDBItemNotFound
class APIAdminUIModule(JBPluginUI):
provides = [JBPluginUI.JBP_UI_CONFIG_SECTION]
TEMPLATE_PATH = os.path.dirname(__file__)
@staticmethod
def get_template(plugin_type):
if plugin_type == JBPluginUI.JBP_UI_CONFIG_SECTION:
return os.path.join(APIAdminUIModule.TEMPLATE_PATH, "api_admin.tpl")
return None
@staticmethod
def get_user_id(handler):
sessname = handler.get_session_id()
user_id = handler.get_user_id()
if (sessname is None) or (user_id is None):
handler.send_error()
return
return user_id
@staticmethod
def is_allowed(handler):
user_id = APIAdminUIModule.get_user_id(handler)
user = JBoxUserV2(user_id)
return user.has_resource_profile(JBoxUserV2.RES_PROF_API_PUBLISHER)
class APIAdminHandler(JBPluginHandler):
provides = [JBPluginHandler.JBP_HANDLER, JBPluginHandler.JBP_JS_TOP]
@staticmethod
def get_js():
return "/assets/plugins/api_admin/api_admin.js"
@staticmethod
def register(app):
app.add_handlers(".*$", [(r"/jboxplugin/api_admin/", APIAdminHandler)])
def get(self):
return self.post()
def post(self):
self.log_debug("API management handler got POST request")
sessname = self.get_session_id()
user_id = self.get_user_id()
if (sessname is None) or (user_id is None):
self.send_error()
return
user = JBoxUserV2(user_id)
is_admin = sessname in JBoxCfg.get("admin_sessnames", []) or user.has_role(JBoxUserV2.ROLE_SUPER)
self.log_info("API manager. user_id[%s] is_admin[%r]", user_id, is_admin)
if user.has_resource_profile(JBoxUserV2.RES_PROF_API_PUBLISHER):
if self.handle_get_api_info(user_id, is_admin):
return
if self.handle_create_api(user_id, is_admin):
return
if self.handle_delete_api(user_id, is_admin):
return
else:
if self.handle_enable_api(user_id, is_admin):
return
self.log_error("no handlers found")
# only AJAX requests responded to
self.send_error()
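    # The handlers below implement a small mode-driven JSON protocol; e.g. a
    # hypothetical request POST /jboxplugin/api_admin/ with mode=info is
    # answered with {'code': 0, 'data': <API info>}, while non-zero codes
    # signal errors.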
def handle_enable_api(self, user_id, is_admin):
mode = self.get_argument('mode', None)
if (mode is None) or (mode != "enable"):
return False
user = JBoxUserV2(user_id)
user.set_resource_profile(JBoxUserV2.RES_PROF_API_PUBLISHER)
user.save()
response = {'code': 0, 'data': ''}
self.write(response)
return True
def handle_get_api_info(self, user_id, is_admin):
mode = self.get_argument('mode', None)
if (mode is None) or (mode != "info"):
return False
publisher = user_id
api_name = None
if is_admin:
publisher = self.get_argument('publisher', user_id)
api_name = self.get_argument('api_name', None)
apiinfo = JBoxAPISpec.get_api_info(publisher, api_name)
response = {'code': 0, 'data': apiinfo}
self.write(response)
return True
def handle_delete_api(self, user_id, is_admin):
mode = self.get_argument('mode', None)
if (mode is None) or (mode != "delete"):
return False
api_name = self.get_argument('api_name', None)
if api_name is None:
self.log_error("missing api_name")
self.send_error()
return True
publisher = user_id
if is_admin:
publisher = self.get_argument('publisher', publisher)
try:
api = JBoxAPISpec(api_name=api_name)
if api.get_publisher() != publisher:
response = {'code': -1, 'data': 'No delete permission on this API'}
self.write(response)
return True
api.delete()
response = {'code': 0, 'data': 'API ' + api_name + ' was deleted'}
self.write(response)
return True
except JBoxDBItemNotFound:
response = {'code': 1, 'data': 'No such API - ' + api_name}
self.write(response)
return True
def handle_create_api(self, user_id, is_admin):
mode = self.get_argument('mode', None)
if (mode is None) or (mode != "create"):
return False
api_name = self.get_argument('api_name', '', strip=True)
cmd = self.get_argument('cmd', '', strip=True)
description = self.get_argument('description', '', strip=True)
for val in (api_name, cmd, description):
if len(val) == 0:
self.log_error("mandatory parameters missing")
                response = {'code': -1, 'data': 'mandatory attributes missing'}
self.write(response)
return True
if len(api_name) > 32 or len(cmd) > 512 or len(description) > 512:
self.log_error("api specification fields too large")
response = {'code': -1, 'data': 'API specification fields too large'}
self.write(response)
return True
publisher = user_id
if is_admin:
publisher = self.get_argument('publisher', publisher, strip=True)
try:
JBoxAPISpec(api_name=api_name)
response = {'code': -1, 'data': 'API already exists'}
self.write(response)
return True
except JBoxDBItemNotFound:
pass
api = JBoxAPISpec(api_name, cmd=cmd, description=description, publisher=publisher, create=True)
if api.get_publisher() != publisher:
# API got created by someone else!
response = {'code': -1, 'data': 'API already exists'}
self.write(response)
return True
response = {'code': 0, 'data': ''}
self.write(response)
return True | mit | -6,546,364,685,952,526,000 | 33.693182 | 105 | 0.575594 | false |
Schamnad/cclib | test/data/testvib.py | 1 | 7511 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test logfiles with vibration output in cclib"""
import os
import unittest
from skip import skipForParser
__filedir__ = os.path.realpath(os.path.dirname(__file__))
class GenericIRTest(unittest.TestCase):
"""Generic vibrational frequency unittest"""
# Unit tests should normally give this value for the largest IR intensity.
max_IR_intensity = 100
def testvibdisps(self):
"""Are the dimensions of vibdisps consistent with 3N-6 x N x 3"""
numvib = 3*len(self.data.atomnos) - 6
self.assertEqual(self.data.vibdisps.shape,
(numvib, len(self.data.atomnos), 3))
def testlengths(self):
"""Are the lengths of vibfreqs and vibirs (and if present, vibsyms) correct?"""
numvib = 3*len(self.data.atomnos) - 6
self.assertEqual(len(self.data.vibfreqs), numvib)
if hasattr(self.data, 'vibirs'):
self.assertEqual(len(self.data.vibirs), numvib)
if hasattr(self.data, 'vibsyms'):
self.assertEqual(len(self.data.vibsyms), numvib)
def testfreqval(self):
"""Is the highest freq value 3630 +/- 200 cm-1?"""
self.assertAlmostEqual(max(self.data.vibfreqs), 3630, delta=200)
@skipForParser('Psi', 'Psi cannot print IR intensities')
def testirintens(self):
"""Is the maximum IR intensity 100 +/- 10 km mol-1?"""
self.assertAlmostEqual(max(self.data.vibirs), self.max_IR_intensity, delta=10)
class FireflyIRTest(GenericIRTest):
"""Customized vibrational frequency unittest"""
max_IR_intensity = 135
class GaussianIRTest(GenericIRTest):
"""Customized vibrational frequency unittest"""
def testvibsyms(self):
"""Is the length of vibsyms correct?"""
numvib = 3*len(self.data.atomnos) - 6
self.assertEqual(len(self.data.vibsyms), numvib)
class JaguarIRTest(GenericIRTest):
"""Customized vibrational frequency unittest"""
def testvibsyms(self):
"""Is the length of vibsyms correct?"""
numvib = 3*len(self.data.atomnos) - 6
self.assertEqual(len(self.data.vibsyms), numvib)
class OrcaIRTest(GenericIRTest):
"""Customized vibrational frequency unittest"""
# We have not been able to determine why ORCA gets such a different
# maximum IR intensity. The coordinates are exactly the same, and
# the basis set seems close enough to other programs. It would be nice
# to determine whether this difference is algorithmic in nature,
# but in the meanwhile we will expect to parse this value.
max_IR_intensity = 215
class QChemIRTest(GenericIRTest):
"""Customized vibrational frequency unittest"""
def testtemperature(self):
"""Is the temperature 298.15 K?"""
self.assertEqual(298.15, self.data.temperature)
# def testenthalpy(self):
# """Is the enthalpy ..."""
# self.assertInside(self.data.enthalpy, )
# def testentropy(self):
# """Is the entropy ..."""
# self.assertInside(self.data.entropy, )
# def testfreeenergy(self):
# """Is the free energy ..."""
# self.assertInside(self.data.freeenergy, )
# Molecular mass of DVB in mD.
molecularmass = 130078.25
def testatommasses(self):
"""Do the atom masses sum up to the molecular mass (130078.25+-0.1mD)?"""
mm = 1000*sum(self.data.atommasses)
        self.assertAlmostEqual(mm, 130078.25, delta=0.1, msg = "Molecule mass: %f not 130078.25 +- 0.1 mD" % mm)
def testhessian(self):
"""Do the frequencies from the Hessian match the printed frequencies?"""
class GamessIRTest(GenericIRTest):
"""Customized vibrational frequency unittest"""
# Molecular mass of DVB in mD.
molecularmass = 130078.25
def testatommasses(self):
"""Do the atom masses sum up to the molecular mass (130078.25+-0.1mD)?"""
mm = 1000*sum(self.data.atommasses)
        self.assertAlmostEqual(mm, 130078.25, delta=0.1, msg = "Molecule mass: %f not 130078.25 +- 0.1 mD" % mm)
class GenericIRimgTest(unittest.TestCase):
"""Generic imaginary vibrational frequency unittest"""
def testvibdisps(self):
"""Are the dimensions of vibdisps consistent with 3N-6 x N x 3"""
numvib = 3*len(self.data.atomnos) - 6
self.assertEqual(self.data.vibdisps.shape,
(numvib, len(self.data.atomnos), 3))
def testlengths(self):
"""Are the lengths of vibfreqs and vibirs correct?"""
numvib = 3*len(self.data.atomnos) - 6
self.assertEqual(len(self.data.vibfreqs), numvib)
self.assertEqual(len(self.data.vibirs), numvib)
def testfreqval(self):
"""Is the lowest freq value negative?"""
self.assertTrue(self.data.vibfreqs[0] < 0)
## def testmaxvibdisps(self):
## """What is the maximum value of displacement for a H vs a C?"""
## Cvibdisps = compress(self.data.atomnos==6, self.data.vibdisps, 1)
## Hvibdisps = compress(self.data.atomnos==1, self.data.vibdisps, 1)
## self.assertEqual(max(abs(Cvibdisps).flat), 1.0)
class GenericRamanTest(unittest.TestCase):
"""Generic Raman unittest"""
# This value is in amu.
max_raman_intensity = 575
def testlengths(self):
"""Is the length of vibramans correct?"""
numvib = 3*len(self.data.atomnos) - 6
self.assertEqual(len(self.data.vibramans), numvib)
# The tolerance for this number has been increased, since ORCA
# failed to make it inside +/-5, but it would be nice in the future
    # to determine, if it's not too much work, whether this is due to
# algorithmic differences, or to differences in the input basis set
# or coordinates. The first would be OK, but in the second case the
# unit test jobs should be made more comparable. With cclib, we first
# of all want to succeed in parsing, but would also like to remain
# as comparable between programs as possible (for these tests).
# Note also that this value is adjusted for Gaussian and DALTON - why?
def testramanintens(self):
"""Is the maximum Raman intensity correct?"""
self.assertAlmostEqual(max(self.data.vibramans), self.max_raman_intensity, delta=8)
# We used to test this, but it seems to vary wildly between
# programs... perhaps we could use it if we knew why...
#self.assertInside(self.data.vibramans[1], 2.6872, 0.0001)
def testvibdisps(self):
"""Is the length and value of vibdisps correct?"""
assert hasattr(self.data, "vibdisps")
assert len(self.data.vibdisps) == 54
class DALTONRamanTest(GenericRamanTest):
"""Customized Raman unittest"""
max_raman_intensity = 745
class GaussianRamanTest(GenericRamanTest):
"""Customized Raman unittest"""
max_raman_intensity = 1066
class QChemRamanTest(GenericRamanTest):
"""Customized Raman unittest"""
max_raman_intensity = 588
if __name__=="__main__":
import sys
sys.path.append(os.path.join(__filedir__, ".."))
from test_data import DataSuite
suite = DataSuite(['vib'])
suite.testall()
| bsd-3-clause | -7,288,958,256,935,820,000 | 33.429245 | 108 | 0.642524 | false |
nickbp/bbusb | scripts/bbrss.py | 1 | 3476 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
bbrss.py - Prints a single random article from a specified RSS feed
Copyright (C) 2009 Nicholas Parker <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
paths = {
'reuters':'http://feeds.reuters.com/reuters/topNews?format=xml',
'cnn':'http://rss.cnn.com/rss/cnn_topstories.rss',
'msnbc':'http://rss.msnbc.msn.com/id/3032091/device/rss/rss.xml'
}
import feedparser, cPickle, sys, os.path, time, re, urlparse, random
def get_feed_cached(webpath,cachekey,cachetimeout):
cachename = sys.argv[0]+".cache."+cachekey
if (not os.path.exists(cachename) or \
time.time() - os.stat(cachename).st_mtime > cachetimeout):
feed = feedparser.parse(webpath)
        if feed.has_key('bozo_exception'):
            sys.stderr.write("Unable to retrieve '%s': " % webpath)
            sys.stderr.write(str(feed['bozo_exception'])+"\n")
            sys.exit(1)
cPickle.dump(feed,file(cachename,'w'))
return feed
else:
return cPickle.load(file(cachename))
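# For example, a hypothetical run "bbrss.py cnn" caches the parsed feed in
# "bbrss.py.cache.cnn" next to the script and reuses it for up to
# cachetimeout (1800) seconds before fetching again.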
def help(myname):
sys.stderr.write("""Syntax: %s <source/rssurl>
Sources: %s (Or use your own custom url)\n""" % \
(myname,", ".join(paths.keys())))
sys.exit(1)
if len(sys.argv) < 2:
help(sys.argv[0])
if paths.has_key(sys.argv[1]):
#predefined feed from dict
feed = get_feed_cached(paths[sys.argv[1]], sys.argv[1], 1800)
else:
#custom url
urlobj = urlparse.urlparse(sys.argv[1])
if not urlobj.netloc:
#assume bad url
print "Bad url: %s" % sys.argv[1]
help(sys.argv[0])
feed = get_feed_cached(sys.argv[1], urlobj.netloc, 1800)
if not feed or not feed['feed']:
sys.stderr.write("Invalid feed content: %s\n" % feed)
sys.exit(1)
striphtmltags = re.compile(r'<[^>]*?>')
entry = random.choice(feed['entries'])
#pick from list of preferred colors (with matching between shadow/text):
#avoid too-bright and too-dim colors:
headlinecolors=["102","120","210","021","201","210"]
textcolors = ["002","020","200","011","101","110"]
if not len(headlinecolors) == len(textcolors):
raise Exception("length of headlinecolors must match length of textcolors")
randindex = random.randint(0, len(headlinecolors)-1)
headlinecolor = headlinecolors[randindex]
textcolor = textcolors[randindex]
#if paths.has_key(sys.argv[1]):
# sitetitle = sys.argv[1].upper()
#else:
# sitetitle = feed['feed']['title']
entrytitle = entry['title']
entrycontent = striphtmltags.sub('', entry['description']).strip()
#avoid crashing on utf characters:
out = "<color%s><scolor%s><shadow>%s</shadow>: <color%s>%s" % \
(headlinecolor, textcolor, entrytitle, textcolor, entrycontent)
print unicode(out).encode("utf-8")
| gpl-3.0 | -5,275,057,104,576,273,000 | 34.835052 | 79 | 0.662543 | false |
SerialShadow/SickRage | sickbeard/providers/fnt.py | 3 | 7034 | # -*- coding: latin-1 -*-
# Author: raver2046 <[email protected]> from djoole <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import traceback
import re
import requests
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.providers import generic
from sickbeard.bs4_parser import BS4Parser
class FNTProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "FNT")
self.supportsBacklog = True
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.cache = FNTCache(self)
self.urls = {'base_url': 'https://fnt.nu',
'search': 'https://www.fnt.nu/torrents/recherche/',
'login': 'https://fnt.nu/account-login.php',
}
self.url = self.urls['base_url']
self.search_params = {
"afficher": 1, "c118": 1, "c129": 1, "c119": 1, "c120": 1, "c121": 1, "c126": 1,
"c137": 1, "c138": 1, "c146": 1, "c122": 1, "c110": 1, "c109": 1, "c135": 1, "c148": 1,
"c153": 1, "c149": 1, "c150": 1, "c154": 1, "c155": 1, "c156": 1, "c114": 1,
"visible": 1, "freeleech": 0, "nuke": 1, "3D": 0, "sort": "size", "order": "desc"
}
def isEnabled(self):
return self.enabled
def _doLogin(self):
if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {'username': self.username,
'password': self.password,
'submit' : 'Se loguer'
}
response = self.getURL(self.urls['login'], post_data=login_params, timeout=30)
if not response:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if not re.search('Pseudo ou mot de passe non valide', response):
return True
else:
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
# check for auth
if not self._doLogin():
return results
for mode in search_strings.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
self.search_params['recherche'] = search_string
data = self.getURL(self.urls['search'], params=self.search_params)
if not data:
continue
try:
with BS4Parser(data, features=["html5lib", "permissive"]) as html:
result_table = html.find('table', {'id': 'tablealign3bis'})
if not result_table:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
if result_table:
rows = result_table.findAll("tr", {"class" : "ligntorrent"})
for row in rows:
link = row.findAll('td')[1].find("a", href=re.compile("fiche_film"))
if link:
try:
title = link.text
download_url = self.urls['base_url'] + "/" + row.find("a", href=re.compile(r"download\.php"))['href']
except (AttributeError, TypeError):
continue
try:
detailseedleech = link['mtcontent']
seeders = int(detailseedleech.split("<font color='#00b72e'>")[1].split("</font>")[0])
leechers = int(detailseedleech.split("<font color='red'>")[1].split("</font>")[0])
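                                    # mtcontent is assumed to embed markup like (hypothetical):
                                    #   ...<font color='#00b72e'>12</font>...<font color='red'>3</font>...
                                    # i.e. the seeder count in green and the leecher count in red.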
#FIXME
size = -1
except Exception:
logger.log(u"Unable to parse torrent id & seeders & leechers. Traceback: %s " % traceback.format_exc(), logger.DEBUG)
continue
if not all([title, download_url]):
continue
#Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item)
except Exception, e:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
#For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
class FNTCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# Only poll FNT every 10 minutes max
self.minTime = 10
def _getRSSData(self):
search_strings = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_strings)}
provider = FNTProvider()
| gpl-3.0 | 6,732,549,903,188,624,000 | 38.740113 | 199 | 0.507819 | false |
nextgis/nextgisweb_helper_scripts | kml2geojson/kml2geojson.py | 1 | 3989 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# kml2geojson.py
# ---------------------------------------------------------
# Convert from KML to GeoJSON and keep colors. Points only yet.
# More: https://github.com/nextgis/nextgisweb_helper_scripts
#
# Usage:
# kml2geojson.py [-h] [-c] input output
# where:
# -h show this help message and exit
# input input KML
# output output GeoJSON
# -c create QML style as well
# Example:
# python kml2geojson.py -c input.kml output.geojson
#
# Copyright (C) 2016 Maxim Dubinin ([email protected])
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# A copy of the GNU General Public License is available on the World Wide Web
# at <http://www.gnu.org/copyleft/gpl.html>. You can also obtain it by writing
# to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
#
#******************************************************************************
from bs4 import BeautifulSoup
import geojson
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('input', help='Input KML')
parser.add_argument('output', help='Output GeoJSON')
parser.add_argument('-c','--create_qml', action="store_true", help='Create QML style as well')
args = parser.parse_args()
# sanity checking, only work on kml files
if not args.input.endswith('.kml'):
    sys.exit(-1)
print "Reading file: " + args.input
def create_qml(name):
qml_name = name.replace('geojson','qml')
template = """<!DOCTYPE qgis PUBLIC 'http://mrcc.com/qgis.dtd' 'SYSTEM'>
<qgis>
<renderer-v2 type="singleSymbol">
<symbols>
<symbol type="marker" name="0">
<layer class="SimpleMarker">
<prop k="color" v="167,204,93,255"/>
<prop k="color_dd_active" v="1"/>
<prop k="color_dd_expression" v=""/>
<prop k="color_dd_field" v="color"/>
<prop k="color_dd_useexpr" v="0"/>
<prop k="name" v="circle"/>
<prop k="outline_color" v="0,0,0,0"/>
<prop k="outline_style" v="solid"/>
<prop k="outline_width" v="0"/>
<prop k="outline_width_map_unit_scale" v="0,0,0,0,0,0"/>
<prop k="outline_width_unit" v="MM"/>
<prop k="scale_method" v="diameter"/>
</layer>
</symbol>
</symbols>
</renderer-v2>
</qgis>
"""
with open(qml_name,'wb') as outfile:
outfile.write(template)
if __name__ == '__main__':
soup = BeautifulSoup(open(args.input,'rb'), 'xml')
features = []
for placemark in soup.findAll('Placemark'):
extendeddata = placemark.find('ExtendedData')
data = extendeddata.findAll('Data')
properties={}
for item in data:
tag_name = item['name']
tag_val = item.find('value').text
properties[tag_name] = tag_val
        color_ge = placemark.find('Style').find('color').text[2:]
        color = '#' + color_ge[4:6] + color_ge[2:4] + color_ge[0:2]
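        # KML stores colors as aabbggrr; dropping the alpha channel ([2:])
        # leaves bbggrr, which is reordered into an HTML-style #rrggbb value
        # (e.g. 'ff0000ff' -> '#ff0000' for opaque red).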
properties['color'] = color
lat = placemark.find('coordinates').text.split(',')[1]
lon = placemark.find('coordinates').text.split(',')[0]
pnt = geojson.Point((float(lon), float(lat)))
feat = geojson.Feature(geometry=pnt, properties=properties)
features.append(feat)
with open(args.output,'wb') as outfile:
collection = geojson.FeatureCollection(features)
geojson.dump(collection, outfile)
if args.create_qml:
create_qml(args.output)
| gpl-2.0 | 8,337,087,196,105,098,000 | 35.59633 | 94 | 0.601404 | false |
ggravlingen/pytradfri | tests/test_util.py | 2 | 2259 | from pytradfri.error import PytradfriError
from pytradfri.util import load_json, save_json, BitChoices
import shutil
import tempfile
from os import path
import unittest
from unittest.mock import patch
import json
import pytest
class UtilTestsBitChoices(unittest.TestCase):
def test_bitchoices(self):
WEEKDAYS = BitChoices((("tue", "Tuesday"),))
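        # Each choice occupies one bit, and the first entry gets value 1,
        # so a mask of 1 selects exactly "tue".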
assert WEEKDAYS.get_selected_keys(1) == ["tue"]
assert len(WEEKDAYS) == 1
assert [x for x in WEEKDAYS] == [(1, "Tuesday")]
class UtilTestsJSON(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_json_save(self):
FILENAME = path.join(self.test_dir, "sample_psk_file.txt")
conf = {"identity": "pytradfri", "key": "123abc"}
written_file = save_json(FILENAME, conf)
self.assertTrue(written_file)
def test_json_load(self):
f = open(path.join(self.test_dir, "sample_psk_file2.txt"), "w")
config = {"identity": "hashstring", "key": "secretkey"}
data = json.dumps(config, sort_keys=True, indent=4)
f.write(data)
f.close()
json_data = load_json(path.join(self.test_dir, "sample_psk_file2.txt"))
self.assertEqual(json_data, {"identity": "hashstring", "key": "secretkey"})
def test_load_file_not_found(self):
assert not load_json(path.join(self.test_dir, "not_a_file"))
def test_load_not_json(self):
f = open(path.join(self.test_dir, "sample_psk_file3.txt"), "w")
data = "{not valid json"
f.write(data)
f.close()
with pytest.raises(PytradfriError):
load_json(path.join(self.test_dir, "sample_psk_file3.txt"))
def test_save_not_serializable(self):
FILENAME = path.join(self.test_dir, "should_not_save")
conf = b"bytes are not serializable"
with pytest.raises(PytradfriError):
save_json(FILENAME, conf)
def test_os_error(self):
with patch("builtins.open", side_effect=OSError(-1)):
with pytest.raises(PytradfriError):
load_json("whatever")
with pytest.raises(PytradfriError):
save_json("whatever", {})
| mit | 8,701,882,857,752,581,000 | 32.220588 | 83 | 0.625498 | false |
bunburya/bunbot | plugins/goodreads.py | 1 | 2351 | from os.path import join
import xml.etree.ElementTree as ET
from urllib.request import urlopen
from urllib.parse import urlencode
class Plugin:
SEARCH_URL = 'https://www.goodreads.com/search/index.html?'
BOOK_URL = 'https://www.goodreads.com/book/show/'
def __init__(self, bot, handler):
self.bot = bot
self.conn = bot.conn
self.handler = handler
self.conf_dir = join(self.handler.plugin_data_dir, 'goodreads')
self.load_keys()
self.hooks= [
{'type': 'command', 'key': '!book', 'func': self.search_book_by_title}
]
def search(self, title):
query = urlencode({'q': title, 'key': self.key, 'search': 'title'})
return urlopen(self.SEARCH_URL + query).read().decode()
def parse_result(self, result):
print(result)
et = ET.fromstring(result)
work = et.find('search').find('results').find('work')
if not work:
raise ValueError
book = work.find('best_book')
avg_rating = work.findtext('average_rating')
title = book.findtext('title')
author = book.find('author').findtext('name')
year = book.findtext('original_publication_year')
url = self.BOOK_URL + book.findtext('id')
return title, author, year, avg_rating, url
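    # The Goodreads search XML assumed above looks roughly like (abbreviated
    # to the fields parse_result reads):
    #   <search><results><work>
    #     <average_rating>...</average_rating>
    #     <best_book>
    #       <id>...</id><title>...</title>
    #       <author><name>...</name></author>
    #       <original_publication_year>...</original_publication_year>
    #     </best_book>
    #   </work></results></search>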
def load_keys(self):
key_file = join(self.conf_dir, 'keys')
with open(key_file) as f:
key_data = f.readlines()
self.key = key_data[0].split()[1].strip()
self.secret = key_data[1].split()[1].strip()
def search_book_by_title(self, data):
query = ' '.join(data.trailing).strip()
xml = self.search(query)
try:
title, author, year, avg_rating, url = self.parse_result(xml)
if year:
response = '"{}" by {} ({}, avg rating {}/5): {}'.format(
title, author, year, avg_rating, url
)
else:
response = '"{}" by {} (avg rating {}/5): {}'.format(
title, author, avg_rating, url
)
except ValueError:
response = 'No results found.'
self.conn.say(response, data.to)
if __name__ == '__main__':
from sys import argv
print(parse_result(search(' '.join(argv[1:]))))
| mit | -1,645,255,331,127,668,700 | 33.072464 | 86 | 0.544875 | false |
atantet/transferLorenz | plot/plotFloquetVector.py | 1 | 5683 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pylibconfig2
def field(x, p):
rho, sigma, beta = p
f = np.array([sigma * (x[1] - x[0]),
x[0] * (rho - x[2]) - x[1],
x[0] * x[1] - beta * x[2]])
return f
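# field() above is the Lorenz-63 system with parameters p = (rho, sigma, beta):
#   dx/dt = sigma * (y - x)
#   dy/dt = x * (rho - z) - y
#   dz/dt = x * y - beta * z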
def Jacobian(x, p):
rho, sigma, beta = p
J = np.array([[-sigma, sigma, 0.],
[rho - x[2], -1., -x[0]],
[x[1], x[0], -beta]])
return J
def propagateEuler(x0, field, p, dt, nt):
'''Propagate solution of ODE according to the vector field field \
with Euler scheme from x0 for nt time steps of size dt.'''
xt = np.empty((nt, x0.shape[0]))
xt[0] = x0.copy()
for t in np.arange(1, nt):
# Step solution forward
xt[t] = xt[t-1] + dt * field(xt[t-1], p)
return xt
def propagateRK4(x0, field, p, dt, nt):
    '''Propagate solution of ODE according to the vector field field \
    with the classical 4th-order Runge-Kutta scheme from x0 for nt time steps of size dt.'''
xt = np.empty((nt, x0.shape[0]))
xt[0] = x0.copy()
for t in np.arange(1, nt):
# Step solution forward
k1 = field(xt[t-1], p) * dt
tmp = k1 * 0.5 + xt[t-1]
k2 = field(tmp, p) * dt
tmp = k2 * 0.5 + xt[t-1]
k3 = field(tmp, p) * dt
tmp = k3 + xt[t-1]
k4 = field(tmp, p) * dt
tmp = (k1 + 2*k2 + 2*k3 + k4) / 6
xt[t] = xt[t-1] + tmp
return xt
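# One RK4 step as implemented above:
#   k1 = dt * f(x_t)          k2 = dt * f(x_t + k1/2)
#   k3 = dt * f(x_t + k2/2)   k4 = dt * f(x_t + k3)
#   x_{t+1} = x_t + (k1 + 2*k2 + 2*k3 + k4) / 6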
fs_latex = 'xx-large'
fs_xticklabels = 'large'
fs_yticklabels = fs_xticklabels
configFile = '../cfg/Lorenz63.cfg'
cfg = pylibconfig2.Config()
cfg.read_file(configFile)
dim = cfg.model.dim
L = cfg.simulation.LCut + cfg.simulation.spinup
printStepNum = int(cfg.simulation.printStep / cfg.simulation.dt + 0.1)
caseName = cfg.model.caseName
fileFormat = cfg.general.fileFormat
delayName = ""
if (hasattr(cfg.model, 'delaysDays')):
for d in np.arange(len(cfg.model.delaysDays)):
delayName = "%s_d%d" % (delayName, cfg.model.delaysDays[d])
# List of continuations to plot
initContRng = [[7.956126, 7.956126, 24.737477, 24.5, 0.652822]]
#contStepRng = [0.01]
contStepRng = [-0.001]
dtRng = [1.e-5, 1.e-5]
nCont = len(initContRng)
srcPostfix = "_%s%s" % (caseName, delayName)
resDir = '../results/'
contDir = '%s/continuation' % resDir
plotDir = '%s/plot/' % resDir
k = 0
initCont = initContRng[k]
contStep = contStepRng[k]
contAbs = np.abs(contStep)
sign = contStep / contAbs
exp = np.log10(contAbs)
mantis = sign * np.exp(np.log(contAbs) / exp)
dstPostfix = "%s_cont%04d_contStep%de%d_dt%d_numShoot%d" \
% (srcPostfix, int(initCont[dim] * 1000 + 0.1),
int(mantis*1.01), (int(exp*1.01)),
-np.round(np.log10(dtRng[k])), cfg.continuation.numShoot)
poFileName = '%s/poCont%s.%s' % (contDir, dstPostfix, fileFormat)
FloquetExpFileName = '%s/poExpCont%s.%s' % (contDir, dstPostfix, fileFormat)
FloquetVecFileName = '%s/poVecCont%s.%s' % (contDir, dstPostfix, fileFormat)
if (fileFormat == 'bin'):
# Read fixed point and cont
state = np.fromfile(poFileName)
# Read FloquetExpenvalues
FloquetExp = np.fromfile(FloquetExpFileName)
# Read fundamental matrices
FloquetVec = np.fromfile(FloquetVecFileName)
else:
# Read fixed point and cont
state = np.loadtxt(poFileName)
# Read FloquetExpenvalues
FloquetExp = np.loadtxt(FloquetExpFileName)
# Read fundamental matrices
FloquetVec = np.loadtxt(FloquetVecFileName)
state = state.reshape(-1, dim+2)
FloquetExp = FloquetExp.reshape(-1, 2)
FloquetExp = (FloquetExp[:, 0] + 1j * FloquetExp[:, 1]).reshape(-1, dim)
FloquetVec = FloquetVec.reshape(-1, 2)
FloquetVecReal = FloquetVec[:, 0].reshape(-1, dim, dim)
FloquetVecImag = FloquetVec[:, 1].reshape(-1, dim, dim)
po = state[:, :dim]
TRng = state[:, dim+1]
contRng = state[:, dim]
# # Reorder Floquet exp
# for t in np.arange(1, contRng.shape[0]):
# tmp = FloquetExp[t].tolist()
# for exp in np.arange(dim):
# idx = np.argmin(np.abs(tmp - FloquetExp[t-1, exp]))
# FloquetExp[t, exp] = tmp[idx]
# tmp.pop(idx)
#contSel = 24.09
contSel = 14.
idx = np.argmin((contRng - contSel)**2)
cont = contRng[idx]
poSel = po[idx]
T = TRng[idx]
FV = FloquetVecReal[idx]
FE = FloquetExp[idx]
nt = int(np.ceil(T / dtRng[k]))
# propagate
p = [cont, cfg.model.sigma, cfg.model.beta]
xt = propagateRK4(poSel, field, p, dtRng[k]*10, nt/10)
dstPostfixPlot = "%s_cont%04d_contStep%de%d_dt%d_numShoot%d" \
% (srcPostfix, int(cont * 1000 + 0.1),
int(mantis*1.01), (int(exp*1.01)),
-np.round(np.log10(dtRng[k])), cfg.continuation.numShoot)
# Plot
LyapExpNames = ['+', '0', '-']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(xt[:, 0], xt[:, 1], xt[:, 2], linestyle='--', linewidth=2)
scale = 3
for d in np.arange(dim):
if FE[d] > 1.e-6:
label = '+'
elif FE[d] < -1.e-6:
label = '-'
else:
label = '0'
ax.plot([poSel[0], poSel[0] + scale*FV[0, d]],
[poSel[1], poSel[1] + scale*FV[1, d]],
[poSel[2], poSel[2] + scale*FV[2, d]], linestyle='-', linewidth=2,
label=r'$v^{%s}$' % label)
ax.legend(loc='lower right', fontsize=fs_latex)
ax.set_xlabel(r'$x$', fontsize=fs_latex)
ax.set_ylabel(r'$y$', fontsize=fs_latex)
ax.set_zlabel(r'$z$', fontsize=fs_latex)
plt.setp(ax.get_xticklabels(), fontsize=fs_xticklabels)
plt.setp(ax.get_yticklabels(), fontsize=fs_yticklabels)
plt.setp(ax.get_zticklabels(), fontsize=fs_xticklabels)
plt.savefig('%s/continuation/FloquetVec%s.eps' % (plotDir, dstPostfixPlot),
dpi=300, bbox_inches='tight')
| gpl-3.0 | -251,475,994,320,488,420 | 30.572222 | 78 | 0.606018 | false |
ayeowch/bitnodes-hardware | poll.py | 1 | 4396 | #!/usr/bin/env python
#
# Copyright (c) Addy Yeow Chin Heng <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hardware.settings')
import functools
import json
import logging
import threading
import time
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
from collections import deque
from psutil import net_io_counters
from tornado.options import define, options
from django.conf import settings
define('address', type=str, default=settings.WEBSOCKET_HOST)
define('port', type=int, default=settings.WEBSOCKET_PORT)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/poll', PollHandler),
]
tornado.web.Application.__init__(self, handlers)
class PollHandler(tornado.websocket.WebSocketHandler):
clients = []
ticks = deque([], maxlen=180)
def open(self):
if self not in PollHandler.clients:
PollHandler.clients.append(self)
self.write_ticks()
def on_close(self):
PollHandler.clients.remove(self)
def check_origin(self, origin):
return True
def on_message(self, message):
self.write_message(message)
def write_ticks(self):
_ticks = []
for tick in PollHandler.ticks:
_ticks.append(tick)
message = json.dumps(_ticks)
logging.info(message)
self.write_message(message)
class NetworkStat(object):
def __init__(self, network_interface):
self.timestamp = time.time()
self.network_interface = network_interface
data = self._get_data()
self.bytes_sent = data.bytes_sent
self.bytes_recv = data.bytes_recv
self.sent_bps = 0
self.recv_bps = 0
def get(self):
timestamp = time.time()
seconds = timestamp - self.timestamp
if seconds >= 1:
self.timestamp = timestamp
data = self._get_data()
bytes_sent = data.bytes_sent
bytes_recv = data.bytes_recv
self.sent_bps = max((bytes_sent - self.bytes_sent) / seconds, 0) * 8
self.recv_bps = max((bytes_recv - self.bytes_recv) / seconds, 0) * 8
self.bytes_sent = bytes_sent
self.bytes_recv = bytes_recv
return {
'o': int(self.sent_bps),
'i': int(self.recv_bps),
}
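    # Throughput is returned in bits per second: byte-counter deltas are
    # divided by elapsed wall-clock seconds and multiplied by 8, keyed
    # 'o' for outbound and 'i' for inbound traffic.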
def _get_data(self):
return net_io_counters(pernic=True)[self.network_interface]
def publish():
io_loop = tornado.ioloop.IOLoop.instance()
network_stat = NetworkStat(settings.NETWORK_INTERFACE)
while True:
tick = {
't': int(time.time() * 1000), # In ms
'net': network_stat.get(),
}
PollHandler.ticks.append(tick)
message = json.dumps([tick])
logging.info(message)
for client in PollHandler.clients:
io_loop.add_callback(functools.partial(client.on_message, message))
time.sleep(5)
if __name__ == '__main__':
threading.Thread(target=publish).start()
tornado.options.parse_command_line()
application = Application()
server = tornado.httpserver.HTTPServer(application)
server.listen(options.port, address=options.address)
tornado.ioloop.IOLoop.instance().start()
| mit | -6,233,792,599,498,255,000 | 30.4 | 80 | 0.664468 | false |
wangbokun/ec2instances.info | fabfile.py | 7 | 2877 | # To use this script you must have the following environment variables set:
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
# as explained in: http://boto.s3.amazonaws.com/s3_tut.html
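# Example (hypothetical credentials):
#   export AWS_ACCESS_KEY_ID=AKIAXXXXXXXXXXXXXXXX
#   export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#   fab update    # scrape the data, render the site and deploy it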
import os
import webbrowser
import traceback
from boto import connect_s3
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.key import Key
from fabric.api import abort, task
from fabric.contrib.console import confirm
from render import render
from scrape import scrape
BUCKET_NAME = 'www.ec2instances.info'
# Work around https://github.com/boto/boto/issues/2836 by explicitly setting
# the calling_format.
BUCKET_CALLING_FORMAT = OrdinaryCallingFormat()
abspath = lambda filename: os.path.join(os.path.abspath(os.path.dirname(__file__)),
filename)
@task
def build():
"""Scrape AWS sources for data and build the site"""
data_file = 'www/instances.json'
try:
scrape(data_file)
except Exception, e:
print "ERROR: Unable to scrape site data: %s" % e
print traceback.print_exc()
render_html()
@task
def render_html():
"""Render HTML but do not update data from Amazon"""
render('www/instances.json', 'in/index.html.mako', 'www/index.html')
@task
def preview():
url = 'file://localhost/%s' % (abspath('www/index.html'))
webbrowser.open(url, new=2)
@task
def bucket_create():
"""Creates the S3 bucket used to host the site"""
conn = connect_s3(calling_format=BUCKET_CALLING_FORMAT)
bucket = conn.create_bucket(BUCKET_NAME, policy='public-read')
bucket.configure_website('index.html', 'error.html')
print 'Bucket %r created.' % BUCKET_NAME
@task
def bucket_delete():
"""Deletes the S3 bucket used to host the site"""
if not confirm("Are you sure you want to delete the bucket %r?" % BUCKET_NAME):
abort('Aborting at user request.')
conn = connect_s3(calling_format=BUCKET_CALLING_FORMAT)
conn.delete_bucket(BUCKET_NAME)
print 'Bucket %r deleted.' % BUCKET_NAME
@task
def deploy(root_dir='www'):
"""Deploy current content"""
conn = connect_s3(calling_format=BUCKET_CALLING_FORMAT)
bucket = conn.get_bucket(BUCKET_NAME)
for root, dirs, files in os.walk(root_dir):
for name in files:
if name.startswith('.'):
continue
local_path = os.path.join(root, name)
remote_path = local_path[len(root_dir)+1:]
print '%s -> %s/%s' % (local_path, BUCKET_NAME, remote_path)
k = Key(bucket)
k.key = remote_path
headers = {
"Cache-Control": "max-age=86400, must-revalidate"}
k.set_contents_from_filename(local_path, headers=headers,
policy='public-read')
@task(default=True)
def update():
"""Build and deploy the site"""
build()
deploy()
| mit | 4,661,005,102,698,009,000 | 31.325843 | 83 | 0.646507 | false |
DNFcode/edx-platform | lms/djangoapps/courseware/tests/test_tabs.py | 5 | 4364 | """
Test cases for tabs.
"""
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test.utils import override_settings
from mock import MagicMock, Mock, patch
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.courses import get_course_by_id
from courseware.tests.helpers import get_request_for_user, LoginEnrollmentTestCase
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MIXED_TOY_MODULESTORE, TEST_DATA_MIXED_CLOSED_MODULESTORE
)
from courseware.views import get_static_tab_contents, static_tab
from student.tests.factories import UserFactory
from xmodule.tabs import CourseTabList
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@override_settings(MODULESTORE=TEST_DATA_MIXED_TOY_MODULESTORE)
class StaticTabDateTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""Test cases for Static Tab Dates."""
def setUp(self):
self.course = CourseFactory.create()
self.page = ItemFactory.create(
category="static_tab", parent_location=self.course.location,
data="OOGIE BLOOGIE", display_name="new_tab"
)
self.toy_course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
def test_logged_in(self):
self.setup_user()
url = reverse('static_tab', args=[self.course.id.to_deprecated_string(), 'new_tab'])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
def test_anonymous_user(self):
url = reverse('static_tab', args=[self.course.id.to_deprecated_string(), 'new_tab'])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
def test_invalid_course_key(self):
request = get_request_for_user(UserFactory.create())
with self.assertRaises(Http404):
static_tab(request, course_id='edX/toy', tab_slug='new_tab')
def test_get_static_tab_contents(self):
course = get_course_by_id(self.toy_course_key)
request = get_request_for_user(UserFactory.create())
tab = CourseTabList.get_tab_by_slug(course.tabs, 'resources')
# Test render works okay
tab_content = get_static_tab_contents(request, course, tab)
self.assertIn(self.toy_course_key.to_deprecated_string(), tab_content)
self.assertIn('static_tab', tab_content)
# Test when render raises an exception
with patch('courseware.views.get_module') as mock_module_render:
mock_module_render.return_value = MagicMock(
render=Mock(side_effect=Exception('Render failed!'))
)
static_tab = get_static_tab_contents(request, course, tab)
self.assertIn("this module is temporarily unavailable", static_tab)
@override_settings(MODULESTORE=TEST_DATA_MIXED_CLOSED_MODULESTORE)
class StaticTabDateTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the static tab dates of an XML course
"""
# The following XML test course (which lives at common/test/data/2014)
# is closed; we're testing that tabs still appear when
# the course is already closed
xml_course_key = SlashSeparatedCourseKey('edX', 'detached_pages', '2014')
# this text appears in the test course's tab
# common/test/data/2014/tabs/8e4cce2b4aaf4ba28b1220804619e41f.html
xml_data = "static 463139"
xml_url = "8e4cce2b4aaf4ba28b1220804619e41f"
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('static_tab', args=[self.xml_course_key.to_deprecated_string(), self.xml_url])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('static_tab', args=[self.xml_course_key.to_deprecated_string(), self.xml_url])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
| agpl-3.0 | 4,186,757,118,036,417,000 | 43.080808 | 100 | 0.700046 | false |
preo/dnspython | dns/rdtypes/ANY/HINFO.py | 8 | 2512 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
class HINFO(dns.rdata.Rdata):
"""HINFO record
@ivar cpu: the CPU type
@type cpu: string
@ivar os: the OS type
@type os: string
@see: RFC 1035"""
__slots__ = ['cpu', 'os']
def __init__(self, rdclass, rdtype, cpu, os):
super(HINFO, self).__init__(rdclass, rdtype)
self.cpu = cpu
self.os = os
def to_text(self, origin=None, relativize=True, **kw):
return '"%s" "%s"' % (dns.rdata._escapify(self.cpu),
dns.rdata._escapify(self.os))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
cpu = tok.get_string()
os = tok.get_string()
tok.get_eol()
return cls(rdclass, rdtype, cpu, os)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.cpu)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.cpu)
l = len(self.os)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.os)
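    # Wire format: cpu and os are each encoded as a DNS character-string,
    # i.e. one length octet followed by up to 255 bytes of data (RFC 1035).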
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
cpu = wire[current : current + l].unwrap()
current += l
rdlen -= l
l = ord(wire[current])
current += 1
rdlen -= 1
if l != rdlen:
raise dns.exception.FormError
os = wire[current : current + l].unwrap()
return cls(rdclass, rdtype, cpu, os)
from_wire = classmethod(from_wire)
| isc | -4,654,875,766,697,699,000 | 31.623377 | 79 | 0.61664 | false |
renatopp/liac-arff | docs/source/conf.py | 2 | 7856 | # -*- coding: utf-8 -*-
#
# liac-arff documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 12 21:11:24 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']#, 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'liac-arff'
copyright = u'2014, Renato de Pontes Pereira'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'liac-arffdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'liac-arff.tex', u'liac-arff Documentation',
u'Renato de Pontes Pereira', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'liac-arff', u'liac-arff Documentation',
[u'Renato de Pontes Pereira'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'liac-arff', u'liac-arff Documentation',
u'Renato de Pontes Pereira', 'liac-arff', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit | -6,007,727,522,957,781,000 | 31.46281 | 80 | 0.703411 | false |
StackStorm/st2contrib | archive/packs/rackspace/actions/lib/formatters.py | 12 | 1500 | from netaddr import IPAddress
__all__ = [
'to_server_dict',
'to_dns_zone_dict',
'to_dns_record_dict'
]
def to_server_dict(server):
public_ips = [ip['addr'] for ip in server.addresses['public']]
private_ips = [ip['addr'] for ip in server.addresses['private']]
# Pick out first public IPv4 and IPv6 address
public_ipv4 = None
public_ipv6 = None
for ip in public_ips:
try:
ip_obj = IPAddress(ip)
except Exception:
continue
        if not ip_obj.is_private():
            if ip_obj.version == 4 and public_ipv4 is None:
                public_ipv4 = ip
            elif ip_obj.version == 6 and public_ipv6 is None:
                public_ipv6 = ip
result = {
'id': server.id,
'name': server.name,
'status': server.status,
'image_id': server.image['id'],
'flavor_id': server.flavor['id'],
'public_ips': public_ips,
'private_ips': private_ips,
'public_ipv4': public_ipv4,
'public_ipv6': public_ipv6,
'key_name': server.key_name,
'metadata': server.metadata
}
return result
def to_dns_zone_dict(zone):
result = {
'id': zone.id,
'name': zone.name,
'email_address': zone.emailAddress,
'ttl': zone.ttl
}
return result
def to_dns_record_dict(record):
result = {
'id': record.id,
'name': record.name,
'type': record.type,
'data': record.data,
'ttl': record.ttl
}
return result
| apache-2.0 | -5,305,802,822,224,335,000 | 22.4375 | 68 | 0.532667 | false |
xzturn/tensorflow | tensorflow/python/debug/cli/readline_ui_test.py | 7 | 6843 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the readline-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tempfile
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import readline_ui
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class MockReadlineUI(readline_ui.ReadlineUI):
"""Test subclass of ReadlineUI that bypasses terminal manipulations."""
def __init__(self, on_ui_exit=None, command_sequence=None):
readline_ui.ReadlineUI.__init__(
self, on_ui_exit=on_ui_exit,
config=cli_config.CLIConfig(config_file_path=tempfile.mktemp()))
self._command_sequence = command_sequence
self._command_counter = 0
self.observers = {"screen_outputs": []}
def _get_user_command(self):
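    """Return the next command from the scripted sequence instead of
    prompting the user on a terminal."""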
command = self._command_sequence[self._command_counter]
self._command_counter += 1
return command
def _display_output(self, screen_output):
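    """Record the screen output for later assertions instead of drawing it."""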
self.observers["screen_outputs"].append(screen_output)
class CursesTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mkdtemp()
self._tmp_config_path = os.path.join(self._tmp_dir, ".tfdbg_config")
self.assertFalse(gfile.Exists(self._tmp_config_path))
super(CursesTest, self).setUp()
def tearDown(self):
file_io.delete_recursively(self._tmp_dir)
super(CursesTest, self).tearDown()
def _babble(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Do babble.", usage=argparse.SUPPRESS)
ap.add_argument(
"-n",
"--num_times",
dest="num_times",
type=int,
default=60,
help="How many times to babble")
parsed = ap.parse_args(args)
lines = ["bar"] * parsed.num_times
return debugger_cli_common.RichTextLines(lines)
def testUIFactoryCreatesReadlineUI(self):
ui = ui_factory.get_ui(
"readline",
config=cli_config.CLIConfig(config_file_path=self._tmp_config_path))
self.assertIsInstance(ui, readline_ui.ReadlineUI)
def testUIFactoryRaisesExceptionOnInvalidUIType(self):
with self.assertRaisesRegexp(ValueError, "Invalid ui_type: 'foobar'"):
ui_factory.get_ui(
"foobar",
config=cli_config.CLIConfig(config_file_path=self._tmp_config_path))
def testUIFactoryRaisesExceptionOnInvalidUITypeGivenAvailable(self):
with self.assertRaisesRegexp(ValueError, "Invalid ui_type: 'readline'"):
ui_factory.get_ui(
"readline",
available_ui_types=["curses"],
config=cli_config.CLIConfig(config_file_path=self._tmp_config_path))
def testRunUIExitImmediately(self):
"""Make sure that the UI can exit properly after launch."""
ui = MockReadlineUI(command_sequence=["exit"])
ui.run_ui()
# No screen output should have happened.
self.assertEqual(0, len(ui.observers["screen_outputs"]))
def testRunUIEmptyCommand(self):
"""Issue an empty command then exit."""
ui = MockReadlineUI(command_sequence=["", "exit"])
ui.run_ui()
self.assertEqual(1, len(ui.observers["screen_outputs"]))
def testRunUIWithInitCmd(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=["exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui(init_command="babble")
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(1, len(screen_outputs))
self.assertEqual(["bar"] * 60, screen_outputs[0].lines)
def testRunUIWithValidUsersCommands(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=["babble -n 3", "babble -n 6", "exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(2, len(screen_outputs))
self.assertEqual(["bar"] * 3, screen_outputs[0].lines)
self.assertEqual(["bar"] * 6, screen_outputs[1].lines)
def testRunUIWithInvalidUsersCommands(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=["babble -n 3", "wobble", "exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(2, len(screen_outputs))
self.assertEqual(["bar"] * 3, screen_outputs[0].lines)
self.assertEqual(["ERROR: Invalid command prefix \"wobble\""],
screen_outputs[1].lines)
def testRunUIWithOnUIExitCallback(self):
observer = {"callback_invoked": False}
def callback_for_test():
observer["callback_invoked"] = True
ui = MockReadlineUI(on_ui_exit=callback_for_test, command_sequence=["exit"])
self.assertFalse(observer["callback_invoked"])
ui.run_ui()
self.assertEqual(0, len(ui.observers["screen_outputs"]))
self.assertTrue(observer["callback_invoked"])
def testIncompleteRedirectWorks(self):
output_path = tempfile.mktemp()
ui = MockReadlineUI(
command_sequence=["babble -n 2 > %s" % output_path, "exit"])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
screen_outputs = ui.observers["screen_outputs"]
self.assertEqual(1, len(screen_outputs))
self.assertEqual(["bar"] * 2, screen_outputs[0].lines)
with gfile.Open(output_path, "r") as f:
self.assertEqual("bar\nbar\n", f.read())
def testConfigSetAndShow(self):
"""Run UI with an initial command specified."""
ui = MockReadlineUI(command_sequence=[
"config set graph_recursion_depth 5", "config show", "exit"])
ui.run_ui()
outputs = ui.observers["screen_outputs"]
self.assertEqual(
["Command-line configuration:",
"",
" graph_recursion_depth: 5"], outputs[1].lines[:3])
if __name__ == "__main__":
googletest.main()
| apache-2.0 | -4,536,263,132,437,445,600 | 33.215 | 80 | 0.680842 | false |
openmips/stbgui | lib/python/Plugins/SystemPlugins/SoftwareManager/BackupRestore.py | 4 | 13947 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.Console import Console
from Screens.Standby import TryQuitMainloop
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Pixmap import Pixmap
from Tools.LoadPixmap import LoadPixmap
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Components.Sources.List import List
from Components.Button import Button
from Components.config import getConfigListEntry, configfile, ConfigSelection, ConfigSubsection, ConfigText, ConfigLocations
from Components.config import config
from Components.ConfigList import ConfigList,ConfigListScreen
from Components.FileList import MultiFileSelectList
from Components.Network import iNetwork
from Plugins.Plugin import PluginDescriptor
from enigma import eTimer, eEnv, eConsoleAppContainer, eEPGCache
from Tools.Directories import *
from os import system, popen, path, makedirs, listdir, access, stat, rename, remove, W_OK, R_OK
from time import gmtime, strftime, localtime, sleep
from datetime import date
from boxbranding import getBoxType, getImageDistro
config.plugins.configurationbackup = ConfigSubsection()
config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/hdd/', visible_width = 50, fixed_size = False)
config.plugins.configurationbackup.backupdirs = ConfigLocations(default=[eEnv.resolve('${sysconfdir}/enigma2/'), '/etc/network/interfaces', '/etc/wpa_supplicant.conf', '/etc/wpa_supplicant.ath0.conf', '/etc/wpa_supplicant.wlan0.conf', '/etc/resolv.conf', '/etc/default_gw', '/etc/hostname'])
def getBackupPath():
backuppath = config.plugins.configurationbackup.backuplocation.getValue()
box = getBoxType()
distro = getImageDistro()
if backuppath.endswith('/'):
return backuppath + 'backup_' + distro + '_' + box
else:
return backuppath + '/backup_' + distro + '_' + box
def getOldBackupPath():
backuppath = config.plugins.configurationbackup.backuplocation.getValue()
if backuppath.endswith('/'):
return backuppath + 'backup'
else:
return backuppath + '/backup'
def getBackupFilename():
return "enigma2settingsbackup.tar.gz"
def SettingsEntry(name, checked):
if checked:
picture = LoadPixmap(cached = True, path = resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_on.png"));
else:
picture = LoadPixmap(cached = True, path = resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_off.png"));
return (name, picture, checked)
class BackupScreen(Screen, ConfigListScreen):
skin = """
<screen position="135,144" size="350,310" title="Backup is running" >
<widget name="config" position="10,10" size="330,250" transparent="1" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, runBackup = False):
Screen.__init__(self, session)
self.session = session
self.runBackup = runBackup
self["actions"] = ActionMap(["WizardActions", "DirectionActions"],
{
"ok": self.close,
"back": self.close,
"cancel": self.close,
}, -1)
self.finished_cb = None
self.backuppath = getBackupPath()
self.backupfile = getBackupFilename()
self.fullbackupfilename = self.backuppath + "/" + self.backupfile
self.list = []
ConfigListScreen.__init__(self, self.list)
self.onLayoutFinish.append(self.layoutFinished)
if self.runBackup:
self.onShown.append(self.doBackup)
def layoutFinished(self):
self.setWindowTitle()
def setWindowTitle(self):
self.setTitle(_("Backup is running..."))
def doBackup(self):
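		# Flush the configuration (and optionally the EPG cache) to disk,
		# rotate any existing archive by prefixing its creation date,
		# then run tar in a Console window to create the new backup.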
configfile.save()
if config.plugins.softwaremanager.epgcache.value:
eEPGCache.getInstance().save()
try:
			if not path.exists(self.backuppath):
makedirs(self.backuppath)
self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.getValue() )
if path.exists(self.fullbackupfilename):
dt = str(date.fromtimestamp(stat(self.fullbackupfilename).st_ctime))
self.newfilename = self.backuppath + "/" + dt + '-' + self.backupfile
if path.exists(self.newfilename):
remove(self.newfilename)
rename(self.fullbackupfilename,self.newfilename)
if self.finished_cb:
self.session.openWithCallback(self.finished_cb, Console, title = _("Backup is running..."), cmdlist = ["tar -czvf " + self.fullbackupfilename + " " + self.backupdirs],finishedCallback = self.backupFinishedCB,closeOnSuccess = True)
else:
self.session.open(Console, title = _("Backup is running..."), cmdlist = ["tar -czvf " + self.fullbackupfilename + " " + self.backupdirs],finishedCallback = self.backupFinishedCB, closeOnSuccess = True)
except OSError:
if self.finished_cb:
self.session.openWithCallback(self.finished_cb, MessageBox, _("Sorry, your backup destination is not writeable.\nPlease select a different one."), MessageBox.TYPE_INFO, timeout = 10 )
else:
self.session.openWithCallback(self.backupErrorCB,MessageBox, _("Sorry, your backup destination is not writeable.\nPlease select a different one."), MessageBox.TYPE_INFO, timeout = 10 )
def backupFinishedCB(self,retval = None):
self.close(True)
def backupErrorCB(self,retval = None):
self.close(False)
def runAsync(self, finished_cb):
self.finished_cb = finished_cb
self.doBackup()
class BackupSelection(Screen):
skin = """
<screen name="BackupSelection" position="center,center" size="560,400" title="Select files/folders to backup">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="checkList" position="5,50" size="550,250" transparent="1" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["key_yellow"] = StaticText()
self.selectedFiles = config.plugins.configurationbackup.backupdirs.value
defaultDir = '/'
inhibitDirs = ["/bin", "/boot", "/dev", "/autofs", "/lib", "/proc", "/sbin", "/sys", "/hdd", "/tmp", "/mnt", "/media"]
self.filelist = MultiFileSelectList(self.selectedFiles, defaultDir, inhibitDirs = inhibitDirs )
self["checkList"] = self.filelist
self["actions"] = ActionMap(["DirectionActions", "OkCancelActions", "ShortcutActions"],
{
"cancel": self.exit,
"red": self.exit,
"yellow": self.changeSelectionState,
"green": self.saveSelection,
"ok": self.okClicked,
"left": self.left,
"right": self.right,
"down": self.down,
"up": self.up
}, -1)
if not self.selectionChanged in self["checkList"].onSelectionChanged:
self["checkList"].onSelectionChanged.append(self.selectionChanged)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
idx = 0
self["checkList"].moveToIndex(idx)
self.setWindowTitle()
self.selectionChanged()
def setWindowTitle(self):
self.setTitle(_("Select files/folders to backup"))
def selectionChanged(self):
current = self["checkList"].getCurrent()[0]
if len(current) > 2:
if current[2] is True:
self["key_yellow"].setText(_("Deselect"))
else:
self["key_yellow"].setText(_("Select"))
def up(self):
self["checkList"].up()
def down(self):
self["checkList"].down()
def left(self):
self["checkList"].pageUp()
def right(self):
self["checkList"].pageDown()
def changeSelectionState(self):
self["checkList"].changeSelectionState()
self.selectedFiles = self["checkList"].getSelectedList()
def saveSelection(self):
self.selectedFiles = self["checkList"].getSelectedList()
config.plugins.configurationbackup.backupdirs.value = self.selectedFiles
config.plugins.configurationbackup.backupdirs.save()
config.plugins.configurationbackup.save()
config.save()
self.close(None)
def exit(self):
self.close(None)
def okClicked(self):
if self.filelist.canDescent():
self.filelist.descent()
class RestoreMenu(Screen):
skin = """
<screen name="RestoreMenu" position="center,center" size="560,400" title="Restore backups" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="filelist" position="5,50" size="550,230" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, plugin_path):
Screen.__init__(self, session)
self.skin_path = plugin_path
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Restore"))
self["key_yellow"] = StaticText(_("Delete"))
self.sel = []
self.val = []
self.entry = False
self.exe = False
self.path = ""
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.KeyOk,
"cancel": self.keyCancel
}, -1)
self["shortcuts"] = ActionMap(["ShortcutActions"],
{
"red": self.keyCancel,
"green": self.KeyOk,
"yellow": self.deleteFile,
})
self.flist = []
self["filelist"] = MenuList(self.flist)
self.fill_list()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setWindowTitle()
def setWindowTitle(self):
self.setTitle(_("Restore backups"))
def fill_list(self):
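		# Collect all *.tar.gz archives from the backup directory,
		# creating the directory first if it does not exist yet.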
self.flist = []
self.path = getBackupPath()
		if not path.exists(self.path):
makedirs(self.path)
		for fname in listdir(self.path):
			if fname.endswith(".tar.gz"):
				self.flist.append(fname)
self.entry = True
self["filelist"].l.setList(self.flist)
def KeyOk(self):
		if not self.exe and self.entry:
self.sel = self["filelist"].getCurrent()
if self.sel:
self.val = self.path + "/" + self.sel
self.session.openWithCallback(self.startRestore, MessageBox, _("Are you sure you want to restore\nthe following backup:\n%s\nYour receiver will restart after the backup has been restored!") % (self.sel))
def keyCancel(self):
self.close()
def startRestore(self, ret = False):
		if ret:
self.exe = True
self.session.open(Console, title = _("Restoring..."), cmdlist = ["tar -xzvf " + self.path + "/" + self.sel + " -C /", "killall -9 enigma2", "/etc/init.d/autofs restart"])
def deleteFile(self):
		if not self.exe and self.entry:
self.sel = self["filelist"].getCurrent()
if self.sel:
self.val = self.path + "/" + self.sel
self.session.openWithCallback(self.startDelete, MessageBox, _("Are you sure you want to delete\nthe following backup:\n") + self.sel)
def startDelete(self, ret = False):
		if ret:
			self.exe = True
			print "removing:", self.val
			if path.exists(self.val):
				remove(self.val)
self.exe = False
self.fill_list()
class RestoreScreen(Screen, ConfigListScreen):
skin = """
<screen position="135,144" size="350,310" title="Restore is running..." >
<widget name="config" position="10,10" size="330,250" transparent="1" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, runRestore = False):
Screen.__init__(self, session)
self.session = session
self.runRestore = runRestore
self["actions"] = ActionMap(["WizardActions", "DirectionActions"],
{
"ok": self.close,
"back": self.close,
"cancel": self.close,
}, -1)
self.finished_cb = None
self.backuppath = getBackupPath()
self.backupfile = getBackupFilename()
self.fullbackupfilename = self.backuppath + "/" + self.backupfile
self.list = []
ConfigListScreen.__init__(self, self.list)
self.onLayoutFinish.append(self.layoutFinished)
if self.runRestore:
self.onShown.append(self.doRestore)
def layoutFinished(self):
self.setWindowTitle()
def setWindowTitle(self):
self.setTitle(_("Restoring..."))
def doRestore(self):
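		# Build the restore command list; on boxes with a video scaling
		# node the picture position/size registers are reset as well.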
if path.exists("/proc/stb/vmpeg/0/dst_width"):
restorecmdlist = ["tar -xzvf " + self.fullbackupfilename + " -C /", "echo 0 > /proc/stb/vmpeg/0/dst_height", "echo 0 > /proc/stb/vmpeg/0/dst_left", "echo 0 > /proc/stb/vmpeg/0/dst_top", "echo 0 > /proc/stb/vmpeg/0/dst_width", "killall -9 enigma2", "/etc/init.d/autofs restart"]
else:
restorecmdlist = ["tar -xzvf " + self.fullbackupfilename + " -C /", "killall -9 enigma2", "/etc/init.d/autofs restart"]
if self.finished_cb:
self.session.openWithCallback(self.finished_cb, Console, title = _("Restoring..."), cmdlist = restorecmdlist)
else:
self.session.open(Console, title = _("Restoring..."), cmdlist = restorecmdlist)
def backupFinishedCB(self,retval = None):
self.close(True)
def backupErrorCB(self,retval = None):
self.close(False)
def runAsync(self, finished_cb):
self.finished_cb = finished_cb
self.doRestore()
| gpl-2.0 | -7,068,509,428,050,927,000 | 38.176966 | 291 | 0.708396 | false |
ryokbys/nap | fitpot/util/mike/parallel_run_smd.py | 1 | 2292 | #!/usr/local/bin/python
import os,sys,glob,subprocess
usage="""
Usage: python parallel_run_smd.py in.params.NN
python parallel_run_smd.py in.params.NN smpl_Mg1 smpl_Mg2 ...
"""
if len(sys.argv) < 2:
print '[Error] Number of arguments was wrong.'
print usage
sys.exit()
elif len(sys.argv) == 2:
fparam= sys.argv[1]
dirs= glob.glob('smpl_*')
elif len(sys.argv) > 2:
fparam= sys.argv[1]
dirs = []
for iarg,arg in enumerate(sys.argv):
if iarg < 2:
continue
dirs.append(arg)
dirs.sort()
#print dirs
nodefname= 'nodelist.txt'
if not os.path.exists(nodefname):
print ' [Error] {0} does not exist !!!'.format(nodefname)
sys.exit()
nodefile=open(nodefname,'r')
nodes=[]
for line in nodefile.readlines():
nodes.append(line.split()[0])
nodefile.close()
uniqnodes = []
for node in nodes:
if node not in uniqnodes:
uniqnodes.append(node)
#...assign to-be-computed directories to each node
dir_per_node= []
ndirs= [ 0 for i in range(len(nodes))]
nrem= len(dirs)%len(nodes)
for i in range(len(nodes)):
ndirs[i]= len(dirs)/len(nodes)
if i < nrem:
ndirs[i] += 1
idir= 0
done= False
for inode in range(len(nodes)):
arr= []
for i in range(ndirs[inode]):
if idir >= len(dirs):
done= True
break
arr.append(dirs[idir])
idir += 1
if len(arr) != 0:
dir_per_node.append(arr)
if done:
break
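#...copy the current parameter file to every unique node before launching runs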
for node in uniqnodes:
os.system('scp {0} {1}:{2}/'.format(fparam,node,os.getcwd()))
procs= []
for inode in range(len(dir_per_node)):
node= nodes[inode]
dir_list= dir_per_node[inode]
str= ""
for dir in dir_list:
str += " "+dir
#...create node file for smd run
fname='/tmp/nodefile_{0}'.format(node)
f= open(fname,'w')
f.write(node+'\n')
f.close()
#...run run_smd.sh on the remote node
# cmd='mpirun --hostfile {}'.format(fname) \
#          + ' -np 1 ./run_smd.sh {} {}'.format(fparam,dirs_str)
#os.system('scp {0} {1}:{2}/'.format(fparam,node,os.getcwd()))
    cmd='ssh -q {0} "cd {1} && ./run_smd.sh {2} {3}"'.format(node,os.getcwd(),fparam,dirs_str)
procs.append(subprocess.Popen(cmd,shell=True))
for i in range(len(procs)):
procs[i].wait()
# print " running smd done."
| mit | -4,321,052,109,137,205,000 | 24.752809 | 89 | 0.593368 | false |
RedHatQE/cfme_tests | cfme/tests/configure/test_logs.py | 1 | 4293 | # -*- coding: utf-8 -*-
import pytest
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.utils.blockers import BZ
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.tier(3),
pytest.mark.provider(
[AzureProvider, EC2Provider, RHEVMProvider, SCVMMProvider], scope="module"
),
pytest.mark.usefixtures("setup_provider"),
]
@pytest.fixture
def log_exists(appliance, provider):
    result = appliance.ssh_client.run_command(
        "(ls /var/www/miq/vmdb/log/{}.log >> /dev/null 2>&1 && echo True) || echo False".format(
            provider.log_name
        )
    )
    # bool() of a non-empty string is always True, so the command's printed
    # value has to be compared explicitly.
    return result.output.strip() == "True"
def test_provider_log_exists(log_exists):
"""
Tests if provider log exists
Metadata:
test_flag: log
Polarion:
assignee: anikifor
casecomponent: Configuration
initialEstimate: 1/4h
"""
assert log_exists
def test_provider_log_rotate(appliance, provider, log_exists):
"""
Tests that log rotation works for provider log
Steps:
1. Force log rotation with default config miq_logs.conf
2. Verify that new
Metadata:
test_flag: log
Polarion:
assignee: anikifor
initialEstimate: 1/4h
casecomponent: Configuration
"""
assert log_exists, "Log file {}.log doesn't exist".format(provider.log_name)
appliance.ssh_client.run_command("logrotate -f /etc/logrotate.d/miq_logs.conf")
logs_count = int(appliance.ssh_client.run_command(
"ls -l /var/www/miq/vmdb/log/{}.log*|wc -l".format(
provider.log_name
)
).output.rstrip())
assert logs_count > 1, "{}.log wasn't rotated by default miq_logs.conf".format(
provider.log_name
)
def test_provider_log_updated(appliance, provider, log_exists):
"""
Tests that providers log is not empty and updatable
Steps:
1. Store log before provider refresh
2. Refresh provider
3. Store log once again
4. Compare logs from 1 and 3
Metadata:
test_flag: log
Polarion:
assignee: anikifor
initialEstimate: 1/4h
casecomponent: Configuration
"""
assert log_exists, "Log file {}.log doesn't exist".format(provider.log_name)
log_before = appliance.ssh_client.run_command(
"md5sum /var/www/miq/vmdb/log/{}.log | awk '{{ print $1 }}'".format(
provider.log_name
)
).output
wait_for(provider.is_refreshed, func_kwargs=dict(refresh_delta=10), timeout=600)
log_after = appliance.ssh_client.run_command(
"md5sum /var/www/miq/vmdb/log/{}.log | awk '{{ print $1 }}'".format(
provider.log_name
)
).output
assert log_before != log_after, "Log hashes are the same"
@pytest.mark.meta(blockers=[BZ(1633656, forced_streams=['5.10', 'upstream'])])
def test_provider_log_level(appliance, provider, log_exists):
"""
Tests that log level in advanced settings affects log files
Bugzilla:
1633656
1640718
Metadata:
test_flag: log
Polarion:
assignee: anikifor
initialEstimate: 1/4h
casecomponent: Configuration
testSteps:
1. Change log level to debug
2. Refresh provider
3. Check logs contain debug level
4. Reset level back
"""
assert log_exists, "Log file {}.log doesn't exist".format(provider.log_name)
log_level = appliance.server.advanced_settings['log']['level_{}'.format(provider.log_name)]
# set log level to debug
wait_for(lambda: appliance.server.update_advanced_settings(
{'log': {'level_{}'.format(provider.log_name): 'debug'}}), timeout=300)
wait_for(provider.is_refreshed, func_kwargs=dict(refresh_delta=10), timeout=600)
debug_in_logs = appliance.ssh_client.run_command(
"cat /var/www/miq/vmdb/log/{}.log | grep DEBUG".format(provider.log_name))
# set log level back
appliance.server.update_advanced_settings(
{'log': {'level_{}'.format(provider.log_name): log_level}})
assert debug_in_logs.success
| gpl-2.0 | 316,357,081,176,917,200 | 29.232394 | 100 | 0.642674 | false |
mediafactory/tryton_client_desktop | tryton/gui/window/view_board/parser.py | 2 | 7099 | #This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
'Parser'
import gtk
import gettext
from tryton.gui.window.view_form.view.form_gtk.parser import _container, VBox
import tryton.common as common
from action import Action
from tryton.config import CONFIG
_ = gettext.gettext
class ParserBoard(object):
def __init__(self, context=None):
self.title = None
self.context = context
def parse(self, root_node, notebook=None, paned=None, tooltips=None):
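        """Parse a board view XML node into a GTK container; returns the
        container widget and the list of Action widgets created on the way."""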
widgets = []
attrs = common.node_attributes(root_node)
if not tooltips:
tooltips = common.Tooltips()
container = _container(tooltips)
container.new(col=int(attrs.get('col', 4)))
if not self.title:
self.title = attrs.get('string', 'Unknown')
for node in root_node.childNodes:
if not node.nodeType == node.ELEMENT_NODE:
continue
attrs = common.node_attributes(node)
yexpand = int(attrs.get('yexpand', 0))
yfill = int(attrs.get('yfill', 0))
xexpand = int(attrs.get('xexpand', 1))
xfill = int(attrs.get('xfill', 1))
colspan = int(attrs.get('colspan', 1))
if node.localName == 'image':
common.ICONFACTORY.register_icon(attrs['name'])
icon = gtk.Image()
icon.set_from_stock(attrs['name'], gtk.ICON_SIZE_DIALOG)
container.wid_add(icon,
help_tip=attrs.get('help', False),
colspan=colspan,
yexpand=yexpand, yfill=yfill, ypadding=10,
xexpand=xexpand, xfill=xfill)
elif node.localName == 'separator':
text = attrs.get('string', '')
vbox = VBox(attrs=attrs)
if text:
label = gtk.Label(text)
label.set_use_markup(True)
label.set_alignment(float(attrs.get('align', 0.0)), 0.5)
vbox.pack_start(label)
vbox.pack_start(gtk.HSeparator())
container.wid_add(vbox,
help_tip=attrs.get('help', False),
colspan=colspan,
yexpand=yexpand, yfill=yfill, ypadding=10,
xexpand=xexpand, xfill=xfill)
elif node.localName == 'label':
text = attrs.get('string', '')
if not text:
container.empty_add(int(attrs.get('colspan', 1)))
continue
label = gtk.Label(text)
label.set_use_markup(True)
label.set_alignment(float(attrs.get('xalign', 1.0)),
float(attrs.get('yalign', 0.0)))
label.set_angle(int(attrs.get('angle', 0)))
                xexpand = bool(int(attrs.get('xexpand', 0)))
container.wid_add(label,
help_tip=attrs.get('help', False),
colspan=colspan,
yexpand=yexpand, yfill=yfill,
xexpand=xexpand, xfill=xfill)
elif node.localName == 'newline':
container.newline()
elif node.localName == 'notebook':
notebook = gtk.Notebook()
if CONFIG['client.form_tab'] == 'top':
pos = gtk.POS_TOP
elif CONFIG['client.form_tab'] == 'left':
pos = gtk.POS_LEFT
elif CONFIG['client.form_tab'] == 'right':
pos = gtk.POS_RIGHT
elif CONFIG['client.form_tab'] == 'bottom':
pos = gtk.POS_BOTTOM
notebook.set_tab_pos(pos)
notebook.set_border_width(3)
container.wid_add(notebook,
colspan=int(attrs.get('colspan', 4)),
yexpand=True, yfill=True)
widget, new_widgets = self.parse(node, notebook,
tooltips=tooltips)
widgets += new_widgets
elif node.localName == 'page':
if CONFIG['client.form_tab'] == 'left':
angle = 90
elif CONFIG['client.form_tab'] == 'right':
angle = -90
else:
angle = 0
label = gtk.Label(attrs.get('string', _('No String Attr.')))
label.set_angle(angle)
widget, new_widgets = self.parse(node, notebook,
tooltips=tooltips)
widgets += new_widgets
notebook.append_page(widget, label)
elif node.localName == 'group':
widget, new_widgets = self.parse(node, tooltips=tooltips)
widgets += new_widgets
if attrs.get('string', None):
frame = gtk.Frame(attrs['string'])
frame.add(widget)
else:
frame = widget
container.wid_add(frame,
colspan=colspan,
yexpand=yexpand, yfill=yfill, ypadding=0,
xexpand=xexpand, xfill=xfill, xpadding=0)
elif node.localName == 'hpaned':
hpaned = gtk.HPaned()
container.wid_add(hpaned, colspan=int(attrs.get('colspan', 4)),
yexpand=True, yfill=True)
widget, new_widgets = self.parse(node, paned=hpaned,
tooltips=tooltips)
widgets += new_widgets
if 'position' in attrs:
hpaned.set_position(int(attrs['position']))
elif node.localName == 'vpaned':
vpaned = gtk.VPaned()
container.wid_add(vpaned, colspan=int(attrs.get('colspan', 4)),
yexpand=True, yfill=True)
widget, new_widgets = self.parse(node, paned=vpaned,
tooltips=tooltips)
widgets += new_widgets
if 'position' in attrs:
vpaned.set_position(int(attrs['position']))
elif node.localName == 'child':
widget, new_widgets = self.parse(node, paned=paned,
tooltips=tooltips)
widgets += new_widgets
if not paned.get_child1():
paned.pack1(widget, resize=True, shrink=True)
elif not paned.get_child2():
paned.pack2(widget, resize=True, shrink=True)
elif node.localName == 'action':
widget_act = Action(attrs, self.context)
widgets.append(widget_act)
                yexpand = bool(int(attrs.get('yexpand', 1)))
                yfill = bool(int(attrs.get('yfill', 1)))
container.wid_add(widget_act.widget,
colspan=colspan,
yexpand=yexpand, yfill=yfill,
xexpand=xexpand, xfill=xfill)
return container.pop(), widgets
| gpl-3.0 | -5,123,612,658,021,064,000 | 43.093168 | 79 | 0.499084 | false |
simongoffin/website_version | addons/edi/models/edi.py | 12 | 31987 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2014 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import hashlib
import simplejson as json
import logging
import re
import time
import urllib2
import openerp
import openerp.release as release
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
EXTERNAL_ID_PATTERN = re.compile(r'^([^.:]+)(?::([^.]+))?\.(\S+)$')
EDI_VIEW_WEB_URL = '%s/edi/view?db=%s&token=%s'
EDI_PROTOCOL_VERSION = 1 # arbitrary ever-increasing version number
EDI_GENERATOR = 'OpenERP ' + release.major_version
EDI_GENERATOR_VERSION = release.version_info
def split_external_id(ext_id):
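    """Split a fully-qualified external ID of the form
    ``module(:db_uuid).identifier`` into its components."""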
match = EXTERNAL_ID_PATTERN.match(ext_id)
assert match, \
_("'%s' is an invalid external ID") % (ext_id)
return {'module': match.group(1),
'db_uuid': match.group(2),
'id': match.group(3),
'full': match.group(0)}
def safe_unique_id(database_id, model, record_id):
"""Generate a unique string to represent a (database_uuid,model,record_id) pair
without being too long, and with a very low probability of collisions.
"""
msg = "%s-%s-%s-%s" % (time.time(), database_id, model, record_id)
digest = hashlib.sha1(msg).digest()
# fold the sha1 20 bytes digest to 9 bytes
digest = ''.join(chr(ord(x) ^ ord(y)) for (x,y) in zip(digest[:9], digest[9:-2]))
# b64-encode the 9-bytes folded digest to a reasonable 12 chars ASCII ID
digest = base64.urlsafe_b64encode(digest)
return '%s-%s' % (model.replace('.','_'), digest)
def last_update_for(record):
"""Returns the last update timestamp for the given record,
if available, otherwise False
"""
if record._log_access:
record_log = record.get_metadata()[0]
return record_log.get('write_date') or record_log.get('create_date') or False
return False
class edi(osv.AbstractModel):
_name = 'edi.edi'
_description = 'EDI Subsystem'
def new_edi_token(self, cr, uid, record):
"""Return a new, random unique token to identify this model record,
and to be used as token when exporting it as an EDI document.
:param browse_record record: model record for which a token is needed
"""
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
edi_token = hashlib.sha256('%s-%s-%s-%s' % (time.time(), db_uuid, record._name, record.id)).hexdigest()
return edi_token
def serialize(self, edi_documents):
"""Serialize the given EDI document structures (Python dicts holding EDI data),
using JSON serialization.
:param [dict] edi_documents: list of EDI document structures to serialize
:return: UTF-8 encoded string containing the serialized document
"""
serialized_list = json.dumps(edi_documents)
return serialized_list
def generate_edi(self, cr, uid, records, context=None):
"""Generates a final EDI document containing the EDI serialization
of the given records, which should all be instances of a Model
that has the :meth:`~.edi` mixin. The document is not saved in the
database.
:param list(browse_record) records: records to export as EDI
:return: UTF-8 encoded string containing the serialized records
"""
edi_list = []
for record in records:
record_model = record._model
edi_list += record_model.edi_export(cr, uid, [record], context=context)
return self.serialize(edi_list)
def load_edi(self, cr, uid, edi_documents, context=None):
"""Import the given EDI document structures into the system, using
:meth:`~.import_edi`.
:param edi_documents: list of Python dicts containing the deserialized
version of EDI documents
:return: list of (model, id, action) tuple containing the model and database ID
of all records that were imported in the system, plus a suggested
action definition dict for displaying each document.
"""
ir_module = self.pool.get('ir.module.module')
res = []
for edi_document in edi_documents:
module = edi_document.get('__import_module') or edi_document.get('__module')
assert module, 'a `__module` or `__import_module` attribute is required in each EDI document.'
if module != 'base' and not ir_module.search(cr, uid, [('name','=',module),('state','=','installed')]):
raise osv.except_osv(_('Missing Application.'),
_("The document you are trying to import requires the OpenERP `%s` application. "
"You can install it by connecting as the administrator and opening the configuration assistant.")%(module,))
model = edi_document.get('__import_model') or edi_document.get('__model')
assert model, 'a `__model` or `__import_model` attribute is required in each EDI document.'
assert model in self.pool, 'model `%s` cannot be found, despite module `%s` being available - '\
'this EDI document seems invalid or unsupported.' % (model,module)
model_obj = self.pool[model]
record_id = model_obj.edi_import(cr, uid, edi_document, context=context)
record_action = model_obj._edi_record_display_action(cr, uid, record_id, context=context)
res.append((model, record_id, record_action))
return res
def deserialize(self, edi_documents_string):
"""Return deserialized version of the given EDI Document string.
:param str|unicode edi_documents_string: UTF-8 string (or unicode) containing
JSON-serialized EDI document(s)
:return: Python object representing the EDI document(s) (usually a list of dicts)
"""
return json.loads(edi_documents_string)
def import_edi(self, cr, uid, edi_document=None, edi_url=None, context=None):
"""Import a JSON serialized EDI Document string into the system, first retrieving it
from the given ``edi_url`` if provided.
:param str|unicode edi: UTF-8 string or unicode containing JSON-serialized
EDI Document to import. Must not be provided if
``edi_url`` is given.
:param str|unicode edi_url: URL where the EDI document (same format as ``edi``)
may be retrieved, without authentication.
"""
if edi_url:
assert not edi_document, 'edi must not be provided if edi_url is given.'
edi_document = urllib2.urlopen(edi_url).read()
assert edi_document, 'EDI Document is empty!'
edi_documents = self.deserialize(edi_document)
return self.load_edi(cr, uid, edi_documents, context=context)
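
# A minimal usage sketch (the file path and the cr/uid variables are
# hypothetical, shown for illustration only):
#
#     edi_obj = self.pool.get('edi.edi')
#     with open('/tmp/document.json') as f:
#         results = edi_obj.import_edi(cr, uid, edi_document=f.read())
#     # `results` is a list of (model, record_id, action) tuples.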
class EDIMixin(object):
"""Mixin class for Model objects that want be exposed as EDI documents.
Classes that inherit from this mixin class should override the
``edi_import()`` and ``edi_export()`` methods to implement their
specific behavior, based on the primitives provided by this mixin."""
def _edi_requires_attributes(self, attributes, edi):
        model_name = edi.get('__import_model') or edi.get('__model') or self._name
for attribute in attributes:
assert edi.get(attribute),\
'Attribute `%s` is required in %s EDI documents.' % (attribute, model_name)
# private method, not RPC-exposed as it creates ir.model.data entries as
# SUPERUSER based on its parameters
def _edi_external_id(self, cr, uid, record, existing_id=None, existing_module=None,
context=None):
"""Generate/Retrieve unique external ID for ``record``.
Each EDI record and each relationship attribute in it is identified by a
unique external ID, which includes the database's UUID, as a way to
refer to any record within any OpenERP instance, without conflict.
For OpenERP records that have an existing "External ID" (i.e. an entry in
ir.model.data), the EDI unique identifier for this record will be made of
"%s:%s:%s" % (module, database UUID, ir.model.data ID). The database's
UUID MUST NOT contain a colon characters (this is guaranteed by the
UUID algorithm).
For records that have no existing ir.model.data entry, a new one will be
created during the EDI export. It is recommended that the generated external ID
contains a readable reference to the record model, plus a unique value that
hides the database ID. If ``existing_id`` is provided (because it came from
an import), it will be used instead of generating a new one.
If ``existing_module`` is provided (because it came from
an import), it will be used instead of using local values.
:param browse_record record: any browse_record needing an EDI external ID
:param string existing_id: optional existing external ID value, usually coming
from a just-imported EDI record, to be used instead
of generating a new one
:param string existing_module: optional existing module name, usually in the
format ``module:db_uuid`` and coming from a
just-imported EDI record, to be used instead
of local values
:return: the full unique External ID to use for record
"""
ir_model_data = self.pool.get('ir.model.data')
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
ext_id = record.get_external_id()[record.id]
if not ext_id:
ext_id = existing_id or safe_unique_id(db_uuid, record._name, record.id)
# ID is unique cross-db thanks to db_uuid (already included in existing_module)
module = existing_module or "%s:%s" % (record._original_module, db_uuid)
_logger.debug("%s: Generating new external ID `%s.%s` for %r.", self._name,
module, ext_id, record)
ir_model_data.create(cr, openerp.SUPERUSER_ID,
{'name': ext_id,
'model': record._name,
'module': module,
'res_id': record.id})
else:
module, ext_id = ext_id.split('.')
            if ':' not in module:
# this record was not previously EDI-imported
if not module == record._original_module:
# this could happen for data records defined in a module that depends
# on the module that owns the model, e.g. purchase defines
# product.pricelist records.
_logger.debug('Mismatching module: expected %s, got %s, for %s.',
module, record._original_module, record)
# ID is unique cross-db thanks to db_uuid
module = "%s:%s" % (module, db_uuid)
return '%s.%s' % (module, ext_id)
def _edi_record_display_action(self, cr, uid, id, context=None):
"""Returns an appropriate action definition dict for displaying
the record with ID ``rec_id``.
:param int id: database ID of record to display
:return: action definition dict
"""
return {'type': 'ir.actions.act_window',
'view_mode': 'form,tree',
'view_type': 'form',
'res_model': self._name,
'res_id': id}
def edi_metadata(self, cr, uid, records, context=None):
"""Return a list containing the boilerplate EDI structures for
exporting ``records`` as EDI, including
the metadata fields
The metadata fields always include::
{
'__model': 'some.model', # record model
'__module': 'module', # require module
'__id': 'module:db-uuid:model.id', # unique global external ID for the record
'__last_update': '2011-01-01 10:00:00', # last update date in UTC!
'__version': 1, # EDI spec version
'__generator' : 'OpenERP', # EDI generator
'__generator_version' : [6,1,0], # server version, to check compatibility.
'__attachments_':
}
:param list(browse_record) records: records to export
:return: list of dicts containing boilerplate EDI metadata for each record,
at the corresponding index from ``records``.
"""
ir_attachment = self.pool.get('ir.attachment')
results = []
for record in records:
ext_id = self._edi_external_id(cr, uid, record, context=context)
edi_dict = {
'__id': ext_id,
'__last_update': last_update_for(record),
'__model' : record._name,
'__module' : record._original_module,
'__version': EDI_PROTOCOL_VERSION,
'__generator': EDI_GENERATOR,
'__generator_version': EDI_GENERATOR_VERSION,
}
attachment_ids = ir_attachment.search(cr, uid, [('res_model','=', record._name), ('res_id', '=', record.id)])
if attachment_ids:
attachments = []
for attachment in ir_attachment.browse(cr, uid, attachment_ids, context=context):
attachments.append({
'name' : attachment.name,
'content': attachment.datas, # already base64 encoded!
'file_name': attachment.datas_fname,
})
edi_dict.update(__attachments=attachments)
results.append(edi_dict)
return results
def edi_m2o(self, cr, uid, record, context=None):
"""Return a m2o EDI representation for the given record.
The EDI format for a many2one is::
['unique_external_id', 'Document Name']
"""
edi_ext_id = self._edi_external_id(cr, uid, record, context=context)
relation_model = record._model
name = relation_model.name_get(cr, uid, [record.id], context=context)
name = name and name[0][1] or False
return [edi_ext_id, name]
def edi_o2m(self, cr, uid, records, edi_struct=None, context=None):
"""Return a list representing a O2M EDI relationship containing
all the given records, according to the given ``edi_struct``.
This is basically the same as exporting all the record using
:meth:`~.edi_export` with the given ``edi_struct``, and wrapping
the results in a list.
Example::
[ # O2M fields would be a list of dicts, with their
{ '__id': 'module:db-uuid.id', # own __id.
'__last_update': 'iso date', # update date
'name': 'some name',
#...
},
# ...
],
"""
result = []
for record in records:
result += record._model.edi_export(cr, uid, [record], edi_struct=edi_struct, context=context)
return result
def edi_m2m(self, cr, uid, records, context=None):
"""Return a list representing a M2M EDI relationship directed towards
all the given records.
This is basically the same as exporting all the record using
:meth:`~.edi_m2o` and wrapping the results in a list.
Example::
# M2M fields are exported as a list of pairs, like a list of M2O values
[
['module:db-uuid.id1', 'Task 01: bla bla'],
['module:db-uuid.id2', 'Task 02: bla bla']
]
"""
return [self.edi_m2o(cr, uid, r, context=context) for r in records]
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Returns a list of dicts representing EDI documents containing the
records, and matching the given ``edi_struct``, if provided.
:param edi_struct: if provided, edi_struct should be a dictionary
with a skeleton of the fields to export.
Basic fields can have any key as value, but o2m
values should have a sample skeleton dict as value,
to act like a recursive export.
For example, for a res.partner record::
edi_struct: {
'name': True,
'company_id': True,
'address': {
'name': True,
'street': True,
}
}
Any field not specified in the edi_struct will not
be included in the exported data. Fields with no
value (False) will be omitted in the EDI struct.
If edi_struct is omitted, no fields will be exported
"""
if edi_struct is None:
edi_struct = {}
fields_to_export = edi_struct.keys()
results = []
for record in records:
edi_dict = self.edi_metadata(cr, uid, [record], context=context)[0]
for field in fields_to_export:
column = self._all_columns[field].column
value = getattr(record, field)
if not value and value not in ('', 0):
continue
elif column._type == 'many2one':
value = self.edi_m2o(cr, uid, value, context=context)
elif column._type == 'many2many':
value = self.edi_m2m(cr, uid, value, context=context)
elif column._type == 'one2many':
value = self.edi_o2m(cr, uid, value, edi_struct=edi_struct.get(field, {}), context=context)
edi_dict[field] = value
results.append(edi_dict)
return results
def _edi_get_object_by_name(self, cr, uid, name, model_name, context=None):
model = self.pool[model_name]
search_results = model.name_search(cr, uid, name, operator='=', context=context)
if len(search_results) == 1:
return model.browse(cr, uid, search_results[0][0], context=context)
return False
def _edi_generate_report_attachment(self, cr, uid, record, context=None):
"""Utility method to generate the first PDF-type report declared for the
current model with ``usage`` attribute set to ``default``.
This must be called explicitly by models that need it, usually
at the beginning of ``edi_export``, before the call to ``super()``."""
ir_actions_report = self.pool.get('ir.actions.report.xml')
matching_reports = ir_actions_report.search(cr, uid, [('model','=',self._name),
('report_type','=','pdf'),
('usage','=','default')])
if matching_reports:
report = ir_actions_report.browse(cr, uid, matching_reports[0])
result, format = openerp.report.render_report(cr, uid, [record.id], report.report_name, {'model': self._name}, context=context)
eval_context = {'time': time, 'object': record}
if not report.attachment or not eval(report.attachment, eval_context):
# no auto-saving of report as attachment, need to do it manually
result = base64.b64encode(result)
file_name = record.name_get()[0][1]
file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
file_name += ".pdf"
self.pool.get('ir.attachment').create(cr, uid,
{
'name': file_name,
'datas': result,
'datas_fname': file_name,
'res_model': self._name,
'res_id': record.id,
'type': 'binary'
},
context=context)
def _edi_import_attachments(self, cr, uid, record_id, edi, context=None):
ir_attachment = self.pool.get('ir.attachment')
for attachment in edi.get('__attachments', []):
# check attachment data is non-empty and valid
file_data = None
try:
file_data = base64.b64decode(attachment.get('content'))
except TypeError:
pass
assert file_data, 'Incorrect/Missing attachment file content.'
assert attachment.get('name'), 'Incorrect/Missing attachment name.'
            assert attachment.get('file_name'), 'Incorrect/Missing attachment file name.'
ir_attachment.create(cr, uid, {'name': attachment['name'],
'datas_fname': attachment['file_name'],
'res_model': self._name,
'res_id': record_id,
# should be pure 7bit ASCII
'datas': str(attachment['content']),
}, context=context)
def _edi_get_object_by_external_id(self, cr, uid, external_id, model, context=None):
"""Returns browse_record representing object identified by the model and external_id,
or None if no record was found with this external id.
:param external_id: fully qualified external id, in the EDI form
``module:db_uuid:identifier``.
:param model: model name the record belongs to.
"""
ir_model_data = self.pool.get('ir.model.data')
# external_id is expected to have the form: ``module:db_uuid:model.random_name``
ext_id_members = split_external_id(external_id)
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
module = ext_id_members['module']
ext_id = ext_id_members['id']
modules = []
ext_db_uuid = ext_id_members['db_uuid']
if ext_db_uuid:
modules.append('%s:%s' % (module, ext_id_members['db_uuid']))
if ext_db_uuid is None or ext_db_uuid == db_uuid:
# local records may also be registered without the db_uuid
modules.append(module)
data_ids = ir_model_data.search(cr, uid, [('model','=',model),
('name','=',ext_id),
('module','in',modules)])
if data_ids:
model = self.pool[model]
data = ir_model_data.browse(cr, uid, data_ids[0], context=context)
if model.exists(cr, uid, [data.res_id]):
return model.browse(cr, uid, data.res_id, context=context)
# stale external-id, cleanup to allow re-import, as the corresponding record is gone
ir_model_data.unlink(cr, 1, [data_ids[0]])
def edi_import_relation(self, cr, uid, model, value, external_id, context=None):
"""Imports a M2O/M2M relation EDI specification ``[external_id,value]`` for the
given model, returning the corresponding database ID:
* First, checks if the ``external_id`` is already known, in which case the corresponding
database ID is directly returned, without doing anything else;
* If the ``external_id`` is unknown, attempts to locate an existing record
with the same ``value`` via name_search(). If found, the given external_id will
be assigned to this local record (in addition to any existing one)
* If previous steps gave no result, create a new record with the given
value in the target model, assign it the given external_id, and return
the new database ID
:param str value: display name of the record to import
:param str external_id: fully-qualified external ID of the record
:return: database id of newly-imported or pre-existing record
"""
_logger.debug("%s: Importing EDI relationship [%r,%r]", model, external_id, value)
target = self._edi_get_object_by_external_id(cr, uid, external_id, model, context=context)
need_new_ext_id = False
if not target:
_logger.debug("%s: Importing EDI relationship [%r,%r] - ID not found, trying name_get.",
self._name, external_id, value)
target = self._edi_get_object_by_name(cr, uid, value, model, context=context)
need_new_ext_id = True
if not target:
_logger.debug("%s: Importing EDI relationship [%r,%r] - name not found, creating it.",
self._name, external_id, value)
# also need_new_ext_id here, but already been set above
model = self.pool[model]
res_id, _ = model.name_create(cr, uid, value, context=context)
target = model.browse(cr, uid, res_id, context=context)
else:
_logger.debug("%s: Importing EDI relationship [%r,%r] - record already exists with ID %s, using it",
self._name, external_id, value, target.id)
if need_new_ext_id:
ext_id_members = split_external_id(external_id)
# module name is never used bare when creating ir.model.data entries, in order
# to avoid being taken as part of the module's data, and cleanup up at next update
module = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
# create a new ir.model.data entry for this value
self._edi_external_id(cr, uid, target, existing_id=ext_id_members['id'], existing_module=module, context=context)
return target.id
def edi_import(self, cr, uid, edi, context=None):
"""Imports a dict representing an EDI document into the system.
:param dict edi: EDI document to import
:return: the database ID of the imported record
"""
assert self._name == edi.get('__import_model') or \
('__import_model' not in edi and self._name == edi.get('__model')), \
"EDI Document Model and current model do not match: '%s' (EDI) vs '%s' (current)." % \
(edi.get('__model'), self._name)
# First check the record is now already known in the database, in which case it is ignored
ext_id_members = split_external_id(edi['__id'])
existing = self._edi_get_object_by_external_id(cr, uid, ext_id_members['full'], self._name, context=context)
if existing:
_logger.info("'%s' EDI Document with ID '%s' is already known, skipping import!", self._name, ext_id_members['full'])
return existing.id
record_values = {}
o2m_todo = {} # o2m values are processed after their parent already exists
for field_name, field_value in edi.iteritems():
# skip metadata and empty fields
if field_name.startswith('__') or field_value is None or field_value is False:
continue
field_info = self._all_columns.get(field_name)
if not field_info:
_logger.warning('Ignoring unknown field `%s` when importing `%s` EDI document.', field_name, self._name)
continue
field = field_info.column
# skip function/related fields
if isinstance(field, fields.function):
_logger.warning("Unexpected function field value is found in '%s' EDI document: '%s'." % (self._name, field_name))
continue
relation_model = field._obj
if field._type == 'many2one':
record_values[field_name] = self.edi_import_relation(cr, uid, relation_model,
field_value[1], field_value[0],
context=context)
elif field._type == 'many2many':
record_values[field_name] = [self.edi_import_relation(cr, uid, relation_model, m2m_value[1],
m2m_value[0], context=context)
for m2m_value in field_value]
elif field._type == 'one2many':
# must wait until parent report is imported, as the parent relationship
# is often required in o2m child records
o2m_todo[field_name] = field_value
else:
record_values[field_name] = field_value
module_ref = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
record_id = self.pool.get('ir.model.data')._update(cr, uid, self._name, module_ref, record_values,
xml_id=ext_id_members['id'], context=context)
record_display, = self.name_get(cr, uid, [record_id], context=context)
# process o2m values, connecting them to their parent on-the-fly
for o2m_field, o2m_value in o2m_todo.iteritems():
field = self._all_columns[o2m_field].column
dest_model = self.pool[field._obj]
for o2m_line in o2m_value:
# link to parent record: expects an (ext_id, name) pair
o2m_line[field._fields_id] = (ext_id_members['full'], record_display[1])
dest_model.edi_import(cr, uid, o2m_line, context=context)
# process the attachments, if any
self._edi_import_attachments(cr, uid, record_id, edi, context=context)
return record_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,419,222,489,371,158,000 | 51.783828 | 139 | 0.557914 | false |
rhinstaller/anaconda | pyanaconda/modules/boss/module_manager/module_observer.py | 6 | 2634 | #
# Copyright (C) 2019 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from pyanaconda.anaconda_loggers import get_module_logger
from dasbus.namespace import get_namespace_from_name, get_dbus_path
from dasbus.client.observer import DBusObserver, DBusObserverError
log = get_module_logger(__name__)
class ModuleObserver(DBusObserver):
"""Observer of an Anaconda module."""
def __init__(self, message_bus, service_name, is_addon=False):
"""Creates a module observer.
:param message_bus: a message bus
:param service_name: a DBus name of a service
:param is_addon: is the observed module an addon?
"""
super().__init__(message_bus, service_name)
self._proxy = None
self._is_addon = is_addon
self._namespace = get_namespace_from_name(service_name)
self._object_path = get_dbus_path(*self._namespace)
@property
def is_addon(self):
"""Is the observed module an addon?
:return: True or False
"""
return self._is_addon
@property
def proxy(self):
"""Returns a proxy of the remote object."""
if not self._is_service_available:
raise DBusObserverError("Service {} is not available."
.format(self._service_name))
if not self._proxy:
self._proxy = self._message_bus.get_proxy(self._service_name,
self._object_path)
return self._proxy
def _enable_service(self):
"""Enable the service."""
self._proxy = None
super()._enable_service()
def _disable_service(self):
"""Disable the service"""
self._proxy = None
super()._disable_service()
def __repr__(self):
"""Returns a string representation."""
return "{}({},{})".format(self.__class__.__name__,
self._service_name,
self._object_path)
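# Minimal usage sketch (illustrative; assumes a connected message bus and that
# the surrounding machinery has already observed the service as available):
#
#   observer = ModuleObserver(message_bus, "org.fedoraproject.Anaconda.Modules.Payloads")
#   proxy = observer.proxy  # raises DBusObserverError if the service is unavailable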
| gpl-2.0 | 6,155,446,154,967,560,000 | 34.12 | 73 | 0.615034 | false |
m-entrup/EFTEMj | EFTEMj-pyScripts/src/main/resources/scripts/Plugins/EFTEMj/Tools/Gatan_Renamer.py | 1 | 4615 | """
@ImagePlus IMP
file: Gatan_Renamer.py
author: Michael Entrup b. Epping ([email protected])
version: 20170306
info: A script to rename dm3 files using metadata.
"""
# pylint: disable-msg=C0103
# pylint: enable-msg=C0103
from __future__ import division
# pylint: disable-msg=E0401
from java.lang import Double
from java.text import SimpleDateFormat
from java.text import ParsePosition
from ij.gui import GenericDialog
from de.m_entrup.EFTEMj_lib.tools import GatanMetadataExtractor
# pylint: enable-msg=E0401
# pylint: disable-msg=E0602
IMP = IMP
# pylint: enable-msg=E0602
# Some general settings:
DEFAULT_FORMAT_STR = '%(date)s_%(mag)s_%(dE)s_%(exp)s_%(name)s'
FIELD_WIDTH = 10
FIELD_WIDTH_LONG = 40
# End of settings.
class ImageProperties:
"""A class that uses de.m_entrup.EFTEMj_lib.tools.GatanMetadataExtractor.
Metadata from a dm3 file is gathered to be used for renaming.
"""
def __init__(self, imp):
"""Get the metadata from the given dm3 image.
"""
extractor = GatanMetadataExtractor(imp)
self.exposure = extractor.getExposure()
self.magnification = extractor.getMagnification()
self.mag_factor = extractor.getActualMagnification() / self.magnification
self.mag_unit = 'x'
if not Double.isNaN(extractor.getEnergyloss()):
self.energyloss = extractor.getEnergyloss()
else:
self.energyloss = 0
self.date = extractor.getDateAndTime()
self.name = extractor.getName()
def calc_mag(self, mag):
"""Use the magnification factor to calculate the actual magnification.
"""
self.magnification = self.mag_factor * mag
def to_dict(self):
"""Create a dictionary from the metadata to be used for string formating.
"""
prop_dict = {}
prop_dict['exp'] = '%gs' % (self.exposure,)
prop_dict['dE'] = '%geV' % (self.energyloss,)
prop_dict['date'] = self.date_to_string()
mag = self.magnification
if self.mag_unit.lower() == 'kx':
mag /= 1000
prop_dict['mag'] = '%.3g%s' % (mag, self.mag_unit)
else:
prop_dict['mag'] = '%.0f%s' % (mag, self.mag_unit)
prop_dict['name'] = self.name
return prop_dict
def date_to_string(self):
"""Returns the date as a formated string.
"""
        date_formatter = SimpleDateFormat('yyyyMMdd')
        return date_formatter.format(self.date)
def parse_date(self, date_string):
"""Reads a date from the given string.
:param date_string: String to parse.
"""
        date_formatter = SimpleDateFormat('yyyyMMdd')
        self.date = date_formatter.parse(date_string, ParsePosition(0))
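# Illustrative result of the naming scheme (hypothetical metadata values):
#   DEFAULT_FORMAT_STR % ImageProperties(imp).to_dict()
#   -> '20170306_10kx_100eV_1s_my-image'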
def run_script(imp):
"""Function to be run when this file is used as a script
"""
properties = ImageProperties(imp)
# Create a GenericDialog to configure renaming:
    dialog = GenericDialog('Gatan Renamer')
dialog.addMessage('Modifying: %s' % (imp.getTitle(),))
dialog.addMessage('Recorded: %s' % (properties.date.toString(),))
dialog.addNumericField(
'Exposure time', properties.exposure, 4, FIELD_WIDTH, 's')
dialog.addNumericField(
'Magnification:', properties.magnification, 0, FIELD_WIDTH, 'x')
mag_units = ('kx', 'x')
dialog.addChoice('Magnification unit:', mag_units, mag_units[0])
dialog.addMessage(
'The actual magnification is %.2f times larger.' % (properties.mag_factor,))
dialog.addCheckbox('Use actual magnification:', False)
dialog.addMessage('')
dialog.addNumericField(
'Energy loss:', properties.energyloss, 1, FIELD_WIDTH, 'eV')
dialog.addStringField('Date:', properties.date_to_string(), FIELD_WIDTH)
dialog.addStringField('original name:', properties.name, FIELD_WIDTH_LONG)
dialog.addStringField(
'Filename format', DEFAULT_FORMAT_STR, FIELD_WIDTH_LONG)
dialog.showDialog()
if not dialog.wasCanceled():
        # Edit the properties to consider user choices:
properties.exposure = dialog.getNextNumber()
mag = dialog.getNextNumber()
properties.mag_unit = dialog.getNextChoice()
if dialog.getNextBoolean():
properties.calc_mag(mag)
properties.energyloss = dialog.getNextNumber()
properties.parse_date(dialog.getNextString())
properties.name = dialog.getNextString()
format_str = dialog.getNextString()
        # Change the title:
imp.setTitle(format_str % properties.to_dict())
if __name__ == '__main__':
run_script(IMP)
| bsd-2-clause | 5,575,381,343,705,638,000 | 34.5 | 84 | 0.645287 | false |
paxapy/zulip | zerver/lib/test_fixtures.py | 4 | 4225 | # -*- coding: utf-8 -*-
import os
import re
import hashlib
from typing import Any, List, Optional
from importlib import import_module
from six import text_type
from six.moves import cStringIO as StringIO
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.utils import OperationalError
from django.apps import apps
from django.core.management import call_command
from django.utils.module_loading import module_has_submodule
FILENAME_SPLITTER = re.compile('[\W\-_]')
TEST_DB_STATUS_DIR = 'var/test_db_status'
def database_exists(database_name, **options):
# type: (text_type, **Any) -> bool
db = options.get('database', DEFAULT_DB_ALIAS)
try:
connection = connections[db]
with connection.cursor() as cursor:
cursor.execute("SELECT 1 from pg_database WHERE datname='{}';".format(database_name))
return_value = bool(cursor.fetchone())
connections.close_all()
return return_value
except OperationalError:
return False
def get_migration_status(**options):
# type: (**Any) -> str
verbosity = options.get('verbosity', 1)
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
app_labels = [options['app_label']] if options.get('app_label') else None
db = options.get('database', DEFAULT_DB_ALIAS)
out = StringIO()
call_command(
'showmigrations',
'--list',
app_labels=app_labels,
database=db,
no_color=options.get('no_color', False),
settings=options.get('settings', os.environ['DJANGO_SETTINGS_MODULE']),
stdout=out,
traceback=options.get('traceback', True),
verbosity=verbosity,
)
connections.close_all()
out.seek(0)
output = out.read()
return re.sub('\x1b\[(1|0)m', '', output)
def are_migrations_the_same(migration_file, **options):
# type: (text_type, **Any) -> bool
if not os.path.exists(migration_file):
return False
with open(migration_file) as f:
migration_content = f.read()
return migration_content == get_migration_status(**options)
def _get_hash_file_path(source_file_path):
# type: (str) -> str
basename = os.path.basename(source_file_path)
filename = '_'.join(FILENAME_SPLITTER.split(basename)).lower()
return os.path.join(TEST_DB_STATUS_DIR, filename)
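# Illustrative mapping performed above (input path hypothetical):
#   _get_hash_file_path('zilencer/management/commands/populate_db.py')
#   -> 'var/test_db_status/populate_db_py'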
def _check_hash(target_hash_file, **options):
# type: (str, **Any) -> bool
"""
This function has a side effect of creating a new hash file or
updating the old hash file.
"""
source_hash_file = _get_hash_file_path(target_hash_file)
with open(target_hash_file) as f:
target_hash_content = hashlib.sha1(f.read().encode('utf8')).hexdigest()
if os.path.exists(source_hash_file):
with open(source_hash_file) as f:
source_hash_content = f.read().strip()
else:
source_hash_content = None
with open(source_hash_file, 'w') as f:
f.write(target_hash_content)
return source_hash_content == target_hash_content
def is_template_database_current(
database_name='zulip_test_template',
migration_status='var/migration-status',
settings='zproject.test_settings',
check_files=None
):
# type: (Optional[text_type], Optional[text_type], Optional[text_type], Optional[List[str]]) -> bool
# Using str type for check_files because re.split doesn't accept unicode
if check_files is None:
check_files = [
'zilencer/management/commands/populate_db.py',
'tools/setup/postgres-init-test-db',
'tools/setup/postgres-init-dev-db',
]
if not os.path.exists(TEST_DB_STATUS_DIR):
os.mkdir(TEST_DB_STATUS_DIR)
if database_exists(database_name):
# To ensure Python evaluates all the hash tests (and thus creates the
# hash files about the current state), we evaluate them in a
# list and then process the result
hash_status = all([_check_hash(fn) for fn in check_files])
return are_migrations_the_same(migration_status, settings=settings) and hash_status
return False
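# Typical call, using the defaults above (sketch):
#   if not is_template_database_current():
#       ...recreate the zulip_test_template database...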
| apache-2.0 | 2,799,866,729,595,887,600 | 33.631148 | 104 | 0.652781 | false |
ladybug-analysis-tools/honeybee-core | honeybee/radiance/geometry/source.py | 3 | 3444 | """Radiance Source.
http://radsite.lbl.gov/radiance/refer/ray.html#Source
"""
from .geometrybase import RadianceGeometry
from ..datatype import RadianceNumber, RadianceTuple
class Source(RadianceGeometry):
"""Radiance Source.
A source is not really a surface, but a solid angle. It is used for specifying light
sources that are very distant. The direction to the center of the source and the
number of degrees subtended by its disk are given as follows:
mod source id
0
0
4 xdir ydir zdir angle
"""
direction = RadianceTuple('direction', tuple_size=3, num_type=float)
angle = RadianceNumber('angle', num_type=float, check_positive=True)
def __init__(self, name, direction=None, angle=None, modifier=None):
"""Radiance Source.
Attributes:
name: Geometry name as a string. Do not use white space and special
character.
direction: A vector to set source direction (x, y, z) (Default: (0, 0 ,-1)).
angle: Source solid angle (Default: 0.533).
modifier: Geometry modifier (Default: "void").
Usage:
source = Source("test_source", (0, 0, 10), 10)
print(source)
"""
RadianceGeometry.__init__(self, name, modifier=modifier)
self.direction = direction or (0, 0, -1)
self.angle = angle or 0.533
self._update_values()
@classmethod
def from_string(cls, geometry_string, modifier=None):
"""Create a Radiance material from a string.
If the material has a modifier the modifier material should also be part of the
string or should be provided using modifier argument.
"""
modifier, name, base_geometry_data = cls._analyze_string_input(
cls.__name__.lower(), geometry_string, modifier)
cx, cy, cz, angle = base_geometry_data[3:]
return cls(name, (cx, cy, cz), angle, modifier)
@classmethod
def from_json(cls, geo_json):
"""Make radiance material from json
{
"type": "source", // Geometry type
"modifier": {} or "void",
"name": "", // Geometry Name
"direction": {"x": float, "y": float, "z": float},
"angle": float
}
"""
modifier = cls._analyze_json_input(cls.__name__.lower(), geo_json)
direction = geo_json["direction"]
return cls(name=geo_json["name"],
direction=(direction["x"], direction["y"], direction["z"]),
angle=geo_json["angle"],
modifier=modifier)
def _update_values(self):
"""update value dictionaries."""
self._values[2] = \
[self.direction[0], self.direction[1], self.direction[2], self.angle]
def to_json(self):
"""Translate radiance material to json
{
"type": "source", // Geometry type
"modifier": {} or void, // Modifier
"name": "", // Geometry Name
"direction": {"x": float, "y": float, "z": float},
"angle": float
}
"""
return {
"modifier": self.modifier.to_json(),
"type": "source",
"name": self.name,
"angle": self.angle,
"direction": {"x": self.direction[0],
"y": self.direction[1],
"z": self.direction[2]}
}
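# Round-trip sketch (values from the class docstring's usage example):
#   src = Source("test_source", (0, 0, 10), 10)
#   Source.from_json(src.to_json())  # reconstructs an equivalent source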
| gpl-3.0 | -2,118,213,671,687,488,000 | 33.44 | 88 | 0.558653 | false |
matthieuheitz/VesselView | Modules/Scripted/Workflow/Widgets/WorkflowStep.py | 4 | 9259 | import imp, sys, os
from __main__ import qt, ctk, vtk, slicer
from Workflow import *
class WorkflowStep( ctk.ctkWorkflowWidgetStep ) :
def __init__( self ):
self.Observations = []
def createUserInterface( self ):
    # if the user interface has already been created, quit
# \todo: f) have an option to setup all the gui at startup
if hasattr(self, 'widget'):
return
self.setupUi()
self.widget.setMRMLScene(slicer.mrmlScene)
self.setWorkflowLevel(self.Workflow.level)
def setupUi( self ):
'''\todo automatically retrieve the ui filename.'''
#self.loadUI('LoadDataStep.ui')
pass
def loadUi(self, uiFileName):
widget = self.Workflow.loadUi(uiFileName)
layout = qt.QVBoxLayout(self)
layout.setContentsMargins(0,0,0,0)
layout.addWidget(widget)
self.setLayout(layout)
self.widget = widget
return widget
def get(self, objectName):
return self.findWidget(self.widget, objectName)
def step(self, stepid):
return self.Workflow.step(stepid)
def getChildren(self, object):
'''Return the list of the children and grand children of a Qt object'''
children = object.children()
allChildren = list(children)
for child in children:
allChildren.extend( self.getChildren(child) )
return allChildren
def findWidget(self, widget, objectName):
if widget.objectName == objectName:
return widget
else:
children = []
for w in widget.children():
resulting_widget = self.findWidget(w, objectName)
if resulting_widget:
return resulting_widget
return None
def removeObservers(self, method):
for o, e, m, g, t in self.Observations:
if method == m:
o.RemoveObserver(t)
self.Observations.remove([o, e, m, g, t])
def addObserver(self, object, event, method, group = 'none'):
if self.hasObserver(object, event, method):
print 'already has observer'
return
tag = object.AddObserver(event, method)
self.Observations.append([object, event, method, group, tag])
def hasObserver(self, object, event, method):
for o, e, m, g, t in self.Observations:
if o == object and e == event and m == method:
return True
return False
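  # Sketch of the observe/unobserve pairing used by the steps below
  # (handler name illustrative):
  #   self.addObserver(cliNode,
  #                    slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent,
  #                    self.onCLIModified)
  #   ...later...
  #   self.removeObservers(self.onCLIModified)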
def observer(self, event, method):
for o, e, m, g, t in self.Observations:
if e == event and m == method:
return o
return None
def getCLINode(self, module, nodeName = None):
if not nodeName:
nodeName = module.title
cliNode = slicer.mrmlScene.GetFirstNodeByName(nodeName)
# Also check path to make sure the CLI isn't a scripted module
if (cliNode == None) and ('qt-scripted-modules' not in module.path):
cliNode = slicer.cli.createNode(module)
cliNode.SetName(nodeName)
return cliNode
def loadLabelmapFile(self, title, fileType, nodeComboBox):
volumeNode = self.loadFile(title, fileType, nodeComboBox)
if volumeNode != None:
volumesLogic = slicer.modules.volumes.logic()
volumesLogic.SetVolumeAsLabelMap(volumeNode, 1)
nodeComboBox.setCurrentNode(volumeNode)
def loadFile(self, title, fileType, nodeComboBox):
manager = slicer.app.ioManager()
loadedNodes = vtk.vtkCollection()
properties = {}
res = manager.openDialog(fileType, slicer.qSlicerFileDialog.Read, properties, loadedNodes)
loadedNode = loadedNodes.GetItemAsObject(0)
if res == True:
nodeComboBox.setCurrentNode(loadedNode)
self.reset3DViews()
return loadedNode
def saveFile(self, title, fileType, fileSuffix, nodeComboBox):
self.saveNode(title, fileType, fileSuffix, nodeComboBox.currentNode())
def saveNode(self, title, fileType, fileSuffix, node):
if not node:
return
manager = slicer.app.ioManager()
properties = {}
properties['nodeID'] = node.GetID()
properties['defaultFileName'] = node.GetName() + fileSuffix
manager.openDialog(fileType, slicer.qSlicerFileDialog.Write, properties)
def reset3DViews(self):
# Reset focal view around volumes
manager = slicer.app.layoutManager()
for i in range(0, manager.threeDViewCount):
manager.threeDWidget(i).threeDView().resetFocalPoint()
rendererCollection = manager.threeDWidget(i).threeDView().renderWindow().GetRenderers()
      for j in range(0, rendererCollection.GetNumberOfItems()):
        rendererCollection.GetItemAsObject(j).ResetCamera()
def resetSliceViews(self):
# Reset focal view around volumes
manager = slicer.app.layoutManager()
for i in manager.sliceViewNames():
manager.sliceWidget(i).sliceController().fitSliceToBackground()
def openModule(self, moduleName):
slicer.util.selectModule(moduleName)
def getFirstNodeByNameAndClass(self, name, className):
nodes = slicer.mrmlScene.GetNodesByClass(className)
nodes.UnRegister(nodes)
for i in range(0, nodes.GetNumberOfItems()):
node = nodes.GetItemAsObject(i)
if node.GetName() == name:
return node
return None
def setWorkflowLevel(self, level):
widgets = self.getChildren(self.widget)
for widget in widgets:
workflow = widget.property('workflow')
if workflow != None:
widget.setVisible( str(level) in workflow )
# Potentially enable/disable next button
self.validate(None)
def validate( self, desiredBranchId = None ):
'''Check whether the step is valid or not. A valid step means the user can
go forward to the next step.
To be reimplemented by the step.'''
self.validateStep(False, desiredBranchId)
def validateStep(self, valid, desiredBranchId):
valid = valid or (self.Workflow.level > 0)
# If desiredBranchId is valid, it means the validation comes from ctk.
# It should then be passed through to go to the next step.
if desiredBranchId != None:
super( WorkflowStep, self ).validate(valid, desiredBranchId)
# Enable/Disable next step button.
# \todo: c) make it automatic in ctkWorkflow
if (self.Workflow.workflow.currentStep() ==
self.Workflow.workflow.step(self.stepid)):
self.Workflow.workflowWidget.buttonBoxWidget().nextButton().enabled = valid
def onEntry(self, comingFrom, transitionType):
'''Can be reimplemented by the step'''
self.updateHelp()
comingFromId = "None"
if comingFrom: comingFromId = comingFrom.id()
super( WorkflowStep, self ).onEntry(comingFrom, transitionType)
# Don't pass a valid step as it would automatically jump to the step.
self.validate(None)
# Hide the Progress bar if nothing is happening
cliNode = self.Workflow.getProgressBar().commandLineModuleNode()
if cliNode != None and not cliNode.IsBusy():
self.Workflow.getProgressBar().setCommandLineModuleNode(0)
def onExit(self, goingTo, transitionType):
'''Can be reimplemented by the step'''
goingToId = "None"
if goingTo: goingToId = goingTo.id()
super( WorkflowStep, self ).onExit(goingTo, transitionType)
def observeCLINode(self, cliNode, onCLINodeModified = None):
if cliNode != None and onCLINodeModified != None:
self.addObserver(cliNode,
slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent,
onCLINodeModified)
self.Workflow.getProgressBar().setCommandLineModuleNode(cliNode)
def createOutputIfNeeded( self, node, suffix, combobox ):
    '''Create an output node for the given combobox with its name suffixed
    by the given suffix if it doesn't already exist. If it does, then
    that node is simply set as the combobox current node.'''
if node == None:
return
nodeName = '%s-%s' % (node.GetName(), suffix)
node = self.getFirstNodeByNameAndClass(nodeName, 'vtkMRMLScalarVolumeNode')
if node == None:
newNode = combobox.addNode()
newNode.SetName(nodeName)
node = newNode
combobox.setCurrentNode(node)
def setViews( self, nodes ):
self.Workflow.setViews(nodes)
def updateFromCLIParameters( self ):
    '''Overload this function to update the necessary steps from the CLIs.
This is called by the workflow on enter.
'''
pass
def getJsonParameters( self, module ):
return self.Workflow.getJsonParameters(module)
def updateConfiguration( self, config ):
    '''Overload this function to update the step from the config dictionary.'''
pass
def onNumberOfInputsChanged( self, numberOfInputs ):
    '''Overload this function to update the step depending on the number of
inputs. This does nothing by default.'''
pass
def getHelp( self ):
return '''Each step must re-implement the getHelp method so correct help can
be displayed at each step !'''
def updateHelp( self ):
'''<h1>Update the help dialog of the module with the text given by the
getHelp method.</h1>
'''
modulePanelWidget = self.findWidget(slicer.util.mainWindow(), 'ModulePanel')
helpButton = self.findWidget(modulePanelWidget, 'HelpCollapsibleButton')
helpButton.setChecked(True)
tab = self.findWidget(helpButton, 'HelpAcknowledgementTabWidget')
tab.currentIndex = 0 # Select the help (first) tab
helpLabel = self.findWidget(tab, 'HelpLabel')
helpLabel.setHtml(self.getHelp())
| apache-2.0 | -7,871,073,238,978,426,000 | 34.749035 | 94 | 0.692947 | false |
Arafatk/sympy | sympy/physics/quantum/boson.py | 59 | 6134 | """Bosonic quantum operators."""
from sympy.core.compatibility import u
from sympy import Mul, Integer, exp, sqrt, conjugate
from sympy.physics.quantum import Operator
from sympy.physics.quantum import HilbertSpace, FockSpace, Ket, Bra, IdentityOperator
from sympy.functions.special.tensor_functions import KroneckerDelta
__all__ = [
'BosonOp',
'BosonFockKet',
'BosonFockBra',
'BosonCoherentKet',
'BosonCoherentBra'
]
class BosonOp(Operator):
"""A bosonic operator that satisfies [a, Dagger(a)] == 1.
Parameters
==========
name : str
A string that labels the bosonic mode.
annihilation : bool
A bool that indicates if the bosonic operator is an annihilation (True,
default value) or creation operator (False)
Examples
========
>>> from sympy.physics.quantum import Dagger, Commutator
>>> from sympy.physics.quantum.boson import BosonOp
>>> a = BosonOp("a")
>>> Commutator(a, Dagger(a)).doit()
1
"""
@property
def name(self):
return self.args[0]
@property
def is_annihilation(self):
return bool(self.args[1])
@classmethod
def default_args(self):
return ("a", True)
def __new__(cls, *args, **hints):
if not len(args) in [1, 2]:
raise ValueError('1 or 2 parameters expected, got %s' % args)
if len(args) == 1:
args = (args[0], Integer(1))
if len(args) == 2:
args = (args[0], Integer(args[1]))
return Operator.__new__(cls, *args)
def _eval_commutator_BosonOp(self, other, **hints):
if self.name == other.name:
# [a^\dagger, a] = -1
if not self.is_annihilation and other.is_annihilation:
return Integer(-1)
elif 'independent' in hints and hints['independent']:
# [a, b] = 0
return Integer(0)
return None
def _eval_commutator_FermionOp(self, other, **hints):
return Integer(0)
def _eval_anticommutator_BosonOp(self, other, **hints):
if 'independent' in hints and hints['independent']:
# {a, b} = 2 * a * b, because [a, b] = 0
return 2 * self * other
return None
def _eval_adjoint(self):
return BosonOp(str(self.name), not self.is_annihilation)
def __mul__(self, other):
if other == IdentityOperator(2):
return self
if isinstance(other, Mul):
args1 = tuple(arg for arg in other.args if arg.is_commutative)
args2 = tuple(arg for arg in other.args if not arg.is_commutative)
x = self
for y in args2:
x = x * y
return Mul(*args1) * x
return Mul(self, other)
def _print_contents_latex(self, printer, *args):
if self.is_annihilation:
return r'{%s}' % str(self.name)
else:
return r'{{%s}^\dag}' % str(self.name)
def _print_contents(self, printer, *args):
if self.is_annihilation:
return r'%s' % str(self.name)
else:
return r'Dagger(%s)' % str(self.name)
def _print_contents_pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
pform = printer._print(self.args[0], *args)
if self.is_annihilation:
return pform
else:
return pform**prettyForm(u('\N{DAGGER}'))
class BosonFockKet(Ket):
"""Fock state ket for a bosonic mode.
Parameters
==========
n : Number
The Fock state number.
"""
def __new__(cls, n):
return Ket.__new__(cls, n)
@property
def n(self):
return self.label[0]
@classmethod
def dual_class(self):
return BosonFockBra
@classmethod
def _eval_hilbert_space(cls, label):
return FockSpace()
def _eval_innerproduct_BosonFockBra(self, bra, **hints):
return KroneckerDelta(self.n, bra.n)
def _apply_operator_BosonOp(self, op, **options):
if op.is_annihilation:
return sqrt(self.n) * BosonFockKet(self.n - 1)
else:
return sqrt(self.n + 1) * BosonFockKet(self.n + 1)
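# Ladder-operator action sketch (apply with sympy.physics.quantum.qapply):
#   qapply(BosonOp("a") * BosonFockKet(n))         -> sqrt(n)*BosonFockKet(n - 1)
#   qapply(Dagger(BosonOp("a")) * BosonFockKet(n)) -> sqrt(n + 1)*BosonFockKet(n + 1)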
class BosonFockBra(Bra):
"""Fock state bra for a bosonic mode.
Parameters
==========
n : Number
The Fock state number.
"""
def __new__(cls, n):
return Bra.__new__(cls, n)
@property
def n(self):
return self.label[0]
@classmethod
def dual_class(self):
return BosonFockKet
@classmethod
def _eval_hilbert_space(cls, label):
return FockSpace()
class BosonCoherentKet(Ket):
"""Coherent state ket for a bosonic mode.
Parameters
==========
alpha : Number, Symbol
The complex amplitude of the coherent state.
"""
def __new__(cls, alpha):
return Ket.__new__(cls, alpha)
@property
def alpha(self):
return self.label[0]
@classmethod
def dual_class(self):
return BosonCoherentBra
@classmethod
def _eval_hilbert_space(cls, label):
return HilbertSpace()
def _eval_innerproduct_BosonCoherentBra(self, bra, **hints):
if self.alpha == bra.alpha:
return Integer(1)
else:
return exp(-(abs(self.alpha)**2 + abs(bra.alpha)**2 - 2 * conjugate(bra.alpha) * self.alpha)/2)
def _apply_operator_BosonOp(self, op, **options):
if op.is_annihilation:
return self.alpha * self
else:
return None
class BosonCoherentBra(Bra):
"""Coherent state bra for a bosonic mode.
Parameters
==========
alpha : Number, Symbol
The complex amplitude of the coherent state.
"""
def __new__(cls, alpha):
return Bra.__new__(cls, alpha)
@property
def alpha(self):
return self.label[0]
@classmethod
def dual_class(self):
return BosonCoherentKet
def _apply_operator_BosonOp(self, op, **options):
if not op.is_annihilation:
return self.alpha * self
else:
return None
| bsd-3-clause | -7,905,876,544,738,666,000 | 23.054902 | 107 | 0.573199 | false |
ChronoMonochrome/android_external_chromium_org | native_client_sdk/src/build_tools/update_nacl_manifest.py | 27 | 34849 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that reads omahaproxy and gsutil to determine version of SDK to put
in manifest.
"""
# pylint is convinced the email module is missing attributes
# pylint: disable=E1101
import buildbot_common
import csv
import cStringIO
import difflib
import email
import json
import logging
import logging.handlers
import manifest_util
import optparse
import os
import posixpath
import re
import smtplib
import subprocess
import sys
import time
import traceback
import urllib2
MANIFEST_BASENAME = 'naclsdk_manifest2.json'
SCRIPT_DIR = os.path.dirname(__file__)
REPO_MANIFEST = os.path.join(SCRIPT_DIR, 'json', MANIFEST_BASENAME)
GS_BUCKET_PATH = 'gs://nativeclient-mirror/nacl/nacl_sdk/'
GS_SDK_MANIFEST = GS_BUCKET_PATH + MANIFEST_BASENAME
GS_SDK_MANIFEST_LOG = GS_BUCKET_PATH + MANIFEST_BASENAME + '.log'
GS_MANIFEST_BACKUP_DIR = GS_BUCKET_PATH + 'manifest_backups/'
CANARY_BUNDLE_NAME = 'pepper_canary'
CANARY = 'canary'
NACLPORTS_ARCHIVE_NAME = 'naclports.tar.bz2'
logger = logging.getLogger(__name__)
def SplitVersion(version_string):
"""Split a version string (e.g. "18.0.1025.163") into its components.
Note that this function doesn't handle versions in the form "trunk.###".
"""
return tuple(map(int, version_string.split('.')))
def JoinVersion(version_tuple):
"""Create a string from a version tuple.
The tuple should be of the form (18, 0, 1025, 163).
"""
return '.'.join(map(str, version_tuple))
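# Round-trip sketch:
#   SplitVersion('18.0.1025.163')   -> (18, 0, 1025, 163)
#   JoinVersion((18, 0, 1025, 163)) -> '18.0.1025.163'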
def GetTimestampManifestName():
"""Create a manifest name with a timestamp.
Returns:
A manifest name with an embedded date. This should make it easier to roll
back if necessary.
"""
return time.strftime('naclsdk_manifest2.%Y_%m_%d_%H_%M_%S.json',
time.gmtime())
def GetPlatformArchiveName(platform):
"""Get the basename of an archive given a platform string.
Args:
platform: One of ('win', 'mac', 'linux').
Returns:
The basename of the sdk archive for that platform.
"""
return 'naclsdk_%s.tar.bz2' % platform
def GetCanonicalArchiveName(url):
"""Get the canonical name of an archive given its URL.
This will convert "naclsdk_linux.bz2" -> "naclsdk_linux.tar.bz2", and also
remove everything but the filename of the URL.
  This is used below to determine if an expected bundle is found in a version
directory; the archives all have the same name, but may not exist for a given
version.
Args:
url: The url to parse.
Returns:
The canonical name as described above.
"""
name = posixpath.basename(url)
match = re.match(r'naclsdk_(.*?)(?:\.tar)?\.bz2', name)
if match:
return 'naclsdk_%s.tar.bz2' % match.group(1)
return name
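# Illustrative (matches the docstring above):
#   GetCanonicalArchiveName('gs://.../18.0.1025.164/naclsdk_linux.bz2')
#   -> 'naclsdk_linux.tar.bz2'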
class Delegate(object):
"""Delegate all external access; reading/writing to filesystem, gsutil etc."""
def GetRepoManifest(self):
"""Read the manifest file from the NaCl SDK repository.
This manifest is used as a template for the auto updater; only pepper
bundles with no archives are considered for auto updating.
Returns:
A manifest_util.SDKManifest object read from the NaCl SDK repo."""
raise NotImplementedError()
def GetHistory(self):
"""Read Chrome release history from omahaproxy.appspot.com
Here is an example of data from this URL:
cros,stable,18.0.1025.168,2012-05-01 17:04:05.962578\n
win,canary,20.0.1123.0,2012-05-01 13:59:31.703020\n
mac,canary,20.0.1123.0,2012-05-01 11:54:13.041875\n
win,stable,18.0.1025.168,2012-04-30 20:34:56.078490\n
mac,stable,18.0.1025.168,2012-04-30 20:34:55.231141\n
...
Where each line has comma separated values in the following format:
platform, channel, version, date/time\n
Returns:
A list where each element is a line from the document, represented as a
tuple."""
raise NotImplementedError()
def GetTrunkRevision(self, version):
"""Given a Chrome version, get its trunk revision.
Args:
version: A version string of the form '18.0.1025.64'
Returns:
The revision number for that version, as a string."""
raise NotImplementedError()
def GsUtil_ls(self, url):
"""Runs gsutil ls |url|
Args:
url: The cloud storage url to list.
Returns:
A list of URLs, all with the gs:// schema."""
raise NotImplementedError()
def GsUtil_cat(self, url):
"""Runs gsutil cat |url|
Args:
url: The cloud storage url to read from.
Returns:
A string with the contents of the file at |url|."""
raise NotImplementedError()
def GsUtil_cp(self, src, dest, stdin=None):
"""Runs gsutil cp |src| |dest|
Args:
src: The file path or url to copy from.
dest: The file path or url to copy to.
stdin: If src is '-', this is used as the stdin to give to gsutil. The
effect is that text in stdin is copied to |dest|."""
raise NotImplementedError()
def SendMail(self, subject, text):
"""Send an email.
Args:
subject: The subject of the email.
text: The text of the email.
"""
raise NotImplementedError()
class RealDelegate(Delegate):
def __init__(self, dryrun=False, gsutil=None, mailfrom=None, mailto=None):
super(RealDelegate, self).__init__()
self.dryrun = dryrun
self.mailfrom = mailfrom
self.mailto = mailto
if gsutil:
self.gsutil = gsutil
else:
self.gsutil = buildbot_common.GetGsutil()
def GetRepoManifest(self):
"""See Delegate.GetRepoManifest"""
with open(REPO_MANIFEST, 'r') as sdk_stream:
sdk_json_string = sdk_stream.read()
manifest = manifest_util.SDKManifest()
manifest.LoadDataFromString(sdk_json_string, add_missing_info=True)
return manifest
def GetHistory(self):
"""See Delegate.GetHistory"""
url_stream = urllib2.urlopen('https://omahaproxy.appspot.com/history')
return [(platform, channel, version, date)
for platform, channel, version, date in csv.reader(url_stream)]
def GetTrunkRevision(self, version):
"""See Delegate.GetTrunkRevision"""
url = 'http://omahaproxy.appspot.com/revision.json?version=%s' % (version,)
data = json.loads(urllib2.urlopen(url).read())
return 'trunk.%s' % int(data['chromium_revision'])
def GsUtil_ls(self, url):
"""See Delegate.GsUtil_ls"""
try:
stdout = self._RunGsUtil(None, False, 'ls', url)
except subprocess.CalledProcessError:
return []
# filter out empty lines
return filter(None, stdout.split('\n'))
def GsUtil_cat(self, url):
"""See Delegate.GsUtil_cat"""
return self._RunGsUtil(None, True, 'cat', url)
def GsUtil_cp(self, src, dest, stdin=None):
"""See Delegate.GsUtil_cp"""
if self.dryrun:
logger.info("Skipping upload: %s -> %s" % (src, dest))
if src == '-':
logger.info(' contents = """%s"""' % stdin)
return
return self._RunGsUtil(stdin, True, 'cp', '-a', 'public-read', src, dest)
def SendMail(self, subject, text):
"""See Delegate.SendMail"""
if self.mailfrom and self.mailto:
msg = email.MIMEMultipart.MIMEMultipart()
msg['From'] = self.mailfrom
msg['To'] = ', '.join(self.mailto)
msg['Date'] = email.Utils.formatdate(localtime=True)
msg['Subject'] = subject
msg.attach(email.MIMEText.MIMEText(text))
smtp_obj = smtplib.SMTP('localhost')
smtp_obj.sendmail(self.mailfrom, self.mailto, msg.as_string())
smtp_obj.close()
def _RunGsUtil(self, stdin, log_errors, *args):
"""Run gsutil as a subprocess.
Args:
stdin: If non-None, used as input to the process.
log_errors: If True, write errors to stderr.
*args: Arguments to pass to gsutil. The first argument should be an
operation such as ls, cp or cat.
Returns:
The stdout from the process."""
cmd = [self.gsutil] + list(args)
logger.debug("Running: %s" % str(cmd))
if stdin:
stdin_pipe = subprocess.PIPE
else:
stdin_pipe = None
try:
process = subprocess.Popen(cmd, stdin=stdin_pipe, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate(stdin)
except OSError as e:
raise manifest_util.Error("Unable to run '%s': %s" % (cmd[0], str(e)))
if process.returncode:
if log_errors:
logger.error(stderr)
raise subprocess.CalledProcessError(process.returncode, ' '.join(cmd))
return stdout
class GsutilLoggingHandler(logging.handlers.BufferingHandler):
def __init__(self, delegate):
logging.handlers.BufferingHandler.__init__(self, capacity=0)
self.delegate = delegate
def shouldFlush(self, record):
# BufferingHandler.shouldFlush automatically flushes if the length of the
# buffer is greater than self.capacity. We don't want that behavior, so
# return False here.
return False
def flush(self):
# Do nothing here. We want to be explicit about uploading the log.
pass
def upload(self):
output_list = []
for record in self.buffer:
output_list.append(self.format(record))
output = '\n'.join(output_list)
self.delegate.GsUtil_cp('-', GS_SDK_MANIFEST_LOG, stdin=output)
logging.handlers.BufferingHandler.flush(self)
class NoSharedVersionException(Exception):
pass
class VersionFinder(object):
"""Finds a version of a pepper bundle that all desired platforms share.
Args:
delegate: See Delegate class above.
platforms: A sequence of platforms to consider, e.g.
('mac', 'linux', 'win')
extra_archives: A sequence of tuples: (archive_basename, minimum_version),
e.g. [('foo.tar.bz2', '18.0.1000.0'), ('bar.tar.bz2', '19.0.1100.20')]
These archives must exist to consider a version for inclusion, as
long as that version is greater than the archive's minimum version.
"""
def __init__(self, delegate, platforms, extra_archives=None):
self.delegate = delegate
self.history = delegate.GetHistory()
self.platforms = platforms
self.extra_archives = extra_archives
def GetMostRecentSharedVersion(self, major_version):
"""Returns the most recent version of a pepper bundle that exists on all
given platforms.
Specifically, the resulting version should be the most recently released
(meaning closest to the top of the listing on
omahaproxy.appspot.com/history) version that has a Chrome release on all
given platforms, and has a pepper bundle archive for each platform as well.
Args:
major_version: The major version of the pepper bundle, e.g. 19.
Returns:
A tuple (version, channel, archives). The version is a string such as
"19.0.1084.41". The channel is one of ('stable', 'beta', or 'dev').
|archives| is a list of archive URLs."""
def GetPlatformHistory(platform):
return self._GetPlatformMajorVersionHistory(major_version, platform)
shared_version_generator = self._FindNextSharedVersion(self.platforms,
GetPlatformHistory)
return self._DoGetMostRecentSharedVersion(shared_version_generator,
allow_trunk_revisions=False)
def GetMostRecentSharedCanary(self):
"""Returns the most recent version of a canary pepper bundle that exists on
all given platforms.
Canary is special-cased because we don't care about its major version; we
always use the most recent canary, regardless of major version.
Returns:
A tuple (version, channel, archives). The version is a string such as
"19.0.1084.41". The channel is always 'canary'. |archives| is a list of
archive URLs."""
# Canary versions that differ in the last digit shouldn't be considered
# different; this number is typically used to represent an experiment, e.g.
# using ASAN or aura.
def CanaryKey(version):
return version[:-1]
# We don't ship canary on Linux, so it won't appear in self.history.
# Instead, we can use the matching Linux trunk build for that version.
shared_version_generator = self._FindNextSharedVersion(
set(self.platforms) - set(('linux',)),
self._GetPlatformCanaryHistory, CanaryKey)
return self._DoGetMostRecentSharedVersion(shared_version_generator,
allow_trunk_revisions=True)
def GetAvailablePlatformArchivesFor(self, version, allow_trunk_revisions):
"""Returns a sequence of archives that exist for a given version, on the
given platforms.
The second element of the returned tuple is a list of all platforms that do
not have an archive for the given version.
Args:
version: The version to find archives for. (e.g. "18.0.1025.164")
allow_trunk_revisions: If True, will search for archives using the
trunk revision that matches the branch version.
Returns:
A tuple (archives, missing_archives). |archives| is a list of archive
URLs, |missing_archives| is a list of archive names.
"""
archive_urls = self._GetAvailableArchivesFor(version)
platform_archives = set(GetPlatformArchiveName(p) for p in self.platforms)
expected_archives = platform_archives
if self.extra_archives:
for extra_archive, extra_archive_min_version in self.extra_archives:
if SplitVersion(version) >= SplitVersion(extra_archive_min_version):
expected_archives.add(extra_archive)
found_archives = set(GetCanonicalArchiveName(a) for a in archive_urls)
missing_archives = expected_archives - found_archives
if allow_trunk_revisions and missing_archives:
# Try to find trunk versions of any missing archives.
trunk_version = self.delegate.GetTrunkRevision(version)
trunk_archives = self._GetAvailableArchivesFor(trunk_version)
for trunk_archive_url in trunk_archives:
trunk_archive = GetCanonicalArchiveName(trunk_archive_url)
if trunk_archive in missing_archives:
archive_urls.append(trunk_archive_url)
missing_archives.discard(trunk_archive)
# Only return archives that are "expected".
def IsExpected(url):
return GetCanonicalArchiveName(url) in expected_archives
expected_archive_urls = [u for u in archive_urls if IsExpected(u)]
return expected_archive_urls, missing_archives
def _DoGetMostRecentSharedVersion(self, shared_version_generator,
allow_trunk_revisions):
"""Returns the most recent version of a pepper bundle that exists on all
given platforms.
This function does the real work for the public GetMostRecentShared* above.
Args:
shared_version_generator: A generator that will yield (version, channel)
tuples in order of most recent to least recent.
allow_trunk_revisions: If True, will search for archives using the
trunk revision that matches the branch version.
Returns:
A tuple (version, channel, archives). The version is a string such as
"19.0.1084.41". The channel is one of ('stable', 'beta', 'dev',
'canary'). |archives| is a list of archive URLs."""
version = None
skipped_versions = []
channel = ''
while True:
try:
version, channel = shared_version_generator.next()
except StopIteration:
msg = 'No shared version for platforms: %s\n' % (
', '.join(self.platforms))
msg += 'Last version checked = %s.\n' % (version,)
if skipped_versions:
msg += 'Versions skipped due to missing archives:\n'
for version, channel, missing_archives in skipped_versions:
archive_msg = '(missing %s)' % (', '.join(missing_archives))
msg += ' %s (%s) %s\n' % (version, channel, archive_msg)
raise NoSharedVersionException(msg)
logger.info('Found shared version: %s, channel: %s' % (
version, channel))
archives, missing_archives = self.GetAvailablePlatformArchivesFor(
version, allow_trunk_revisions)
if not missing_archives:
return version, channel, archives
logger.info(' skipping. Missing archives: %s' % (
', '.join(missing_archives)))
skipped_versions.append((version, channel, missing_archives))
def _GetPlatformMajorVersionHistory(self, with_major_version, with_platform):
"""Yields Chrome history for a given platform and major version.
Args:
with_major_version: The major version to filter for. If 0, match all
versions.
with_platform: The name of the platform to filter for.
Returns:
A generator that yields a tuple (channel, version) for each version that
matches the platform and major version. The version returned is a tuple as
returned from SplitVersion.
"""
for platform, channel, version, _ in self.history:
version = SplitVersion(version)
if (with_platform == platform and
(with_major_version == 0 or with_major_version == version[0])):
yield channel, version
def _GetPlatformCanaryHistory(self, with_platform):
"""Yields Chrome history for a given platform, but only for canary
versions.
Args:
with_platform: The name of the platform to filter for.
Returns:
A generator that yields a tuple (channel, version) for each version that
matches the platform and uses the canary channel. The version returned is
a tuple as returned from SplitVersion.
"""
for platform, channel, version, _ in self.history:
version = SplitVersion(version)
if with_platform == platform and channel == CANARY:
yield channel, version
def _FindNextSharedVersion(self, platforms, generator_func, key_func=None):
"""Yields versions of Chrome that exist on all given platforms, in order of
newest to oldest.
Versions are compared in reverse order of release. That is, the most
recently updated version will be tested first.
Args:
platforms: A sequence of platforms to consider, e.g.
('mac', 'linux', 'win')
generator_func: A function which takes a platform and returns a
generator that yields (channel, version) tuples.
key_func: A function to convert the version into a value that should be
used for comparison. See python built-in sorted() or min(), for
an example.
Returns:
A generator that yields a tuple (version, channel) for each version that
matches all platforms and the major version. The version returned is a
string (e.g. "18.0.1025.164").
"""
if not key_func:
key_func = lambda x: x
platform_generators = []
for platform in platforms:
platform_generators.append(generator_func(platform))
shared_version = None
platform_versions = []
for platform_gen in platform_generators:
platform_versions.append(platform_gen.next())
while True:
if logger.isEnabledFor(logging.INFO):
msg_info = []
for i, platform in enumerate(platforms):
msg_info.append('%s: %s' % (
platform, JoinVersion(platform_versions[i][1])))
logger.info('Checking versions: %s' % ', '.join(msg_info))
shared_version = min((v for c, v in platform_versions), key=key_func)
if all(key_func(v) == key_func(shared_version)
for c, v in platform_versions):
# The real shared_version should be the real minimum version. This will
# be different from shared_version above only if key_func compares two
# versions with different values as equal.
min_version = min((v for c, v in platform_versions))
# grab the channel from an arbitrary platform
first_platform = platform_versions[0]
channel = first_platform[0]
yield JoinVersion(min_version), channel
# force increment to next version for all platforms
shared_version = None
# Find the next version for any platform that isn't at the shared version.
try:
for i, platform_gen in enumerate(platform_generators):
if platform_versions[i][1] != shared_version:
platform_versions[i] = platform_gen.next()
except StopIteration:
return
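    # Sketch: with win history [..., 19.0.1084.52, 19.0.1084.41] and mac
    # history [..., 19.0.1084.41], the win generator is advanced past .52
    # until both platforms agree, then ('19.0.1084.41', channel) is yielded.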
def _GetAvailableArchivesFor(self, version_string):
"""Downloads a list of all available archives for a given version.
Args:
version_string: The version to find archives for. (e.g. "18.0.1025.164")
Returns:
A list of strings, each of which is a platform-specific archive URL. (e.g.
"gs://nativeclient_mirror/nacl/nacl_sdk/18.0.1025.164/"
"naclsdk_linux.tar.bz2").
All returned URLs will use the gs:// schema."""
files = self.delegate.GsUtil_ls(GS_BUCKET_PATH + version_string)
assert all(file.startswith('gs://') for file in files)
archives = [f for f in files if not f.endswith('.json')]
manifests = [f for f in files if f.endswith('.json')]
# don't include any archives that don't have an associated manifest.
return filter(lambda a: a + '.json' in manifests, archives)
class UnknownLockedBundleException(Exception):
pass
class Updater(object):
def __init__(self, delegate):
self.delegate = delegate
self.versions_to_update = []
self.locked_bundles = []
self.online_manifest = manifest_util.SDKManifest()
self._FetchOnlineManifest()
def AddVersionToUpdate(self, bundle_name, version, channel, archives):
"""Add a pepper version to update in the uploaded manifest.
Args:
bundle_name: The name of the pepper bundle, e.g. 'pepper_18'
version: The version of the pepper bundle, e.g. '18.0.1025.64'
channel: The stability of the pepper bundle, e.g. 'beta'
archives: A sequence of archive URLs for this bundle."""
self.versions_to_update.append((bundle_name, version, channel, archives))
def AddLockedBundle(self, bundle_name):
"""Add a "locked" bundle to the updater.
A locked bundle is a bundle that wasn't found in the history. When this
happens, the bundle is now "locked" to whatever was last found. We want to
ensure that the online manifest has this bundle.
Args:
bundle_name: The name of the locked bundle.
"""
self.locked_bundles.append(bundle_name)
def Update(self, manifest):
"""Update a manifest and upload it.
Note that bundles will not be updated if the current version is newer.
That is, the updater will never automatically update to an older version of
a bundle.
Args:
manifest: The manifest used as a template for updating. Only pepper
bundles that contain no archives will be considered for auto-updating."""
# Make sure there is only one stable branch: the one with the max version.
# All others are post-stable.
stable_major_versions = [SplitVersion(version)[0] for _, version, channel, _
in self.versions_to_update if channel == 'stable']
# Add 0 in case there are no stable versions.
max_stable_version = max([0] + stable_major_versions)
# Ensure that all locked bundles exist in the online manifest.
for bundle_name in self.locked_bundles:
online_bundle = self.online_manifest.GetBundle(bundle_name)
if online_bundle:
manifest.SetBundle(online_bundle)
else:
msg = ('Attempted to update bundle "%s", but no shared versions were '
'found, and there is no online bundle with that name.')
raise UnknownLockedBundleException(msg % bundle_name)
if self.locked_bundles:
# Send a nagging email that we shouldn't be wasting time looking for
# bundles that are no longer in the history.
scriptname = os.path.basename(sys.argv[0])
subject = '[%s] Reminder: remove bundles from %s' % (scriptname,
MANIFEST_BASENAME)
text = 'These bundles are not in the omahaproxy history anymore: ' + \
', '.join(self.locked_bundles)
self.delegate.SendMail(subject, text)
# Update all versions.
logger.info('>>> Updating bundles...')
for bundle_name, version, channel, archives in self.versions_to_update:
logger.info('Updating %s to %s...' % (bundle_name, version))
bundle = manifest.GetBundle(bundle_name)
for archive in archives:
platform_bundle = self._GetPlatformArchiveBundle(archive)
# Normally the manifest snippet's bundle name matches our bundle name.
# pepper_canary, however is called "pepper_###" in the manifest
# snippet.
platform_bundle.name = bundle_name
bundle.MergeWithBundle(platform_bundle)
# Fix the stability and recommended values
major_version = SplitVersion(version)[0]
if major_version < max_stable_version:
bundle.stability = 'post_stable'
else:
bundle.stability = channel
# We always recommend the stable version.
if bundle.stability == 'stable':
bundle.recommended = 'yes'
else:
bundle.recommended = 'no'
# Check to ensure this bundle is newer than the online bundle.
online_bundle = self.online_manifest.GetBundle(bundle_name)
if online_bundle:
# This test used to be online_bundle.revision >= bundle.revision.
# That doesn't do quite what we want: sometimes the metadata changes
# but the revision stays the same -- we still want to push those
# changes.
if online_bundle.revision > bundle.revision or online_bundle == bundle:
logger.info(
              ' Revision %s is not newer than the online revision %s. '
'Skipping.' % (bundle.revision, online_bundle.revision))
manifest.SetBundle(online_bundle)
continue
self._UploadManifest(manifest)
logger.info('Done.')
def _GetPlatformArchiveBundle(self, archive):
"""Downloads the manifest "snippet" for an archive, and reads it as a
Bundle.
Args:
archive: A full URL of a platform-specific archive, using the gs schema.
Returns:
An object of type manifest_util.Bundle, read from a JSON file storing
metadata for this archive.
"""
stdout = self.delegate.GsUtil_cat(archive + '.json')
bundle = manifest_util.Bundle('')
bundle.LoadDataFromString(stdout)
# Some snippets were uploaded with revisions and versions as strings. Fix
# those here.
bundle.revision = int(bundle.revision)
bundle.version = int(bundle.version)
# HACK. The naclports archive specifies host_os as linux. Change it to all.
for archive in bundle.GetArchives():
if NACLPORTS_ARCHIVE_NAME in archive.url:
archive.host_os = 'all'
return bundle
def _UploadManifest(self, manifest):
"""Upload a serialized manifest_util.SDKManifest object.
Upload one copy to gs://<BUCKET_PATH>/naclsdk_manifest2.json, and a copy to
gs://<BUCKET_PATH>/manifest_backups/naclsdk_manifest2.<TIMESTAMP>.json.
Args:
manifest: The new manifest to upload.
"""
new_manifest_string = manifest.GetDataAsString()
online_manifest_string = self.online_manifest.GetDataAsString()
if self.delegate.dryrun:
logger.info(''.join(list(difflib.unified_diff(
online_manifest_string.splitlines(1),
new_manifest_string.splitlines(1)))))
return
else:
online_manifest = manifest_util.SDKManifest()
online_manifest.LoadDataFromString(online_manifest_string)
if online_manifest == manifest:
logger.info('New manifest doesn\'t differ from online manifest.'
'Skipping upload.')
return
timestamp_manifest_path = GS_MANIFEST_BACKUP_DIR + \
GetTimestampManifestName()
self.delegate.GsUtil_cp('-', timestamp_manifest_path,
stdin=manifest.GetDataAsString())
    # copy the timestamped manifest over the official manifest.
self.delegate.GsUtil_cp(timestamp_manifest_path, GS_SDK_MANIFEST)
def _FetchOnlineManifest(self):
try:
online_manifest_string = self.delegate.GsUtil_cat(GS_SDK_MANIFEST)
except subprocess.CalledProcessError:
# It is not a failure if the online manifest doesn't exist.
online_manifest_string = ''
if online_manifest_string:
self.online_manifest.LoadDataFromString(online_manifest_string)
def Run(delegate, platforms, extra_archives, fixed_bundle_versions=None):
"""Entry point for the auto-updater.
Args:
delegate: The Delegate object to use for reading Urls, files, etc.
platforms: A sequence of platforms to consider, e.g.
('mac', 'linux', 'win')
extra_archives: A sequence of tuples: (archive_basename, minimum_version),
e.g. [('foo.tar.bz2', '18.0.1000.0'), ('bar.tar.bz2', '19.0.1100.20')]
These archives must exist to consider a version for inclusion, as
long as that version is greater than the archive's minimum version.
fixed_bundle_versions: A sequence of tuples (bundle_name, version_string).
e.g. ('pepper_21', '21.0.1145.0')
"""
if fixed_bundle_versions:
fixed_bundle_versions = dict(fixed_bundle_versions)
else:
fixed_bundle_versions = {}
manifest = delegate.GetRepoManifest()
auto_update_bundles = []
for bundle in manifest.GetBundles():
if not bundle.name.startswith('pepper_'):
continue
archives = bundle.GetArchives()
if not archives:
auto_update_bundles.append(bundle)
if not auto_update_bundles:
logger.info('No versions need auto-updating.')
return
version_finder = VersionFinder(delegate, platforms, extra_archives)
updater = Updater(delegate)
for bundle in auto_update_bundles:
try:
if bundle.name == CANARY_BUNDLE_NAME:
logger.info('>>> Looking for most recent pepper_canary...')
version, channel, archives = version_finder.GetMostRecentSharedCanary()
else:
logger.info('>>> Looking for most recent pepper_%s...' %
bundle.version)
version, channel, archives = version_finder.GetMostRecentSharedVersion(
bundle.version)
except NoSharedVersionException:
# If we can't find a shared version, make sure that there is an uploaded
# bundle with that name already.
updater.AddLockedBundle(bundle.name)
continue
if bundle.name in fixed_bundle_versions:
# Ensure this version is valid for all platforms.
# If it is, use the channel found above (because the channel for this
# version may not be in the history.)
version = fixed_bundle_versions[bundle.name]
logger.info('Fixed bundle version: %s, %s' % (bundle.name, version))
allow_trunk_revisions = bundle.name == CANARY_BUNDLE_NAME
archives, missing = version_finder.GetAvailablePlatformArchivesFor(
version, allow_trunk_revisions)
if missing:
logger.warn(
'Some archives for version %s of bundle %s don\'t exist: '
'Missing %s' % (version, bundle.name, ', '.join(missing)))
return
updater.AddVersionToUpdate(bundle.name, version, channel, archives)
updater.Update(manifest)
class CapturedFile(object):
"""A file-like object that captures text written to it, but also passes it
through to an underlying file-like object."""
def __init__(self, passthrough):
self.passthrough = passthrough
self.written = cStringIO.StringIO()
def write(self, s):
self.written.write(s)
if self.passthrough:
self.passthrough.write(s)
def getvalue(self):
return self.written.getvalue()
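# Usage sketch (mirrors main() below): wrap stderr so errors are both
# printed and available for the failure email:
#   sys.stderr = CapturedFile(sys.stderr)
#   ...
#   body = sys.stderr.getvalue()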
def main(args):
parser = optparse.OptionParser()
parser.add_option('--gsutil', help='path to gsutil.')
parser.add_option('-d', '--debug', help='run in debug mode.',
action='store_true')
parser.add_option('--mailfrom', help='email address of sender.')
parser.add_option('--mailto', help='send error mails to...', action='append')
parser.add_option('-n', '--dryrun', help="don't upload the manifest.",
action='store_true')
  parser.add_option('-v', '--verbose', help='print more diagnostic messages. '
'Use more than once for more info.',
action='count')
parser.add_option('--log-file', metavar='FILE', help='log to FILE')
parser.add_option('--upload-log', help='Upload log alongside the manifest.',
action='store_true')
parser.add_option('--bundle-version',
help='Manually set a bundle version. This can be passed more than once. '
'format: --bundle-version pepper_24=24.0.1312.25', action='append')
options, args = parser.parse_args(args[1:])
if (options.mailfrom is None) != (not options.mailto):
options.mailfrom = None
options.mailto = None
logger.warning('Disabling email, one of --mailto or --mailfrom '
'was missing.\n')
if options.verbose >= 2:
logging.basicConfig(level=logging.DEBUG, filename=options.log_file)
elif options.verbose:
logging.basicConfig(level=logging.INFO, filename=options.log_file)
else:
logging.basicConfig(level=logging.WARNING, filename=options.log_file)
# Parse bundle versions.
fixed_bundle_versions = {}
if options.bundle_version:
for bundle_version_string in options.bundle_version:
bundle_name, version = bundle_version_string.split('=')
fixed_bundle_versions[bundle_name] = version
if options.mailfrom and options.mailto:
# Capture stderr so it can be emailed, if necessary.
sys.stderr = CapturedFile(sys.stderr)
try:
try:
delegate = RealDelegate(options.dryrun, options.gsutil,
options.mailfrom, options.mailto)
if options.upload_log:
gsutil_logging_handler = GsutilLoggingHandler(delegate)
logger.addHandler(gsutil_logging_handler)
# Only look for naclports archives >= 27. The old ports bundles don't
# include license information.
extra_archives = [('naclports.tar.bz2', '27.0.0.0')]
Run(delegate, ('mac', 'win', 'linux'), extra_archives,
fixed_bundle_versions)
return 0
except Exception:
if options.mailfrom and options.mailto:
traceback.print_exc()
scriptname = os.path.basename(sys.argv[0])
subject = '[%s] Failed to update manifest' % (scriptname,)
text = '%s failed.\n\nSTDERR:\n%s\n' % (scriptname,
sys.stderr.getvalue())
delegate.SendMail(subject, text)
return 1
else:
raise
finally:
if options.upload_log:
gsutil_logging_handler.upload()
except manifest_util.Error as e:
if options.debug:
raise
sys.stderr.write(str(e) + '\n')
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | -393,354,323,993,834,200 | 35.799366 | 80 | 0.669689 | false |
Alecto3-D/testable-greeter | bb-master/sandbox/lib/python3.5/site-packages/buildbot/plugins/db.py | 5 | 16093 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
#
# pylint: disable=C0111
from __future__ import absolute_import
from __future__ import print_function
from future.utils import iteritems
from future.utils import itervalues
from future.utils import string_types
import traceback
from pkg_resources import iter_entry_points
from zope.interface import Invalid
from zope.interface.verify import verifyClass
from buildbot.errors import PluginDBError
from buildbot.interfaces import IPlugin
from buildbot.worker_transition import reportDeprecatedWorkerNameUsage
# Base namespace for Buildbot specific plugins
_NAMESPACE_BASE = 'buildbot'
class _PluginEntry(object):
def __init__(self, group, entry, loader):
self._group = group
self._entry = entry
self._value = None
self._loader = loader
def load(self):
if self._value is None:
self._value = self._loader(self._entry)
@property
def group(self):
return self._group
@property
def name(self):
return self._entry.name
@property
def info(self):
dist = self._entry.dist
return (dist.project_name, dist.version)
def __ne__(self, other):
return self.info != other.info
@property
def value(self):
self.load()
return self._value
class _PluginEntryProxy(_PluginEntry):
"""Proxy for specific entry with custom group name.
Used to provided access to the same entry from different namespaces.
"""
def __init__(self, group, plugin_entry):
assert isinstance(plugin_entry, _PluginEntry)
self._plugin_entry = plugin_entry
self._group = group
def load(self):
self._plugin_entry.load()
@property
def group(self):
return self._group
@property
def name(self):
return self._plugin_entry.name
@property
def info(self):
return self._plugin_entry.info
@property
def value(self):
return self._plugin_entry.value
class _DeprecatedPluginEntry(_PluginEntry):
"""Plugin entry that emits warnings when it's value is requested."""
def __init__(self, compat_name, new_name, plugin_entry):
assert isinstance(plugin_entry, _PluginEntry)
self._plugin_entry = plugin_entry
self._compat_name = compat_name
self._new_name = new_name
def load(self):
self._plugin_entry.load()
@property
def group(self):
return self._plugin_entry.group
@property
def name(self):
return self._plugin_entry.name
@property
def info(self):
return self._plugin_entry.info
@property
def value(self):
reportDeprecatedWorkerNameUsage(
"'{group}.{compat_name}' is deprecated, "
"use '{group}.{new_name}' instead".format(
group=self.group,
compat_name=self._compat_name,
new_name=self._new_name))
return self._plugin_entry.value
class _NSNode(object):
# pylint: disable=W0212
def __init__(self):
self._children = dict()
def load(self):
for child in itervalues(self._children):
child.load()
def add(self, name, entry):
assert isinstance(name, string_types) and isinstance(entry,
_PluginEntry)
self._add(name, entry)
def _add(self, name, entry):
path = name.split('.', 1)
key = path.pop(0)
is_leaf = not path
child = self._children.get(key)
if is_leaf:
if child is not None:
assert isinstance(child, _PluginEntry)
if child != entry:
raise PluginDBError('Duplicate entry point for "%s:%s".\n'
' Previous definition %s\n'
' This definition %s' %
(child.group, child.name, child.info,
entry.info))
else:
self._children[key] = entry
else:
if child is None:
child = _NSNode()
assert isinstance(child, _NSNode)
child._add(path[0], entry)
self._children[key] = child
def __getattr__(self, name):
child = self._children.get(name)
if child is None:
raise PluginDBError('Unknown component name: %s' % name)
if isinstance(child, _PluginEntry):
return child.value
return child
def info(self, name):
assert isinstance(name, string_types)
return self._get(name).info
def get(self, name):
assert isinstance(name, string_types)
return self._get(name).value
def _get(self, name):
path = name.split('.', 1)
key = path.pop(0)
is_leaf = not path
child = self._children.get(key)
if isinstance(child, _PluginEntry):
if not is_leaf:
raise PluginDBError('Excessive namespace specification: %s' %
path[0])
return child
elif child is None:
raise PluginDBError('Unknown component name: %s' % name)
else:
return child._get(path[0])
def _info_all(self):
result = []
for key, child in iteritems(self._children):
if isinstance(child, _PluginEntry):
result.append((key, child.info))
else:
result.extend([
('%s.%s' % (key, name), value)
for name, value in iteritems(child.info_all())
])
return result
def info_all(self):
return dict(self._info_all())
class _Plugins(object):
"""
represent plugins within a namespace
"""
def __init__(self, namespace, interface=None, check_extras=True):
if interface is not None:
assert interface.isOrExtends(IPlugin)
self._group = '%s.%s' % (_NAMESPACE_BASE, namespace)
self._interface = interface
self._check_extras = check_extras
self._real_tree = None
def _load_entry(self, entry):
# pylint: disable=W0703
if self._check_extras:
try:
entry.require()
except Exception as err:
raise PluginDBError('Requirements are not satisfied '
'for %s:%s: %s' % (self._group,
entry.name,
str(err)))
try:
result = entry.load()
except Exception as err:
# log full traceback of the bad entry to help support
traceback.print_exc()
raise PluginDBError('Unable to load %s:%s: %s' %
(self._group, entry.name, str(err)))
if self._interface:
try:
verifyClass(self._interface, result)
except Invalid as err:
raise PluginDBError('Plugin %s:%s does not implement %s: %s' %
(self._group, entry.name,
self._interface.__name__, str(err)))
return result
@property
def _tree(self):
if self._real_tree is None:
self._real_tree = _NSNode()
for entry in iter_entry_points(self._group):
self._real_tree.add(entry.name,
_PluginEntry(self._group, entry,
self._load_entry))
return self._real_tree
def load(self):
self._tree.load()
def info_all(self):
return self._tree.info_all()
@property
def names(self):
# Expensive operation
return list(self.info_all())
def info(self, name):
"""
get information about a particular plugin if known in this namespace
"""
return self._tree.info(name)
def __contains__(self, name):
"""
check if the given name is available as a plugin
"""
try:
return not isinstance(self._tree.get(name), _NSNode)
except PluginDBError:
return False
def get(self, name):
"""
get an instance of the plugin with the given name
"""
return self._tree.get(name)
def __getattr__(self, name):
try:
return getattr(self._tree, name)
except PluginDBError as err:
raise AttributeError(str(err))
class _DeprecatedWorkerPlugins(_Plugins):
"""Plugins for deprecated 'buildbot.buildslave' entry point."""
def __init__(self, namespace, interface=None, check_extras=True):
assert namespace == 'buildslave'
_Plugins.__init__(self, namespace, interface=interface,
check_extras=check_extras)
def __contains__(self, name):
reportDeprecatedWorkerNameUsage(
"'buildbot.plugins.buildslave' plugins namespace is deprecated, "
"use 'buildbot.plugins.worker' instead "
"(you checked is '{0}' name inside "
"'buildbot.plugins.buildslave').".format(name))
return _Plugins.__contains__(self, name)
def get(self, name):
reportDeprecatedWorkerNameUsage(
"'buildbot.plugins.buildslave' plugins namespace is deprecated, "
"use 'buildbot.plugins.worker' instead "
"(you requested '{0}' name of "
"'buildbot.plugins.buildslave' plugin).".format(name))
return _Plugins.get(self, name)
def __getattr__(self, name):
reportDeprecatedWorkerNameUsage(
"'buildbot.plugins.buildslave' plugins namespace is deprecated, "
"use 'buildbot.plugins.worker' instead "
"(you accessed 'buildslave.{0}' name).".format(name))
return _Plugins.__getattr__(self, name)
class _PluginDB(object):
"""
Plugin infrastructure support for Buildbot
"""
def __init__(self):
self._namespaces = dict()
def add_namespace(self, namespace, interface=None, check_extras=True,
load_now=False):
"""
register given namespace in global database of plugins
in case it's already registered, return the registration
"""
tempo = self._namespaces.get(namespace)
if tempo is None:
if namespace in ['worker', 'buildslave']:
# 'buildbot.worker' and 'buildbot.buildslave' namespaces are
# treated in the special way:
# 1. 'buildslave' namespace is deprecated and it's usage
# should emit warnings.
# 2. Built-in into Buildbot plugins were moved from
# 'buildslave' namespace to 'worker' namespace.
# 3. Built-in plugins must still be available under old names
# in 'buildslave' namespace.
# 4. For convenience of using plugins which API is not yet
# moved from 'buildslave' namespace to 'worker', all
# external plugins that are found under 'buildslave'
# namespace should be available through 'worker' namespace
# too.
#
# 'worker' and 'buildslave' namespaces are added at the
# same time with adding workarounds described above.
assert 'worker' not in self._namespaces
assert 'buildslave' not in self._namespaces
# Load plugins in deprecated 'buildbot.buildslave' namespace
# using wrapper that generates warnings, when namespace
# attributes are queried.
buildslave_ns = _DeprecatedWorkerPlugins(
'buildslave', interface, check_extras)
self._namespaces['buildslave'] = buildslave_ns
# Load plugins in 'buildbot.worker' namespace.
worker_ns = _Plugins('worker', interface, check_extras)
self._namespaces['worker'] = worker_ns
# All plugins that use deprecated 'buildslave' namespace
# should be available under 'worker' namespace, so add
# fake entries for them.
worker_group = '%s.%s' % (_NAMESPACE_BASE, 'worker')
for name in buildslave_ns.names:
entry = buildslave_ns._tree._get(name)
assert isinstance(entry, _PluginEntry)
proxy_entry = _PluginEntryProxy(worker_group, entry)
worker_ns._tree.add(name, proxy_entry)
# Add aliases in deprecated 'buildslave' namespace for
# built-in plugins.
old_new_names = [
('BuildSlave', 'Worker'),
('EC2LatentBuildSlave', 'EC2LatentWorker'),
('LibVirtSlave', 'LibVirtWorker'),
('OpenStackLatentBuildSlave', 'OpenStackLatentWorker'),
]
for compat_name, new_name in old_new_names:
buildslave_ns._tree.add(
compat_name, worker_ns._tree._children[new_name])
tempo = self._namespaces[namespace]
elif namespace == 'util':
tempo = _Plugins(namespace, interface, check_extras)
# Handle deprecated plugins names in util namespace
old_new_names = [
('SlaveLock', 'WorkerLock'),
('enforceChosenSlave', 'enforceChosenWorker'),
('BuildslaveChoiceParameter', 'WorkerChoiceParameter'),
]
for compat_name, new_name in old_new_names:
entry = tempo._tree._get(new_name)
assert isinstance(entry, _PluginEntry)
proxy_entry = _DeprecatedPluginEntry(
compat_name, new_name, entry)
tempo._tree.add(compat_name, proxy_entry)
else:
tempo = _Plugins(namespace, interface, check_extras)
self._namespaces[namespace] = tempo
if load_now:
tempo.load()
return tempo
@property
def namespaces(self):
"""
get a list of registered namespaces
"""
return list(self._namespaces)
def info(self):
"""
get information about all plugins in registered namespaces
"""
result = dict()
for name, namespace in iteritems(self._namespaces):
result[name] = namespace.info_all()
return result
_DB = _PluginDB()
def namespaces():
"""
provide information about known namespaces
"""
return _DB.namespaces
def info():
"""
provide information about all known plugins
format of the output:
{<namespace>, {
{<plugin-name>: (<package-name>, <package-version),
...},
...
}
"""
return _DB.info()
def get_plugins(namespace, interface=None, check_extras=True, load_now=False):
"""
helper to get a direct interface to _Plugins
"""
return _DB.add_namespace(namespace, interface, check_extras, load_now)
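# --- Illustrative usage sketch (not part of the original module) ---
# Assuming Buildbot's built-in entry points are installed, a consumer could do:
#
#     workers = get_plugins('worker', load_now=True)
#     if 'Worker' in workers:
#         worker_cls = workers.get('Worker')
#     print(info())  # e.g. {'worker': {'Worker': ('buildbot', '0.9.x'), ...}, ...}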
| mit | -9,122,794,810,420,944,000 | 30.867327 | 79 | 0.557758 | false |
UnrememberMe/pants | tests/python/pants_test/backend/jvm/tasks/test_jar_dependency_management_setup.py | 4 | 20871 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.subsystems.jar_dependency_management import (JarDependencyManagement,
JarDependencyManagementSetup)
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.managed_jar_dependencies import (ManagedJarDependencies,
ManagedJarLibraries)
from pants.backend.jvm.targets.unpacked_jars import UnpackedJars
from pants.base.exceptions import TargetDefinitionException
from pants.build_graph.target import Target
from pants.java.jar.jar_dependency import JarDependency
from pants.java.jar.jar_dependency_utils import M2Coordinate
from pants_test.backend.jvm.tasks.jvm_binary_task_test_base import JvmBinaryTaskTestBase
from pants_test.subsystem.subsystem_util import global_subsystem_instance
class TestJarDependencyManagementSetup(JvmBinaryTaskTestBase):
@classmethod
def task_type(cls):
return JarDependencyManagementSetup
def _init_manager(self, **jar_dependency_management_options):
options = {JarDependencyManagement.options_scope: jar_dependency_management_options}
return global_subsystem_instance(JarDependencyManagement, options=options)
def _single_artifact_set(self, manager, targets):
sets = manager.targets_by_artifact_set(targets)
sets = {a: tgts for a, tgts in sets.items()
if any(isinstance(t, JarLibrary) for t in tgts)}
if len(sets) != 1:
raise ValueError('Test expected there to be only one artifact set! {}'.format(sets))
return next(iter(sets))
def test_default_target(self):
default_target = self.make_target(spec='//foo:management',
target_type=ManagedJarDependencies,
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
])
jar_library = self.make_target(spec='//foo:library',
target_type=JarLibrary,
jars=[
JarDependency(org='foobar', name='foobar'),
])
context = self.context(target_roots=[default_target, jar_library])
manager = self._init_manager(default_target='//foo:management')
task = self.create_task(context)
task.execute()
artifact_set = manager.for_target(jar_library)
self.assertFalse(artifact_set is None)
self.assertEquals('2', artifact_set[M2Coordinate('foobar', 'foobar')].rev)
def test_bad_default(self):
jar_library = self.make_target(spec='//foo:library',
target_type=JarLibrary,
jars=[
JarDependency(org='foobar', name='foobar'),
])
context = self.context(target_roots=[jar_library])
self._init_manager(default_target='//foo:nonexistant')
task = self.create_task(context)
with self.assertRaises(JarDependencyManagementSetup.InvalidDefaultTarget):
task.execute()
def test_no_default_target(self):
# Loading this into the context just to make sure it isn't erroneously used.
management_target = self.make_target(spec='//foo:management',
target_type=ManagedJarDependencies,
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
])
jar_library = self.make_target(spec='//foo:library',
target_type=JarLibrary,
jars=[
JarDependency(org='foobar', name='foobar'),
])
context = self.context(target_roots=[management_target, jar_library])
manager = self._init_manager()
task = self.create_task(context)
task.execute()
artifact_set = manager.for_target(jar_library)
self.assertTrue(artifact_set is None)
def test_explicit_target(self):
management_target = self.make_target(spec='//foo:management',
target_type=ManagedJarDependencies,
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
])
jar_library = self.make_target(spec='//foo:library',
target_type=JarLibrary,
jars=[
JarDependency(org='foobar', name='foobar'),
],
managed_dependencies='//foo:management')
context = self.context(target_roots=[management_target, jar_library])
manager = self._init_manager()
task = self.create_task(context)
task.execute()
artifact_set = manager.for_target(jar_library)
self.assertFalse(artifact_set is None)
self.assertEquals('2', artifact_set[M2Coordinate('foobar', 'foobar')].rev)
def test_explicit_and_default_target(self):
default_target = self.make_target(spec='//foo:foobar',
target_type=ManagedJarDependencies,
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
])
management_target = self.make_target(spec='//foo:management',
target_type=ManagedJarDependencies,
artifacts=[
JarDependency(org='foobar', name='foobar', rev='3'),
])
jar_library = self.make_target(spec='//foo:library',
target_type=JarLibrary,
jars=[
JarDependency(org='foobar', name='foobar'),
],
managed_dependencies='//foo:management')
context = self.context(target_roots=[default_target, management_target, jar_library])
manager = self._init_manager(default_target='//foo:management')
task = self.create_task(context)
task.execute()
artifact_set = manager.for_target(jar_library)
self.assertFalse(artifact_set is None)
self.assertEquals('3', artifact_set[M2Coordinate('foobar', 'foobar')].rev)
def test_using_jar_library_address(self):
pin_jar_library = self.make_target(
spec='//foo:pinned-library',
target_type=JarLibrary,
jars=[
JarDependency(org='foobar', name='foobar', rev='2'),
],
)
management_target = self.make_target(spec='//foo:management',
target_type=ManagedJarDependencies,
artifacts=[
'//foo:pinned-library',
])
jar_library = self.make_target(spec='//foo:library',
target_type=JarLibrary,
jars=[
JarDependency(org='foobar', name='foobar'),
],
managed_dependencies='//foo:management')
context = self.context(target_roots=[management_target, jar_library, pin_jar_library])
manager = self._init_manager()
task = self.create_task(context)
task.execute()
artifact_set = manager.for_target(jar_library)
self.assertFalse(artifact_set is None)
self.assertEquals('2', artifact_set[M2Coordinate('foobar', 'foobar')].rev)
def test_duplicate_coord_error(self):
management_target = self.make_target(spec='//foo:management',
target_type=ManagedJarDependencies,
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
JarDependency(org='foobar', name='foobar', rev='3'),
])
context = self.context(target_roots=[management_target])
self._init_manager()
task = self.create_task(context)
with self.assertRaises(JarDependencyManagementSetup.DuplicateCoordinateError):
task.execute()
def test_missing_version_error(self):
management_target = self.make_target(spec='//foo:management',
target_type=ManagedJarDependencies,
artifacts=[
JarDependency(org='foobar', name='foobar'),
])
context = self.context(target_roots=[management_target])
self._init_manager()
task = self.create_task(context)
with self.assertRaises(JarDependencyManagementSetup.MissingVersion):
task.execute()
def test_duplicate_coord_error_jar(self):
jar_library = self.make_target(spec='//foo:library',
target_type=JarLibrary,
jars=[
JarDependency(org='foobar', name='foobar', rev='3'),
])
management_target = self.make_target(spec='//foo:management',
target_type=ManagedJarDependencies,
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
'//foo:library',
])
context = self.context(target_roots=[jar_library, management_target])
self._init_manager()
task = self.create_task(context)
with self.assertRaises(JarDependencyManagementSetup.DuplicateCoordinateError):
task.execute()
def test_missing_version_error_jar(self):
jar_library = self.make_target(spec='//foo:library',
target_type=JarLibrary,
jars=[
JarDependency(org='foobar', name='foobar', rev=None),
])
management_target = self.make_target(spec='//foo:management',
target_type=ManagedJarDependencies,
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
'//foo:library',
])
context = self.context(target_roots=[jar_library, management_target])
self._init_manager()
task = self.create_task(context)
with self.assertRaises(JarDependencyManagementSetup.MissingVersion):
task.execute()
def test_heterogenous_for_targets(self):
default_target = self.make_target(spec='//foo:management',
target_type=ManagedJarDependencies,
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
])
jar_library1 = self.make_target(spec='//foo:library',
target_type=JarLibrary,
jars=[
JarDependency(org='foobar', name='foobar'),
])
jar_library2 = self.make_target(spec='//foo:library2',
target_type=JarLibrary,
jars=[
JarDependency(org='vegetables', name='potato', rev='3'),
])
unpacked_target = self.make_target(spec='//foo:unpacked',
target_type=UnpackedJars,
libraries=[
':library2',
])
context = self.context(target_roots=[default_target, jar_library1, jar_library2,
unpacked_target])
manager = self._init_manager(default_target='//foo:management')
task = self.create_task(context)
task.execute()
artifact_set = self._single_artifact_set(manager, [jar_library1, jar_library2,
unpacked_target])
self.assertFalse(artifact_set is None)
self.assertEquals('2', artifact_set[M2Coordinate('foobar', 'foobar')].rev)
def test_indirection(self):
management_target = self.make_target(
target_type=ManagedJarDependencies,
spec='//foo:management_indirect',
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
],
)
default_target = self.make_target(
target_type=Target,
spec='//foo:management',
dependencies=[
management_target,
],
)
jar_library1 = self.make_target(
target_type=JarLibrary,
spec='//foo:library',
jars=[
JarDependency(org='foobar', name='foobar'),
],
)
context = self.context(target_roots=[default_target, jar_library1, management_target])
manager = self._init_manager(default_target='//foo:management')
task = self.create_task(context)
task.execute()
artifact_set = self._single_artifact_set(manager, [jar_library1])
self.assertFalse(artifact_set is None)
self.assertEquals('2', artifact_set[M2Coordinate('foobar', 'foobar')].rev)
def test_invalid_managed_jar_libraries(self):
target_aliases = {
'managed_jar_dependencies': ManagedJarDependencies,
'jar_library': JarLibrary,
}
class FakeContext(object):
def create_object(fake, target_type, name, **kwargs):
return self.make_target(target_type=target_aliases[target_type],
spec='//foo:{}'.format(name), **kwargs)
with self.assertRaises(ManagedJarLibraries.JarLibraryNameCollision):
ManagedJarLibraries(FakeContext())(
name='management',
artifacts=[
JarDependency(org='fruit.apple', name='orange', rev='2'),
JarDependency(org='fruit', name='apple', rev='2', classifier='orange'),
],
)
def test_simple_dependency_override(self):
management_target = self.make_target(
target_type=ManagedJarDependencies,
spec='//foo:management_indirect',
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
JarDependency(org='barfoo', name='barfoo', rev='1'),
JarDependency(org='foobar', name='foobar', rev='7', ext='tar'),
],
)
default_target = self.make_target(
target_type=ManagedJarDependencies,
spec='//foo:management',
artifacts=[
JarDependency(org='foobar', name='foobar', rev='3'),
JarDependency(org='fruit', name='apple', rev='4'),
],
dependencies=[
management_target,
],
)
jar_library1 = self.make_target(
target_type=JarLibrary,
spec='//foo:library',
jars=[
JarDependency(org='foobar', name='foobar'),
],
)
def check_task_execution(manager):
context = self.context(target_roots=[default_target, jar_library1, management_target])
task = self.create_task(context)
task.execute()
artifact_set = self._single_artifact_set(manager, [jar_library1])
self.assertFalse(artifact_set is None)
self.assertEquals('3', artifact_set[M2Coordinate('foobar', 'foobar')].rev)
self.assertEquals('1', artifact_set[M2Coordinate('barfoo', 'barfoo')].rev)
self.assertEquals('4', artifact_set[M2Coordinate('fruit', 'apple')].rev)
self.assertEquals('7', artifact_set[M2Coordinate('foobar', 'foobar', ext='tar')].rev)
manager = self._init_manager(default_target='//foo:management')
with self.assertRaises(JarDependencyManagementSetup.IllegalVersionOverride):
check_task_execution(manager)
def test_double_dependency_override(self):
management_target = self.make_target(
target_type=ManagedJarDependencies,
spec='//foo:management_indirect',
artifacts=[
JarDependency(org='foobar', name='foobar', rev='2'),
JarDependency(org='barfoo', name='barfoo', rev='1'),
JarDependency(org='foobar', name='foobar', rev='7', ext='tar'),
],
)
management_target2 = self.make_target(
target_type=ManagedJarDependencies,
spec='//foo:management_indirect2',
artifacts=[
JarDependency(org='foobar', name='foobar', rev='7', ext='tar'),
],
)
indirection_2 = self.make_target(
target_type=Target,
spec='//foo:indirection_2',
dependencies=[
management_target2,
],
)
default_target = self.make_target(
target_type=ManagedJarDependencies,
spec='//foo:management',
artifacts=[
JarDependency(org='foobar', name='foobar', rev='3'),
JarDependency(org='fruit', name='apple', rev='4'),
],
dependencies=[
management_target,
indirection_2,
],
)
jar_library1 = self.make_target(
target_type=JarLibrary,
spec='//foo:library',
jars=[
JarDependency(org='foobar', name='foobar'),
],
)
def check_task_execution(manager):
context = self.context(target_roots=[default_target, jar_library1, management_target,
management_target2, indirection_2])
task = self.create_task(context)
task.execute()
artifact_set = self._single_artifact_set(manager, [jar_library1])
self.assertFalse(artifact_set is None)
self.assertEquals('3', artifact_set[M2Coordinate('foobar', 'foobar')].rev)
self.assertEquals('1', artifact_set[M2Coordinate('barfoo', 'barfoo')].rev)
self.assertEquals('4', artifact_set[M2Coordinate('fruit', 'apple')].rev)
self.assertEquals('7', artifact_set[M2Coordinate('foobar', 'foobar', ext='tar')].rev)
manager = self._init_manager(default_target='//foo:management')
with self.assertRaises(JarDependencyManagementSetup.IllegalVersionOverride):
check_task_execution(manager)
def test_artifacts_indirection(self):
jar_library_unversioned = self.make_target(
target_type=JarLibrary,
spec='//foo:library-unversioned',
jars=[
JarDependency(org='foobar', name='foobar'),
],
)
jar_library_versioned = self.make_target(
target_type=JarLibrary,
spec='//foo:library-versioned',
jars=[
JarDependency(org='foobar', name='foobar', rev='2'),
],
)
indirect_target = self.make_target(
target_type=Target,
spec='//foo:indirect-deps',
dependencies=[
jar_library_versioned,
])
management_target = self.make_target(
target_type=ManagedJarDependencies,
spec='//foo:management',
artifacts=[
'//foo:indirect-deps',
])
context = self.context(target_roots=[
management_target,
indirect_target,
jar_library_versioned,
jar_library_unversioned
])
manager = self._init_manager(default_target='//foo:management')
task = self.create_task(context)
task.execute()
artifact_set = self._single_artifact_set(manager, [jar_library_unversioned])
self.assertFalse(artifact_set is None)
self.assertEquals('2', artifact_set[M2Coordinate('foobar', 'foobar')].rev)
def test_invalid_artifacts_indirection(self):
class DummyTarget(Target):
pass
dummy_target = self.make_target(
target_type=DummyTarget,
spec='//foo:dummy',
)
indirect_target = self.make_target(
target_type=Target,
spec='//foo:indirect',
dependencies=[
dummy_target,
])
management_target = self.make_target(
target_type=ManagedJarDependencies,
spec='//foo:management',
artifacts=[
'//foo:indirect',
])
context = self.context(target_roots=[
management_target,
indirect_target,
dummy_target,
])
self._init_manager()
task = self.create_task(context)
with self.assertRaises(TargetDefinitionException):
task.execute()
| apache-2.0 | -4,616,595,992,423,954,000 | 42.938947 | 97 | 0.55637 | false |
janpipek/physt | physt/compat/dask.py | 1 | 4502 | """Dask-based and dask oriented variants of physt histogram facade functions."""
from typing import TYPE_CHECKING
import dask
from physt import h1 as original_h1
from physt import histogramdd as original_hdd
if TYPE_CHECKING:
import dask.array
options = {"chunk_split": 16}
def _run_dask(
*,
name: str,
data: "dask.array.Array",
compute: bool,
method,
func,
expand_arg: bool = False,
):
"""Construct the computation graph and optionally compute it.
:param name: Name of the method (for graph naming purposes).
:param data: Dask array data
    :param func: Function to run on each array chunk.
    :param compute: If True, compute immediately.
    :param method: None (linear execution), "threaded" or callable
        to apply when computing.
    :param expand_arg: If True, expand nested chunk keys into separate
        arguments for func.
"""
if expand_arg:
graph = dict(
(f"{name}-{data.name}-{index}", (func, *item))
for index, item in enumerate(data.__dask_keys__())
)
else:
graph = dict(
(f"{name}-{data.name}-{index}", (func, item))
for index, item in enumerate(data.__dask_keys__())
)
items = list(graph.keys())
result_name = f"{name}-{data.name}-result"
graph.update(data.dask)
graph[result_name] = (sum, items)
if compute:
if not method:
return dask.get(graph, result_name)
if method in ["thread", "threaded", "threading", "threads"]:
return dask.threaded.get(graph, result_name)
return method(graph, result_name)
return graph, result_name
def histogram1d(data, bins=None, **kwargs):
"""Facade function to create one-dimensional histogram using dask.
Parameters
----------
    data: dask.array.Array or array-like (can have more than one dimension)
See also
--------
physt.histogram
"""
if not hasattr(data, "dask"):
data = dask.array.from_array(data, chunks=int(data.shape[0] / options["chunk_split"]))
if not kwargs.get("adaptive", True):
raise ValueError("Only adaptive histograms supported for dask (currently).")
kwargs["adaptive"] = True
def block_hist(array):
return original_h1(array, bins, **kwargs)
return _run_dask(
name="dask_adaptive1d",
data=data,
compute=kwargs.pop("compute", True),
method=kwargs.pop("dask_method", "threaded"),
func=block_hist,
)
h1 = histogram1d # Alias for convenience
def histogramdd(data, bins=None, **kwargs):
"""Facade function to create multi-dimensional histogram using dask.
Each "column" must be one-dimensional.
"""
from dask.array.rechunk import rechunk
if isinstance(data, (list, tuple)):
data = dask.array.stack(data, axis=1)
if not hasattr(data, "dask"):
data = dask.array.from_array(
data, chunks=(int(data.shape[0] / options["chunk_split"]), data.shape[1])
)
else:
data = rechunk(data, {1: data.shape[1]})
if isinstance(data, dask.array.Array):
if data.ndim != 2:
raise ValueError(
f"Only (n, dim) data allowed for histogramdd, {data.shape} encountered."
)
if not kwargs.get("adaptive", True):
raise ValueError("Only adaptive histograms supported for dask (currently).")
kwargs["adaptive"] = True
def block_hist(array):
return original_hdd(array, bins, **kwargs)
return _run_dask(
name="dask_adaptive_dd",
data=data,
compute=kwargs.pop("compute", True),
method=kwargs.pop("dask_method", "threaded"),
func=block_hist,
expand_arg=True,
)
def histogram2d(data1, data2, bins=None, **kwargs):
"""Facade function to create 2D histogram using dask."""
    # TODO: currently very unoptimized for non-dask inputs!
if "axis_names" not in kwargs:
if hasattr(data1, "name") and hasattr(data2, "name"):
kwargs["axis_names"] = [data1.name, data2.name]
if not hasattr(data1, "dask"):
data1 = dask.array.from_array(data1, chunks=data1.size() / 100)
if not hasattr(data2, "dask"):
data2 = dask.array.from_array(data2, chunks=data2.size() / 100)
data = dask.array.stack([data1, data2], axis=1)
kwargs["dim"] = 2
return histogramdd(data, bins, **kwargs)
h2 = histogram2d # Alias for convenience
def h3(data, bins=None, **kwargs):
"""Facade function to create 3D histogram using dask."""
return histogramdd(data, bins, **kwargs)
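# --- Illustrative usage sketch (not part of this module; assumes numpy and
# dask are installed, values are arbitrary) ---
#
#     import numpy as np
#     import dask.array as da
#     data = da.from_array(np.random.normal(size=100000), chunks=10000)
#     hist = h1(data)                       # adaptive 1D histogram, threaded
#     graph, key = h1(data, compute=False)  # or take the dask graph instead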
| mit | -448,636,275,839,962,500 | 29.013333 | 94 | 0.61817 | false |
shparutask/poker-player-wise-kaa-2 | player.py | 1 | 2157 | import json
import requests
import sys
class Player:
VERSION = "Default Python folding player"
def rank_request(self, cards):
url = "http://rainman.leanpoker.org/rank"
myResponse = requests.post(url, data='cards=' + json.dumps(cards))
print(myResponse.status_code)
if (myResponse.ok):
jData = json.loads(myResponse.content)
return jData['rank']
else:
return -1
def getAllCards(self, game_state):
action_ = game_state['in_action']
mystatus = game_state['players'][action_]
communityCards = game_state['community_cards']
mycards = mystatus['hole_cards']
return communityCards + mycards
def betRequest(self, game_state):
try:
print(json.dumps(game_state))
action_ = game_state['in_action']
mystatus = game_state['players'][action_]
communityCards = game_state['community_cards']
mycards = mystatus['hole_cards']
current_buy = game_state['current_buy_in']
theCall = current_buy - mystatus['bet']
theRaise = theCall+game_state['big_blind']
allIn = theCall + game_state['minimum_raise']
if len(communityCards) > 0:
rank = self.rank_request(communityCards + mycards)
if rank > 2:
return allIn
elif rank == 1 and current_buy < mystatus['stack'] / 4:
return theCall
else:
return 0
firstCard = mycards[0]
secondCard = mycards[1]
if firstCard['rank'] == secondCard['rank']:
return theRaise
else:
if game_state['current_buy_in'] > mystatus['stack'] / 4:
return 0
else:
if theCall == 0:
return theRaise
else:
return theCall
except:
print("Unexpected error:", sys.exc_info()[0])
return 0
def showdown(self, game_state):
pass | mit | 176,093,094,359,867,000 | 27.773333 | 74 | 0.515994 | false |
chapmanb/bcbio-nextgen-vm | tests/conftest.py | 1 | 4182 | import collections
import contextlib
from datetime import datetime
import io
import os
import shutil
import subprocess
import tarfile
import pytest
import requests
import yaml
TEST_DIR = '/tmp/bcbio'
def test_data_dir():
return os.path.join(os.path.dirname(__file__), "data")
def data_dir():
return os.path.join(test_data_dir(), "automated")
@contextlib.contextmanager
def make_workdir():
work_dir = os.path.join(TEST_DIR, "test_automated_output")
if os.path.exists(work_dir):
shutil.rmtree(work_dir)
os.makedirs(work_dir)
# workaround for hardcoded data file paths in test run config files
custom_test_data_dir = os.path.join(TEST_DIR, os.path.basename(test_data_dir()))
with contextlib.suppress(FileExistsError):
os.symlink(test_data_dir(), custom_test_data_dir)
orig_dir = os.getcwd()
try:
os.chdir(work_dir)
yield work_dir
finally:
os.chdir(orig_dir)
def prepare_test_config(data_dir, work_dir):
"""Prepare a bcbio_system YAML file pointing to test data"""
system_config_path = os.path.join(data_dir, "bcbio_system.yaml")
# create local config pointing to reduced genomes
test_config_path = os.path.join(work_dir, "bcbio_system.yaml")
with open(system_config_path) as input_config_file:
config = yaml.safe_load(input_config_file)
config["galaxy_config"] = os.path.join(data_dir, "universe_wsgi.ini")
with open(test_config_path, "w") as output_config_file:
yaml.dump(config, output_config_file)
return test_config_path
@contextlib.contextmanager
def install_cwl_test_files():
orig_dir = os.getcwd()
url = "https://github.com/bcbio/test_bcbio_cwl/archive/master.tar.gz"
dirname = os.path.join(TEST_DIR, 'test_bcbio_cwl-master')
if os.path.exists(dirname):
# check for updated commits if the directory exists
ctime = os.path.getctime(os.path.join(dirname, "README.md"))
dtime = datetime.fromtimestamp(ctime).isoformat()
r = requests.get("https://api.github.com/repos/bcbio/test_bcbio_cwl/commits?since=%s" % dtime).json()
if len(r) > 0:
shutil.rmtree(dirname)
try:
if not os.path.exists(dirname):
print("Downloading CWL test directory: %s" % url)
os.makedirs(dirname)
os.chdir(os.path.dirname(dirname))
r = requests.get(url)
tf = tarfile.open(fileobj=io.BytesIO(r.content), mode='r|gz')
tf.extractall()
os.chdir(dirname)
yield dirname
finally:
os.chdir(orig_dir)
@pytest.fixture
def install_test_files():
"""Download required sequence and reference files"""
DlInfo = collections.namedtuple("DlInfo", "fname dirname version")
download_data = [
DlInfo("110106_FC70BUKAAXX.tar.gz", None, None),
DlInfo("genomes_automated_test.tar.gz", "genomes", 34),
DlInfo("110907_ERP000591.tar.gz", None, None),
DlInfo("100326_FC6107FAAXX.tar.gz", None, 12),
DlInfo("tcga_benchmark.tar.gz", None, 3),
DlInfo("singlecell-rnaseq-test-data.tar.gz", "Harvard-inDrop", 1)
]
for dl in download_data:
url = "https://chapmanb.s3.amazonaws.com/{fname}".format(fname=dl.fname)
dirname = os.path.join(data_dir(), os.pardir,
dl.fname.replace(".tar.gz", "") if dl.dirname is None
else dl.dirname)
if os.path.exists(dirname) and dl.version is not None:
version_file = os.path.join(dirname, "VERSION")
is_old = True
if os.path.exists(version_file):
with open(version_file) as in_handle:
version = int(in_handle.read())
is_old = version < dl.version
if is_old:
shutil.rmtree(dirname)
if not os.path.exists(dirname):
_download_to_dir(url, dirname)
def _download_to_dir(url, dirname):
subprocess.check_call(["wget", url])
subprocess.check_call(["tar", "-xzvpf", os.path.basename(url)])
shutil.move(os.path.basename(dirname), dirname)
os.remove(os.path.basename(url))
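# --- Illustrative usage sketch (hypothetical test, not part of this file) ---
#
#     def test_pipeline(install_test_files):
#         with make_workdir() as work_dir:
#             config_path = prepare_test_config(data_dir(), work_dir)
#             ...  # run the pipeline under test against config_path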
| mit | -297,118,055,162,965,900 | 34.142857 | 109 | 0.63056 | false |
jcrist/pydy | examples/Kane1985/Chapter5/Ex10.6.py | 7 | 1376 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 10.6 from Kane 1985."""
from __future__ import division
from sympy import symbols
from sympy.physics.mechanics import ReferenceFrame, RigidBody, Point
from sympy.physics.mechanics import dot, dynamicsymbols, inertia, msprint
q1, q2, q3 = dynamicsymbols('q1 q2 q3')
#omega1, omega2, omega3 = dynamicsymbols('ω1 ω2 ω3')
q1d, q2d = dynamicsymbols('q1 q2', level=1)
m, I11, I22, I33 = symbols('m I11 I22 I33', real=True, positive=True)
# reference frames
A = ReferenceFrame('A')
B = A.orientnew('B', 'body', [q1, q2, q3], 'xyz')
# points B*, O
pB_star = Point('B*')
pB_star.set_vel(A, 0)
# rigidbody B
I_B_Bs = inertia(B, I11, I22, I33)
rbB = RigidBody('rbB', pB_star, B, m, (I_B_Bs, pB_star))
# kinetic energy
K = rbB.kinetic_energy(A) # velocity of point B* is zero
print('K_ω = {0}'.format(msprint(K)))
print('\nSince I11, I22, I33 are the central principal moments of inertia')
print('let I_min = I11, I_max = I33')
I_min = I11
I_max = I33
H = rbB.angular_momentum(pB_star, A)
K_min = dot(H, H) / I_max / 2
K_max = dot(H, H) / I_min / 2
print('K_ω_min = {0}'.format(msprint(K_min)))
print('K_ω_max = {0}'.format(msprint(K_max)))
print('\nI11/I33, I22/I33 =< 1, since I33 >= I11, I22, so K_ω_min <= K_ω')
print('Similarly, I22/I11, I33/I11 >= 1, '
'since I11 <= I22, I33, so K_ω_max >= K_ω')
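# Worked check (not in the original exercise): with H fixed, K_ω = dot(ω, H)/2
# = (H1**2/I11 + H2**2/I22 + H3**2/I33)/2, and each term is bounded by the
# smallest/largest central moment, giving
# dot(H, H)/(2*I33) <= K_ω <= dot(H, H)/(2*I11) when I11 <= I22 <= I33.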
| bsd-3-clause | -2,349,516,675,173,920,000 | 30.045455 | 75 | 0.644949 | false |
itucsdb1611/itucsdb1611 | classes/operations/information_operations.py | 1 | 2663 | import psycopg2 as dbapi2
from classes.model_config import dsn
class information_operations:
def __init__(self):
self.last_key = None
def add_information(self, informationCVId, information_type_id, description):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
query = "INSERT INTO Information (CVId, InformationTypeId, Description, Deleted) VALUES (%s, %s, %s, False)"
cursor.execute(query, (informationCVId, information_type_id, description))
connection.commit()
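            # NOTE: with psycopg2, cursor.lastrowid is the OID of the last
            # inserted row (often 0 on modern PostgreSQL), not the primary key;
            # an "INSERT ... RETURNING ObjectId" query would be the reliable
            # way to capture the new key.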
self.last_key = cursor.lastrowid
def delete_information(self, key):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
query = """DELETE FROM Information WHERE (ObjectId=%s)"""
cursor.execute(query, (key,))
connection.commit()
def update_information(self, key, description):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
cursor.execute(
"""UPDATE Information SET Description = %s WHERE (ObjectId=%s)""",
(description, key))
connection.commit()
def get_all_information_by_CVId(self, key):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
query = """SELECT Information.ObjectId, Information.CVId, InformationType.Name, Information.Description FROM Information JOIN CV ON(Information.CVId = CV.ObjectId) JOIN InformationType ON(Information.InformationTypeId = InformationType.ObjectId) WHERE (CV.ObjectId = %s)"""
cursor.execute(query, (key,))
results = cursor.fetchall()
return results
def get_all_information_by_ActiveCV_And_PersonId(self, key):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
query = """SELECT Information.ObjectId, Information.CVId, InformationType.Name, Information.Description
FROM Information
JOIN CV ON(Information.CVId = CV.ObjectId)
JOIN InformationType ON(Information.InformationTypeId = InformationType.ObjectId)
INNER JOIN Person ON (CV.PersonId = Person.ObjectId)
WHERE (Information.CVId=(Select CV.ObjectId FROM CV
INNER JOIN Person ON (CV.PersonId = Person.ObjectId)
WHERE (Person.ObjectId = %s AND CV.IsActive=TRUE)))"""
cursor.execute(query, (key,))
results = cursor.fetchall()
return results
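# --- Illustrative usage sketch (not part of this module; assumes a configured
# dsn and existing CV/InformationType rows with the ids used below) ---
#
#     ops = information_operations()
#     ops.add_information(1, 2, 'GPA: 3.5')
#     rows = ops.get_all_information_by_CVId(1)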
| gpl-3.0 | 1,730,767,064,806,212,400 | 49.245283 | 285 | 0.612843 | false |
davidcox/glumpy | demos/solver.py | 1 | 5085 | import scipy.weave as weave
from scipy.weave import converters
"""
Real-Time Fluid Dynamics for Games by Jos Stam (2003).
Parts of the author's work are also protected
under U. S. patent #6,266,071 B1 [Patent].
"""
def set_bnd(N, b, x):
"""
We assume that the fluid is contained in a box with solid walls: no flow
should exit the walls. This simply means that the horizontal component of
the velocity should be zero on the vertical walls, while the vertical
component of the velocity should be zero on the horizontal walls. For the
density and other fields considered in the code we simply assume
continuity. The following code implements these conditions.
"""
if b == 1:
x[0,:] = -x[1,:]
x[N+1,:] = -x[N,:]
else:
x[0,:] = x[1,:]
x[N+1,:] = x[N,:]
if b == 2:
x[:,0] = -x[:,1]
x[:,N+1] = -x[:,N]
else:
x[:,0] = x[:,1]
x[:,N+1] = x[:,N]
x[0,0] = 0.5*(x[1,0]+x[0,1])
x[0,N+1] = 0.5*(x[1,N+1]+x[0,N])
x[N+1,0] = 0.5*(x[N,0]+x[N+1,1])
x[N+1,N+1] = 0.5*(x[N,N+1]+x[N+1,N])
def lin_solve(N, b, x, x0, a, c):
for k in range(0, 20):
x[1:N+1,1:N+1] = (x0[1:N+1,1:N+1]
+a*(x[0:N,1:N+1] +
x[2:N+2,1:N+1]+
x[1:N+1,0:N] +
x[1:N+1,2:N+2]))/c
set_bnd(N, b, x)
# Addition of forces: the density increases due to sources
def add_source(N, x, s, dt):
x += dt*s
# Diffusion: the density diffuses at a certain rate
def diffuse (N, b, x, x0, diff, dt):
"""
The basic idea behind our method is to find the densities which when
diffused backward in time yield the densities we started with. The
simplest iterative solver which works well in practice is Gauss-Seidel
relaxation.
"""
a = dt*diff*N*N
lin_solve(N, b, x, x0, a, 1+4*a)
# Advection: the density follows the velocity field
def advect (N, b, d, d0, u, v, dt):
"""
The basic idea behind the advection step. Instead of moving the cell
centers forward in time through the velocity field, we look for the
particles which end up exactly at the cell centers by tracing backwards in
time from the cell centers.
"""
code = """
#define MAX(a,b) ((a)<(b) ? (b) : (a))
#define MIN(a,b) ((a)>(b) ? (b) : (a))
float x, y, s1, s0, t1, t0;;
int i0, i1, j0, j1;
for (int i=1; i<(N+1); ++i) {
for (int j=1; j<(N+1); ++j) {
x = MIN(MAX(i-dt0*u(i,j),0.5),N+0.5);
y = MIN(MAX(j-dt0*v(i,j),0.5),N+0.5);
i0 = int(x);
i1 = i0+1;
j0 = int(y);
j1 = j0+1;
s1 = x-i0;
s0 = 1-s1;
t1 = y-j0;
t0 = 1-t1;
d(i,j) = s0*(t0*d0(i0,j0)+t1*d0(i0,j1))+
s1*(t0*d0(i1,j0)+t1*d0(i1,j1));
}
}
#undef MIN
#undef MAX
"""
dt0 = dt*N
    # NOTE: the weave-accelerated version below does not work yet; the
    # commented-out pure-Python loops further down are the reference version.
weave.inline(code, ['N', 'u', 'v', 'd', 'd0', 'dt0'],
type_converters=converters.blitz,
compiler='gcc')
# for i in range(1, N+1):
# for j in range(1, N+1):
# x = min(max(i-dt0*u[i,j],0.5),N+0.5)
# y = min(max(j-dt0*v[i,j],0.5),N+0.5)
# i0 = int(x)
# i1 = i0+1
# j0 = int(y)
# j1 = j0+1
# s1 = x-i0
# s0 = 1-s1
# t1 = y-j0
# t0 = 1-t1
# d[i,j] = s0*(t0*d0[i0,j0]+t1*d0[i0,j1])+ \
# s1*(t0*d0[i1,j0]+t1*d0[i1,j1])
set_bnd (N, b, d)
def project(N, u, v, p, div):
h = 1.0/N
div[1:N+1,1:N+1] = -0.5*h*(u[2:N+2,1:N+1]
-u[0:N,1:N+1]
+v[1:N+1,2:N+2]
-v[1:N+1,0:N])
p[1:N+1,1:N+1] = 0
set_bnd (N, 0, div)
set_bnd (N, 0, p)
lin_solve (N, 0, p, div, 1, 4)
    # NOTE: the division by h below is not in Stam's paper
u[1:N+1,1:N+1] -= 0.5*(p[2:N+2,1:N+1]-p[0:N,1:N+1])/h
    # NOTE: the division by h below is not in Stam's paper
v[1:N+1,1:N+1] -= 0.5*(p[1:N+1,2:N+2]-p[1:N+1,0:N])/h
set_bnd (N, 1, u)
set_bnd (N, 2, v)
# Evolving density: advection, diffusion, addition of sources
def dens_step (N, x, x0, u, v, diff, dt):
add_source(N, x, x0, dt)
x0, x = x, x0 # swap
diffuse(N, 0, x, x0, diff, dt)
x0, x = x, x0 # swap
advect(N, 0, x, x0, u, v, dt)
# Evolving velocity: self-advection, viscous diffusion, addition of forces
def vel_step (N, u, v, u0, v0, visc, dt):
add_source(N, u, u0, dt)
add_source(N, v, v0, dt);
u0, u = u, u0 # swap
diffuse(N, 1, u, u0, visc, dt)
v0, v = v, v0 # swap
diffuse(N, 2, v, v0, visc, dt)
project(N, u, v, u0, v0)
u0, u = u, u0 # swap
v0, v = v, v0 # swap
advect(N, 1, u, u0, u0, v0, dt)
advect(N, 2, v, v0, u0, v0, dt)
project(N, u, v, u0, v0)
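# --- Illustrative driver sketch (not part of this module) ---
# Assumes numpy; grids carry a one-cell boundary layer, hence (N+2, N+2).
# advect() relies on scipy.weave, so this runs on Python 2 only.
#
#     import numpy as np
#     N, dt = 32, 0.1
#     shape = (N + 2, N + 2)
#     u, v, u_prev, v_prev = (np.zeros(shape) for _ in range(4))
#     dens, dens_prev = np.zeros(shape), np.zeros(shape)
#     dens_prev[N // 2, N // 2] = 100.0  # density source in the center
#     for _ in range(10):
#         vel_step(N, u, v, u_prev, v_prev, visc=1e-4, dt=dt)
#         dens_step(N, dens, dens_prev, u, v, diff=1e-4, dt=dt)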
| bsd-3-clause | 2,303,759,792,909,406,000 | 31.388535 | 78 | 0.45939 | false |
argivaitv/argivaitv | plugin.video.SportsDevil/lib/customReplacements.py | 10 | 6368 | # -*- coding: utf-8 -*-
import os.path
import re
from string import lower
import common
import utils.fileUtils as fu
from utils.regexUtils import findall
class CustomReplacements(object):
def __init__(self):
self.simpleScheme = {'(@PLATFORM@)': os.environ.get('OS'),
'(@CURRENT_URL@)': fu.getFileContent(os.path.join(common.Paths.cacheDir, 'lasturl')),
'(@LANGUAGE@)': self.languageShortName(common.language)
}
self.complexScheme = { 'import': '(#*@IMPORT=([^@]+)@)',
'find': '(#*@FIND\(.*?\)@)',
'catch': '(#*@CATCH\([^\)]+\)@)'
}
def languageShortName(self, longName):
if str(longName).lower() == 'german':
return 'de'
else:
return 'en'
def regex(self, item):
return self.complexScheme.get(item)
def __replaceImports(self, pathToImports, data):
while True:
m_reg = findall(data, self.regex('import'))
if len(m_reg) > 0:
for idat in m_reg:
if idat[0].startswith('#'):
data = data.replace(idat[0],'')
continue
filename = idat[1]
pathImp = os.path.join(common.Paths.modulesDir, filename)
if not os.path.exists(pathImp):
pathImp = os.path.join(pathToImports, filename)
if not (os.path.exists(pathImp)):
common.log('Skipped Import: ' + filename)
continue
dataImp = fu.getFileContent(pathImp)
dataImp = dataImp.replace('\r\n','\n')
data = data.replace(idat[0], dataImp)
else:
break
return data
def __replaceParameters(self, data, params=[]):
i=1
for par in params:
matches = findall(data,'(@PARAM' + str(i) + '@)')
if matches:
for m in matches:
ptemp = str(par).strip()
data = data.replace(m, ptemp)
i = i + 1
return data
def __replaceFinders(self, data):
m_reg = findall(data, self.regex('find'))
if len(m_reg) > 0:
for idat in m_reg:
if idat.startswith('#'):
continue
ps = idat[6:-2].strip().split(',')
method = ps[0].strip("'")
param1 = ps[1].strip("'")
param2 = ps[2].strip("'")
param3 = ps[3].strip("'")
if method == 'JS1':
jsName = param1
idName = param2
varName = param3
regex = "(?:java)?scr(?:'\+')?ipt[^<]+" + idName + "\s*=\s*[\"']([^\"']+)[\"'][^<]*</scr(?:'\+')?ipt\s*>[^<]*<scr(?:'\+')?ipt[^<]*src=[\"']" + jsName + "[\"']"
lines = "item_infos=" + regex + "\nitem_order=" + varName
data = data.replace(idat, lines)
return data
def __replaceCatchers(self, data):
m_reg = findall(data, self.regex('catch'))
if not (m_reg is None or len(m_reg) == 0):
for idat in m_reg:
if idat.startswith('#'):
continue
ps = idat[7:-2].strip().split(',')
catcherName = ps.pop(0).strip()
# import catcher file and insert parameters
pathImp = os.path.join(common.Paths.catchersDir, catcherName + '.txt')
if not (os.path.exists(pathImp)):
common.log('Skipped Catcher: ' + catcherName)
continue
dataImp = fu.getFileContent(pathImp)
for i in range(len(ps)):
dataImp = dataImp.replace('@PARAM' + str(i+1) + '@',ps.pop(i).strip())
dataImp = dataImp.replace('\r\n','\n')
dataImp += "\nitem_info_name=type\nitem_info_build=video\nitem_url_build=%s"
data = data.replace(idat, dataImp)
return data
def __replaceSimpleVars(self, data):
for s in self.simpleScheme:
m_reg = findall(data, s)
value = self.simpleScheme.get(s)
for idat in m_reg:
data = data.replace(idat, value)
return data
def __replaceConditions(self, data):
starts = [match.start() for match in re.finditer(re.escape('@IF('), data)]
for j in range(len(starts)-1,-1,-1):
s = starts[j]
p_reg = re.compile('((@IF\((.+?)\)@).*?(@ENDIF@))', re.IGNORECASE + re.DOTALL + re.MULTILINE)
m_reg = p_reg.findall(data[s:])
if len(m_reg) > 0:
for m in m_reg:
new_reg=p_reg.match(m[0])
condStr = new_reg.group(3)
hidePassage=False
if condStr.find('==') != -1:
condArr=condStr.split('==')
hidePassage = condArr[0].strip().lower() != condArr[1].strip().lower()
elif condStr.find('!=') != -1:
condArr=condStr.split('!=')
hidePassage = condArr[0].strip().lower() == condArr[1].strip().lower()
if hidePassage:
data = data.replace(str(new_reg.group(1)),'')
else:
tmpdata = str(new_reg.group(1))
tmpdata = tmpdata.replace(str(new_reg.group(2)),'',1)
tmpdata = tmpdata[:-len(str(new_reg.group(4)))]
data = data.replace(str(new_reg.group(1)),tmpdata)
return data
def replace(self, pathToImports, data, lItem, params=[]):
data = self.__replaceParameters(data, params)
data = self.__replaceConditions(data)
data = self.__replaceImports(pathToImports, data)
data = self.__replaceParameters(data, params)
data = self.__replaceFinders(data)
data = self.__replaceCatchers(data)
data = self.__replaceSimpleVars(data)
data = self.__replaceConditions(data)
return data
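# --- Illustrative usage sketch (hypothetical input; not part of this module) ---
#
#     cr = CustomReplacements()
#     data = "@IF(@PARAM1@ == de)@german-only section@ENDIF@ rest"
#     result = cr.replace('/path/to/imports', data, lItem=None, params=['de'])
#     # parameters are substituted first, so the first __replaceConditions()
#     # pass keeps the guarded section here; the final pass handles conditions
#     # introduced by imports, after __replaceSimpleVars() has resolved
#     # variables such as @LANGUAGE@.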
| gpl-2.0 | 1,464,656,541,639,811,800 | 38.552795 | 179 | 0.461683 | false |
Acidity/PyPermissions | pypermissions/factory.py | 1 | 2758 | from pypermissions.permission import Permission
class PermissionFactory(object):
"""General factory for creating permissions. Will select the correct permission type for the provided string and
return that."""
def __init__(self, delimiter=".", wildcard="*", prefix="", templates=None):
"""Allows developers to use custom characters for the special characters in the permissions.
:param delimiter: The character that serves as the separator for permissions. Default value: "."
:param wildcard: The character that serves as the wildcard segment. Default: "*"
:param prefix: The prefix to be added to all permissions created with this factory. Default: None
"""
self.delimiter = delimiter
self.wildcard = wildcard
self.prefix = prefix + self.delimiter if prefix else ""
self.templates = templates if templates else list()
def create_permission(self, permission):
"""Create a permission from the provided string, adding prefixes as necessary.
:param permission: String representing the permission to be returned
        :rtype: :py:class:`Permission` or one of its subclasses
"""
if self.prefix:
permission = self.prefix + permission
for template in self.templates:
if template.matches_format(permission)[0]:
return template.create_permission(permission)
return Permission(name=permission, delimiter=self.delimiter, wildcard=self.wildcard)
def create_child(self, prefix):
"""Create a PermissionFactory with a prefix that is this factory's prefix with the provided prefix added on.
Useful for creating factories for different components of the same application.
:param prefix: The prefix to be added to the end of this factory's prefix
"""
return PermissionFactory(delimiter=self.delimiter, wildcard=self.wildcard, prefix=self.prefix + prefix,
templates=self.templates)
def register_template(self, template, index=None):
"""Register a new template for the factory to use when creating permissions.
:param template: the :py:class:`PermissionTemplate` to be added
"""
if not index:
self.templates.append(template)
else:
self.templates.insert(index, template)
def __repr__(self):
return "{cls}({prefix}, {d}, {w})".format(cls=self.__class__.__name__, prefix=self.prefix[:-1], w=self.wildcard,
d=self.delimiter)
def __eq__(self, other):
return self.delimiter == other.delimiter and self.wildcard == other.wildcard and self.prefix == other.prefix
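# --- Illustrative usage sketch (not part of this module) ---
#
#     factory = PermissionFactory(prefix="myapp")
#     users = factory.create_child("users")     # prefix becomes "myapp.users."
#     perm = users.create_permission("edit")    # Permission "myapp.users.edit"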
| mit | 5,641,672,599,840,932,000 | 40.787879 | 120 | 0.65591 | false |
Beauhurst/django | django/core/management/base.py | 16 | 20294 | """
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
import os
import sys
from argparse import ArgumentParser
from io import TextIOBase
import django
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style, no_style
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class SystemCheckError(CommandError):
"""
The system check framework detected unrecoverable errors.
"""
pass
class CommandParser(ArgumentParser):
"""
Customized ArgumentParser class to improve some error messages and prevent
SystemExit in several occasions, as SystemExit is unacceptable when a
command is called programmatically.
"""
def __init__(self, cmd, **kwargs):
self.cmd = cmd
super().__init__(**kwargs)
def parse_args(self, args=None, namespace=None):
# Catch missing argument for a better error message
if (hasattr(self.cmd, 'missing_args_message') and
not (args or any(not arg.startswith('-') for arg in args))):
self.error(self.cmd.missing_args_message)
return super().parse_args(args, namespace)
def error(self, message):
if self.cmd._called_from_command_line:
super().error(message)
else:
raise CommandError("Error: %s" % message)
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(TextIOBase):
"""
Wrapper around stdout/stderr
"""
@property
def style_func(self):
return self._style_func
@style_func.setter
def style_func(self, style_func):
if style_func and self.isatty():
self._style_func = style_func
else:
self._style_func = lambda x: x
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def isatty(self):
return hasattr(self._out, 'isatty') and self._out.isatty()
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = style_func or self.style_func
self._out.write(style_func(msg))
class BaseCommand:
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``help``
A short description of the command, which will be printed in
help messages.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_migrations_checks``
A boolean; if ``True``, the command prints a warning if the set of
migrations on disk don't match the migrations in the database.
``requires_system_checks``
A boolean; if ``True``, entire Django project will be checked for errors
prior to executing the command. Default value is ``True``.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of translations
being deactivated.
Default value is ``False``.
Make sure you know what you are doing if you decide to change the value
of this option in your custom command if it creates database content
that is locale-sensitive and such content shouldn't contain any
translations (like it happens e.g. with django.contrib.auth
permissions) as activating any locale might cause unintended effects.
"""
# Metadata about this command.
help = ''
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
requires_migrations_checks = False
requires_system_checks = True
def __init__(self, stdout=None, stderr=None, no_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color:
self.style = no_style()
else:
self.style = color_style()
self.stderr.style_func = self.style.ERROR
def get_version(self):
"""
Return the Django version, which should be correct for all built-in
Django commands. User-supplied commands can override this method to
return their own version.
"""
return django.get_version()
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
parser = CommandParser(
self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None,
)
parser.add_argument('--version', action='version', version=self.get_version())
parser.add_argument(
'-v', '--verbosity', action='store', dest='verbosity', default=1,
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output',
)
parser.add_argument(
'--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument(
'--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".',
)
parser.add_argument('--traceback', action='store_true', help='Raise on CommandError exceptions')
parser.add_argument(
'--no-color', action='store_true', dest='no_color',
help="Don't colorize the command output.",
)
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
        Print the help message for this command, derived from the parser
        returned by ``create_parser()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
finally:
try:
connections.close_all()
except ImproperlyConfigured:
# Ignore if connections aren't setup at this point (e.g. no
# configured settings).
pass
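    # Illustrative call, using the hypothetical GreetCommand sketched above:
    # argv[0] is the program, argv[1] the subcommand, and the rest are
    # parsed options/arguments, e.g.
    #     GreetCommand().run_from_argv(['manage.py', 'greet', '--verbosity', '2', 'Ada'])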
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by the ``requires_system_checks`` attribute, except if
force-skipped).
"""
if options['no_color']:
self.style = no_style()
self.stderr.style_func = None
if options.get('stdout'):
self.stdout = OutputWrapper(options['stdout'])
if options.get('stderr'):
self.stderr = OutputWrapper(options['stderr'], self.stderr.style_func)
saved_locale = None
if not self.leave_locale_alone:
# Deactivate translations, because django-admin creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.deactivate_all()
try:
if self.requires_system_checks and not options.get('skip_checks'):
self.check()
if self.requires_migrations_checks:
self.check_migrations()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
output = '%s\n%s\n%s' % (
self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()),
output,
self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()),
)
self.stdout.write(output)
finally:
if saved_locale is not None:
translation.activate(saved_locale)
return output
def _run_checks(self, **kwargs):
return checks.run_checks(**kwargs)
def check(self, app_configs=None, tags=None, display_num_errors=False,
include_deployment_checks=False, fail_level=checks.ERROR):
"""
        Use the system check framework to validate the entire Django project.
        Raise CommandError for any serious message (error or critical).
If there are only light messages (like warnings), print them to stderr
and don't raise an exception.
"""
all_issues = self._run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()]
criticals = [e for e in all_issues if checks.CRITICAL <= e.level and not e.is_silenced()]
sorted_issues = [
(criticals, 'CRITICALS'),
(errors, 'ERRORS'),
(warnings, 'WARNINGS'),
(infos, 'INFOS'),
(debugs, 'DEBUGS'),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
self.style.ERROR(str(e))
if e.is_serious()
else self.style.WARNING(str(e))
for e in issues)
formatted = "\n".join(sorted(formatted))
body += '\n%s:\n%s\n' % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += '\n'
footer += "System check identified %s (%s silenced)." % (
"no issues" if visible_issue_count == 0 else
"1 issue" if visible_issue_count == 1 else
"%s issues" % visible_issue_count,
len(all_issues) - visible_issue_count,
)
if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def check_migrations(self):
"""
        Print a warning if the set of migrations on disk doesn't match the
        migrations in the database.
"""
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
except MigrationSchemaMissing:
self.stdout.write(self.style.NOTICE(
"\nNot checking migrations as it is not possible to access/create the django_migrations table."
))
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
apps_waiting_migration = sorted(set(migration.app_label for migration, backwards in plan))
self.stdout.write(
self.style.NOTICE(
"\nYou have %(unpplied_migration_count)s unapplied migration(s). "
"Your project may not work properly until you apply the "
"migrations for app(s): %(apps_waiting_migration)s." % {
"unpplied_migration_count": len(plan),
"apps_waiting_migration": ", ".join(apps_waiting_migration),
}
)
)
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application labels
as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app_config()``, which will be called once for each application.
"""
missing_args_message = "Enter at least one application label."
def add_arguments(self, parser):
        parser.add_argument('args', metavar='app_label', nargs='+', help='One or more application labels.')
def handle(self, *app_labels, **options):
from django.apps import apps
try:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
except (LookupError, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app_config in app_configs:
app_output = self.handle_app_config(app_config, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app_config(self, app_config, **options):
"""
Perform the command's actions for app_config, an AppConfig instance
corresponding to an application label given on the command line.
"""
        raise NotImplementedError(
            "Subclasses of AppCommand must provide "
            "a handle_app_config() method.")
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
label = 'label'
missing_args_message = "Enter at least one %s." % label
def add_arguments(self, parser):
parser.add_argument('args', metavar=self.label, nargs='+')
def handle(self, *labels, **options):
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
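# A sketch of a LabelCommand subclass (hypothetical, for illustration only):
#
#     class UpcaseCommand(LabelCommand):
#         label = 'word'
#
#         def handle_label(self, label, **options):
#             return label.upper()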
| bsd-3-clause | 7,132,677,243,514,835,000 | 38.102119 | 114 | 0.608899 | false |
Changaco/oh-mainline | vendor/packages/whoosh/src/whoosh/lang/dmetaphone.py | 21 | 18640 | # coding= utf-8
# This script implements the Double Metaphone algorithm (c) 1998, 1999 by
# Lawrence Philips. It was translated to Python from the C source written by
# Kevin Atkinson (http://aspell.net/metaphone/) by Andrew Collins on January 12,
# 2007, who claims no rights to this work.
# http://atomboy.isa-geek.com:8080/plone/Members/acoil/programing/double-metaphone
import re
from whoosh.compat import u
vowels = frozenset("AEIOUY")
slavo_germ_exp = re.compile("W|K|CZ|WITZ")
silent_starts = re.compile("GN|KN|PN|WR|PS")
def double_metaphone(text):
text = text.upper()
slavo_germanic = bool(slavo_germ_exp.search(text))
length = len(text)
text = "--" + text + " "
first = pos = 2
last = first + length - 1
primary = secondary = ""
if silent_starts.match(text, pos):
pos += 1
while pos < length + 2:
ch = text[pos]
if ch in vowels:
# all init vowels now map to 'A'
if pos != first:
next = (None, 1)
else:
next = ("A", 1)
elif ch == "B":
#"-mb", e.g", "dumb", already skipped over... see 'M' below
if text[pos + 1] == "B":
next = ("P", 2)
else:
next = ("P", 1)
elif ch == "C":
# various germanic
if (pos > (first + 1) and text[pos - 2] not in vowels and text[pos - 1:pos + 2] == 'ACH' and \
(text[pos + 2] not in ['I', 'E'] or text[pos - 2:pos + 4] in ['BACHER', 'MACHER'])):
next = ('K', 2)
# special case 'CAESAR'
elif pos == first and text[first:first + 6] == 'CAESAR':
next = ('S', 2)
elif text[pos:pos + 4] == 'CHIA': # italian 'chianti'
next = ('K', 2)
elif text[pos:pos + 2] == 'CH':
# find 'michael'
if pos > first and text[pos:pos + 4] == 'CHAE':
next = ('K', 'X', 2)
elif pos == first and (text[pos + 1:pos + 6] in ['HARAC', 'HARIS'] or \
text[pos + 1:pos + 4] in ["HOR", "HYM", "HIA", "HEM"]) and text[first:first + 5] != 'CHORE':
next = ('K', 2)
# germanic, greek, or otherwise 'ch' for 'kh' sound
elif text[first:first + 4] in ['VAN ', 'VON '] or text[first:first + 3] == 'SCH' \
or text[pos - 2:pos + 4] in ["ORCHES", "ARCHIT", "ORCHID"] \
or text[pos + 2] in ['T', 'S'] \
or ((text[pos - 1] in ["A", "O", "U", "E"] or pos == first) \
and text[pos + 2] in ["L", "R", "N", "M", "B", "H", "F", "V", "W", " "]):
next = ('K', 1)
else:
if pos > first:
if text[first:first + 2] == 'MC':
next = ('K', 2)
else:
next = ('X', 'K', 2)
else:
next = ('X', 2)
# e.g, 'czerny'
elif text[pos:pos + 2] == 'CZ' and text[pos - 2:pos + 2] != 'WICZ':
next = ('S', 'X', 2)
# e.g., 'focaccia'
elif text[pos + 1:pos + 4] == 'CIA':
next = ('X', 3)
# double 'C', but not if e.g. 'McClellan'
elif text[pos:pos + 2] == 'CC' and not (pos == (first + 1) and text[first] == 'M'):
# 'bellocchio' but not 'bacchus'
if text[pos + 2] in ["I", "E", "H"] and text[pos + 2:pos + 4] != 'HU':
# 'accident', 'accede' 'succeed'
if (pos == (first + 1) and text[first] == 'A') or \
text[pos - 1:pos + 4] in ['UCCEE', 'UCCES']:
next = ('KS', 3)
# 'bacci', 'bertucci', other italian
else:
next = ('X', 3)
else:
next = ('K', 2)
elif text[pos:pos + 2] in ["CK", "CG", "CQ"]:
next = ('K', 'K', 2)
elif text[pos:pos + 2] in ["CI", "CE", "CY"]:
# italian vs. english
if text[pos:pos + 3] in ["CIO", "CIE", "CIA"]:
next = ('S', 'X', 2)
else:
next = ('S', 2)
else:
# name sent in 'mac caffrey', 'mac gregor
if text[pos + 1:pos + 3] in [" C", " Q", " G"]:
next = ('K', 3)
else:
if text[pos + 1] in ["C", "K", "Q"] and text[pos + 1:pos + 3] not in ["CE", "CI"]:
next = ('K', 2)
else: # default for 'C'
next = ('K', 1)
elif ch == u('\xc7'):
next = ('S', 1)
elif ch == 'D':
if text[pos:pos + 2] == 'DG':
if text[pos + 2] in ['I', 'E', 'Y']: # e.g. 'edge'
next = ('J', 3)
else:
next = ('TK', 2)
elif text[pos:pos + 2] in ['DT', 'DD']:
next = ('T', 2)
else:
next = ('T', 1)
elif ch == 'F':
if text[pos + 1] == 'F':
next = ('F', 2)
else:
next = ('F', 1)
elif ch == 'G':
if text[pos + 1] == 'H':
if pos > first and text[pos - 1] not in vowels:
next = ('K', 2)
elif pos < (first + 3):
if pos == first: # 'ghislane', ghiradelli
if text[pos + 2] == 'I':
next = ('J', 2)
else:
next = ('K', 2)
# Parker's rule (with some further refinements) - e.g., 'hugh'
elif (pos > (first + 1) and text[pos - 2] in ['B', 'H', 'D']) \
or (pos > (first + 2) and text[pos - 3] in ['B', 'H', 'D']) \
or (pos > (first + 3) and text[pos - 4] in ['B', 'H']):
next = (None, 2)
else:
# e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
if pos > (first + 2) and text[pos - 1] == 'U' \
and text[pos - 3] in ["C", "G", "L", "R", "T"]:
next = ('F', 2)
else:
if pos > first and text[pos - 1] != 'I':
next = ('K', 2)
elif text[pos + 1] == 'N':
if pos == (first + 1) and text[first] in vowels and not slavo_germanic:
next = ('KN', 'N', 2)
else:
# not e.g. 'cagney'
if text[pos + 2:pos + 4] != 'EY' and text[pos + 1] != 'Y' and not slavo_germanic:
next = ('N', 'KN', 2)
else:
next = ('KN', 2)
# 'tagliaro'
elif text[pos + 1:pos + 3] == 'LI' and not slavo_germanic:
next = ('KL', 'L', 2)
# -ges-,-gep-,-gel-, -gie- at beginning
elif pos == first and (text[pos + 1] == 'Y' \
or text[pos + 1:pos + 3] in ["ES", "EP", "EB", "EL", "EY", "IB", "IL", "IN", "IE", "EI", "ER"]):
next = ('K', 'J', 2)
# -ger-, -gy-
elif (text[pos + 1:pos + 2] == 'ER' or text[pos + 1] == 'Y') \
and text[first:first + 6] not in ["DANGER", "RANGER", "MANGER"] \
and text[pos - 1] not in ['E', 'I'] and text[pos - 1:pos + 2] not in ['RGY', 'OGY']:
next = ('K', 'J', 2)
# italian e.g, 'biaggi'
elif text[pos + 1] in ['E', 'I', 'Y'] or text[pos - 1:pos + 3] in ["AGGI", "OGGI"]:
# obvious germanic
if text[first:first + 4] in ['VON ', 'VAN '] or text[first:first + 3] == 'SCH' \
or text[pos + 1:pos + 3] == 'ET':
next = ('K', 2)
else:
# always soft if french ending
if text[pos + 1:pos + 5] == 'IER ':
next = ('J', 2)
else:
next = ('J', 'K', 2)
elif text[pos + 1] == 'G':
next = ('K', 2)
else:
next = ('K', 1)
elif ch == 'H':
# only keep if first & before vowel or btw. 2 vowels
if (pos == first or text[pos - 1] in vowels) and text[pos + 1] in vowels:
next = ('H', 2)
else: # (also takes care of 'HH')
next = (None, 1)
elif ch == 'J':
# obvious spanish, 'jose', 'san jacinto'
if text[pos:pos + 4] == 'JOSE' or text[first:first + 4] == 'SAN ':
if (pos == first and text[pos + 4] == ' ') or text[first:first + 4] == 'SAN ':
next = ('H',)
else:
next = ('J', 'H')
elif pos == first and text[pos:pos + 4] != 'JOSE':
next = ('J', 'A') # Yankelovich/Jankelowicz
else:
# spanish pron. of e.g. 'bajador'
if text[pos - 1] in vowels and not slavo_germanic \
and text[pos + 1] in ['A', 'O']:
next = ('J', 'H')
else:
if pos == last:
next = ('J', ' ')
else:
if text[pos + 1] not in ["L", "T", "K", "S", "N", "M", "B", "Z"] \
and text[pos - 1] not in ["S", "K", "L"]:
next = ('J',)
else:
next = (None,)
if text[pos + 1] == 'J':
next = next + (2,)
else:
next = next + (1,)
elif ch == 'K':
if text[pos + 1] == 'K':
next = ('K', 2)
else:
next = ('K', 1)
elif ch == 'L':
if text[pos + 1] == 'L':
# spanish e.g. 'cabrillo', 'gallegos'
if (pos == (last - 2) and text[pos - 1:pos + 3] in ["ILLO", "ILLA", "ALLE"]) \
or ((text[last - 1:last + 1] in ["AS", "OS"] or text[last] in ["A", "O"]) \
and text[pos - 1:pos + 3] == 'ALLE'):
next = ('L', '', 2)
else:
next = ('L', 2)
else:
next = ('L', 1)
elif ch == 'M':
if text[pos + 1:pos + 4] == 'UMB' \
and (pos + 1 == last or text[pos + 2:pos + 4] == 'ER') \
or text[pos + 1] == 'M':
next = ('M', 2)
else:
next = ('M', 1)
elif ch == 'N':
if text[pos + 1] == 'N':
next = ('N', 2)
else:
next = ('N', 1)
elif ch == u('\xd1'):
next = ('N', 1)
elif ch == 'P':
if text[pos + 1] == 'H':
next = ('F', 2)
elif text[pos + 1] in ['P', 'B']: # also account for "campbell", "raspberry"
next = ('P', 2)
else:
next = ('P', 1)
elif ch == 'Q':
if text[pos + 1] == 'Q':
next = ('K', 2)
else:
next = ('K', 1)
elif ch == 'R':
# french e.g. 'rogier', but exclude 'hochmeier'
if pos == last and not slavo_germanic \
and text[pos - 2:pos] == 'IE' and text[pos - 4:pos - 2] not in ['ME', 'MA']:
next = ('', 'R')
else:
next = ('R',)
if text[pos + 1] == 'R':
next = next + (2,)
else:
next = next + (1,)
elif ch == 'S':
# special cases 'island', 'isle', 'carlisle', 'carlysle'
if text[pos - 1:pos + 2] in ['ISL', 'YSL']:
next = (None, 1)
# special case 'sugar-'
elif pos == first and text[first:first + 5] == 'SUGAR':
next = ('X', 'S', 1)
elif text[pos:pos + 2] == 'SH':
# germanic
if text[pos + 1:pos + 5] in ["HEIM", "HOEK", "HOLM", "HOLZ"]:
next = ('S', 2)
else:
next = ('X', 2)
# italian & armenian
elif text[pos:pos + 3] in ["SIO", "SIA"] or text[pos:pos + 4] == 'SIAN':
if not slavo_germanic:
next = ('S', 'X', 3)
else:
next = ('S', 3)
            # german & anglicisations, e.g. 'smith' matches 'schmidt', 'snider' matches 'schneider'
            # also, -sz- in Slavic languages, although in Hungarian it is pronounced 's'
elif (pos == first and text[pos + 1] in ["M", "N", "L", "W"]) or text[pos + 1] == 'Z':
next = ('S', 'X')
if text[pos + 1] == 'Z':
next = next + (2,)
else:
next = next + (1,)
elif text[pos:pos + 2] == 'SC':
# Schlesinger's rule
if text[pos + 2] == 'H':
# dutch origin, e.g. 'school', 'schooner'
if text[pos + 3:pos + 5] in ["OO", "ER", "EN", "UY", "ED", "EM"]:
# 'schermerhorn', 'schenker'
if text[pos + 3:pos + 5] in ['ER', 'EN']:
next = ('X', 'SK', 3)
else:
next = ('SK', 3)
else:
if pos == first and text[first + 3] not in vowels and text[first + 3] != 'W':
next = ('X', 'S', 3)
else:
next = ('X', 3)
elif text[pos + 2] in ['I', 'E', 'Y']:
next = ('S', 3)
else:
next = ('SK', 3)
# french e.g. 'resnais', 'artois'
elif pos == last and text[pos - 2:pos] in ['AI', 'OI']:
next = ('', 'S', 1)
else:
next = ('S',)
if text[pos + 1] in ['S', 'Z']:
next = next + (2,)
else:
next = next + (1,)
elif ch == 'T':
if text[pos:pos + 4] == 'TION':
next = ('X', 3)
elif text[pos:pos + 3] in ['TIA', 'TCH']:
next = ('X', 3)
elif text[pos:pos + 2] == 'TH' or text[pos:pos + 3] == 'TTH':
# special case 'thomas', 'thames' or germanic
if text[pos + 2:pos + 4] in ['OM', 'AM'] or text[first:first + 4] in ['VON ', 'VAN '] \
or text[first:first + 3] == 'SCH':
next = ('T', 2)
else:
next = ('0', 'T', 2)
elif text[pos + 1] in ['T', 'D']:
next = ('T', 2)
else:
next = ('T', 1)
elif ch == 'V':
if text[pos + 1] == 'V':
next = ('F', 2)
else:
next = ('F', 1)
elif ch == 'W':
# can also be in middle of word
if text[pos:pos + 2] == 'WR':
next = ('R', 2)
elif pos == first and (text[pos + 1] in vowels or text[pos:pos + 2] == 'WH'):
# Wasserman should match Vasserman
if text[pos + 1] in vowels:
next = ('A', 'F', 1)
else:
next = ('A', 1)
# Arnow should match Arnoff
elif (pos == last and text[pos - 1] in vowels) \
or text[pos - 1:pos + 5] in ["EWSKI", "EWSKY", "OWSKI", "OWSKY"] \
or text[first:first + 3] == 'SCH':
next = ('', 'F', 1)
# polish e.g. 'filipowicz'
elif text[pos:pos + 4] in ["WICZ", "WITZ"]:
next = ('TS', 'FX', 4)
else: # default is to skip it
next = (None, 1)
elif ch == 'X':
# french e.g. breaux
next = (None,)
if not(pos == last and (text[pos - 3:pos] in ["IAU", "EAU"] \
or text[pos - 2:pos] in ['AU', 'OU'])):
next = ('KS',)
if text[pos + 1] in ['C', 'X']:
next = next + (2,)
else:
next = next + (1,)
elif ch == 'Z':
# chinese pinyin e.g. 'zhao'
if text[pos + 1] == 'H':
next = ('J',)
elif text[pos + 1:pos + 3] in ["ZO", "ZI", "ZA"] \
or (slavo_germanic and pos > first and text[pos - 1] != 'T'):
next = ('S', 'TS')
else:
next = ('S',)
if text[pos + 1] == 'Z':
next = next + (2,)
else:
next = next + (1,)
else:
next = (None, 1)
if len(next) == 2:
if next[0]:
primary += next[0]
secondary += next[0]
pos += next[1]
elif len(next) == 3:
if next[0]:
primary += next[0]
if next[1]:
secondary += next[1]
pos += next[2]
if primary == secondary:
return (primary, None)
else:
return (primary, secondary)
if __name__ == '__main__':
names = {'maurice': ('MRS', None), 'aubrey': ('APR', None), 'cambrillo': ('KMPRL', 'KMPR'),
'heidi': ('HT', None), 'katherine': ('K0RN', 'KTRN'), 'Thumbail': ('0MPL', 'TMPL'),
'catherine': ('K0RN', 'KTRN'), 'richard': ('RXRT', 'RKRT'), 'bob': ('PP', None), \
'eric': ('ARK', None), 'geoff': ('JF', 'KF'), 'Through': ('0R', 'TR'), 'Schwein': ('XN', 'XFN'),
'dave': ('TF', None), 'ray': ('R', None), 'steven': ('STFN', None), 'bryce': ('PRS', None),
'randy': ('RNT', None), 'bryan': ('PRN', None), 'Rapelje': ('RPL', None),
'brian': ('PRN', None), 'otto': ('AT', None), 'auto': ('AT', None), 'Dallas': ('TLS', None),
'maisey': ('MS', None), 'zhang': ('JNK', None), 'Chile': ('XL', None),
'Jose': ('HS', None), 'Arnow': ('ARN', 'ARNF'), 'solilijs': ('SLLS', None),
'Parachute': ('PRKT', None), 'Nowhere': ('NR', None), 'Tux': ('TKS', None)}
for name in names.keys():
assert (double_metaphone(name) == names[name]), 'For "%s" function returned %s. Should be %s.' % (name, double_metaphone(name), names[name])
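    # Example outputs, taken from the table above: double_metaphone('geoff')
    # returns ('JF', 'KF'), and double_metaphone('bob') returns ('PP', None).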
| agpl-3.0 | -4,152,823,970,094,223,400 | 42.551402 | 148 | 0.359227 | false |
nvoron23/arangodb | 3rdParty/V8-4.3.61/tools/try_perf.py | 19 | 1075 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import find_depot_tools
import sys
find_depot_tools.add_depot_tools_to_path()
from git_cl import Changelist
BOTS = [
'v8_linux32_perf_try',
'v8_linux64_perf_try',
]
def main(tests):
cl = Changelist()
if not cl.GetIssue():
print 'Need to upload first'
return 1
props = cl.GetIssueProperties()
if props.get('closed'):
print 'Cannot send tryjobs for a closed CL'
return 1
if props.get('private'):
print 'Cannot use trybots with private issue'
return 1
if not tests:
print 'Please specify the benchmarks to run as arguments.'
return 1
masters = {'internal.client.v8': dict((b, tests) for b in BOTS)}
cl.RpcServer().trigger_distributed_try_jobs(
cl.GetIssue(), cl.GetMostRecentPatchset(), cl.GetBranch(),
False, None, masters)
return 0
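# Usage sketch (assumes the CL has already been uploaded; the benchmark names
# below are illustrative):
#     tools/try_perf.py octane sunspider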
if __name__ == "__main__": # pragma: no cover
sys.exit(main(sys.argv[1:]))
| apache-2.0 | -827,118,711,520,392,600 | 23.431818 | 72 | 0.676279 | false |
vladikoff/fxa-mochitest | tests/mozbase/mozdevice/tests/sut_fileMethods.py | 3 | 2481 | #!/usr/bin/env python
import hashlib
import mozdevice
import mozlog
import shutil
import tempfile
import unittest
from sut import MockAgent
class TestFileMethods(unittest.TestCase):
""" Class to test misc file methods """
content = "What is the answer to the life, universe and everything? 42"
h = hashlib.md5()
h.update(content)
temp_hash = h.hexdigest()
def test_validateFile(self):
with tempfile.NamedTemporaryFile() as f:
f.write(self.content)
f.flush()
# Test Valid Hashes
commands_valid = [("hash /sdcard/test/file", self.temp_hash)]
m = MockAgent(self, commands=commands_valid)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=mozlog.DEBUG)
self.assertTrue(d.validateFile('/sdcard/test/file', f.name))
# Test invalid hashes
commands_invalid = [("hash /sdcard/test/file", "0this0hash0is0completely0invalid")]
m = MockAgent(self, commands=commands_invalid)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=mozlog.DEBUG)
self.assertFalse(d.validateFile('/sdcard/test/file', f.name))
def test_getFile(self):
fname = "/mnt/sdcard/file"
commands = [("pull %s" % fname, "%s,%s\n%s" % (fname, len(self.content), self.content)),
("hash %s" % fname, self.temp_hash)]
with tempfile.NamedTemporaryFile() as f:
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=mozlog.DEBUG)
# No error means success
self.assertEqual(None, d.getFile(fname, f.name))
def test_getDirectory(self):
fname = "/mnt/sdcard/file"
commands = [("isdir /mnt/sdcard", "TRUE"),
("isdir /mnt/sdcard", "TRUE"),
("cd /mnt/sdcard", ""),
("ls", "file"),
("isdir %s" % fname, "FALSE"),
("pull %s" % fname, "%s,%s\n%s" % (fname, len(self.content), self.content)),
("hash %s" % fname, self.temp_hash)]
tmpdir = tempfile.mkdtemp()
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=mozlog.DEBUG)
self.assertEqual(None, d.getDirectory("/mnt/sdcard", tmpdir))
# Cleanup
shutil.rmtree(tmpdir)
if __name__ == '__main__':
unittest.main()
| mpl-2.0 | 623,408,842,041,311,600 | 33.458333 | 96 | 0.574768 | false |
harshilasu/LinkurApp | y/google-cloud-sdk/lib/googlecloudsdk/dns/lib/util.py | 2 | 1158 | # Copyright 2013 Google Inc. All Rights Reserved.
"""Common utility functions for the dns tool."""
import json
def GetError(error, verbose=False):
"""Returns a ready-to-print string representation from the http response.
Args:
    error: The Http error response object; its content attribute holds the raw json.
verbose: Whether or not to print verbose messages [default false]
Returns:
A ready-to-print string representation of the error.
"""
data = json.loads(error.content)
reasons = ','.join([x['reason'] for x in data['error']['errors']])
status = data['error']['code']
message = data['error']['message']
code = error.resp.reason
if verbose:
PrettyPrint(data)
return ('ResponseError: status=%s, code=%s, reason(s)=%s\nmessage=%s'
% (str(status), code, reasons, message))
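# The error payload assumed by the parsing above looks roughly like this
# (values are illustrative):
#   {"error": {"code": 403, "message": "...",
#              "errors": [{"reason": "forbidden"}]}}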
def GetErrorMessage(error):
error = json.loads(error.content).get('error', {})
return '\n{0} (code: {1})'.format(
error.get('message', ''), error.get('code', ''))
def PrettyPrintString(value):
return json.dumps(value, sort_keys=True, indent=4, separators=(',', ': '))
def PrettyPrint(value):
print PrettyPrintString(value)
| gpl-3.0 | -4,674,655,921,802,708,000 | 27.95 | 76 | 0.674439 | false |
mfraezz/osf.io | api_tests/users/views/test_user_nodes_list.py | 4 | 14721 | import pytest
from django.utils.timezone import now
from api.base.settings.defaults import API_BASE
from api_tests.nodes.filters.test_filters import NodesListFilteringMixin, NodesListDateFilteringMixin
from osf_tests.factories import (
AuthUserFactory,
CollectionFactory,
OSFGroupFactory,
PreprintFactory,
ProjectFactory,
RegistrationFactory,
UserFactory,
)
from website.views import find_bookmark_collection
from osf.utils import permissions
from osf.utils.workflows import DefaultStates
@pytest.mark.django_db
class TestUserNodes:
@pytest.fixture()
def user_one(self):
user_one = AuthUserFactory()
user_one.social['twitter'] = 'RheisenDennis'
user_one.save()
return user_one
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def public_project_user_one(self, user_one):
return ProjectFactory(
title='Public Project User One',
is_public=True,
creator=user_one)
@pytest.fixture()
def private_project_user_one(self, user_one):
return ProjectFactory(
title='Private Project User One',
is_public=False,
creator=user_one)
@pytest.fixture()
def public_project_user_two(self, user_two):
return ProjectFactory(
title='Public Project User Two',
is_public=True,
creator=user_two)
@pytest.fixture()
def private_project_user_two(self, user_two):
return ProjectFactory(
title='Private Project User Two',
is_public=False,
creator=user_two)
@pytest.fixture()
def deleted_project_user_one(self, user_one):
return CollectionFactory(
title='Deleted Project User One',
is_public=False,
creator=user_one,
deleted=now())
@pytest.fixture()
def folder(self):
return CollectionFactory()
@pytest.fixture()
def deleted_folder(self, user_one):
return CollectionFactory(
title='Deleted Folder User One',
is_public=False,
creator=user_one,
deleted=now())
@pytest.fixture()
def bookmark_collection(self, user_one):
return find_bookmark_collection(user_one)
@pytest.fixture()
def registration(self, user_one, public_project_user_one):
return RegistrationFactory(
project=public_project_user_one,
creator=user_one,
is_public=True)
def test_user_nodes(
self, app, user_one, user_two,
public_project_user_one,
public_project_user_two,
private_project_user_one,
private_project_user_two,
deleted_project_user_one,
folder, deleted_folder, registration):
# test_authorized_in_gets_200
url = '/{}users/{}/nodes/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
# test_anonymous_gets_200
url = '/{}users/{}/nodes/'.format(API_BASE, user_one._id)
res = app.get(url)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
# test_get_projects_logged_in
url = '/{}users/{}/nodes/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert public_project_user_one._id in ids
assert private_project_user_one._id in ids
assert public_project_user_two._id not in ids
assert private_project_user_two._id not in ids
assert folder._id not in ids
assert deleted_folder._id not in ids
assert deleted_project_user_one._id not in ids
assert registration._id not in ids
# test_get_projects_not_logged_in
url = '/{}users/{}/nodes/'.format(API_BASE, user_one._id)
res = app.get(url)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert public_project_user_one._id in ids
assert private_project_user_one._id not in ids
assert public_project_user_two._id not in ids
assert private_project_user_two._id not in ids
assert folder._id not in ids
assert deleted_project_user_one._id not in ids
assert registration._id not in ids
# test_get_projects_logged_in_as_different_user
url = '/{}users/{}/nodes/'.format(API_BASE, user_two._id)
res = app.get(url, auth=user_one.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert public_project_user_two._id in ids
assert public_project_user_one._id not in ids
assert private_project_user_one._id not in ids
assert private_project_user_two._id not in ids
assert folder._id not in ids
assert deleted_project_user_one._id not in ids
assert registration._id not in ids
url = '/{}users/{}/nodes/?sort=-title'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert public_project_user_one._id == ids[0]
assert private_project_user_one._id == ids[1]
url = '/{}users/{}/nodes/?sort=title'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert public_project_user_one._id == ids[1]
assert private_project_user_one._id == ids[0]
# test_osf_group_member_node_shows_up_in_user_nodes
group_mem = AuthUserFactory()
url = '/{}users/{}/nodes/'.format(API_BASE, group_mem._id)
res = app.get(url, auth=group_mem.auth)
assert len(res.json['data']) == 0
group = OSFGroupFactory(creator=group_mem)
private_project_user_one.add_osf_group(group, permissions.READ)
res = app.get(url, auth=group_mem.auth)
assert len(res.json['data']) == 1
res = app.get(url, auth=user_one.auth)
assert len(res.json['data']) == 1
private_project_user_one.delete()
res = app.get(url, auth=user_one.auth)
assert len(res.json['data']) == 0
@pytest.mark.django_db
class TestUserNodesPreprintsFiltering:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def no_preprints_node(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def valid_preprint_node(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def orphaned_preprint_node(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def abandoned_preprint_node(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def valid_preprint(self, valid_preprint_node):
return PreprintFactory(project=valid_preprint_node)
@pytest.fixture()
def abandoned_preprint(self, abandoned_preprint_node):
preprint = PreprintFactory(project=abandoned_preprint_node,
is_published=False)
preprint.machine_state = DefaultStates.INITIAL.value
return preprint
@pytest.fixture()
def orphaned_preprint(self, orphaned_preprint_node):
orphaned_preprint = PreprintFactory(project=orphaned_preprint_node)
orphaned_preprint.primary_file = None
orphaned_preprint.save()
return orphaned_preprint
@pytest.fixture()
def url_base(self):
return '/{}users/me/nodes/?filter[preprint]='.format(API_BASE)
def test_filter_false(
self, app, user, abandoned_preprint_node, abandoned_preprint, orphaned_preprint, valid_preprint, valid_preprint_node,
no_preprints_node, orphaned_preprint_node, url_base):
expected_ids = [
abandoned_preprint_node._id,
no_preprints_node._id,
orphaned_preprint_node._id]
res = app.get('{}false'.format(url_base), auth=user.auth)
actual_ids = [n['id'] for n in res.json['data']]
assert set(expected_ids) == set(actual_ids)
def test_filter_true(
self, app, user, valid_preprint_node, orphaned_preprint_node, orphaned_preprint, abandoned_preprint_node, abandoned_preprint,
valid_preprint, url_base):
expected_ids = [valid_preprint_node._id]
res = app.get('{}true'.format(url_base), auth=user.auth)
actual_ids = [n['id'] for n in res.json['data']]
assert set(expected_ids) == set(actual_ids)
@pytest.mark.django_db
class TestNodeListFiltering(NodesListFilteringMixin):
@pytest.fixture()
def url(self):
return '/{}users/me/nodes/?'.format(API_BASE)
@pytest.mark.django_db
class TestNodeListDateFiltering(NodesListDateFilteringMixin):
@pytest.fixture()
def url(self):
return '/{}users/me/nodes/?'.format(API_BASE)
@pytest.mark.django_db
class TestNodeListPermissionFiltering:
@pytest.fixture()
def creator(self):
return UserFactory()
@pytest.fixture()
def contrib(self):
return AuthUserFactory()
@pytest.fixture()
def no_perm_node(self, creator):
return ProjectFactory(creator=creator)
@pytest.fixture()
def read_node(self, creator, contrib):
node = ProjectFactory(creator=creator)
node.add_contributor(contrib, permissions=permissions.READ, save=True)
return node
@pytest.fixture()
def write_node(self, creator, contrib):
node = ProjectFactory(creator=creator)
node.add_contributor(contrib, permissions=permissions.WRITE, save=True)
return node
@pytest.fixture()
def admin_node(self, creator, contrib):
node = ProjectFactory(creator=creator)
node.add_contributor(contrib, permissions=permissions.ADMIN, save=True)
return node
@pytest.fixture()
def url(self):
return '/{}users/me/nodes/?filter[current_user_permissions]='.format(API_BASE)
def test_current_user_permissions_filter(self, app, url, contrib, no_perm_node, read_node, write_node, admin_node):
# test filter read
res = app.get('{}read'.format(url), auth=contrib.auth)
assert len(res.json['data']) == 3
assert set([read_node._id, write_node._id, admin_node._id]) == set([node['id'] for node in res.json['data']])
# test filter write
res = app.get('{}write'.format(url), auth=contrib.auth)
assert len(res.json['data']) == 2
assert set([admin_node._id, write_node._id]) == set([node['id'] for node in res.json['data']])
# test filter admin
res = app.get('{}admin'.format(url), auth=contrib.auth)
assert len(res.json['data']) == 1
assert [admin_node._id] == [node['id'] for node in res.json['data']]
# test filter null
res = app.get('{}null'.format(url), auth=contrib.auth, expect_errors=True)
assert res.status_code == 400
user2 = AuthUserFactory()
osf_group = OSFGroupFactory(creator=user2)
read_node.add_osf_group(osf_group, permissions.READ)
write_node.add_osf_group(osf_group, permissions.WRITE)
admin_node.add_osf_group(osf_group, permissions.ADMIN)
# test filter group member read
res = app.get('{}read'.format(url), auth=user2.auth)
assert len(res.json['data']) == 3
assert set([read_node._id, write_node._id, admin_node._id]) == set([node['id'] for node in res.json['data']])
# test filter group member write
res = app.get('{}write'.format(url), auth=user2.auth)
assert len(res.json['data']) == 2
assert set([admin_node._id, write_node._id]) == set([node['id'] for node in res.json['data']])
# test filter group member admin
res = app.get('{}admin'.format(url), auth=user2.auth)
assert len(res.json['data']) == 1
assert [admin_node._id] == [node['id'] for node in res.json['data']]
def test_filter_my_current_user_permissions_to_other_users_nodes(self, app, contrib, no_perm_node, read_node, write_node, admin_node):
url = '/{}users/{}/nodes/?filter[current_user_permissions]='.format(API_BASE, contrib._id)
me = AuthUserFactory()
# test filter read
res = app.get('{}read'.format(url), auth=me.auth)
assert len(res.json['data']) == 0
read_node.add_contributor(me, permissions.READ)
read_node.save()
res = app.get('{}read'.format(url), auth=me.auth)
assert len(res.json['data']) == 1
assert set([read_node._id]) == set([node['id'] for node in res.json['data']])
# test filter write
res = app.get('{}write'.format(url), auth=me.auth)
assert len(res.json['data']) == 0
write_node.add_contributor(me, permissions.WRITE)
write_node.save()
res = app.get('{}write'.format(url), auth=me.auth)
assert len(res.json['data']) == 1
assert set([write_node._id]) == set([node['id'] for node in res.json['data']])
# test filter admin
res = app.get('{}admin'.format(url), auth=me.auth)
assert len(res.json['data']) == 0
res = app.get('{}admin'.format(url), auth=me.auth)
admin_node.add_contributor(me, permissions.ADMIN)
admin_node.save()
res = app.get('{}admin'.format(url), auth=me.auth)
assert len(res.json['data']) == 1
assert set([admin_node._id]) == set([node['id'] for node in res.json['data']])
res = app.get('{}read'.format(url), auth=me.auth)
assert len(res.json['data']) == 3
assert set([read_node._id, write_node._id, admin_node._id]) == set([node['id'] for node in res.json['data']])
# test filter nonauthenticated_user v2.11
read_node.is_public = True
read_node.save()
res = app.get('{}read&version=2.11'.format(url))
assert len(res.json['data']) == 0
# test filter nonauthenticated_user v2.2
res = app.get('{}read&version=2.2'.format(url))
assert len(res.json['data']) == 1
assert set([read_node._id]) == set([node['id'] for node in res.json['data']])
# test filter nonauthenticated_user v2.2
res = app.get('{}write&version=2.2'.format(url))
assert len(res.json['data']) == 0
# test filter nonauthenticated_user v2.2
res = app.get('{}admin&version=2.2'.format(url))
assert len(res.json['data']) == 0
| apache-2.0 | -1,142,894,514,728,001,300 | 34.817518 | 138 | 0.615379 | false |
buildbot/buildbot | master/buildbot/secrets/providers/passwordstore.py | 2 | 2478 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
password store based provider
"""
import os
from pathlib import Path
from twisted.internet import defer
from buildbot import config
from buildbot.secrets.providers.base import SecretProviderBase
from buildbot.util import runprocess
class SecretInPass(SecretProviderBase):
"""
secret is stored in a password store
"""
name = "SecretInPass"
def checkPassIsInPath(self):
if not any([(Path(p) / "pass").is_file() for p in os.environ["PATH"].split(":")]):
config.error("pass does not exist in PATH")
def checkPassDirectoryIsAvailableAndReadable(self, dirname):
if not os.access(dirname, os.F_OK):
config.error("directory {} does not exist".format(dirname))
def checkConfig(self, gpgPassphrase=None, dirname=None):
self.checkPassIsInPath()
if dirname:
self.checkPassDirectoryIsAvailableAndReadable(dirname)
def reconfigService(self, gpgPassphrase=None, dirname=None):
self._env = {**os.environ}
if gpgPassphrase:
self._env["PASSWORD_STORE_GPG_OPTS"] = "--passphrase {}".format(gpgPassphrase)
if dirname:
self._env["PASSWORD_STORE_DIR"] = dirname
@defer.inlineCallbacks
def get(self, entry):
"""
get the value from pass identified by 'entry'
"""
try:
rc, output = yield runprocess.run_process(self.master.reactor,
['pass', entry], env=self._env,
collect_stderr=False, stderr_is_error=True)
if rc != 0:
return None
return output.decode("utf-8", "ignore").splitlines()[0]
except IOError:
return None
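# Usage sketch (illustrative master.cfg snippet; the passphrase and directory
# are placeholders):
#     c['secretsProviders'] = [
#         SecretInPass(gpgPassphrase='passphrase',
#                      dirname='/home/buildbot/.password-store'),
#     ]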
| gpl-2.0 | -4,792,404,961,214,427,000 | 35.441176 | 97 | 0.649314 | false |
youfoh/webkit-efl | Tools/Scripts/webkitpy/style/checkers/text_unittest.py | 18 | 3671 | #!/usr/bin/python
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for text_style.py."""
import unittest
import text as text_style
from text import TextChecker
class TextStyleTestCase(unittest.TestCase):
"""TestCase for text_style.py"""
def assertNoError(self, lines):
"""Asserts that the specified lines has no errors."""
self.had_error = False
def error_for_test(line_number, category, confidence, message):
"""Records if an error occurs."""
self.had_error = True
text_style.process_file_data('', lines, error_for_test)
self.assert_(not self.had_error, '%s should not have any errors.' % lines)
def assertError(self, lines, expected_line_number):
"""Asserts that the specified lines has an error."""
self.had_error = False
def error_for_test(line_number, category, confidence, message):
"""Checks if the expected error occurs."""
self.assertEquals(expected_line_number, line_number)
self.assertEquals('whitespace/tab', category)
self.had_error = True
text_style.process_file_data('', lines, error_for_test)
self.assert_(self.had_error, '%s should have an error [whitespace/tab].' % lines)
def test_no_error(self):
"""Tests for no error cases."""
self.assertNoError([''])
self.assertNoError(['abc def', 'ggg'])
def test_error(self):
"""Tests for error cases."""
self.assertError(['2009-12-16\tKent Tamura\t<[email protected]>'], 1)
self.assertError(['2009-12-16 Kent Tamura <[email protected]>',
'',
'\tReviewed by NOBODY.'], 3)
class TextCheckerTest(unittest.TestCase):
"""Tests TextChecker class."""
def mock_handle_style_error(self):
pass
def test_init(self):
"""Test __init__ constructor."""
checker = TextChecker("foo.txt", self.mock_handle_style_error)
self.assertEquals(checker.file_path, "foo.txt")
self.assertEquals(checker.handle_style_error, self.mock_handle_style_error)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | 5,091,315,396,920,996,000 | 38.053191 | 89 | 0.684282 | false |
Learn-Android-app/buck | third-party/py/twitter-commons/src/python/twitter/common/python/interpreter.py | 18 | 13136 | """
twitter.common.python support for interpreter environments.
"""
from __future__ import absolute_import
try:
from numbers import Integral
except ImportError:
Integral = (int, long)
from collections import defaultdict
import os
import re
import subprocess
import sys
from .base import maybe_requirement, maybe_requirement_list
from .compatibility import string
from .tracer import Tracer
from pkg_resources import (
find_distributions,
Distribution,
Requirement,
)
TRACER = Tracer(predicate=Tracer.env_filter('PEX_VERBOSE'),
prefix='twitter.common.python.interpreter: ')
# Determine in the most platform-compatible way possible the identity of the interpreter
# and its known packages.
ID_PY = b"""
import sys
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
print("%s %s %s %s" % (
subversion,
sys.version_info[0],
sys.version_info[1],
sys.version_info[2]))
setuptools_path = None
try:
import pkg_resources
except ImportError:
sys.exit(0)
requirements = {}
for item in sys.path:
for dist in pkg_resources.find_distributions(item):
requirements[str(dist.as_requirement())] = dist.location
for requirement_str, location in requirements.items():
rs = requirement_str.split('==', 2)
if len(rs) == 2:
print('%s %s %s' % (rs[0], rs[1], location))
"""
class PythonCapability(list):
def __init__(self, requirements=None):
super(PythonCapability, self).__init__(maybe_requirement_list(requirements or []))
class PythonIdentity(object):
class Error(Exception): pass
class InvalidError(Error): pass
class UnknownRequirement(Error): pass
# TODO(user) Support interpreter-specific versions, e.g. PyPy-2.2.1
HASHBANGS = {
'CPython': 'python%(major)d.%(minor)d',
'Jython': 'jython',
'PyPy': 'pypy',
}
@classmethod
def get_subversion(cls):
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
return subversion
@classmethod
def get(cls):
return cls(cls.get_subversion(), sys.version_info[0], sys.version_info[1], sys.version_info[2])
@classmethod
def from_id_string(cls, id_string):
values = id_string.split()
if len(values) != 4:
raise cls.InvalidError("Invalid id string: %s" % id_string)
return cls(str(values[0]), int(values[1]), int(values[2]), int(values[3]))
@classmethod
def from_path(cls, dirname):
interp, version = dirname.split('-')
major, minor, patch = version.split('.')
return cls(str(interp), int(major), int(minor), int(patch))
def __init__(self, interpreter, major, minor, patch):
for var in (major, minor, patch):
assert isinstance(var, Integral)
self._interpreter = interpreter
self._version = (major, minor, patch)
@property
def interpreter(self):
return self._interpreter
@property
def version(self):
return self._version
@property
def requirement(self):
return self.distribution.as_requirement()
@property
def distribution(self):
return Distribution(project_name=self._interpreter, version='.'.join(map(str, self._version)))
@classmethod
def parse_requirement(cls, requirement, default_interpreter='CPython'):
if isinstance(requirement, Requirement):
return requirement
elif isinstance(requirement, string):
try:
requirement = Requirement.parse(requirement)
except ValueError:
try:
requirement = Requirement.parse('%s%s' % (default_interpreter, requirement))
except ValueError:
raise ValueError('Unknown requirement string: %s' % requirement)
return requirement
else:
raise ValueError('Unknown requirement type: %r' % (requirement,))
def matches(self, requirement):
"""Given a Requirement, check if this interpreter matches."""
try:
requirement = self.parse_requirement(requirement, self._interpreter)
except ValueError as e:
raise self.UnknownRequirement(str(e))
return self.distribution in requirement
def hashbang(self):
hashbang_string = self.HASHBANGS.get(self.interpreter, 'CPython') % {
'major': self._version[0],
'minor': self._version[1],
'patch': self._version[2],
}
return '#!/usr/bin/env %s' % hashbang_string
@property
def python(self):
# return the python version in the format of the 'python' key for distributions
# specifically, '2.6', '2.7', '3.2', etc.
return '%d.%d' % (self.version[0:2])
def __str__(self):
return '%s-%s.%s.%s' % (self._interpreter,
self._version[0], self._version[1], self._version[2])
def __repr__(self):
return 'PythonIdentity(%r, %s, %s, %s)' % (
self._interpreter, self._version[0], self._version[1], self._version[2])
def __eq__(self, other):
return all([isinstance(other, PythonIdentity),
self.interpreter == other.interpreter,
self.version == other.version])
def __hash__(self):
return hash((self._interpreter, self._version))
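# Illustrative use of PythonIdentity (the requirement strings are examples;
# a bare version specifier defaults to the current interpreter class):
#     PythonIdentity.get().matches('CPython>=2.6,<3')
#     PythonIdentity.get().matches('>=2.6')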
class PythonInterpreter(object):
REGEXEN = (
re.compile(r'jython$'),
re.compile(r'python$'),
re.compile(r'python[23].[0-9]$'),
re.compile(r'pypy$'),
re.compile(r'pypy-1.[0-9]$'),
)
CACHE = {} # memoize executable => PythonInterpreter
try:
# Versions of distribute prior to the setuptools merge would automatically replace
# 'setuptools' requirements with 'distribute'. It provided the 'replacement' kwarg
# to toggle this, but it was removed post-merge.
COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0', replacement=False)
except TypeError:
COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0')
class Error(Exception): pass
class IdentificationError(Error): pass
class InterpreterNotFound(Error): pass
@classmethod
def get(cls):
return cls.from_binary(sys.executable)
@classmethod
def all(cls, paths=None):
if paths is None:
paths = os.getenv('PATH', '').split(':')
return cls.filter(cls.find(paths))
@classmethod
def _parse_extras(cls, output_lines):
def iter_lines():
for line in output_lines:
try:
dist_name, dist_version, location = line.split()
except ValueError:
raise cls.IdentificationError('Could not identify requirement: %s' % line)
yield ((dist_name, dist_version), location)
return dict(iter_lines())
@classmethod
def _from_binary_internal(cls, path_extras):
def iter_extras():
for item in sys.path + list(path_extras):
for dist in find_distributions(item):
if dist.version:
yield ((dist.key, dist.version), dist.location)
return cls(sys.executable, PythonIdentity.get(), dict(iter_extras()))
@classmethod
def _from_binary_external(cls, binary, path_extras):
environ = cls.sanitized_environment()
environ['PYTHONPATH'] = ':'.join(path_extras)
po = subprocess.Popen(
[binary],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environ)
so, _ = po.communicate(ID_PY)
output = so.decode('utf8').splitlines()
if len(output) == 0:
raise cls.IdentificationError('Could not establish identity of %s' % binary)
identity, extras = output[0], output[1:]
return cls(
binary,
PythonIdentity.from_id_string(identity),
extras=cls._parse_extras(extras))
@classmethod
def expand_path(cls, path):
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
return [os.path.join(path, fn) for fn in os.listdir(path)]
return []
@classmethod
def from_env(cls, hashbang):
"""Resolve a PythonInterpreter as /usr/bin/env would.
:param hashbang: A string, e.g. "python3.3" representing some binary on the $PATH.
"""
paths = os.getenv('PATH', '').split(':')
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if hashbang == basefile:
try:
return cls.from_binary(fn)
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
@classmethod
def from_binary(cls, binary, path_extras=None):
path_extras = path_extras or ()
if binary not in cls.CACHE:
if binary == sys.executable:
cls.CACHE[binary] = cls._from_binary_internal(path_extras)
else:
cls.CACHE[binary] = cls._from_binary_external(binary, path_extras)
return cls.CACHE[binary]
@classmethod
def find(cls, paths):
"""
Given a list of files or directories, try to detect python interpreters amongst them.
Returns a list of PythonInterpreter objects.
"""
pythons = []
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN):
try:
pythons.append(cls.from_binary(fn))
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
continue
return pythons
@classmethod
def filter(cls, pythons):
"""
Given a map of python interpreters in the format provided by PythonInterpreter.find(),
filter out duplicate versions and versions we would prefer not to use.
Returns a map in the same format as find.
"""
good = []
MAJOR, MINOR, SUBMINOR = range(3)
def version_filter(version):
return (version[MAJOR] == 2 and version[MINOR] >= 6 or
version[MAJOR] == 3 and version[MINOR] >= 2)
all_versions = set(interpreter.identity.version for interpreter in pythons)
good_versions = filter(version_filter, all_versions)
for version in good_versions:
# For each candidate, use the latest version we find on the filesystem.
candidates = defaultdict(list)
for interp in pythons:
if interp.identity.version == version:
candidates[interp.identity.interpreter].append(interp)
for interp_class in candidates:
candidates[interp_class].sort(
key=lambda interp: os.path.getmtime(interp.binary), reverse=True)
good.append(candidates[interp_class].pop(0))
return good
@classmethod
def sanitized_environment(cls):
# N.B. This is merely a hack because sysconfig.py on the default OS X
# installation of 2.6/2.7 breaks.
env_copy = os.environ.copy()
env_copy.pop('MACOSX_DEPLOYMENT_TARGET', None)
return env_copy
@classmethod
def replace(cls, requirement):
self = cls.get()
if self.identity.matches(requirement):
return False
for pi in cls.all():
if pi.identity.matches(requirement):
break
else:
raise cls.InterpreterNotFound('Could not find interpreter matching filter!')
os.execve(pi.binary, [pi.binary] + sys.argv, cls.sanitized_environment())
def __init__(self, binary, identity, extras=None):
"""Construct a PythonInterpreter.
You should probably PythonInterpreter.from_binary instead.
:param binary: The full path of the python binary.
:param identity: The :class:`PythonIdentity` of the PythonInterpreter.
:param extras: A mapping from (dist.key, dist.version) to dist.location
of the extras associated with this interpreter.
"""
self._binary = os.path.realpath(binary)
self._binary_stat = os.stat(self._binary)
self._extras = extras or {}
self._identity = identity
def with_extra(self, key, version, location):
extras = self._extras.copy()
extras[(key, version)] = location
return self.__class__(self._binary, self._identity, extras)
@property
def extras(self):
return self._extras.copy()
@property
def binary(self):
return self._binary
@property
def identity(self):
return self._identity
@property
def python(self):
return self._identity.python
@property
def version(self):
return self._identity.version
@property
def version_string(self):
return str(self._identity)
def satisfies(self, capability):
if not isinstance(capability, PythonCapability):
raise TypeError('Capability must be a PythonCapability, got %s' % type(capability))
return not any(self.get_location(req) is None for req in capability)
def get_location(self, req):
req = maybe_requirement(req)
for dist, location in self.extras.items():
dist_name, dist_version = dist
if req.key == dist_name and dist_version in req:
return location
def __hash__(self):
return hash(self._binary_stat)
def __eq__(self, other):
if not isinstance(other, PythonInterpreter):
return False
return self._binary_stat == other._binary_stat
def __lt__(self, other):
if not isinstance(other, PythonInterpreter):
return False
return self.version < other.version
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self._binary, self._identity, self._extras)
| apache-2.0 | -4,083,062,231,172,067,000 | 29.337182 | 99 | 0.657811 | false |
lebrice/SimpleParsing | examples/ugly/ugly_example_after.py | 1 | 10566 | """Parameters module."""
import argparse
import simple_parsing
import os
import random
# import getpass
# import torch
# import torch.nn.parallel
# import torch.backends.cudnn as cudnn
# import torch.utils.data
from dataclasses import dataclass, field
from typing import *
from simple_parsing import choice
@dataclass
class DatasetParams:
""" Dataset Parameters """
default_root: ClassVar[str] = "/dataset" # the default root directory to use.
dataset: str = 'objects_folder_multi' #laptop,pistol
""" dataset name: [shapenet, objects_folder, objects_folder]') """
root_dir: str = default_root # dataset root directory
root_dir1: str = default_root # dataset root directory
root_dir2: str = default_root # dataset root directory
root_dir3: str = default_root # dataset root directory
root_dir4: str = default_root # dataset root directory
synsets: str = '' # Synsets from the shapenet dataset to use
classes: str = 'bowl' # Classes from the shapenet dataset to use #,cap,can,laptop
workers: int = 0 # number of data loading workers
light_change: int = 2000 # number of data loading workers
toy_example: bool = False # Use toy example
use_old_sign: bool = True # Use toy example
use_quartic: bool = False # Use toy example
rescaled: bool = False # Use toy example
full_sphere_sampling: bool = False # Use toy example
full_sphere_sampling_light: bool = True # Use toy example
random_rotation: bool = True # Use toy example
stoch_enc: bool = False # Use toy example
only_background: bool = False # Use toy example
only_foreground: bool = False # Use toy example
rotate_foreground: bool = False # Use toy example
use_penality: bool = True # Use toy example
use_mesh: bool = True # Render dataset with meshes
    gen_model_path: Optional[str] = None # dataset root directory
gen_model_path2: Optional[str] = None # dataset root directory
dis_model_path: Optional[str] = None # dataset root directory
dis_model_path2: Optional[str] = None # dataset root directory
bg_model: str = "../../../data/halfbox.obj" # Background model path
gz_gi_loss: float = 0.0 # grad z and grad img consistency.
pixel_samples: int = 1 # Samples per pixel.
@dataclass
class NetworkParams:
# Network parameters
gen_type: str = choice("dcgan", "mlp", "cnn", "resnet", default="dcgan") # One of: mlp, cnn, dcgan, resnet # try resnet :)
gen_norm: str = choice("batchnorm", "instancenorm", default="batchnorm") # One of: None, batchnorm, instancenorm
ngf: int = 75 # number of features in the generator network
    nef: int = 65 # number of features in the encoder network
gen_nextra_layers: int = 0 # number of extra layers in the generator network
gen_bias_type: Optional[str] = choice(None, "plane", default=None) # One of: None, plane
netG: str = '' # path to netG (to continue training)
netG2: str = '' # path to netG2 (normal generator to continue training)
fix_splat_pos: bool = True # X and Y coordinates are fix
zloss: float = 0.0 # use Z loss
unit_normalloss: float = 0.0 # use unit_normal loss
norm_sph_coord: bool = True # Use spherical coordinates for the normal
max_gnorm: float = 500. # max grad norm to which it will be clipped (if exceeded)
disc_type: str = choice("cnn", "dcgan", default="cnn") # One of: cnn, dcgan
disc_norm: str = choice("None", "batchnorm", "instancenorm", default="None") # One of: None, batchnorm, instancenorm
ndf: int = 75 # number of features in the discriminator network
disc_nextra_layers: int = 0 # number of extra layers in the discriminator network
nz: int = 100 # size of the latent z vector
netD: str = '' # path to netD (to continue training)
    netE: str = '' # path to netE (to continue training)
@dataclass
class OptimizerParams:
""" Optimization parameters """
optimizer: str = 'adam' # Optimizer (adam, rmsprop)
    lr: float = 0.0001 # learning rate
lr_sched_type: str = 'step' # Learning rate scheduler type.
z_lr_sched_step: int = 100000 # Learning rate schedule for z.
lr_iter: int = 10000 # Learning rate operation iterations
normal_lr_sched_step: int = 100000 # Learning rate schedule for normal.
z_lr_sched_gamma: float = 1.0 # Learning rate gamma for z.
normal_lr_sched_gamma: float = 1.0 # Learning rate gamma for normal.
normal_consistency_loss_weight: float = 1e-3 # Normal consistency loss weight.
    z_norm_weight_init: float = 1e-2 # Initial weight of the z-norm loss.
    z_norm_activate_iter: float = 1000 # Iteration at which the z-norm loss activates.
spatial_var_loss_weight: float = 1e-2 # Spatial variance loss weight.
    grad_img_depth_loss: float = 2.0 # Gradient image/depth consistency loss weight.
spatial_loss_weight: float = 0.5 # Spatial smoothness loss weight.
    beta1: float = 0.0 # beta1 for adam
n_iter: int = 76201 # number of iterations to train
batchSize: int = 4 # input batch size
alt_opt_zn_interval: Optional[int] = None
""" Alternating optimization interval.
- None: joint optimization
- 20: every 20 iterations, etc.
"""
alt_opt_zn_start: int = 100000
"""Alternating optimization start interation.
- -1: starts immediately,
- '100: starts alternating after the first 100 iterations.
"""
@dataclass
class GanParams:
""" Gan parameters """
criterion: str = choice('GAN', 'WGAN', default="WGAN") # GAN Training criterion
gp: str = choice("None", 'original', default="original") # Add gradient penalty
gp_lambda: float = 10. # GP lambda
critic_iters: int = 5 # Number of critic iterations
clamp: float = 0.01 # clamp the weights for WGAN
@dataclass
class OtherParams:
""" Other parameters """
    manualSeed: int = 1 # manual seed
no_cuda: bool = False # enables cuda
ngpu: int = 1 # number of GPUs to use
out_dir: str = "default_output"
name: str = ''
@dataclass
class CameraParams:
"""Camera Parameters"""
    cam_pos: Tuple[float, float, float] = (0., 0., 0.) # Camera position.
width: int = 128
height: int = 128
cam_dist: float = 3.0 # Camera distance from the center of the object
nv: int = 10 # Number of views to generate
angle: int = 30 # cam angle
fovy: float = 30 # Field of view in the vertical direction.
focal_length: float = 0.1 # focal length
theta: Tuple[float, float] = (20, 80) # Angle in degrees from the z-axis.
phi: Tuple[float, float] = (20, 70) # Angle in degrees from the x-axis.
axis: Tuple[float, float, float] = (0., 1., 0.) # Axis for random camera position.
at: Tuple[float, float, float] = (0.05, 0.0, 0.0) # Camera lookat position.
sphere_halfbox: bool = False # Renders demo sphere-halfbox
norm_depth_image_only: bool = False # Render on the normalized depth image.
mesh: bool = False # Render as mesh if enabled.
test_cam_dist: bool = False # Check if the images are consistent with a camera at a fixed distance.
@dataclass
class RenderingParams:
splats_img_size: int = 128 # the height / width of the number of generator splats
render_type: str = 'img' # render the image or the depth map [img, depth]
render_img_size: int = 128 # Width/height of the rendering image
splats_radius: float = 0.05 # radius of the splats (fix)
est_normals: bool = False # Estimate normals from splat positions.
n_splats: Optional[int] = None
same_view: bool = False # before we add conditioning on cam pose, this is necessary
""" data with view fixed """
print_interval: int = 10 # Print loss interval.
save_image_interval: int = 100 # Save image interval.
save_interval: int = 5000 # Save state interval.
@dataclass
class Parameters:
"""base options."""
# Dataset parameters.
dataset: DatasetParams = DatasetParams()
# Set of parameters related to the optimizer.
optimizer: OptimizerParams = OptimizerParams()
# GAN Settings
gan: GanParams = GanParams()
# Camera settings
camera: CameraParams = CameraParams()
# Rendering-related settings
rendering: RenderingParams = RenderingParams()
# other (misc) settings
other: OtherParams = OtherParams()
def __post_init__(self):
""" Post-initialization code """
# Make output folder
# try:
# os.makedirs(self.other.out_dir)
# except OSError:
# pass
# Set render number of channels
if self.rendering.render_type == 'img':
self.rendering.render_img_nc = 3
elif self.rendering.render_type == 'depth':
self.rendering.render_img_nc = 1
else:
raise ValueError('Unknown rendering type')
# # Set random seed
# if self.other.manualSeed is None:
# self.other.manualSeed = random.randint(1, 10000)
# print("Random Seed: ", self.other.manualSeed)
# random.seed(self.other.manualSeed)
# torch.manual_seed(self.other.manualSeed)
# if not self.other.no_cuda:
# torch.cuda.manual_seed_all(self.other.manualSeed)
# # Set number of splats param
# self.rendering.n_splats = self.rendering.splats_img_size ** 2
# # Check CUDA is selected
# cudnn.benchmark = True
# if torch.cuda.is_available() and self.other.no_cuda:
# print("WARNING: You have a CUDA device, so you should "
# "probably run with --cuda")
@classmethod
def parse(cls):
parser = simple_parsing.ArgumentParser()
parser.add_arguments(cls, dest="parameters")
args = parser.parse_args()
instance: Parameters = args.parameters
return instance
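# Example invocation (illustrative; the exact flag names are generated by
# simple_parsing from the dataclass fields above):
#   python ugly_example_after.py --lr 0.001 --batchSize 8 --dataset shapenet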
params = Parameters.parse()
print(params) | mit | 2,088,566,076,051,556,600 | 43.748918 | 129 | 0.616506 | false |
google/oss-fuzz | infra/cifuzz/filestore/github_actions/__init__.py | 1 | 3437 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a filestore using Github actions artifacts."""
import os
import logging
import http_utils
import filestore
from filestore.github_actions import github_api
from third_party.github_actions_toolkit.artifact import artifact_client
class GithubActionsFilestore(filestore.BaseFilestore):
"""Implementation of BaseFilestore using Github actions artifacts. Relies on
github_actions_toolkit for using the GitHub actions API and the github_api
module for using GitHub's standard API. We need to use both because the GitHub
actions API is the only way to upload an artifact but it does not support
downloading artifacts from other runs. The standard GitHub API does support
this however."""
def __init__(self, config):
super().__init__(config)
self.github_api_http_headers = github_api.get_http_auth_headers(config)
def upload_directory(self, name, directory): # pylint: disable=no-self-use
"""Uploads |directory| as artifact with |name|."""
directory = os.path.abspath(directory)
# Get file paths.
file_paths = []
for root, _, curr_file_paths in os.walk(directory):
for file_path in curr_file_paths:
file_paths.append(os.path.join(root, file_path))
logging.debug('file_paths: %s', file_paths)
# TODO(metzman): Zip so that we can upload directories within directories
# and save time?
return artifact_client.upload_artifact(name, file_paths, directory)
def download_corpus(self, name, dst_directory): # pylint: disable=unused-argument,no-self-use
"""Downloads the corpus located at |name| to |dst_directory|."""
return self._download_artifact(name, dst_directory)
def _find_artifact(self, name):
"""Finds an artifact using the GitHub API and returns it."""
logging.debug('listing artifact')
artifacts = self._list_artifacts()
artifact = github_api.find_artifact(name, artifacts)
logging.debug('Artifact: %s.', artifact)
return artifact
def _download_artifact(self, name, dst_directory):
"""Downloads artifact with |name| to |dst_directory|."""
artifact = self._find_artifact(name)
if not artifact:
logging.warning('Could not download artifact: %s.', name)
return artifact
download_url = artifact['archive_download_url']
return http_utils.download_and_unpack_zip(
download_url, dst_directory, headers=self.github_api_http_headers)
def _list_artifacts(self):
"""Returns a list of artifacts."""
return github_api.list_artifacts(self.config.project_repo_owner,
self.config.project_repo_name,
self.github_api_http_headers)
def download_latest_build(self, name, dst_directory):
"""Downloads latest build with name |name| to |dst_directory|."""
return self._download_artifact(name, dst_directory)
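  # Illustrative usage (assumes a populated CIFuzz config object):
  #   filestore = GithubActionsFilestore(config)
  #   filestore.upload_directory('build-artifacts', '/tmp/build')
  #   filestore.download_corpus('corpus-target', '/tmp/corpus')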
| apache-2.0 | 5,008,271,780,587,500,000 | 40.409639 | 96 | 0.71254 | false |
dmitriz/zipline | tests/utils/test_events.py | 1 | 12988 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import datetime
from functools import partial
from itertools import islice
import random
from unittest import TestCase
from nose_parameterized import parameterized
import numpy as np
from six import iteritems
from six.moves import range, map
from zipline.finance.trading import TradingEnvironment
import zipline.utils.events
from zipline.utils.events import (
EventRule,
StatelessRule,
Always,
Never,
AfterOpen,
ComposedRule,
BeforeClose,
NotHalfDay,
NthTradingDayOfWeek,
NDaysBeforeLastTradingDayOfWeek,
NthTradingDayOfMonth,
NDaysBeforeLastTradingDayOfMonth,
StatefulRule,
OncePerDay,
_build_offset,
_build_date,
_build_time,
EventManager,
Event,
MAX_MONTH_RANGE,
MAX_WEEK_RANGE,
)
from zipline.utils.test_utils import subtest
# A day known to be a half day.
HALF_DAY = datetime.datetime(year=2014, month=7, day=3)
# A day known to be a full day.
FULL_DAY = datetime.datetime(year=2014, month=9, day=24)
def param_range(*args):
return ([n] for n in range(*args))
class TestUtils(TestCase):
@parameterized.expand([
('_build_date', _build_date),
('_build_time', _build_time),
])
def test_build_none(self, name, f):
with self.assertRaises(ValueError):
f(None, {})
def test_build_offset_default(self):
default = object()
self.assertIs(default, _build_offset(None, {}, default))
def test_build_offset_both(self):
with self.assertRaises(ValueError):
_build_offset(datetime.timedelta(minutes=1), {'minutes': 1}, None)
def test_build_offset_exc(self):
with self.assertRaises(TypeError):
# object() is not an instance of a timedelta.
_build_offset(object(), {}, None)
def test_build_offset_kwargs(self):
kwargs = {'minutes': 1}
self.assertEqual(
_build_offset(None, kwargs, None),
datetime.timedelta(**kwargs),
)
def test_build_offset_td(self):
td = datetime.timedelta(minutes=1)
self.assertEqual(
_build_offset(td, {}, None),
td,
)
def test_build_date_both(self):
with self.assertRaises(ValueError):
_build_date(
datetime.date(year=2014, month=9, day=25), {
'year': 2014,
'month': 9,
'day': 25,
},
)
def test_build_date_kwargs(self):
kwargs = {'year': 2014, 'month': 9, 'day': 25}
self.assertEqual(
_build_date(None, kwargs),
datetime.date(**kwargs),
)
def test_build_date_date(self):
date = datetime.date(year=2014, month=9, day=25)
self.assertEqual(
_build_date(date, {}),
date,
)
def test_build_time_both(self):
with self.assertRaises(ValueError):
_build_time(
datetime.time(hour=1, minute=5), {
'hour': 1,
'minute': 5,
},
)
def test_build_time_kwargs(self):
kwargs = {'hour': 1, 'minute': 5}
self.assertEqual(
_build_time(None, kwargs),
datetime.time(**kwargs),
)
class TestEventManager(TestCase):
def setUp(self):
self.em = EventManager()
self.event1 = Event(Always(), lambda context, data: None)
self.event2 = Event(Always(), lambda context, data: None)
def test_add_event(self):
self.em.add_event(self.event1)
self.assertEqual(len(self.em._events), 1)
def test_add_event_prepend(self):
self.em.add_event(self.event1)
self.em.add_event(self.event2, prepend=True)
self.assertEqual([self.event2, self.event1], self.em._events)
def test_add_event_append(self):
self.em.add_event(self.event1)
self.em.add_event(self.event2)
self.assertEqual([self.event1, self.event2], self.em._events)
def test_checks_should_trigger(self):
class CountingRule(Always):
count = 0
def should_trigger(self, dt, env):
CountingRule.count += 1
return True
for r in [CountingRule] * 5:
self.em.add_event(
Event(r(), lambda context, data: None)
)
mock_algo_class = namedtuple('FakeAlgo', ['trading_environment'])
mock_algo = mock_algo_class(trading_environment="fake_env")
self.em.handle_data(mock_algo, None, datetime.datetime.now())
self.assertEqual(CountingRule.count, 5)
class TestEventRule(TestCase):
def test_is_abstract(self):
with self.assertRaises(TypeError):
EventRule()
def test_not_implemented(self):
with self.assertRaises(NotImplementedError):
super(Always, Always()).should_trigger('a', env=None)
def minutes_for_days():
"""
500 randomly selected days.
    This is used to make sure our test coverage is unbiased towards any rules.
We use a random sample because testing on all the trading days took
around 180 seconds on my laptop, which is far too much for normal unit
testing.
We manually set the seed so that this will be deterministic.
Results of multiple runs were compared to make sure that this is actually
true.
This returns a generator of tuples each wrapping a single generator.
    Iterating over this yields a single day, iterating over the day yields
the minutes for that day.
"""
env = TradingEnvironment()
random.seed('deterministic')
return ((env.market_minutes_for_day(random.choice(env.trading_days)),)
for _ in range(500))
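# Illustrative iteration (not part of the test suite): each yielded tuple
# wraps a single day's minute generator.
#   for (day_minutes,) in minutes_for_days():
#       for minute in day_minutes:
#           ...  # one market minute of the randomly chosen day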
class RuleTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
cls.class_ = None # Mark that this is the base class.
@classmethod
def tearDownClass(cls):
del cls.env
def test_completeness(self):
"""
Tests that all rules are being tested.
"""
if not self.class_:
return # This is the base class testing, it is always complete.
dem = {
k for k, v in iteritems(vars(zipline.utils.events))
if isinstance(v, type) and
issubclass(v, self.class_) and
v is not self.class_
}
ds = {
k[5:] for k in dir(self)
if k.startswith('test') and k[5:] in dem
}
self.assertTrue(
dem <= ds,
msg='This suite is missing tests for the following classes:\n' +
'\n'.join(map(repr, dem - ds)),
)
class TestStatelessRules(RuleTestCase):
@classmethod
def setUpClass(cls):
super(TestStatelessRules, cls).setUpClass()
cls.class_ = StatelessRule
cls.sept_days = cls.env.days_in_range(
np.datetime64(datetime.date(year=2014, month=9, day=1)),
np.datetime64(datetime.date(year=2014, month=9, day=30)),
)
cls.sept_week = cls.env.minutes_for_days_in_range(
datetime.date(year=2014, month=9, day=21),
datetime.date(year=2014, month=9, day=26),
)
@subtest(minutes_for_days(), 'ms')
def test_Always(self, ms):
        should_trigger = partial(Always().should_trigger, env=self.env)
        self.assertTrue(all(map(should_trigger, ms)))
@subtest(minutes_for_days(), 'ms')
def test_Never(self, ms):
should_trigger = partial(Never().should_trigger, env=self.env)
self.assertFalse(any(map(should_trigger, ms)))
@subtest(minutes_for_days(), 'ms')
def test_AfterOpen(self, ms):
should_trigger = partial(
AfterOpen(minutes=5, hours=1).should_trigger,
env=self.env,
)
for m in islice(ms, 64):
# Check the first 64 minutes of data.
# We use 64 because the offset is from market open
# at 13:30 UTC, meaning the first minute of data has an
# offset of 1.
self.assertFalse(should_trigger(m))
for m in islice(ms, 64, None):
# Check the rest of the day.
self.assertTrue(should_trigger(m))
@subtest(minutes_for_days(), 'ms')
def test_BeforeClose(self, ms):
ms = list(ms)
should_trigger = partial(
BeforeClose(hours=1, minutes=5).should_trigger,
env=self.env
)
for m in ms[0:-66]:
self.assertFalse(should_trigger(m))
for m in ms[-66:]:
self.assertTrue(should_trigger(m))
@subtest(minutes_for_days(), 'ms')
def test_NotHalfDay(self, ms):
should_trigger = partial(NotHalfDay().should_trigger, env=self.env)
self.assertTrue(should_trigger(FULL_DAY))
self.assertFalse(should_trigger(HALF_DAY))
@subtest(param_range(MAX_WEEK_RANGE), 'n')
def test_NthTradingDayOfWeek(self, n):
should_trigger = partial(NthTradingDayOfWeek(n).should_trigger,
env=self.env)
prev_day = self.sept_week[0].date()
n_tdays = 0
for m in self.sept_week:
if prev_day < m.date():
n_tdays += 1
prev_day = m.date()
if should_trigger(m):
self.assertEqual(n_tdays, n)
else:
self.assertNotEqual(n_tdays, n)
@subtest(param_range(MAX_WEEK_RANGE), 'n')
def test_NDaysBeforeLastTradingDayOfWeek(self, n):
should_trigger = partial(
NDaysBeforeLastTradingDayOfWeek(n).should_trigger, env=self.env
)
for m in self.sept_week:
if should_trigger(m):
n_tdays = 0
date = m.to_datetime().date()
next_date = self.env.next_trading_day(date)
while next_date.weekday() > date.weekday():
date = next_date
next_date = self.env.next_trading_day(date)
n_tdays += 1
self.assertEqual(n_tdays, n)
@subtest(param_range(MAX_MONTH_RANGE), 'n')
def test_NthTradingDayOfMonth(self, n):
should_trigger = partial(NthTradingDayOfMonth(n).should_trigger,
env=self.env)
for n_tdays, d in enumerate(self.sept_days):
for m in self.env.market_minutes_for_day(d):
if should_trigger(m):
self.assertEqual(n_tdays, n)
else:
self.assertNotEqual(n_tdays, n)
@subtest(param_range(MAX_MONTH_RANGE), 'n')
def test_NDaysBeforeLastTradingDayOfMonth(self, n):
should_trigger = partial(
NDaysBeforeLastTradingDayOfMonth(n).should_trigger, env=self.env
)
for n_days_before, d in enumerate(reversed(self.sept_days)):
for m in self.env.market_minutes_for_day(d):
if should_trigger(m):
self.assertEqual(n_days_before, n)
else:
self.assertNotEqual(n_days_before, n)
@subtest(minutes_for_days(), 'ms')
def test_ComposedRule(self, ms):
rule1 = Always()
rule2 = Never()
composed = rule1 & rule2
should_trigger = partial(composed.should_trigger, env=self.env)
self.assertIsInstance(composed, ComposedRule)
self.assertIs(composed.first, rule1)
self.assertIs(composed.second, rule2)
self.assertFalse(any(map(should_trigger, ms)))
class TestStatefulRules(RuleTestCase):
@classmethod
def setUpClass(cls):
super(TestStatefulRules, cls).setUpClass()
cls.class_ = StatefulRule
@subtest(minutes_for_days(), 'ms')
def test_OncePerDay(self, ms):
class RuleCounter(StatefulRule):
"""
A rule that counts the number of times another rule triggers
but forwards the results out.
"""
count = 0
def should_trigger(self, dt, env):
st = self.rule.should_trigger(dt, env)
if st:
self.count += 1
return st
rule = RuleCounter(OncePerDay())
for m in ms:
rule.should_trigger(m, env=self.env)
self.assertEqual(rule.count, 1)
| apache-2.0 | 1,256,628,842,927,454,000 | 31.069136 | 78 | 0.589621 | false |
benmoran56/FightstickDisplay | pyglet/lib.py | 3 | 12708 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Functions for loading dynamic libraries.
These extend and correct ctypes functions.
'''
from __future__ import print_function
from builtins import str
from builtins import object
from past.builtins import basestring
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os
import re
import sys
import ctypes
import ctypes.util
import pyglet
_debug_lib = pyglet.options['debug_lib']
_debug_trace = pyglet.options['debug_trace']
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
if pyglet.options['search_local_libs']:
script_path = pyglet.resource.get_script_home()
_local_lib_paths = [script_path, os.path.join(script_path, 'lib'),]
else:
_local_lib_paths = None
class _TraceFunction(object):
def __init__(self, func):
self.__dict__['_func'] = func
def __str__(self):
return self._func.__name__
def __call__(self, *args, **kwargs):
return self._func(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._func, name)
def __setattr__(self, name, value):
setattr(self._func, name, value)
class _TraceLibrary(object):
def __init__(self, library):
self._library = library
print(library)
def __getattr__(self, name):
func = getattr(self._library, name)
f = _TraceFunction(func)
return f
if _is_epydoc:
class LibraryMock(object):
"""Mock library used when generating documentation."""
def __getattr__(self, name):
return LibraryMock()
def __setattr__(self, name, value):
pass
def __call__(self, *args, **kwargs):
return LibraryMock()
class LibraryLoader(object):
def load_library(self, *names, **kwargs):
'''Find and load a library.
More than one name can be specified, they will be tried in order.
Platform-specific library names (given as kwargs) are tried first.
Raises ImportError if library is not found.
'''
if _is_epydoc:
return LibraryMock()
if 'framework' in kwargs and self.platform == 'darwin':
return self.load_framework(kwargs['framework'])
if not names:
raise ImportError("No library name specified")
platform_names = kwargs.get(self.platform, [])
if isinstance(platform_names, basestring):
platform_names = [platform_names]
elif type(platform_names) is tuple:
platform_names = list(platform_names)
if self.platform.startswith('linux'):
for name in names:
libname = self.find_library(name)
platform_names.append(libname or 'lib%s.so' % name)
platform_names.extend(names)
for name in platform_names:
try:
lib = ctypes.cdll.LoadLibrary(name)
if _debug_lib:
print(name)
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
except OSError as o:
if self.platform == "win32" and o.winerror != 126:
print("Unexpected error loading library %s: %s" % (name, str(o)))
raise
path = self.find_library(name)
if path:
try:
lib = ctypes.cdll.LoadLibrary(path)
if _debug_lib:
print(path)
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
except OSError:
pass
raise ImportError('Library "%s" not found.' % names[0])
find_library = lambda self, name: ctypes.util.find_library(name)
platform = pyglet.compat_platform
# this is only for library loading, don't include it in pyglet.platform
if platform == 'cygwin':
platform = 'win32'
def load_framework(self, path):
raise RuntimeError("Can't load framework on this platform.")
class MachOLibraryLoader(LibraryLoader):
def __init__(self):
if 'LD_LIBRARY_PATH' in os.environ:
self.ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':')
else:
self.ld_library_path = []
if _local_lib_paths:
# search first for local libs
self.ld_library_path = _local_lib_paths + self.ld_library_path
os.environ['LD_LIBRARY_PATH'] = ':'.join(self.ld_library_path)
if 'DYLD_LIBRARY_PATH' in os.environ:
self.dyld_library_path = os.environ['DYLD_LIBRARY_PATH'].split(':')
else:
self.dyld_library_path = []
if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
self.dyld_fallback_library_path = \
os.environ['DYLD_FALLBACK_LIBRARY_PATH'].split(':')
else:
self.dyld_fallback_library_path = [
os.path.expanduser('~/lib'),
'/usr/local/lib',
'/usr/lib']
def find_library(self, path):
'''Implements the dylib search as specified in Apple documentation:
http://developer.apple.com/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryUsageGuidelines.html
Before commencing the standard search, the method first checks
the bundle's ``Frameworks`` directory if the application is running
within a bundle (OS X .app).
'''
libname = os.path.basename(path)
search_path = []
if '.' not in libname:
libname = 'lib' + libname + '.dylib'
# py2app support
if (hasattr(sys, 'frozen') and sys.frozen == 'macosx_app' and
'RESOURCEPATH' in os.environ):
search_path.append(os.path.join(
os.environ['RESOURCEPATH'],
'..',
'Frameworks',
libname))
# pyinstaller.py sets sys.frozen to True, and puts dylibs in
# Contents/MacOS, which path pyinstaller puts in sys._MEIPASS
if (hasattr(sys, 'frozen') and hasattr(sys, '_MEIPASS') and
            sys.frozen is True and pyglet.compat_platform == 'darwin'):
search_path.append(os.path.join(sys._MEIPASS, libname))
if '/' in path:
search_path.extend(
[os.path.join(p, libname) \
for p in self.dyld_library_path])
search_path.append(path)
search_path.extend(
[os.path.join(p, libname) \
for p in self.dyld_fallback_library_path])
else:
search_path.extend(
[os.path.join(p, libname) \
for p in self.ld_library_path])
search_path.extend(
[os.path.join(p, libname) \
for p in self.dyld_library_path])
search_path.append(path)
search_path.extend(
[os.path.join(p, libname) \
for p in self.dyld_fallback_library_path])
for path in search_path:
if os.path.exists(path):
return path
return None
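    # Example resolution (illustrative): find_library('GL') would check any
    # DYLD_* path entries first, then the fallbacks ~/lib/libGL.dylib,
    # /usr/local/lib/libGL.dylib and /usr/lib/libGL.dylib, returning the
    # first existing path (or None).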
def find_framework(self, path):
'''Implement runtime framework search as described by:
http://developer.apple.com/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/FrameworkBinding.html
'''
# e.g. path == '/System/Library/Frameworks/OpenGL.framework'
# name == 'OpenGL'
# return '/System/Library/Frameworks/OpenGL.framework/OpenGL'
name = os.path.splitext(os.path.split(path)[1])[0]
realpath = os.path.join(path, name)
if os.path.exists(realpath):
return realpath
for dir in ('/Library/Frameworks',
'/System/Library/Frameworks'):
realpath = os.path.join(dir, '%s.framework' % name, name)
if os.path.exists(realpath):
return realpath
return None
def load_framework(self, path):
realpath = self.find_framework(path)
if realpath:
lib = ctypes.cdll.LoadLibrary(realpath)
if _debug_lib:
print(realpath)
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
raise ImportError("Can't find framework %s." % path)
class LinuxLibraryLoader(LibraryLoader):
_ld_so_cache = None
_local_libs_cache = None
def _find_libs(self, directories):
cache = {}
        lib_re = re.compile(r'lib(.*)\.so(?:$|\.)')
for dir in directories:
try:
for file in os.listdir(dir):
match = lib_re.match(file)
if match:
# Index by filename
path = os.path.join(dir, file)
if file not in cache:
cache[file] = path
# Index by library name
library = match.group(1)
if library not in cache:
cache[library] = path
except OSError:
pass
return cache
def _create_ld_so_cache(self):
# Recreate search path followed by ld.so. This is going to be
# slow to build, and incorrect (ld.so uses ld.so.cache, which may
# not be up-to-date). Used only as fallback for distros without
# /sbin/ldconfig.
#
# We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
directories = []
try:
directories.extend(os.environ['LD_LIBRARY_PATH'].split(':'))
except KeyError:
pass
try:
with open('/etc/ld.so.conf') as fid:
directories.extend([dir.strip() for dir in fid])
except IOError:
pass
directories.extend(['/lib', '/usr/lib'])
self._ld_so_cache = self._find_libs(directories)
def find_library(self, path):
# search first for local libs
if _local_lib_paths:
if not self._local_libs_cache:
self._local_libs_cache = self._find_libs(_local_lib_paths)
if path in self._local_libs_cache:
return self._local_libs_cache[path]
# ctypes tries ldconfig, gcc and objdump. If none of these are
# present, we implement the ld-linux.so search path as described in
# the man page.
result = ctypes.util.find_library(path)
if result:
return result
if self._ld_so_cache is None:
self._create_ld_so_cache()
return self._ld_so_cache.get(path)
if pyglet.compat_platform == 'darwin':
loader = MachOLibraryLoader()
elif pyglet.compat_platform.startswith('linux'):
loader = LinuxLibraryLoader()
else:
loader = LibraryLoader()
load_library = loader.load_library
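# Example (illustrative; the 'framework' keyword is handled on darwin only):
#   gl = load_library('GL', framework='/System/Library/Frameworks/OpenGL.framework')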
| gpl-3.0 | -6,575,638,694,990,342,000 | 33.912088 | 139 | 0.573182 | false |
pymedusa/SickRage | ext/jwt/__main__.py | 13 | 4162 | #!/usr/bin/env python
from __future__ import absolute_import, print_function
import argparse
import json
import sys
import time
from . import DecodeError, __version__, decode, encode
def encode_payload(args):
# Try to encode
if args.key is None:
raise ValueError('Key is required when encoding. See --help for usage.')
# Build payload object to encode
payload = {}
for arg in args.payload:
k, v = arg.split('=', 1)
# exp +offset special case?
if k == 'exp' and v[0] == '+' and len(v) > 1:
v = str(int(time.time()+int(v[1:])))
# Cast to integer?
if v.isdigit():
v = int(v)
else:
# Cast to float?
try:
v = float(v)
except ValueError:
pass
# Cast to true, false, or null?
constants = {'true': True, 'false': False, 'null': None}
if v in constants:
v = constants[v]
payload[k] = v
token = encode(
payload,
key=args.key,
algorithm=args.algorithm
)
return token.decode('utf-8')
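# Example session (illustrative): 'exp=+60' expands to an absolute Unix
# timestamp sixty seconds from now, and 'admin=true' is cast to True.
#   $ pyjwt --key=secret encode exp=+60 admin=true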
def decode_payload(args):
try:
if args.token:
token = args.token
else:
if sys.stdin.isatty():
token = sys.stdin.readline().strip()
else:
raise IOError('Cannot read from stdin: terminal not a TTY')
token = token.encode('utf-8')
data = decode(token, key=args.key, verify=args.verify)
return json.dumps(data)
except DecodeError as e:
raise DecodeError('There was an error decoding the token: %s' % e)
def build_argparser():
usage = '''
Encodes or decodes JSON Web Tokens based on input.
%(prog)s [options] <command> [options] input
Decoding examples:
%(prog)s --key=secret decode json.web.token
%(prog)s decode --no-verify json.web.token
Encoding requires the key option and takes space separated key/value pairs
separated by equals (=) as input. Examples:
%(prog)s --key=secret encode iss=me exp=1302049071
%(prog)s --key=secret encode foo=bar exp=+10
The exp key is special and can take an offset to current Unix time.
'''
arg_parser = argparse.ArgumentParser(
prog='pyjwt',
usage=usage
)
arg_parser.add_argument(
'-v', '--version',
action='version',
version='%(prog)s ' + __version__
)
arg_parser.add_argument(
'--key',
dest='key',
metavar='KEY',
default=None,
help='set the secret key to sign with'
)
arg_parser.add_argument(
'--alg',
dest='algorithm',
metavar='ALG',
default='HS256',
help='set crypto algorithm to sign with. default=HS256'
)
subparsers = arg_parser.add_subparsers(
title='PyJWT subcommands',
description='valid subcommands',
help='additional help'
)
# Encode subcommand
encode_parser = subparsers.add_parser('encode', help='use to encode a supplied payload')
payload_help = """Payload to encode. Must be a space separated list of key/value
pairs separated by equals (=) sign."""
encode_parser.add_argument('payload', nargs='+', help=payload_help)
encode_parser.set_defaults(func=encode_payload)
# Decode subcommand
decode_parser = subparsers.add_parser('decode', help='use to decode a supplied JSON web token')
decode_parser.add_argument(
'token',
help='JSON web token to decode.',
nargs='?')
decode_parser.add_argument(
'-n', '--no-verify',
action='store_false',
dest='verify',
default=True,
help='ignore signature and claims verification on decode'
)
decode_parser.set_defaults(func=decode_payload)
return arg_parser
def main():
arg_parser = build_argparser()
try:
arguments = arg_parser.parse_args(sys.argv[1:])
output = arguments.func(arguments)
print(output)
except Exception as e:
        print('There was an unforeseen error: ', e)
arg_parser.print_help()
| gpl-3.0 | -6,980,530,937,878,810 | 23.77381 | 99 | 0.582172 | false |
repotvsupertuga/tvsupertuga.repository | script.vodextende/scrapers/local_scraper.py | 7 | 7621 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import xbmc
import kodi
import log_utils # @UnusedImport
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import SORT_KEYS
from salts_lib.constants import VIDEO_TYPES
import scraper
logger = log_utils.Logger.get_logger()
BASE_URL = ''
class Scraper(scraper.Scraper):
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT): # @UnusedVariable
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
self.def_quality = int(kodi.get_setting('%s-def-quality' % (self.get_name())))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'Local'
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if not source_url or source_url == FORCE_NO_MATCH: return hosters
params = scraper_utils.parse_query(source_url)
if video.video_type == VIDEO_TYPES.MOVIE:
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"movieid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libMovies"}'
result_key = 'moviedetails'
else:
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"episodeid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libTvShows"}'
result_key = 'episodedetails'
run = cmd % (params['id'])
meta = xbmc.executeJSONRPC(run)
meta = scraper_utils.parse_json(meta)
logger.log('Source Meta: %s' % (meta), log_utils.LOGDEBUG)
if result_key in meta.get('result', []):
details = meta['result'][result_key]
            # Resolve the configured default-quality index to its label by
            # ranking the quality keys.
            def_quality = [item[0] for item in sorted(SORT_KEYS['quality'].items(), key=lambda x: x[1])][self.def_quality]
host = {'multi-part': False, 'class': self, 'url': details['file'], 'host': 'XBMC Library', 'quality': def_quality, 'views': details['playcount'], 'rating': None, 'direct': True}
stream_details = details['streamdetails']
if len(stream_details['video']) > 0 and 'width' in stream_details['video'][0]:
host['quality'] = scraper_utils.width_get_quality(stream_details['video'][0]['width'])
hosters.append(host)
return hosters
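    # Abridged, illustrative shape of the JSON-RPC reply parsed above:
    #   {"result": {"moviedetails": {"file": "...", "playcount": 0,
    #       "streamdetails": {"video": [{"width": 1920}]}}}}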
def _get_episode_url(self, show_url, video):
params = scraper_utils.parse_query(show_url)
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"tvshowid": %s, "season": %s, "filter": {"field": "%s", "operator": "is", "value": "%s"}, \
"limits": { "start" : 0, "end": 25 }, "properties" : ["title", "season", "episode", "file", "streamdetails"], "sort": { "order": "ascending", "method": "label", "ignorearticle": true }}, "id": "libTvShows"}'
base_url = 'video_type=%s&id=%s'
episodes = []
force_title = scraper_utils.force_title(video)
if not force_title:
run = cmd % (params['id'], video.season, 'episode', video.episode)
meta = xbmc.executeJSONRPC(run)
meta = scraper_utils.parse_json(meta)
logger.log('Episode Meta: %s' % (meta), log_utils.LOGDEBUG)
if 'result' in meta and 'episodes' in meta['result']:
episodes = meta['result']['episodes']
else:
logger.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)
if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and not episodes:
run = cmd % (params['id'], video.season, 'title', video.ep_title)
meta = xbmc.executeJSONRPC(run)
meta = scraper_utils.parse_json(meta)
logger.log('Episode Title Meta: %s' % (meta), log_utils.LOGDEBUG)
if 'result' in meta and 'episodes' in meta['result']:
episodes = meta['result']['episodes']
for episode in episodes:
if episode['file'].endswith('.strm'):
continue
return base_url % (video.video_type, episode['episodeid'])
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
name = cls.get_name()
settings.append(' <setting id="%s-def-quality" type="enum" label=" Default Quality" values="None|Low|Medium|High|HD720|HD1080" default="0" visible="eq(-3,true)"/>' % (name))
return settings
def search(self, video_type, title, year, season=''): # @UnusedVariable
filter_str = '{{"field": "title", "operator": "contains", "value": "{search_title}"}}'
if year: filter_str = '{{"and": [%s, {{"field": "year", "operator": "is", "value": "%s"}}]}}' % (filter_str, year)
if video_type == VIDEO_TYPES.MOVIE:
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year", "file", "streamdetails"], \
"sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libMovies"}'
result_key = 'movies'
id_key = 'movieid'
else:
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year"], \
"sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libTvShows"}'
result_key = 'tvshows'
id_key = 'tvshowid'
command = cmd % (filter_str.format(search_title=title))
results = self.__get_results(command, result_key, video_type, id_key)
norm_title = self.__normalize_title(title)
if not results and norm_title and norm_title != title:
command = cmd % (filter_str.format(search_title=norm_title))
results = self.__get_results(command, result_key, video_type, id_key)
return results
def __normalize_title(self, title):
norm_title = re.sub('[^A-Za-z0-9 ]', ' ', title)
return re.sub('\s+', ' ', norm_title)
def __get_results(self, cmd, result_key, video_type, id_key):
results = []
logger.log('Search Command: %s' % (cmd), log_utils.LOGDEBUG)
meta = xbmc.executeJSONRPC(cmd)
meta = scraper_utils.parse_json(meta)
logger.log('Search Meta: %s' % (meta), log_utils.LOGDEBUG)
for item in meta.get('result', {}).get(result_key, {}):
if video_type == VIDEO_TYPES.MOVIE and item['file'].endswith('.strm'):
continue
result = {'title': item['title'], 'year': item['year'], 'url': 'video_type=%s&id=%s' % (video_type, item[id_key])}
results.append(result)
return results
| gpl-2.0 | 4,173,299,171,746,068,500 | 51.19863 | 215 | 0.594279 | false |
petrutlucian94/nova | nova/tests/unit/api/openstack/compute/test_plugin_framework.py | 46 | 1306 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class PluginTest(test.NoDBTestCase):
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace")
def test_plugin_framework_index(self, mock_namespace):
mock_namespace.return_value = 'nova.api.v3.test_extensions'
app = fakes.wsgi_app_v21(init_only='test-basic')
req = fakes.HTTPRequest.blank('/v2/fake/test')
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val', resp_json['param'])
| apache-2.0 | -5,830,842,402,062,778,000 | 35.277778 | 78 | 0.714395 | false |
Fireblend/chromium-crosswalk | tools/perf/benchmarks/sunspider.py | 6 | 6151 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import json
import os
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.value import list_of_scalar_values
from metrics import power
_URL = 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html'
DESCRIPTIONS = {
'3d-cube':
'Pure JavaScript computations of the kind you might use to do 3d '
'rendering, but without the rendering. This ends up mostly hitting '
'floating point math and array access.',
'3d-morph':
'Pure JavaScript computations of the kind you might use to do 3d '
'rendering, but without the rendering. This ends up mostly hitting '
'floating point math and array access.',
'3d-raytrace':
'Pure JavaScript computations of the kind you might use to do 3d '
'rendering, but without the rendering. This ends up mostly hitting '
'floating point math and array access.',
'access-binary-trees': 'Array, object property and variable access.',
'access-fannkuch': 'Array, object property and variable access.',
'access-nbody': 'Array, object property and variable access.',
'access-nsieve': 'Array, object property and variable access.',
'bitops-3bit-bits-in-byte':
'Bitwise operations, these can be useful for various things '
'including games, mathematical computations, and various kinds of '
'encoding/decoding. It\'s also the only kind of math in JavaScript '
'that is done as integer, not floating point.',
'bitops-bits-in-byte':
'Bitwise operations, these can be useful for various things '
'including games, mathematical computations, and various kinds of '
'encoding/decoding. It\'s also the only kind of math in JavaScript '
'that is done as integer, not floating point.',
'bitops-bitwise-and':
'Bitwise operations, these can be useful for various things '
'including games, mathematical computations, and various kinds of '
'encoding/decoding. It\'s also the only kind of math in JavaScript '
'that is done as integer, not floating point.',
'bitops-nsieve-bits':
'Bitwise operations, these can be useful for various things '
'including games, mathematical computations, and various kinds of '
'encoding/decoding. It\'s also the only kind of math in JavaScript '
'that is done as integer, not floating point.',
'controlflow-recursive':
'Control flow constructs (looping, recursion, conditionals). Right '
'now it mostly covers recursion, as the others are pretty well covered '
'by other tests.',
'crypto-aes': 'Real cryptography code related to AES.',
'crypto-md5': 'Real cryptography code related to MD5.',
'crypto-sha1': 'Real cryptography code related to SHA1.',
'date-format-tofte': 'Performance of JavaScript\'s "date" objects.',
'date-format-xparb': 'Performance of JavaScript\'s "date" objects.',
'math-cordic': 'Various mathematical type computations.',
'math-partial-sums': 'Various mathematical type computations.',
'math-spectral-norm': 'Various mathematical type computations.',
'regexp-dna': 'Regular expressions performance.',
'string-base64': 'String processing.',
'string-fasta': 'String processing',
'string-tagcloud': 'String processing code to generate a giant "tagcloud".',
'string-unpack-code': 'String processing code to extracting compressed JS.',
'string-validate-input': 'String processing.',
}
class _SunspiderMeasurement(page_test.PageTest):
def __init__(self):
super(_SunspiderMeasurement, self).__init__()
self._power_metric = None
def CustomizeBrowserOptions(self, options):
power.PowerMetric.CustomizeBrowserOptions(options)
def WillStartBrowser(self, platform):
self._power_metric = power.PowerMetric(platform)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression(
'window.location.pathname.indexOf("results.html") >= 0'
'&& typeof(output) != "undefined"', 300)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)
js_get_results = 'JSON.stringify(output);'
js_results = json.loads(tab.EvaluateJavaScript(js_get_results))
# Below, r is a map of benchmark names to lists of result numbers,
# and totals is a list of totals of result numbers.
    # js_results is formatted like this:
# [
# {'3d-cube': v1, '3d-morph': v2, ...},
# {'3d-cube': v3, '3d-morph': v4, ...},
# ...
# ]
r = collections.defaultdict(list)
totals = []
for result in js_results:
total = 0
for key, value in result.iteritems():
r[key].append(value)
total += value
totals.append(total)
for key, values in r.iteritems():
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, key, 'ms', values, important=False,
description=DESCRIPTIONS.get(key)))
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, 'Total', 'ms', totals,
description='Totals of run time for each different type of benchmark '
'in sunspider'))
class Sunspider(benchmark.Benchmark):
"""Apple's SunSpider JavaScript benchmark.
http://www.webkit.org/perf/sunspider/sunspider.html
"""
test = _SunspiderMeasurement
@classmethod
def Name(cls):
return 'sunspider'
def CreatePageSet(self, options):
ps = page_set.PageSet(
archive_data_file='../page_sets/data/sunspider.json',
file_path=os.path.abspath(__file__),
bucket=page_set.PARTNER_BUCKET)
ps.AddUserStory(page_module.Page(
_URL, ps, ps.base_dir, make_javascript_deterministic=False))
return ps
| bsd-3-clause | -2,310,679,917,354,462,000 | 41.130137 | 80 | 0.682491 | false |
aapa/pyfibot | pyfibot/modules/module_rss.py | 1 | 11257 | from __future__ import unicode_literals, print_function, division
import feedparser
import dataset
from twisted.internet.reactor import callLater
from threading import Thread
import twisted.internet.error
import logging
logger = logging.getLogger('module_rss')
DATABASE = None
updater = None
botref = None
config = {}
def init(bot, testing=False):
''' Initialize updater '''
global DATABASE
global config
global botref
global updater
global logger
if testing:
DATABASE = dataset.connect('sqlite:///:memory:')
else:
DATABASE = dataset.connect('sqlite:///databases/rss.db')
logger.info('RSS module initialized')
botref = bot
config = bot.config.get('rss', {})
finalize()
# As there's no signal if this is a rehash or restart
# update feeds in 30 seconds
updater = callLater(30, update_feeds)
def finalize():
''' Finalize updater (rehash etc) so we don't leave an updater running '''
global updater
global logger
logger.info('RSS module finalized')
if updater:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = None
def get_feeds(**kwargs):
''' Get feeds from database '''
return [
Feed(f['network'], f['channel'], f['id'])
for f in list(DATABASE['feeds'].find(**kwargs))
]
def find_feed(network, channel, **kwargs):
''' Find specific feed from database '''
f = DATABASE['feeds'].find_one(network=network, channel=channel, **kwargs)
if not f:
return
return Feed(f['network'], f['channel'], f['id'])
def add_feed(network, channel, url):
''' Add feed to database '''
f = Feed(network=network, channel=channel, url=url)
return (f.initialized, f.read())
def remove_feed(network, channel, id):
''' Remove feed from database '''
f = find_feed(network=network, channel=channel, id=int(id))
if not f:
return
DATABASE['feeds'].delete(id=f.id)
DATABASE['items_%i' % (f.id)].drop()
return f
def update_feeds(cancel=True, **kwargs):
# from time import sleep
''' Update all feeds in the DB '''
global config
global updater
global logger
logger.info('Updating RSS feeds started')
for f in get_feeds(**kwargs):
Thread(target=f.update).start()
# If we get a cancel, cancel the existing updater
# and start a new one
# NOTE: Not sure if needed, as atm cancel isn't used in any command...
if cancel:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = callLater(5 * 60, update_feeds)
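# Scheduling note (illustrative): update_feeds() re-arms itself through
# callLater, so each run schedules the next one five minutes out, and every
# feed is fetched in its own worker thread to keep the reactor unblocked.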
def command_rss(bot, user, channel, args):
commands = ['list', 'add', 'remove', 'latest', 'update']
args = args.split()
if not args or args[0] not in commands:
return bot.say(channel, 'rss: valid arguments are [%s]' % (', '.join(commands)))
command = args[0]
network = bot.network.alias
# Get latest feed item from database
# Not needed? mainly for debugging
# Possibly useful for checking if feed still exists?
if command == 'latest':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss latest <id from list>"')
feed = find_feed(network=network, channel=channel, id=int(args[1]))
if not feed:
return bot.say(channel, 'feed not found, no action taken')
item = feed.get_latest()
if not item:
return bot.say(channel, 'no items in feed')
return bot.say(channel, feed.get_item_str(item))
# List all feeds for current network && channel
if command == 'list':
feeds = get_feeds(network=network, channel=channel)
if not feeds:
return bot.say(channel, 'no feeds set up')
for f in feeds:
bot.say(channel, '%02i: %s <%s>' % (f.id, f.name, f.url))
return
# Rest of the commands are only for admins
if not bot.factory.isAdmin(user):
return bot.say(channel, 'only "latest" and "list" available for non-admins')
# Add new feed for channel
if command == 'add':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss add url"')
init, items = add_feed(network, channel, url=args[1])
if not init:
return bot.say(channel, 'feed already added')
return bot.say(channel, 'feed added with %i items' % len(items))
# remove feed from channel
if command == 'remove':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss remove <id from list>"')
feed = remove_feed(network, channel, id=args[1])
if not feed:
return bot.say(channel, 'feed not found, no action taken')
return bot.say(channel, 'feed "%s" <%s> removed' % (feed.name, feed.url))
# If there's no args, update all feeds (even for other networks)
# If arg exists, try to update the feed...
if command == 'update':
if len(args) < 2:
bot.say(channel, 'feeds updating')
update_feeds()
return
feed = find_feed(network, channel, id=int(args[1]))
if not feed:
return bot.say(channel, 'feed not found, no action taken')
feed.update()
return
class Feed(object):
''' Feed object to simplify feed handling '''
def __init__(self, network, channel, id=None, url=None):
# Not sure if (this complex) init is needed...
self.id = id
self.network = network
self.channel = channel
self.url = url
if url:
self.url = url
self.initialized = False
# load feed details from database
self._get_feed_from_db()
def __repr__(self):
return '(%s, %s, %s)' % (self.url, self.channel, self.network)
def __unicode__(self):
return '%i - %s' % (self.id, self.url)
def __init_feed(self):
''' Initialize databases for feed '''
DATABASE['feeds'].insert({
'network': self.network,
'channel': self.channel,
'url': self.url,
'name': '',
})
# Update feed to match the created
feed = self._get_feed_from_db()
# Initialize item-database for feed
self.__save_item({
'title': 'PLACEHOLDER',
'link': 'https://github.com/lepinkainen/pyfibot/',
'printed': True,
})
self.initialized = True
return feed
def __get_items_tbl(self):
        ''' Get table for the feed's items '''
return DATABASE[('items_%i' % (self.id))]
def __parse_feed(self):
''' Parse items from feed '''
f = feedparser.parse(self.url)
if self.initialized:
self.update_feed_info({'name': f['channel']['title']})
items = [{
'title': i['title'],
'link': i['link'],
} for i in f['items']]
return (f, items)
def __save_item(self, item, table=None):
        ''' Save item to the feed's items table '''
if table is None:
table = self.__get_items_tbl()
# If override is set or the item cannot be found, it's a new one
if not table.find_one(title=item['title'], link=item['link']):
            # If printed isn't set, default it to self.initialized (True while initializing, else False)
# This is to prevent flooding when adding a new feed...
if 'printed' not in item:
item['printed'] = self.initialized
table.insert(item)
def __mark_printed(self, item, table=None):
''' Mark item as printed '''
if table is None:
table = self.__get_items_tbl()
table.update({'id': item['id'], 'printed': True}, ['id'])
def _get_feed_from_db(self):
''' Get self from database '''
feed = None
if self.url and not self.id:
feed = DATABASE['feeds'].find_one(network=self.network, channel=self.channel, url=self.url)
if self.id:
feed = DATABASE['feeds'].find_one(network=self.network, channel=self.channel, id=self.id)
if not feed:
feed = self.__init_feed()
self.id = feed['id']
self.network = feed['network']
self.channel = feed['channel']
self.url = feed['url']
# TODO: Name could just be the domain part of url?
self.name = feed['name']
return feed
def get_item_str(self, item):
        return '[%s] %s <%s>' % (self.name[:18], item['title'], item['link'])
def get_latest(self):
tbl = self.__get_items_tbl()
        items = list(tbl.find(order_by='id'))
if not items:
return
return items[-1]
def update_feed_info(self, data):
''' Update feed information '''
data['id'] = self.id
if 'url' in data:
self.url = data['url']
DATABASE['feeds'].update(data, ['id'])
# Update self to match new...
self._get_feed_from_db()
def read(self):
''' Read new items from feed '''
f, items = self.__parse_feed()
# Get table -reference to speed up stuff...
tbl = self.__get_items_tbl()
# Save items in DB, saving takes care of duplicate checks
for i in reversed(items):
self.__save_item(i, tbl)
# Set initialized to False, as we have read everything...
self.initialized = False
return items
def get_new_items(self, mark_printed=False):
''' Get all items which are not marked as printed, if mark_printed is set, update printed also. '''
tbl = self.__get_items_tbl()
        items = list(tbl.find(printed=False))
if mark_printed:
for i in items:
self.__mark_printed(i, tbl)
return items
def update(self):
global logger
global botref
# If botref isn't defined, bot isn't running, no need to run
# (used for tests?)
if not botref:
return
# Read all items for feed
logger.debug('Feed "%s" updating' % (self.name))
self.read()
# Get number of unprinted items (and don't mark as printed)
items = self.get_new_items(False)
if len(items) == 0:
            logger.debug('Feed "%s" contains no new items, doing nothing.' % (self.name))
return
logger.debug('Feed "%s" updated with %i new items' % (self.name, len(items)))
# If bot instance isn't found, don't print anything
bot_instance = botref.find_bot_for_network(self.network)
if not bot_instance:
logger.error('Bot instance for "%s" not found, not printing' % (self.name))
return
logger.debug('Printing new items for "%s"' % (self.name))
# Get all new (not printed) items and print them
items = self.get_new_items(True)
for i in items:
bot_instance.say(self.channel, self.get_item_str(i))
if __name__ == '__main__':
f = Feed('ircnet', '#pyfibot', 'http://feeds.feedburner.com/ampparit-kaikki?format=xml')
f.read()
for i in f.get_new_items(True):
print(i)
| bsd-3-clause | 4,514,868,805,792,006,700 | 31.915205 | 111 | 0.574665 | false |
Ms2ger/presto-testo | wpt/websockets/mod_pywebsocket/handshake/__init__.py | 4 | 3895 | # Copyright 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Web Socket handshaking.
Note: request.connection.write/read are used in this module, even though
mod_python document says that they should be used only in connection handlers.
Unfortunately, we have no other options. For example, request.write/read are
not suitable because they don't allow direct raw bytes writing/reading.
"""
import logging
import re
from mod_pywebsocket.handshake import draft75
from mod_pywebsocket.handshake import handshake
from mod_pywebsocket.handshake._base import DEFAULT_WEB_SOCKET_PORT
from mod_pywebsocket.handshake._base import DEFAULT_WEB_SOCKET_SECURE_PORT
from mod_pywebsocket.handshake._base import WEB_SOCKET_SCHEME
from mod_pywebsocket.handshake._base import WEB_SOCKET_SECURE_SCHEME
from mod_pywebsocket.handshake._base import HandshakeError
from mod_pywebsocket.handshake._base import validate_protocol
class Handshaker(object):
"""This class performs Web Socket handshake."""
def __init__(self, request, dispatcher, allowDraft75=False, strict=False):
"""Construct an instance.
Args:
request: mod_python request.
dispatcher: Dispatcher (dispatch.Dispatcher).
allowDraft75: allow draft 75 handshake protocol.
strict: Strictly check handshake request in draft 75.
Default: False. If True, request.connection must provide
get_memorized_lines method.
Handshaker will add attributes such as ws_resource in performing
handshake.
"""
self._logger = logging.getLogger("mod_pywebsocket.handshake")
self._request = request
self._dispatcher = dispatcher
self._strict = strict
self._handshaker = handshake.Handshaker(request, dispatcher)
self._fallbackHandshaker = None
if allowDraft75:
self._fallbackHandshaker = draft75.Handshaker(
request, dispatcher, strict)
def do_handshake(self):
"""Perform Web Socket Handshake."""
try:
self._handshaker.do_handshake()
except HandshakeError, e:
self._logger.error('Handshake error: %s' % e)
if self._fallbackHandshaker:
self._logger.warning('fallback to old protocol')
self._fallbackHandshaker.do_handshake()
return
raise e
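# Illustrative use of Handshaker (a sketch only; the 'request' and
# 'dispatcher' objects are assumed to be supplied by the embedding server):
#
#   handshaker = Handshaker(request, dispatcher, allowDraft75=True)
#   try:
#       handshaker.do_handshake()
#   except HandshakeError:
#       pass  # handshake failed; the caller should close the connection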
# vi:sts=4 sw=4 et
| bsd-3-clause | -8,050,349,000,817,394,000 | 40 | 78 | 0.721951 | false |
EinsteinsWorkshop/openscad | tests/export_import_pngtest.py | 4 | 5342 | #!/usr/bin/env python
# Export-import test
#
#
# Usage: <script> <inputfile> --openscad=<executable-path> --format=<format> --require-manifold [<openscad args>] file.png
#
#
# step 1. If the input file is _not_ an .scad file, create a temporary .scad file importing the input file.
# step 2. Run OpenSCAD on the .scad file, output an export format (csg, stl, off, dxf, svg, amf)
# step 3. If the export format is _not_ .csg, create a temporary new .scad file importing the exported file
# step 4. Run OpenSCAD on the .csg or .scad file, export to the given .png file
# step 5. (done in CTest) - compare the generated .png file to expected output
# of the original .scad file. they should be the same!
#
# All the optional openscad args are passed on to OpenSCAD both in step 2 and 4.
# Exception: If any --render arguments are passed, the first pass (step 2) will always
# be run with --render=cgal, while the second pass (step 4) will use the passed --render
# argument.
#
# This script should return 0 on success, not-0 on error.
#
# The CSG file tests do not include the use<fontfile> statements, so to allow the
# export tests to find the font files in the test data directory, the OPENSCAD_FONT_PATH
# is set to the testdata/ttf directory.
#
# Authors: Torsten Paul, Don Bright, Marius Kintel
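# Example invocation (paths and file names below are hypothetical):
#
#   export_import_pngtest.py cube.scad --openscad=/usr/bin/openscad \
#       --format=stl --require-manifold --render=cgal cube-expected.png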
import sys, os, re, subprocess, argparse
from validatestl import validateSTL
def failquit(*args):
if len(args)!=0: print(args)
print('export_import_pngtest args:',str(sys.argv))
print('exiting export_import_pngtest.py with failure')
sys.exit(1)
def createImport(inputfile, scadfile):
print ('createImport: ' + inputfile + " " + scadfile)
outputdir = os.path.dirname(scadfile)
try:
if outputdir and not os.path.exists(outputdir): os.mkdir(outputdir)
f = open(scadfile,'w')
f.write('import("'+inputfile+'");'+os.linesep)
f.close()
except:
failquit('failure while opening/writing ' + scadfile + ': ' + str(sys.exc_info()))
#
# Parse arguments
#
formats = ['csg', 'stl', 'off', 'amf', 'dxf', 'svg']
parser = argparse.ArgumentParser()
parser.add_argument('--openscad', required=True, help='Specify OpenSCAD executable')
parser.add_argument('--format', required=True, choices=[item for sublist in [(f,f.upper()) for f in formats] for item in sublist], help='Specify 3d export format')
parser.add_argument('--require-manifold', dest='requiremanifold', action='store_true', help='Require STL output to be manifold')
parser.set_defaults(requiremanifold=False)
args,remaining_args = parser.parse_known_args()
args.format = args.format.lower()
inputfile = remaining_args[0] # Can be .scad file or a file to be imported
pngfile = remaining_args[-1]
remaining_args = remaining_args[1:-1] # Passed on to the OpenSCAD executable
if not os.path.exists(inputfile):
    failquit('cannot find input file named: ' + inputfile)
if not os.path.exists(args.openscad):
    failquit('cannot find openscad executable named: ' + args.openscad)
outputdir = os.path.dirname(pngfile)
inputpath, inputfilename = os.path.split(inputfile)
inputbasename,inputsuffix = os.path.splitext(inputfilename)
if args.format == 'csg':
# Must export to same folder for include/use/import to work
exportfile = inputfile + '.' + args.format
else:
exportfile = os.path.join(outputdir, inputfilename)
if args.format != inputsuffix[1:]: exportfile += '.' + args.format
# If we're not reading an .scad or .csg file, we need to import it.
if inputsuffix != '.scad' and inputsuffix != '.csg':
# FIXME: Remove tempfile if created
tempfile = os.path.join(outputdir, inputfilename + '.scad')
createImport(inputfile, tempfile)
inputfile = tempfile
#
# First run: Just export the given filetype
# Force any --render arguments to --render=cgal
#
tmpargs = ['--render=cgal' if arg.startswith('--render') else arg for arg in remaining_args]
export_cmd = [args.openscad, inputfile, '-o', exportfile] + tmpargs
print >> sys.stderr, 'Running OpenSCAD #1:'
print >> sys.stderr, ' '.join(export_cmd)
result = subprocess.call(export_cmd)
if result != 0:
failquit('OpenSCAD #1 failed with return code ' + str(result))
if args.format == 'stl' and args.requiremanifold:
if not validateSTL(exportfile):
failquit("Error: Non-manifold STL file exported from OpenSCAD")
#
# Second run: Import the exported file and render as png
#
newscadfile = exportfile
# If we didn't export a .csg file, we need to import it
if args.format != 'csg':
newscadfile += '.scad'
createImport(exportfile, newscadfile)
create_png_cmd = [args.openscad, newscadfile, '-o', pngfile] + remaining_args
print >> sys.stderr, 'Running OpenSCAD #2:'
print >> sys.stderr, ' '.join(create_png_cmd)
fontdir = os.path.join(os.path.dirname(args.openscad), "..", "testdata")
fontenv = os.environ.copy()
fontenv["OPENSCAD_FONT_PATH"] = fontdir
result = subprocess.call(create_png_cmd, env=fontenv)
if result != 0:
failquit('OpenSCAD #2 failed with return code ' + str(result))
try: os.remove(exportfile)
except: failquit('failure at os.remove('+exportfile+')')
if newscadfile != exportfile:
try: os.remove(newscadfile)
except: failquit('failure at os.remove('+newscadfile+')')
| gpl-2.0 | -2,458,336,742,504,938,500 | 40.092308 | 163 | 0.691127 | false |
ggreer/django-guardian | guardian/forms.py | 85 | 6401 | from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext as _
from guardian.shortcuts import assign_perm
from guardian.shortcuts import remove_perm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_perms_for_model
class BaseObjectPermissionsForm(forms.Form):
"""
Base form for object permissions management. Needs to be extended for usage
with users and/or groups.
"""
def __init__(self, obj, *args, **kwargs):
"""
        :param obj: Any instance which form would use to manage object
            permissions
"""
self.obj = obj
super(BaseObjectPermissionsForm, self).__init__(*args, **kwargs)
field_name = self.get_obj_perms_field_name()
self.fields[field_name] = self.get_obj_perms_field()
def get_obj_perms_field(self):
"""
Returns field instance for object permissions management. May be
replaced entirely.
"""
field_class = self.get_obj_perms_field_class()
field = field_class(
label=self.get_obj_perms_field_label(),
choices=self.get_obj_perms_field_choices(),
initial=self.get_obj_perms_field_initial(),
widget=self.get_obj_perms_field_widget(),
required=self.are_obj_perms_required(),
)
return field
def get_obj_perms_field_name(self):
"""
Returns name of the object permissions management field. Default:
``permission``.
"""
return 'permissions'
def get_obj_perms_field_label(self):
"""
        Returns label of the object permissions management field. Default:
``_("Permissions")`` (marked to be translated).
"""
return _("Permissions")
def get_obj_perms_field_choices(self):
"""
Returns choices for object permissions management field. Default:
list of tuples ``(codename, name)`` for each ``Permission`` instance
for the managed object.
"""
choices = [(p.codename, p.name) for p in get_perms_for_model(self.obj)]
return choices
def get_obj_perms_field_initial(self):
"""
Returns initial object permissions management field choices. Default:
``[]`` (empty list).
"""
return []
def get_obj_perms_field_class(self):
"""
Returns object permissions management field's base class. Default:
``django.forms.MultipleChoiceField``.
"""
return forms.MultipleChoiceField
def get_obj_perms_field_widget(self):
"""
Returns object permissions management field's widget base class.
Default: ``django.forms.SelectMultiple``.
"""
return forms.SelectMultiple
def are_obj_perms_required(self):
"""
Indicates if at least one object permission should be required. Default:
``False``.
"""
return False
def save_obj_perms(self):
"""
Must be implemented in concrete form class. This method should store
selected object permissions.
"""
raise NotImplementedError
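# Minimal sketch of a concrete subclass (illustrative only; the 'identity'
# passed to assign_perm must still be a User or Group instance, and the two
# reference implementations below show the real usage):
#
#   class IdentityObjectPermissionsForm(BaseObjectPermissionsForm):
#       def __init__(self, identity, *args, **kwargs):
#           self.identity = identity
#           super(IdentityObjectPermissionsForm, self).__init__(*args, **kwargs)
#
#       def save_obj_perms(self):
#           for perm in self.cleaned_data[self.get_obj_perms_field_name()]:
#               assign_perm(perm, self.identity, self.obj)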
class UserObjectPermissionsForm(BaseObjectPermissionsForm):
"""
Object level permissions management form for usage with ``User`` instances.
Example usage::
from django.shortcuts import get_object_or_404
from myapp.models import Post
from guardian.forms import UserObjectPermissionsForm
from django.contrib.auth.models import User
def my_view(request, post_slug, user_id):
user = get_object_or_404(User, id=user_id)
post = get_object_or_404(Post, slug=post_slug)
form = UserObjectPermissionsForm(user, post, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save_obj_perms()
...
"""
def __init__(self, user, *args, **kwargs):
self.user = user
super(UserObjectPermissionsForm, self).__init__(*args, **kwargs)
def get_obj_perms_field_initial(self):
perms = get_perms(self.user, self.obj)
return perms
def save_obj_perms(self):
"""
Saves selected object permissions by creating new ones and removing
        those which were not selected but already exist.
Should be called *after* form is validated.
"""
perms = self.cleaned_data[self.get_obj_perms_field_name()]
model_perms = [c[0] for c in self.get_obj_perms_field_choices()]
to_remove = set(model_perms) - set(perms)
for perm in to_remove:
remove_perm(perm, self.user, self.obj)
for perm in perms:
assign_perm(perm, self.user, self.obj)
class GroupObjectPermissionsForm(BaseObjectPermissionsForm):
"""
Object level permissions management form for usage with ``Group`` instances.
Example usage::
from django.shortcuts import get_object_or_404
from myapp.models import Post
from guardian.forms import GroupObjectPermissionsForm
from guardian.models import Group
def my_view(request, post_slug, group_id):
group = get_object_or_404(Group, id=group_id)
post = get_object_or_404(Post, slug=post_slug)
form = GroupObjectPermissionsForm(group, post, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save_obj_perms()
...
"""
def __init__(self, group, *args, **kwargs):
self.group = group
super(GroupObjectPermissionsForm, self).__init__(*args, **kwargs)
def get_obj_perms_field_initial(self):
perms = get_perms(self.group, self.obj)
return perms
def save_obj_perms(self):
"""
Saves selected object permissions by creating new ones and removing
        those which were not selected but already exist.
Should be called *after* form is validated.
"""
perms = self.cleaned_data[self.get_obj_perms_field_name()]
model_perms = [c[0] for c in self.get_obj_perms_field_choices()]
to_remove = set(model_perms) - set(perms)
for perm in to_remove:
remove_perm(perm, self.group, self.obj)
for perm in perms:
assign_perm(perm, self.group, self.obj)
| bsd-2-clause | 1,494,128,873,302,491,600 | 31.994845 | 80 | 0.618028 | false |
shsingh/ansible | lib/ansible/module_utils/network/ios/config/static_routes/static_routes.py | 2 | 24560 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The ios_static_routes class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import copy
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.ios.facts.facts import Facts
from ansible.module_utils.network.ios.utils.utils import new_dict_to_set, validate_n_expand_ipv4, filter_dict_having_none_value
class Static_Routes(ConfigBase):
"""
The ios_static_routes class
"""
gather_subset = [
'!all',
'!min',
]
gather_network_resources = [
'static_routes',
]
def __init__(self, module):
super(Static_Routes, self).__init__(module)
def get_static_routes_facts(self, data=None):
""" Get the 'facts' (the current configuration)
:rtype: A dictionary
:returns: The current configuration as a dictionary
"""
facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources, data=data)
static_routes_facts = facts['ansible_network_resources'].get('static_routes')
if not static_routes_facts:
return []
return static_routes_facts
def execute_module(self):
""" Execute the module
:rtype: A dictionary
:returns: The result from module execution
"""
result = {'changed': False}
commands = list()
warnings = list()
if self.state in self.ACTION_STATES:
existing_static_routes_facts = self.get_static_routes_facts()
else:
existing_static_routes_facts = []
if self.state in self.ACTION_STATES or self.state == 'rendered':
commands.extend(self.set_config(existing_static_routes_facts))
if commands and self.state in self.ACTION_STATES:
if not self._module.check_mode:
self._connection.edit_config(commands)
result['changed'] = True
if self.state in self.ACTION_STATES:
result['commands'] = commands
if self.state in self.ACTION_STATES or self.state == 'gathered':
changed_static_routes_facts = self.get_static_routes_facts()
elif self.state == 'rendered':
result['rendered'] = commands
elif self.state == 'parsed':
running_config = self._module.params['running_config']
if not running_config:
self._module.fail_json(
msg="value of running_config parameter must not be empty for state parsed"
)
result['parsed'] = self.get_static_routes_facts(data=running_config)
else:
changed_static_routes_facts = []
if self.state in self.ACTION_STATES:
result['before'] = existing_static_routes_facts
if result['changed']:
result['after'] = changed_static_routes_facts
elif self.state == 'gathered':
result['gathered'] = changed_static_routes_facts
result['warnings'] = warnings
return result
def set_config(self, existing_static_routes_facts):
""" Collect the configuration from the args passed to the module,
collect the current configuration (as a dict from facts)
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
want = self._module.params['config']
have = existing_static_routes_facts
resp = self.set_state(want, have)
return to_list(resp)
def set_state(self, want, have):
""" Select the appropriate function based on the state provided
:param want: the desired configuration as a dictionary
:param have: the current configuration as a dictionary
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
state = self._module.params['state']
if state in ('overridden', 'merged', 'replaced', 'rendered') and not want:
self._module.fail_json(msg='value of config parameter must not be empty for state {0}'.format(state))
commands = []
if state == 'overridden':
commands = self._state_overridden(want, have)
elif state == 'deleted':
commands = self._state_deleted(want, have)
elif state == 'merged' or state == 'rendered':
commands = self._state_merged(want, have)
elif state == 'replaced':
commands = self._state_replaced(want, have)
return commands
def _state_replaced(self, want, have):
""" The command generator when state is replaced
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
        # Drill each iteration of want and have, then take the config call based on dest and afi type comparison
for w in want:
for addr_want in w.get('address_families'):
for route_want in addr_want.get('routes'):
check = False
for h in have:
if h.get('address_families'):
for addr_have in h.get('address_families'):
for route_have in addr_have.get('routes'):
if route_want.get('dest') == route_have.get('dest')\
and addr_want['afi'] == addr_have['afi']:
check = True
have_set = set()
new_hops = []
for each in route_want.get('next_hops'):
want_set = set()
new_dict_to_set(each, [], want_set, 0)
new_hops.append(want_set)
new_dict_to_set(addr_have, [], have_set, 0)
# Check if the have dict next_hops value is diff from want dict next_hops
have_dict = filter_dict_having_none_value(route_want.get('next_hops')[0],
route_have.get('next_hops')[0])
# update the have_dict with forward_router_address
have_dict.update({'forward_router_address': route_have.get('next_hops')[0].
get('forward_router_address')})
# updating the have_dict with next_hops val that's not None
new_have_dict = {}
for k, v in have_dict.items():
if v is not None:
new_have_dict.update({k: v})
# Set the new config from the user provided want config
cmd = self._set_config(w, h, addr_want, route_want, route_have, new_hops, have_set)
if cmd:
# since inplace update isn't allowed for static routes, preconfigured
                                            # static routes need to be deleted before the new want static route changes
# are applied
clear_route_have = copy.deepcopy(route_have)
# inplace update is allowed in case of ipv6 static routes, so not deleting it
# before applying the want changes
if ':' not in route_want.get('dest'):
commands.extend(self._clear_config({}, h, {}, addr_have,
{}, clear_route_have))
commands.extend(cmd)
if check:
break
if check:
break
if not check:
# For configuring any non-existing want config
new_hops = []
for each in route_want.get('next_hops'):
want_set = set()
new_dict_to_set(each, [], want_set, 0)
new_hops.append(want_set)
commands.extend(self._set_config(w, {}, addr_want, route_want, {}, new_hops, set()))
commands = [each for each in commands if 'no' in each] + \
[each for each in commands if 'no' not in each]
return commands
def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
# Creating a copy of want, so that want dict is intact even after delete operation
        # performed during the override want-and-have comparison
temp_want = copy.deepcopy(want)
        # Drill each iteration of want and have, then take the config call based on dest and afi type comparison
for h in have:
if h.get('address_families'):
for addr_have in h.get('address_families'):
for route_have in addr_have.get('routes'):
check = False
for w in temp_want:
for addr_want in w.get('address_families'):
count = 0
for route_want in addr_want.get('routes'):
if route_want.get('dest') == route_have.get('dest') \
and addr_want['afi'] == addr_have['afi']:
check = True
have_set = set()
new_hops = []
for each in route_want.get('next_hops'):
want_set = set()
new_dict_to_set(each, [], want_set, 0)
new_hops.append(want_set)
new_dict_to_set(addr_have, [], have_set, 0)
commands.extend(self._clear_config(w, h, addr_want, addr_have,
route_want, route_have))
commands.extend(self._set_config(w, h, addr_want,
route_want, route_have, new_hops, have_set))
del addr_want.get('routes')[count]
count += 1
if check:
break
if check:
break
if not check:
commands.extend(self._clear_config({}, h, {}, addr_have, {}, route_have))
# For configuring any non-existing want config
for w in temp_want:
for addr_want in w.get('address_families'):
for route_want in addr_want.get('routes'):
new_hops = []
for each in route_want.get('next_hops'):
want_set = set()
new_dict_to_set(each, [], want_set, 0)
new_hops.append(want_set)
commands.extend(self._set_config(w, {}, addr_want, route_want, {}, new_hops, set()))
        # Arranging the cmds such that all delete cmds are fired before all set cmds
commands = [each for each in sorted(commands) if 'no' in each] + \
[each for each in sorted(commands) if 'no' not in each]
return commands
def _state_merged(self, want, have):
""" The command generator when state is merged
:rtype: A list
:returns: the commands necessary to merge the provided into
the current configuration
"""
commands = []
        # Drill each iteration of want and have, then take the config call based on dest and afi type comparison
for w in want:
for addr_want in w.get('address_families'):
for route_want in addr_want.get('routes'):
check = False
for h in have:
if h.get('address_families'):
for addr_have in h.get('address_families'):
for route_have in addr_have.get('routes'):
if route_want.get('dest') == route_have.get('dest')\
and addr_want['afi'] == addr_have['afi']:
check = True
have_set = set()
new_hops = []
for each in route_want.get('next_hops'):
want_set = set()
new_dict_to_set(each, [], want_set, 0)
new_hops.append(want_set)
new_dict_to_set(addr_have, [], have_set, 0)
commands.extend(self._set_config(w, h, addr_want,
route_want, route_have, new_hops, have_set))
if check:
break
if check:
break
if not check:
# For configuring any non-existing want config
new_hops = []
for each in route_want.get('next_hops'):
want_set = set()
new_dict_to_set(each, [], want_set, 0)
new_hops.append(want_set)
commands.extend(self._set_config(w, {}, addr_want, route_want, {}, new_hops, set()))
return commands
def _state_deleted(self, want, have):
""" The command generator when state is deleted
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
commands = []
if want:
            # Drill each iteration of want and have, then fire the delete config call based on dest and afi type comparison
for w in want:
if w.get('address_families'):
for addr_want in w.get('address_families'):
for route_want in addr_want.get('routes'):
check = False
for h in have:
if h.get('address_families'):
for addr_have in h.get('address_families'):
for route_have in addr_have.get('routes'):
if route_want.get('dest') == route_have.get('dest') \
and addr_want['afi'] == addr_have['afi']:
check = True
if route_want.get('next_hops'):
commands.extend(self._clear_config({}, w, {}, addr_want, {}, route_want))
else:
commands.extend(self._clear_config({}, h, {}, addr_have, {}, route_have))
if check:
break
if check:
break
else:
for h in have:
for addr_have in h.get('address_families'):
for route_have in addr_have.get('routes'):
if w.get('vrf') == h.get('vrf'):
commands.extend(self._clear_config({}, h, {}, addr_have, {}, route_have))
else:
# Drill each iteration of have and then based on dest and afi type comparison fire delete config call
for h in have:
for addr_have in h.get('address_families'):
for route_have in addr_have.get('routes'):
commands.extend(self._clear_config({}, h, {}, addr_have, {}, route_have))
return commands
def prepare_config_commands(self, config_dict, cmd):
"""
        function to parse the input dict and prepare the config commands
:rtype: A str
:returns: The command necessary to configure the static routes
"""
dhcp = config_dict.get('dhcp')
distance_metric = config_dict.get('distance_metric')
forward_router_address = config_dict.get('forward_router_address')
global_route_config = config_dict.get('global')
interface = config_dict.get('interface')
multicast = config_dict.get('multicast')
name = config_dict.get('name')
permanent = config_dict.get('permanent')
tag = config_dict.get('tag')
track = config_dict.get('track')
dest = config_dict.get('dest')
temp_dest = dest.split('/')
if temp_dest and ':' not in dest:
dest = validate_n_expand_ipv4(self._module, {'address': dest})
cmd = cmd + dest
if interface:
cmd = cmd + ' {0}'.format(interface)
if forward_router_address:
cmd = cmd + ' {0}'.format(forward_router_address)
if dhcp:
cmd = cmd + ' DHCP'
if distance_metric:
cmd = cmd + ' {0}'.format(distance_metric)
if global_route_config:
cmd = cmd + ' global'
if multicast:
cmd = cmd + ' multicast'
if name:
cmd = cmd + ' name {0}'.format(name)
if permanent:
cmd = cmd + ' permanent'
elif track:
cmd = cmd + ' track {0}'.format(track)
if tag:
cmd = cmd + ' tag {0}'.format(tag)
return cmd
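    # Illustrative example (all values are hypothetical): given
    #   config_dict = {'dest': '198.51.100.0/24',
    #                  'forward_router_address': '198.51.101.1',
    #                  'name': 'route_to_lab', 'tag': 50}
    # and cmd = 'ip route ', this returns
    #   'ip route 198.51.100.0 255.255.255.0 198.51.101.1 name route_to_lab tag 50'
    # (validate_n_expand_ipv4 is assumed to expand the /24 prefix into the
    # dotted netmask form that the IOS CLI expects).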
def _set_config(self, want, have, addr_want, route_want, route_have, hops, have_set):
"""
Set the interface config based on the want and have config
:rtype: A list
:returns: The commands necessary to configure the static routes
"""
commands = []
cmd = None
vrf_diff = False
topology_diff = False
want_vrf = want.get('vrf')
have_vrf = have.get('vrf')
if want_vrf != have_vrf:
vrf_diff = True
want_topology = want.get('topology')
have_topology = have.get('topology')
if want_topology != have_topology:
topology_diff = True
have_dest = route_have.get('dest')
if have_dest:
have_set.add(tuple(iteritems({'dest': have_dest})))
        # configure set cmd for each hop under the same destination
for each in hops:
diff = each - have_set
if vrf_diff:
each.add(tuple(iteritems({'vrf': want_vrf})))
if topology_diff:
each.add(tuple(iteritems({'topology': want_topology})))
if diff or vrf_diff or topology_diff:
if want_vrf and not vrf_diff:
each.add(tuple(iteritems({'vrf': want_vrf})))
                if want_topology and not topology_diff:
each.add(tuple(iteritems({'topology': want_topology})))
each.add(tuple(iteritems({'afi': addr_want.get('afi')})))
each.add(tuple(iteritems({'dest': route_want.get('dest')})))
temp_want = {}
for each_want in each:
temp_want.update(dict(each_want))
if temp_want.get('afi') == 'ipv4':
cmd = 'ip route '
vrf = temp_want.get('vrf')
if vrf:
cmd = cmd + 'vrf {0} '.format(vrf)
cmd = self.prepare_config_commands(temp_want, cmd)
elif temp_want.get('afi') == 'ipv6':
cmd = 'ipv6 route '
cmd = self.prepare_config_commands(temp_want, cmd)
commands.append(cmd)
return commands
def _clear_config(self, want, have, addr_want, addr_have, route_want, route_have):
"""
Delete the interface config based on the want and have config
:rtype: A list
:returns: The commands necessary to configure the static routes
"""
commands = []
cmd = None
vrf_diff = False
topology_diff = False
want_vrf = want.get('vrf')
have_vrf = have.get('vrf')
if want_vrf != have_vrf:
vrf_diff = True
want_topology = want.get('topology')
have_topology = have.get('topology')
if want_topology != have_topology:
topology_diff = True
want_set = set()
new_dict_to_set(addr_want, [], want_set, 0)
have_hops = []
for each in route_have.get('next_hops'):
temp_have_set = set()
new_dict_to_set(each, [], temp_have_set, 0)
have_hops.append(temp_have_set)
        # configure delete cmd for each hop under the same destination
for each in have_hops:
diff = each - want_set
if vrf_diff:
each.add(tuple(iteritems({'vrf': have_vrf})))
if topology_diff:
each.add(tuple(iteritems({'topology': want_topology})))
if diff or vrf_diff or topology_diff:
if want_vrf and not vrf_diff:
each.add(tuple(iteritems({'vrf': want_vrf})))
                if want_topology and not topology_diff:
each.add(tuple(iteritems({'topology': want_topology})))
if addr_want:
each.add(tuple(iteritems({'afi': addr_want.get('afi')})))
else:
each.add(tuple(iteritems({'afi': addr_have.get('afi')})))
if route_want:
each.add(tuple(iteritems({'dest': route_want.get('dest')})))
else:
each.add(tuple(iteritems({'dest': route_have.get('dest')})))
temp_want = {}
for each_want in each:
temp_want.update(dict(each_want))
if temp_want.get('afi') == 'ipv4':
cmd = 'no ip route '
vrf = temp_want.get('vrf')
if vrf:
cmd = cmd + 'vrf {0} '.format(vrf)
cmd = self.prepare_config_commands(temp_want, cmd)
elif temp_want.get('afi') == 'ipv6':
cmd = 'no ipv6 route '
cmd = self.prepare_config_commands(temp_want, cmd)
commands.append(cmd)
return commands
| gpl-3.0 | 6,495,762,339,019,188,000 | 45.165414 | 127 | 0.473331 | false |
JGulic/empathy | tools/make-version-script.py | 14 | 6034 | #!/usr/bin/python
"""Construct a GNU ld or Debian dpkg version-script from a set of
RFC822-style symbol lists.
Usage:
make-version-script.py [--symbols SYMBOLS] [--unreleased-version VER]
[--dpkg "LIBRARY.so.0 LIBRARY0 #MINVER#"]
[--dpkg-build-depends-package LIBRARY-dev]
[FILES...]
Each FILE starts with RFC822-style headers "Version:" (the name of the
symbol version, e.g. FOO_1.2.3) and "Extends:" (either the previous
version, or "-" if this is the first version). Next there is a blank
line, then a list of C symbols one per line.
Comments (lines starting with whitespace + "#") are allowed and ignored.
If --symbols is given, SYMBOLS lists the symbols actually exported by
the library (one per line). If --unreleased-version is given, any symbols
in SYMBOLS but not in FILES are assigned to that version; otherwise, any
such symbols cause an error.
If --dpkg is given, produce a Debian dpkg-gensymbols file instead of a
GNU ld version-script. The argument to --dpkg is the first line of the
resulting symbols file, and --dpkg-build-depends-package can optionally
be used to set the Build-Depends-Package field.
This script originates in telepathy-glib <http://telepathy.freedesktop.org/> -
please send us any changes that are needed.
"""
# Copyright (C) 2008 Collabora Ltd. <http://www.collabora.co.uk/>
# Copyright (C) 2008 Nokia Corporation
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
import sys
from getopt import gnu_getopt
from sets import Set as set
def e(format, *args):
sys.stderr.write((format + '\n') % args)
def main(abifiles, symbols=None, unreleased_version=None,
dpkg=False, dpkg_first_line=None, dpkg_build_depends_package=None):
gnuld = not dpkg
symbol_set = None
if symbols is not None:
symbol_set = open(symbols, 'r').readlines()
symbol_set = map(str.strip, symbol_set)
symbol_set = set(symbol_set)
versioned_symbols = set()
dpkg_symbols = []
dpkg_versions = []
if dpkg:
assert dpkg_first_line is not None
print dpkg_first_line
if dpkg_build_depends_package is not None:
print "* Build-Depends-Package: %s" % dpkg_build_depends_package
for filename in abifiles:
lines = open(filename, 'r').readlines()
version = None
extends = None
release = None
for i, line in enumerate(lines):
line = line.strip()
if line.startswith('#'):
continue
elif not line:
                # the transition between headers and symbols
cut = i + 1
break
elif line.lower().startswith('version:'):
line = line[8:].strip()
version = line
continue
elif line.lower().startswith('extends:'):
line = line[8:].strip()
extends = line
continue
elif line.lower().startswith('release:'):
release = line[8:].strip()
continue
else:
e('Could not understand line in %s header: %s', filename, line)
raise SystemExit(1)
else:
e('No symbols in %s', filename)
raise SystemExit(1)
if version is None:
            e('No Version: header in %s', filename)
raise SystemExit(1)
if extends is None:
e('No Extends: header in %s', filename)
raise SystemExit(1)
if release is None and dpkg:
e('No Release: header in %s', filename)
raise SystemExit(1)
if dpkg:
dpkg_versions.append('%s@%s %s' % (version, version, release))
lines = lines[cut:]
if gnuld:
print "%s {" % version
print " global:"
for symbol in lines:
symbol = symbol.strip()
if symbol.startswith('#'):
continue
if gnuld:
print " %s;" % symbol
elif dpkg:
dpkg_symbols.append('%s@%s %s' % (symbol, version, release))
versioned_symbols.add(symbol)
if gnuld:
if extends == '-':
print " local:"
print " *;"
print "};"
else:
print "} %s;" % extends
print
if dpkg:
dpkg_symbols.sort()
dpkg_versions.sort()
for x in dpkg_versions:
print " %s" % x
for x in dpkg_symbols:
print " %s" % x
if symbol_set is not None:
missing = versioned_symbols - symbol_set
if missing:
e('These symbols have disappeared:')
for symbol in missing:
e(' %s', symbol)
raise SystemExit(1)
unreleased = symbol_set - versioned_symbols
if unreleased:
if unreleased_version is None:
e('Unversioned symbols are not allowed in releases:')
for symbol in unreleased:
e(' %s', symbol)
raise SystemExit(1)
if gnuld:
print "%s {" % unreleased_version
print " global:"
for symbol in unreleased:
print " %s;" % symbol
print "} %s;" % version
if __name__ == '__main__':
options, argv = gnu_getopt (sys.argv[1:], '',
['symbols=', 'unreleased-version=',
'dpkg=', 'dpkg-build-depends-package='])
opts = {'dpkg': False}
for option, value in options:
if option == '--dpkg':
opts['dpkg'] = True
opts['dpkg_first_line'] = value
else:
opts[option.lstrip('-').replace('-', '_')] = value
main(argv, **opts)
| gpl-2.0 | 3,171,748,179,843,922,400 | 28.434146 | 79 | 0.547895 | false |
celiafish/VisTrails | vistrails/gui/paramexplore/pe_tab.py | 2 | 17319 | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" The file describes the parameter exploration tab for VisTrails
QParameterExplorationTab
"""
from PyQt4 import QtCore, QtGui
from ast import literal_eval
from xml.dom.minidom import parseString
from xml.sax.saxutils import escape
from vistrails.core import debug
from vistrails.core.interpreter.default import get_default_interpreter
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.param_explore import ActionBasedParameterExploration
from vistrails.core.system import current_time, strftime, \
get_vistrails_default_pkg_prefix
from vistrails.gui.common_widgets import QDockContainer, QToolWindowInterface
from vistrails.gui.paramexplore.pe_table import QParameterExplorationWidget, QParameterSetEditor
from vistrails.gui.paramexplore.virtual_cell import QVirtualCellWindow
from vistrails.gui.paramexplore.param_view import QParameterView
from vistrails.gui.paramexplore.pe_pipeline import QAnnotatedPipelineView
################################################################################
class QParameterExplorationTab(QDockContainer, QToolWindowInterface):
"""
QParameterExplorationTab is a tab containing different widgets
related to parameter exploration
"""
explorationId = 0
def __init__(self, parent=None):
""" QParameterExplorationTab(parent: QWidget)
-> QParameterExplorationTab
Make it a main window with dockable area and a
QParameterExplorationTable
"""
QDockContainer.__init__(self, parent)
self.setWindowTitle('Parameter Exploration')
self.toolWindow().setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
self.toolWindow().hide()
self.peWidget = QParameterExplorationWidget()
self.setCentralWidget(self.peWidget)
self.connect(self.peWidget.table,
QtCore.SIGNAL('exploreChange(bool)'),
self.exploreChange)
self.paramView = QParameterView(self)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea,
self.paramView.toolWindow())
self.annotatedPipelineView = QAnnotatedPipelineView(self)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea,
self.annotatedPipelineView.toolWindow())
self.virtualCell = QVirtualCellWindow(self)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea,
self.virtualCell.toolWindow())
self.controller = None
self.currentVersion = -1
def addViewActionsToMenu(self, menu):
"""addViewActionsToMenu(menu: QMenu) -> None
Add toggle view actions to menu
"""
menu.addAction(self.paramView.toolWindow().toggleViewAction())
menu.addAction(self.annotatedPipelineView.toolWindow().toggleViewAction())
menu.addAction(self.virtualCell.toolWindow().toggleViewAction())
def removeViewActionsFromMenu(self, menu):
"""removeViewActionsFromMenu(menu: QMenu) -> None
Remove toggle view actions from menu
"""
menu.removeAction(self.paramView.toolWindow().toggleViewAction())
menu.removeAction(self.annotatedPipelineView.toolWindow().toggleViewAction())
menu.removeAction(self.virtualCell.toolWindow().toggleViewAction())
def setController(self, controller):
""" setController(controller: VistrailController) -> None
Assign a controller to the parameter exploration tab
"""
self.controller = controller
def getParameterExploration(self):
""" getParameterExploration() -> string
Generates an XML string that represents the current
parameter exploration, and which can be loaded with
setParameterExploration().
"""
# Construct xml for persisting parameter exploration
        escape_dict = { "'": "&apos;", '"': "&quot;", '\n': "&#xa;" }
timestamp = strftime(current_time(), '%Y-%m-%d %H:%M:%S')
# TODO: For now, we use the timestamp as the 'name' - Later, we should set 'name' based on a UI input field
xml = '\t<paramexp dims="%s" layout="%s" date="%s" name="%s">' % (str(self.peWidget.table.label.getCounts()), str(self.virtualCell.getConfiguration()[2]), timestamp, timestamp)
for i in xrange(self.peWidget.table.layout().count()):
pEditor = self.peWidget.table.layout().itemAt(i).widget()
if pEditor and isinstance(pEditor, QParameterSetEditor):
firstParam = True
for paramWidget in pEditor.paramWidgets:
paramInfo = paramWidget.param
interpolator = paramWidget.editor.stackedEditors.currentWidget()
intType = interpolator.exploration_name
# Write function tag prior to the first parameter of the function
if firstParam:
xml += '\n\t\t<function id="%s" alias="%s" name="%s">' % (paramInfo.parent_id, paramInfo.is_alias, pEditor.info[0])
firstParam = False
# Write parameter tag
xml += '\n\t\t\t<param id="%s" dim="%s" interp="%s"' % (paramInfo.id, paramWidget.getDimension(), intType)
if intType == 'Linear Interpolation':
xml += ' min="%s" max="%s"' % (interpolator.fromEdit.get_value(), interpolator.toEdit.get_value())
elif intType == 'List':
xml += ' values="%s"' % escape(str(interpolator._str_values), escape_dict)
elif intType == 'User-defined Function':
xml += ' code="%s"' % escape(interpolator.function, escape_dict)
xml += '/>'
xml += '\n\t\t</function>'
xml += '\n\t</paramexp>'
return xml
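    # For illustration, the XML produced above has roughly this shape
    # (attribute values are made up):
    #
    #   <paramexp dims="..." layout="..." date="..." name="...">
    #       <function id="2" alias="False" name="SetScalar">
    #           <param id="5" dim="0" interp="Linear Interpolation"
    #                  min="0.0" max="1.0"/>
    #       </function>
    #   </paramexp>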
def setParameterExploration(self, xmlString):
""" setParameterExploration(xmlString: string) -> None
Sets the current parameter exploration to the one
defined by 'xmlString'.
"""
if not xmlString:
return
# Parse/validate the xml
try:
xmlDoc = parseString(xmlString).documentElement
except Exception:
debug.critical("Parameter Exploration load failed because of "
"invalid XML:\n\n%s" % xmlString)
return
# Set the exploration dimensions
dims = literal_eval(xmlDoc.attributes['dims'].value)
self.peWidget.table.label.setCounts(dims)
# Set the virtual cell layout
layout = literal_eval(xmlDoc.attributes['layout'].value)
self.virtualCell.setConfiguration(layout)
# Populate parameter exploration window with stored functions and aliases
for f in xmlDoc.getElementsByTagName('function'):
# Retrieve function attributes
f_id = long(f.attributes['id'].value)
f_name = str(f.attributes['name'].value)
f_is_alias = (str(f.attributes['alias'].value) == 'True')
# Search the parameter treeWidget for this function and add it directly
newEditor = None
for tidx in xrange(self.paramView.treeWidget.topLevelItemCount()):
moduleItem = self.paramView.treeWidget.topLevelItem(tidx)
for cidx in xrange(moduleItem.childCount()):
paramInfo = moduleItem.child(cidx).parameter
name, params = paramInfo
if params[0].parent_id == f_id and params[0].is_alias == f_is_alias:
newEditor = self.peWidget.table.addParameter(paramInfo)
# Retrieve params for this function and set their values in the UI
if newEditor:
for p in f.getElementsByTagName('param'):
# Locate the param in the newly added param editor and set values
p_id = long(p.attributes['id'].value)
for paramWidget in newEditor.paramWidgets:
if paramWidget.param.id == p_id:
# Set Parameter Dimension (radio button)
p_dim = int(p.attributes['dim'].value)
paramWidget.setDimension(p_dim)
# Set Interpolator Type (dropdown list)
p_intType = str(p.attributes['interp'].value)
paramWidget.editor.selectInterpolator(p_intType)
# Set Interpolator Value(s)
interpolator = paramWidget.editor.stackedEditors.currentWidget()
if p_intType == 'Linear Interpolation':
# Set min/max
p_min = str(p.attributes['min'].value)
p_max = str(p.attributes['max'].value)
interpolator.fromEdit.setText(p_min)
interpolator.toEdit.setText(p_max)
elif p_intType == 'List':
p_values = str(p.attributes['values'].value)
# Set internal list structure
interpolator._str_values = \
literal_eval(p_values)
# Update UI list
if interpolator.type == 'String':
interpolator.listValues.setText(p_values)
else:
interpolator.listValues.setText(p_values.replace("'", "").replace('"', ''))
elif p_intType == 'User-defined Function':
# Set function code
p_code = str(p.attributes['code'].value)
interpolator.function = p_code
def showEvent(self, event):
""" showEvent(event: QShowEvent) -> None
Update the tab when it is shown
"""
if self.currentVersion!=self.controller.current_version:
self.currentVersion = self.controller.current_version
# Update the virtual cell
pipeline = self.controller.current_pipeline
self.virtualCell.updateVirtualCell(pipeline)
# Now we need to inspect the parameter list
self.paramView.treeWidget.updateFromPipeline(pipeline)
# Update the annotated ids
self.annotatedPipelineView.updateAnnotatedIds(pipeline)
# Update the parameter exploration table
self.peWidget.updatePipeline(pipeline)
# Update the UI with the most recent parameter exploration
# TODO: For now, we just strip the root tags since there's only one
# exploration - Later we should parse the root tree and select
# the active exploration based on date, or user choice
xmlString = self.controller.vistrail.get_paramexp(self.currentVersion)
if xmlString is not None:
striplen = len("<paramexps>")
xmlString = xmlString[striplen:-(striplen+1)].strip()
self.setParameterExploration(xmlString)
def performParameterExploration(self):
""" performParameterExploration() -> None
Perform the exploration by collecting a list of actions
corresponding to each dimension
"""
registry = get_module_registry()
actions = self.peWidget.table.collectParameterActions()
spreadsheet_pkg = '%s.spreadsheet' % get_vistrails_default_pkg_prefix()
# Set the annotation to persist the parameter exploration
# TODO: For now, we just replace the existing exploration - Later we should append them.
xmlString = "<paramexps>\n" + self.getParameterExploration() + "\n</paramexps>"
self.controller.vistrail.set_paramexp(self.currentVersion, xmlString)
self.controller.set_changed(True)
if self.controller.current_pipeline and actions:
explorer = ActionBasedParameterExploration()
(pipelines, performedActions) = explorer.explore(
self.controller.current_pipeline, actions)
dim = [max(1, len(a)) for a in actions]
if (registry.has_module(spreadsheet_pkg, 'CellLocation') and
registry.has_module(spreadsheet_pkg, 'SheetReference')):
modifiedPipelines = self.virtualCell.positionPipelines(
'PE#%d %s' % (QParameterExplorationTab.explorationId,
self.controller.name),
dim[2], dim[1], dim[0], pipelines, self.controller)
else:
modifiedPipelines = pipelines
mCount = []
for p in modifiedPipelines:
if len(mCount)==0:
mCount.append(0)
else:
mCount.append(len(p.modules)+mCount[len(mCount)-1])
# Now execute the pipelines
totalProgress = sum([len(p.modules) for p in modifiedPipelines])
progress = QtGui.QProgressDialog('Performing Parameter '
'Exploration...',
'&Cancel',
0, totalProgress)
progress.setWindowTitle('Parameter Exploration')
progress.setWindowModality(QtCore.Qt.WindowModal)
progress.show()
QParameterExplorationTab.explorationId += 1
interpreter = get_default_interpreter()
for pi in xrange(len(modifiedPipelines)):
progress.setValue(mCount[pi])
QtCore.QCoreApplication.processEvents()
if progress.wasCanceled():
break
def moduleExecuted(objId):
if not progress.wasCanceled():
#progress.setValue(progress.value()+1)
#the call above was crashing when used by multithreaded
#code, replacing with the call below (thanks to Terence
#for submitting this fix).
QtCore.QMetaObject.invokeMethod(progress, "setValue",
QtCore.Q_ARG(int,progress.value()+1))
QtCore.QCoreApplication.processEvents()
kwargs = {'locator': self.controller.locator,
'current_version': self.controller.current_version,
'view': self.controller.current_pipeline_scene,
'module_executed_hook': [moduleExecuted],
'reason': 'Parameter Exploration',
'actions': performedActions[pi],
}
interpreter.execute(modifiedPipelines[pi], **kwargs)
progress.setValue(totalProgress)
def exploreChange(self, notEmpty):
""" exploreChange(notEmpty: bool) -> None
echo the signal
"""
self.emit(QtCore.SIGNAL('exploreChange(bool)'), notEmpty)
| bsd-3-clause | 4,047,246,579,297,903,000 | 49.938235 | 184 | 0.58739 | false |
bwalks/pymemcache | pymemcache/test/test_integration.py | 2 | 8843 | # Copyright 2012 Pinterest.com
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import json
import pytest
import six
from pymemcache.client.base import Client
from pymemcache.exceptions import (
MemcacheIllegalInputError,
MemcacheClientError
)
from pymemcache.serde import (
python_memcache_serializer,
python_memcache_deserializer
)
def get_set_helper(client, key, value, key2, value2):
result = client.get(key)
assert result is None
client.set(key, value, noreply=False)
result = client.get(key)
assert result == value
client.set(key2, value2, noreply=True)
result = client.get(key2)
assert result == value2
result = client.get_many([key, key2])
assert result == {key: value, key2: value2}
result = client.get_many([])
assert result == {}
@pytest.mark.integration()
def test_get_set(client_class, host, port, socket_module):
client = client_class((host, port), socket_module=socket_module)
client.flush_all()
key = b'key'
value = b'value'
key2 = b'key2'
value2 = b'value2'
get_set_helper(client, key, value, key2, value2)
@pytest.mark.integration()
def test_get_set_unicode_key(client_class, host, port, socket_module):
client = client_class((host, port), socket_module=socket_module,
allow_unicode_keys=True)
client.flush_all()
key = u"こんにちは"
value = b'hello'
key2 = 'my☃'
value2 = b'value2'
get_set_helper(client, key, value, key2, value2)
@pytest.mark.integration()
def test_add_replace(client_class, host, port, socket_module):
client = client_class((host, port), socket_module=socket_module)
client.flush_all()
result = client.add(b'key', b'value', noreply=False)
assert result is True
result = client.get(b'key')
assert result == b'value'
result = client.add(b'key', b'value2', noreply=False)
assert result is False
result = client.get(b'key')
assert result == b'value'
result = client.replace(b'key1', b'value1', noreply=False)
assert result is False
result = client.get(b'key1')
assert result is None
result = client.replace(b'key', b'value2', noreply=False)
assert result is True
result = client.get(b'key')
assert result == b'value2'
@pytest.mark.integration()
def test_append_prepend(client_class, host, port, socket_module):
client = client_class((host, port), socket_module=socket_module)
client.flush_all()
result = client.append(b'key', b'value', noreply=False)
assert result is False
result = client.get(b'key')
assert result is None
result = client.set(b'key', b'value', noreply=False)
assert result is True
result = client.append(b'key', b'after', noreply=False)
assert result is True
result = client.get(b'key')
assert result == b'valueafter'
result = client.prepend(b'key1', b'value', noreply=False)
assert result is False
result = client.get(b'key1')
assert result is None
result = client.prepend(b'key', b'before', noreply=False)
assert result is True
result = client.get(b'key')
assert result == b'beforevalueafter'
@pytest.mark.integration()
def test_cas(client_class, host, port, socket_module):
client = client_class((host, port), socket_module=socket_module)
client.flush_all()
result = client.cas(b'key', b'value', b'1', noreply=False)
assert result is None
result = client.set(b'key', b'value', noreply=False)
assert result is True
result = client.cas(b'key', b'value', b'1', noreply=False)
assert result is False
result, cas = client.gets(b'key')
assert result == b'value'
result = client.cas(b'key', b'value1', cas, noreply=False)
assert result is True
result = client.cas(b'key', b'value2', cas, noreply=False)
assert result is False
@pytest.mark.integration()
def test_gets(client_class, host, port, socket_module):
client = client_class((host, port), socket_module=socket_module)
client.flush_all()
result = client.gets(b'key')
assert result == (None, None)
result = client.set(b'key', b'value', noreply=False)
assert result is True
result = client.gets(b'key')
assert result[0] == b'value'
@pytest.mark.delete()
def test_delete(client_class, host, port, socket_module):
client = client_class((host, port), socket_module=socket_module)
client.flush_all()
result = client.delete(b'key', noreply=False)
assert result is False
result = client.get(b'key')
assert result is None
result = client.set(b'key', b'value', noreply=False)
assert result is True
result = client.delete(b'key', noreply=False)
assert result is True
result = client.get(b'key')
assert result is None
@pytest.mark.integration()
def test_incr_decr(client_class, host, port, socket_module):
client = Client((host, port), socket_module=socket_module)
client.flush_all()
result = client.incr(b'key', 1, noreply=False)
assert result is None
result = client.set(b'key', b'0', noreply=False)
assert result is True
result = client.incr(b'key', 1, noreply=False)
assert result == 1
def _bad_int():
client.incr(b'key', b'foobar')
with pytest.raises(MemcacheClientError):
_bad_int()
result = client.decr(b'key1', 1, noreply=False)
assert result is None
result = client.decr(b'key', 1, noreply=False)
assert result == 0
result = client.get(b'key')
assert result == b'0'
@pytest.mark.integration()
def test_misc(client_class, host, port, socket_module):
    client = client_class((host, port), socket_module=socket_module)
client.flush_all()
@pytest.mark.integration()
def test_serialization_deserialization(host, port, socket_module):
def _ser(key, value):
return json.dumps(value).encode('ascii'), 1
def _des(key, value, flags):
if flags == 1:
return json.loads(value.decode('ascii'))
return value
client = Client((host, port), serializer=_ser, deserializer=_des,
socket_module=socket_module)
client.flush_all()
value = {'a': 'b', 'c': ['d']}
client.set(b'key', value)
result = client.get(b'key')
assert result == value
@pytest.mark.integration()
def test_serde_serialization(client_class, host, port, socket_module):
def check(value):
client.set(b'key', value, noreply=False)
result = client.get(b'key')
assert result == value
assert type(result) is type(value)
client = client_class((host, port), serializer=python_memcache_serializer,
deserializer=python_memcache_deserializer,
socket_module=socket_module)
client.flush_all()
check(b'byte string')
check(u'unicode string')
check('olé')
check(u'olé')
check(1)
check(123123123123123123123)
check({'a': 'pickle'})
check([u'one pickle', u'two pickle'])
testdict = defaultdict(int)
testdict[u'one pickle']
testdict[b'two pickle']
check(testdict)
@pytest.mark.integration()
def test_errors(client_class, host, port, socket_module):
client = client_class((host, port), socket_module=socket_module)
client.flush_all()
def _key_with_ws():
client.set(b'key with spaces', b'value', noreply=False)
with pytest.raises(MemcacheIllegalInputError):
_key_with_ws()
def _key_with_illegal_carriage_return():
client.set(b'\r\nflush_all', b'value', noreply=False)
with pytest.raises(MemcacheIllegalInputError):
_key_with_illegal_carriage_return()
def _key_too_long():
client.set(b'x' * 1024, b'value', noreply=False)
with pytest.raises(MemcacheClientError):
_key_too_long()
def _unicode_key_in_set():
client.set(six.u('\u0FFF'), b'value', noreply=False)
with pytest.raises(MemcacheClientError):
_unicode_key_in_set()
def _unicode_key_in_get():
client.get(six.u('\u0FFF'))
with pytest.raises(MemcacheClientError):
_unicode_key_in_get()
def _unicode_value_in_set():
client.set(b'key', six.u('\u0FFF'), noreply=False)
with pytest.raises(MemcacheClientError):
_unicode_value_in_set()
| apache-2.0 | -6,648,300,440,692,477,000 | 27.947541 | 78 | 0.658285 | false |
KNMI/VERCE | verce-hpc-pe/src/dispel4py/examples/graph_testing/parallel_pipeline.py | 4 | 3516 | # Copyright (c) The University of Edinburgh 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This is a dispel4py graph where each MPI process computes a partition of the workflow instead of a PE instance.
This happens automatically when the graph has more nodes than MPI processes.
In terms of internal execution, the user has control over which parts of the graph are distributed to each MPI process.
See :py:mod:`~test.graph_testing.partition_parallel_pipeline` on how to specify the partitioning.
.. image:: /api/images/parallel_pipeline.png
It can be executed with MPI and STORM.
* MPI: Please change into the dispel4py directory.
Execute the MPI mapping as follows::
mpiexec -n <number mpi_processes> python -m dispel4py.worker_mpi [-a name_dispel4py_graph] [-f file containing the input dataset in JSON format]
        [-i number of iterations/runs] [-s]
    The argument '-s' forces the graph to run in simple processing mode, which means that the first node of the graph will be executed in one process, and the rest of the nodes will be executed in a second process.
    When <-i number of iterations/runs> is not indicated, the graph is executed once by default.
    For example::
        mpiexec -n 3 python -m dispel4py.worker_mpi dispel4py.examples.graph_testing.parallel_pipeline -i 10
.. note::
    To force the partitioning, the graph must have more nodes than available MPI processes.
    This graph has 4 nodes and we use 3 MPI processes to execute it. Additionally, if we use the -s option, the graph will be partitioned across only 2 MPI processes.
Output::
Processing 10 iterations
Graph is too large for MPI job size: 4 > 3. Start simple processing.
Partitions: [TestProducer0], [TestOneInOneOut1, TestOneInOneOut2, TestOneInOneOut3]
Processes: {'GraphWrapperPE5': [1, 2], 'GraphWrapperPE4': [0]}
GraphWrapperPE4 (rank 0): I'm a spout
GraphWrapperPE5 (rank 1): I'm a bolt
Rank 0: Sending terminate message to [1, 2]
GraphWrapperPE4 (rank 0): Processed 10 input block(s)
GraphWrapperPE4 (rank 0): Completed.
GraphWrapperPE5 (rank 1): Processed 5 input block(s)
GraphWrapperPE5 (rank 1): Completed.
GraphWrapperPE5 (rank 2): I'm a bolt
GraphWrapperPE5 (rank 2): Processed 5 input block(s)
GraphWrapperPE5 (rank 2): Completed.
* STORM:
'''
from dispel4py.examples.graph_testing import testing_PEs as t
from dispel4py.workflow_graph import WorkflowGraph
def testParallelPipeline():
'''
Creates a graph with 4 nodes.
:rtype: the created graph
'''
graph = WorkflowGraph()
prod = t.TestProducer()
prev = prod
cons1 = t.TestOneInOneOut()
cons2 = t.TestOneInOneOut()
cons3 = t.TestOneInOneOut()
graph.connect(prod, 'output', cons1, 'input')
graph.connect(cons1, 'output', cons2, 'input')
graph.connect(cons1, 'output', cons3, 'input')
return graph
''' important: this is the graph_variable '''
graph = testParallelPipeline()
| mit | -4,423,050,062,839,139,000 | 38.505618 | 214 | 0.717577 | false |
aerwin3/swift | swift/cli/form_signature.py | 33 | 5340 | # Copyright (c) 2010-2012 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for generating a form signature for use with FormPost middleware.
"""
from __future__ import print_function
import hmac
from hashlib import sha1
from os.path import basename
from time import time
def main(argv):
if len(argv) != 7:
prog = basename(argv[0])
print('Syntax: %s <path> <redirect> <max_file_size> '
'<max_file_count> <seconds> <key>' % prog)
print()
print('Where:')
print(' <path> The prefix to use for form uploaded')
print(' objects. For example:')
print(' /v1/account/container/object_prefix_ would')
print(' ensure all form uploads have that path')
print(' prepended to the browser-given file name.')
print(' <redirect> The URL to redirect the browser to after')
print(' the uploads have completed.')
print(' <max_file_size> The maximum file size per file uploaded.')
print(' <max_file_count> The maximum number of uploaded files')
print(' allowed.')
print(' <seconds> The number of seconds from now to allow')
print(' the form post to begin.')
print(' <key> The X-Account-Meta-Temp-URL-Key for the')
print(' account.')
print()
print('Example output:')
print(' Expires: 1323842228')
print(' Signature: 18de97e47345a82c4dbfb3b06a640dbb')
print()
print('Sample form:')
print()
print('NOTE: the <form> tag\'s "action" attribute does not contain '
'the Swift cluster\'s hostname.')
print('You should manually add it before using the form.')
print()
print('<form action="/v1/a/c/o" method="POST" '
'enctype="multipart/form-data">')
print(' <input type="hidden" name="max_file_size" value="123" />')
print(' ... more HTML ...')
print(' <input type="submit" />')
print('</form>')
return 1
path, redirect, max_file_size, max_file_count, seconds, key = argv[1:]
try:
max_file_size = int(max_file_size)
except ValueError:
max_file_size = -1
if max_file_size < 0:
print('Please use a <max_file_size> value greater than or equal to 0.')
return 1
try:
max_file_count = int(max_file_count)
except ValueError:
max_file_count = 0
if max_file_count < 1:
print('Please use a positive <max_file_count> value.')
return 1
try:
expires = int(time() + int(seconds))
except ValueError:
expires = 0
if expires < 1:
print('Please use a positive <seconds> value.')
return 1
parts = path.split('/', 4)
# Must be four parts, ['', 'v1', 'a', 'c'], must be a v1 request, have
# account and container values, and optionally have an object prefix.
if len(parts) < 4 or parts[0] or parts[1] != 'v1' or not parts[2] or \
not parts[3]:
print('<path> must point to a container at least.')
print('For example: /v1/account/container')
print(' Or: /v1/account/container/object_prefix')
return 1
sig = hmac.new(key, '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
max_file_count, expires),
sha1).hexdigest()
print(' Expires:', expires)
print('Signature:', sig)
print('')
print('Sample form:\n')
print('NOTE: the <form> tag\'s "action" attribute does not '
'contain the Swift cluster\'s hostname.')
print('You should manually add it before using the form.\n')
print('<form action="%s" method="POST" enctype="multipart/form-data">'
% path)
if redirect:
print(' <input type="hidden" name="redirect" value="%s" />'
% redirect)
print(' <input type="hidden" name="max_file_size" value="%d" />'
% max_file_size)
print(' <input type="hidden" name="max_file_count" value="%d" />'
% max_file_count)
print(' <input type="hidden" name="expires" value="%d" />' % expires)
print(' <input type="hidden" name="signature" value="%s" />' % sig)
print(' <!-- This signature allows for at most %d files, -->'
% max_file_count)
print(' <!-- but it may also have any smaller number. -->')
print(' <!-- Remove file inputs as needed. -->')
for i in range(max_file_count):
print(' <input type="file" name="file%d" />' % i)
print(' <br />')
print(' <input type="submit" />')
print('</form>')
return 0
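
# Illustrative entry point, not part of the original module: in the Swift
# tree this script is normally invoked through a thin bin/ wrapper (the
# wrapper name, swift-form-signature, is an assumption here). It simply
# shows how main() expects to receive argv.
if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))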
| apache-2.0 | 3,678,419,794,358,147,000 | 40.71875 | 79 | 0.568539 | false |
regular/pyglet-avbin-optimizations | pyglet/clock.py | 1 | 34509 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Precise framerate calculation, scheduling and framerate limiting.
Measuring time
==============
The `tick` and `get_fps` functions can be used in conjunction to fulfil most
games' basic requirements::
    from pyglet import clock
while True:
dt = clock.tick()
# ... update and render ...
print 'FPS is %f' % clock.get_fps()
The ``dt`` value returned gives the number of seconds (as a float) since the
last "tick".
The `get_fps` function averages the framerate over a sliding window of
approximately 1 second. (You can calculate the instantaneous framerate by
taking the reciprocal of ``dt``).
Always remember to `tick` the clock!
Limiting frame-rate
===================
The framerate can be limited::
clock.set_fps_limit(60)
This causes `clock` to sleep during each `tick` in an attempt to keep the
number of ticks (frames) per second below 60.
The implementation uses platform-dependent high-resolution sleep functions
to achieve better accuracy with busy-waiting than would be possible using
just the `time` module.
Scheduling
==========
You can schedule a function to be called every time the clock is ticked::
def callback(dt):
print '%f seconds since last callback' % dt
clock.schedule(callback)
The `schedule_interval` method causes a function to be called every "n"
seconds::
clock.schedule_interval(callback, .5) # called twice a second
The `schedule_once` method causes a function to be called once "n" seconds
in the future::
clock.schedule_once(callback, 5) # called in 5 seconds
All of the `schedule` methods will pass on any additional args or keyword args
you specify to the callback function::
def animate(dt, velocity, sprite):
sprite.position += dt * velocity
clock.schedule(animate, velocity=5.0, sprite=alien)
You can cancel a function scheduled with any of these methods using
`unschedule`::
clock.unschedule(animate)
Displaying FPS
==============
The ClockDisplay class provides a simple FPS counter. You should create
an instance of ClockDisplay once during the application's start up::
fps_display = clock.ClockDisplay()
Call draw on the ClockDisplay object for each frame::
fps_display.draw()
There are several options to change the font, color and text displayed
within the __init__ method.
Using multiple clocks
=====================
The clock functions are all relayed to an instance of `Clock` which is
initalised with the module. You can get this instance to use directly::
clk = clock.get_default()
You can also replace the default clock with your own::
myclk = clock.Clock()
clock.set_default(myclk)
Each clock maintains its own set of scheduled functions and FPS
limiting/measurement. Each clock must be "ticked" separately.
Multiple and derived clocks potentially allow you to separate "game-time" and
"wall-time", or to synchronise your clock to an audio or video stream instead
of the system clock.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import time
import sys
import ctypes
import pyglet.lib
if sys.platform in ('win32', 'cygwin'):
# Win32 Sleep function is only 10-millisecond resolution, so instead
# use a waitable timer object, which has up to 100-nanosecond resolution
# (hardware and implementation dependent, of course).
_kernel32 = ctypes.windll.kernel32
class _ClockBase(object):
def __init__(self):
self._timer = _kernel32.CreateWaitableTimerA(ctypes.c_void_p(),
True, ctypes.c_void_p())
def sleep(self, microseconds):
delay = ctypes.c_longlong(int(-microseconds * 10))
_kernel32.SetWaitableTimer(self._timer, ctypes.byref(delay),
0, ctypes.c_void_p(), ctypes.c_void_p(), False)
_kernel32.WaitForSingleObject(self._timer, 0xffffffff)
_default_time_function = time.clock
else:
_c = pyglet.lib.load_library('c', darwin='/usr/lib/libc.dylib')
_c.usleep.argtypes = [ctypes.c_ulong]
class _ClockBase(object):
def sleep(self, microseconds):
_c.usleep(int(microseconds))
_default_time_function = time.time
class _ScheduledItem(object):
__slots__ = ['func', 'args', 'kwargs']
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
class _ScheduledIntervalItem(object):
__slots__ = ['func', 'interval', 'last_ts', 'next_ts',
'args', 'kwargs']
def __init__(self, func, interval, last_ts, next_ts, args, kwargs):
self.func = func
self.interval = interval
self.last_ts = last_ts
self.next_ts = next_ts
self.args = args
self.kwargs = kwargs
def _dummy_schedule_func(*args, **kwargs):
'''Dummy function that does nothing, placed onto zombie scheduled items
    to ensure they have no side effects if they have already been queued
    inside the tick() method.
'''
pass
class Clock(_ClockBase):
'''Class for calculating and limiting framerate, and for calling scheduled
functions.
'''
#: The minimum amount of time in seconds this clock will attempt to sleep
#: for when framerate limiting. Higher values will increase the
#: accuracy of the limiting but also increase CPU usage while
#: busy-waiting. Lower values mean the process sleeps more often, but is
#: prone to over-sleep and run at a potentially lower or uneven framerate
#: than desired.
MIN_SLEEP = 0.005
#: The amount of time in seconds this clock subtracts from sleep values
#: to compensate for lazy operating systems.
SLEEP_UNDERSHOOT = MIN_SLEEP - 0.001
# List of functions to call every tick.
_schedule_items = None
# List of schedule interval items kept in sort order.
_schedule_interval_items = None
# If True, a sleep(0) is inserted on every tick.
_force_sleep = False
def __init__(self, fps_limit=None, time_function=_default_time_function):
'''Initialise a Clock, with optional framerate limit and custom
time function.
:Parameters:
`fps_limit` : float
If not None, the maximum allowable framerate. Defaults
to None. Deprecated in pyglet 1.2.
`time_function` : function
Function to return the elapsed time of the application,
in seconds. Defaults to time.time, but can be replaced
to allow for easy time dilation effects or game pausing.
'''
super(Clock, self).__init__()
self.time = time_function
self.next_ts = self.time()
self.last_ts = None
self.times = []
self.set_fps_limit(fps_limit)
self.cumulative_time = 0
self._schedule_items = []
self._schedule_interval_items = []
def update_time(self):
'''Get the elapsed time since the last call to `update_time`.
This updates the clock's internal measure of time and returns
the difference since the last update (or since the clock was created).
:since: pyglet 1.2
:rtype: float
:return: The number of seconds since the last `update_time`, or 0
if this was the first time it was called.
'''
ts = self.time()
if self.last_ts is None:
delta_t = 0
else:
delta_t = ts - self.last_ts
self.times.insert(0, delta_t)
if len(self.times) > self.window_size:
self.cumulative_time -= self.times.pop()
self.cumulative_time += delta_t
self.last_ts = ts
return delta_t
def call_scheduled_functions(self, dt):
'''Call scheduled functions that elapsed on the last `update_time`.
:since: pyglet 1.2
:Parameters:
dt : float
The elapsed time since the last update to pass to each
scheduled function. This is *not* used to calculate which
functions have elapsed.
:rtype: bool
:return: True if any functions were called, otherwise False.
'''
ts = self.last_ts
result = False
# Call functions scheduled for every frame
        # Dupe list just in case one of the items unschedules itself
for item in list(self._schedule_items):
result = True
item.func(dt, *item.args, **item.kwargs)
# Call all scheduled interval functions and reschedule for future.
need_resort = False
        # Dupe list just in case one of the items unschedules itself
for item in list(self._schedule_interval_items):
if item.next_ts > ts:
break
result = True
item.func(ts - item.last_ts, *item.args, **item.kwargs)
if item.interval:
# Try to keep timing regular, even if overslept this time;
# but don't schedule in the past (which could lead to
                    # infinitely-worsening error).
item.next_ts = item.last_ts + item.interval
item.last_ts = ts
if item.next_ts <= ts:
if ts - item.next_ts < 0.05:
# Only missed by a little bit, keep the same schedule
item.next_ts = ts + item.interval
else:
# Missed by heaps, do a soft reschedule to avoid
# lumping everything together.
item.next_ts = self._get_soft_next_ts(ts, item.interval)
# Fake last_ts to avoid repeatedly over-scheduling in
# future. Unfortunately means the next reported dt is
# incorrect (looks like interval but actually isn't).
item.last_ts = item.next_ts - item.interval
need_resort = True
else:
item.next_ts = None
# Remove finished one-shots.
self._schedule_interval_items = \
[item for item in self._schedule_interval_items \
if item.next_ts is not None]
if need_resort:
# TODO bubble up changed items might be faster
self._schedule_interval_items.sort(key=lambda a: a.next_ts)
return result
def tick(self, poll=False):
'''Signify that one frame has passed.
This will call any scheduled functions that have elapsed.
:Parameters:
`poll` : bool
If True, the function will call any scheduled functions
but will not sleep or busy-wait for any reason. Recommended
for advanced applications managing their own sleep timers
only.
Since pyglet 1.1.
:rtype: float
:return: The number of seconds since the last "tick", or 0 if this was
the first frame.
'''
if poll:
if self.period_limit:
self.next_ts = self.next_ts + self.period_limit
else:
if self.period_limit:
self._limit()
if self._force_sleep:
self.sleep(0)
delta_t = self.update_time()
self.call_scheduled_functions(delta_t)
return delta_t
def _limit(self):
'''Sleep until the next frame is due. Called automatically by
`tick` if a framerate limit has been set.
This method uses several heuristics to determine whether to
sleep or busy-wait (or both).
'''
ts = self.time()
# Sleep to just before the desired time
sleeptime = self.get_sleep_time(False)
while sleeptime - self.SLEEP_UNDERSHOOT > self.MIN_SLEEP:
self.sleep(1000000 * (sleeptime - self.SLEEP_UNDERSHOOT))
sleeptime = self.get_sleep_time(False)
# Busy-loop CPU to get closest to the mark
sleeptime = self.next_ts - self.time()
while sleeptime > 0:
sleeptime = self.next_ts - self.time()
if sleeptime < -2 * self.period_limit:
# Missed the time by a long shot, let's reset the clock
# print >> sys.stderr, 'Step %f' % -sleeptime
self.next_ts = ts + 2 * self.period_limit
else:
# Otherwise keep the clock steady
self.next_ts = self.next_ts + self.period_limit
def get_sleep_time(self, sleep_idle):
'''Get the time until the next item is scheduled.
This method considers all scheduled items and the current
``fps_limit``, if any.
Applications can choose to continue receiving updates at the
maximum framerate during idle time (when no functions are scheduled),
or they can sleep through their idle time and allow the CPU to
switch to other processes or run in low-power mode.
If `sleep_idle` is ``True`` the latter behaviour is selected, and
``None`` will be returned if there are no scheduled items.
Otherwise, if `sleep_idle` is ``False``, a sleep time allowing
the maximum possible framerate (considering ``fps_limit``) will
be returned; or an earlier time if a scheduled function is ready.
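
        For example, a hand-rolled event loop might drive the clock like
        this (an illustrative sketch only; ``wait_for_events`` stands in
        for a hypothetical platform wait call)::

            clock.tick(poll=True)
            timeout = clock.get_sleep_time(sleep_idle=True)
            if timeout is not None:
                wait_for_events(timeout)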
:Parameters:
`sleep_idle` : bool
If True, the application intends to sleep through its idle
time; otherwise it will continue ticking at the maximum
frame rate allowed.
:rtype: float
:return: Time until the next scheduled event in seconds, or ``None``
if there is no event scheduled.
:since: pyglet 1.1
'''
if self._schedule_items or not sleep_idle:
if not self.period_limit:
return 0.
else:
wake_time = self.next_ts
if self._schedule_interval_items:
wake_time = min(wake_time,
self._schedule_interval_items[0].next_ts)
return max(wake_time - self.time(), 0.)
if self._schedule_interval_items:
return max(self._schedule_interval_items[0].next_ts - self.time(),
0)
return None
def set_fps_limit(self, fps_limit):
'''Set the framerate limit.
The framerate limit applies only when a function is scheduled
for every frame. That is, the framerate limit can be exceeded by
scheduling a function for a very small period of time.
:Parameters:
`fps_limit` : float
Maximum frames per second allowed, or None to disable
limiting.
:deprecated: Use `pyglet.app.run` and `schedule_interval` instead.
'''
if not fps_limit:
self.period_limit = None
else:
self.period_limit = 1. / fps_limit
self.window_size = fps_limit or 60
def get_fps_limit(self):
'''Get the framerate limit.
:rtype: float
:return: The framerate limit previously set in the constructor or
`set_fps_limit`, or None if no limit was set.
'''
if self.period_limit:
return 1. / self.period_limit
else:
return 0
def get_fps(self):
'''Get the average FPS of recent history.
The result is the average of a sliding window of the last "n" frames,
where "n" is some number designed to cover approximately 1 second.
:rtype: float
:return: The measured frames per second.
'''
if not self.cumulative_time:
return 0
return len(self.times) / self.cumulative_time
def schedule(self, func, *args, **kwargs):
'''Schedule a function to be called every frame.
The function should have a prototype that includes ``dt`` as the
first argument, which gives the elapsed time, in seconds, since the
last clock tick. Any additional arguments given to this function
are passed on to the callback::
def callback(dt, *args, **kwargs):
pass
:Parameters:
`func` : function
The function to call each frame.
'''
item = _ScheduledItem(func, args, kwargs)
self._schedule_items.append(item)
def _schedule_item(self, func, last_ts, next_ts, interval, *args, **kwargs):
item = _ScheduledIntervalItem(
func, interval, last_ts, next_ts, args, kwargs)
# Insert in sort order
for i, other in enumerate(self._schedule_interval_items):
if other.next_ts > next_ts:
self._schedule_interval_items.insert(i, item)
break
else:
self._schedule_interval_items.append(item)
def schedule_interval(self, func, interval, *args, **kwargs):
'''Schedule a function to be called every `interval` seconds.
Specifying an interval of 0 prevents the function from being
called again (see `schedule` to call a function as often as possible).
The callback function prototype is the same as for `schedule`.
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
last_ts = self.last_ts or self.next_ts
# Schedule from now, unless now is sufficiently close to last_ts, in
# which case use last_ts. This clusters together scheduled items that
# probably want to be scheduled together. The old (pre 1.1.1)
# behaviour was to always use self.last_ts, and not look at ts. The
# new behaviour is needed because clock ticks can now be quite
# irregular, and span several seconds.
ts = self.time()
if ts - last_ts > 0.2:
last_ts = ts
next_ts = last_ts + interval
self._schedule_item(func, last_ts, next_ts, interval, *args, **kwargs)
def schedule_interval_soft(self, func, interval, *args, **kwargs):
'''Schedule a function to be called every `interval` seconds,
beginning at a time that does not coincide with other scheduled
events.
This method is similar to `schedule_interval`, except that the
clock will move the interval out of phase with other scheduled
functions so as to distribute CPU more load evenly over time.
This is useful for functions that need to be called regularly,
but not relative to the initial start time. `pyglet.media`
does this for scheduling audio buffer updates, which need to occur
regularly -- if all audio updates are scheduled at the same time
(for example, mixing several tracks of a music score, or playing
multiple videos back simultaneously), the resulting load on the
CPU is excessive for those intervals but idle outside. Using
the soft interval scheduling, the load is more evenly distributed.
Soft interval scheduling can also be used as an easy way to schedule
graphics animations out of phase; for example, multiple flags
waving in the wind.
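
        For example (an illustrative sketch; the flag objects and their
        ``wave`` callbacks are arbitrary)::

            # Both flags update 10 times a second, but soft scheduling
            # starts them out of phase instead of firing both in the
            # same tick.
            clock.schedule_interval_soft(flag1.wave, 0.1)
            clock.schedule_interval_soft(flag2.wave, 0.1)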
:since: pyglet 1.1
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
last_ts = self.last_ts or self.next_ts
# See schedule_interval
ts = self.time()
if ts - last_ts > 0.2:
last_ts = ts
next_ts = self._get_soft_next_ts(last_ts, interval)
last_ts = next_ts - interval
self._schedule_item(func, last_ts, next_ts, interval, *args, **kwargs)
def _get_soft_next_ts(self, last_ts, interval):
def taken(ts, e):
'''Return True if the given time has already got an item
scheduled nearby.
'''
for item in self._schedule_interval_items:
if item.next_ts is None:
pass
elif abs(item.next_ts - ts) <= e:
return True
elif item.next_ts > ts + e:
return False
return False
# Binary division over interval:
#
# 0 interval
# |--------------------------|
# 5 3 6 2 7 4 8 1 Order of search
#
# i.e., first scheduled at interval,
# then at interval/2
# then at interval/4
# then at interval*3/4
# then at ...
#
# Schedule is hopefully then evenly distributed for any interval,
# and any number of scheduled functions.
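        #
        # Illustrative trace (not from the original source): with
        # last_ts = 0 and interval = 1.0, candidate times are tried in
        # the order 1.0; then 0.5; then the quarter points 0.25, 0.5,
        # 0.75; then the eighth points; and so on, stopping at the
        # first slot with no other item scheduled within dt/4 of it.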
next_ts = last_ts + interval
if not taken(next_ts, interval / 4):
return next_ts
dt = interval
divs = 1
while True:
next_ts = last_ts
for i in range(divs - 1):
next_ts += dt
if not taken(next_ts, dt / 4):
return next_ts
dt /= 2
divs *= 2
# Avoid infinite loop in pathological case
if divs > 16:
return next_ts
def schedule_once(self, func, delay, *args, **kwargs):
'''Schedule a function to be called once after `delay` seconds.
The callback function prototype is the same as for `schedule`.
:Parameters:
`func` : function
The function to call when the timer lapses.
`delay` : float
The number of seconds to wait before the timer lapses.
'''
last_ts = self.last_ts or self.next_ts
# See schedule_interval
ts = self.time()
if ts - last_ts > 0.2:
last_ts = ts
next_ts = last_ts + delay
self._schedule_item(func, last_ts, next_ts, 0, *args, **kwargs)
def unschedule(self, func):
'''Remove a function from the schedule.
        If the function appears in the schedule more than once, all occurrences
are removed. If the function was not scheduled, no error is raised.
:Parameters:
`func` : function
The function to remove from the schedule.
'''
# First replace zombie items' func with a dummy func that does
# nothing, in case the list has already been cloned inside tick().
# (Fixes issue 326).
for item in self._schedule_items:
if item.func == func:
item.func = _dummy_schedule_func
for item in self._schedule_interval_items:
if item.func == func:
item.func = _dummy_schedule_func
# Now remove matching items from both schedule lists.
self._schedule_items = \
[item for item in self._schedule_items \
if item.func is not _dummy_schedule_func]
self._schedule_interval_items = \
[item for item in self._schedule_interval_items \
if item.func is not _dummy_schedule_func]
# Default clock.
_default = Clock()
def set_default(default):
'''Set the default clock to use for all module-level functions.
By default an instance of `Clock` is used.
:Parameters:
`default` : `Clock`
The default clock to use.
'''
global _default
_default = default
def get_default():
'''Return the `Clock` instance that is used by all module-level
clock functions.
:rtype: `Clock`
:return: The default clock.
'''
return _default
def tick(poll=False):
'''Signify that one frame has passed on the default clock.
This will call any scheduled functions that have elapsed.
:Parameters:
`poll` : bool
If True, the function will call any scheduled functions
but will not sleep or busy-wait for any reason. Recommended
for advanced applications managing their own sleep timers
only.
Since pyglet 1.1.
:rtype: float
:return: The number of seconds since the last "tick", or 0 if this was the
first frame.
'''
return _default.tick(poll)
def get_sleep_time(sleep_idle):
'''Get the time until the next item is scheduled on the default clock.
See `Clock.get_sleep_time` for details.
:Parameters:
`sleep_idle` : bool
If True, the application intends to sleep through its idle
time; otherwise it will continue ticking at the maximum
frame rate allowed.
:rtype: float
:return: Time until the next scheduled event in seconds, or ``None``
if there is no event scheduled.
:since: pyglet 1.1
'''
return _default.get_sleep_time(sleep_idle)
def get_fps():
'''Return the current measured FPS of the default clock.
:rtype: float
'''
return _default.get_fps()
def set_fps_limit(fps_limit):
'''Set the framerate limit for the default clock.
:Parameters:
`fps_limit` : float
Maximum frames per second allowed, or None to disable
limiting.
:deprecated: Use `pyglet.app.run` and `schedule_interval` instead.
'''
_default.set_fps_limit(fps_limit)
def get_fps_limit():
'''Get the framerate limit for the default clock.
:return: The framerate limit previously set by `set_fps_limit`, or None if
no limit was set.
'''
return _default.get_fps_limit()
def schedule(func, *args, **kwargs):
'''Schedule 'func' to be called every frame on the default clock.
The arguments passed to func are ``dt``, followed by any ``*args`` and
``**kwargs`` given here.
:Parameters:
`func` : function
The function to call each frame.
'''
_default.schedule(func, *args, **kwargs)
def schedule_interval(func, interval, *args, **kwargs):
'''Schedule 'func' to be called every 'interval' seconds on the default
clock.
The arguments passed to 'func' are 'dt' (time since last function call),
followed by any ``*args`` and ``**kwargs`` given here.
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
_default.schedule_interval(func, interval, *args, **kwargs)
def schedule_interval_soft(func, interval, *args, **kwargs):
'''Schedule 'func' to be called every 'interval' seconds on the default
clock, beginning at a time that does not coincide with other scheduled
events.
The arguments passed to 'func' are 'dt' (time since last function call),
followed by any ``*args`` and ``**kwargs`` given here.
:see: `Clock.schedule_interval_soft`
:since: pyglet 1.1
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
_default.schedule_interval_soft(func, interval, *args, **kwargs)
def schedule_once(func, delay, *args, **kwargs):
'''Schedule 'func' to be called once after 'delay' seconds (can be
a float) on the default clock. The arguments passed to 'func' are
'dt' (time since last function call), followed by any ``*args`` and
``**kwargs`` given here.
If no default clock is set, the func is queued and will be scheduled
on the default clock as soon as it is created.
:Parameters:
`func` : function
The function to call when the timer lapses.
`delay` : float
The number of seconds to wait before the timer lapses.
'''
_default.schedule_once(func, delay, *args, **kwargs)
def unschedule(func):
'''Remove 'func' from the default clock's schedule. No error
is raised if the func was never scheduled.
:Parameters:
`func` : function
The function to remove from the schedule.
'''
_default.unschedule(func)
class ClockDisplay(object):
'''Display current clock values, such as FPS.
This is a convenience class for displaying diagnostics such as the
framerate. See the module documentation for example usage.
:Ivariables:
`label` : `pyglet.font.Text`
The label which is displayed.
:deprecated: This class presents values that are often misleading, as
they reflect the rate of clock ticks, not displayed framerate. Use
pyglet.window.FPSDisplay instead.
'''
def __init__(self,
font=None,
interval=0.25,
format='%(fps).2f',
color=(.5, .5, .5, .5),
clock=None):
'''Create a ClockDisplay.
All parameters are optional. By default, a large translucent
font will be used to display the FPS to two decimal places.
:Parameters:
`font` : `pyglet.font.Font`
The font to format text in.
`interval` : float
The number of seconds between updating the display.
`format` : str
A format string describing the format of the text. This
string is modulated with the dict ``{'fps' : fps}``.
`color` : 4-tuple of float
The color, including alpha, passed to ``glColor4f``.
`clock` : `Clock`
The clock which determines the time. If None, the default
clock is used.
'''
if clock is None:
clock = _default
self.clock = clock
self.clock.schedule_interval(self.update_text, interval)
if not font:
from pyglet.font import load as load_font
font = load_font('', 36, bold=True)
import pyglet.font
self.label = pyglet.font.Text(font, '', color=color, x=10, y=10)
self.format = format
def unschedule(self):
'''Remove the display from its clock's schedule.
`ClockDisplay` uses `Clock.schedule_interval` to periodically update
its display label. Even if the ClockDisplay is not being used any
more, its update method will still be scheduled, which can be a
resource drain. Call this method to unschedule the update method
and allow the ClockDisplay to be garbage collected.
:since: pyglet 1.1
'''
self.clock.unschedule(self.update_text)
def update_text(self, dt=0):
'''Scheduled method to update the label text.'''
fps = self.clock.get_fps()
self.label.text = self.format % {'fps': fps}
def draw(self):
'''Method called each frame to render the label.'''
self.label.draw()
def test_clock():
import getopt
test_seconds = 1
test_fps = 60
show_fps = False
options, args = getopt.getopt(sys.argv[1:], 'vht:f:',
['time=', 'fps=', 'help'])
for key, value in options:
if key in ('-t', '--time'):
test_seconds = float(value)
elif key in ('-f', '--fps'):
test_fps = float(value)
        elif key in ('-v',):
show_fps = True
elif key in ('-h', '--help'):
print ('Usage: clock.py <options>\n'
'\n'
'Options:\n'
' -t --time Number of seconds to run for.\n'
' -f --fps Target FPS.\n'
'\n'
'Tests the clock module by measuring how close we can\n'
'get to the desired FPS by sleeping and busy-waiting.')
sys.exit(0)
set_fps_limit(test_fps)
start = time.time()
# Add one because first frame has no update interval.
n_frames = int(test_seconds * test_fps + 1)
print 'Testing %f FPS for %f seconds...' % (test_fps, test_seconds)
for i in xrange(n_frames):
tick()
if show_fps:
print get_fps()
total_time = time.time() - start
total_error = total_time - test_seconds
print 'Total clock error: %f secs' % total_error
print 'Total clock error / secs: %f secs/secs' % \
(total_error / test_seconds)
# Not fair to add the extra frame in this calc, since no-one's interested
# in the startup situation.
print 'Average FPS: %f' % ((n_frames - 1) / total_time)
if __name__ == '__main__':
test_clock()
| bsd-3-clause | -591,734,193,958,127,000 | 33.787298 | 80 | 0.598337 | false |
boada/photometrypipeline | setup/vatt4k.py | 2 | 4155 | """
Photometry Pipeline Configuration File for VATT/VATT4k
2017-02-05, [email protected]
"""
# Photometry Pipeline
# Copyright (C) 2016-2018 Michael Mommert, [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
vatt4k_param = {'telescope_instrument': 'VATT/VATT4k',  # telescope/instrument name
                'telescope_keyword': 'VATT4K',  # telescope/instrument keyword
                'observatory_code': '290',  # MPC observatory code
                'secpix': (0.1875, 0.1875),  # pixel size (arcsec)
                                             # before binning

                # image orientation preferences
                'flipx': True,
                'flipy': False,
                'rotate': 0,
# instrument-specific FITS header keywords
'binning': ('CCDBIN1', 'CCDBIN2'), # binning in x/y
'extent': ('NAXIS1', 'NAXIS2'), # N_pixels in x/y
'ra': 'RA', # telescope pointing, RA
'dec': 'DEC', # telescope pointin, Dec
'radec_separator': ':', # RA/Dec hms separator, use 'XXX'
# if already in degrees
'date_keyword': 'DATE-OBS|TIME-OBS', # obs date/time
# keyword; use
# 'date|time' if
# separate
'object': 'OBJECT', # object name keyword
'filter': 'FILTER', # filter keyword
'filter_translations': {'TOP 2 BOT 1': 'V', 'TOP 3 BOT 1': 'R',
'TOP 4 BOT 1': 'I', 'TOP 5 BOT 1': 'B'},
# filtername translation dictionary
'exptime': 'EXPTIME', # exposure time keyword (s)
'airmass': 'AIRMASS', # airmass keyword
# source extractor settings
'sex': {'DETECT_MINAREA': 12,
'DETECT_THRESH': 3,
'ANALYSIS_THRESH': 3,
'CATALOG_NAME': 'VATT4K.ldac',
'aprad_default': 5,
# [minimum, maximum] aperture rad (px)
'aprad_range': [2, 10],
'PHOT_APERTURES': 5,
'WEIGHT_TYPE': 'NONE',
'WEIGHT_IMAGE': 'NONE',
'mask_files': {}, # as a function of x,y binning
'BACKPHOTO_TYPE': 'GLOBAL',
'PARAMETERS_NAME': '$PHOTPIPEDIR/setup/singleaperture.sexparam',
                    'SATUR_LEVEL': 50000,
'SATUR_KEY': 'NONE'},
# scamp settings
'scamp-config-file': rootpath+'/setup/vatt4k.scamp',
'reg_max_mag': 19,
'reg_search_radius': 0.5, # deg
# swarp settings
'copy_keywords': ('TELESCOP,INSTRUME,FILTER,EXPTIME,OBJECT,' +
'DATE-OBS,TIME-OBS,RA,DEC,SECPIX,AIRMASS,' +
'TEL_KEYW'),
# keywords to be copied in image
# combination using swarp
'swarp-config-file': rootpath+'/setup/vatt4k.swarp',
# default catalog settings
'astrometry_catalogs': ['GAIA'],
'photometry_catalogs': ['SDSS-R9', 'APASS9']
}
# add telescope parameters to according lists and dictionaries
implemented_telescopes.append('VATT4K')
instrument_identifiers['= "Vatt4k"'] = 'VATT4K'
telescope_parameters['VATT4K'] = vatt4k_param
| gpl-3.0 | 2,590,249,210,455,076,000 | 46.215909 | 88 | 0.519134 | false |
ibmsoe/tensorflow | tensorflow/contrib/distributions/python/ops/deterministic.py | 12 | 13223 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Deterministic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
__all__ = [
"Deterministic",
"VectorDeterministic",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDeterministic(distribution.Distribution):
"""Base class for Deterministic distributions."""
def __init__(self,
loc,
atol=None,
rtol=None,
is_vector=False,
validate_args=False,
allow_nan_stats=True,
name="_BaseDeterministic"):
"""Initialize a batch of `_BaseDeterministic` distributions.
The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
= 0, otherwise.
```
Args:
loc: Numeric `Tensor`. The point (or batch of points) on which this
distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
is_vector: Python `bool`. If `True`, this is for `VectorDeterministic`,
else `Deterministic`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If `loc` is a scalar.
"""
parameters = locals()
with ops.name_scope(name, values=[loc, atol, rtol]):
loc = ops.convert_to_tensor(loc, name="loc")
if is_vector and validate_args:
msg = "Argument loc must be at least rank 1."
if loc.get_shape().ndims is not None:
if loc.get_shape().ndims < 1:
raise ValueError(msg)
else:
loc = control_flow_ops.with_dependencies(
[check_ops.assert_rank_at_least(loc, 1, message=msg)], loc)
self._loc = loc
super(_BaseDeterministic, self).__init__(
dtype=self._loc.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc],
name=name)
self._atol = self._get_tol(atol)
self._rtol = self._get_tol(rtol)
# Avoid using the large broadcast with self.loc if possible.
if rtol is None:
self._slack = self.atol
else:
self._slack = self.atol + self.rtol * math_ops.abs(self.loc)
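    # Illustrative (not in the original source): with loc = 2.0,
    # atol = 0.1 and rtol = 0.05, the slack is 0.1 + 0.05 * |2.0| = 0.2,
    # so any x in [1.8, 2.2] compares equal to loc.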
def _get_tol(self, tol):
if tol is None:
return ops.convert_to_tensor(0, dtype=self.loc.dtype)
tol = ops.convert_to_tensor(tol, dtype=self.loc.dtype)
if self.validate_args:
tol = control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
tol, message="Argument 'tol' must be non-negative")
], tol)
return tol
@property
def loc(self):
"""Point (or batch of points) at which this distribution is supported."""
return self._loc
@property
def atol(self):
"""Absolute tolerance for comparing points to `self.loc`."""
return self._atol
@property
def rtol(self):
"""Relative tolerance for comparing points to `self.loc`."""
return self._rtol
def _mean(self):
return array_ops.identity(self.loc)
def _variance(self):
return array_ops.zeros_like(self.loc)
def _mode(self):
return self.mean()
  def _sample_n(self, n, seed=None):  # pylint: disable=unused-argument
n_static = tensor_util.constant_value(ops.convert_to_tensor(n))
if n_static is not None and self.loc.get_shape().ndims is not None:
ones = [1] * self.loc.get_shape().ndims
multiples = [n_static] + ones
else:
ones = array_ops.ones_like(array_ops.shape(self.loc))
multiples = array_ops.concat(([n], ones), axis=0)
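    # Illustrative shape walk-through (not in the original source): if
    # self.loc has shape [2, 3] and n = 4, loc is expanded to shape
    # [1, 2, 3] and tiled by multiples [4, 1, 1], giving samples of
    # shape [4, 2, 3], i.e. n identical copies of loc.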
return array_ops.tile(self.loc[array_ops.newaxis, ...], multiples=multiples)
class Deterministic(_BaseDeterministic):
"""Scalar `Deterministic` distribution on the real line.
The scalar `Deterministic` distribution is parameterized by a [batch] point
`loc` on the real line. The distribution is supported at this point only,
and corresponds to a random variable that is constant, equal to `loc`.
See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
#### Mathematical Details
The probability mass function (pmf) and cumulative distribution function (cdf)
are
```none
pmf(x; loc) = 1, if x == loc, else 0
cdf(x; loc) = 1, if x >= loc, else 0
```
#### Examples
```python
# Initialize a single Deterministic supported at zero.
constant = tf.contrib.distributions.Deterministic(0.)
constant.prob(0.)
==> 1.
constant.prob(2.)
==> 0.
# Initialize a [2, 2] batch of scalar constants.
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
constant = tf.contrib.distributions.Deterministic(loc)
constant.prob(x)
==> [[1., 0.], [0., 1.]]
```
"""
def __init__(self,
loc,
atol=None,
rtol=None,
validate_args=False,
allow_nan_stats=True,
name="Deterministic"):
"""Initialize a scalar `Deterministic` distribution.
The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
= 0, otherwise.
```
Args:
loc: Numeric `Tensor` of shape `[B1, ..., Bb]`, with `b >= 0`.
The point (or batch of points) on which this distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
super(Deterministic, self).__init__(
loc,
atol=atol,
rtol=rtol,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
def _batch_shape_tensor(self):
return array_ops.shape(self.loc)
def _batch_shape(self):
return self.loc.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _prob(self, x):
return math_ops.cast(
math_ops.abs(x - self.loc) <= self._slack, dtype=self.dtype)
def _cdf(self, x):
return math_ops.cast(x >= self.loc - self._slack, dtype=self.dtype)
class VectorDeterministic(_BaseDeterministic):
"""Vector `Deterministic` distribution on `R^k`.
The `VectorDeterministic` distribution is parameterized by a [batch] point
`loc in R^k`. The distribution is supported at this point only,
and corresponds to a random variable that is constant, equal to `loc`.
See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
#### Mathematical Details
The probability mass function (pmf) is
```none
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise.
```
#### Examples
```python
# Initialize a single VectorDeterministic supported at [0., 2.] in R^2.
constant = tf.contrib.distributions.Deterministic([0., 2.])
constant.prob([0., 2.])
==> 1.
constant.prob([0., 3.])
==> 0.
# Initialize a [3] batch of constants on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
constant = constant_lib.VectorDeterministic(loc)
constant.prob([[0., 1.], [1.9, 3.], [3.99, 5.]])
==> [1., 0., 0.]
```
"""
def __init__(self,
loc,
atol=None,
rtol=None,
validate_args=False,
allow_nan_stats=True,
name="VectorDeterministic"):
"""Initialize a `VectorDeterministic` distribution on `R^k`, for `k >= 0`.
Note that there is only one point in `R^0`, the "point" `[]`. So if `k = 0`
then `self.prob([]) == 1`.
The `atol` and `rtol` parameters allow for some slack in `pmf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise
```
Args:
loc: Numeric `Tensor` of shape `[B1, ..., Bb, k]`, with `b >= 0`, `k >= 0`
The point (or batch of points) on which this distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
super(VectorDeterministic, self).__init__(
loc,
atol=atol,
rtol=rtol,
is_vector=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
def _batch_shape_tensor(self):
return array_ops.shape(self.loc)[:-1]
def _batch_shape(self):
return self.loc.get_shape()[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self.loc)[-1]
def _event_shape(self):
return self.loc.get_shape()[-1:]
def _prob(self, x):
if self.validate_args:
is_vector_check = check_ops.assert_rank_at_least(x, 1)
right_vec_space_check = check_ops.assert_equal(
self.event_shape_tensor(),
array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1),
message=
"Argument 'x' not defined in the same space R^k as this distribution")
with ops.control_dependencies([is_vector_check]):
with ops.control_dependencies([right_vec_space_check]):
x = array_ops.identity(x)
return math_ops.cast(
math_ops.reduce_all(math_ops.abs(x - self.loc) <= self._slack, axis=-1),
dtype=self.dtype)
| apache-2.0 | -6,023,606,606,424,454,000 | 33.524804 | 80 | 0.63261 | false |
frutik/formunculous | formunculous/forms.py | 1 | 12701 | # This file is part of formunculous.
#
# formunculous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# formunculous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with formunculous. If not, see <http://www.gnu.org/licenses/>.
# Copyright 2009-2011 Carson Gee
from django import forms
from django.db import models
from django.utils.safestring import mark_safe
from django.forms.util import ErrorList
from django.contrib.auth.models import User
from formunculous.models import *
from formunculous.fields import HoneypotField, MultipleChoiceToStringField
from django.utils.translation import ugettext as _
from django.forms import ModelForm
from django.forms.widgets import RadioSelect, Select, SelectMultiple, CheckboxSelectMultiple, HiddenInput
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.forms.formsets import BaseFormSet, TOTAL_FORM_COUNT, INITIAL_FORM_COUNT, ORDERING_FIELD_NAME, DELETION_FIELD_NAME, ManagementForm
# Used to introspect model namespace
import formunculous.models as funcmodels
class ApplicationForm(forms.Form):
def __init__(self, app_def, app=None, reviewer=False, *args, **kwargs):
super(ApplicationForm, self).__init__(*args, **kwargs)
self.app_def = app_def
self.app = app
self.reviewer = reviewer
# If there is an application defined, add it's pk
if app:
pk_field = forms.IntegerField(initial=app.id, widget=HiddenInput)
pk_field.is_hidden = True
pk_field.required = False
self.fields['pk'] = pk_field
field_set = app_def.fielddefinition_set.filter(
reviewer_only=self.reviewer)
# Loop through the application field definition set
# and create form fields for them.
for field_def in field_set:
            # Introspect the model class named in field_def.type to get
            # its django form field; add that, along with the label
            # specified in the FieldDefinition.
# Intentionally not catching the potential exceptions here, let
# them bubble up
field_model = getattr(funcmodels, field_def.type)
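            # Illustrative (the class name is only an example): if
            # field_def.type == "TextField", this resolves to
            # formunculous.models.TextField.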
data = None
try:
# Grab the model if this value is already stored otherwise,
# ignore
field_model = field_model.objects.get(field_def = field_def,
app = self.app)
data = field_model.value
except field_model.DoesNotExist:
pass
# If there are dropdown choices specified, create the
# choices tuple for use in the field determined by other
# user choices.
field_def_choices = field_def.dropdownchoices_set.all()
if field_def_choices and field_model.allow_dropdown:
choices = (())
for choice in field_def_choices:
choices += (choice.value, _(choice.text),),
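                # Illustrative result (the choice values are made up):
                # choices == (('yes', u'Yes'), ('no', u'No'))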
                # Users are allowed to specify how a choice field is
                # rendered (radio buttons / checkboxes) and whether it
                # accepts multiple selections.
widget = Select
if field_def.multi_select:
# Fix the data from being stored as a string to
# being stored as a list
if data:
try:
data = data.split(' | ')
except:
data = None
widget = SelectMultiple
if field_def.use_radio:
widget = CheckboxSelectMultiple
if field_def.use_radio and not field_def.multi_select:
widget = RadioSelect
if field_def.multi_select:
form_field = MultipleChoiceToStringField(
choices = choices, widget = widget)
else:
form_field = forms.ChoiceField(choices=choices,
widget = widget)
else:
form_field = field_model._meta.get_field('value').formfield()
# Custom field widgets
if field_model.widget:
attrs = form_field.widget.attrs
form_field.widget = field_model.widget(attrs = attrs)
form_field.required = False # Will check required on final submission
form_field.label = mark_safe(field_def.label)
form_field.initial = data
form_field.help_text = mark_safe(field_def.help_text)
# Add it to the growing fieldset
self.fields[field_def.slug] = form_field
# Create a honeypot field automatically
if not self.fields.has_key('company'):
self.fields['company'] = HoneypotField()
else:
i = 0
while self.fields.has_key('company%s' % i):
i+=1
self.fields['company%s' % i] = HoneypotField()
def save(self):
"""
This is used for interim saves, and will save all data in the form
"""
# Save the app first
self.app.save()
# Go through the fieldset and save the values
field_set = self.app_def.fielddefinition_set.filter(
reviewer_only=self.reviewer)
for field_def in field_set:
field_model = getattr(funcmodels, field_def.type)
try:
field_model = field_model.objects.get(field_def = field_def,
app = self.app)
except field_model.DoesNotExist:
field_model = field_model(field_def = field_def, app = self.app)
true_field = field_model._meta.get_field('value')
true_field.save_form_data(field_model,
self.cleaned_data[field_def.slug])
field_model.save()
def check_required(self):
"""
Checks for field definitions that have been marked with the
required field. If they are empty or blank, update
the fields with errors.
"""
if not self.is_bound:
# Probably should throw an exception
return False
if not hasattr(self, 'cleaned_data'):
return False
ret_val = True
field_set = self.app_def.fielddefinition_set.filter(
reviewer_only=self.reviewer)
for field_def in field_set:
if field_def.require:
# If the field isn't clean, don't bother checking
if not self.cleaned_data.has_key(field_def.slug):
self._errors[field_def.slug] = ErrorList([_('This field requires a value before the form can be submitted'),])
ret_val = False
continue
if self.cleaned_data[field_def.slug] == None \
or self.cleaned_data[field_def.slug] == '':
                    # Don't assume the field is empty just because nothing was
                    # submitted; check the value stored on the model as well.
fv = None
if self.app:
fv = self.app.get_field_value(field_def.slug)
if not fv:
self._errors[field_def.slug] = ErrorList([_('This field requires a value before the form can be submitted'),])
del self.cleaned_data[field_def.slug]
ret_val = False
return ret_val
class ApplicationDefinitionForm(ModelForm):
start_date = forms.DateTimeField(widget=DateWidget)
stop_date = forms.DateTimeField(widget=DateWidget)
reviewers = forms.ModelMultipleChoiceField(
queryset=User.objects.all().order_by("username"),
widget=FilteredSelectMultiple("Reviewers", False)
)
class Meta:
model = ApplicationDefinition
class FieldDefinitionForm(ModelForm):
type = forms.CharField(max_length=250,
widget=forms.Select(choices=FieldDefinition.field_types),
initial=FieldDefinition.field_types[0][0])
order = forms.IntegerField(widget=forms.HiddenInput())
class Meta:
model = FieldDefinition
class FormunculousBaseFormSet(BaseFormSet):
def __init__(self, app_def=None, user=None, reviewer=False, parent=None,
minimum=0, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList):
self.app_def = app_def
self.user = user
self.reviewer = reviewer
self.parent = parent
self.minimum = minimum
# Make sure we have at least minimum number of forms:
if not ( data or files ):
apps = Application.objects.filter(user = self.user,
parent = self.parent,
app_definition = self.app_def)
initial_count = apps.count()
total = initial_count + self.extra
if total < self.minimum:
self.extra = self.minimum - initial_count
super(FormunculousBaseFormSet, self).__init__(data, files, auto_id,
prefix, initial, error_class)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
apps = Application.objects.filter(user = self.user,
parent = self.parent,
app_definition = self.app_def)
return apps.count()
return super(FormunculousBaseFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
"""
Instantiates and returns the i-th form instance in a formset.
"""
defaults = {'auto_id': self.auto_id,
'prefix': self.add_prefix(i)}
if self.data or self.files:
defaults['data'] = self.data
defaults['files'] = self.files
if self.initial:
try:
defaults['initial'] = self.initial[i]
except IndexError:
pass
# Allow extra forms to be empty.
if i >= self.initial_form_count():
defaults['empty_permitted'] = True
defaults.update(kwargs)
app = None
# Grab the proper app if this is an already existing instance
if i < self.initial_form_count():
# If the form is already posted, grab the PK
# from it, instead of relying on a query
if self.is_bound:
pk_key = "%s-%s" % (self.add_prefix(i), 'pk')
pk = int(self.data[pk_key])
app = Application.objects.get(id=pk)
else:
apps = Application.objects.filter(
user = self.user,
parent = self.parent,
app_definition = self.app_def).order_by("id")
app = apps[i]
form = self.form(self.app_def, app, self.reviewer, **defaults)
self.add_fields(form, i)
return form
# This is a straight rip of the standard formset_factory, but customized
# to handle the Django 1.2 formset backwards incompatible fix
def formunculous_subformset_factory(form, formset=BaseFormSet, extra=1,
can_order=False, can_delete=False,
max_num=None):
"""Return a FormSet for the given form class."""
# Here is the version checking and max_num fixing
# Django Ticket: 13023
import django
if django.VERSION[0] == 1 and django.VERSION[1] >= 2 \
or django.VERSION[0] > 1:
#apply the max_num fix.
if max_num == 0:
max_num = None
attrs = {'form': form, 'extra': extra,
'can_order': can_order, 'can_delete': can_delete,
'max_num': max_num}
return type(form.__name__ + 'FormSet', (formset,), attrs)
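# A minimal usage sketch (the variable names below are illustrative, not part
# of this module): build a child-application formset class, then instantiate
# it with the keyword arguments FormunculousBaseFormSet.__init__ expects.
#
#   SubFormSet = formunculous_subformset_factory(
#       ApplicationForm, formset=FormunculousBaseFormSet,
#       extra=1, can_delete=True, max_num=0)  # max_num=0 becomes None on Django >= 1.2
#   formset = SubFormSet(app_def=sub_app_def, user=request.user,
#                        parent=parent_app, minimum=1,
#                        data=request.POST or None)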
| gpl-3.0 | -4,646,825,230,478,952,000 | 39.193038 | 141 | 0.559484 | false |
gajim/gajim | gajim/common/logging_helpers.py | 1 | 6664 | # Copyright (C) 2009 Bruno Tarquini <btarquini AT gmail.com>
#
# This file is part of Gajim.
#
# Gajim is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; version 3 only.
#
# Gajim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gajim. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import sys
import time
from datetime import datetime
from gajim.common import app
from gajim.common import configpaths
from gajim.common.i18n import _
def parseLogLevel(arg):
"""
Either numeric value or level name from logging module
"""
if arg.isdigit():
return int(arg)
if arg.isupper() and hasattr(logging, arg):
return getattr(logging, arg)
print(_('%s is not a valid loglevel') % repr(arg), file=sys.stderr)
return 0
def parseLogTarget(arg):
"""
[gajim.]c.x.y -> gajim.c.x.y
.other_logger -> other_logger
<None> -> gajim
"""
arg = arg.lower()
if not arg:
return 'gajim'
if arg.startswith('.'):
return arg[1:]
if arg.startswith('gajim'):
return arg
return 'gajim.' + arg
def parseAndSetLogLevels(arg):
"""
[=]LOGLEVEL -> gajim=LOGLEVEL
gajim=LOGLEVEL -> gajim=LOGLEVEL
.other=10 -> other=10
.=10 -> <nothing>
c.x.y=c.z=20 -> gajim.c.x.y=20
gajim.c.z=20
gajim=10,c.x=20 -> gajim=10
gajim.c.x=20
"""
for directive in arg.split(','):
directive = directive.strip()
if not directive:
continue
if '=' not in directive:
directive = '=' + directive
targets, level = directive.rsplit('=', 1)
level = parseLogLevel(level.strip())
for target in targets.split('='):
target = parseLogTarget(target.strip())
if target:
logging.getLogger(target).setLevel(level)
print("Logger %s level set to %d" % (target, level),
file=sys.stderr)
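# For example, a composite directive expands into separate setLevel calls
# (a sketch; the numbers are plain logging levels):
#   parseAndSetLogLevels('gajim=10,c.x=20')
#   -> logging.getLogger('gajim').setLevel(10)
#   -> logging.getLogger('gajim.c.x').setLevel(20)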
class colors:
# pylint: disable=C0326
NONE = chr(27) + "[0m"
    BLACK        = chr(27) + "[30m"
RED = chr(27) + "[31m"
GREEN = chr(27) + "[32m"
BROWN = chr(27) + "[33m"
BLUE = chr(27) + "[34m"
MAGENTA = chr(27) + "[35m"
CYAN = chr(27) + "[36m"
LIGHT_GRAY = chr(27) + "[37m"
DARK_GRAY = chr(27) + "[30;1m"
BRIGHT_RED = chr(27) + "[31;1m"
BRIGHT_GREEN = chr(27) + "[32;1m"
YELLOW = chr(27) + "[33;1m"
BRIGHT_BLUE = chr(27) + "[34;1m"
PURPLE = chr(27) + "[35;1m"
BRIGHT_CYAN = chr(27) + "[36;1m"
WHITE = chr(27) + "[37;1m"
def colorize(text, color):
return color + text + colors.NONE
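# e.g. colorize('error', colors.RED) returns '\x1b[31merror\x1b[0m',
# i.e. the text wrapped in the ANSI color-on/color-off escape sequences.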
class FancyFormatter(logging.Formatter):
"""
An eye-candy formatter with colors
"""
colors_mapping = {
'DEBUG': colors.BLUE,
'INFO': colors.GREEN,
'WARNING': colors.BROWN,
'ERROR': colors.RED,
'CRITICAL': colors.BRIGHT_RED,
}
def __init__(self, fmt, datefmt=None, use_color=False):
logging.Formatter.__init__(self, fmt, datefmt)
self.use_color = use_color
def formatTime(self, record, datefmt=None):
f = logging.Formatter.formatTime(self, record, datefmt)
if self.use_color:
f = colorize(f, colors.DARK_GRAY)
return f
def format(self, record):
level = record.levelname
record.levelname = '(%s)' % level[0]
if self.use_color:
c = FancyFormatter.colors_mapping.get(level, '')
record.levelname = colorize(record.levelname, c)
record.name = '%-25s' % colorize(record.name, colors.CYAN)
else:
record.name = '%-25s|' % record.name
return logging.Formatter.format(self, record)
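    # Roughly, a WARNING record logged to 'gajim.c.x' renders without color as:
    #   06/21/23 10:15:30 (W) gajim.c.x                | message
    # (a sketch; the exact spacing comes from the %-25s and %-35s padding)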
def init():
"""
    Initialize the logging system
"""
if app.get_debug_mode():
_cleanup_debug_logs()
_redirect_output()
use_color = False
if os.name != 'nt':
use_color = sys.stderr.isatty()
consoleloghandler = logging.StreamHandler()
consoleloghandler.setFormatter(
FancyFormatter(
'%(asctime)s %(levelname)s %(name)-35s %(message)s',
'%x %H:%M:%S',
use_color
)
)
root_log = logging.getLogger('gajim')
root_log.setLevel(logging.WARNING)
root_log.addHandler(consoleloghandler)
root_log.propagate = False
root_log = logging.getLogger('nbxmpp')
root_log.setLevel(logging.WARNING)
root_log.addHandler(consoleloghandler)
root_log.propagate = False
root_log = logging.getLogger('gnupg')
root_log.setLevel(logging.WARNING)
root_log.addHandler(consoleloghandler)
root_log.propagate = False
# GAJIM_DEBUG is set only on Windows when using Gajim-Debug.exe
# Gajim-Debug.exe shows a command line prompt and we want to redirect
# log output to it
if app.get_debug_mode() or os.environ.get('GAJIM_DEBUG', False):
set_verbose()
def set_loglevels(loglevels_string):
parseAndSetLogLevels(loglevels_string)
def set_verbose():
parseAndSetLogLevels('gajim=DEBUG')
parseAndSetLogLevels('.nbxmpp=INFO')
def set_quiet():
parseAndSetLogLevels('gajim=CRITICAL')
parseAndSetLogLevels('.nbxmpp=CRITICAL')
def _redirect_output():
debug_folder = configpaths.get('DEBUG')
date = datetime.today().strftime('%d%m%Y-%H%M%S')
filename = '%s-debug.log' % date
fd = open(debug_folder / filename, 'a')
sys.stderr = sys.stdout = fd
def _cleanup_debug_logs():
debug_folder = configpaths.get('DEBUG')
debug_files = list(debug_folder.glob('*-debug.log*'))
now = time.time()
for file in debug_files:
# Delete everything older than 3 days
if file.stat().st_ctime < now - 259200:
file.unlink()
# tests
if __name__ == '__main__':
init()
set_loglevels('gajim.c=DEBUG,INFO')
log = logging.getLogger('gajim')
log.debug('debug')
log.info('info')
log.warning('warn')
log.error('error')
log.critical('critical')
log = logging.getLogger('gajim.c.x.dispatcher')
log.debug('debug')
log.info('info')
log.warning('warn')
log.error('error')
log.critical('critical')
| gpl-3.0 | 4,624,066,474,891,536,000 | 27.848485 | 73 | 0.594688 | false |
0x46616c6b/ansible | lib/ansible/modules/files/ini_file.py | 10 | 11036 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ini_file
short_description: Tweak settings in INI files
extends_documentation_fragment: files
description:
- Manage (add, remove, change) individual settings in an INI-style file without having
to manage the file as a whole with, say, M(template) or M(assemble). Adds missing
sections if they don't exist.
- Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
- Since version 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
no other modifications need to be applied.
version_added: "0.9"
options:
path:
description:
- Path to the INI-style file; this file is created if required.
- Before 2.3 this option was only usable as I(dest).
required: true
default: null
aliases: ['dest']
section:
description:
- Section name in INI file. This is added if C(state=present) automatically when
a single value is being set.
- If left empty or set to `null`, the I(option) will be placed before the first I(section).
Using `null` is also required if the config format does not support sections.
required: true
default: null
option:
description:
- If set (required for changing a I(value)), this is the name of the option.
- May be omitted if adding/removing a whole I(section).
required: false
default: null
value:
description:
- The string value to be associated with an I(option). May be omitted when removing an I(option).
required: false
default: null
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
default: "no"
choices: [ "yes", "no" ]
others:
description:
- All arguments accepted by the M(file) module also work here
required: false
state:
description:
- If set to C(absent) the option or section will be removed if present instead of created.
required: false
default: "present"
choices: [ "present", "absent" ]
no_extra_spaces:
description:
- Do not insert spaces before and after '=' symbol
required: false
default: false
version_added: "2.1"
create:
required: false
choices: [ "yes", "no" ]
default: "yes"
description:
- If set to 'no', the module will fail if the file does not already exist.
By default it will create the file if it is missing.
version_added: "2.2"
notes:
- While it is possible to add an I(option) without specifying a I(value), this makes
no sense.
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but
I(dest) still works as well.
author:
- "Jan-Piet Mens (@jpmens)"
- "Ales Nosek (@noseka1)"
'''
EXAMPLES = '''
# Before 2.3, option 'dest' was used instead of 'path'
- name: Ensure "fav=lemonade" is in section "[drinks]" in specified file
ini_file:
path: /etc/conf
section: drinks
option: fav
value: lemonade
mode: 0600
backup: yes
- ini_file:
path: /etc/anotherconf
section: drinks
option: temperature
value: cold
backup: yes
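# Illustrative: remove an option again via state=absent (per the option docs above)
- ini_file:
    path: /etc/anotherconf
    section: drinks
    option: temperature
    state: absent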
'''
import os
import re
# import module snippets
from ansible.module_utils.basic import AnsibleModule
# ==============================================================
# match_opt
def match_opt(option, line):
option = re.escape(option)
return re.match(' *%s( |\t)*=' % option, line) \
or re.match('# *%s( |\t)*=' % option, line) \
or re.match('; *%s( |\t)*=' % option, line)
# ==============================================================
# match_active_opt
def match_active_opt(option, line):
option = re.escape(option)
return re.match(' *%s( |\t)*=' % option, line)
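# Illustrative behaviour of the two matchers above (re.match returns a match
# object or None):
#   match_active_opt('fav', 'fav = lemonade\n')     # -> match
#   match_opt('fav', '; fav = lemonade\n')          # -> match (commented-out line)
#   match_active_opt('fav', '; fav = lemonade\n')   # -> None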
# ==============================================================
# do_ini
def do_ini(module, filename, section=None, option=None, value=None,
state='present', backup=False, no_extra_spaces=False, create=False):
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % filename,
'after_header': '%s (content)' % filename}
if not os.path.exists(filename):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % filename)
destpath = os.path.dirname(filename)
if not os.path.exists(destpath) and not module.check_mode:
os.makedirs(destpath)
ini_lines = []
else:
ini_file = open(filename, 'r')
try:
ini_lines = ini_file.readlines()
finally:
ini_file.close()
if module._diff:
diff['before'] = ''.join(ini_lines)
changed = False
    # last line of file may not contain a trailing newline; guard against an
    # empty (freshly created) file before indexing into the list
    if ini_lines and (ini_lines[-1] == "" or ini_lines[-1][-1] != '\n'):
        ini_lines[-1] += '\n'
        changed = True
# append a fake section line to simplify the logic
ini_lines.append('[')
within_section = not section
section_start = 0
msg = 'OK'
if no_extra_spaces:
assignment_format = '%s=%s\n'
else:
assignment_format = '%s = %s\n'
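    # e.g. with no_extra_spaces=False, ('fav', 'lemonade') is written out as
    # 'fav = lemonade\n'; with no_extra_spaces=True it becomes 'fav=lemonade\n'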
for index, line in enumerate(ini_lines):
if line.startswith('[%s]' % section):
within_section = True
section_start = index
elif line.startswith('['):
if within_section:
if state == 'present':
# insert missing option line at the end of the section
for i in range(index, 0, -1):
# search backwards for previous non-blank or non-comment line
if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
ini_lines.insert(i, assignment_format % (option, value))
msg = 'option added'
changed = True
break
elif state == 'absent' and not option:
# remove the entire section
del ini_lines[section_start:index]
msg = 'section removed'
changed = True
break
else:
if within_section and option:
if state == 'present':
# change the existing option line
if match_opt(option, line):
newline = assignment_format % (option, value)
option_changed = ini_lines[index] != newline
changed = changed or option_changed
if option_changed:
msg = 'option changed'
ini_lines[index] = newline
if option_changed:
# remove all possible option occurrences from the rest of the section
index = index + 1
while index < len(ini_lines):
line = ini_lines[index]
if line.startswith('['):
break
if match_active_opt(option, line):
del ini_lines[index]
else:
index = index + 1
break
elif state == 'absent':
# delete the existing line
if match_active_opt(option, line):
del ini_lines[index]
changed = True
msg = 'option changed'
break
# remove the fake section line
del ini_lines[-1:]
if not within_section and option and state == 'present':
ini_lines.append('[%s]\n' % section)
ini_lines.append(assignment_format % (option, value))
changed = True
msg = 'section and option added'
if module._diff:
diff['after'] = ''.join(ini_lines)
backup_file = None
if changed and not module.check_mode:
if backup:
backup_file = module.backup_local(filename)
ini_file = open(filename, 'w')
try:
ini_file.writelines(ini_lines)
finally:
ini_file.close()
return (changed, backup_file, diff, msg)
# ==============================================================
# main
def main():
module = AnsibleModule(
argument_spec = dict(
path = dict(required=True, aliases=['dest'], type='path'),
section = dict(required=True),
option = dict(required=False),
value = dict(required=False),
backup = dict(default='no', type='bool'),
state = dict(default='present', choices=['present', 'absent']),
no_extra_spaces = dict(required=False, default=False, type='bool'),
create=dict(default=True, type='bool')
),
add_file_common_args = True,
supports_check_mode = True
)
path = module.params['path']
section = module.params['section']
option = module.params['option']
value = module.params['value']
state = module.params['state']
backup = module.params['backup']
no_extra_spaces = module.params['no_extra_spaces']
create = module.params['create']
(changed,backup_file,diff,msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create)
if not module.check_mode and os.path.exists(path):
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
results = { 'changed': changed, 'msg': msg, 'path': path, 'diff': diff }
if backup_file is not None:
results['backup_file'] = backup_file
# Mission complete
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,633,318,924,062,548,000 | 34.146497 | 135 | 0.560982 | false |
ujfjhz/vnpy | vnpy/trader/gateway/ksotpGateway/ksotpGateway.py | 2 | 67488 | # encoding: UTF-8
'''
Gateway adapter for vn.ksotp (Kingstar options API)
'''
import os
import json
from vnpy.api.ksotp import MdApi, TdApi, defineDict
from vnpy.trader.vtFunction import getTempPath
from vnpy.trader.vtGateway import *
# The dictionaries below map VT constants to their CTP counterparts
# price type mapping
priceTypeMap = {}
priceTypeMap[PRICETYPE_LIMITPRICE] = defineDict["KS_OTP_OPT_LimitPrice"]
priceTypeMap[PRICETYPE_MARKETPRICE] = defineDict["KS_OTP_OPT_AnyPrice"]
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
# direction mapping
directionMap = {}
directionMap[DIRECTION_LONG] = defineDict['KS_OTP_D_Buy']
directionMap[DIRECTION_SHORT] = defineDict['KS_OTP_D_Sell']
directionMapReverse = {v: k for k, v in directionMap.items()}
# offset (open/close) flag mapping
offsetMap = {}
offsetMap[OFFSET_OPEN] = defineDict['KS_OTP_OF_Open']
offsetMap[OFFSET_CLOSE] = defineDict['KS_OTP_OF_Close']
offsetMap[OFFSET_CLOSETODAY] = defineDict['KS_OTP_OF_CloseToday']
offsetMap[OFFSET_CLOSEYESTERDAY] = defineDict['KS_OTP_OF_CloseYesterday']
offsetMapReverse = {v:k for k,v in offsetMap.items()}
# exchange mapping
exchangeMap = {}
exchangeMap[EXCHANGE_CFFEX] = 'CFFEX'
exchangeMap[EXCHANGE_SHFE] = 'SHFE'
exchangeMap[EXCHANGE_CZCE] = 'CZCE'
exchangeMap[EXCHANGE_DCE] = 'DCE'
exchangeMap[EXCHANGE_SSE] = 'SSE'
exchangeMap[EXCHANGE_SZSE] = 'SZSE'
exchangeMap[EXCHANGE_UNKNOWN] = ''
exchangeMapReverse = {v:k for k,v in exchangeMap.items()}
# position direction mapping
posiDirectionMap = {}
posiDirectionMap[DIRECTION_LONG] = defineDict["KSVOC_PD_Buy"]
posiDirectionMap[DIRECTION_SHORT] = defineDict["KSVOC_PD_Sell"]
posiDirectionMapReverse = {v:k for k,v in posiDirectionMap.items()}
########################################################################
class KsotpGateway(VtGateway):
    """Kingstar (JinShiDa) options gateway"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='KSOTP'):
"""Constructor"""
super(KsotpGateway, self).__init__(eventEngine, gatewayName)
        self.mdApi = KsotpMdApi(self)      # market data API
        self.tdApi = KsotpTdApi(self)      # trading API
        self.mdConnected = False           # market data API connection status, True once login completes
        self.tdConnected = False           # trading API connection status
        self.qryEnabled = False            # whether the polling query loop should run
#----------------------------------------------------------------------
    def connect(self):
        """Connect"""
        # load the json settings file
fileName = self.gatewayName + '_connect.json'
path = os.path.abspath(os.path.dirname(__file__))
fileName = os.path.join(path, fileName)
try:
f = file(fileName)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Failed to read the connection settings file, please check'
self.onLog(log)
return
        # parse the json file
setting = json.load(f)
try:
userID = str(setting['userID'])
password = str(setting['password'])
brokerID = str(setting['brokerID'])
tdAddress = str(setting['tdAddress'])
mdAddress = str(setting['mdAddress'])
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Connection settings are missing required fields, please check'
self.onLog(log)
return
        # create the market data and trading API objects
self.mdApi.connect(userID, password, brokerID, mdAddress)
self.tdApi.connect(userID, password, brokerID, tdAddress)
        # initialize and start the polling queries
self.initQuery()
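    # A sketch of the expected KSOTP_connect.json, based on the keys read
    # above (all values are placeholders):
    # {
    #     "userID": "your_account",
    #     "password": "your_password",
    #     "brokerID": "your_broker_id",
    #     "tdAddress": "tcp://td.example.com:port",
    #     "mdAddress": "tcp://md.example.com:port"
    # }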
#----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to market data"""
self.mdApi.subscribe(subscribeReq)
#----------------------------------------------------------------------
    def sendOrder(self, orderReq):
        """Send an order"""
return self.tdApi.sendOrder(orderReq)
#----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq):
        """Cancel an order"""
self.tdApi.cancelOrder(cancelOrderReq)
#----------------------------------------------------------------------
    def qryAccount(self):
        """Query account funds"""
self.tdApi.qryAccount()
#----------------------------------------------------------------------
    def qryPosition(self):
        """Query positions"""
self.tdApi.qryPosition()
#----------------------------------------------------------------------
    def close(self):
        """Close"""
if self.mdConnected:
self.mdApi.close()
if self.tdConnected:
self.tdApi.close()
#----------------------------------------------------------------------
    def initQuery(self):
        """Initialize the recurring queries"""
if self.qryEnabled:
            # list of query functions to cycle through
self.qryFunctionList = [self.qryAccount, self.qryPosition]
            self.qryCount = 0           # query trigger countdown
            self.qryTrigger = 2         # trigger threshold; KSOTP queries are very slow, so poll sparingly
            self.qryNextFunction = 0    # index of the query function run last
self.startQuery()
#----------------------------------------------------------------------
    def query(self, event):
        """Query function registered with the event engine"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
            # reset the countdown
self.qryCount = 0
            # run the query function
function = self.qryFunctionList[self.qryNextFunction]
function()
            # advance the index of the next query function, wrapping back to 0 at the end of the list
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
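    # Worked example: with qryTrigger = 2 the countdown fires on every third
    # EVENT_TIMER, and the two functions alternate, so qryAccount and
    # qryPosition are each polled once per six timer events.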
#----------------------------------------------------------------------
    def startQuery(self):
        """Start the recurring queries"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
    def setQryEnabled(self, qryEnabled):
        """Set whether the polling query loop should run"""
self.qryEnabled = qryEnabled
########################################################################
class KsotpMdApi(MdApi):
    """Market data API implementation for Kingstar options"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(KsotpMdApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT                  # request ID
        self.connectionStatus = False           # connection status
        self.loginStatus = False                # login status
        self.subscribedSymbols = set()          # symbols already subscribed
        self.userID = EMPTY_STRING              # account
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker ID
        self.address = EMPTY_STRING             # server address
#----------------------------------------------------------------------
    def onFrontConnected(self):
        """Server connected"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
        log.logContent = u'Market data server connected'
self.gateway.onLog(log)
self.login()
#----------------------------------------------------------------------
    def onFrontDisconnected(self, n):
        """Server disconnected"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.mdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
        log.logContent = u'Market data server disconnected'
self.gateway.onLog(log)
#----------------------------------------------------------------------
    def onRspError(self, error, n, last):
        """Error callback"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspUserLogin(self, data, error, n, last):
        """Login callback"""
        # on successful login, push a log event
if error['ErrorID'] == 0:
self.loginStatus = True
self.gateway.mdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Market data server login completed'
self.gateway.onLog(log)
            # re-subscribe previously subscribed contracts
for subscribeReq in self.subscribedSymbols:
self.subscribe(subscribeReq)
        # otherwise push the error
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspUserLogout(self, data, error, n, last):
        """Logout callback"""
        # on successful logout, push a log event
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Market data server logout completed'
self.gateway.onLog(log)
        # otherwise push the error
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspSubMarketData(self, data, error, n, last):
        """Contract subscription callback"""
        # subscription errors are usually unimportant, so ignore them
pass
#----------------------------------------------------------------------
    def onRspUnSubMarketData(self, data, error, n, last):
        """Contract unsubscription callback"""
        # same as above
pass
#----------------------------------------------------------------------
    def onRtnDepthMarketData(self, data):
        """Tick data push"""
tick = VtTickData()
tick.gatewayName = self.gatewayName
tick.symbol = data['InstrumentID']
        tick.exchange = exchangeMapReverse.get(data['ExchangeID'], u'unknown')
tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.time = '.'.join([data['UpdateTime'], str(data['UpdateMillisec']/100)])
tick.date = data['TradingDay']
tick.openPrice = data['OpenPrice']
tick.highPrice = data['HighestPrice']
tick.lowPrice = data['LowestPrice']
tick.preClosePrice = data['PreClosePrice']
tick.upperLimit = data['UpperLimitPrice']
tick.lowerLimit = data['LowerLimitPrice']
        # CTP provides only one level of market depth
tick.bidPrice1 = data['BidPrice1']
tick.bidVolume1 = data['BidVolume1']
tick.askPrice1 = data['AskPrice1']
tick.askVolume1 = data['AskVolume1']
self.gateway.onTick(tick)
#----------------------------------------------------------------------
    def onRspSubForQuoteRsp(self, data, error, n, last):
        """Option quote request (RFQ) subscription callback"""
pass
#----------------------------------------------------------------------
    def onRspUnSubForQuoteRsp(self, data, error, n, last):
        """Option quote request (RFQ) unsubscription callback"""
pass
#----------------------------------------------------------------------
    def onRtnForQuoteRsp(self, data):
        """Option quote request push"""
pass
#----------------------------------------------------------------------
    def connect(self, userID, password, brokerID, address):
        """Initialize the connection"""
        self.userID = userID            # account
        self.password = password        # password
        self.brokerID = brokerID        # broker ID
        self.address = address          # server address
        # if the server connection has not been established yet, connect
        if not self.connectionStatus:
            # create the API object on the C++ side; the argument is the folder
            # path used to store the .con files
            path = getTempPath(self.gatewayName + '_')
            self.createOTPMdApi(path)
            # register the server address
            self.registerFront(self.address)
            # initialize the connection; onFrontConnected is called on success
            self.init()
        # if already connected but not yet logged in, log in
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to a contract"""
        # By design, if subscribe is called before login completes, the request
        # is cached here and re-sent automatically once login succeeds.
if self.loginStatus:
req = {}
req['InstrumentID'] = subscribeReq.symbol
req['ExchangeID'] = subscribeReq.exchange
self.subscribeMarketData(req)
self.subscribedSymbols.add(subscribeReq)
#----------------------------------------------------------------------
    def login(self):
        """Log in"""
        # log in only if username, password and broker ID have been provided
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
    def close(self):
        """Close"""
self.exit()
########################################################################
class KsotpTdApi(TdApi):
    """Trading API implementation for Kingstar options"""
#----------------------------------------------------------------------
    def __init__(self, gateway):
        """API object initialization"""
super(KsotpTdApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT                  # request ID
        self.orderRef = EMPTY_INT               # order reference number
        self.connectionStatus = False           # connection status
        self.loginStatus = False                # login status
        self.userID = EMPTY_STRING              # account
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker ID
        self.address = EMPTY_STRING             # server address
        self.frontID = EMPTY_INT                # front server ID
        self.sessionID = EMPTY_INT              # session ID
##----------------------------------------------------------------------
    #def onFrontConnected(self):
        #"""Server connected"""
#self.connectionStatus = True
#log = VtLogData()
#log.gatewayName = self.gatewayName
        #log.logContent = u'Trading server connected'
#self.gateway.onLog(log)
#self.login()
##----------------------------------------------------------------------
    #def onFrontDisconnected(self, n):
        #"""Server disconnected"""
#self.connectionStatus = False
#self.loginStatus = False
#self.gateway.tdConnected = False
#log = VtLogData()
#log.gatewayName = self.gatewayName
        #log.logContent = u'Trading server disconnected'
#self.gateway.onLog(log)
##----------------------------------------------------------------------
    #def onRspUserLogin(self, data, error, n, last):
        #"""Login callback"""
        ## on successful login, push a log event
        #if error['ErrorID'] == 0:
            #self.frontID = str(data['FrontID'])
            #self.sessionID = str(data['SessionID'])
            #self.loginStatus = True
            #self.gateway.tdConnected = True
            #log = VtLogData()
            #log.gatewayName = self.gatewayName
            #log.logContent = u'Trading server login completed'
            #self.gateway.onLog(log)
            ## confirm settlement info
#req = {}
#req['BrokerID'] = self.brokerID
#req['InvestorID'] = self.userID
#self.reqID += 1
#self.reqSettlementInfoConfirm(req, self.reqID)
        ## otherwise push the error
        #else:
            #err = VtErrorData()
            #err.gatewayName = self.gatewayName
#err.errorID = error['ErrorID']
#err.errorMsg = error['ErrorMsg'].decode('gbk')
#self.gateway.onError(err)
##----------------------------------------------------------------------
    #def onRspUserLogout(self, data, error, n, last):
        #"""Logout callback"""
        ## on successful logout, push a log event
#if error['ErrorID'] == 0:
#self.loginStatus = False
#self.gateway.tdConnected = False
#log = VtLogData()
#log.gatewayName = self.gatewayName
            #log.logContent = u'Trading server logout completed'
#self.gateway.onLog(log)
        ## otherwise push the error
#else:
#err = VtErrorData()
#err.gatewayName = self.gatewayName
#err.errorID = error['ErrorID']
#err.errorMsg = error['ErrorMsg'].decode('gbk')
#self.gateway.onError(err)
##----------------------------------------------------------------------
#def onRspUserPasswordUpdate(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
    #def onRspOrderInsert(self, data, error, n, last):
        #"""Order insert error (broker side)"""
#err = VtErrorData()
#err.gatewayName = self.gatewayName
#err.errorID = error['ErrorID']
#err.errorMsg = error['ErrorMsg'].decode('gbk')
#self.gateway.onError(err)
##----------------------------------------------------------------------
    #def onRspOrderAction(self, data, error, n, last):
        #"""Order cancel error (broker side)"""
#err = VtErrorData()
#err.gatewayName = self.gatewayName
#err.errorID = error['ErrorID']
#err.errorMsg = error['ErrorMsg'].decode('gbk')
#self.gateway.onError(err)
##----------------------------------------------------------------------
#def onRspQueryMaxOrderVolume(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
    #def onRspSettlementInfoConfirm(self, data, error, n, last):
        #"""Settlement info confirmation callback"""
#log = VtLogData()
#log.gatewayName = self.gatewayName
        #log.logContent = u'Settlement info confirmed'
#self.gateway.onLog(log)
        ## query the contract list
#self.reqID += 1
#self.reqQryInstrument({}, self.reqID)
##----------------------------------------------------------------------
#def onRspRemoveParkedOrder(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspRemoveParkedOrderAction(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspExecOrderInsert(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspExecOrderAction(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspForQuoteInsert(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQuoteInsert(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQuoteAction(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryOrder(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryTrade(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
    #def onRspQryInvestorPosition(self, data, error, n, last):
        #"""Position query callback"""
        #pos = VtPositionData()
        #pos.gatewayName = self.gatewayName
        ## save the symbol
        #pos.symbol = data['InstrumentID']
        #pos.vtSymbol = pos.symbol # data carries no ExchangeID field here
        ## direction and frozen volume
#pos.direction = posiDirectionMapReverse.get(data['PosiDirection'], '')
#if pos.direction == DIRECTION_NET or pos.direction == DIRECTION_LONG:
#pos.frozen = data['LongFrozen']
#elif pos.direction == DIRECTION_SHORT:
#pos.frozen = data['ShortFrozen']
        ## position volume
#pos.position = data['Position']
#pos.ydPosition = data['YdPosition']
        ## average position price
#if pos.position:
#pos.price = data['PositionCost'] / pos.position
        ## VT position name
#pos.vtPositionName = '.'.join([pos.vtSymbol, pos.direction])
        ## push
#self.gateway.onPosition(pos)
##----------------------------------------------------------------------
    #def onRspQryTradingAccount(self, data, error, n, last):
        #"""Account funds query callback"""
#account = VtAccountData()
#account.gatewayName = self.gatewayName
        ## account ID
#account.accountID = data['AccountID']
#account.vtAccountID = '.'.join([self.gatewayName, account.accountID])
        ## numeric fields
#account.preBalance = data['PreBalance']
#account.available = data['Available']
#account.commission = data['Commission']
#account.margin = data['CurrMargin']
#account.closeProfit = data['CloseProfit']
#account.positionProfit = data['PositionProfit']
        ## unclear whether this balance matches the one shown in the KuaiQi terminal; needs testing
#account.balance = (data['PreBalance'] - data['PreCredit'] - data['PreMortgage'] +
#data['Mortgage'] - data['Withdraw'] + data['Deposit'] +
#data['CloseProfit'] + data['PositionProfit'] + data['CashIn'] -
#data['Commission'])
        ## push
#self.gateway.onAccount(account)
##----------------------------------------------------------------------
    #def onRspQryInvestor(self, data, error, n, last):
        #"""Investor query callback"""
#pass
##----------------------------------------------------------------------
#def onRspQryTradingCode(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryInstrumentMarginRate(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryInstrumentCommissionRate(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryExchange(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryProduct(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
    #def onRspQryInstrument(self, data, error, n, last):
        #"""Contract query callback"""
#contract = VtContractData()
#contract.gatewayName = self.gatewayName
#contract.symbol = data['InstrumentID']
#contract.exchange = exchangeMapReverse[data['ExchangeID']]
#contract.vtSymbol = contract.symbol #'.'.join([contract.symbol, contract.exchange])
#contract.name = data['InstrumentName'].decode('GBK')
        ## contract numeric fields
#contract.size = data['VolumeMultiple']
#contract.priceTick = data['PriceTick']
#contract.strikePrice = data['StrikePrice']
#contract.underlyingSymbol = data['UnderlyingInstrID']
        ## product class
#if data['ProductClass'] == defineDict["KS_OTP_PC_Futures"]:
#contract.productClass = PRODUCT_FUTURES
        ## futures options and spot (ETF) options are both mapped to the option type here
#elif data['ProductClass'] == defineDict["KS_OTP_PC_ETFOption"]:
#contract.productClass = PRODUCT_OPTION
#elif data['ProductClass'] == defineDict["KS_OTP_PC_Options"]:
#contract.productClass = PRODUCT_OPTION
        #elif data['ProductClass'] == defineDict["KS_OTP_PC_Combination"]:
#contract.productClass = PRODUCT_COMBINATION
#else:
#contract.productClass = PRODUCT_UNKNOWN
        ## option type
#if data['OptionsType'] == '1':
#contract.optionType = OPTION_CALL
#elif data['OptionsType'] == '2':
#contract.optionType = OPTION_PUT
        ## push
#self.gateway.onContract(contract)
#if last:
#log = VtLogData()
#log.gatewayName = self.gatewayName
            #log.logContent = u'Contract information download completed'
#self.gateway.onLog(log)
##----------------------------------------------------------------------
#def onRspQryDepthMarketData(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
    #def onRspQrySettlementInfo(self, data, error, n, last):
        #"""Settlement info query callback"""
#pass
##----------------------------------------------------------------------
#def onRspQryTransferBank(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryInvestorPositionDetail(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryNotice(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQrySettlementInfoConfirm(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryInvestorPositionCombineDetail(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryCFMMCTradingAccountKey(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryEWarrantOffset(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryInvestorProductGroupMargin(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryExchangeMarginRate(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryExchangeMarginRateAdjust(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryExchangeRate(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQrySecAgentACIDMap(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryOptionInstrTradeCost(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryOptionInstrCommRate(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryExecOrder(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryForQuote(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryQuote(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryTransferSerial(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryAccountregister(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
    #def onRspError(self, error, n, last):
        #"""Error callback"""
#err = VtErrorData()
#err.gatewayName = self.gatewayName
#err.errorID = error['ErrorID']
#err.errorMsg = error['ErrorMsg'].decode('gbk')
#self.gateway.onError(err)
##----------------------------------------------------------------------
    #def onRtnOrder(self, data):
        #"""Order callback"""
        ## update the max order reference
#newref = data['OrderRef']
#self.orderRef = max(self.orderRef, int(newref))
        ## create the order data object
#order = VtOrderData()
#order.gatewayName = self.gatewayName
        ## save the symbol and order reference
#order.symbol = data['InstrumentID']
#order.exchange = exchangeMapReverse[data['ExchangeID']]
#order.vtSymbol = order.symbol #'.'.join([order.symbol, order.exchange])
#order.orderID = data['OrderRef']
        ## direction
#if data['Direction'] == '0':
#order.direction = DIRECTION_LONG
#elif data['Direction'] == '1':
#order.direction = DIRECTION_SHORT
#else:
#order.direction = DIRECTION_UNKNOWN
        ## offset
#if data['CombOffsetFlag'] == '0':
#order.offset = OFFSET_OPEN
#elif data['CombOffsetFlag'] == '1':
#order.offset = OFFSET_CLOSE
#else:
#order.offset = OFFSET_UNKNOWN
        ## status
#if data['OrderStatus'] == '0':
#order.status = STATUS_ALLTRADED
#elif data['OrderStatus'] == '1':
#order.status = STATUS_PARTTRADED
#elif data['OrderStatus'] == '3':
#order.status = STATUS_NOTTRADED
#elif data['OrderStatus'] == '5':
#order.status = STATUS_CANCELLED
#else:
#order.status = STATUS_UNKNOWN
        ## price, volume and other numeric fields
#order.price = data['LimitPrice']
#order.totalVolume = data['VolumeTotalOriginal']
#order.tradedVolume = data['VolumeTraded']
#order.orderTime = data['InsertTime']
#order.cancelTime = data['CancelTime']
#order.frontID = data['FrontID']
#order.sessionID = data['SessionID']
        ## Strict CTP order identity requires the triple frontID, sessionID, orderID,
        ## but this adapter already relies on the auto-increment of CTP's OrderRef
        ## to avoid duplicates. The only way an OrderRef can repeat is logging in
        ## from several places and sending orders at almost exactly the same moment,
        ## which is not considered a problem for VtTrader's use cases.
#order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
        ## push
#self.gateway.onOrder(order)
##----------------------------------------------------------------------
    #def onRtnTrade(self, data):
        #"""Trade callback"""
        ## create the trade data object
#trade = VtTradeData()
#trade.gatewayName = self.gatewayName
        ## save the symbol and order reference
#trade.symbol = data['InstrumentID']
#trade.exchange = exchangeMapReverse[data['ExchangeID']]
#trade.vtSymbol = trade.symbol #'.'.join([trade.symbol, trade.exchange])
#trade.tradeID = data['TradeID']
#trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
#trade.orderID = data['OrderRef']
#trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
        ## direction
#trade.direction = directionMapReverse.get(data['Direction'], '')
        ## offset
#trade.offset = offsetMapReverse.get(data['OffsetFlag'], '')
        ## price, volume and other numeric fields
#trade.price = data['Price']
#trade.volume = data['Volume']
#trade.tradeTime = data['TradeTime']
        ## push
#self.gateway.onTrade(trade)
##----------------------------------------------------------------------
#def onRtnInstrumentStatus(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnTradingNotice(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnErrorConditionalOrder(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnExecOrder(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onErrRtnExecOrderInsert(self, data, error):
#""""""
#pass
##----------------------------------------------------------------------
#def onErrRtnExecOrderAction(self, data, error):
#""""""
#pass
##----------------------------------------------------------------------
#def onErrRtnForQuoteInsert(self, data, error):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnQuote(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onErrRtnQuoteInsert(self, data, error):
#""""""
#pass
##----------------------------------------------------------------------
#def onErrRtnQuoteAction(self, data, error):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnForQuoteRsp(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryContractBank(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryParkedOrder(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryParkedOrderAction(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryTradingNotice(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryBrokerTradingParams(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQryBrokerTradingAlgos(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnFromBankToFutureByBank(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnFromFutureToBankByBank(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnRepealFromBankToFutureByBank(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnRepealFromFutureToBankByBank(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnFromBankToFutureByFuture(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnFromFutureToBankByFuture(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnRepealFromBankToFutureByFutureManual(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnRepealFromFutureToBankByFutureManual(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnQueryBankBalanceByFuture(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onErrRtnBankToFutureByFuture(self, data, error):
#""""""
#pass
##----------------------------------------------------------------------
#def onErrRtnFutureToBankByFuture(self, data, error):
#""""""
#pass
##----------------------------------------------------------------------
#def onErrRtnRepealBankToFutureByFutureManual(self, data, error):
#""""""
#pass
##----------------------------------------------------------------------
#def onErrRtnRepealFutureToBankByFutureManual(self, data, error):
#""""""
#pass
##----------------------------------------------------------------------
#def onErrRtnQueryBankBalanceByFuture(self, data, error):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnRepealFromBankToFutureByFuture(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnRepealFromFutureToBankByFuture(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspFromBankToFutureByFuture(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspFromFutureToBankByFuture(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRspQueryBankAccountMoneyByFuture(self, data, error, n, last):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnOpenAccountByBank(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnCancelAccountByBank(self, data):
#""""""
#pass
##----------------------------------------------------------------------
#def onRtnChangeAccountByBank(self, data):
#""""""
#pass
#----------------------------------------------------------------------
    def connect(self, userID, password, brokerID, address):
        """Initialize the connection"""
        self.userID = userID            # account
        self.password = password        # password
        self.brokerID = brokerID        # broker ID
        self.address = address          # server address
        # if the server connection has not been established yet, connect
        if not self.connectionStatus:
            # create the API object on the C++ side; the argument is the folder
            # path used to store the .con files
            path = getTempPath(self.gatewayName + '_')
            self.createOTPTraderApi(path)
            # register the server address
            self.registerFront(self.address)
            # initialize the connection; onFrontConnected is called on success
            self.init()
        # if already connected but not yet logged in, log in
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
    def login(self):
        """Log in to the server"""
        # log in only if username, password and broker ID have been provided
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
    def qryAccount(self):
        """Query the account"""
self.reqID += 1
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryTradingAccount(req, self.reqID)
#----------------------------------------------------------------------
    def qryPosition(self):
        """Query positions"""
self.reqID += 1
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryInvestorPosition(req, self.reqID)
#----------------------------------------------------------------------
    def sendOrder(self, orderReq):
        """Send an order"""
self.reqID += 1
self.orderRef += 1
req = {}
req['InstrumentID'] = orderReq.symbol
req['LimitPrice'] = orderReq.price
req['VolumeTotalOriginal'] = orderReq.volume
        # if any of the passed-in types below is not supported by this adapter, return an empty string
try:
req['OrderPriceType'] = priceTypeMap[orderReq.priceType]
req['Direction'] = directionMap[orderReq.direction]
req['OffsetFlag'] = offsetMap[orderReq.offset]
except KeyError:
return ''
req['OrderRef'] = str(self.orderRef)
req['InvestorID'] = self.userID
req['UserID'] = self.userID
req['BrokerID'] = self.brokerID
        req['HedgeFlag'] = defineDict['KS_OTP_HF_Speculation']           # speculation
        req['ContingentCondition'] = defineDict['KS_OTP_CC_Immediately'] # send immediately
        req['ForceCloseReason'] = defineDict['KS_OTP_FCC_NotForceClose'] # not a forced close
        req['IsAutoSuspend'] = 0                                         # no auto-suspend
        req['TimeCondition'] = defineDict['KS_OTP_TC_GFD']               # good for day
        req['VolumeCondition'] = defineDict['KS_OTP_VC_AV']              # any volume
        req['MinVolume'] = 1                                             # minimum fill volume of 1
self.reqOrderInsert(req, self.reqID)
        # return the order ID (a string) so that algorithms can manage the order dynamically
vtOrderID = '.'.join([self.gatewayName, str(self.orderRef)])
return vtOrderID
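    # A minimal usage sketch (the symbol and prices are placeholders; VtOrderReq
    # is assumed to be the request class exported by vtGateway):
    #   req = VtOrderReq()
    #   req.symbol = '10000001'
    #   req.price = 0.05
    #   req.volume = 1
    #   req.priceType = PRICETYPE_LIMITPRICE
    #   req.direction = DIRECTION_LONG
    #   req.offset = OFFSET_OPEN
    #   vtOrderID = tdApi.sendOrder(req)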
#----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq):
        """Cancel an order"""
self.reqID += 1
req = {}
req['InstrumentID'] = cancelOrderReq.symbol
req['ExchangeID'] = cancelOrderReq.exchange
req['OrderRef'] = cancelOrderReq.orderID
req['FrontID'] = cancelOrderReq.frontID
req['SessionID'] = cancelOrderReq.sessionID
req['ActionFlag'] = defineDict['KS_OTP_AF_Delete']
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqOrderAction(req, self.reqID)
#----------------------------------------------------------------------
    def close(self):
        """Close"""
self.exit()
#----------------------------------------------------------------------
    def onFrontConnected(self):
        """Server connected"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
        log.logContent = u'Trading server connected'
self.gateway.onLog(log)
self.login()
#----------------------------------------------------------------------
    def onFrontDisconnected(self, i):
        """Server disconnected"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
        log.logContent = u'Trading server disconnected'
self.gateway.onLog(log)
#----------------------------------------------------------------------
    def onRspError(self, error, n, last):
        """Error callback"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspUserLogin(self, data, error, n, last):
        """Login callback"""
        # on successful login, push a log event
        if error['ErrorID'] == 0:
            self.frontID = str(data['FrontID'])
            self.sessionID = str(data['SessionID'])
            self.loginStatus = True
            self.gateway.tdConnected = True
            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'Trading server login completed'
            self.gateway.onLog(log)
            # confirm settlement info
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqID += 1
self.reqSettlementInfoConfirm(req, self.reqID)
        # otherwise push the error
        else:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last) :
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Trading server logout completed'
self.gateway.onLog(log)
        # otherwise, push the error
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserPasswordUpdate(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspTradingAccountPasswordUpdate(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspOrderInsert(self, data, error, n, last) :
"""发单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspOrderAction(self, data, error, n, last) :
"""撤单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspQryOrder(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryTrade(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPosition(self, data, error, n, last) :
"""持仓查询回报"""
pos = VtPositionData()
pos.gatewayName = self.gatewayName
        # save the symbol
pos.symbol = data['InstrumentID']
pos.exchange = exchangeMapReverse.get(data['ExchangeID'], EXCHANGE_UNKNOWN)
pos.vtSymbol = '.'.join([pos.symbol, pos.exchange])
        # direction and frozen volume
pos.direction = posiDirectionMapReverse.get(data['PosiDirection'], '')
if pos.direction == DIRECTION_NET or pos.direction == DIRECTION_LONG:
pos.frozen = data['LongFrozen']
elif pos.direction == DIRECTION_SHORT:
pos.frozen = data['ShortFrozen']
        # position volume
pos.position = data['Position']
pos.ydPosition = data['YdPosition']
        # average position price
if pos.position:
pos.price = data['PositionCost'] / pos.position
        # VT system position name
pos.vtPositionName = '.'.join([pos.vtSymbol, pos.direction])
        # push
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def onRspQryTradingAccount(self, data, error, n, last) :
"""资金账户查询回报"""
account = VtAccountData()
account.gatewayName = self.gatewayName
        # account ID
account.accountID = data['AccountID']
account.vtAccountID = '.'.join([self.gatewayName, account.accountID])
        # numeric fields
account.preBalance = data['PreBalance']
account.available = data['Available']
account.commission = data['Commission']
account.margin = data['CurrMargin']
account.closeProfit = data['CloseProfit']
account.positionProfit = data['PositionProfit']
        # Unverified whether this balance matches the account shown in the KuaiQi (快期) terminal; needs testing
account.balance = (data['PreBalance'] + data['Mortgage'] - data['Withdraw'] + data['Deposit'] +
data['CloseProfit'] + data['PositionProfit'] + data['CashIn'] - data['Commission'])
        # push
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onRspQryInvestor(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryTradingCode(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryExchange(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrument(self, data, error, n, last) :
"""合约查询回报"""
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = data['InstrumentID']
contract.exchange = exchangeMapReverse[data['ExchangeID']]
contract.vtSymbol = '.'.join([contract.symbol, contract.exchange])
contract.name = data['InstrumentName'].decode('GBK')
        # contract numeric fields
contract.size = data['VolumeMultiple']
contract.priceTick = data['PriceTick']
contract.strikePrice = data['StrikePrice']
contract.underlyingSymbol = data['UnderlyingInstrID']
        # product class
if data['ProductClass'] == defineDict["KS_OTP_PC_Futures"]:
contract.productClass = PRODUCT_FUTURES
        # options on futures and on spot/ETF are both mapped to the option type
elif data['ProductClass'] == defineDict["KS_OTP_PC_ETFOption"]:
contract.productClass = PRODUCT_OPTION
elif data['ProductClass'] == defineDict["KS_OTP_PC_Options"]:
contract.productClass = PRODUCT_OPTION
        elif data['ProductClass'] == defineDict["KS_OTP_PC_Combination"]:
contract.productClass = PRODUCT_COMBINATION
else:
contract.productClass = PRODUCT_UNKNOWN
        # option type
if data['OptionsType'] == '1':
contract.optionType = OPTION_CALL
elif data['OptionsType'] == '2':
contract.optionType = OPTION_PUT
        # push
self.gateway.onContract(contract)
if last:
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Contract information download completed'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onRspQryInvestorPositionDetail(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryTradingNotice(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspExecOrderInsert(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspLockInsert(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspExecOrderAction(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryExecOrder(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryExecOrderVolume(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryLock(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryLockPosition(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryUnderlyingStockInfo(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryOTPInsCommRate(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentMarginRate(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryOTPAssignment(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryDepthMarketData(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspFromBankToStockByStock(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRtnFromBankToStockByStock(self, data) :
""""""
pass
#----------------------------------------------------------------------
def onRspFromStockToBankByStock(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRtnFromStockToBankByStock(self, data) :
""""""
pass
#----------------------------------------------------------------------
def onRtnQueryBankBalanceByStock(self, data) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryContractBank(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQueryBankAccountMoneyByStock(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryTransferSerial(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQrySettlementInfoConfirm(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspSettlementInfoConfirm(self, data, error, n, last) :
"""确认结算信息回报"""
log = VtLogData()
log.gatewayName = self.gatewayName
        log.logContent = u'Settlement info confirmed'
self.gateway.onLog(log)
        # query contract data
self.reqID += 1
self.reqQryInstrument({}, self.reqID)
#----------------------------------------------------------------------
def onRspQrySettlementInfo(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorTradeLevel(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryPurchaseLimitAmt(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryPositionLimitVol(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryHistoryOrder(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryHistoryTrade(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryHistoryAssignment(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryDelivDetail(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspAutoExecOrderAction(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspCombActionInsert(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorCombinePosition(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryCombActionVolume(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspFundOutCreditApply(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryFundOutCredit(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRspQryFundOutCreditApply(self, data, error, n, last) :
""""""
pass
#----------------------------------------------------------------------
def onRtnOrder(self, data) :
"""报单回报"""
# 更新最大报单编号
newref = data['OrderRef']
self.orderRef = max(self.orderRef, int(newref))
        # create the order data object
order = VtOrderData()
order.gatewayName = self.gatewayName
        # save the symbol and order reference
order.symbol = data['InstrumentID']
order.exchange = exchangeMapReverse[data['ExchangeID']]
order.vtSymbol = '.'.join([order.symbol, order.exchange])
order.orderID = data['OrderRef']
        # direction
if data['Direction'] == '0':
order.direction = DIRECTION_LONG
elif data['Direction'] == '1':
order.direction = DIRECTION_SHORT
else:
order.direction = DIRECTION_UNKNOWN
        # offset (open/close)
if data['OffsetFlag'] == '0':
order.offset = OFFSET_OPEN
elif data['OffsetFlag'] == '1':
order.offset = OFFSET_CLOSE
else:
order.offset = OFFSET_UNKNOWN
        # status
if data['OrderStatus'] == '0':
order.status = STATUS_ALLTRADED
elif data['OrderStatus'] == '1':
order.status = STATUS_PARTTRADED
elif data['OrderStatus'] == '3':
order.status = STATUS_NOTTRADED
elif data['OrderStatus'] == '5':
order.status = STATUS_CANCELLED
else:
order.status = STATUS_UNKNOWN
        # price, volume and other numeric fields
order.price = data['LimitPrice']
order.totalVolume = data['VolumeTotalOriginal']
order.tradedVolume = data['VolumeTraded']
order.orderTime = data['InsertTime']
order.cancelTime = data['CancelTime']
order.frontID = data['FrontID']
order.sessionID = data['SessionID']
        # CTP keeps order IDs unique based on the frontID, sessionID and orderID fields.
        # This API design already relies on the auto-incrementing OrderRef to avoid duplicates.
        # The only way OrderRef could repeat is multiple sessions sending orders almost simultaneously,
        # which is considered a non-issue for VtTrader's use cases.
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
        # push
self.gateway.onOrder(order)
#----------------------------------------------------------------------
def onRtnTrade(self, data) :
"""成交回报"""
# 创建报单数据对象
trade = VtTradeData()
trade.gatewayName = self.gatewayName
        # save the symbol and order reference
trade.symbol = data['InstrumentID']
trade.exchange = exchangeMapReverse[data['ExchangeID']]
trade.vtSymbol = '.'.join([trade.symbol, trade.exchange])
trade.tradeID = data['TradeID']
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = data['OrderRef']
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
        # direction
trade.direction = directionMapReverse.get(data['Direction'], '')
        # offset (open/close)
trade.offset = offsetMapReverse.get(data['OffsetFlag'], '')
        # price, volume and other numeric fields
trade.price = data['Price']
trade.volume = data['Volume']
trade.tradeTime = data['TradeTime']
        # push
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onRtnExecOrder(self, data) :
""""""
pass
#----------------------------------------------------------------------
def onRtnLock(self, data) :
""""""
pass
#----------------------------------------------------------------------
def onRtnInstrumentStatus(self, data) :
""""""
pass
#----------------------------------------------------------------------
def onRtnTradingNotice(self, data) :
""""""
pass
#----------------------------------------------------------------------
def onRtnCombAction(self, data) :
""""""
pass
| mit | -1,158,835,770,407,496,200 | 34.239359 | 110 | 0.405597 | false |
ycq90/my_qap | qap/viz/plotting.py | 3 | 9937 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import math
import os.path as op
import time
import numpy as np
import nibabel as nb
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas
import seaborn as sns
def plot_measures(df, measures, ncols=4, title='Group level report',
subject=None, figsize=(8.27, 11.69)):
import matplotlib.gridspec as gridspec
nmeasures = len(measures)
nrows = nmeasures // ncols
if nmeasures % ncols > 0:
nrows += 1
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(nrows, ncols)
axes = []
for i, mname in enumerate(measures):
axes.append(plt.subplot(gs[i]))
axes[-1].set_xlabel(mname)
sns.distplot(
df[[mname]], ax=axes[-1], color="b", rug=True, norm_hist=True)
# labels = np.array(axes[-1].get_xticklabels())
# labels[2:-2] = ''
axes[-1].set_xticklabels([])
plt.ticklabel_format(style='sci', axis='y', scilimits=(-1, 1))
if subject is not None:
subid = subject
try:
subid = int(subid)
except ValueError:
pass
subdf = df.loc[df['subject'] == subid]
sessions = np.atleast_1d(subdf[['session']]).reshape(-1).tolist()
for ss in sessions:
sesdf = subdf.loc[subdf['session'] == ss]
scans = np.atleast_1d(sesdf[['scan']]).reshape(-1).tolist()
for sc in scans:
                scndf = sesdf.loc[sesdf['scan'] == sc]
plot_vline(
scndf.iloc[0][mname], '%s_%s' % (ss, sc), axes[-1])
fig.suptitle(title)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.subplots_adjust(top=0.85)
return fig
def plot_all(df, groups, subject=None, figsize=(11.69, 5),
strip_nsubj=10, title='Summary report'):
import matplotlib.gridspec as gridspec
colnames = [v for gnames in groups for v in gnames]
lengs = [len(el) for el in groups]
ncols = np.sum(lengs)
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(1, len(groups), width_ratios=lengs)
subjects = sorted(pd.unique(df.subject.ravel()))
nsubj = len(subjects)
subid = subject
if subid is not None:
try:
subid = int(subid)
except ValueError:
pass
axes = []
for i, snames in enumerate(groups):
axes.append(plt.subplot(gs[i]))
if nsubj > strip_nsubj:
sns.violinplot(data=df[snames], ax=axes[-1])
else:
stdf = df.copy()
if subid is not None:
stdf = stdf.loc[stdf['subject'] != subid]
sns.stripplot(data=stdf[snames], ax=axes[-1], jitter=0.25)
axes[-1].set_xticklabels(
[el.get_text() for el in axes[-1].get_xticklabels()],
rotation='vertical')
plt.ticklabel_format(style='sci', axis='y', scilimits=(-1, 1))
# df[snames].plot(kind='box', ax=axes[-1])
# If we know the subject, place a star for each scan
if subject is not None:
subdf = df.loc[df['subject'] == subid]
scans = sorted(pd.unique(subdf.scan.ravel()))
nstars = len(scans)
for j, s in enumerate(snames):
vals = []
for k, scid in enumerate(scans):
                    val = subdf.loc[subdf.scan == scid, [s]].iloc[0, 0]
vals.append(val)
if len(vals) != nstars:
continue
pos = [j]
if nstars > 1:
pos = np.linspace(j-0.3, j+0.3, num=nstars)
axes[-1].plot(
pos, vals, ms=9, mew=.8, linestyle='None',
color='w', marker='*', markeredgecolor='k',
zorder=10)
fig.suptitle(title)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.subplots_adjust(top=0.85)
return fig
def plot_mosaic(nifti_file, title=None, overlay_mask=None,
figsize=(11.7, 8.3)):
from six import string_types
from pylab import cm
if isinstance(nifti_file, string_types):
nii = nb.load(nifti_file)
mean_data = nii.get_data()
else:
mean_data = nifti_file
z_vals = np.array(range(0, mean_data.shape[2]))
# Reduce the number of slices shown
if mean_data.shape[2] > 70:
rem = 15
# Crop inferior and posterior
mean_data = mean_data[..., rem:-rem]
z_vals = z_vals[rem:-rem]
# Discard one every two slices
mean_data = mean_data[..., ::2]
z_vals = z_vals[::2]
n_images = mean_data.shape[2]
row, col = _calc_rows_columns(figsize[0] / figsize[1], n_images)
if overlay_mask:
overlay_data = nb.load(overlay_mask).get_data()
# create figures
fig = plt.Figure(figsize=figsize)
FigureCanvas(fig)
fig.subplots_adjust(top=0.85)
for image, z_val in enumerate(z_vals):
ax = fig.add_subplot(row, col, image + 1)
data_mask = np.logical_not(np.isnan(mean_data))
if overlay_mask:
ax.set_rasterized(True)
ax.imshow(np.fliplr(mean_data[:, :, image].T), vmin=np.percentile(
mean_data[data_mask], 0.5),
vmax=np.percentile(mean_data[data_mask], 99.5),
cmap=cm.Greys_r, interpolation='nearest', origin='lower')
if overlay_mask:
cmap = cm.Reds # @UndefinedVariable
cmap._init()
alphas = np.linspace(0, 0.75, cmap.N + 3)
cmap._lut[:, -1] = alphas
ax.imshow(np.fliplr(overlay_data[:, :, image].T), vmin=0, vmax=1,
cmap=cmap, interpolation='nearest', origin='lower')
ax.annotate(
str(z_val), xy=(.95, .015), xycoords='axes fraction',
fontsize=10, color='white', horizontalalignment='right',
verticalalignment='bottom')
ax.axis('off')
fig.subplots_adjust(
left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.01, hspace=0.1)
if not title:
_, title = op.split(nifti_file)
title += " (last modified: %s)" % time.ctime(
op.getmtime(nifti_file))
fig.suptitle(title, fontsize='10')
return fig
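# Hypothetical usage sketch (not part of the original module; the paths are
# illustrative only):
#   fig = plot_mosaic("mean_func.nii.gz", title="Mean EPI")
#   fig.savefig("mosaic.pdf", dpi=300)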
def plot_fd(fd_file, title='FD plot', mean_fd_dist=None, figsize=(11.7, 8.3)):
fd_power = _calc_fd(fd_file)
fig = plt.Figure(figsize=figsize)
FigureCanvas(fig)
if mean_fd_dist:
grid = GridSpec(2, 4)
else:
grid = GridSpec(1, 2, width_ratios=[3, 1])
grid.update(hspace=1.0, right=0.95, left=0.1, bottom=0.2)
ax = fig.add_subplot(grid[0, :-1])
ax.plot(fd_power)
ax.set_xlim((0, len(fd_power)))
ax.set_ylabel("Frame Displacement [mm]")
ax.set_xlabel("Frame number")
ylim = ax.get_ylim()
ax = fig.add_subplot(grid[0, -1])
sns.distplot(fd_power, vertical=True, ax=ax)
ax.set_ylim(ylim)
if mean_fd_dist:
ax = fig.add_subplot(grid[1, :])
sns.distplot(mean_fd_dist, ax=ax)
ax.set_xlabel("Mean Frame Displacement (over all subjects) [mm]")
mean_fd = fd_power.mean()
label = r'$\overline{\text{FD}}$ = %g' % mean_fd
plot_vline(mean_fd, label, ax=ax)
fig.suptitle(title)
return fig
def plot_dist(
main_file, mask_file, xlabel, distribution=None, xlabel2=None,
figsize=(11.7, 8.3)):
data = _get_values_inside_a_mask(main_file, mask_file)
fig = plt.Figure(figsize=figsize)
FigureCanvas(fig)
gs = GridSpec(2, 1)
ax = fig.add_subplot(gs[0, 0])
sns.distplot(data.astype(np.double), kde=False, bins=100, ax=ax)
ax.set_xlabel(xlabel)
ax = fig.add_subplot(gs[1, 0])
sns.distplot(np.array(distribution).astype(np.double), ax=ax)
cur_val = np.median(data)
label = "%g" % cur_val
plot_vline(cur_val, label, ax=ax)
ax.set_xlabel(xlabel2)
return fig
def plot_vline(cur_val, label, ax):
ax.axvline(cur_val)
ylim = ax.get_ylim()
vloc = (ylim[0] + ylim[1]) / 2.0
xlim = ax.get_xlim()
pad = (xlim[0] + xlim[1]) / 100.0
ax.text(cur_val - pad, vloc, label, color="blue", rotation=90,
verticalalignment='center', horizontalalignment='right')
def _calc_rows_columns(ratio, n_images):
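    # Search for the smallest grid whose column count, derived from the
    # figure aspect ratio, yields at least n_images cells.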
rows = 1
for _ in range(100):
columns = math.floor(ratio * rows)
total = rows * columns
if total > n_images:
break
columns = math.ceil(ratio * rows)
total = rows * columns
if total > n_images:
break
rows += 1
return rows, columns
def _calc_fd(fd_file):
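    # Framewise displacement: sum of absolute frame-to-frame differences of
    # the six realignment parameters; rotations (assumed to be in degrees
    # here) are converted to arc length on a 50 mm radius sphere.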
lines = open(fd_file, 'r').readlines()
rows = [[float(x) for x in line.split()] for line in lines]
cols = np.array([list(col) for col in zip(*rows)])
translations = np.transpose(np.abs(np.diff(cols[0:3, :])))
rotations = np.transpose(np.abs(np.diff(cols[3:6, :])))
FD_power = np.sum(translations, axis=1) + \
(50 * 3.141 / 180) * np.sum(rotations, axis=1)
# FD is zero for the first time point
FD_power = np.insert(FD_power, 0, 0)
return FD_power
def _get_mean_fd_distribution(fd_files):
mean_FDs = []
max_FDs = []
for fd_file in fd_files:
FD_power = _calc_fd(fd_file)
mean_FDs.append(FD_power.mean())
max_FDs.append(FD_power.max())
return mean_FDs, max_FDs
def _get_values_inside_a_mask(main_file, mask_file):
main_nii = nb.load(main_file)
main_data = main_nii.get_data()
nan_mask = np.logical_not(np.isnan(main_data))
mask = nb.load(mask_file).get_data() > 0
data = main_data[np.logical_and(nan_mask, mask)]
return data
| bsd-3-clause | 3,455,253,553,791,108,600 | 29.764706 | 78 | 0.565462 | false |
moble/sympy | sympy/interactive/tests/test_ipythonprinting.py | 27 | 5891 | """Tests that the IPython printing module is properly loaded. """
from sympy.core.compatibility import u
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
from sympy.utilities.pytest import raises
# run_cell was added in IPython 0.11
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] == "pi"
assert app.user_ns['a2']['text/plain'] == "pi**2"
else:
assert app.user_ns['a'][0]['text/plain'] == "pi"
assert app.user_ns['a2'][0]['text/plain'] == "pi**2"
# Load printing extension
app.run_cell("from sympy import init_printing")
app.run_cell("init_printing()")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
assert app.user_ns['a2']['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
else:
assert app.user_ns['a'][0]['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
assert app.user_ns['a2'][0]['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
def test_print_builtin_option():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
app.run_cell("from sympy import init_printing")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note : In Python 3 the text is unicode, but in 2 it is a string.
# XXX: How can we make this ignore the terminal width? This test fails if
# the terminal is too narrow.
assert text in ("{pi: 3.14, n_i: 3}",
u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
"{n_i: 3, pi: 3.14}",
u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
    # If we enable the default printing, then the dictionaries should render
# as a LaTeX version of the whole dict: ${\pi: 3.14, n_i: 3}$
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
latex = app.user_ns['a']['text/latex']
else:
text = app.user_ns['a'][0]['text/plain']
latex = app.user_ns['a'][0]['text/latex']
assert text in ("{pi: 3.14, n_i: 3}",
u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
"{n_i: 3, pi: 3.14}",
u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
assert latex == r'$$\left \{ n_{i} : 3, \quad \pi : 3.14\right \}$$'
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True, print_builtin=False)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note : In Python 3 the text is unicode, but in 2 it is a string.
# Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}'
# Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}'
# Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}'
assert text in ("{pi: 3.14, n_i: 3}", "{n_i: 3, pi: 3.14}")
def test_matplotlib_bad_latex():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("import IPython")
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import init_printing, Matrix")
app.run_cell("init_printing(use_latex='matplotlib')")
# The png formatter is not enabled by default in this context
app.run_cell("inst.display_formatter.formatters['image/png'].enabled = True")
# Make sure no warnings are raised by IPython
app.run_cell("import warnings")
app.run_cell("warnings.simplefilter('error', IPython.core.formatters.FormatterWarning)")
# This should not raise an exception
app.run_cell("a = format(Matrix([1, 2, 3]))")
| bsd-3-clause | -1,629,738,584,008,438,500 | 45.385827 | 102 | 0.607028 | false |
quanganh2627/fMBT | utils/fmbtuinput.py | 2 | 41533 | # fMBT, free Model Based Testing tool
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA
# 02110-1301 USA.
# pylint: disable = C0103, C0111, R0913
"""
This library offers a Python interface to the Linux uinput system.
Mouse, Touch and Keyboard classes can be used for
creating new mouse, touch and keyboard devices and synthesizing user
inputs through those devices.
"""
cmdline_usage = """
Usage: python fmbtuinput.py -p <list-of-input-device-files>
Example: python fmbtuinput.py -p /dev/input/event*
"""
import array
import glob
import os
import platform
import re
import struct
import thread
import time
import Queue
if os.name != "nt":
import fcntl
else:
class fcntl:
def __getattr__(self, attr):
raise NotImplementedError("%s not available on Windows" % (attr,))
# See /usr/include/linux/input.h
eventTypes = {
"EV_SYN": 0x00,
"EV_KEY": 0x01,
"EV_REL": 0x02,
"EV_ABS": 0x03,
"EV_MSC": 0x04,
"EV_SW": 0x05,
"EV_LED": 0x11,
"EV_SND": 0x12,
"EV_REP": 0x14,
"EV_FF": 0x15,
"EV_PWR": 0x16,
"EV_FF_STATUS": 0x17,
"EV_MAX": 0x1f,
}
keyCodes = {
"KEY_RESERVED": 0,
"KEY_ESC": 1,
"KEY_1": 2,
"KEY_2": 3,
"KEY_3": 4,
"KEY_4": 5,
"KEY_5": 6,
"KEY_6": 7,
"KEY_7": 8,
"KEY_8": 9,
"KEY_9": 10,
"KEY_0": 11,
"KEY_MINUS": 12,
"KEY_EQUAL": 13,
"KEY_BACKSPACE": 14,
"KEY_TAB": 15,
"KEY_Q": 16,
"KEY_W": 17,
"KEY_E": 18,
"KEY_R": 19,
"KEY_T": 20,
"KEY_Y": 21,
"KEY_U": 22,
"KEY_I": 23,
"KEY_O": 24,
"KEY_P": 25,
"KEY_LEFTBRACE": 26,
"KEY_RIGHTBRACE": 27,
"KEY_ENTER": 28,
"KEY_LEFTCTRL": 29,
"KEY_A": 30,
"KEY_S": 31,
"KEY_D": 32,
"KEY_F": 33,
"KEY_G": 34,
"KEY_H": 35,
"KEY_J": 36,
"KEY_K": 37,
"KEY_L": 38,
"KEY_SEMICOLON": 39,
"KEY_APOSTROPHE": 40,
"KEY_GRAVE": 41,
"KEY_LEFTSHIFT": 42,
"KEY_BACKSLASH": 43,
"KEY_Z": 44,
"KEY_X": 45,
"KEY_C": 46,
"KEY_V": 47,
"KEY_B": 48,
"KEY_N": 49,
"KEY_M": 50,
"KEY_COMMA": 51,
"KEY_DOT": 52,
"KEY_SLASH": 53,
"KEY_RIGHTSHIFT": 54,
"KEY_KPASTERISK": 55,
"KEY_LEFTALT": 56,
"KEY_SPACE": 57,
"KEY_CAPSLOCK": 58,
"KEY_F1": 59,
"KEY_F2": 60,
"KEY_F3": 61,
"KEY_F4": 62,
"KEY_F5": 63,
"KEY_F6": 64,
"KEY_F7": 65,
"KEY_F8": 66,
"KEY_F9": 67,
"KEY_F10": 68,
"KEY_NUMLOCK": 69,
"KEY_SCROLLLOCK": 70,
"KEY_KP7": 71,
"KEY_KP8": 72,
"KEY_KP9": 73,
"KEY_KPMINUS": 74,
"KEY_KP4": 75,
"KEY_KP5": 76,
"KEY_KP6": 77,
"KEY_KPPLUS": 78,
"KEY_KP1": 79,
"KEY_KP2": 80,
"KEY_KP3": 81,
"KEY_KP0": 82,
"KEY_KPDOT": 83,
"KEY_ZENKAKUHANKAKU": 85,
"KEY_102ND": 86,
"KEY_F11": 87,
"KEY_F12": 88,
"KEY_RO": 89,
"KEY_KATAKANA": 90,
"KEY_HIRAGANA": 91,
"KEY_HENKAN": 92,
"KEY_KATAKANAHIRAGANA": 93,
"KEY_MUHENKAN": 94,
"KEY_KPJPCOMMA": 95,
"KEY_KPENTER": 96,
"KEY_RIGHTCTRL": 97,
"KEY_KPSLASH": 98,
"KEY_SYSRQ": 99,
"KEY_RIGHTALT": 100,
"KEY_LINEFEED": 101,
"KEY_HOME": 102,
"KEY_UP": 103,
"KEY_PAGEUP": 104,
"KEY_LEFT": 105,
"KEY_RIGHT": 106,
"KEY_END": 107,
"KEY_DOWN": 108,
"KEY_PAGEDOWN": 109,
"KEY_INSERT": 110,
"KEY_DELETE": 111,
"KEY_MACRO": 112,
"KEY_MUTE": 113,
"KEY_VOLUMEDOWN": 114,
"KEY_VOLUMEUP": 115,
"KEY_POWER": 116,
"KEY_KPEQUAL": 117,
"KEY_KPPLUSMINUS": 118,
"KEY_PAUSE": 119,
"KEY_SCALE": 120,
"KEY_KPCOMMA": 121,
"KEY_HANGEUL": 122,
"KEY_HANJA": 123,
"KEY_YEN": 124,
"KEY_LEFTMETA": 125,
"KEY_RIGHTMETA": 126,
"KEY_COMPOSE": 127,
"KEY_STOP": 128,
"KEY_AGAIN": 129,
"KEY_PROPS": 130,
"KEY_UNDO": 131,
"KEY_FRONT": 132,
"KEY_COPY": 133,
"KEY_OPEN": 134,
"KEY_PASTE": 135,
"KEY_FIND": 136,
"KEY_CUT": 137,
"KEY_HELP": 138,
"KEY_MENU": 139,
"KEY_CALC": 140,
"KEY_SETUP": 141,
"KEY_SLEEP": 142,
"KEY_WAKEUP": 143,
"KEY_FILE": 144,
"KEY_SENDFILE": 145,
"KEY_DELETEFILE": 146,
"KEY_XFER": 147,
"KEY_PROG1": 148,
"KEY_PROG2": 149,
"KEY_WWW": 150,
"KEY_MSDOS": 151,
"KEY_COFFEE": 152,
"KEY_DIRECTION": 153,
"KEY_CYCLEWINDOWS": 154,
"KEY_MAIL": 155,
"KEY_BOOKMARKS": 156,
"KEY_COMPUTER": 157,
"KEY_BACK": 158,
"KEY_FORWARD": 159,
"KEY_CLOSECD": 160,
"KEY_EJECTCD": 161,
"KEY_EJECTCLOSECD": 162,
"KEY_NEXTSONG": 163,
"KEY_PLAYPAUSE": 164,
"KEY_PREVIOUSSONG": 165,
"KEY_STOPCD": 166,
"KEY_RECORD": 167,
"KEY_REWIND": 168,
"KEY_PHONE": 169,
"KEY_ISO": 170,
"KEY_CONFIG": 171,
"KEY_HOMEPAGE": 172,
"KEY_REFRESH": 173,
"KEY_EXIT": 174,
"KEY_MOVE": 175,
"KEY_EDIT": 176,
"KEY_SCROLLUP": 177,
"KEY_SCROLLDOWN": 178,
"KEY_KPLEFTPAREN": 179,
"KEY_KPRIGHTPAREN": 180,
"KEY_NEW": 181,
"KEY_REDO": 182,
"KEY_F13": 183,
"KEY_F14": 184,
"KEY_F15": 185,
"KEY_F16": 186,
"KEY_F17": 187,
"KEY_F18": 188,
"KEY_F19": 189,
"KEY_F20": 190,
"KEY_F21": 191,
"KEY_F22": 192,
"KEY_F23": 193,
"KEY_F24": 194,
"KEY_PLAYCD": 200,
"KEY_PAUSECD": 201,
"KEY_PROG3": 202,
"KEY_PROG4": 203,
"KEY_DASHBOARD": 204,
"KEY_SUSPEND": 205,
"KEY_CLOSE": 206,
"KEY_PLAY": 207,
"KEY_FASTFORWARD": 208,
"KEY_BASSBOOST": 209,
"KEY_PRINT": 210,
"KEY_HP": 211,
"KEY_CAMERA": 212,
"KEY_SOUND": 213,
"KEY_QUESTION": 214,
"KEY_EMAIL": 215,
"KEY_CHAT": 216,
"KEY_SEARCH": 217,
"KEY_CONNECT": 218,
"KEY_FINANCE": 219,
"KEY_SPORT": 220,
"KEY_SHOP": 221,
"KEY_ALTERASE": 222,
"KEY_CANCEL": 223,
"KEY_BRIGHTNESSDOWN": 224,
"KEY_BRIGHTNESSUP": 225,
"KEY_MEDIA": 226,
"KEY_SWITCHVIDEOMODE": 227,
"KEY_KBDILLUMTOGGLE": 228,
"KEY_KBDILLUMDOWN": 229,
"KEY_KBDILLUMUP": 230,
"KEY_SEND": 231,
"KEY_REPLY": 232,
"KEY_FORWARDMAIL": 233,
"KEY_SAVE": 234,
"KEY_DOCUMENTS": 235,
"KEY_BATTERY": 236,
"KEY_BLUETOOTH": 237,
"KEY_WLAN": 238,
"KEY_UWB": 239,
"KEY_UNKNOWN": 240,
"KEY_VIDEO_NEXT": 241,
"KEY_VIDEO_PREV": 242,
"KEY_BRIGHTNESS_CYCLE": 243,
"KEY_BRIGHTNESS_ZERO": 244,
"KEY_DISPLAY_OFF": 245,
"KEY_WIMAX": 246,
"KEY_RFKILL": 247,
"KEY_MICMUTE": 248,
"BTN_MISC": 0x100,
"BTN_0": 0x100,
"BTN_1": 0x101,
"BTN_2": 0x102,
"BTN_3": 0x103,
"BTN_4": 0x104,
"BTN_5": 0x105,
"BTN_6": 0x106,
"BTN_7": 0x107,
"BTN_8": 0x108,
"BTN_9": 0x109,
"BTN_MOUSE": 0x110,
"BTN_LEFT": 0x110,
"BTN_RIGHT": 0x111,
"BTN_MIDDLE": 0x112,
"BTN_SIDE": 0x113,
"BTN_EXTRA": 0x114,
"BTN_FORWARD": 0x115,
"BTN_BACK": 0x116,
"BTN_TASK": 0x117,
"BTN_JOYSTICK": 0x120,
"BTN_TRIGGER": 0x120,
"BTN_THUMB": 0x121,
"BTN_THUMB2": 0x122,
"BTN_TOP": 0x123,
"BTN_TOP2": 0x124,
"BTN_PINKIE": 0x125,
"BTN_BASE": 0x126,
"BTN_BASE2": 0x127,
"BTN_BASE3": 0x128,
"BTN_BASE4": 0x129,
"BTN_BASE5": 0x12a,
"BTN_BASE6": 0x12b,
"BTN_DEAD": 0x12f,
"BTN_GAMEPAD": 0x130,
"BTN_A": 0x130,
"BTN_B": 0x131,
"BTN_C": 0x132,
"BTN_X": 0x133,
"BTN_Y": 0x134,
"BTN_Z": 0x135,
"BTN_TL": 0x136,
"BTN_TR": 0x137,
"BTN_TL2": 0x138,
"BTN_TR2": 0x139,
"BTN_SELECT": 0x13a,
"BTN_START": 0x13b,
"BTN_MODE": 0x13c,
"BTN_THUMBL": 0x13d,
"BTN_THUMBR": 0x13e,
"BTN_DIGI": 0x140,
"BTN_TOOL_PEN": 0x140,
"BTN_TOOL_RUBBER": 0x141,
"BTN_TOOL_BRUSH": 0x142,
"BTN_TOOL_PENCIL": 0x143,
"BTN_TOOL_AIRBRUSH": 0x144,
"BTN_TOOL_FINGER": 0x145,
"BTN_TOOL_MOUSE": 0x146,
"BTN_TOOL_LENS": 0x147,
"BTN_TOOL_QUINTTAP": 0x148,
"BTN_TOUCH": 0x14a,
"BTN_STYLUS": 0x14b,
"BTN_STYLUS2": 0x14c,
"BTN_TOOL_DOUBLETAP": 0x14d,
"BTN_TOOL_TRIPLETAP": 0x14e,
"BTN_TOOL_QUADTAP": 0x14f,
"BTN_WHEEL": 0x150,
"BTN_GEAR_DOWN": 0x150,
"BTN_GEAR_UP": 0x151,
}
relCodes = {
"REL_X": 0x00,
"REL_Y": 0x01,
"REL_Z": 0x02,
"REL_RX": 0x03,
"REL_RY": 0x04,
"REL_RZ": 0x05,
"REL_HWHEEL": 0x06,
"REL_DIAL": 0x07,
"REL_WHEEL": 0x08,
"REL_MISC": 0x09,
"REL_MAX": 0x0f,
}
absCodes = {
"ABS_X": 0x00,
"ABS_Y": 0x01,
"ABS_Z": 0x02,
"ABS_RX": 0x03,
"ABS_RY": 0x04,
"ABS_RZ": 0x05,
"ABS_THROTTLE": 0x06,
"ABS_RUDDER": 0x07,
"ABS_WHEEL": 0x08,
"ABS_GAS": 0x09,
"ABS_BRAKE": 0x0a,
"ABS_HAT0X": 0x10,
"ABS_HAT0Y": 0x11,
"ABS_HAT1X": 0x12,
"ABS_HAT1Y": 0x13,
"ABS_HAT2X": 0x14,
"ABS_HAT2Y": 0x15,
"ABS_HAT3X": 0x16,
"ABS_HAT3Y": 0x17,
"ABS_PRESSURE": 0x18,
"ABS_DISTANCE": 0x19,
"ABS_TILT_X": 0x1a,
"ABS_TILT_Y": 0x1b,
"ABS_TOOL_WIDTH": 0x1c,
"ABS_VOLUME": 0x20,
"ABS_MISC": 0x28,
"ABS_MT_SLOT": 0x2f,
"ABS_MT_TOUCH_MAJOR": 0x30,
"ABS_MT_TOUCH_MINOR": 0x31,
"ABS_MT_WIDTH_MAJOR": 0x32,
"ABS_MT_WIDTH_MINOR": 0x33,
"ABS_MT_ORIENTATION": 0x34,
"ABS_MT_POSITION_X": 0x35,
"ABS_MT_POSITION_Y": 0x36,
"ABS_MT_TOOL_TYPE": 0x37,
"ABS_MT_BLOB_ID": 0x38,
"ABS_MT_TRACKING_ID": 0x39,
"ABS_MT_PRESSURE": 0x3a,
"ABS_MT_DISTANCE": 0x3b,
"ABS_MAX": 0x3f,
}
mscCodes = {
"MSC_SERIAL": 0x00,
"MSC_PULSELED": 0x01,
"MSC_GESTURE": 0x02,
"MSC_RAW": 0x03,
"MSC_SCAN": 0x04,
"MSC_TIMESTAMP": 0x05,
"MSC_MAX": 0x07,
"MSC_CNT": 0x08,
}
abs_count = absCodes['ABS_MAX'] + 1
event_codetables = {
eventTypes["EV_SYN"]: {},
eventTypes["EV_KEY"]: keyCodes,
eventTypes["EV_REL"]: relCodes,
eventTypes["EV_ABS"]: absCodes,
}
BUS_PCI = 0x01
BUS_ISAPNP = 0x02
BUS_USB = 0x03
BUS_HIL = 0x04
BUS_BLUETOOTH = 0x05
BUS_VIRTUAL = 0x06
# See struct input_event in /usr/include/linux/input.h
if platform.architecture()[0] == "32bit":
struct_timeval = "II"
else:
struct_timeval = "QQ"
struct_input_event = struct_timeval + 'HHi'
sizeof_input_event = struct.calcsize(struct_input_event)
struct_input_id = 'HHHH'
struct_uinput_user_dev = ('80s' +
struct_input_id +
'i' +
str(abs_count) + 'i' +
str(abs_count) + 'i' +
str(abs_count) + 'i' +
str(abs_count) + 'i')
sizeof_uinput_user_dev = struct.calcsize(struct_uinput_user_dev)
struct_input_absinfo = 'iiii'
# asm-generic/ioctl.h:
IOC_NRBITS = 8
IOC_TYPEBITS = 8
IOC_SIZEBITS = 14
IOC_DIRBITS = 2
IOC_NRSHIFT = 0
IOC_TYPESHIFT = IOC_NRSHIFT + IOC_NRBITS
IOC_SIZESHIFT = IOC_TYPESHIFT + IOC_TYPEBITS
IOC_DIRSHIFT = IOC_SIZESHIFT + IOC_SIZEBITS
IOC_NONE = 0
IOC_WRITE = 1
IOC_READ = 2
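# IOC() packs direction, type, number and argument size into a single ioctl
# request number, mirroring the _IOC() macro in asm-generic/ioctl.h.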
def IOC(dir_, type_, nr, size):
return ((dir_ << IOC_DIRSHIFT) |
(type_ << IOC_TYPESHIFT) |
(nr << IOC_NRSHIFT) |
(size << IOC_SIZESHIFT))
def IO(type_, nr):
return IOC(IOC_NONE, type_, nr, 0)
def IOR(type_, nr, size):
return IOC(IOC_READ, type_, nr, struct.calcsize(size))
def IOW(type_, nr, size):
return IOC(IOC_WRITE, type_, nr, struct.calcsize(size))
def EVIOCGABS(abs):
return IOR(ord('E'), 0x40 + (abs), struct_input_absinfo)
UINPUT_IOCTL_BASE = ord('U')
UI_DEV_CREATE = IO(UINPUT_IOCTL_BASE, 1)
UI_DEV_DESTROY = IO(UINPUT_IOCTL_BASE, 2)
UI_SET_EVBIT = IOW(UINPUT_IOCTL_BASE, 100, 'i')
UI_SET_KEYBIT = IOW(UINPUT_IOCTL_BASE, 101, 'i')
UI_SET_RELBIT = IOW(UINPUT_IOCTL_BASE, 102, 'i')
UI_SET_ABSBIT = IOW(UINPUT_IOCTL_BASE, 103, 'i')
# inverse lookup tables for event/key/rel/abs codes
eventTypesInv = {}
keyCodesInv = {}
relCodesInv = {}
absCodesInv = {}
for d in ["eventTypes", "keyCodes",
"relCodes", "absCodes"]:
globals()[d + "Inv"] = dict([(v, k) for k, v in globals()[d].iteritems()])
def toKeyCode(keyCodeOrName):
if isinstance(keyCodeOrName, int):
return keyCodeOrName
elif keyCodeOrName in keyCodes:
return keyCodes[keyCodeOrName]
elif keyCodeOrName.upper() in keyCodes:
        return keyCodes[keyCodeOrName.upper()]
elif ("KEY_" + keyCodeOrName.upper()) in keyCodes:
return keyCodes["KEY_" + keyCodeOrName.upper()]
else:
raise ValueError('Invalid keycode "%s"' % (keyCodeOrName,))
def toKeyName(keyCode):
if keyCode in keyCodesInv:
return keyCodesInv[keyCode]
else:
raise ValueError('Invalid keycode "%s"' % (keyCode,))
def toButtonCode(buttonCodeOrName):
if isinstance(buttonCodeOrName, str):
buttonCode = toKeyCode(buttonCodeOrName)
elif buttonCodeOrName < 0xf:
buttonCode = keyCodes["BTN_MOUSE"] + buttonCodeOrName
else:
buttonCode = buttonCodeOrName
return buttonCode
def refreshDeviceInfo():
global _g_devices
global _g_deviceNames
global _g_filenames
_g_devices = file("/proc/bus/input/devices").read().split("\n\n")
_g_deviceNames = {}
_g_filenames = {}
for d in _g_devices:
if d.strip() == "":
continue
_name = [line.split('"')[1] for line in d.split('\n')
if line.startswith('N: ')][0]
_g_deviceNames[_name] = ("/dev/input/" +
re.findall('[ =](event[0-9]+)\s', d)[0])
_g_filenames[_g_deviceNames[_name]] = _name
def toEventFilename(deviceName):
return _g_deviceNames[deviceName]
def toEventDeviceName(filename):
return _g_filenames[filename]
class InputDevice(object):
def __init__(self):
if not "_g_devices" in globals():
refreshDeviceInfo()
self._fd = -1
self._filename = None
self._uidev = None
self._created = False
self._opened = False
def __del__(self):
if self._created:
self.destroy()
def startCreating(self, name, vendor, product, version,
absmin=None, absmax=None):
if self._fd > 0:
raise InputDeviceError("InputDevice is already open")
self._fd = os.open("/dev/uinput", os.O_WRONLY | os.O_NONBLOCK)
if absmin == None:
absmin = [0 for _ in xrange(abs_count)]
if absmax == None:
absmax = [0 for _ in xrange(abs_count)]
absfuzz = [0 for _ in xrange(abs_count)]
absflat = [0 for _ in xrange(abs_count)]
self._uidev = struct.pack(struct_uinput_user_dev,
name, # name
BUS_USB, # id.bus_type
vendor, # id.vendor
product, # id.product
version, # id.version
0, # ff_effects_max
                                  # TODO: why does absmin + absmax give an
                                  # error for touch?
*(absmax + absmin + absfuzz + absflat)
)
def finishCreating(self):
if self._fd < 1:
raise InputDeviceError("startCreating() not called")
bytes_written = os.write(self._fd, self._uidev)
if bytes_written != sizeof_uinput_user_dev:
raise InputDeviceError(
"Writing to /dev/uinput failed, wrote %s/%s bytes"
% (bytes_written, sizeof_uinput_user_dev))
rv = fcntl.ioctl(self._fd, UI_DEV_CREATE)
if rv != 0:
raise InputDeviceError(
"Creating device failed, ioctl UI_DEV_CREATE returned %s"
% (rv,))
self._created = True
return True
def destroy(self):
if self._created:
fcntl.ioctl(self._fd, UI_DEV_DESTROY)
self._created = False
self.close()
def open(self, filename):
if self._fd > 0:
raise InputDeviceError("InputDevice is already open")
if not filename.startswith("/dev/input"):
filename = toEventFilename(filename)
self._fd = os.open(filename, os.O_WRONLY | os.O_NONBLOCK)
self._filename = filename
self._created = False
return self
def close(self):
if self._fd > 0:
os.close(self._fd)
self._fd = -1
def filename(self):
return self._filename
def addCap(self, capBit, capCodeOrName, capCode2Name):
if self._fd < 1:
raise InputDeviceError("startCreating() not called")
if self._created or self._opened:
raise InputDeviceError("Cannot add capabilities after creation")
if isinstance(capCodeOrName, int):
capCode = capCodeOrName
elif capCodeOrName in capCode2Name:
capCode = capCode2Name[capCodeOrName]
else:
raise InputDeviceError('Unknown name "%s"' % (capCodeOrName,))
return fcntl.ioctl(self._fd, capBit, capCode)
def addEvent(self, eventCodeOrName):
return self.addCap(UI_SET_EVBIT, eventCodeOrName, eventTypes)
def addKey(self, keyCodeOrName):
return self.addCap(UI_SET_KEYBIT, keyCodeOrName, keyCodes)
def addRel(self, relCodeOrName):
return self.addCap(UI_SET_RELBIT, relCodeOrName, relCodes)
def addAbs(self, absCodeOrName):
return self.addCap(UI_SET_ABSBIT, absCodeOrName, absCodes)
def send(self, type_, code, value):
if self._fd < 1:
raise InputDeviceError("InputDevice is not open")
if isinstance(type_, str):
typeCode = eventTypes[type_]
else:
typeCode = type_
if isinstance(code, str):
codeCode = event_codetables[typeCode][code]
else:
codeCode = code
return sendInputEvent(self._fd, typeCode, codeCode, value)
def sync(self):
if self._fd < 1:
raise InputDeviceError("InputDevice is not open")
return sendInputSync(self._fd)
class InputDeviceError(Exception):
pass
class Mouse(InputDevice):
def __init__(self, absoluteMove=False):
"""
Parameters:
absoluteMove (boolean, optional)
force move(x,y) to send absolute coordinates instead
of standard relative movement. This helps avoiding
mouse pointer drift in some occasions. The default
is False.
"""
InputDevice.__init__(self)
self._x = 0
self._y = 0
self._sendAbs = absoluteMove
def create(self, name="Virtual fMBT Mouse",
vendor=0xf4b7, product=0x4053, version=1):
self.startCreating(name, vendor, product, version)
self.addEvent("EV_KEY")
self.addEvent("EV_REL")
if self._sendAbs:
self.addEvent("EV_ABS")
self.addEvent("EV_SYN")
self.addRel("REL_X")
self.addRel("REL_Y")
self.addRel("REL_HWHEEL")
self.addRel("REL_WHEEL")
self.addKey("BTN_LEFT")
self.addKey("BTN_RIGHT")
self.addKey("BTN_MIDDLE")
self.addKey("BTN_SIDE")
self.addKey("BTN_EXTRA")
self.addKey("BTN_FORWARD")
self.addKey("BTN_BACK")
self.addKey("BTN_TASK")
if self._sendAbs:
self.addAbs("ABS_X")
self.addAbs("ABS_Y")
self.finishCreating()
return self
def move(self, x, y):
"""
Move mouse cursor to coordinates x, y.
"""
if self._sendAbs:
self.send("EV_ABS", "ABS_X", x)
self.send("EV_ABS", "ABS_Y", y)
else:
deltaX = x - self._x
deltaY = y - self._y
self.send("EV_REL", "REL_X", deltaX)
self.send("EV_REL", "REL_Y", deltaY)
self.sync()
self.setXY(x, y)
def moveRel(self, deltaX, deltaY):
self.send("EV_REL", "REL_X", deltaX)
self.send("EV_REL", "REL_Y", deltaY)
self.sync()
self.setXY(self._x + deltaX, self._y + deltaY)
def press(self, button):
buttonCode = toButtonCode(button)
self.send("EV_KEY", buttonCode, 1)
self.sync()
def release(self, button):
buttonCode = toButtonCode(button)
self.send("EV_KEY", buttonCode, 0)
self.sync()
def setXY(self, x, y):
"""
Resets relative mouse position to (x, y), does not synthesize
event. Example: disable possible mouse pointer drift:
mouse.moveRel(-4096, -4096) # move to the top-left corner
mouse.setXY(0, 0) # set current pointer coordinates to 0, 0
After this, mouse.move(x, y) will synthesize relative mouse
move event which will drive cursor to coordinates x, y.
"""
self._x = x
self._y = y
def xy(self):
return (self._x, self._y)
def tap(self, x, y, button):
self.move(x, y)
self.press(button)
self.release(button)
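def _example_mouse_usage():
    # Hypothetical helper, not part of the original API: sketches the typical
    # call sequence for a virtual mouse. Requires write access to /dev/uinput.
    mouse = Mouse().create()
    mouse.moveRel(-4096, -4096) # drive the pointer to the top-left corner
    mouse.setXY(0, 0)           # re-anchor the tracked coordinates
    mouse.tap(100, 200, "BTN_LEFT")
    mouse.destroy()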
class Touch(InputDevice):
"""
Simulates touchpanel and touchpad
"""
def __init__(self, maxX = None, maxY = None,
screenWidth = None, screenHeight = None, screenAngle = None):
InputDevice.__init__(self)
self._maxX = maxX
self._maxY = maxY
self._screenW = screenWidth
self._screenH = screenHeight
self._screenA = screenAngle
self._maxPressure = None
self._multiTouch = True
self._mtTrackingId = 0
self._mtTracking = {}
self._hoover = (0, 0)
def create(self, name="Virtual fMBT Touch",
vendor=0xf4b7, product=0x70c5, version=1,
maxX=0xffff, maxY=0xffff, maxPressure=None,
multiTouch = True):
absmin = [0 for _ in xrange(abs_count)]
absmax = [0 for _ in xrange(abs_count)]
absmax[absCodes["ABS_X"]] = maxX
absmax[absCodes["ABS_Y"]] = maxY
if maxPressure != None:
self._maxPressure = maxPressure
absmax[absCodes["ABS_PRESSURE"]] = self._maxPressure
absmax[absCodes["ABS_MT_SLOT"]] = 16
absmax[absCodes["ABS_MT_TRACKING_ID"]] = 0x0fffffff
absmax[absCodes["ABS_MT_POSITION_X"]] = maxX
absmax[absCodes["ABS_MT_POSITION_Y"]] = maxY
self._maxX = maxX
self._maxY = maxY
self._multiTouch = multiTouch
self.startCreating(name, vendor, product, version,
absmin=absmin, absmax=absmax)
self.addEvent("EV_KEY")
self.addEvent("EV_ABS")
self.addEvent("EV_SYN")
self.addKey("BTN_TOUCH")
self.addAbs("ABS_X")
self.addAbs("ABS_Y")
if self._maxPressure != None:
self.addAbs("ABS_PRESSURE")
if self._multiTouch:
self.addAbs("ABS_MT_SLOT")
self.addAbs("ABS_MT_TRACKING_ID")
self.addAbs("ABS_MT_POSITION_X")
self.addAbs("ABS_MT_POSITION_Y")
self.finishCreating()
return self
def open(self, filename):
InputDevice.open(self, filename)
# detect touch device capabilities and max values
# nfo is struct input_absinfo
nfo = array.array('i', range(6))
fcntl.ioctl(self._fd, EVIOCGABS(absCodes["ABS_X"]), nfo, 1)
self._maxX = nfo[2]
fcntl.ioctl(self._fd, EVIOCGABS(absCodes["ABS_Y"]), nfo, 1)
self._maxY = nfo[2]
return self
def setScreenSize(self, (width, height)):
self._screenW, self._screenH = (width, height)
def setScreenAngle(self, angle):
self._screenA = angle
def _angleXY(self, x, y, angle=None):
"""return x, y in screen without rotation"""
if angle == None:
angle = self._screenA
sw, sh = self._screenW, self._screenH
if angle:
while angle < 0:
angle += 360
while angle > 360:
angle -= 360
if angle == 90:
ax = self._screenH - y
ay = x
sw, sh = self._screenH, self._screenW
elif angle == 180:
ax = self._screenH - x
ay = self._screenW - y
elif angle == 270:
ax = y
ay = self._screenW - x
sw, sh = self._screenH, self._screenW
else:
raise ValueError('Illegal screen rotation angle %s' %
(self._screenA,))
else:
ax, ay = x, y
return (sw, sh, ax, ay)
def _tXY(self, x, y):
"""convert x, y to touch screen coordinates"""
if self._screenW and self._maxX and self._screenH and self._maxY:
w, h, x, y = self._angleXY(x, y)
x = int((self._maxX * x) / w)
y = int((self._maxY * y) / h)
return (x, y)
else:
return (x, y)
def absToScreenXY(self, absX, absY):
if self._screenW and self._maxX and self._screenH and self._maxY:
x = int(self._screenW * absX / self._maxX)
y = int(self._screenH * absY / self._maxY)
if self._screenA:
_, _, x, y = self._angleXY(x, y, -self._screenA)
return (x, y)
else:
return (absX, absY)
def _startTracking(self, finger, x, y):
self._mtTrackingId += 1
usedSlots = set([self._mtTracking[fngr][0]
for fngr in self._mtTracking])
for freeSlot in xrange(16):
if not freeSlot in usedSlots:
break
else:
raise ValueError("No free slots for multitouch")
self._mtTracking[finger] = [freeSlot, self._mtTrackingId, x, y]
self._sendSlot(finger)
self.send("EV_ABS", "ABS_MT_TRACKING_ID", self._mtTrackingId)
tx, ty = self._tXY(x, y)
self.send("EV_ABS", "ABS_MT_POSITION_X", tx)
self.send("EV_ABS", "ABS_MT_POSITION_Y", ty)
return self._mtTrackingId
def _stopTracking(self, finger):
self._sendSlot(finger)
self.send("EV_ABS", "ABS_MT_TRACKING_ID", -1)
del self._mtTracking[finger]
def _sendSlot(self, finger):
slot = self._mtTracking[finger][0]
self.send("EV_ABS", "ABS_MT_SLOT", slot)
def tap(self, x, y, pressure=None):
self.pressFinger(-1, x, y, pressure)
self.releaseFinger(-1)
# Compatibility API to allow using a Touch almost like a Mouse
def move(self, x, y):
if len(self._mtTracking.keys()) == 0:
self._hoover = (x, y)
else:
finger = sorted(self._mtTracking.keys())[0]
return self.moveFinger(finger, x, y)
def press(self, finger):
return self.pressFinger(finger, *self._hoover)
def release(self, finger):
return self.releaseFinger(finger)
# end of compatibility API
# Multi-touch API
def pressFinger(self, finger, x, y, pressure=None):
"""Add a finger to current multitouch gesture. If multitouch gesture
is not started, it starts automatically.
"""
if self._multiTouch and not finger in self._mtTracking:
self._startTracking(finger, x, y)
if pressure != None and self._maxPressure != None:
self.send("EV_ABS", "ABS_PRESSURE", pressure)
self.send("EV_KEY", "BTN_TOUCH", 1)
tx, ty = self._tXY(x, y)
self.send("EV_ABS", "ABS_X", tx)
self.send("EV_ABS", "ABS_Y", ty)
self.sync()
def releaseFinger(self, finger):
"""Remove a finger from current multitouch gesture. When last finger
is raised from the screen, multitouch gesture ends."""
if self._multiTouch:
self._stopTracking(finger)
self.send("EV_KEY", "BTN_TOUCH", 0)
for fngr in self._mtTracking:
                # some finger is still pressed; non-multitouch readers get
                # their coordinates from one of those
tx, ty = self._tXY(self._mtTracking[fngr][2],
self._mtTracking[fngr][3])
self.send("EV_ABS", "ABS_X", tx)
self.send("EV_ABS", "ABS_Y", ty)
                break # only one set of coordinates will be sent.
self.sync()
def moveFinger(self, finger, x, y):
"""Move a finger in current multitouch gesture"""
self._sendSlot(finger)
tx, ty = self._tXY(x, y)
if self._multiTouch:
self.send("EV_ABS", "ABS_MT_POSITION_X", tx)
self.send("EV_ABS", "ABS_MT_POSITION_Y", ty)
self.send("EV_ABS", "ABS_X", tx)
self.send("EV_ABS", "ABS_Y", ty)
self._mtTracking[finger][2] = x # last X
self._mtTracking[finger][3] = y # last Y
self.sync()
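def _example_touch_gesture():
    # Hypothetical helper, not part of the original API: a two-finger pinch
    # on a virtual touch panel; all coordinates are illustrative.
    touch = Touch().create(maxX=32767, maxY=32767)
    touch.setScreenSize((1080, 1920))
    touch.pressFinger(0, 300, 960)
    touch.pressFinger(1, 780, 960)
    for step in range(1, 11):
        touch.moveFinger(0, 300 + step * 20, 960)
        touch.moveFinger(1, 780 - step * 20, 960)
    touch.releaseFinger(0)
    touch.releaseFinger(1)
    touch.destroy()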
class Keyboard(InputDevice):
def __init__(self):
InputDevice.__init__(self)
def create(self, name="Virtual fMBT Keyboard",
vendor=0xf4b7, product=0x4ebd, version=1):
self.startCreating(name, vendor, product, version)
self.addEvent("EV_KEY")
self.addEvent("EV_SYN")
for keyName in keyCodes:
if keyName.startswith("KEY_"):
self.addKey(keyCodes[keyName])
self.finishCreating()
return self
def press(self, keyCodeOrName):
# TODO: there should be different MSC_SCAN matrix location for each key
self.send("EV_MSC", mscCodes["MSC_SCAN"], 458793)
self.send("EV_KEY", toKeyCode(keyCodeOrName), 1)
self.sync()
def release(self, keyCodeOrName):
# TODO: there should be different MSC_SCAN matrix location for each key
self.send("EV_MSC", mscCodes["MSC_SCAN"], 458793)
self.send("EV_KEY", toKeyCode(keyCodeOrName), 0)
self.sync()
def tap(self, keyCodeOrName):
keyCode = toKeyCode(keyCodeOrName)
self.press(keyCode)
self.release(keyCode)
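def _example_keyboard_usage():
    # Hypothetical helper, not part of the original API: type two letters and
    # press enter. Plain characters resolve through toKeyCode(), so "h" works
    # as well as "KEY_H".
    kbd = Keyboard().create()
    kbd.tap("h")
    kbd.tap("i")
    kbd.tap("KEY_ENTER")
    kbd.destroy()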
def sendInputSync(devFd):
return sendInputEvent(devFd, 0, 0, 0)
def sendInputEvent(devFd, type_, code, value):
t = time.time()
t_sec = int(t)
t_usec = int(1000000*(t-t_sec))
rv = os.write(devFd,
struct.pack(struct_input_event,
t_sec, t_usec,
type_,
code,
value))
return rv == sizeof_input_event
def eventToString(inputEvent):
tim, tus, typ, cod, val = struct.unpack(struct_input_event, inputEvent)
styp = eventTypesInv.get(typ, "?")
if styp == "EV_KEY":
scod = keyCodesInv.get(cod, "?")
elif styp == "EV_REL":
scod = relCodesInv.get(cod, "?")
elif styp == "EV_ABS":
scod = absCodesInv.get(cod, "?")
else:
scod = "N/A"
if typ == 0:
return styp
else:
return "%8s.%s type: %4s (%5s), code: %5s (%15s) value: %8s" % \
(tim, str(tus).zfill(6), typ, styp, cod, scod, val)
def queueEventsFromFile(filename, queue, lock, filterOpts):
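    # filterOpts may contain "type" (a list of accepted event type names or
    # codes) and "touchScreen" (a Touch instance whose absolute coordinates
    # are converted to screen coordinates before queueing).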
if isinstance(filterOpts, dict) and "type" in filterOpts:
allowedTypes = set()
for t in filterOpts["type"]:
if isinstance(t, str):
allowedTypes.add(eventTypes[t])
else:
allowedTypes.add(t)
else:
allowedTypes = set(eventTypes.values())
if ("touchScreen" in filterOpts and
filename == filterOpts["touchScreen"].filename()):
touchScreen = filterOpts["touchScreen"]
absXCodes = set([absCodes["ABS_X"], absCodes["ABS_MT_POSITION_X"]])
absYCodes = set([absCodes["ABS_Y"], absCodes["ABS_MT_POSITION_Y"]])
absType = eventTypes["EV_ABS"]
else:
touchScreen = None
fd = os.open(filename, os.O_RDONLY)
try:
while 1:
eventData = os.read(fd, sizeof_input_event)
if not lock.locked():
return
if not eventData:
break
(ts, tus, typ, cod, val) = struct.unpack(struct_input_event, eventData)
if touchScreen and typ == absType:
if cod in absXCodes:
val, _ = touchScreen.absToScreenXY(val, 0)
elif cod in absYCodes:
_, val = touchScreen.absToScreenXY(0, val)
if typ in allowedTypes:
queue.put((ts, tus, typ, cod, val))
finally:
os.close(fd)
# _g_recQL dictionary contains events being actively recorded
# - key: filename, like "/dev/input/event0"
# - value: (eventQueue, lock)
# A thread is filling eventQueue with events from filename.
# Once the lock is released, the thread will quit without writing
# anything to the eventQueue anymore.
_g_recQL = {}
_g_unfetchedEvents = []
def queueEventsFromFiles(listOfFilenames, filterOpts):
global _g_recQL
for filename in listOfFilenames:
q = Queue.Queue()
l = thread.allocate_lock()
l.acquire()
if filename in _g_recQL:
# previous reader thread should quit
_g_recQL[filename][1].release()
thread.start_new_thread(
queueEventsFromFile, (filename, q, l, filterOpts))
_g_recQL[filename] = (q, l)
def startQueueingEvents(filterOpts):
refreshDeviceInfo()
if len(_g_recQL) > 0:
# already queueing, restart
stopQueueingEvents()
if "device" in filterOpts:
deviceFiles = []
for n in filterOpts["device"]:
if n in _g_deviceNames:
deviceFiles.append(_g_deviceNames[n])
elif os.access(n, os.R_OK):
deviceFiles.append(n)
del filterOpts["device"]
else:
deviceFiles = glob.glob("/dev/input/event[0-9]*")
queueEventsFromFiles(deviceFiles, filterOpts)
def stopQueueingEvents():
global _g_recQL
global _g_unfetchedEvents
for filename in _g_recQL:
_g_recQL[filename][1].release()
_g_unfetchedEvents = fetchQueuedEvents()
_g_recQL = {}
def fetchQueuedEvents():
global _g_unfetchedEvents
if len(_g_recQL) == 0: # no active recording
rv = _g_unfetchedEvents
_g_unfetchedEvents = []
return rv
else: # events are being recorded
events = []
for filename in _g_recQL:
events.extend(fetchQueuedEventsFromFile(filename))
return events
def fetchQueuedEventsFromFile(filename):
events = []
q = _g_recQL[filename][0]
deviceName = toEventDeviceName(filename)
while 1:
try:
ts, tus, typ, cod, val = q.get_nowait()
events.append((deviceName, ts + tus/1000000.0, typ, cod, val))
except Queue.Empty:
break
return events
def printEventsFromFile(filename):
fd = os.open(filename, os.O_RDONLY)
sdev = filename.split("/")[-1]
try:
while 1:
inputEvent = os.read(fd, sizeof_input_event)
if not inputEvent:
break
print sdev, eventToString(inputEvent)
finally:
os.close(fd)
if __name__ == "__main__":
import getopt
import sys
opt_print_devices = []
opts, remainder = getopt.getopt(
sys.argv[1:], 'hp',
['help', 'print'])
for opt, arg in opts:
if opt in ['-h', '--help']:
print cmdline_usage
sys.exit(0)
elif opt in ['-p', '--print']:
if not remainder:
print cmdline_usage
opt_print_devices = remainder
if opt_print_devices:
for deviceFilename in opt_print_devices:
thread.start_new_thread(printEventsFromFile, (deviceFilename,))
raw_input("Press ENTER to stop printing...\n")
| lgpl-2.1 | -7,900,114,834,811,422,000 | 32.440419 | 83 | 0.48376 | false |
skywalka/splunk-for-nagios | bin/liveserviceokstatus.py | 1 | 1878 | # Script to request services with OK status and total services by accessing MK Livestatus
# Required field to be passed to this script from Splunk: n/a
import socket,string,sys,re,splunk.Intersplunk,mklivestatus
results = []
try:
results,dummyresults,settings = splunk.Intersplunk.getOrganizedResults()
for r in results:
try:
HOST = mklivestatus.HOST
PORT = mklivestatus.PORT
s = None
liveserviceok = 0
liveservicetotal = 0
for h in HOST:
content = [ "GET services\nStats: state = 0\nStats: state != 9999\n" ]
query = "".join(content)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((h, PORT))
except socket.error, (value,message):
if s:
s.close()
#Error: Could not open socket: connection refused (MK Livestatus not setup in xinetd?)
break
s.send(query)
s.shutdown(socket.SHUT_WR)
data = s.recv(100000000)
data2 = (re.findall(r'(No UNIX socket)', data))
if data2:
#Error: MK Livestatus module not loaded?
s.close()
else:
liveservices2 = data.strip()
liveservices = liveservices2.split(";")
s.close()
liveserviceokind = int(liveservices[0])
liveservicetotalind = int(liveservices[1])
liveserviceok = liveserviceok + liveserviceokind
liveservicetotal = liveservicetotal + liveservicetotalind
r["liveserviceokstatus"] = liveserviceok
r["liveservicetotalstatus"] = liveservicetotal
except:
r["liveserviceokstatus"] = "0"
r["liveservicetotalstatus"] = "0"
except:
import traceback
stack = traceback.format_exc()
results = splunk.Intersplunk.generateErrorResults("Error : Traceback: " + str(stack))
splunk.Intersplunk.outputResults( results )
| gpl-3.0 | -7,778,708,250,402,182,000 | 32.535714 | 93 | 0.636315 | false |
cernops/neutron | neutron/tests/unit/midonet/mock_lib.py | 11 | 8658 | # Copyright (C) 2013 Midokura PTE LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
def get_bridge_mock(id=None, **kwargs):
if id is None:
id = str(uuid.uuid4())
bridge = mock.Mock()
bridge.get_id.return_value = id
bridge.get_tenant_id.return_value = kwargs.get("tenant_id", "test-tenant")
bridge.get_name.return_value = kwargs.get("name", "net")
bridge.get_ports.return_value = []
bridge.get_peer_ports.return_value = []
bridge.get_admin_state_up.return_value = kwargs.get("admin_state_up", True)
return bridge
def get_bridge_port_mock(id=None, bridge_id=None, **kwargs):
if id is None:
id = str(uuid.uuid4())
if bridge_id is None:
bridge_id = str(uuid.uuid4())
port = mock.Mock()
port.get_id.return_value = id
port.get_bridge_id.return_value = bridge_id
port.get_admin_state_up.return_value = kwargs.get("admin_state_up", True)
port.get_type.return_value = "Bridge"
port.create.return_value = port
return port
def get_chain_mock(id=None, tenant_id='test-tenant', name='chain',
rules=None):
if id is None:
id = str(uuid.uuid4())
if rules is None:
rules = []
chain = mock.Mock()
chain.get_id.return_value = id
chain.get_tenant_id.return_value = tenant_id
chain.get_name.return_value = name
chain.get_rules.return_value = rules
return chain
def get_port_group_mock(id=None, tenant_id='test-tenant', name='pg'):
if id is None:
id = str(uuid.uuid4())
port_group = mock.Mock()
port_group.get_id.return_value = id
port_group.get_tenant_id.return_value = tenant_id
port_group.get_name.return_value = name
return port_group
def get_router_mock(id=None, **kwargs):
if id is None:
id = str(uuid.uuid4())
router = mock.Mock()
router.get_id.return_value = id
router.get_tenant_id.return_value = kwargs.get("tenant_id", "test-tenant")
router.get_name.return_value = kwargs.get("name", "router")
router.get_ports.return_value = []
router.get_peer_ports.return_value = []
router.get_routes.return_value = []
router.get_admin_state_up.return_value = kwargs.get("admin_state_up", True)
return router
def get_rule_mock(id=None, chain_id=None, properties=None):
if id is None:
id = str(uuid.uuid4())
if chain_id is None:
chain_id = str(uuid.uuid4())
if properties is None:
properties = {}
rule = mock.Mock()
rule.get_id.return_value = id
rule.get_chain_id.return_value = chain_id
rule.get_properties.return_value = properties
return rule
def get_subnet_mock(bridge_id=None, gateway_ip='10.0.0.1',
subnet_prefix='10.0.0.0', subnet_len=int(24)):
if bridge_id is None:
bridge_id = str(uuid.uuid4())
subnet = mock.Mock()
subnet.get_id.return_value = subnet_prefix + '/' + str(subnet_len)
subnet.get_bridge_id.return_value = bridge_id
subnet.get_default_gateway.return_value = gateway_ip
subnet.get_subnet_prefix.return_value = subnet_prefix
subnet.get_subnet_length.return_value = subnet_len
return subnet
class MidonetLibMockConfig():
def __init__(self, inst):
self.inst = inst
def _create_bridge(self, **kwargs):
return get_bridge_mock(**kwargs)
def _create_router(self, **kwargs):
return get_router_mock(**kwargs)
def _create_subnet(self, bridge, gateway_ip, subnet_prefix, subnet_len):
return get_subnet_mock(bridge.get_id(), gateway_ip=gateway_ip,
subnet_prefix=subnet_prefix,
subnet_len=subnet_len)
def _add_bridge_port(self, bridge, **kwargs):
return get_bridge_port_mock(bridge_id=bridge.get_id(), **kwargs)
def _get_bridge(self, id):
return get_bridge_mock(id=id)
def _get_port(self, id):
return get_bridge_port_mock(id=id)
def _get_router(self, id):
return get_router_mock(id=id)
def _update_bridge(self, id, **kwargs):
return get_bridge_mock(id=id, **kwargs)
def setup(self):
# Bridge methods side effects
self.inst.create_bridge.side_effect = self._create_bridge
self.inst.get_bridge.side_effect = self._get_bridge
self.inst.update_bridge.side_effect = self._update_bridge
# Subnet methods side effects
self.inst.create_subnet.side_effect = self._create_subnet
# Port methods side effects
ex_bp = self.inst.add_bridge_port
ex_bp.side_effect = self._add_bridge_port
self.inst.get_port.side_effect = self._get_port
# Router methods side effects
self.inst.create_router.side_effect = self._create_router
self.inst.get_router.side_effect = self._get_router
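# Example (sketch): how a test is assumed to wire these side effects onto a
# plain mock before exercising the plugin code.
#
#   midolib = mock.Mock()
#   MidonetLibMockConfig(midolib).setup()
#   bridge = midolib.create_bridge(tenant_id='t1', name='net1')
#   assert midolib.get_bridge(bridge.get_id()).get_id() == bridge.get_id()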
class MidoClientMockConfig():
def __init__(self, inst):
self.inst = inst
self.chains_in = None
self.port_groups_in = None
self.chains_out = None
self.rules_out = None
self.port_groups_out = None
def _get_query_tenant_id(self, query):
if query is not None and query['tenant_id']:
tenant_id = query['tenant_id']
else:
tenant_id = 'test-tenant'
return tenant_id
def _get_bridge(self, id):
return get_bridge_mock(id=id)
def _get_chain(self, id, query=None):
if not self.chains_in:
return []
tenant_id = self._get_query_tenant_id(query)
for chain in self.chains_in:
chain_id = chain['id']
            if chain_id == id:
rule_mocks = []
if 'rules' in chain:
for rule in chain['rules']:
rule_mocks.append(
get_rule_mock(id=rule['id'],
chain_id=id,
properties=rule['properties']))
return get_chain_mock(id=chain_id, name=chain['name'],
tenant_id=tenant_id, rules=rule_mocks)
return None
def _get_chains(self, query=None):
if not self.chains_in:
return []
tenant_id = self._get_query_tenant_id(query)
self.chains_out = []
self.rules_out = []
for chain in self.chains_in:
chain_id = chain['id']
rule_mocks = []
if 'rules' in chain:
for rule in chain['rules']:
rule_mocks.append(
get_rule_mock(id=rule['id'],
                                      chain_id=chain_id,
properties=rule['properties']))
self.rules_out += rule_mocks
self.chains_out.append(get_chain_mock(id=chain_id,
name=chain['name'],
tenant_id=tenant_id,
rules=rule_mocks))
return self.chains_out
def _get_port_groups(self, query=None):
if not self.port_groups_in:
return []
tenant_id = self._get_query_tenant_id(query)
self.port_groups_out = []
for port_group in self.port_groups_in:
self.port_groups_out.append(get_port_group_mock(
id=port_group['id'], name=port_group['name'],
tenant_id=tenant_id))
return self.port_groups_out
def _get_router(self, id):
return get_router_mock(id=id)
def _add_bridge_port(self, bridge):
return get_bridge_port_mock(bridge_id=bridge.get_id())
def setup(self):
self.inst.get_bridge.side_effect = self._get_bridge
self.inst.get_chains.side_effect = self._get_chains
self.inst.get_chain.side_effect = self._get_chain
self.inst.get_port_groups.side_effect = self._get_port_groups
self.inst.get_router.side_effect = self._get_router
self.inst.add_bridge_port.side_effect = self._add_bridge_port
| apache-2.0 | -6,933,128,995,830,897,000 | 32.172414 | 79 | 0.58651 | false |
matteoalessiocarrara/lib-figafind | src/lib/fbfilter/src/lib/htmlfbapi/src/lib/fbwrapper/src/lib/bot_virtualbrowser/src/lib/human/src/virtualbrowser_obj.py | 6 | 8091 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Matteo Alessio Carrara <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Componenti principali del browser """
import logging
from bs4 import BeautifulSoup
import requests2
import requests
import version
# Logging system configuration
logger = logging.getLogger(version.lib_name)
logger.addHandler(logging.NullHandler())
class Window(object):
"""Una finestra del browser"""
def __init__(self, parent_browser, window_id, tabs_manager_class):
"""
Crea una nuova finestra, con dentro una scheda vuota
Parameters
----------
parent_browser : virtualbrowser.Browser
Il browser che ha aperto questa finestra
window_id : str
Un identificatore per la finestra, di solito generato automaticamente
dall'oggetto WindowsManager
tabs_manager_class : managers.TabsManager
La classe del gestore delle schede, è possibile specificare una classe
derivata da managers.TabsManager
"""
logger.info("Aprendo una nuova finestra")
# Prima di tutto, impostiamo le informazioni della finestra
self.__set_parent_browser(parent_browser)
self.__set_window_id(window_id)
# Poi creiamo l'oggetto session
self.session = self.new_session()
# Impostiamo lo user agent predefinito per le nuove finestre
self.set_session_ua(parent_browser.default_ua)
# Ora creiamo il gestore delle schede
logger.debug("Tabs manager class = %s", tabs_manager_class)
self.tabs_manager = tabs_manager_class(self)
# E per finire, apriamo una scheda vuota
self.tabs_manager.add_tab()
    def __set_parent_browser(self, browser):
        """Set the browser that created this window"""
        self.__parent_browser = browser
    def get_parent_browser(self):
        """Return the browser that created this window"""
        return self.__parent_browser
    def __set_window_id(self, id_):
        """Set an identifier for this window"""
        self.__window_id = id_
        logger.debug("Window id: %s", id_)
    def get_window_id(self):
        """Return the identifier of this window"""
        return self.__window_id
    def new_session(self):
        """Return a new requests2.Session object"""
        return requests2.Session()
    def set_session_ua(self, ua):
        """
        Change the user agent used in this window
        WARNING: the user agent is stored in the self.session object; if that
        object is replaced, the user agent has to be set again
        """
        self.session.headers['User-Agent'] = ua
        logger.info("User agent updated in window %s: %s", self.win_id, ua)
    def get_session_ua(self):
        """Return the user agent used in this window"""
        return self.session.headers['User-Agent']
    def close(self):
        """Close this window"""
        logger.info("Closing window %s", self.win_id)
        # First we have to close all tabs open in this window
        # Careful, the tab dictionary cannot be iterated directly:
        #
        # for tab_id in self.tabs_manager.tabs:
        #     RuntimeError: dictionary changed size during iteration
        tabs = tuple(self.tabs_manager.tabs.values())
        for tab in tabs:
            tab.close(closing_window=True)
        # And now close this window
        self.parent_browser.windows_manager.remove_window(self.win_id)
parent_browser = property(get_parent_browser)
session_ua = property(get_session_ua, set_session_ua)
win_id = property(get_window_id)
class Tab(object):
"""Una scheda"""
def __init__(self, parent_window, tab_id, url=None):
"""
Parameters
----------
parent_window : Window
La finestra nella quale è aperta questa scheda
tab_id : str
Identificatore per la scheda, generato da un oggetto TabsManager
url : str
Url al quale punta la scheda, può anche essere impostato in seguito
"""
logger.info("Aprendo una nuova scheda")
# Impostiamo le informazioni su questa scheda
self.__set_parent_window(parent_window)
self.__set_tab_id(tab_id)
# Definiamo l'url al quale punta
self.set_url(url)
# Creiamo la variabile con il contenuto della scheda
self.__init_content()
    def __set_parent_window(self, window):
        """Set the window in which this tab is open"""
        self.__parent_window = window
        logger.debug("The parent window is: %s", self.parent_window.win_id)
    def get_parent_window(self):
        """Return the window in which this tab is open"""
        return self.__parent_window
    def __set_tab_id(self, tab_id):
        """Set an identifier for this tab"""
        self.__tab_id = tab_id
        logger.debug("Tab id: %s", tab_id)
    def get_tab_id(self):
        """Return the identifier of this tab"""
        return self.__tab_id
    def set_url(self, url):
        """Set the url the tab points to"""
        self.__url = url
        logger.info("Url %s set in tab %s", url, self.tab_id)
    def get_url(self):
        """Return the url the tab points to"""
        return self.__url
    def __init_content(self):
        """
        METHOD TO BE CALLED IN __init__
        A variable used by get_content and download_content has to be created
        here: the variable is also created by download_content, but calling
        get_content before download_content would otherwise raise an error
        """
        self.__content = None
    def download_content(self):
        """
        Set the tab content, downloading the content of the configured url
        """
        logger.info("Downloading the content of tab %s", self.tab_id)
        self.__content = self.parent_window.session.get2(self.url)
    def get_content(self):
        """Return the tab content"""
        return self.__content
    def get_bs_content(self):
        """Return the tab content, as a BeautifulSoup object"""
        ret = None
        # If the page has not been loaded yet, self.content is None
        # But None cannot be passed directly to BeautifulSoup: it has to be
        # replaced with an empty string, which has the same logical meaning
        if self.content is None:
            ret = BeautifulSoup("", "lxml")
        else:
            ret = BeautifulSoup(self.content.text, "lxml")
        return ret
    def re_load(self):
        """Reload the tab content"""
        logger.info("Reloading tab %s", self.tab_id)
        self.download_content()
    def post(self, url, data=None, **kwargs):
        """Shortcut to requests.Session.post"""
        return self.parent_window.session.post(url, data, **kwargs)
    def close(self, closing_window=False):
        """
        Close the tab
        If it is the last tab of the window, the window is closed too
        Parameters
        ----------
        closing_window : bool
            Means that this tab must be closed because the window it belongs
            to is being closed; so even if it is the last tab, the window
            close method will not be called, because the window is already
            being closed
        """
        is_last_tab = (len(self.parent_window.tabs_manager.tabs) == 1)
        logger.info("Closing tab %s", self.tab_id)
        # If it is the last tab, close the window too
        # ... unless the window is already being closed
        if (is_last_tab and (not closing_window)):
            logger.info("It is the only tab in the window, the window will be closed")
            self.parent_window.close()
        else:
            # Close the tab
            self.parent_window.tabs_manager.remove_tab(self.tab_id)
parent_window = property(get_parent_window)
bs_content = property(get_bs_content)
content = property(get_content)
tab_id = property(get_tab_id)
url = property(get_url, set_url)
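# Minimal usage sketch (assumptions: a browser object exposing default_ua and
# windows_manager, plus a TabsManager class from the managers module, both of
# which live outside this file).
#
#   win = Window(browser, "w0", managers.TabsManager)
#   tab = tuple(win.tabs_manager.tabs.values())[0]
#   tab.url = "http://example.org"
#   tab.download_content()
#   links = tab.bs_content.find_all("a")
#   win.close()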
| gpl-3.0 | -661,679,853,957,830,900 | 29.91954 | 79 | 0.71425 | false |
yarbelk/pgu | pgu/gui/widget.py | 1 | 11433 | """This modules defines the Widget class, which is the base of the PGU widget
hierarchy."""
import pygame
from . import pguglobals
from . import style
from .errors import PguError
class SignalCallback(object):
# The function to call
func = None
# The parameters to pass to the function (as a list)
params = None
class Widget(object):
"""Base class for all PGU graphical objects.
Example - Creating your own Widget:
class Draw(gui.Widget):
def paint(self,s):
# Paint the pygame.Surface
return
def update(self,s):
# Update the pygame.Surface and return the update rects
return [pygame.Rect(0,0,self.rect.w,self.rect.h)]
def event(self,e):
# Handle the pygame.Event
return
def resize(self,width=None,height=None):
# Return the width and height of this widget
return 256,256
"""
# The name of the widget (or None if not defined)
name = None
# The container this widget belongs to
container = None
# Whether this widget has been painted yet
_painted = False
# The widget used to paint the background
background = None
# ...
_rect_margin = None
_rect_border = None
_rect_padding = None
_rect_content = None
# A dictionary of signal callbacks, hashed by signal ID
connects = None
# The area covered by the widget, relative to the parent widget
rect = None
def __init__(self, **params):
"""Create a new Widget instance given the style parameters.
Keyword arguments:
decorate -- whether to call theme.decorate(self) to allow the
theme a chance to decorate the widget. (default is true)
style -- a dict of style parameters.
x, y -- position parameters
width, height -- size parameters
align, valign -- alignment parameters, passed along to style
font -- the font to use with this widget
color -- the color property, if applicable
background -- the widget used to paint the background
cls -- class name as used by Theme
name -- name of widget as used by Form. If set, will call
form.add(self,name) to add the widget to the most recently
created Form.
focusable -- True if this widget can receive focus via Tab, etc.
(default is True)
        disabled -- True if this widget is disabled (default is False)
value -- initial value
"""
#object.Object.__init__(self)
self.connects = {}
params.setdefault('decorate',True)
params.setdefault('style',{})
params.setdefault('focusable',True)
params.setdefault('disabled',False)
self.focusable = params['focusable']
self.disabled = params['disabled']
self.rect = pygame.Rect(params.get('x',0),
params.get('y',0),
params.get('width',0),
params.get('height',0))
s = params['style']
#some of this is a bit "theme-ish" but it is very handy, so these
#things don't have to be put directly into the style.
for att in ('align','valign','x','y','width','height','color','font','background'):
if att in params: s[att] = params[att]
self.style = style.Style(self,s)
self.cls = 'default'
if 'cls' in params: self.cls = params['cls']
if 'name' in params:
from . import form
self.name = params['name']
if form.Form.form:
form.Form.form.add(self)
self.form = form.Form.form
if 'value' in params: self.value = params['value']
self.pcls = ""
if params['decorate'] != False:
if (not pguglobals.app):
# TODO - fix this somehow
from . import app
app.App()
pguglobals.app.theme.decorate(self,params['decorate'])
def focus(self):
"""Focus this Widget."""
if self.container:
if self.container.myfocus != self: ## by Gal Koren
self.container.focus(self)
def blur(self):
"""Blur this Widget."""
if self.container: self.container.blur(self)
def open(self):
"""Open this widget as a modal dialog."""
#if getattr(self,'container',None) != None: self.container.open(self)
pguglobals.app.open(self)
def close(self, w=None):
"""Close this widget, if it is currently an open dialog."""
#if getattr(self,'container',None) != None: self.container.close(self)
if (not w):
w = self
pguglobals.app.close(w)
def is_open(self):
return (self in pguglobals.app.windows)
def is_hovering(self):
"""Returns true if the mouse is hovering over this widget."""
if self.container:
return (self.container.myhover is self)
return False
def resize(self,width=None,height=None):
"""Resize this widget and all sub-widgets, returning the new size.
This should be implemented by a subclass.
"""
return (self.style.width, self.style.height)
def chsize(self):
"""Signal that this widget has changed its size."""
if (not self._painted):
return
if (not self.container):
return
if (pguglobals.app):
pguglobals.app.chsize()
def update(self,s):
"""Updates the surface and returns a rect list of updated areas
This should be implemented by a subclass.
"""
return
def paint(self,s):
"""Render this widget onto the given surface
This should be implemented by a subclass.
"""
return
def repaint(self):
"""Request a repaint of this Widget."""
if self.container: self.container.repaint(self)
#pguglobals.app.repaint_widget(self)
def repaintall(self):
"""Request a repaint of all Widgets."""
if self.container: self.container.repaintall()
def reupdate(self):
"""Request a reupdate of this Widget."""
if self.container: self.container.reupdate(self)
def next(self):
"""Pass focus to next Widget.
Widget order determined by the order they were added to their container.
"""
if self.container: self.container.next(self)
def previous(self):
"""Pass focus to previous Widget.
Widget order determined by the order they were added to their container.
"""
if self.container: self.container.previous(self)
def get_abs_rect(self):
"""Returns the absolute rect of this widget on the App screen."""
x, y = self.rect.x, self.rect.y
cnt = self.container
while cnt:
x += cnt.rect.x
y += cnt.rect.y
if cnt._rect_content:
x += cnt._rect_content.x
y += cnt._rect_content.y
cnt = cnt.container
return pygame.Rect(x, y, self.rect.w, self.rect.h)
def connect(self,code,func,*params):
"""Connect an event code to a callback function.
Note that there may be multiple callbacks per event code.
Arguments:
code -- event type
fnc -- callback function
*values -- values to pass to callback.
        Please note that callbacks may also have "magical" parameters.
Such as:
_event -- receive the event
_code -- receive the event code
_widget -- receive the sending widget
Example:
def onclick(value):
print ('click', value)
w = Button("PGU!")
w.connect(gui.CLICK,onclick,'PGU Button Clicked')
"""
if (not code in self.connects):
self.connects[code] = []
for cb in self.connects[code]:
if (cb.func == func):
# Already connected to this callback function
return
# Wrap the callback function and add it to the list
cb = SignalCallback()
cb.func = func
cb.params = params
self.connects[code].append(cb)
# Remove signal handlers from the given event code. If func is specified,
# only those handlers will be removed. If func is None, all handlers
# will be removed.
def disconnect(self, code, func=None):
if (not code in self.connects):
return
if (not func):
# Remove all signal handlers
del self.connects[code]
else:
# Remove handlers that call 'func'
n = 0
callbacks = self.connects[code]
while (n < len(callbacks)):
if (callbacks[n].func == func):
# Remove this callback
del callbacks[n]
else:
n += 1
def send(self,code,event=None):
"""Send a code, event callback trigger."""
if (not code in self.connects):
return
# Trigger all connected signal handlers
for cb in self.connects[code]:
func = cb.func
values = list(cb.params)
# Attempt to be compatible with previous versions of python
            # Use a separate name for the function's code object so the
            # signal code passed to send() is not shadowed
            try:
                fcode = func.__code__
            except AttributeError:
                fcode = func.func_code
            nargs = fcode.co_argcount
            names = list(fcode.co_varnames)[:nargs]
# If the function is bound to an instance, remove the first argument name. Again
# we keep compatibility with older versions of python.
if (hasattr(func, "__self__") and hasattr(func.__self__, "__class__") or
hasattr(func,'im_class')):
names.pop(0)
args = []
magic = {'_event':event,'_code':code,'_widget':self}
for name in names:
if name in magic.keys():
args.append(magic[name])
elif len(values):
args.append(values.pop(0))
else:
break
args.extend(values)
func(*args)
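    # Example (sketch): how send() resolves magic parameters, assuming a CLICK
    # constant as used elsewhere in pgu; the onclick handler is hypothetical.
    #
    #   def onclick(_widget, _code, value):
    #       print ('clicked', _widget, _code, value)
    #   w.connect(CLICK, onclick, 'spam')
    #   w.send(CLICK)    # calls onclick(w, CLICK, 'spam')
    #   w.disconnect(CLICK, onclick)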
def _event(self,e):
if self.disabled: return
self.send(e.type,e)
return self.event(e)
def event(self,e):
"""Called when an event is passed to this object.
Please note that if you use an event, returning the value True
will stop parent containers from also using the event. (For example, if
your widget handles TABs or arrow keys, and you don't want those to
also alter the focus.)
This should be implemented by a subclass.
"""
return
def get_toplevel(self):
"""Returns the top-level widget (usually the Desktop) by following the
chain of 'container' references."""
top = self
while (top.container):
top = top.container
return top
def collidepoint(self, pos):
"""Test if the given point hits this widget. Over-ride this function
for more advanced collision testing."""
return self.rect.collidepoint(pos)
| lgpl-2.1 | -5,533,679,612,545,960,000 | 31.480114 | 92 | 0.560308 | false |
stanta/darfchain | darfchain_docker/bigchaindb/common/schema/__init__.py | 3 | 2870 | """ Schema validation related functions and data """
import os.path
import logging
import jsonschema
import yaml
import rapidjson
import rapidjson_schema
from bigchaindb.common.exceptions import SchemaValidationError
logger = logging.getLogger(__name__)
def drop_schema_descriptions(node):
""" Drop descriptions from schema, since they clutter log output """
if 'description' in node:
del node['description']
for n in node.get('properties', {}).values():
drop_schema_descriptions(n)
for n in node.get('definitions', {}).values():
drop_schema_descriptions(n)
for n in node.get('anyOf', []):
drop_schema_descriptions(n)
def _load_schema(name):
""" Load a schema from disk """
path = os.path.join(os.path.dirname(__file__), name + '.yaml')
with open(path) as handle:
schema = yaml.safe_load(handle)
drop_schema_descriptions(schema)
fast_schema = rapidjson_schema.loads(rapidjson.dumps(schema))
return path, (schema, fast_schema)
TX_SCHEMA_PATH, TX_SCHEMA_COMMON = _load_schema('transaction')
_, TX_SCHEMA_CREATE = _load_schema('transaction_create')
_, TX_SCHEMA_TRANSFER = _load_schema('transaction_transfer')
VOTE_SCHEMA_PATH, VOTE_SCHEMA = _load_schema('vote')
def _validate_schema(schema, body):
""" Validate data against a schema """
# Note
#
# Schema validation is currently the major CPU bottleneck of
    # BigchainDB. The `jsonschema` library validates python data structures
# directly and produces nice error messages, but validation takes 4+ ms
# per transaction which is pretty slow. The rapidjson library validates
# much faster at 1.5ms, however it produces _very_ poor error messages.
# For this reason we use both, rapidjson as an optimistic pathway and
# jsonschema as a fallback in case there is a failure, so we can produce
# a helpful error message.
try:
schema[1].validate(rapidjson.dumps(body))
except ValueError as exc:
try:
jsonschema.validate(body, schema[0])
except jsonschema.ValidationError as exc2:
raise SchemaValidationError(str(exc2)) from exc2
        logger.warning('code problem: jsonschema did not raise an exception, whereas rapidjson raised %s', exc)
raise SchemaValidationError(str(exc)) from exc
def validate_transaction_schema(tx):
"""
Validate a transaction dict.
TX_SCHEMA_COMMON contains properties that are common to all types of
transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top.
"""
_validate_schema(TX_SCHEMA_COMMON, tx)
if tx['operation'] == 'TRANSFER':
_validate_schema(TX_SCHEMA_TRANSFER, tx)
else:
_validate_schema(TX_SCHEMA_CREATE, tx)
def validate_vote_schema(vote):
""" Validate a vote dict """
_validate_schema(VOTE_SCHEMA, vote)
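# Example (sketch): callers are expected to wrap validation like this; the
# transaction dict itself would come from elsewhere in BigchainDB.
#
# try:
#     validate_transaction_schema(tx)
# except SchemaValidationError as exc:
#     logger.warning('rejected invalid transaction: %s', exc)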
| gpl-3.0 | 6,655,123,799,264,698,000 | 32.764706 | 110 | 0.689895 | false |
NovaSyst/chocolate | chocolate/space.py | 1 | 36188 | """This module provides common building blocks to define a search space.
Search spaces are defined using dictionaries, where the keys are the parameter
names and the values their distribution. For example, defining a two
parameter search space is done as follows ::
space = {"x": uniform(-5, 5),
"y": quantized_uniform(-2, 3, 0.5)}
A conditional search space can be seen as a tree, where each condition
defines a subtree. For example, in the next figure, three search spaces
are presented.
.. image:: /images/search-space-tree.png
:width: 300px
:align: center
The left tree is the simple two parameter search space defined earlier. The
middle tree defines a conditional search space with a single root condition.
Two subspaces exist in this search space, one when the condition is `a` the
other when the condition is `b`. Defining such a search space is done using
a list of dictionaries as follows ::
space = [{"cond": "a", "x": uniform(-5, 5)},
{"cond": "b", "y": quantized_uniform(-2, 3, 0.5)}]
The right most tree has two conditions one at its root and another one when
the root condition is `a`. It has a total of four subspaces. Defining such a
search space is done using a hierarchy of dictionaries as follow ::
space = [{"cond": "a", "sub": {"c": {"x": uniform(-5, 5)},
"d": {"z": log(-5, 5, 10)},
"e": {"w": quantized_log(-2, 7, 1, 10)}}},
{"cond": "b", "y": quantized_uniform(-2, 3, 0.5)}
Note that lists can only be used at the root of conditional search spaces;
sub-conditions must use the dictionary form. Moreover, it is not necessary to
use the same parameter name for root conditions. For example, the following
is a valid search space ::
space = [{"cond": "a", "x": uniform(-5, 5)},
{"spam": "b", "y": quantized_uniform(-2, 3, 0.5)}]
The only restriction is that each search space must have a unique combination
of conditional parameters and values, where conditional parameters have
non-distribution values. Finally, one and only one subspace can be defined
without condition as follows ::
space = [{"x": uniform(-5, 5)},
{"cond": "b", "y": quantized_uniform(-2, 3, 0.5)}]
If two or more subspaces share the same conditional key (set of parameters
and values) an :class:`AssertionError` will be raised upon building the
search space specifying the erroneous key.
"""
from collections import OrderedDict, Mapping, Sequence
from itertools import chain, count, islice, product, combinations
import numpy
class _Constant(object):
"""Implements Chocolat constant value. his is used internally
by other modules.
"""
def __init__(self, value):
self.value = value
class Distribution(object):
"""Base class for every Chocolate distributions."""
def __len__(self):
raise NotImplementedError
def __getitem__(self, item):
raise NotImplementedError
def __ne__(self, other):
return not (self == other)
class ContinuousDistribution(Distribution):
"""Base class for every Chocolate continuous distributions."""
pass
class QuantizedDistribution(Distribution):
"""Base class for every Chocolate quantized distributions."""
pass
class uniform(ContinuousDistribution):
"""Uniform continuous distribution.
Representation of the uniform continuous distribution in the half-open
interval :math:`[\\text{low}, \\text{high})`.
Args:
low: Lower bound of the distribution. All values will be
greater or equal than low.
high: Upper bound of the distribution. All values will be
lower than high.
"""
def __init__(self, low, high):
assert low < high, "Low must be lower than high"
self.low = low
self.high = high
def __call__(self, x):
"""Transforms *x* a uniform number taken from the half-open continuous
interval :math:`[0, 1)` to the represented distribution.
Returns:
The corresponding number in the half-open interval
:math:`[\\text{low}, \\text{high})`.
"""
return x * (self.high - self.low) + self.low
def __repr__(self):
return "uniform(low={}, high={})".format(self.low, self.high)
def __eq__(self, other):
return self.low == other.low and self.high == other.high
class quantized_uniform(QuantizedDistribution):
"""Uniform discrete distribution.
Representation of the uniform continuous distribution in the half-open
interval :math:`[\\text{low}, \\text{high})` with regular spacing between
samples. If :math:`\\left\\lceil \\frac{\\text{high} - \\text{low}}{step}
\\right\\rceil \\neq \\frac{\\text{high} - \\text{low}}{step}`, the last
interval will have a different probability than the others. It is
preferable to use :math:`\\text{high} = N \\times \\text{step} +
\\text{low}` where :math:`N` is a whole number.
Args:
low: Lower bound of the distribution. All values will be
greater or equal than low.
high: Upper bound of the distribution. All values will be
lower than high.
step: The spacing between each discrete sample.
"""
def __init__(self, low, high, step):
assert low < high, "Low must be lower than high"
assert step > 0, "Step must be greater than 0"
self.low = low
self.high = high
self.step = step
def __call__(self, x):
"""Transforms *x*, a uniform number taken from the half-open continuous
interval :math:`[0, 1)`, to the represented distribution.
        Returns:
            The corresponding number in the discrete half-open interval
            :math:`[\\text{low}, \\text{high})` aligned on step size. If the
            output number is whole, this method returns an :class:`int`,
            otherwise a :class:`float`.
"""
x += 1e-16 # handle floating point errors in floor
v = numpy.floor((x * (self.high - self.low)) / self.step) * self.step + self.low
if v.is_integer():
return int(v)
return v
def __iter__(self):
"""Iterate over all possible values of this discrete distribution in
the :math:`[0, 1)` space. This is the same as ::
numpy.arange(0, 1, step / (high - low))
"""
step = self.step / (self.high - self.low)
for x in numpy.arange(0, 1, step):
yield x
def __getitem__(self, i):
"""Retrieve the ``i`` th value of this distribution in the
:math:`[0, 1)` space.
"""
return float(i) / ((self.high - self.low) / self.step)
def __len__(self):
"""Get the number of possible values for this distribution.
"""
return int(numpy.ceil((self.high - self.low) / self.step))
def __repr__(self):
return "quantized_uniform(low={}, high={}, step={})".format(self.low, self.high, self.step)
def __eq__(self, other):
return self.low == other.low and self.high == other.high and self.step == other.step
class log(uniform):
"""Logarithmic uniform continuous distribution.
Representation of the logarithmic uniform continuous distribution in the
half-open interval :math:`[\\text{base}^\\text{low},
\\text{base}^\\text{high})`.
Args:
low: Lower bound of the distribution. All values will be
greater or equal than :math:`\\text{base}^\\text{low}`.
high: Upper bound of the distribution. All values will be
lower than :math:`\\text{base}^\\text{high}`.
base: Base of the logarithmic function.
"""
def __init__(self, low, high, base):
super(log, self).__init__(low, high)
assert base > 0, "Base must be larger than 0"
assert base != 1, "Base cannot equal 1"
self.base = base
def __call__(self, x):
"""Transforms *x*, a uniform number taken from the half-open continuous
interval :math:`[0, 1)`, to the represented distribution.
        Returns:
            The corresponding number in the continuous half-open interval
            :math:`[\\text{base}^\\text{low}, \\text{base}^\\text{high})`.
"""
return self.base**(super(log, self).__call__(x))
def __repr__(self):
return "log(low={}, high={}, base={})".format(self.low, self.high, self.base)
def __eq__(self, other):
return self.low == other.low and self.high == other.high and self.base == other.base
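# Example (sketch): log(low=-3, high=5, base=10) maps the uniform interval
# [0, 1) onto [1e-3, 1e5); for instance log(-3, 5, 10)(0.5) == 10.0, since
# the exponent 0.5 * (5 - (-3)) + (-3) equals 1.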
class quantized_log(quantized_uniform):
"""Logarithmic uniform discrete distribution.
Representation of the logarithmic uniform discrete distribution in the
half-open interval :math:`[\\text{base}^\\text{low},
\\text{base}^\\text{high})`. with regular spacing between sampled
exponents.
Args:
low: Lower bound of the distribution. All values will be
greater or equal than :math:`\\text{base}^\\text{low}`.
high: Upper bound of the distribution. All values will be
lower than :math:`\\text{base}^\\text{high}`.
step: The spacing between each discrete sample exponent.
base: Base of the logarithmic function.
"""
def __init__(self, low, high, step, base):
super(quantized_log, self).__init__(low, high, step)
assert base > 0, "Base must be larger than 0"
assert base != 1, "Base cannot equal 1"
self.base = base
def __call__(self, x):
"""Transforms *x*, a uniform number taken from the half-open
continuous interval :math:`[0, 1)`, to the represented distribution.
Returns:
The corresponding number in the discrete half-open interval
:math:`[\\text{base}^\\text{low}, \\text{base}^\\text{high})`
            aligned on step size. If the output number is whole, this
            method returns an :class:`int`, otherwise a :class:`float`.
"""
x += 1e-16 # handle floating point errors in floor
v = numpy.float(self.base**(super(quantized_log, self).__call__(x)))
if v.is_integer():
return int(v)
return v
def __repr__(self):
return "quantized_log(low={}, high={}, step={}, base={})".format(self.low, self.high, self.step, self.base)
def __eq__(self, other):
return self.low == other.low and self.high == other.high and self.step == other.step and self.base == other.base
class choice(quantized_uniform):
"""Uniform choice distribution between non-numeric samples.
Args:
values: A list of choices to choose uniformly from.
"""
def __init__(self, values):
assert len(values) > 0, "Choices must at least have one value"
self.values = list(values)
super(choice, self).__init__(low=0, high=len(self.values), step=1)
def __call__(self, x):
"""Transforms *x*, a uniform number taken from the half-open
continuous interval :math:`[0, 1)`, to the represented distribution.
Returns:
The corresponding choice from the entered values.
"""
assert x < 1, "Choices must lie in the half-open interval [0, 1)"
return self.values[int(super(choice, self).__call__(x))]
def __repr__(self):
return "choice({})".format(self.values)
def __eq__(self, other):
return self.values == other.values
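# Example (sketch): choice(["elu", "relu"]) splits [0, 1) evenly, so any
# x < 0.5 yields "elu" and any 0.5 <= x < 1 yields "relu".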
class Space(object):
"""Representation of the search space.
    Encapsulates a multidimensional search space defined on various
    distributions. Remember that key order in a standard Python dictionary is
    undefined, thus the keys of the input dictionaries are
    :func:`sorted` and put in :class:`OrderedDict` s for reproducibility.
Args:
spaces: A dictionary or list of dictionaries of parameter names to
their distribution. When a list of multiple dictionaries is
provided, the structuring elements of these items must define a
set of unique choices. Structuring elements are defined using
non-distribution values. See examples below.
Raises:
AssertionError: When two keys at the same level are equal.
    An instance of a space is a callable object which will return a valid
parameter set provided a vector of numbers in the half-open uniform
distribution :math:`[0, 1)`.
    The number of distinct dimensions can be queried with the :func:`len`
    function. When a list of dictionaries is provided, this choice constitutes
the first dimension and each subsequent conditional choice is also a
dimension.
Examples:
Here is how a simple search space can be defined and the parameters
can be retrieved ::
In [2]: s = Space({"learning_rate": uniform(0.0005, 0.1),
"n_estimators" : quantized_uniform(1, 11, 1)})
In [3]: s([0.1, 0.7])
Out[3]: {'learning_rate': 0.01045, 'n_estimators': 8}
        A one level conditional multidimensional search space is defined using
        a list of dictionaries. Here the choices are an SVM with a linear kernel
and a K-nearest neighbor as defined by the string values. Note the use
of class names in the space definition. ::
In [2]: from sklearn.svm import SVC
In [3]: from sklearn.neighbors import KNeighborsClassifier
In [4]: s = Space([{"algo": SVC, "kernel": "linear",
"C": log(low=-3, high=5, base=10)},
{"algo": KNeighborsClassifier,
"n_neighbors": quantized_uniform(low=1, high=20, step=1)}])
The number of dimensions of such search space can be retrieved with
the :func:`len` function. ::
In [5]: len(s)
Out[5]: 3
As in the simple search space a valid parameter set can be retrieved
by querying the space object with a vector of length equal to the full
search space. ::
In [6]: s([0.1, 0.2, 0.3])
Out[6]:
{'C': 0.039810717055349734,
'algo': sklearn.svm.classes.SVC,
'kernel': 'linear'}
In [7]: s([0.6, 0.2, 0.3])
Out[7]:
{'algo': sklearn.neighbors.classification.KNeighborsClassifier,
'n_neighbors': 6}
Internal conditions can be modeled using nested dictionaries. For
        example, the SVM from the last example can have different kernels. The
next search space will share the ``C`` parameter amongst all SVMs, but
will branch on the kernel type with their individual parameters. ::
In [2]: s = Space([{"algo": "svm",
"C": log(low=-3, high=5, base=10),
"kernel": {"linear": None,
"rbf": {"gamma": log(low=-2, high=3, base=10)}}},
{"algo": "knn",
"n_neighbors": quantized_uniform(low=1, high=20, step=1)}])
In [3]: len(s)
Out[3]: 5
In [4]: x = [0.1, 0.2, 0.7, 0.4, 0.5]
In [5]: s(x)
Out[5]: {'C': 0.039810717055349734, 'algo': 'svm', 'gamma': 1.0, 'kernel': 'rbf'}
"""
def __init__(self, spaces):
if isinstance(spaces, Mapping):
spaces = [spaces]
self.spaces = OrderedDict()
self.constants = list()
for subspace in spaces:
ts_key = list()
ts_space = OrderedDict()
for k, v in sorted(subspace.items()):
if k == "":# or k == "_subspace":
raise RuntimeError("'{}' is not a valid parameter name".format(k))
if isinstance(v, Distribution):
ts_space[k] = v
elif isinstance(v, Mapping):
cond_subspaces = list()
try:
sorted_v = sorted(v.items())
except TypeError as e:
sorted_v = sorted(v.items(), key=str)
for sub_k, sub_v in sorted_v:
s = {k: sub_k}
if isinstance(sub_v, Mapping):
s.update(sub_v)
elif sub_v is not None:
s[sub_k] = sub_v
cond_subspaces.append(s)
ts_space[k] = Space(cond_subspaces)
elif isinstance(v, _Constant):
self.constants.append((k, v.value))
else:
ts_key.append((k, v))
ts_key = tuple(ts_key)
            assert ts_key not in self.spaces, "Duplicate conditional key {} found in Space".format(ts_key)
self.spaces[ts_key] = ts_space
if len(self.spaces) > 1:
# print(list(self.spaces.keys()))
# assert all(self.spaces.keys()), "Empty subspace keys are not allowed in conditional search spaces."
self.subspace_choice = quantized_uniform(low=0, high=len(self.spaces), step=1)
def __len__(self):
        # We have a single level of choices if this is a structured space
ndims = 1 if len(self.spaces) > 1 else 0
# Within each subspace, every non-structuring key is a dimension
# A structuring key is not a chocolate.Distribution
for subspace in self.spaces.values():
for v in subspace.values():
if isinstance(v, Space):
ndims += len(v)
elif isinstance(v, Distribution):
ndims += 1
return ndims
def __call__(self, x):
out = dict()
        assert len(self) == len(x), "Space and vector dimensions mismatch {} != {}".format(len(self), len(x))
iter_x = iter(x)
space_idx = 0
if len(self.spaces) > 1:
space_idx = self.subspace_choice(numpy.clip(next(iter_x), 0, 0.9999))
subspace_key = list(self.spaces.keys())[space_idx]
for key, subspace in self.spaces.items():
for k, v in subspace.items():
if isinstance(v, Distribution):
xi = next(iter_x)
elif isinstance(v, Space):
xi = [next(iter_x) for _ in range(len(v))]
if len(self.spaces) == 1 or subspace_key == key:
if isinstance(v, Distribution):
out[k] = v(xi)
elif isinstance(v, Space):
out.update(**v(xi))
else:
raise TypeError("Oops something went wrong!")
out.update(subspace_key)
out.update(self.constants)
return out
def isactive(self, x):
"""Checks within conditional subspaces if, with the given vector, a
parameter is active or not.
Args:
x: A vector of numbers in the half-open uniform
distribution :math:`[0, 1)`.
Returns:
            A list of booleans telling whether each parameter is active or not.
Example:
When using conditional spaces it is often necessary to assess
quickly what dimensions are active according to a given vector.
For example, with the following conditional space ::
In [2]: s = Space([{"algo": "svm",
"C": log(low=-3, high=5, base=10),
"kernel": {"linear": None,
"rbf": {"gamma": log(low=-2, high=3, base=10)}}},
{"algo": "knn",
"n_neighbors": quantized_uniform(low=1, high=20, step=1)}])
In [3]: s.names()
Out[3]:
['_subspace',
'algo_svm_C',
'algo_svm_kernel__subspace',
'algo_svm_kernel_kernel_rbf_gamma',
'algo_knn_n_neighbors']
In [4]: x = [0.1, 0.2, 0.7, 0.4, 0.5]
In [5]: s(x)
Out[5]: {'C': 0.039810717055349734, 'algo': 'svm', 'gamma': 1.0, 'kernel': 'rbf'}
In [6]: s.isactive(x)
Out[6]: [True, True, True, True, False]
In [6]: x = [0.6, 0.2, 0.7, 0.4, 0.5]
In [8]: s(x)
Out[8]: {'algo': 'knn', 'n_neighbors': 10}
In [9]: s.isactive(x)
Out[9]: [True, False, False, False, True]
"""
        assert len(self) == len(x), "Space and vector dimensions mismatch {} != {}".format(len(self), len(x))
out = []
iter_x = iter(x)
if len(self.spaces) > 1:
space_idx = self.subspace_choice(numpy.clip(next(iter_x), 0, 0.99999))
subspace_key = list(self.spaces.keys())[space_idx]
out.append(True)
for key, subspace in self.spaces.items():
for k, v in subspace.items():
if isinstance(v, Distribution):
xi = next(iter_x)
elif isinstance(v, Space):
xi = [next(iter_x) for _ in range(len(v))]
if len(self.spaces) == 1 or subspace_key == key:
if isinstance(v, Distribution):
out.append(True)
elif isinstance(v, Space):
out.extend(v.isactive(xi))
else:
raise TypeError("Unexpected type {} in space".format(type(v)))
else:
if isinstance(v, Distribution):
out.append(False)
elif isinstance(v, Space):
out.extend([False] * len(xi))
else:
raise TypeError("Unexpected type {} in space".format(type(v)))
return out
def names(self, unique=True):
"""Returns unique sequential names meant to be used as database column
names.
Args:
unique: Whether or not to return unique mangled names. Subspaces will
still be mangled.
Examples:
If the length of the space is 2 as follow ::
In [2]: s = Space({"learning_rate": uniform(0.0005, 0.1),
"n_estimators" : quantized_uniform(1, 11, 1)})
In [3]: s.names()
Out[3]: ['learning_rate', 'n_estimators']
While in conditional spaces, if the length of the space is 5 (one
for the choice od subspace and four independent parameters) ::
In [4]: s = Space([{"algo": "svm", "kernel": "linear",
"C": log(low=-3, high=5, base=10)},
{"algo": "svm", "kernel": "rbf",
"C": log(low=-3, high=5, base=10),
"gamma": log(low=-2, high=3, base=10)},
{"algo": "knn",
"n_neighbors": quantized_uniform(low=1, high=20, step=1)}])
In [5]: s.names()
Out[5]:
['_subspace',
'algo_svm_kernel_linear_C',
'algo_svm_kernel_rbf_C',
'algo_svm_kernel_rbf_gamma',
'algo_knn_n_neighbors']
When using methods or classes as parameter values for conditional
choices the output might be a little bit more verbose, however the
names are still there. ::
In [6]: s = Space([{"algo": SVC,
"C": log(low=-3, high=5, base=10),
"kernel": {"linear": None,
"rbf": {"gamma": log(low=-2, high=3, base=10)}}},
{"algo": KNeighborsClassifier,
"n_neighbors": quantized_uniform(low=1, high=20, step=1)}])
In [7]: s.names()
Out[7]:
['_subspace',
'algo_<class sklearn_svm_classes_SVC>_C',
'algo_<class sklearn_svm_classes_SVC>_kernel__subspace',
'algo_<class sklearn_svm_classes_SVC>_kernel_kernel_rbf_gamma',
'algo_<class sklearn_neighbors_classification_KNeighborsClassifier>_n_neighbors']
"""
names = list()
if len(self.spaces) > 1:
names.append("_subspace")
for key, subspace in self.spaces.items():
for k, v in subspace.items():
prefix = "{}_".format("_".join(str(ni) for ni in chain(*key))) if key else ""
prefix = prefix.replace("\"", "")
prefix = prefix.replace("'", "")
prefix = prefix.replace(".", "_")
if isinstance(v, Distribution):
if unique:
names.append("{}{}".format(prefix, k))
else:
names.append(k)
elif isinstance(v, Space):
for n in v.names(unique):
if unique or n.endswith("_subspace"):
names.append("{}{}_{}".format(prefix, k, n))
else:
names.append(n)
else:
raise TypeError("Unexpected type {} inspace".format(type(v)))
return names
def steps(self):
"""Returns the steps size between each element of the space
dimensions. If a variable is continuous the returned stepsize is :data:`None`.
"""
steps = list()
if len(self.spaces) > 1:
steps.append(self.subspace_choice.step / (self.subspace_choice.high - self.subspace_choice.low))
for subspace in self.spaces.values():
for v in subspace.values():
if isinstance(v, QuantizedDistribution):
steps.append(v.step / (v.high - v.low))
elif isinstance(v, Space):
steps.extend(v.steps())
else:
steps.append(None)
return steps
def isdiscrete(self):
"""Returns whether or not this search space has only discrete
dimensions.
"""
for subspace in self.spaces.values():
for v in subspace.values():
if isinstance(v, ContinuousDistribution):
return False
elif isinstance(v, Space) and not v.isdiscrete():
return False
return True
def subspaces(self):
"""Returns every valid combinaition of conditions of the tree-
structured search space. Each combinaition is a list of length equal
to the total dimensionality of this search space. Active dimensions
are either a fixed value for conditions or a :class:`Distribution` for
optimizable parameters. Inactive dimensions are :data:`None`.
Example:
The following search space has 3 possible subspaces
::
In [2]: s = Space([{"algo": "svm",
"C": log(low=-3, high=5, base=10),
"kernel": {"linear": None,
"rbf": {"gamma": log(low=-2, high=3, base=10)}}},
{"algo": "knn",
"n_neighbors": quantized_uniform(low=1, high=20, step=1)}])
In [3]: s.names()
Out[3]:
['_subspace',
'algo_svm_C',
'algo_svm_kernel__subspace',
'algo_svm_kernel_kernel_rbf_gamma',
'algo_knn_n_neighbors']
In [4]: s.subspaces()
Out[4]:
[[0.0, log(low=-3, high=5, base=10), 0.0, None, None],
[0.0, log(low=-3, high=5, base=10), 0.5, log(low=-2, high=3, base=10), None],
[0.5, None, None, None, quantized_uniform(low=1, high=20, step=1)]]
"""
subspaces, _ = self._subspaces()
return subspaces
def _subspaces(self):
branches = list()
indices = list()
if len(self.spaces) > 1:
position = 1
else:
position = 0
for i, (key, subspace) in enumerate(self.spaces.items()):
branch = [None] * len(self)
idx = list()
if len(self.spaces) > 1:
step = self.subspace_choice.step / (self.subspace_choice.high - self.subspace_choice.low)
branch[0] = i * step
idx.append(0)
conditionals = list()
conditional_idx = list()
for k, v in subspace.items():
if not isinstance(v, Space):
branch[position] = v
idx.append(position)
position += 1
else:
cond_spaces, cond_indices = v._subspaces()
conditionals.append(cond_spaces)
conditional_idx.append([[position + j for j in s] for s in cond_indices])
if any(cond_indices):
position += max(max(s) for s in cond_indices if s) + 1
if len(conditionals) == 0:
branches.append(branch)
indices.append(idx)
else:
for elements, indx in zip(product(*conditionals), product(*conditional_idx)):
cond_branch = branch.copy()
cond_indices = idx.copy()
for e, j in zip(elements, indx):
# Remove Nones from underlying spaces
e = [ei for ei in e if ei is not None]
for ei, ji in zip(e, j):
cond_branch[ji] = ei
cond_indices.append(ji)
branches.append(cond_branch)
indices.append(cond_indices)
return branches, indices
def __eq__(self, other):
if isinstance(other, Space):
return self.spaces == other.spaces
return False
def __ne__(self, other):
return not (self == other)
if __name__ == "__main__":
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
space1 = {"a": uniform(1, 2),
"b": {"c": {"c1": quantized_log(0, 5, 1, 10)},
"d": {"d1": quantized_log(0, 5, 1, 2)}}}
space2 = {"a": uniform(1, 2),
"b": {"c": {"c1": quantized_log(0, 5, 1, 10)},
"d": {"d1": quantized_log(0, 5, 1, 2)}}}
space3 = {"a": uniform(1, 2),
"b": {"c": {"c1": quantized_log(0, 5, 1, 10)},
"d": {"d2": quantized_log(0, 5, 1, 2)}}}
space4 = {"a": uniform(1, 2),
"b": {"c": {"c1": quantized_log(0, 5, 1, 10)},
"d": {"d1": quantized_log(0, 5, 1, 8)}}}
print(Space(space1) == Space(space2))
print(Space(space1) == Space(space3))
print(Space(space1) == Space(space4))
space = {"initial_learning_rate": choice([0.0005]),
"learning_rate_decay": choice([0.0004]),
"keep_prob": choice([0.7, 0.9]),
"nout_c1": quantized_log(low=0, high=3, step=1, base=2),
"nout_c2": quantized_log(low=0, high=3, step=1, base=2),
"nout_fc1": quantized_log(low=0, high=3, step=1, base=2),
"nout_fc2": quantized_log(low=0, high=3, step=1, base=2),
"act_fn": choice(["elu", "relu"])}
s = Space(space)
print(len(s))
print(s.steps())
print(s.names())
print(s.isdiscrete())
for subspace in s.subspaces():
print("*", subspace)
x = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
print(s(x))
print(s.isactive(x))
print("="*12)
space = [{"algo": SVC, "kernel": "rbf", "C": uniform(low=0.001, high=10000), "gamma": uniform(low=0, high=1)},
{"algo": SVC, "kernel": "linear", "C": uniform(low=0.001, high=10000)},
{"algo": KNeighborsClassifier, "n_neighbors": quantized_uniform(low=1, high=20, step=1)},
{"algo": "cnn", "num_layers": 8, "n_units": quantized_log(low=5, high=8, step=1, base=2)},
{"algo": "cnn", "num_layers": 4, "n_units": quantized_log(low=5, high=12, step=1, base=2)}]
s = Space(space)
print(len(s))
print(s.steps())
print(s.names())
print(s.isdiscrete())
for subspace in s.subspaces():
print("*", subspace)
x = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
print(s(x))
print(s.isactive(x))
print("="*12)
space = [{"algo": "svm",
"C": log(low=-3, high=5, base=10),
"kernel": {"linear": None,
"rbf": {"gamma": log(low=-2, high=3, base=10)}}},
{"algo": "knn",
"n_neighbors": quantized_uniform(low=1, high=20, step=1)}]
s = Space(space)
print(len(s))
print(s.steps())
print(s.names())
print(s.isdiscrete())
for subspace in s.subspaces():
print("*", subspace)
x = [0.1, 0.2, 0.7, 0.4, 0.5]
print(s(x))
print(s.isactive(x))
print("="*12)
space = {"algo": {"svm": {"C": log(low=-3, high=5, base=10),
"kernel": {"linear": None,
"rbf": {"gamma": log(low=-2, high=3, base=10)}},
"cond2": {"aa": None,
"bb": {"abc": uniform(low=-1, high=1)}}},
"knn": {"n_neighbors": quantized_uniform(low=1, high=20, step=1)}}}
s = Space(space)
print(len(s))
print(s.steps())
print(s.names())
print(s.isdiscrete())
for subspace in s.subspaces():
print("*", subspace)
x = [0.5, 0.0, 0.5, 0.5, 0.3, 0.5, 0.2]
print(s(x))
print(s.isactive(x))
print("="*12)
space = {"x1": quantized_uniform(-5, 10, 2),
"cond": {"log": {"x2": quantized_log(0, 2, 0.1, 2)},
"uni": {"x2": quantized_uniform(0, 15, 2)}}}
s = Space(space)
print(len(s))
print(s.steps())
print(s.names())
print(s.isdiscrete())
for subspace in s.subspaces():
print("*", subspace)
x = [0.1, 0.2, 0.7, 0.4]
print(s(x))
print(s.isactive(x))
print("="*12)
space = {"algo": {SVC: {"gamma": log(low=-9, high=3, base=10)},
"kernel": {"rbf": None,
"poly": {"degree": quantized_uniform(low=1, high=5, step=1),
"coef0": uniform(low=-1, high=1)}},
LinearSVC: {"penalty": choice(["l1", "l2"])}},
"C": log(low=-2, high=10, base=10)}
s = Space(space)
print(len(s))
print(s.steps())
print(s.names())
print(s.isdiscrete())
for subspace in s.subspaces():
print("*", subspace)
print("="*12)
space = [{"algo": {SVC: {"gamma": log(low=-9, high=3, base=10),
"kernel": {"rbf": None,
"poly": {"degree": quantized_uniform(low=1, high=5, step=1),
"coef0": uniform(low=-1, high=1)}}},
LinearSVC: {"penalty": choice(["l1", "l2"])}},
"C": log(low=-2, high=10, base=10)},
{"type": "an_other_optimizer", "param": uniform(low=-1, high=1)}]
s = Space(space)
print(len(s))
print(s.steps())
print(s.names())
print(s.isdiscrete())
for subspace in s.subspaces():
print("*", subspace)
| bsd-3-clause | 7,803,660,800,139,668,000 | 37.621131 | 120 | 0.52219 | false |
shekkizh/WassersteinGAN.tensorflow | Dataset_Reader/read_celebADataset.py | 1 | 3200 | __author__ = 'charlie'
import numpy as np
import os, sys, inspect
import random
from six.moves import cPickle as pickle
from tensorflow.python.platform import gfile
import glob
utils_path = os.path.abspath(
os.path.realpath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if utils_path not in sys.path:
sys.path.insert(0, utils_path)
import utils as utils
DATA_URL = 'https://www.dropbox.com/sh/8oqt9vytwxb3s4r/AADIKlz8PR9zr6Y20qbkunrba/Img/img_align_celeba.zip'
random.seed(5)
class CelebA_Dataset():
def __init__(self, dict):
self.train_images = dict['train']
self.test_images = dict['test']
self.validation_images = dict['validation']
def read_dataset(data_dir):
pickle_filename = "celebA.pickle"
pickle_filepath = os.path.join(data_dir, pickle_filename)
if not os.path.exists(pickle_filepath):
# utils.maybe_download_and_extract(data_dir, DATA_URL, is_zipfile=True)
celebA_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
dir_path = os.path.join(data_dir, celebA_folder)
if not os.path.exists(dir_path):
print ("CelebA dataset needs to be downloaded and unzipped manually")
print ("Download from: %s" % DATA_URL)
raise ValueError("Dataset not found")
result = create_image_lists(dir_path)
print ("Training set: %d" % len(result['train']))
print ("Test set: %d" % len(result['test']))
print ("Validation set: %d" % len(result['validation']))
print ("Pickling ...")
with open(pickle_filepath, 'wb') as f:
pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
else:
print ("Found pickle file!")
with open(pickle_filepath, 'rb') as f:
result = pickle.load(f)
celebA = CelebA_Dataset(result)
del result
return celebA
def create_image_lists(image_dir, testing_percentage=0.0, validation_percentage=0.0):
"""
Code modified from tensorflow/tensorflow/examples/image_retraining
"""
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
training_images = []
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
sub_dirs = [x[0] for x in os.walk(image_dir)]
file_list = []
for extension in extensions:
file_glob = os.path.join(image_dir, '*.' + extension)
file_list.extend(glob.glob(file_glob))
if not file_list:
print('No files found')
else:
# print "No. of files found: %d" % len(file_list)
training_images.extend([f for f in file_list])
random.shuffle(training_images)
no_of_images = len(training_images)
validation_offset = int(validation_percentage * no_of_images)
validation_images = training_images[:validation_offset]
test_offset = int(testing_percentage * no_of_images)
testing_images = training_images[validation_offset:validation_offset + test_offset]
training_images = training_images[validation_offset + test_offset:]
result = {
'train': training_images,
'test': testing_images,
'validation': validation_images,
}
return result
| mit | 5,706,518,505,224,522,000 | 34.555556 | 106 | 0.644688 | false |
broferek/ansible | lib/ansible/modules/network/icx/icx_lldp.py | 19 | 5318 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: icx_lldp
version_added: "2.9"
author: "Ruckus Wireless (@Commscope)"
short_description: Manage LLDP configuration on Ruckus ICX 7000 series switches
description:
- This module provides declarative management of LLDP service on ICX network devices.
notes:
- Tested against ICX 10.1.
- For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html).
options:
interfaces:
description:
- specify interfaces
suboptions:
name:
description:
- List of ethernet ports to enable lldp. To add a range of ports use 'to' keyword. See the example.
type: list
state:
description:
- State of lldp configuration for interfaces
type: str
choices: ['present', 'absent', 'enabled', 'disabled']
type: list
check_running_config:
description:
- Check running configuration. This can be set as environment variable.
Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter.
type: bool
default: yes
state:
description:
- Enables the receipt and transmission of Link Layer Discovery Protocol (LLDP) globally.
type: str
choices: ['present', 'absent', 'enabled', 'disabled']
"""
EXAMPLES = """
- name: Disable LLDP
icx_lldp:
state: absent
- name: Enable LLDP
icx_lldp:
state: present
- name: Disable LLDP on ports 1/1/1 - 1/1/10, 1/1/20
icx_lldp:
interfaces:
- name:
- ethernet 1/1/1 to 1/1/10
- ethernet 1/1/20
state: absent
state: present
- name: Enable LLDP on ports 1/1/5 - 1/1/10
icx_lldp:
interfaces:
- name:
- ethernet 1/1/1 to 1/1/10
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- lldp run
- no lldp run
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.network.icx.icx import load_config, run_commands
def has_lldp(module):
run_commands(module, ['skip'])
output = run_commands(module, ['show lldp'])
is_lldp_enable = False
if len(output) > 0 and "LLDP is not running" not in output[0]:
is_lldp_enable = True
return is_lldp_enable
def map_obj_to_commands(module, commands):
interfaces = module.params.get('interfaces')
for item in interfaces:
state = item.get('state')
if state == 'present':
for port in item.get('name'):
if 'all' in port:
module.fail_json(msg='cannot enable on all the ports')
else:
commands.append('lldp enable ports {0}'.format(str(port)))
elif state == 'absent':
for port in item.get('name'):
if 'all' in port:
module.fail_json(msg='cannot enable on all the ports')
else:
commands.append('no lldp enable ports {0}'.format(str(port)))
def main():
""" main entry point for module execution
"""
interfaces_spec = dict(
name=dict(type='list'),
state=dict(choices=['present', 'absent',
'enabled', 'disabled'])
)
argument_spec = dict(
interfaces=dict(type='list', elements='dict', options=interfaces_spec),
state=dict(choices=['present', 'absent',
'enabled', 'disabled']),
check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG']))
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
if module.params['check_running_config'] is False:
HAS_LLDP = None
else:
HAS_LLDP = has_lldp(module)
commands = []
state = module.params['state']
if state is None:
if HAS_LLDP:
map_obj_to_commands(module, commands)
else:
module.fail_json(msg='LLDP is not running')
else:
if state == 'absent' and HAS_LLDP is None:
commands.append('no lldp run')
if state == 'absent' and HAS_LLDP:
commands.append('no lldp run')
elif state == 'present':
if not HAS_LLDP:
commands.append('lldp run')
if module.params.get('interfaces'):
map_obj_to_commands(module, commands)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,230,196,780,891,398,700 | 27.902174 | 127 | 0.604739 | false |
mantidproject/mantid | Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/PolDiffILLReductionTest.py | 3 | 7295 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.api import MatrixWorkspace, WorkspaceGroup, Run
from mantid.simpleapi import config, mtd, PolDiffILLReduction
from mantid.geometry import Instrument
class PolDiffILLReductionTest(unittest.TestCase):
_facility = None
_instrument = None
@classmethod
def setUpClass(cls):
config.appendDataSearchSubDir('ILL/D7/')
def setUp(self):
self._facility = config['default.facility']
self._instrument = config['default.instrument']
config['default.facility'] = 'ILL'
config['default.instrument'] = 'D7'
def tearDown(self):
if self._facility:
config['default.facility'] = self._facility
if self._instrument:
config['default.instrument'] = self._instrument
mtd.clear()
def test_absorber_transmission(self):
PolDiffILLReduction(Run='396991', ProcessAs='BeamWithCadmium', OutputWorkspace='cadmium_ws')
self._check_output(mtd['cadmium_ws'], 1, 1, 1, 'Wavelength', 'Wavelength', 'Spectrum', 'Label')
self._check_process_flag(mtd['cadmium_ws'], 'Cadmium')
self.assertAlmostEqual(mtd['cadmium_ws_1'].readY(0)[0], 116, delta=1)
def test_beam(self):
PolDiffILLReduction(Run='396983', ProcessAs='EmptyBeam', OutputWorkspace='beam_ws')
self._check_output(mtd['beam_ws'], 1, 1, 1, 'Wavelength', 'Wavelength', 'Spectrum', 'Label')
self._check_process_flag(mtd['beam_ws'], 'Beam')
self.assertAlmostEqual(mtd['beam_ws_1'].readY(0)[0], 10769, delta=1)
def test_transmission(self):
PolDiffILLReduction(Run='396983', ProcessAs='EmptyBeam', OutputWorkspace='beam_ws')
PolDiffILLReduction(Run='396991', ProcessAs='BeamWithCadmium', OutputWorkspace='cadmium_ws')
PolDiffILLReduction(Run='396985', ProcessAs='Transmission', OutputWorkspace='quartz_transmission',
CadmiumTransmissionInputWorkspace='cadmium_ws_1', BeamInputWorkspace='beam_ws_1',)
self._check_output(mtd['quartz_transmission'], 1, 1, 1, 'Wavelength', 'Wavelength', 'Spectrum', 'Label')
self.assertAlmostEqual(mtd['quartz_transmission_1'].readY(0)[0], 0.692, delta=1e-3)
self._check_process_flag(mtd['quartz_transmission'], 'Transmission')
def test_absorber(self):
PolDiffILLReduction(Run='396928', ProcessAs='Cadmium', OutputWorkspace='absorber_ws')
self._check_output(mtd['absorber_ws'], 1, 132, 6, 'Wavelength', 'Wavelength', 'Spectrum', 'Label')
self._check_process_flag(mtd['absorber_ws'], 'Cadmium')
def test_container(self):
PolDiffILLReduction(Run='396917', ProcessAs='Empty', OutputWorkspace='container_ws')
self._check_output(mtd['container_ws'], 1, 132, 6, 'Wavelength', 'Wavelength', 'Spectrum', 'Label')
self._check_process_flag(mtd['container_ws'], 'Empty')
def test_quartz(self):
PolDiffILLReduction(Run='396983', ProcessAs='EmptyBeam', OutputWorkspace='beam_ws')
PolDiffILLReduction(Run='396985', ProcessAs='Transmission', OutputWorkspace='quartz_transmission',
BeamInputWorkspace='beam_ws_1')
PolDiffILLReduction(Run='396939', ProcessAs='Quartz', TransmissionInputWorkspace='quartz_transmission_1',
OutputTreatment='Average', OutputWorkspace='quartz')
self._check_output(mtd['quartz'], 1, 132, 6, 'Wavelength', 'Wavelength', 'Spectrum', 'Label')
self._check_process_flag(mtd['quartz'], 'Quartz')
def test_vanadium(self):
sampleProperties = {'FormulaUnits': 1, 'SampleMass': 8.54, 'FormulaUnitMass': 50.94}
PolDiffILLReduction(Run='396993', ProcessAs='Vanadium', OutputWorkspace='vanadium',
SampleAndEnvironmentProperties=sampleProperties,
OutputTreatment='Individual')
self._check_output(mtd['vanadium'], 1, 132, 6, 'Wavelength', 'Wavelength', 'Spectrum', 'Label')
self._check_process_flag(mtd['vanadium'], 'Vanadium')
def test_vanadium_annulus(self):
PolDiffILLReduction(Run='396917', ProcessAs='Empty', OutputWorkspace='container_ws')
sampleProperties = {'FormulaUnits': 1, 'SampleChemicalFormula': 'V', 'SampleMass': 8.54, 'FormulaUnitMass': 50.94,
'SampleInnerRadius': 2, 'SampleOuterRadius': 2.5, 'Height': 2,
'BeamWidth': 2.6, 'BeamHeight': 2.6, 'SampleDensity': 1,
'ContainerChemicalFormula': 'Al', 'ContainerDensity': 2.7,
'ContainerInnerRadius': 0.1, 'ContainerOuterRadius': 2.51, 'EventsPerPoint':1000}
PolDiffILLReduction(Run='396993', ProcessAs='Vanadium', OutputWorkspace='vanadium_annulus',
EmptyInputWorkspace='container_ws',
SampleAndEnvironmentProperties=sampleProperties,
SelfAttenuationMethod='MonteCarlo',
SampleGeometry='Annulus',
OutputTreatment='Individual')
self._check_output(mtd['vanadium_annulus'], 1, 132, 6, 'Wavelength', 'Wavelength', 'Spectrum', 'Label')
self._check_process_flag(mtd['vanadium_annulus'], 'Vanadium')
def test_sample(self):
sampleProperties = {'FormulaUnits': 1, 'SampleMass': 2.93, 'FormulaUnitMass': 182.56}
PolDiffILLReduction(Run='397004', ProcessAs='Sample', OutputWorkspace='sample',
SampleAndEnvironmentProperties=sampleProperties,
OutputTreatment='Individual')
self._check_output(mtd['sample'], 1, 132, 6, 'Wavelength', 'Wavelength', 'Spectrum', 'Label')
self._check_process_flag(mtd['sample'], 'Sample')
def _check_process_flag(self, ws, value):
self.assertTrue(ws[0].getRun().getLogData('ProcessedAs').value, value)
def _check_output(self, ws, blocksize, spectra, nEntries, x_unit, x_unit_id, y_unit, y_unit_id):
self.assertTrue(ws)
self.assertTrue(isinstance(ws, WorkspaceGroup))
self.assertTrue(ws.getNumberOfEntries(), nEntries)
for entry in ws:
self.assertTrue(isinstance(entry, MatrixWorkspace))
self.assertTrue(entry.isHistogramData())
self.assertTrue(not entry.isDistribution())
self.assertEqual(entry.getAxis(0).getUnit().caption(), x_unit)
self.assertEqual(entry.getAxis(0).getUnit().unitID(), x_unit_id)
self.assertEqual(entry.getAxis(1).getUnit().caption(), y_unit)
self.assertEqual(entry.getAxis(1).getUnit().unitID(), y_unit_id)
self.assertEqual(entry.blocksize(), blocksize)
self.assertEqual(entry.getNumberHistograms(), spectra)
self.assertTrue(isinstance(entry.getInstrument(), Instrument))
self.assertTrue(isinstance(entry.getRun(), Run))
self.assertTrue(entry.getHistory())
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -7,090,283,701,678,494,000 | 55.550388 | 122 | 0.644277 | false |
Unidata/netcdf4-python | test/tst_compression.py | 1 | 5024 | from numpy.random.mtrand import uniform
from netCDF4 import Dataset
from netCDF4.utils import _quantize
from numpy.testing import assert_almost_equal
import os, tempfile, unittest
ndim = 100000
ndim2 = 100
chunk1 = 10; chunk2 = ndim2
nfiles = 7
files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)]
array = uniform(size=(ndim,))
array2 = uniform(size=(ndim,ndim2))
lsd = 3
def write_netcdf(filename,zlib,least_significant_digit,data,dtype='f8',shuffle=False,contiguous=False,\
chunksizes=None,complevel=6,fletcher32=False):
file = Dataset(filename,'w')
file.createDimension('n', ndim)
foo = file.createVariable('data',\
dtype,('n'),zlib=zlib,least_significant_digit=least_significant_digit,\
shuffle=shuffle,contiguous=contiguous,complevel=complevel,fletcher32=fletcher32,chunksizes=chunksizes)
foo[:] = data
file.close()
file = Dataset(filename)
data = file.variables['data'][:]
file.close()
def write_netcdf2(filename,zlib,least_significant_digit,data,dtype='f8',shuffle=False,contiguous=False,\
chunksizes=None,complevel=6,fletcher32=False):
file = Dataset(filename,'w')
file.createDimension('n', ndim)
file.createDimension('n2', ndim2)
foo = file.createVariable('data2',\
dtype,('n','n2'),zlib=zlib,least_significant_digit=least_significant_digit,\
shuffle=shuffle,contiguous=contiguous,complevel=complevel,fletcher32=fletcher32,chunksizes=chunksizes)
foo[:] = data
file.close()
file = Dataset(filename)
data = file.variables['data2'][:]
file.close()
class CompressionTestCase(unittest.TestCase):
def setUp(self):
self.files = files
# no compression
write_netcdf(self.files[0],False,None,array)
# compressed, lossless, no shuffle.
write_netcdf(self.files[1],True,None,array)
# compressed, lossless, with shuffle.
write_netcdf(self.files[2],True,None,array,shuffle=True)
# compressed, lossy, no shuffle.
write_netcdf(self.files[3],True,lsd,array)
# compressed, lossy, with shuffle.
write_netcdf(self.files[4],True,lsd,array,shuffle=True)
# compressed, lossy, with shuffle and fletcher32 checksum.
write_netcdf(self.files[5],True,lsd,array,shuffle=True,fletcher32=True)
# 2-d compressed, lossy, with shuffle and fletcher32 checksum and
# chunksizes.
write_netcdf2(self.files[6],True,lsd,array2,shuffle=True,fletcher32=True,chunksizes=(chunk1,chunk2))
def tearDown(self):
# Remove the temporary files
for file in self.files:
os.remove(file)
def runTest(self):
"""testing zlib and shuffle compression filters"""
uncompressed_size = os.stat(self.files[0]).st_size
# check compressed data.
f = Dataset(self.files[1])
size = os.stat(self.files[1]).st_size
assert_almost_equal(array,f.variables['data'][:])
assert f.variables['data'].filters() == {'zlib':True,'shuffle':False,'complevel':6,'fletcher32':False}
assert(size < 0.95*uncompressed_size)
f.close()
# check compression with shuffle
f = Dataset(self.files[2])
size = os.stat(self.files[2]).st_size
assert_almost_equal(array,f.variables['data'][:])
assert f.variables['data'].filters() == {'zlib':True,'shuffle':True,'complevel':6,'fletcher32':False}
assert(size < 0.85*uncompressed_size)
f.close()
# check lossy compression without shuffle
f = Dataset(self.files[3])
size = os.stat(self.files[3]).st_size
checkarray = _quantize(array,lsd)
assert_almost_equal(checkarray,f.variables['data'][:])
assert(size < 0.27*uncompressed_size)
f.close()
# check lossy compression with shuffle
f = Dataset(self.files[4])
size = os.stat(self.files[4]).st_size
assert_almost_equal(checkarray,f.variables['data'][:])
assert(size < 0.20*uncompressed_size)
size_save = size
f.close()
# check lossy compression with shuffle and fletcher32 checksum.
f = Dataset(self.files[5])
size = os.stat(self.files[5]).st_size
assert_almost_equal(checkarray,f.variables['data'][:])
assert f.variables['data'].filters() == {'zlib':True,'shuffle':True,'complevel':6,'fletcher32':True}
assert(size < 0.20*uncompressed_size)
# should be slightly larger than without fletcher32
assert(size > size_save)
# check chunksizes
f.close()
f = Dataset(self.files[6])
checkarray2 = _quantize(array2,lsd)
assert_almost_equal(checkarray2,f.variables['data2'][:])
assert f.variables['data2'].filters() == {'zlib':True,'shuffle':True,'complevel':6,'fletcher32':True}
assert f.variables['data2'].chunking() == [chunk1,chunk2]
f.close()
if __name__ == '__main__':
unittest.main()
| mit | 3,729,668,542,138,617,300 | 41.940171 | 114 | 0.648686 | false |
thaim/ansible | lib/ansible/plugins/cliconf/edgeswitch.py | 31 | 4643 | #
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
cliconf: edgeswitch
short_description: Use edgeswitch cliconf to run command on EdgeSwitch platform
description:
- This edgeswitch plugin provides low level abstraction apis for
sending and receiving CLI commands from Ubiquiti EdgeSwitch network devices.
version_added: "2.8"
"""
import re
import time
import json
from itertools import chain
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible.module_utils.network.common.config import dumps
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
from ansible.module_utils.common._collections_compat import Mapping
class Cliconf(CliconfBase):
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'edgeswitch'
reply = self.get(command='show version')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Software Version\.+ (.*)', data)
if match:
device_info['network_os_version'] = match.group(1).strip(',')
match = re.search(r'^Machine Model\.+ (.*)', data, re.M)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'System Name\.+ (.*)', data, re.M)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
@enable_mode
def get_config(self, source='running', flags=None):
if source not in ('running', 'startup'):
raise ValueError("fetching configuration from %s is not supported" % source)
if source == 'running':
cmd = 'show running-config '
else:
cmd = 'show startup-config '
if flags:
cmd += ' '.join(to_list(flags))
cmd = cmd.strip()
return self.send_command(cmd)
@enable_mode
def edit_config(self, commands):
resp = {}
results = []
requests = []
self.send_command('configure')
for line in to_list(commands):
if not isinstance(line, Mapping):
line = {'command': line}
cmd = line['command']
if cmd != 'end' and cmd[0] != '!':
results.append(self.send_command(**line))
requests.append(cmd)
self.send_command('end')
resp['request'] = requests
resp['response'] = results
return resp
def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False):
if not command:
raise ValueError('must provide value of command to execute')
if output:
raise ValueError("'output' value %s is not supported for get" % output)
return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
def get_capabilities(self):
result = super(Cliconf, self).get_capabilities()
result['rpc'] += ['run_commands']
return json.dumps(result)
def run_commands(self, commands=None, check_rc=True):
if commands is None:
raise ValueError("'commands' value is required")
responses = list()
for cmd in to_list(commands):
if not isinstance(cmd, Mapping):
cmd = {'command': cmd}
output = cmd.pop('output', None)
if output:
raise ValueError("'output' value %s is not supported for run_commands" % output)
try:
out = self.send_command(**cmd)
except AnsibleConnectionFailure as e:
if check_rc:
raise
out = getattr(e, 'err', e)
responses.append(out)
return responses
| mit | 8,739,103,710,368,553,000 | 31.697183 | 136 | 0.626104 | false |
spcui/tp-qemu | qemu/tests/timedrift_check_with_syscall.py | 3 | 1688 | import os
import logging
from autotest.client.shared import error
from virttest import aexpect
from virttest import data_dir
@error.context_aware
def run(test, params, env):
"""
Time clock offset check test (only for Linux guest):
1) boot guest with '-rtc base=utc,clock=host,driftfix=slew'
2) build binary 'clktest' in guest
3) check clock offset with ./clktest
:param test: QEMU test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment.
"""
build_cmd = params.get("build_cmd", "gcc -lrt clktest.c -o clktest")
test_cmd = params.get("test_cmd", "./clktest")
check_timeout = int(params.get("check_timeout", "600"))
tmp_dir = params.get("tmp_dir", "/tmp")
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
src_dir = os.path.join(data_dir.get_deps_dir(), 'timedrift')
src_file = os.path.join(src_dir, "clktest.c")
dst_file = os.path.join(tmp_dir, "clktest.c")
error.context("transfer '%s' to guest('%s')" % (src_file, dst_file),
logging.info)
vm.copy_files_to(src_file, tmp_dir, timeout=120)
error.context("build binary file 'clktest'", logging.info)
session.cmd(build_cmd)
error.context("check clock offset via `clktest`", logging.info)
logging.info("set check timeout to %s seconds", check_timeout)
try:
session.cmd_output(test_cmd, timeout=check_timeout)
except aexpect.ShellTimeoutError, msg:
if 'Interval is' in msg.output:
raise error.TestFail(msg.output)
pass
| gpl-2.0 | -5,827,452,009,124,347,000 | 34.166667 | 72 | 0.659953 | false |
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/openstackclient/tests/common/test_commandmanager.py | 2 | 3460 | # Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from openstackclient.common import commandmanager
from openstackclient.tests import utils
class FakeCommand(object):
@classmethod
def load(cls):
return cls
def __init__(self):
return
FAKE_CMD_ONE = FakeCommand
FAKE_CMD_TWO = FakeCommand
FAKE_CMD_ALPHA = FakeCommand
FAKE_CMD_BETA = FakeCommand
class FakeCommandManager(commandmanager.CommandManager):
commands = {}
def load_commands(self, namespace):
if namespace == 'test':
self.commands['one'] = FAKE_CMD_ONE
self.commands['two'] = FAKE_CMD_TWO
self.group_list.append(namespace)
elif namespace == 'greek':
self.commands['alpha'] = FAKE_CMD_ALPHA
self.commands['beta'] = FAKE_CMD_BETA
self.group_list.append(namespace)
class TestCommandManager(utils.TestCase):
def test_add_command_group(self):
mgr = FakeCommandManager('test')
# Make sure add_command() still functions
mock_cmd_one = mock.Mock()
mgr.add_command('mock', mock_cmd_one)
cmd_mock, name, args = mgr.find_command(['mock'])
self.assertEqual(mock_cmd_one, cmd_mock)
# Find a command added in initialization
cmd_one, name, args = mgr.find_command(['one'])
self.assertEqual(FAKE_CMD_ONE, cmd_one)
# Load another command group
mgr.add_command_group('greek')
# Find a new command
cmd_alpha, name, args = mgr.find_command(['alpha'])
self.assertEqual(FAKE_CMD_ALPHA, cmd_alpha)
# Ensure that the original commands were not overwritten
cmd_two, name, args = mgr.find_command(['two'])
self.assertEqual(FAKE_CMD_TWO, cmd_two)
def test_get_command_groups(self):
mgr = FakeCommandManager('test')
# Make sure add_command() still functions
mock_cmd_one = mock.Mock()
mgr.add_command('mock', mock_cmd_one)
cmd_mock, name, args = mgr.find_command(['mock'])
self.assertEqual(mock_cmd_one, cmd_mock)
# Load another command group
mgr.add_command_group('greek')
gl = mgr.get_command_groups()
self.assertEqual(['test', 'greek'], gl)
def test_get_command_names(self):
mock_cmd_one = mock.Mock()
mock_cmd_one.name = 'one'
mock_cmd_two = mock.Mock()
mock_cmd_two.name = 'cmd two'
mock_pkg_resources = mock.Mock(
return_value=[mock_cmd_one, mock_cmd_two],
)
with mock.patch(
'pkg_resources.iter_entry_points',
mock_pkg_resources,
) as iter_entry_points:
mgr = commandmanager.CommandManager('test')
assert iter_entry_points.called_once_with('test')
cmds = mgr.get_command_names('test')
self.assertEqual(['one', 'cmd two'], cmds)
| mit | 5,924,673,340,771,548,000 | 31.952381 | 77 | 0.631792 | false |
alaski/nova | nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py | 3 | 6926 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import tarfile
import eventlet
import six
from nova.image import glance
from nova import test
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi.image import vdi_through_dev
@contextlib.contextmanager
def fake_context(result=None):
yield result
class TestDelegatingToCommand(test.NoDBTestCase):
def test_upload_image_is_delegated_to_command(self):
command = self.mox.CreateMock(vdi_through_dev.UploadToGlanceAsRawTgz)
self.mox.StubOutWithMock(vdi_through_dev, 'UploadToGlanceAsRawTgz')
vdi_through_dev.UploadToGlanceAsRawTgz(
'ctx', 'session', 'instance', 'image_id', 'vdis').AndReturn(
command)
command.upload_image().AndReturn('result')
self.mox.ReplayAll()
store = vdi_through_dev.VdiThroughDevStore()
result = store.upload_image(
'ctx', 'session', 'instance', 'image_id', 'vdis')
self.assertEqual('result', result)
class TestUploadToGlanceAsRawTgz(test.NoDBTestCase):
def test_upload_image(self):
store = vdi_through_dev.UploadToGlanceAsRawTgz(
'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
self.mox.StubOutWithMock(store, '_perform_upload')
self.mox.StubOutWithMock(store, '_get_vdi_ref')
self.mox.StubOutWithMock(vdi_through_dev, 'glance')
self.mox.StubOutWithMock(vdi_through_dev, 'vm_utils')
self.mox.StubOutWithMock(vdi_through_dev, 'utils')
store._get_vdi_ref().AndReturn('vdi_ref')
vdi_through_dev.vm_utils.vdi_attached(
'session', 'vdi_ref', read_only=True).AndReturn(
fake_context('dev'))
vdi_through_dev.utils.make_dev_path('dev').AndReturn('devpath')
vdi_through_dev.utils.temporary_chown('devpath').AndReturn(
fake_context())
store._perform_upload('devpath')
self.mox.ReplayAll()
store.upload_image()
def test__perform_upload(self):
producer = self.mox.CreateMock(vdi_through_dev.TarGzProducer)
consumer = self.mox.CreateMock(glance.UpdateGlanceImage)
pool = self.mox.CreateMock(eventlet.GreenPool)
store = vdi_through_dev.UploadToGlanceAsRawTgz(
'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
self.mox.StubOutWithMock(store, '_create_pipe')
self.mox.StubOutWithMock(store, '_get_virtual_size')
self.mox.StubOutWithMock(producer, 'get_metadata')
self.mox.StubOutWithMock(vdi_through_dev, 'TarGzProducer')
self.mox.StubOutWithMock(glance, 'UpdateGlanceImage')
self.mox.StubOutWithMock(vdi_through_dev, 'eventlet')
producer.get_metadata().AndReturn('metadata')
store._get_virtual_size().AndReturn('324')
store._create_pipe().AndReturn(('readfile', 'writefile'))
vdi_through_dev.TarGzProducer(
'devpath', 'writefile', '324', 'disk.raw').AndReturn(
producer)
glance.UpdateGlanceImage('context', 'id', 'metadata',
'readfile').AndReturn(consumer)
vdi_through_dev.eventlet.GreenPool().AndReturn(pool)
pool.spawn(producer.start)
pool.spawn(consumer.start)
pool.waitall()
self.mox.ReplayAll()
store._perform_upload('devpath')
def test__get_vdi_ref(self):
session = self.mox.CreateMock(xenapi_session.XenAPISession)
store = vdi_through_dev.UploadToGlanceAsRawTgz(
'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
session.call_xenapi('VDI.get_by_uuid', 'vdi0').AndReturn('vdi_ref')
self.mox.ReplayAll()
self.assertEqual('vdi_ref', store._get_vdi_ref())
def test__get_virtual_size(self):
session = self.mox.CreateMock(xenapi_session.XenAPISession)
store = vdi_through_dev.UploadToGlanceAsRawTgz(
'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
self.mox.StubOutWithMock(store, '_get_vdi_ref')
store._get_vdi_ref().AndReturn('vdi_ref')
session.call_xenapi('VDI.get_virtual_size', 'vdi_ref')
self.mox.ReplayAll()
store._get_virtual_size()
def test__create_pipe(self):
store = vdi_through_dev.UploadToGlanceAsRawTgz(
'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
self.mox.StubOutWithMock(vdi_through_dev, 'os')
self.mox.StubOutWithMock(vdi_through_dev, 'greenio')
vdi_through_dev.os.pipe().AndReturn(('rpipe', 'wpipe'))
vdi_through_dev.greenio.GreenPipe('rpipe', 'rb', 0).AndReturn('rfile')
vdi_through_dev.greenio.GreenPipe('wpipe', 'wb', 0).AndReturn('wfile')
self.mox.ReplayAll()
result = store._create_pipe()
self.assertEqual(('rfile', 'wfile'), result)
class TestTarGzProducer(test.NoDBTestCase):
def test_constructor(self):
producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
'100', 'fname')
self.assertEqual('devpath', producer.fpath)
self.assertEqual('writefile', producer.output)
self.assertEqual('100', producer.size)
self.assertEqual('writefile', producer.output)
def test_start(self):
outf = six.StringIO()
producer = vdi_through_dev.TarGzProducer('fpath', outf,
'100', 'fname')
tfile = self.mox.CreateMock(tarfile.TarFile)
tinfo = self.mox.CreateMock(tarfile.TarInfo)
inf = self.mox.CreateMock(open)
self.mox.StubOutWithMock(vdi_through_dev, 'tarfile')
self.mox.StubOutWithMock(producer, '_open_file')
vdi_through_dev.tarfile.TarInfo(name='fname').AndReturn(tinfo)
vdi_through_dev.tarfile.open(fileobj=outf, mode='w|gz').AndReturn(
fake_context(tfile))
producer._open_file('fpath', 'rb').AndReturn(fake_context(inf))
tfile.addfile(tinfo, fileobj=inf)
outf.close()
self.mox.ReplayAll()
producer.start()
self.assertEqual(100, tinfo.size)
def test_get_metadata(self):
producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
'100', 'fname')
self.assertEqual({
'disk_format': 'raw',
'container_format': 'tgz'},
producer.get_metadata())
| apache-2.0 | 6,704,188,136,323,142,000 | 36.846995 | 78 | 0.644528 | false |
jhuttner/dx-toolkit | src/python/dxpy/bindings/download_all_inputs.py | 1 | 7615 | # Copyright (C) 2014-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import concurrent.futures
import os
import sys
import multiprocessing
import psutil
import dxpy
from dxpy.utils import file_load_utils
def _create_dirs(idir, dirs):
'''
Create a set of directories, so we could store the input files.
For example, seq1 could be stored under:
/in/seq1/NC_001122.fasta
TODO: this call could fail, we need to report a reasonable error code
Note that we create a directory for every file array, even if
it has zero inputs.
'''
# create the <idir> itself
file_load_utils.ensure_dir(idir)
# create each subdir
for d in dirs:
file_load_utils.ensure_dir(os.path.join(idir, d))
def _download_one_file(file_rec, idir):
src_file = file_rec['src_file_id']
trg_file = os.path.join(idir, file_rec['trg_fname'])
print("downloading file: " + src_file + " to filesystem: " + trg_file)
sys.stdout.flush()
dxpy.download_dxfile(src_file, trg_file)
return file_rec
# Download the files sequentially
# to_download: list of tuples describing files to download
def _sequential_file_download(to_download, idir):
for file_rec in to_download:
_download_one_file(file_rec, idir)
# Download files in parallel
# to_download: list of tuples describing files to download
def _parallel_file_download(to_download, idir, max_num_parallel_downloads):
try:
with concurrent.futures.ThreadPoolExecutor(
max_workers=max_num_parallel_downloads) as executor:
future_files = {executor.submit(_download_one_file, file_rec, idir): file_rec
for file_rec in to_download}
for future in concurrent.futures.as_completed(future_files, timeout=sys.maxint):
file_rec = future_files[future]
try:
future.result()
except Exception:
sys.stderr.write('%r -> %s generated an exception' %
(file_rec['src_file_id'], file_rec['trg_fname']))
raise
except KeyboardInterrupt:
# Call os._exit() in case of KeyboardInterrupt. Otherwise, the atexit registered handler in
# concurrent.futures.thread will run, and issue blocking join() on all worker threads,
# requiring us to listen to events in worker threads in order to enable timely exit in
# response to Ctrl-C.
print("", file=sys.stderr)
os._exit(os.EX_IOERR)
def _gen_helper_dict(filtered_inputs):
'''
Create a dict of values for the downloaded files. This is similar to the variables created
when running a bash app.
'''
file_key_descs, _ignore = file_load_utils.analyze_bash_vars(
file_load_utils.get_input_json_file(), None)
flattened_dict = {}
def add_if_no_collision(key, value, dict_):
if key not in dict_:
dict_[key] = value
for input_ in filtered_inputs:
if input_ not in file_key_descs:
continue
input_var_dict = file_key_descs[input_]
add_if_no_collision(input_ + '_path', input_var_dict["path"], flattened_dict)
add_if_no_collision(input_ + '_name', input_var_dict["basename"], flattened_dict)
add_if_no_collision(input_ + '_prefix', input_var_dict["prefix"], flattened_dict)
return flattened_dict
def _get_num_parallel_threads(max_threads, num_cores, mem_available_mb):
'''
Ensure at least ~1.2 GB memory per thread, see PTFM-18767
'''
return min(max_threads, num_cores, max(int(mem_available_mb/1200), 1))
def download_all_inputs(exclude=None, parallel=False, max_threads=8):
'''
:param exclude: List of input variables that should not be downloaded.
:type exclude: Array of strings
:param parallel: Should we download multiple files in parallel? (default: False)
:type filename: boolean
:param max_threads: If parallel is True, how many threads should be used
to download files? (default: 8)
:type append: int
:returns: dict of lists of strings where each key is the input variable
and each list element is the full path to the file that has
been downloaded.
This function downloads all files that were supplied as inputs to the app.
By convention, if an input parameter "FOO" has value
{"$dnanexus_link": "file-xxxx"}
and filename INPUT.TXT, then the linked file will be downloaded into the
path:
$HOME/in/FOO/INPUT.TXT
If an input is an array of files, then all files will be placed into
numbered subdirectories under a parent directory named for the
input. For example, if the input key is FOO, and the inputs are {A, B,
C}.vcf then, the directory structure will be:
$HOME/in/FOO/0/A.vcf
1/B.vcf
2/C.vcf
Zero padding is used to ensure argument order. For example, if there are
12 input files {A, B, C, D, E, F, G, H, I, J, K, L}.txt, the directory
structure will be:
$HOME/in/FOO/00/A.vcf
...
11/L.vcf
This allows using shell globbing (FOO/*/*.vcf) to get all the files in the input
order and prevents issues with files which have the same filename.'''
# Input directory, where all inputs are downloaded
idir = file_load_utils.get_input_dir()
try:
job_input_file = file_load_utils.get_input_json_file()
dirs, inputs, rest = file_load_utils.get_job_input_filenames(job_input_file)
except IOError:
msg = 'Error: Could not find the input json file: {0}.\n'.format(job_input_file)
msg += ' This function should only be called from within a running job.'
print(msg)
raise
# Exclude directories
dirs_to_create = []
for d in dirs:
if (exclude is None) or (d not in exclude):
dirs_to_create.append(d)
# Create the directory structure, in preparation for download.
# Allows performing the download in parallel.
_create_dirs(idir, dirs_to_create)
# Remove excluded inputs
if exclude:
inputs = file_load_utils.filter_dict(inputs, exclude)
# Convert to a flat list of elements to download
to_download = []
for ival_list in inputs.values():
to_download.extend(ival_list)
# Download the files
if parallel:
total_mem = psutil.virtual_memory().total >> 20 # Total RAM in MB
num_cores = multiprocessing.cpu_count()
max_num_parallel_downloads = _get_num_parallel_threads(max_threads, num_cores, total_mem)
sys.stderr.write("Downloading files using {} threads".format(max_num_parallel_downloads))
_parallel_file_download(to_download, idir, max_num_parallel_downloads)
else:
_sequential_file_download(to_download, idir)
helper_vars = _gen_helper_dict(inputs)
return helper_vars
| apache-2.0 | 1,200,526,464,182,079,200 | 37.266332 | 99 | 0.653447 | false |
meejah/crossbarexamples | iotcookbook/device/edison/tutorial/tutorial2/bridge.py | 3 | 2971 | import sys
import mraa
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import LoopingCall
from autobahn.twisted.wamp import ApplicationSession
from autobahn import wamp
# adjust this for your setup:
LED1_PIN = 11 # O0 (used as digital) on Arduino Tinkershield
BTN1_PIN = 14 # I0 (used as digital) on Arduino Tinkershield
POT1_PIN = 1 # I1 (used as analog) on Arduino Tinkershield
ROUTER = "ws://192.168.1.130:8080/ws"
REALM = "realm1"
BASE_URI = "io.crossbar.demo.edison.tutorial3"
class MyEdisonBridgeSession(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("Session connected")
self._last = None
self._led = False
self._led1 = mraa.Gpio(LED1_PIN)
self._led1.dir(mraa.DIR_OUT)
self._btn1 = mraa.Gpio(BTN1_PIN)
self._btn1.dir(mraa.DIR_IN)
self._pot1 = mraa.Aio(POT1_PIN)
def loop():
values = [self._btn1.read(), self._pot1.read()]
if self._last is None:
self._last = values
changed = True
else:
changed = False
if values[0] != self._last[0]:
changed = True
if abs(values[1] - self._last[1]) > 4:
changed = True
if changed:
print(values)
self.publish("{}.on_sensors".format(BASE_URI), values)
self._last = values
self._loop = LoopingCall(loop)
self._loop.start(0.05)
yield self.register(self)
print("Procedures registered.")
print("Bridge ready!")
def onLeave(self, details):
if self._loop:
self._loop.stop()
self.disconnect()
@wamp.register("{}.get_sensors".format(BASE_URI))
def get_sensor_vals(self):
return self._last
@wamp.register("{}.set_led".format(BASE_URI))
def set_led(self, value):
if value:
if not self._led:
self._led1.write(1)
self._led = True
self.publish("{}.on_led".format(BASE_URI), self._led)
return True
else:
return False
else:
if self._led:
self._led1.write(0)
self._led = False
self.publish("{}.on_led".format(BASE_URI), self._led)
return True
else:
return False
@wamp.register("{}.get_led".format(BASE_URI))
def get_led(self):
return self._led
if __name__ == '__main__':
import sys
from twisted.python import log
log.startLogging(sys.stdout)
from twisted.internet import reactor
print("Using Twisted reactor {0}".format(reactor.__class__))
from autobahn.twisted.wamp import ApplicationRunner
runner = ApplicationRunner(ROUTER, REALM)
runner.run(MyEdisonBridgeSession)
| apache-2.0 | -1,817,776,500,002,434,600 | 27.295238 | 74 | 0.556378 | false |
CrystallineEntity/bulbs | bulbs/components/reply.py | 2 | 1065 | from slugify import slugify
from bulbs.components import db
from bulbs.components.post import format_post
def reply_to_topic(subject, content, topic_id, ip, username):
"""Creates a reply in the specified topic."""
formatted_post = format_post(content)
cursor = db.con.cursor()
cursor.execute("SELECT subcategory_id FROM bulbs_post WHERE id = %s", (topic_id, ))
subcat_id = cursor.fetchone()[0]
cursor.execute("SELECT id FROM bulbs_user WHERE username = %s", (username, ))
user_id = cursor.fetchone()[0]
post_slug = slugify(subject)
cursor.execute("\
INSERT INTO bulbs_Post (subcategory_id, parent_post, title, content, date, user_id, ip, slug) VALUES \
(%s, %s, %s, %s, now(), %s, %s, %s)", (
subcat_id,
topic_id,
subject,
formatted_post,
user_id,
ip,
post_slug
))
cursor.execute("UPDATE bulbs_Post SET latest_reply = now() WHERE id = %s", (topic_id, ))
db.con.commit()
return True | mit | -2,187,456,846,092,222,000 | 29.457143 | 110 | 0.589671 | false |
jserv/codezero | scripts/kernel/generate_kernel_cinfo.py | 3 | 6239 | #! /usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
#
# Codezero -- a microkernel for embedded systems.
#
# Copyright © 2009 B Labs Ltd
#
import os, sys, shelve, glob
from os.path import join
from string import Template
PROJRELROOT = '../..'
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), PROJRELROOT)))
sys.path.append(os.path.abspath("../"))
from scripts.config.projpaths import *
from scripts.config.configuration import *
cinfo_file_start = \
'''/*
* Autogenerated container descriptions
* defined for the current build.
*
* Copyright (C) 2009 Bahadir Balban
*/
#include <l4/generic/container.h>
#include <l4/generic/resource.h>
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>
#include INC_PLAT(platform.h)
#include INC_PLAT(irq.h)
%s
__initdata struct container_info cinfo[] = {
'''
cinfo_file_end = \
'''
};
'''
cinfo_head_start = \
'''
\t[%d] = {
\t.name = "%s",
\t.npagers = 1,
\t.ncaps = %d,
\t.caps = {'''
cinfo_caps_end = \
'''
\t},
'''
cinfo_end = \
'''
\t\t},
\t},
'''
pager_start = \
'''
\t.pager = {
\t\t[0] = {
\t\t\t.start_address = (CONFIG_CONT%(cn)d_START_PC_ADDR),
\t\t\t.pager_lma = __pfn(CONFIG_CONT%(cn)d_PAGER_LOAD_ADDR),
\t\t\t.pager_vma = __pfn(CONFIG_CONT%(cn)d_PAGER_VIRT_ADDR),
\t\t\t.pager_size = __pfn(page_align_up(CONT%(cn)d_PAGER_MAPSIZE)),
\t\t\t.rw_pheader_start = %(rw_pheader_start)s,
\t\t\t.rw_pheader_end = %(rw_pheader_end)s,
\t\t\t.rx_pheader_start = %(rx_pheader_start)s,
\t\t\t.rx_pheader_end = %(rx_pheader_end)s,
\t\t\t.ncaps = %(caps)d,
\t\t\t.caps = {
'''
pager_end = \
'''
\t\t\t},
\t\t},
'''
# These are pager-only. If for container, remove the PAGER part, indent down some tabs.
cap_virtmem = \
'''
\t\t\t[%(capidx)d] = {
\t\t\t\t.target = %(cn)d,
\t\t\t\t.type = CAP_TYPE_MAP_VIRTMEM | CAP_RTYPE_CONTAINER,
\t\t\t\t.access = CAP_MAP_READ | CAP_MAP_WRITE | CAP_MAP_EXEC
\t\t\t\t\t| CAP_MAP_CACHED | CAP_MAP_UNCACHED | CAP_MAP_UNMAP | CAP_MAP_UTCB |
\t\t\t\t\tCAP_CACHE_INVALIDATE | CAP_CACHE_CLEAN,
\t\t\t\t.start = __pfn(CONFIG_CONT%(cn)d_PAGER_VIRT%(vn)d_START),
\t\t\t\t.end = __pfn(CONFIG_CONT%(cn)d_PAGER_VIRT%(vn)d_END),
\t\t\t\t.size = __pfn(CONFIG_CONT%(cn)d_PAGER_VIRT%(vn)d_END - CONFIG_CONT%(cn)d_PAGER_VIRT%(vn)d_START),
\t\t\t},
'''
cap_physmem = \
'''
\t\t\t[%(capidx)d] = {
\t\t\t\t.target = %(cn)d,
\t\t\t\t.type = CAP_TYPE_MAP_PHYSMEM | CAP_RTYPE_CONTAINER,
\t\t\t\t.access = CAP_MAP_READ | CAP_MAP_WRITE | CAP_MAP_EXEC |
\t\t\t\t\tCAP_MAP_CACHED | CAP_MAP_UNCACHED | CAP_MAP_UNMAP | CAP_MAP_UTCB,
\t\t\t\t.start = __pfn(CONFIG_CONT%(cn)d_PAGER_PHYS%(pn)d_START),
\t\t\t\t.end = __pfn(CONFIG_CONT%(cn)d_PAGER_PHYS%(pn)d_END),
\t\t\t\t.size = __pfn(CONFIG_CONT%(cn)d_PAGER_PHYS%(pn)d_END - CONFIG_CONT%(cn)d_PAGER_PHYS%(pn)d_START),
\t\t\t},
'''
pager_ifdefs_todotext = \
'''
/*
* TODO:
* This had to be defined this way because in CML2 there
* is no straightforward way to derive symbols from expressions, even
* if it is stated in the manual that it can be done.
* As a workaround, a ternary expression of (? : ) was tried but this
* complains that type deduction could not be done.
*/'''
# This will be filled after the containers are compiled
# and pager binaries are formed
pager_mapsize = \
'''
#define CONT%d_PAGER_SIZE %s
'''
pager_ifdefs = \
'''
#if defined(CONFIG_CONT%(cn)d_TYPE_LINUX)
#define CONT%(cn)d_PAGER_MAPSIZE \\
(CONT%(cn)d_PAGER_SIZE + CONFIG_CONT%(cn)d_LINUX_ZRELADDR - \\
CONFIG_CONT%(cn)d_LINUX_PHYS_OFFSET)
#else
#define CONT%(cn)d_PAGER_MAPSIZE (CONT%(cn)d_PAGER_SIZE)
#endif
'''
def generate_pager_memory_ifdefs(config, containers):
pager_ifdef_string = ""
linux = 0
for c in containers:
if c.type == "linux":
if linux == 0:
pager_ifdef_string += pager_ifdefs_todotext
linux = 1
pager_ifdef_string += \
pager_mapsize % (c.id, c.pager_size)
pager_ifdef_string += pager_ifdefs % { 'cn' : c.id }
return pager_ifdef_string
def generate_kernel_cinfo(cinfo_path):
config = configuration_retrieve()
containers = config.containers
containers.sort()
print "Generating kernel cinfo..."
#config.config_print()
pager_ifdefs = generate_pager_memory_ifdefs(config, containers)
with open(str(cinfo_path), 'w+') as cinfo_file:
fbody = cinfo_file_start % pager_ifdefs
for c in containers:
for caplist in [c.caplist["CONTAINER"], c.caplist["PAGER"]]:
total_caps = caplist.virt_regions + caplist.phys_regions + len(caplist.caps)
if caplist == c.caplist["CONTAINER"]:
fbody += cinfo_head_start % (c.id, c.name, total_caps)
else:
fbody += pager_start % { 'cn' : c.id, 'caps' : total_caps,
'rw_pheader_start' : hex(c.pager_rw_pheader_start),
'rw_pheader_end' : hex(c.pager_rw_pheader_end),
'rx_pheader_start' : hex(c.pager_rx_pheader_start),
'rx_pheader_end' : hex(c.pager_rx_pheader_end),
}
cap_index = 0
for mem_index in range(caplist.virt_regions):
fbody += cap_virtmem % { 'capidx' : cap_index, 'cn' : c.id, 'vn' : mem_index }
cap_index += 1
for mem_index in range(caplist.phys_regions):
fbody += cap_physmem % { 'capidx' : cap_index, 'cn' : c.id, 'pn' : mem_index }
cap_index += 1
for capkey, capstr in caplist.caps.items():
templ = Template(capstr)
fbody += templ.safe_substitute(idx = cap_index)
cap_index += 1
if caplist == c.caplist["CONTAINER"]:
fbody += cinfo_caps_end
else:
fbody += pager_end
fbody += cinfo_end
fbody += cinfo_file_end
cinfo_file.write(fbody)
if __name__ == "__main__":
if len(sys.argv) > 1:
generate_kernel_cinfo(join(PROJROOT, sys.argv[1]))
else:
generate_kernel_cinfo(KERNEL_CINFO_PATH)
| gpl-3.0 | -7,498,331,285,217,517,000 | 29.429268 | 105 | 0.584162 | false |
ptitdoc/Archive-qubes-core | dom0/qvm-core/qubes.py | 1 | 117287 | #!/usr/bin/python2
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2010 Joanna Rutkowska <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
import sys
import stat
import os
import os.path
import subprocess
import lxml.etree
import xml.parsers.expat
import fcntl
import re
import shutil
import uuid
import time
import warnings
from datetime import datetime
from qmemman_client import QMemmanClient
# Do not use XenAPI or create/read any VM files
# This is for testing only!
dry_run = False
#dry_run = True
if not dry_run:
import xen.lowlevel.xc
import xen.lowlevel.xl
import xen.lowlevel.xs
qubes_guid_path = "/usr/bin/qubes_guid"
qrexec_daemon_path = "/usr/lib/qubes/qrexec_daemon"
qrexec_client_path = "/usr/lib/qubes/qrexec_client"
qubes_clipd_path = "/usr/bin/qclipd"
qubes_base_dir = "/var/lib/qubes"
qubes_appvms_dir = qubes_base_dir + "/appvms"
qubes_templates_dir = qubes_base_dir + "/vm-templates"
qubes_servicevms_dir = qubes_base_dir + "/servicevms"
qubes_store_filename = qubes_base_dir + "/qubes.xml"
qubes_kernels_base_dir = qubes_base_dir + "/vm-kernels"
qubes_max_qid = 254
qubes_max_netid = 254
vm_default_netmask = "255.255.255.0"
default_root_img = "root.img"
default_rootcow_img = "root-cow.img"
default_volatile_img = "volatile.img"
default_clean_volatile_img = "clean-volatile.img.tar"
default_private_img = "private.img"
default_appmenus_templates_subdir = "apps.templates"
default_appmenus_template_templates_subdir = "apps-template.templates"
default_kernels_subdir = "kernels"
default_firewall_conf_file = "firewall.xml"
default_memory = 400
default_kernelopts = ""
default_kernelopts_pcidevs = "iommu=soft swiotlb=4096"
default_hvm_disk_size = 20*1024*1024*1024
default_hvm_private_img_size = 2*1024*1024*1024
default_hvm_memory = 512
config_template_pv = '/usr/share/qubes/vm-template.conf'
config_template_hvm = '/usr/share/qubes/vm-template-hvm.conf'
start_appmenu_template = '/usr/share/qubes/qubes-start.desktop'
qubes_whitelisted_appmenus = 'whitelisted-appmenus.list'
dom0_update_check_interval = 6*3600
updates_stat_file = 'updates.stat'
# how long (in sec) to wait for VMs to shut down
# before killing them (when qvm-run is used with the --wait option)
shutdown_counter_max = 60
# do not allow starting a new AppVM if it would leave Dom0 with less memory than this
dom0_min_memory = 700*1024*1024
# We need this global reference, as each instance of QubesVm
# must be able to ask Dom0 VM about how much memory it currently has...
dom0_vm = None
qubes_appmenu_create_cmd = "/usr/lib/qubes/create_apps_for_appvm.sh"
qubes_appmenu_remove_cmd = "/usr/lib/qubes/remove_appvm_appmenus.sh"
qubes_pciback_cmd = '/usr/lib/qubes/unbind_pci_device.sh'
prepare_volatile_img_cmd = '/usr/lib/qubes/prepare_volatile_img.sh'
yum_proxy_ip = '10.137.255.254'
yum_proxy_port = '8082'
class QubesException (Exception) : pass
if not dry_run:
xc = xen.lowlevel.xc.xc()
xs = xen.lowlevel.xs.xs()
xl_ctx = xen.lowlevel.xl.ctx()
class QubesHost(object):
def __init__(self):
self.physinfo = xc.physinfo()
self.xen_total_mem = long(self.physinfo['total_memory'])
self.xen_no_cpus = self.physinfo['nr_cpus']
# print "QubesHost: total_mem = {0}B".format (self.xen_total_mem)
# print "QubesHost: free_mem = {0}".format (self.get_free_xen_memory())
# print "QubesHost: total_cpus = {0}".format (self.xen_no_cpus)
@property
def memory_total(self):
return self.xen_total_mem
@property
def no_cpus(self):
return self.xen_no_cpus
def get_free_xen_memory(self):
ret = self.physinfo['free_memory']
return long(ret)
# measure cpu usage for all domains at once
    def measure_cpu_usage(self, previous=None, previous_time=None, wait_time=1):
if previous is None:
previous_time = time.time()
previous = {}
info = xc.domain_getinfo(0, qubes_max_qid)
for vm in info:
previous[vm['domid']] = {}
                previous[vm['domid']]['cpu_time'] = vm['cpu_time']/max(vm['online_vcpus'],1)
previous[vm['domid']]['cpu_usage'] = 0
time.sleep(wait_time)
current_time = time.time()
current = {}
info = xc.domain_getinfo(0, qubes_max_qid)
for vm in info:
current[vm['domid']] = {}
current[vm['domid']]['cpu_time'] = vm['cpu_time']/max(vm['online_vcpus'],1)
if vm['domid'] in previous.keys():
current[vm['domid']]['cpu_usage'] = \
float(current[vm['domid']]['cpu_time'] - previous[vm['domid']]['cpu_time']) \
/ long(1000**3) / (current_time-previous_time) * 100
if current[vm['domid']]['cpu_usage'] < 0:
# VM has been rebooted
current[vm['domid']]['cpu_usage'] = 0
else:
current[vm['domid']]['cpu_usage'] = 0
return (current_time, current)
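# Illustrative usage sketch for QubesHost (added for clarity, not part of the
# original file; assumes a running Xen host so xc.physinfo() and
# xc.domain_getinfo() succeed):
#   host = QubesHost()
#   print host.no_cpus, host.memory_total, host.get_free_xen_memory()
#   # CPU usage is a delta between two samples; feed the previous result back
#   # in so consecutive calls measure usage since the last sample:
#   t, usage = host.measure_cpu_usage()
#   t, usage = host.measure_cpu_usage(previous=usage, previous_time=t)
#   # 'usage' maps Xen domids to {'cpu_time': ..., 'cpu_usage': <percent>}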
class QubesVmLabel(object):
def __init__(self, name, index, color = None, icon = None):
self.name = name
self.index = index
self.color = color if color is not None else name
self.icon = icon if icon is not None else name
self.icon_path = "/usr/share/qubes/icons/" + self.icon + ".png"
# Globally defined labels
QubesVmLabels = {
"red" : QubesVmLabel ("red", 1),
"orange" : QubesVmLabel ("orange", 2),
"yellow" : QubesVmLabel ("yellow", 3),
"green" : QubesVmLabel ("green", 4, color="0x5fa05e"),
"gray" : QubesVmLabel ("gray", 5),
"blue" : QubesVmLabel ("blue", 6),
"purple" : QubesVmLabel ("purple", 7, color="0xb83374"),
"black" : QubesVmLabel ("black", 8),
}
QubesDispVmLabels = {
"red" : QubesVmLabel ("red", 1, icon="dispvm-red"),
"orange" : QubesVmLabel ("orange", 2, icon="dispvm-orange"),
"yellow" : QubesVmLabel ("yellow", 3, icon="dispvm-yellow"),
"green" : QubesVmLabel ("green", 4, color="0x5fa05e", icon="dispvm-green"),
"gray" : QubesVmLabel ("gray", 5, icon="dispvm-gray"),
"blue" : QubesVmLabel ("blue", 6, icon="dispvm-blue"),
"purple" : QubesVmLabel ("purple", 7, color="0xb83374", icon="dispvm-purple"),
"black" : QubesVmLabel ("black", 8, icon="dispvm-black"),
}
default_appvm_label = QubesVmLabels["red"]
default_template_label = QubesVmLabels["black"]
default_servicevm_label = QubesVmLabels["red"]
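# For illustration (comment not in the original source): a label bundles a
# name, sort index, tray color and icon, e.g.
#   QubesVmLabels["green"].color == "0x5fa05e"
#   QubesVmLabels["green"].icon_path == "/usr/share/qubes/icons/green.png"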
QubesVmClasses = {}
def register_qubes_vm_class(class_name, vm_class):
global QubesVmClasses
QubesVmClasses[class_name] = vm_class
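# Hypothetical registration example (illustrative only; the real
# register_qubes_vm_class() calls live next to each QubesVm subclass
# definition later in this file):
#   class QubesAppVm(QubesVm):
#       ...
#   register_qubes_vm_class("QubesAppVm", QubesAppVm)
# The collection loader can then instantiate the matching class (in
# load_order) for each registered class name when reading qubes.xml.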
class QubesVm(object):
"""
A representation of one Qubes VM
Only persistent information are stored here, while all the runtime
information, e.g. Xen dom id, etc, are to be retrieved via Xen API
Note that qid is not the same as Xen's domid!
"""
    # The order in which to load this VM type from qubes.xml
load_order = 100
def _get_attrs_config(self):
""" Object attributes for serialization/deserialization
inner dict keys:
- order: initialization order (to keep dependency intact)
attrs without order will be evaluated at the end
- default: default value used when attr not given to object constructor
- attr: set value to this attribute instead of parameter name
- eval: assign result of this expression instead of value directly;
local variable 'value' contains attribute value (or default if it was not given)
- save: use evaluation result as value for XML serialization; only attrs with 'save' key will be saved in XML
- save_skip: if present and evaluates to true, attr will be omitted in XML
- save_attr: save to this XML attribute instead of parameter name
"""
attrs = {
# __qid cannot be accessed by setattr, so must be set manually in __init__
"qid": { "attr": "_qid", "order": 0 },
"name": { "order": 1 },
"dir_path": { "default": None, "order": 2 },
"conf_file": { "eval": 'self.absolute_path(value, self.name + ".conf")', 'order': 3 },
### order >= 10: have base attrs set
"root_img": { "eval": 'self.absolute_path(value, default_root_img)', 'order': 10 },
"private_img": { "eval": 'self.absolute_path(value, default_private_img)', 'order': 10 },
"volatile_img": { "eval": 'self.absolute_path(value, default_volatile_img)', 'order': 10 },
"firewall_conf": { "eval": 'self.absolute_path(value, default_firewall_conf_file)', 'order': 10 },
"installed_by_rpm": { "default": False, 'order': 10 },
"template": { "default": None, 'order': 10 },
### order >= 20: have template set
"uses_default_netvm": { "default": True, 'order': 20 },
"netvm": { "default": None, "attr": "_netvm", 'order': 20 },
"label": { "attr": "_label", "default": QubesVmLabels["red"], 'order': 20,
'xml_deserialize': lambda _x: QubesVmLabels[_x] },
"memory": { "default": default_memory, 'order': 20, "eval": "int(value)" },
"maxmem": { "default": None, 'order': 25, "eval": "int(value) if value else None" },
"pcidevs": { "default": '[]', 'order': 25, "eval": \
'[] if value in ["none", None] else eval(value) if value.find("[") >= 0 else eval("[" + value + "]")' },
            # Internal VM (not shown in qubes-manager, doesn't create appmenus entries)
"internal": { "default": False },
"vcpus": { "default": None },
"uses_default_kernel": { "default": True, 'order': 30 },
"uses_default_kernelopts": { "default": True, 'order': 30 },
"kernel": { "default": None, 'order': 31,
'eval': 'collection.get_default_kernel() if self.uses_default_kernel else value' },
"kernelopts": { "default": "", 'order': 31, "eval": \
'value if not self.uses_default_kernelopts else default_kernelopts_pcidevs if len(self.pcidevs) > 0 else default_kernelopts' },
"mac": { "attr": "_mac", "default": None },
"include_in_backups": { "default": True },
"services": { "default": {}, "eval": "eval(str(value))" },
"debug": { "default": False },
"default_user": { "default": "user" },
"qrexec_timeout": { "default": 60, "eval": "int(value)" },
            ##### Internal attributes - will be overridden in __init__ regardless of args
"appmenus_templates_dir": { "eval": \
'self.dir_path + "/" + default_appmenus_templates_subdir if self.updateable else ' + \
'self.template.appmenus_templates_dir if self.template is not None else None' },
"config_file_template": { "eval": "config_template_pv" },
"icon_path": { "eval": 'self.dir_path + "/icon.png" if self.dir_path is not None else None' },
# used to suppress side effects of clone_attrs
"_do_not_reset_firewall": { "eval": 'False' },
"kernels_dir": { 'eval': 'qubes_kernels_base_dir + "/" + self.kernel if self.kernel is not None else ' + \
# for backward compatibility (or another rare case): kernel=None -> kernel in VM dir
'self.dir_path + "/" + default_kernels_subdir' },
"_start_guid_first": { 'eval': 'False' },
"backup_content" : { 'default': False },
"backup_size" : { 'default': 0, "eval": "int(value)" },
"backup_path" : { 'default': "" },
}
### Mark attrs for XML inclusion
# Simple string attrs
for prop in ['qid', 'name', 'dir_path', 'memory', 'maxmem', 'pcidevs', 'vcpus', 'internal',\
'uses_default_kernel', 'kernel', 'uses_default_kernelopts',\
'kernelopts', 'services', 'installed_by_rpm',\
'uses_default_netvm', 'include_in_backups', 'debug',\
'default_user', 'qrexec_timeout',
'backup_content', 'backup_size', 'backup_path' ]:
attrs[prop]['save'] = 'str(self.%s)' % prop
# Simple paths
for prop in ['conf_file', 'root_img', 'volatile_img', 'private_img']:
attrs[prop]['save'] = 'self.relative_path(self.%s)' % prop
attrs[prop]['save_skip'] = 'self.%s is None' % prop
attrs['mac']['save'] = 'str(self._mac)'
attrs['mac']['save_skip'] = 'self._mac is None'
attrs['netvm']['save'] = 'str(self.netvm.qid) if self.netvm is not None else "none"'
attrs['netvm']['save_attr'] = "netvm_qid"
attrs['template']['save'] = 'str(self.template.qid) if self.template else "none"'
attrs['template']['save_attr'] = "template_qid"
attrs['label']['save'] = 'self.label.name'
return attrs
def __basic_parse_xml_attr(self, value):
if value is None:
return None
if value.lower() == "none":
return None
if value.lower() == "true":
return True
if value.lower() == "false":
return False
if value.isdigit():
return int(value)
return value
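    # e.g. maps "none" -> None, "True" -> True, "42" -> 42;
    # anything else (like "blue") stays a string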
def __init__(self, **kwargs):
collection = None
if 'collection' in kwargs:
collection = kwargs['collection']
else:
raise ValueError("No collection given to QubesVM constructor")
        # Special case for template, because it is given via the "template_qid" XML attribute
if "xml_element" in kwargs and kwargs["xml_element"].get("template_qid"):
template_qid = kwargs["xml_element"].get("template_qid")
if template_qid.lower() != "none":
if int(template_qid) in collection:
kwargs["template"] = collection[int(template_qid)]
else:
raise ValueError("Unknown template with QID %s" % template_qid)
attrs = self._get_attrs_config()
for attr_name in sorted(attrs, key=lambda _x: attrs[_x]['order'] if 'order' in attrs[_x] else 1000):
attr_config = attrs[attr_name]
attr = attr_name
if 'attr' in attr_config:
attr = attr_config['attr']
value = None
if attr_name in kwargs:
value = kwargs[attr_name]
elif 'xml_element' in kwargs and kwargs['xml_element'].get(attr_name) is not None:
if 'xml_deserialize' in attr_config and callable(attr_config['xml_deserialize']):
value = attr_config['xml_deserialize'](kwargs['xml_element'].get(attr_name))
else:
value = self.__basic_parse_xml_attr(kwargs['xml_element'].get(attr_name))
else:
if 'default' in attr_config:
value = attr_config['default']
if 'eval' in attr_config:
setattr(self, attr, eval(attr_config['eval']))
else:
#print "setting %s to %s" % (attr, value)
setattr(self, attr, value)
        # Init private attrs
self.__qid = self._qid
assert self.__qid < qubes_max_qid, "VM id out of bounds!"
assert self.name is not None
if not self.verify_name(self.name):
raise QubesException("Invalid characters in VM name")
if self.netvm is not None:
self.netvm.connected_vms[self.qid] = self
        # Not done in the generic way, to avoid creating QubesHost() too frequently
if self.maxmem is None:
qubes_host = QubesHost()
total_mem_mb = qubes_host.memory_total/1024
self.maxmem = total_mem_mb/2
        # By default allow use of all VCPUs
if self.vcpus is None:
qubes_host = QubesHost()
self.vcpus = qubes_host.no_cpus
        # Always set whether meminfo-writer should be active or not
if 'meminfo-writer' not in self.services:
self.services['meminfo-writer'] = not (len(self.pcidevs) > 0)
        # Additionally force meminfo-writer disabled when the VM has PCI devices
if len(self.pcidevs) > 0:
self.services['meminfo-writer'] = False
        # Some additional checks for template-based VMs
if self.template is not None:
if not self.template.is_template():
print >> sys.stderr, "ERROR: template_qid={0} doesn't point to a valid TemplateVM".\
format(self.template.qid)
return False
self.template.appvms[self.qid] = self
else:
assert self.root_img is not None, "Missing root_img for standalone VM!"
self.xid = -1
self.xid = self.get_xid()
def absolute_path(self, arg, default):
if arg is not None and os.path.isabs(arg):
return arg
else:
return self.dir_path + "/" + (arg if arg is not None else default)
def relative_path(self, arg):
return arg.replace(self.dir_path + '/', '')
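    # e.g. with dir_path "/var/lib/qubes/appvms/work" (illustrative path):
    #   absolute_path("private.img", ...) -> "/var/lib/qubes/appvms/work/private.img"
    #   relative_path("/var/lib/qubes/appvms/work/private.img") -> "private.img"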
@property
def qid(self):
return self.__qid
@property
def label(self):
return self._label
@label.setter
def label(self, new_label):
self._label = new_label
if self.icon_path:
            try:
                os.remove(self.icon_path)
            except OSError:
                pass
os.symlink (new_label.icon_path, self.icon_path)
subprocess.call(['sudo', 'xdg-icon-resource', 'forceupdate'])
@property
def netvm(self):
return self._netvm
    # There is no clean way to call a property setter from the base class, so work around it...
@netvm.setter
def netvm(self, new_netvm):
self._set_netvm(new_netvm)
def _set_netvm(self, new_netvm):
if self.is_running() and new_netvm is not None and not new_netvm.is_running():
raise QubesException("Cannot dynamically attach to stopped NetVM")
if self.netvm is not None:
self.netvm.connected_vms.pop(self.qid)
if self.is_running():
subprocess.call(["xl", "network-detach", self.name, "0"], stderr=subprocess.PIPE)
if hasattr(self.netvm, 'post_vm_net_detach'):
self.netvm.post_vm_net_detach(self)
if new_netvm is None:
if not self._do_not_reset_firewall:
                # Also set the firewall to block all traffic, as discussed in #370
if os.path.exists(self.firewall_conf):
shutil.copy(self.firewall_conf, "%s/backup/%s-firewall-%s.xml"
% (qubes_base_dir, self.name, time.strftime('%Y-%m-%d-%H:%M:%S')))
self.write_firewall_conf({'allow': False, 'allowDns': False,
'allowIcmp': False, 'allowYumProxy': False, 'rules': []})
else:
new_netvm.connected_vms[self.qid]=self
self._netvm = new_netvm
if new_netvm is None:
return
if self.is_running():
# refresh IP, DNS etc
self.create_xenstore_entries()
self.attach_network()
if hasattr(self.netvm, 'post_vm_net_attach'):
self.netvm.post_vm_net_attach(self)
@property
def ip(self):
if self.netvm is not None:
return self.netvm.get_ip_for_vm(self.qid)
else:
return None
@property
def netmask(self):
if self.netvm is not None:
return self.netvm.netmask
else:
return None
@property
def gateway(self):
        # This is the gateway IP for _other_ VMs, so it makes sense only in NetVMs
return None
@property
def secondary_dns(self):
if self.netvm is not None:
return self.netvm.secondary_dns
else:
return None
@property
def vif(self):
if self.xid < 0:
return None
if self.netvm is None:
return None
return "vif{0}.+".format(self.xid)
@property
def mac(self):
if self._mac is not None:
return self._mac
else:
return "00:16:3E:5E:6C:{qid:02X}".format(qid=self.qid)
@mac.setter
def mac(self, new_mac):
self._mac = new_mac
@property
def updateable(self):
return self.template is None
    # Left for compatibility
def is_updateable(self):
return self.updateable
def is_networked(self):
if self.is_netvm():
return True
if self.netvm is not None:
return True
else:
return False
def verify_name(self, name):
return re.match(r"^[a-zA-Z0-9_-]*$", name) is not None
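    # e.g. "work-web_2" passes; "work web" (space) or "web!" ('!') are rejected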
def pre_rename(self, new_name):
self.remove_appmenus()
def set_name(self, name):
if self.is_running():
raise QubesException("Cannot change name of running VM!")
if not self.verify_name(name):
raise QubesException("Invalid characters in VM name")
self.pre_rename(name)
new_conf = "%s/%s.conf" % (self.dir_path, name)
if os.path.exists(self.conf_file):
os.rename(self.conf_file, "%s/%s.conf" % (self.dir_path, name))
old_dirpath = self.dir_path
new_dirpath = os.path.dirname(self.dir_path) + '/' + name
os.rename(old_dirpath, new_dirpath)
self.dir_path = new_dirpath
old_name = self.name
self.name = name
if self.private_img is not None:
self.private_img = self.private_img.replace(old_dirpath, new_dirpath)
if self.root_img is not None:
self.root_img = self.root_img.replace(old_dirpath, new_dirpath)
if self.volatile_img is not None:
self.volatile_img = self.volatile_img.replace(old_dirpath, new_dirpath)
if self.conf_file is not None:
self.conf_file = new_conf.replace(old_dirpath, new_dirpath)
if self.appmenus_templates_dir is not None:
self.appmenus_templates_dir = self.appmenus_templates_dir.replace(old_dirpath, new_dirpath)
if self.icon_path is not None:
self.icon_path = self.icon_path.replace(old_dirpath, new_dirpath)
if hasattr(self, 'kernels_dir') and self.kernels_dir is not None:
self.kernels_dir = self.kernels_dir.replace(old_dirpath, new_dirpath)
self.post_rename(old_name)
def post_rename(self, old_name):
self.create_appmenus(verbose=False)
def is_template(self):
return isinstance(self, QubesTemplateVm)
def is_appvm(self):
return isinstance(self, QubesAppVm)
def is_netvm(self):
return isinstance(self, QubesNetVm)
def is_proxyvm(self):
return isinstance(self, QubesProxyVm)
def is_disposablevm(self):
return isinstance(self, QubesDisposableVm)
def get_xl_dominfo(self):
if dry_run:
return
domains = xl_ctx.list_domains()
for dominfo in domains:
domname = xl_ctx.domid_to_name(dominfo.domid)
if domname == self.name:
return dominfo
return None
def get_xc_dominfo(self):
if dry_run:
return
start_xid = self.xid
if start_xid < 0:
start_xid = 0
try:
domains = xc.domain_getinfo(start_xid, qubes_max_qid)
except xen.lowlevel.xc.Error:
return None
for dominfo in domains:
domname = xl_ctx.domid_to_name(dominfo['domid'])
if domname == self.name:
return dominfo
return None
def get_xid(self):
if dry_run:
return 666
dominfo = self.get_xc_dominfo()
if dominfo:
self.xid = dominfo['domid']
return self.xid
else:
return -1
def get_uuid(self):
dominfo = self.get_xl_dominfo()
if dominfo:
vmuuid = uuid.UUID(''.join('%02x' % b for b in dominfo.uuid))
return vmuuid
else:
return None
def get_mem(self):
if dry_run:
return 666
dominfo = self.get_xc_dominfo()
if dominfo:
return dominfo['mem_kb']
else:
return 0
def get_mem_static_max(self):
if dry_run:
return 666
dominfo = self.get_xc_dominfo()
if dominfo:
return dominfo['maxmem_kb']
else:
return 0
def get_per_cpu_time(self):
if dry_run:
import random
return random.random() * 100
dominfo = self.get_xc_dominfo()
if dominfo:
return dominfo['cpu_time']/dominfo['online_vcpus']
else:
return 0
def get_disk_utilization_root_img(self):
if not os.path.exists(self.root_img):
return 0
return self.get_disk_usage(self.root_img)
def get_root_img_sz(self):
if not os.path.exists(self.root_img):
return 0
return os.path.getsize(self.root_img)
def get_power_state(self):
if dry_run:
return "NA"
dominfo = self.get_xc_dominfo()
if dominfo:
if dominfo['paused']:
return "Paused"
elif dominfo['crashed']:
return "Crashed"
elif dominfo['shutdown']:
return "Halting"
elif dominfo['dying']:
return "Dying"
else:
if not self.is_fully_usable():
return "Transient"
else:
return "Running"
else:
return 'Halted'
return "NA"
def is_guid_running(self):
xid = self.get_xid()
if xid < 0:
return False
if not os.path.exists('/var/run/qubes/guid_running.%d' % xid):
return False
return True
def is_fully_usable(self):
        # A running gui-daemon also implies that the VM is running
if not self.is_guid_running():
return False
        # currently the qrexec daemon doesn't clean up its socket in
        # /var/run/qubes, so one can be left over from some other VM
return True
def is_running(self):
        # in terms of Xen and internal logic, a starting VM counts as running
if self.get_power_state() in ["Running", "Transient", "Halting"]:
return True
else:
return False
def is_paused(self):
if self.get_power_state() == "Paused":
return True
else:
return False
def get_start_time(self):
if not self.is_running():
return None
        vm_uuid = self.get_uuid()
        start_time = xs.read('', "/vm/%s/start_time" % str(vm_uuid))
if start_time != '':
return datetime.fromtimestamp(float(start_time))
else:
return None
def is_outdated(self):
# Makes sense only on VM based on template
if self.template is None:
return False
if not self.is_running():
return False
rootimg_inode = os.stat(self.template.root_img)
try:
rootcow_inode = os.stat(self.template.rootcow_img)
except OSError:
            # The only case when rootcow_img doesn't exist is in the middle of
            # commit_changes, so the VM is outdated right now
return True
current_dmdev = "/dev/mapper/snapshot-{0:x}:{1}-{2:x}:{3}".format(
rootimg_inode[2], rootimg_inode[1],
rootcow_inode[2], rootcow_inode[1])
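        # e.g. root.img on device 0xfd01 with inode 131 and root-cow.img with
        # inode 132 yields "/dev/mapper/snapshot-fd01:131-fd01:132" (illustrative)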
# 51712 (0xCA00) is xvda
# backend node name not available through xenapi :(
used_dmdev = xs.read('', "/local/domain/0/backend/vbd/{0}/51712/node".format(self.get_xid()))
return used_dmdev != current_dmdev
def get_disk_usage(self, file_or_dir):
if not os.path.exists(file_or_dir):
return 0
p = subprocess.Popen (["du", "-s", "--block-size=1", file_or_dir],
stdout=subprocess.PIPE)
result = p.communicate()
m = re.match(r"^(\d+)\s.*", result[0])
sz = int(m.group(1)) if m is not None else 0
return sz
def get_disk_utilization(self):
return self.get_disk_usage(self.dir_path)
def get_disk_utilization_private_img(self):
return self.get_disk_usage(self.private_img)
def get_private_img_sz(self):
if not os.path.exists(self.private_img):
return 0
return os.path.getsize(self.private_img)
def resize_private_img(self, size):
assert size >= self.get_private_img_sz(), "Cannot shrink private.img"
f_private = open (self.private_img, "a+b")
f_private.truncate (size)
f_private.close ()
retcode = 0
if self.is_running():
# find loop device
p = subprocess.Popen (["sudo", "losetup", "--associated", self.private_img],
stdout=subprocess.PIPE)
result = p.communicate()
m = re.match(r"^(/dev/loop\d+):\s", result[0])
if m is None:
raise QubesException("ERROR: Cannot find loop device!")
loop_dev = m.group(1)
# resize loop device
subprocess.check_call(["sudo", "losetup", "--set-capacity", loop_dev])
retcode = self.run("while [ \"`blockdev --getsize64 /dev/xvdb`\" -lt {0} ]; do ".format(size) +
"head /dev/xvdb > /dev/null; sleep 0.2; done; resize2fs /dev/xvdb", user="root", wait=True)
else:
retcode = subprocess.check_call(["sudo", "resize2fs", "-f", self.private_img])
if retcode != 0:
raise QubesException("resize2fs failed")
# FIXME: should be outside of QubesVM?
def get_timezone(self):
# fc18
if os.path.islink('/etc/localtime'):
return '/'.join(os.readlink('/etc/localtime').split('/')[-2:])
# <=fc17
elif os.path.exists('/etc/sysconfig/clock'):
clock_config = open('/etc/sysconfig/clock', "r")
clock_config_lines = clock_config.readlines()
clock_config.close()
zone_re = re.compile(r'^ZONE="(.*)"')
for line in clock_config_lines:
line_match = zone_re.match(line)
if line_match:
return line_match.group(1)
return None
def cleanup_vifs(self):
"""
Xend does not remove vif when backend domain is down, so we must do it
manually
"""
if not self.is_running():
return
dev_basepath = '/local/domain/%d/device/vif' % self.xid
for dev in xs.ls('', dev_basepath):
# check if backend domain is alive
backend_xid = int(xs.read('', '%s/%s/backend-id' % (dev_basepath, dev)))
if xl_ctx.domid_to_name(backend_xid) is not None:
# check if device is still active
if xs.read('', '%s/%s/state' % (dev_basepath, dev)) == '4':
continue
# remove dead device
xs.rm('', '%s/%s' % (dev_basepath, dev))
def create_xenstore_entries(self, xid = None):
if dry_run:
return
if xid is None:
xid = self.xid
domain_path = xs.get_domain_path(xid)
        # Set Xen Store entries with VM networking info:
xs.write('', "{0}/qubes_vm_type".format(domain_path),
self.type)
xs.write('', "{0}/qubes_vm_updateable".format(domain_path),
str(self.updateable))
if self.is_netvm():
xs.write('',
"{0}/qubes_netvm_gateway".format(domain_path),
self.gateway)
xs.write('',
"{0}/qubes_netvm_secondary_dns".format(domain_path),
self.secondary_dns)
xs.write('',
"{0}/qubes_netvm_netmask".format(domain_path),
self.netmask)
xs.write('',
"{0}/qubes_netvm_network".format(domain_path),
self.network)
if self.netvm is not None:
xs.write('', "{0}/qubes_ip".format(domain_path), self.ip)
xs.write('', "{0}/qubes_netmask".format(domain_path),
self.netvm.netmask)
xs.write('', "{0}/qubes_gateway".format(domain_path),
self.netvm.gateway)
xs.write('',
"{0}/qubes_secondary_dns".format(domain_path),
self.netvm.secondary_dns)
tzname = self.get_timezone()
if tzname:
xs.write('',
"{0}/qubes-timezone".format(domain_path),
tzname)
for srv in self.services.keys():
# convert True/False to "1"/"0"
xs.write('', "{0}/qubes-service/{1}".format(domain_path, srv),
str(int(self.services[srv])))
xs.write('',
"{0}/qubes-block-devices".format(domain_path),
'')
xs.write('',
"{0}/qubes-usb-devices".format(domain_path),
'')
xs.write('', "{0}/qubes-debug-mode".format(domain_path),
str(int(self.debug)))
# Fix permissions
xs.set_permissions('', '{0}/device'.format(domain_path),
[{ 'dom': xid }])
xs.set_permissions('', '{0}/memory'.format(domain_path),
[{ 'dom': xid }])
xs.set_permissions('', '{0}/qubes-block-devices'.format(domain_path),
[{ 'dom': xid }])
xs.set_permissions('', '{0}/qubes-usb-devices'.format(domain_path),
[{ 'dom': xid }])
def get_rootdev(self, source_template=None):
if self.template:
return "'script:snapshot:{dir}/root.img:{dir}/root-cow.img,xvda,r',".format(dir=self.template.dir_path)
else:
return "'script:file:{dir}/root.img,xvda,w',".format(dir=self.dir_path)
def get_config_params(self, source_template=None):
args = {}
args['name'] = self.name
if hasattr(self, 'kernels_dir'):
args['kerneldir'] = self.kernels_dir
args['vmdir'] = self.dir_path
args['pcidev'] = str(self.pcidevs).strip('[]')
args['mem'] = str(self.memory)
if self.maxmem < self.memory:
args['mem'] = str(self.maxmem)
args['maxmem'] = str(self.maxmem)
if 'meminfo-writer' in self.services and not self.services['meminfo-writer']:
# If dynamic memory management disabled, set maxmem=mem
args['maxmem'] = args['mem']
args['vcpus'] = str(self.vcpus)
if self.netvm is not None:
args['ip'] = self.ip
args['mac'] = self.mac
args['gateway'] = self.netvm.gateway
args['dns1'] = self.netvm.gateway
args['dns2'] = self.secondary_dns
args['netmask'] = self.netmask
args['netdev'] = "'mac={mac},script=/etc/xen/scripts/vif-route-qubes,ip={ip}".format(ip=self.ip, mac=self.mac)
if self.netvm.qid != 0:
args['netdev'] += ",backend={0}".format(self.netvm.name)
args['netdev'] += "'"
            args['disable_network'] = ''
else:
args['ip'] = ''
args['mac'] = ''
args['gateway'] = ''
args['dns1'] = ''
args['dns2'] = ''
args['netmask'] = ''
args['netdev'] = ''
            args['disable_network'] = '#'
args['rootdev'] = self.get_rootdev(source_template=source_template)
args['privatedev'] = "'script:file:{dir}/private.img,xvdb,w',".format(dir=self.dir_path)
args['volatiledev'] = "'script:file:{dir}/volatile.img,xvdc,w',".format(dir=self.dir_path)
if hasattr(self, 'kernel'):
modulesmode='r'
if self.kernel is None:
modulesmode='w'
args['otherdevs'] = "'script:file:{dir}/modules.img,xvdd,{mode}',".format(dir=self.kernels_dir, mode=modulesmode)
if hasattr(self, 'kernelopts'):
args['kernelopts'] = self.kernelopts
if self.debug:
print >> sys.stderr, "--> Debug mode: adding 'earlyprintk=xen' to kernel opts"
args['kernelopts'] += ' earlyprintk=xen'
return args
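    # Example of the resulting 'netdev' entry for a VM behind a NetVM named
    # "netvm1" (hypothetical values):
    #   'mac=00:16:3E:5E:6C:03,script=/etc/xen/scripts/vif-route-qubes,ip=10.137.1.5,backend=netvm1'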
@property
def uses_custom_config(self):
return self.conf_file != self.absolute_path(self.name + ".conf", None)
def create_config_file(self, file_path = None, source_template = None, prepare_dvm = False):
if file_path is None:
file_path = self.conf_file
if self.uses_custom_config:
return
if source_template is None:
source_template = self.template
f_conf_template = open(self.config_file_template, 'r')
conf_template = f_conf_template.read()
f_conf_template.close()
template_params = self.get_config_params(source_template)
if prepare_dvm:
template_params['name'] = '%NAME%'
template_params['privatedev'] = ''
template_params['netdev'] = re.sub(r"ip=[0-9.]*", "ip=%IP%", template_params['netdev'])
conf_appvm = open(file_path, "w")
conf_appvm.write(conf_template.format(**template_params))
conf_appvm.close()
def create_on_disk(self, verbose, source_template = None):
if source_template is None:
source_template = self.template
assert source_template is not None
if dry_run:
return
if verbose:
print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path)
os.mkdir (self.dir_path)
if verbose:
print >> sys.stderr, "--> Creating the VM config file: {0}".format(self.conf_file)
self.create_config_file(source_template = source_template)
template_priv = source_template.private_img
if verbose:
print >> sys.stderr, "--> Copying the template's private image: {0}".\
format(template_priv)
# We prefer to use Linux's cp, because it nicely handles sparse files
retcode = subprocess.call (["cp", template_priv, self.private_img])
if retcode != 0:
raise IOError ("Error while copying {0} to {1}".\
format(template_priv, self.private_img))
if os.path.exists(source_template.dir_path + '/vm-' + qubes_whitelisted_appmenus):
if verbose:
print >> sys.stderr, "--> Creating default whitelisted apps list: {0}".\
format(self.dir_path + '/' + qubes_whitelisted_appmenus)
shutil.copy(source_template.dir_path + '/vm-' + qubes_whitelisted_appmenus,
self.dir_path + '/' + qubes_whitelisted_appmenus)
if self.updateable:
template_root = source_template.root_img
if verbose:
print >> sys.stderr, "--> Copying the template's root image: {0}".\
format(template_root)
# We prefer to use Linux's cp, because it nicely handles sparse files
retcode = subprocess.call (["cp", template_root, self.root_img])
if retcode != 0:
raise IOError ("Error while copying {0} to {1}".\
format(template_root, self.root_img))
kernels_dir = source_template.kernels_dir
if verbose:
print >> sys.stderr, "--> Copying the kernel (set kernel \"none\" to use it): {0}".\
format(kernels_dir)
os.mkdir (self.dir_path + '/kernels')
for f in ("vmlinuz", "initramfs", "modules.img"):
shutil.copy(kernels_dir + '/' + f, self.dir_path + '/kernels/' + f)
if verbose:
print >> sys.stderr, "--> Copying the template's appmenus templates dir:\n{0} ==>\n{1}".\
format(source_template.appmenus_templates_dir, self.appmenus_templates_dir)
shutil.copytree (source_template.appmenus_templates_dir, self.appmenus_templates_dir)
# Create volatile.img
self.reset_volatile_storage(source_template = source_template, verbose=verbose)
if verbose:
print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, self.label.icon_path)
os.symlink (self.label.icon_path, self.icon_path)
def create_appmenus(self, verbose=False, source_template = None):
if source_template is None:
source_template = self.template
vmtype = None
if self.is_netvm():
vmtype = 'servicevms'
else:
vmtype = 'appvms'
try:
if source_template is not None:
subprocess.check_call ([qubes_appmenu_create_cmd, source_template.appmenus_templates_dir, self.name, vmtype])
elif self.appmenus_templates_dir is not None:
subprocess.check_call ([qubes_appmenu_create_cmd, self.appmenus_templates_dir, self.name, vmtype])
else:
# Only add apps to menu
subprocess.check_call ([qubes_appmenu_create_cmd, "none", self.name, vmtype])
except subprocess.CalledProcessError:
print >> sys.stderr, "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name)
def get_clone_attrs(self):
return ['kernel', 'uses_default_kernel', 'netvm', 'uses_default_netvm', \
'memory', 'maxmem', 'kernelopts', 'uses_default_kernelopts', 'services', 'vcpus', \
'_mac', 'pcidevs', 'include_in_backups', '_label']
def clone_attrs(self, src_vm):
self._do_not_reset_firewall = True
for prop in self.get_clone_attrs():
setattr(self, prop, getattr(src_vm, prop))
self._do_not_reset_firewall = False
def clone_disk_files(self, src_vm, verbose):
if dry_run:
return
if src_vm.is_running():
raise QubesException("Attempt to clone a running VM!")
if verbose:
print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path)
os.mkdir (self.dir_path)
if src_vm.private_img is not None and self.private_img is not None:
if verbose:
print >> sys.stderr, "--> Copying the private image:\n{0} ==>\n{1}".\
format(src_vm.private_img, self.private_img)
# We prefer to use Linux's cp, because it nicely handles sparse files
retcode = subprocess.call (["cp", src_vm.private_img, self.private_img])
if retcode != 0:
raise IOError ("Error while copying {0} to {1}".\
format(src_vm.private_img, self.private_img))
if src_vm.updateable and src_vm.root_img is not None and self.root_img is not None:
if verbose:
print >> sys.stderr, "--> Copying the root image:\n{0} ==>\n{1}".\
format(src_vm.root_img, self.root_img)
# We prefer to use Linux's cp, because it nicely handles sparse files
retcode = subprocess.call (["cp", src_vm.root_img, self.root_img])
if retcode != 0:
raise IOError ("Error while copying {0} to {1}".\
format(src_vm.root_img, self.root_img))
if src_vm.updateable and src_vm.appmenus_templates_dir is not None and self.appmenus_templates_dir is not None:
if verbose:
print >> sys.stderr, "--> Copying the template's appmenus templates dir:\n{0} ==>\n{1}".\
format(src_vm.appmenus_templates_dir, self.appmenus_templates_dir)
shutil.copytree (src_vm.appmenus_templates_dir, self.appmenus_templates_dir)
if os.path.exists(src_vm.dir_path + '/' + qubes_whitelisted_appmenus):
if verbose:
print >> sys.stderr, "--> Copying whitelisted apps list: {0}".\
format(self.dir_path + '/' + qubes_whitelisted_appmenus)
shutil.copy(src_vm.dir_path + '/' + qubes_whitelisted_appmenus,
self.dir_path + '/' + qubes_whitelisted_appmenus)
if src_vm.icon_path is not None and self.icon_path is not None:
if os.path.exists (src_vm.dir_path):
if os.path.islink(src_vm.icon_path):
icon_path = os.readlink(src_vm.icon_path)
if verbose:
print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, icon_path)
os.symlink (icon_path, self.icon_path)
else:
if verbose:
print >> sys.stderr, "--> Copying icon: {0} -> {1}".format(src_vm.icon_path, self.icon_path)
shutil.copy(src_vm.icon_path, self.icon_path)
# Create appmenus
self.create_appmenus(verbose=verbose)
def remove_appmenus(self):
vmtype = None
if self.is_netvm():
vmtype = 'servicevms'
else:
vmtype = 'appvms'
subprocess.check_call ([qubes_appmenu_remove_cmd, self.name, vmtype])
def verify_files(self):
if dry_run:
return
if not os.path.exists (self.dir_path):
raise QubesException (
"VM directory doesn't exist: {0}".\
format(self.dir_path))
if self.updateable and not os.path.exists (self.root_img):
raise QubesException (
"VM root image file doesn't exist: {0}".\
format(self.root_img))
if not os.path.exists (self.private_img):
raise QubesException (
"VM private image file doesn't exist: {0}".\
format(self.private_img))
        if not os.path.exists (self.kernels_dir + '/vmlinuz'):
            raise QubesException (
                "VM kernel does not exist: {0}".\
                format(self.kernels_dir + '/vmlinuz'))
        if not os.path.exists (self.kernels_dir + '/initramfs'):
            raise QubesException (
                "VM initramfs does not exist: {0}".\
                format(self.kernels_dir + '/initramfs'))
        if not os.path.exists (self.kernels_dir + '/modules.img'):
            raise QubesException (
                "VM kernel modules image does not exist: {0}".\
                format(self.kernels_dir + '/modules.img'))
return True
def reset_volatile_storage(self, source_template = None, verbose = False):
assert not self.is_running(), "Attempt to clean volatile image of running VM!"
if source_template is None:
source_template = self.template
        # Only makes sense for a template-based VM
        if source_template is None:
            # For a StandaloneVM create it only if it does not already exist (e.g. after backup-restore)
if not os.path.exists(self.volatile_img):
if verbose:
print >> sys.stderr, "--> Creating volatile image: {0}...".format (self.volatile_img)
f_root = open (self.root_img, "r")
f_root.seek(0, os.SEEK_END)
root_size = f_root.tell()
f_root.close()
subprocess.check_call([prepare_volatile_img_cmd, self.volatile_img, str(root_size / 1024 / 1024)])
return
if verbose:
print >> sys.stderr, "--> Cleaning volatile image: {0}...".format (self.volatile_img)
if dry_run:
return
if os.path.exists (self.volatile_img):
os.remove (self.volatile_img)
retcode = subprocess.call (["tar", "xf", source_template.clean_volatile_img, "-C", self.dir_path])
if retcode != 0:
raise IOError ("Error while unpacking {0} to {1}".\
format(source_template.clean_volatile_img, self.volatile_img))
def remove_from_disk(self):
if dry_run:
return
shutil.rmtree (self.dir_path)
def write_firewall_conf(self, conf):
defaults = self.get_firewall_conf()
for item in defaults.keys():
if item not in conf:
conf[item] = defaults[item]
root = lxml.etree.Element(
"QubesFirwallRules",
policy = "allow" if conf["allow"] else "deny",
dns = "allow" if conf["allowDns"] else "deny",
icmp = "allow" if conf["allowIcmp"] else "deny",
yumProxy = "allow" if conf["allowYumProxy"] else "deny"
)
for rule in conf["rules"]:
# For backward compatibility
if "proto" not in rule:
if rule["portBegin"] is not None and rule["portBegin"] > 0:
rule["proto"] = "tcp"
else:
rule["proto"] = "any"
element = lxml.etree.Element(
"rule",
address=rule["address"],
proto=str(rule["proto"]),
)
if rule["netmask"] is not None and rule["netmask"] != 32:
element.set("netmask", str(rule["netmask"]))
if rule["portBegin"] is not None and rule["portBegin"] > 0:
element.set("port", str(rule["portBegin"]))
if rule["portEnd"] is not None and rule["portEnd"] > 0:
element.set("toport", str(rule["portEnd"]))
root.append(element)
tree = lxml.etree.ElementTree(root)
        try:
            f = open(self.firewall_conf, 'a') # create the file if it does not exist
            f.close()
            with open(self.firewall_conf, 'w') as f:
                fcntl.lockf(f, fcntl.LOCK_EX)
                tree.write(f, encoding="UTF-8", pretty_print=True)
                fcntl.lockf(f, fcntl.LOCK_UN)
except EnvironmentError as err:
print >> sys.stderr, "{0}: save error: {1}".format(
os.path.basename(sys.argv[0]), err)
return False
# Automatically enable/disable 'yum-proxy-setup' service based on allowYumProxy
if conf['allowYumProxy']:
self.services['yum-proxy-setup'] = True
        else:
            if 'yum-proxy-setup' in self.services:
                self.services.pop('yum-proxy-setup')
return True
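    # Sample XML produced by write_firewall_conf() (illustrative values; the
    # historical "QubesFirwallRules" tag spelling is what this code writes):
    #   <QubesFirwallRules policy="deny" dns="allow" icmp="allow" yumProxy="deny">
    #     <rule address="1.2.3.4" proto="tcp" port="443"/>
    #   </QubesFirwallRules>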
def has_firewall(self):
return os.path.exists (self.firewall_conf)
def get_firewall_defaults(self):
return { "rules": list(), "allow": True, "allowDns": True, "allowIcmp": True, "allowYumProxy": False }
def get_firewall_conf(self):
conf = self.get_firewall_defaults()
try:
tree = lxml.etree.parse(self.firewall_conf)
root = tree.getroot()
conf["allow"] = (root.get("policy") == "allow")
conf["allowDns"] = (root.get("dns") == "allow")
conf["allowIcmp"] = (root.get("icmp") == "allow")
conf["allowYumProxy"] = (root.get("yumProxy") == "allow")
for element in root:
rule = {}
attr_list = ("address", "netmask", "proto", "port", "toport")
for attribute in attr_list:
rule[attribute] = element.get(attribute)
if rule["netmask"] is not None:
rule["netmask"] = int(rule["netmask"])
else:
rule["netmask"] = 32
if rule["port"] is not None:
rule["portBegin"] = int(rule["port"])
else:
# backward compatibility
rule["portBegin"] = 0
# For backward compatibility
if rule["proto"] is None:
if rule["portBegin"] > 0:
rule["proto"] = "tcp"
else:
rule["proto"] = "any"
if rule["toport"] is not None:
rule["portEnd"] = int(rule["toport"])
else:
rule["portEnd"] = None
del(rule["port"])
del(rule["toport"])
conf["rules"].append(rule)
except EnvironmentError as err:
return conf
        except (lxml.etree.XMLSyntaxError,
                ValueError, LookupError) as err:
            # lxml signals parse errors via XMLSyntaxError
            print >> sys.stderr, "{0}: load error: {1}".format(
                os.path.basename(sys.argv[0]), err)
return None
return conf
def run(self, command, user = None, verbose = True, autostart = False, notify_function = None, passio = False, passio_popen = False, passio_stderr=False, ignore_stderr=False, localcmd = None, wait = False, gui = True):
"""command should be in form 'cmdline'
When passio_popen=True, popen object with stdout connected to pipe.
When additionally passio_stderr=True, stderr also is connected to pipe.
When ignore_stderr=True, stderr is connected to /dev/null.
"""
if user is None:
user = self.default_user
null = None
if not self.is_running() and not self.is_paused():
if not autostart:
raise QubesException("VM not running")
try:
if notify_function is not None:
notify_function ("info", "Starting the '{0}' VM...".format(self.name))
elif verbose:
print >> sys.stderr, "Starting the VM '{0}'...".format(self.name)
xid = self.start(verbose=verbose, start_guid = gui, notify_function=notify_function)
except (IOError, OSError, QubesException) as err:
raise QubesException("Error while starting the '{0}' VM: {1}".format(self.name, err))
                except MemoryError:
                    raise QubesException("Not enough memory to start '{0}' VM! Close one or more running VMs and try again.".format(self.name))
xid = self.get_xid()
if gui and os.getenv("DISPLAY") is not None and not self.is_guid_running():
self.start_guid(verbose = verbose, notify_function = notify_function)
args = [qrexec_client_path, "-d", str(xid), "%s:%s" % (user, command)]
if localcmd is not None:
args += [ "-l", localcmd]
if passio:
os.execv(qrexec_client_path, args)
exit(1)
call_kwargs = {}
if ignore_stderr:
null = open("/dev/null", "w")
call_kwargs['stderr'] = null
if passio_popen:
popen_kwargs={'stdout': subprocess.PIPE}
popen_kwargs['stdin'] = subprocess.PIPE
if passio_stderr:
popen_kwargs['stderr'] = subprocess.PIPE
else:
popen_kwargs['stderr'] = call_kwargs.get('stderr', None)
p = subprocess.Popen (args, **popen_kwargs)
if null:
null.close()
return p
if not wait:
args += ["-e"]
retcode = subprocess.call(args, **call_kwargs)
if null:
null.close()
return retcode
def attach_network(self, verbose = False, wait = True, netvm = None):
if dry_run:
return
if not self.is_running():
raise QubesException ("VM not running!")
if netvm is None:
netvm = self.netvm
if netvm is None:
raise QubesException ("NetVM not set!")
if netvm.qid != 0:
if not netvm.is_running():
if verbose:
print >> sys.stderr, "--> Starting NetVM {0}...".format(netvm.name)
netvm.start()
xs_path = '/local/domain/%d/device/vif/0/state' % (self.xid)
if xs.read('', xs_path) is not None:
# TODO: check its state and backend state (this can be stale vif after NetVM restart)
if verbose:
print >> sys.stderr, "NOTICE: Network already attached"
return
xm_cmdline = ["/usr/sbin/xl", "network-attach", str(self.xid), "script=/etc/xen/scripts/vif-route-qubes", "ip="+self.ip, "backend="+netvm.name ]
retcode = subprocess.call (xm_cmdline)
if retcode != 0:
print >> sys.stderr, ("WARNING: Cannot attach to network to '{0}'!".format(self.name))
if wait:
tries = 0
while xs.read('', xs_path) != '4':
tries += 1
if tries > 50:
raise QubesException ("Network attach timed out!")
time.sleep(0.2)
def wait_for_session(self, notify_function = None):
#self.run('echo $$ >> /tmp/qubes-session-waiter; [ ! -f /tmp/qubes-session-env ] && exec sleep 365d', ignore_stderr=True, gui=False, wait=True)
        # Note: user root is redefined to SYSTEM in the Windows agent code
p = self.run('QUBESRPC qubes.WaitForSession none', user="root", passio_popen=True, gui=False, wait=True)
p.communicate(input=self.default_user)
retcode = subprocess.call([qubes_clipd_path])
if retcode != 0:
print >> sys.stderr, "ERROR: Cannot start qclipd!"
if notify_function is not None:
notify_function("error", "ERROR: Cannot start the Qubes Clipboard Notifier!")
def start_guid(self, verbose = True, notify_function = None):
if verbose:
print >> sys.stderr, "--> Starting Qubes GUId..."
xid = self.get_xid()
guid_cmd = [qubes_guid_path, "-d", str(xid), "-c", self.label.color, "-i", self.label.icon_path, "-l", str(self.label.index)]
if self.debug:
guid_cmd += ['-v', '-v']
retcode = subprocess.call (guid_cmd)
if (retcode != 0) :
raise QubesException("Cannot start qubes_guid!")
if verbose:
print >> sys.stderr, "--> Waiting for qubes-session..."
self.wait_for_session(notify_function)
def start_qrexec_daemon(self, verbose = False, notify_function = None):
if verbose:
print >> sys.stderr, "--> Starting the qrexec daemon..."
xid = self.get_xid()
        # copy so the timeout setting doesn't leak into dom0's environment
        qrexec_env = os.environ.copy()
        qrexec_env['QREXEC_STARTUP_TIMEOUT'] = str(self.qrexec_timeout)
retcode = subprocess.call ([qrexec_daemon_path, str(xid), self.default_user], env=qrexec_env)
if (retcode != 0) :
self.force_shutdown(xid=xid)
raise OSError ("ERROR: Cannot execute qrexec_daemon!")
def start(self, debug_console = False, verbose = False, preparing_dvm = False, start_guid = True, notify_function = None):
if dry_run:
return
        # Intentionally not using is_running(): this also rules out "Paused", "Crashed" and "Halting"
if self.get_power_state() != "Halted":
raise QubesException ("VM is already running!")
self.verify_files()
if self.netvm is not None:
if self.netvm.qid != 0:
if not self.netvm.is_running():
if verbose:
print >> sys.stderr, "--> Starting NetVM {0}...".format(self.netvm.name)
self.netvm.start(verbose = verbose, start_guid = start_guid, notify_function = notify_function)
self.reset_volatile_storage(verbose=verbose)
if verbose:
print >> sys.stderr, "--> Loading the VM (type = {0})...".format(self.type)
# refresh config file
self.create_config_file()
mem_required = int(self.memory) * 1024 * 1024
qmemman_client = QMemmanClient()
try:
got_memory = qmemman_client.request_memory(mem_required)
except IOError as e:
raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
if not got_memory:
qmemman_client.close()
raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name)
# Bind pci devices to pciback driver
for pci in self.pcidevs:
try:
subprocess.check_call(['sudo', qubes_pciback_cmd, pci])
except subprocess.CalledProcessError:
raise QubesException("Failed to prepare PCI device %s" % pci)
xl_cmdline = ['sudo', '/usr/sbin/xl', 'create', self.conf_file, '-q', '-p']
try:
subprocess.check_call(xl_cmdline)
except:
raise QubesException("Failed to load VM config")
xid = self.get_xid()
self.xid = xid
if preparing_dvm:
self.services['qubes-dvm'] = True
if verbose:
print >> sys.stderr, "--> Setting Xen Store info for the VM..."
self.create_xenstore_entries(xid)
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_reading()
qvm_collection.load()
qvm_collection.unlock_db()
if verbose:
print >> sys.stderr, "--> Updating firewall rules..."
for vm in qvm_collection.values():
if vm.is_proxyvm() and vm.is_running():
vm.write_iptables_xenstore_entry()
if verbose:
print >> sys.stderr, "--> Starting the VM..."
xc.domain_unpause(xid)
        # close() is not really needed, because the descriptor is close-on-exec
        # anyway. The reason to postpone close() is that xl is possibly not done
        # constructing the domain after its main process exits, so we close()
        # only once we know the domain is up - the successful unpause is some
        # indicator of that
qmemman_client.close()
if self._start_guid_first and start_guid and not preparing_dvm and os.path.exists('/var/run/shm.id'):
self.start_guid(verbose=verbose,notify_function=notify_function)
if not preparing_dvm:
self.start_qrexec_daemon(verbose=verbose,notify_function=notify_function)
if not self._start_guid_first and start_guid and not preparing_dvm and os.path.exists('/var/run/shm.id'):
self.start_guid(verbose=verbose,notify_function=notify_function)
if preparing_dvm:
if verbose:
print >> sys.stderr, "--> Preparing config template for DispVM"
self.create_config_file(file_path = self.dir_path + '/dvm.conf', prepare_dvm = True)
# perhaps we should move it before unpause and fork?
# FIXME: this uses obsolete xm api
if debug_console:
from xen.xm import console
if verbose:
print >> sys.stderr, "--> Starting debug console..."
console.execConsole (xid)
return xid
def shutdown(self, force=False, xid = None):
if dry_run:
return
if not self.is_running():
raise QubesException ("VM already stopped!")
subprocess.call (['/usr/sbin/xl', 'shutdown', str(xid) if xid is not None else self.name])
#xc.domain_destroy(self.get_xid())
def force_shutdown(self, xid = None):
if dry_run:
return
if not self.is_running() and not self.is_paused():
raise QubesException ("VM already stopped!")
subprocess.call (['/usr/sbin/xl', 'destroy', str(xid) if xid is not None else self.name])
def pause(self):
if dry_run:
return
xc.domain_pause(self.get_xid())
def unpause(self):
if dry_run:
return
xc.domain_unpause(self.get_xid())
def get_xml_attrs(self):
attrs = {}
attrs_config = self._get_attrs_config()
for attr in attrs_config:
attr_config = attrs_config[attr]
if 'save' in attr_config:
if 'save_skip' in attr_config and eval(attr_config['save_skip']):
continue
if 'save_attr' in attr_config:
attrs[attr_config['save_attr']] = eval(attr_config['save'])
else:
attrs[attr] = eval(attr_config['save'])
return attrs
def create_xml_element(self):
# Compatibility hack (Qubes*VM in type vs Qubes*Vm in XML)...
rx_type = re.compile (r"VM")
attrs = self.get_xml_attrs()
element = lxml.etree.Element(
"Qubes" + rx_type.sub("Vm", self.type),
**attrs)
return element
class QubesTemplateVm(QubesVm):
"""
    A class that represents a TemplateVM. A child of QubesVm.
"""
    # The order in which to load this VM type from qubes.xml
load_order = 50
def _get_attrs_config(self):
attrs_config = super(QubesTemplateVm, self)._get_attrs_config()
attrs_config['dir_path']['eval'] = 'value if value is not None else qubes_templates_dir + "/" + self.name'
attrs_config['label']['default'] = default_template_label
# New attributes
# Image for template changes
attrs_config['rootcow_img'] = { 'eval': 'self.dir_path + "/" + default_rootcow_img' }
# Clean image for root-cow and swap (AppVM side)
attrs_config['clean_volatile_img'] = { 'eval': 'self.dir_path + "/" + default_clean_volatile_img' }
attrs_config['appmenus_templates_dir'] = { 'eval': 'self.dir_path + "/" + default_appmenus_templates_subdir' }
return attrs_config
def __init__(self, **kwargs):
super(QubesTemplateVm, self).__init__(**kwargs)
self.appvms = QubesVmCollection()
@property
def type(self):
return "TemplateVM"
@property
def updateable(self):
return True
def get_firewall_defaults(self):
return { "rules": list(), "allow": False, "allowDns": False, "allowIcmp": False, "allowYumProxy": True }
def get_rootdev(self, source_template=None):
return "'script:origin:{dir}/root.img:{dir}/root-cow.img,xvda,w',".format(dir=self.dir_path)
def clone_disk_files(self, src_vm, verbose):
if dry_run:
return
super(QubesTemplateVm, self).clone_disk_files(src_vm=src_vm, verbose=verbose)
for whitelist in ['/vm-' + qubes_whitelisted_appmenus, '/netvm-' + qubes_whitelisted_appmenus]:
if os.path.exists(src_vm.dir_path + whitelist):
if verbose:
print >> sys.stderr, "--> Copying default whitelisted apps list: {0}".\
format(self.dir_path + whitelist)
shutil.copy(src_vm.dir_path + whitelist,
self.dir_path + whitelist)
if verbose:
print >> sys.stderr, "--> Copying the template's clean volatile image:\n{0} ==>\n{1}".\
format(src_vm.clean_volatile_img, self.clean_volatile_img)
# We prefer to use Linux's cp, because it nicely handles sparse files
retcode = subprocess.call (["cp", src_vm.clean_volatile_img, self.clean_volatile_img])
if retcode != 0:
raise IOError ("Error while copying {0} to {1}".\
format(src_vm.clean_volatile_img, self.clean_volatile_img))
if verbose:
print >> sys.stderr, "--> Copying the template's volatile image:\n{0} ==>\n{1}".\
format(self.clean_volatile_img, self.volatile_img)
# We prefer to use Linux's cp, because it nicely handles sparse files
retcode = subprocess.call (["cp", self.clean_volatile_img, self.volatile_img])
if retcode != 0:
raise IOError ("Error while copying {0} to {1}".\
                format(self.clean_volatile_img, self.volatile_img))
# Create root-cow.img
self.commit_changes(verbose=verbose)
def create_appmenus(self, verbose=False, source_template = None):
if source_template is None:
source_template = self.template
try:
subprocess.check_call ([qubes_appmenu_create_cmd, self.appmenus_templates_dir, self.name, "vm-templates"])
except subprocess.CalledProcessError:
print >> sys.stderr, "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name)
def remove_appmenus(self):
subprocess.check_call ([qubes_appmenu_remove_cmd, self.name, "vm-templates"])
def pre_rename(self, new_name):
self.remove_appmenus()
def post_rename(self, old_name):
self.create_appmenus(verbose=False)
old_dirpath = os.path.dirname(self.dir_path) + '/' + old_name
self.clean_volatile_img = self.clean_volatile_img.replace(old_dirpath, self.dir_path)
self.rootcow_img = self.rootcow_img.replace(old_dirpath, self.dir_path)
def remove_from_disk(self):
if dry_run:
return
self.remove_appmenus()
super(QubesTemplateVm, self).remove_from_disk()
def verify_files(self):
if dry_run:
return
if not os.path.exists (self.dir_path):
raise QubesException (
"VM directory doesn't exist: {0}".\
format(self.dir_path))
if not os.path.exists (self.root_img):
raise QubesException (
"VM root image file doesn't exist: {0}".\
format(self.root_img))
if not os.path.exists (self.private_img):
raise QubesException (
"VM private image file doesn't exist: {0}".\
format(self.private_img))
if not os.path.exists (self.volatile_img):
raise QubesException (
"VM volatile image file doesn't exist: {0}".\
format(self.volatile_img))
if not os.path.exists (self.clean_volatile_img):
raise QubesException (
"Clean VM volatile image file doesn't exist: {0}".\
format(self.clean_volatile_img))
if not os.path.exists (self.kernels_dir):
raise QubesException (
"VM's kernels directory does not exist: {0}".\
format(self.kernels_dir))
return True
def reset_volatile_storage(self, verbose = False):
assert not self.is_running(), "Attempt to clean volatile image of running Template VM!"
if verbose:
print >> sys.stderr, "--> Cleaning volatile image: {0}...".format (self.volatile_img)
if dry_run:
return
if os.path.exists (self.volatile_img):
os.remove (self.volatile_img)
retcode = subprocess.call (["tar", "xf", self.clean_volatile_img, "-C", self.dir_path])
if retcode != 0:
raise IOError ("Error while unpacking {0} to {1}".\
format(self.template.clean_volatile_img, self.volatile_img))
def commit_changes (self, verbose = False):
assert not self.is_running(), "Attempt to commit changes on running Template VM!"
if verbose:
print >> sys.stderr, "--> Commiting template updates... COW: {0}...".format (self.rootcow_img)
if dry_run:
return
if os.path.exists (self.rootcow_img):
os.rename (self.rootcow_img, self.rootcow_img + '.old')
f_cow = open (self.rootcow_img, "w")
f_root = open (self.root_img, "r")
f_root.seek(0, os.SEEK_END)
f_cow.truncate (f_root.tell()) # make empty sparse file of the same size as root.img
f_cow.close ()
f_root.close()
class QubesNetVm(QubesVm):
"""
    A class that represents a NetVM. A child of QubesVm.
"""
    # The order in which to load this VM type from qubes.xml
load_order = 70
def _get_attrs_config(self):
attrs_config = super(QubesNetVm, self)._get_attrs_config()
attrs_config['dir_path']['eval'] = 'value if value is not None else qubes_servicevms_dir + "/" + self.name'
attrs_config['label']['default'] = default_servicevm_label
attrs_config['memory']['default'] = 200
# New attributes
attrs_config['netid'] = { 'save': 'str(self.netid)', 'order': 30,
'eval': 'value if value is not None else collection.get_new_unused_netid()' }
attrs_config['netprefix'] = { 'eval': '"10.137.{0}.".format(self.netid)' }
attrs_config['dispnetprefix'] = { 'eval': '"10.138.{0}.".format(self.netid)' }
        # Don't save the netvm prop
attrs_config['netvm'].pop('save')
attrs_config['uses_default_netvm'].pop('save')
return attrs_config
def __init__(self, **kwargs):
super(QubesNetVm, self).__init__(**kwargs)
self.connected_vms = QubesVmCollection()
self.__network = "10.137.{0}.0".format(self.netid)
self.__netmask = vm_default_netmask
self.__gateway = self.netprefix + "1"
self.__secondary_dns = self.netprefix + "254"
self.__external_ip_allowed_xids = set()
@property
def type(self):
return "NetVM"
@property
def gateway(self):
return self.__gateway
@property
def secondary_dns(self):
return self.__secondary_dns
@property
def netmask(self):
return self.__netmask
@property
def network(self):
return self.__network
def get_ip_for_vm(self, qid):
lo = qid % 253 + 2
assert lo >= 2 and lo <= 254, "Wrong IP address for VM"
return self.netprefix + "{0}".format(lo)
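    # e.g. with netid 1: qid 1 -> "10.137.1.3", qid 2 -> "10.137.1.4"
    # (the qid % 253 mapping wraps around after qid 252)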
def get_ip_for_dispvm(self, dispid):
lo = dispid % 254 + 1
assert lo >= 1 and lo <= 254, "Wrong IP address for VM"
return self.dispnetprefix + "{0}".format(lo)
def create_xenstore_entries(self, xid = None):
if dry_run:
return
if xid is None:
xid = self.xid
super(QubesNetVm, self).create_xenstore_entries(xid)
xs.write('', "/local/domain/{0}/qubes_netvm_external_ip".format(xid), '')
self.update_external_ip_permissions(xid)
def update_external_ip_permissions(self, xid = -1):
if xid < 0:
xid = self.get_xid()
if xid < 0:
return
command = [
"/usr/bin/xenstore-chmod",
"/local/domain/{0}/qubes_netvm_external_ip".format(xid)
]
command.append("n{0}".format(xid))
for id in self.__external_ip_allowed_xids:
command.append("r{0}".format(id))
return subprocess.check_call(command)
def start(self, **kwargs):
if dry_run:
return
xid=super(QubesNetVm, self).start(**kwargs)
        # Connect vifs of already running VMs
for vm in self.connected_vms.values():
if not vm.is_running():
continue
if 'verbose' in kwargs and kwargs['verbose']:
print >> sys.stderr, "--> Attaching network to '{0}'...".format(vm.name)
# Cleanup stale VIFs
vm.cleanup_vifs()
            # force the frontend to forget about this device;
            # the module will actually be loaded back by udev as soon as the network is attached
vm.run("modprobe -r xen-netfront xennet", user="root")
try:
vm.attach_network(wait=False)
except QubesException as ex:
print >> sys.stderr, ("WARNING: Cannot attach to network to '{0}': {1}".format(vm.name, ex))
return xid
def shutdown(self, force=False):
if dry_run:
return
connected_vms = [vm for vm in self.connected_vms.values() if vm.is_running()]
if connected_vms and not force:
raise QubesException("There are other VMs connected to this VM: " + str([vm.name for vm in connected_vms]))
super(QubesNetVm, self).shutdown(force=force)
def add_external_ip_permission(self, xid):
if int(xid) < 0:
return
self.__external_ip_allowed_xids.add(int(xid))
self.update_external_ip_permissions()
def remove_external_ip_permission(self, xid):
self.__external_ip_allowed_xids.discard(int(xid))
self.update_external_ip_permissions()
def create_on_disk(self, verbose, source_template = None):
if dry_run:
return
super(QubesNetVm, self).create_on_disk(verbose, source_template=source_template)
if os.path.exists(source_template.dir_path + '/netvm-' + qubes_whitelisted_appmenus):
if verbose:
print >> sys.stderr, "--> Creating default whitelisted apps list: {0}".\
format(self.dir_path + '/' + qubes_whitelisted_appmenus)
shutil.copy(source_template.dir_path + '/netvm-' + qubes_whitelisted_appmenus,
self.dir_path + '/' + qubes_whitelisted_appmenus)
if not self.internal:
self.create_appmenus (verbose=verbose, source_template=source_template)
def remove_from_disk(self):
if dry_run:
return
if not self.internal:
self.remove_appmenus()
super(QubesNetVm, self).remove_from_disk()
class QubesProxyVm(QubesNetVm):
"""
    A class that represents a ProxyVM (formerly FirewallVM). A child of QubesNetVm.
"""
def _get_attrs_config(self):
attrs_config = super(QubesProxyVm, self)._get_attrs_config()
attrs_config['uses_default_netvm']['eval'] = 'False'
# Save netvm prop again
attrs_config['netvm']['save'] = 'str(self.netvm.qid) if self.netvm is not None else "none"'
return attrs_config
def __init__(self, **kwargs):
super(QubesProxyVm, self).__init__(**kwargs)
self.rules_applied = None
@property
def type(self):
return "ProxyVM"
def _set_netvm(self, new_netvm):
old_netvm = self.netvm
super(QubesProxyVm, self)._set_netvm(new_netvm)
if self.netvm is not None:
self.netvm.add_external_ip_permission(self.get_xid())
self.write_netvm_domid_entry()
if old_netvm is not None:
old_netvm.remove_external_ip_permission(self.get_xid())
def post_vm_net_attach(self, vm):
""" Called after some VM net-attached to this ProxyVm """
self.write_iptables_xenstore_entry()
def post_vm_net_detach(self, vm):
""" Called after some VM net-detached from this ProxyVm """
self.write_iptables_xenstore_entry()
def start(self, **kwargs):
if dry_run:
return
retcode = super(QubesProxyVm, self).start(**kwargs)
if self.netvm is not None:
self.netvm.add_external_ip_permission(self.get_xid())
self.write_netvm_domid_entry()
return retcode
def force_shutdown(self, **kwargs):
if dry_run:
return
if self.netvm is not None:
self.netvm.remove_external_ip_permission(kwargs['xid'] if 'xid' in kwargs else self.get_xid())
super(QubesProxyVm, self).force_shutdown(**kwargs)
def create_xenstore_entries(self, xid = None):
if dry_run:
return
if xid is None:
xid = self.xid
super(QubesProxyVm, self).create_xenstore_entries(xid)
xs.write('', "/local/domain/{0}/qubes_iptables_error".format(xid), '')
xs.set_permissions('', "/local/domain/{0}/qubes_iptables_error".format(xid),
[{ 'dom': xid, 'write': True }])
self.write_iptables_xenstore_entry()
def write_netvm_domid_entry(self, xid = -1):
if not self.is_running():
return
if xid < 0:
xid = self.get_xid()
if self.netvm is None:
xs.write('', "/local/domain/{0}/qubes_netvm_domid".format(xid), '')
else:
xs.write('', "/local/domain/{0}/qubes_netvm_domid".format(xid),
"{0}".format(self.netvm.get_xid()))
def write_iptables_xenstore_entry(self):
xs.rm('', "/local/domain/{0}/qubes_iptables_domainrules".format(self.get_xid()))
iptables = "# Generated by Qubes Core on {0}\n".format(datetime.now().ctime())
iptables += "*filter\n"
iptables += ":INPUT DROP [0:0]\n"
iptables += ":FORWARD DROP [0:0]\n"
iptables += ":OUTPUT ACCEPT [0:0]\n"
# Strict INPUT rules
iptables += "-A INPUT -i vif+ -p udp -m udp --dport 68 -j DROP\n"
iptables += "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT\n"
iptables += "-A INPUT -p icmp -j ACCEPT\n"
iptables += "-A INPUT -i lo -j ACCEPT\n"
iptables += "-A INPUT -j REJECT --reject-with icmp-host-prohibited\n"
iptables += "-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT\n"
# Allow dom0 networking
iptables += "-A FORWARD -i vif0.0 -j ACCEPT\n"
# Deny inter-VMs networking
iptables += "-A FORWARD -i vif+ -o vif+ -j DROP\n"
iptables += "COMMIT\n"
xs.write('', "/local/domain/{0}/qubes_iptables_header".format(self.get_xid()), iptables)
vms = [vm for vm in self.connected_vms.values()]
for vm in vms:
iptables="*filter\n"
conf = vm.get_firewall_conf()
xid = vm.get_xid()
if xid < 0: # VM not active ATM
continue
ip = vm.ip
if ip is None:
continue
            # Anti-spoof rules are added by the vif script (vif-route-qubes); here we trust the IP address
accept_action = "ACCEPT"
reject_action = "REJECT --reject-with icmp-host-prohibited"
if conf["allow"]:
default_action = accept_action
rules_action = reject_action
else:
default_action = reject_action
rules_action = accept_action
for rule in conf["rules"]:
iptables += "-A FORWARD -s {0} -d {1}".format(ip, rule["address"])
if rule["netmask"] != 32:
iptables += "/{0}".format(rule["netmask"])
if rule["proto"] is not None and rule["proto"] != "any":
iptables += " -p {0}".format(rule["proto"])
if rule["portBegin"] is not None and rule["portBegin"] > 0:
iptables += " --dport {0}".format(rule["portBegin"])
if rule["portEnd"] is not None and rule["portEnd"] > rule["portBegin"]:
iptables += ":{0}".format(rule["portEnd"])
iptables += " -j {0}\n".format(rules_action)
if conf["allowDns"] and self.netvm is not None:
                # PREROUTING does DNAT to the NetVM DNSes, so we need self.netvm properties
iptables += "-A FORWARD -s {0} -p udp -d {1} --dport 53 -j ACCEPT\n".format(ip,self.netvm.gateway)
iptables += "-A FORWARD -s {0} -p udp -d {1} --dport 53 -j ACCEPT\n".format(ip,self.netvm.secondary_dns)
if conf["allowIcmp"]:
iptables += "-A FORWARD -s {0} -p icmp -j ACCEPT\n".format(ip)
if conf["allowYumProxy"]:
iptables += "-A FORWARD -s {0} -p tcp -d {1} --dport {2} -j ACCEPT\n".format(ip, yum_proxy_ip, yum_proxy_port)
else:
iptables += "-A FORWARD -s {0} -p tcp -d {1} --dport {2} -j DROP\n".format(ip, yum_proxy_ip, yum_proxy_port)
iptables += "-A FORWARD -s {0} -j {1}\n".format(ip, default_action)
iptables += "COMMIT\n"
xs.write('', "/local/domain/"+str(self.get_xid())+"/qubes_iptables_domainrules/"+str(xid), iptables)
        # no need for an ending "-A FORWARD -j DROP", because the default action is DROP
self.write_netvm_domid_entry()
self.rules_applied = None
xs.write('', "/local/domain/{0}/qubes_iptables".format(self.get_xid()), 'reload')
class QubesDom0NetVm(QubesNetVm):
def __init__(self, **kwargs):
super(QubesDom0NetVm, self).__init__(qid=0, name="dom0", netid=0,
dir_path=None,
private_img = None,
template = None,
label = default_template_label,
**kwargs)
self.xid = 0
def is_running(self):
return True
def get_xid(self):
return 0
def get_power_state(self):
return "Running"
def get_disk_usage(self, file_or_dir):
return 0
def get_disk_utilization(self):
return 0
def get_disk_utilization_private_img(self):
return 0
def get_private_img_sz(self):
return 0
@property
def ip(self):
return "10.137.0.2"
def start(self, **kwargs):
raise QubesException ("Cannot start Dom0 fake domain!")
def get_xl_dominfo(self):
if dry_run:
return
domains = xl_ctx.list_domains()
for dominfo in domains:
if dominfo.domid == 0:
return dominfo
return None
def get_xc_dominfo(self):
if dry_run:
return
domains = xc.domain_getinfo(0, 1)
return domains[0]
def create_xml_element(self):
return None
def verify_files(self):
return True
class QubesDisposableVm(QubesVm):
"""
    A class that represents a DisposableVM. A child of QubesVm.
"""
# In which order load this VM type from qubes.xml
load_order = 120
def _get_attrs_config(self):
attrs_config = super(QubesDisposableVm, self)._get_attrs_config()
# New attributes
attrs_config['dispid'] = { 'save': 'str(self.dispid)' }
return attrs_config
def __init__(self, **kwargs):
super(QubesDisposableVm, self).__init__(dir_path="/nonexistent", **kwargs)
assert self.template is not None, "Missing template for DisposableVM!"
# Use DispVM icon with the same color
if self._label:
self._label = QubesDispVmLabels[self._label.name]
self.icon_path = self._label.icon_path
@property
def type(self):
return "DisposableVM"
@property
def ip(self):
if self.netvm is not None:
return self.netvm.get_ip_for_dispvm(self.dispid)
else:
return None
def get_xml_attrs(self):
# Minimal set - do not inherit rest of attributes
attrs = {}
attrs["qid"] = str(self.qid)
attrs["name"] = self.name
attrs["dispid"] = str(self.dispid)
attrs["template_qid"] = str(self.template.qid)
attrs["label"] = self.label.name
attrs["firewall_conf"] = self.relative_path(self.firewall_conf)
attrs["netvm_qid"] = str(self.netvm.qid) if self.netvm is not None else "none"
return attrs
def verify_files(self):
return True
class QubesAppVm(QubesVm):
"""
A class that represents an AppVM. A child of QubesVm.
"""
def _get_attrs_config(self):
attrs_config = super(QubesAppVm, self)._get_attrs_config()
attrs_config['dir_path']['eval'] = 'value if value is not None else qubes_appvms_dir + "/" + self.name'
return attrs_config
@property
def type(self):
return "AppVM"
def create_on_disk(self, verbose, source_template = None):
if dry_run:
return
super(QubesAppVm, self).create_on_disk(verbose, source_template=source_template)
if not self.internal:
self.create_appmenus (verbose=verbose, source_template=source_template)
def remove_from_disk(self):
if dry_run:
return
self.remove_appmenus()
super(QubesAppVm, self).remove_from_disk()
class QubesHVm(QubesVm):
"""
A class that represents an HVM. A child of QubesVm.
"""
    # FIXME: logically this should inherit from QubesAppVm, but none of its methods
# are useful for HVM
def _get_attrs_config(self):
attrs = super(QubesHVm, self)._get_attrs_config()
attrs.pop('kernel')
attrs.pop('kernels_dir')
attrs.pop('kernelopts')
attrs.pop('uses_default_kernel')
attrs.pop('uses_default_kernelopts')
attrs['dir_path']['eval'] = 'value if value is not None else qubes_appvms_dir + "/" + self.name'
attrs['volatile_img']['eval'] = 'None'
attrs['config_file_template']['eval'] = 'config_template_hvm'
attrs['drive'] = { 'save': 'str(self.drive)' }
attrs['maxmem'].pop('save')
attrs['timezone'] = { 'default': 'localtime', 'save': 'str(self.timezone)' }
attrs['qrexec_installed'] = { 'default': False, 'save': 'str(self.qrexec_installed)' }
attrs['guiagent_installed'] = { 'default' : False, 'save': 'str(self.guiagent_installed)' }
attrs['_start_guid_first']['eval'] = 'True'
attrs['services']['default'] = "{'meminfo-writer': False}"
# only standalone HVM supported for now
attrs['template']['eval'] = 'None'
attrs['memory']['default'] = default_hvm_memory
return attrs
def __init__(self, **kwargs):
super(QubesHVm, self).__init__(**kwargs)
        # The default for meminfo-writer has changed to (the correct) False in
        # the same version that introduced guiagent_installed, so for older VMs
        # with the wrong setting, fix it based on 'guiagent_installed' presence
if "guiagent_installed" not in kwargs and \
                ('xml_element' not in kwargs or kwargs['xml_element'].get('guiagent_installed') is None):
self.services['meminfo-writer'] = False
# HVM normally doesn't support dynamic memory management
if not ('meminfo-writer' in self.services and self.services['meminfo-writer']):
self.maxmem = self.memory
# Disable qemu GUID if the user installed qubes gui agent
if self.guiagent_installed:
self._start_guid_first = False
@property
def type(self):
return "HVM"
def is_appvm(self):
return True
def get_clone_attrs(self):
attrs = super(QubesHVm, self).get_clone_attrs()
attrs.remove('kernel')
attrs.remove('uses_default_kernel')
attrs.remove('kernelopts')
attrs.remove('uses_default_kernelopts')
attrs += [ 'timezone' ]
attrs += [ 'qrexec_installed' ]
attrs += [ 'guiagent_installed' ]
return attrs
def create_on_disk(self, verbose, source_template = None):
if dry_run:
return
if verbose:
print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path)
os.mkdir (self.dir_path)
if verbose:
print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, self.label.icon_path)
os.symlink (self.label.icon_path, self.icon_path)
if verbose:
print >> sys.stderr, "--> Creating appmenus directory: {0}".format(self.appmenus_templates_dir)
os.mkdir (self.appmenus_templates_dir)
shutil.copy (start_appmenu_template, self.appmenus_templates_dir)
if not self.internal:
self.create_appmenus (verbose, source_template=source_template)
self.create_config_file()
# create empty disk
f_root = open(self.root_img, "w")
f_root.truncate(default_hvm_disk_size)
f_root.close()
# create empty private.img
f_private = open(self.private_img, "w")
f_private.truncate(default_hvm_private_img_size)
        f_private.close()
def remove_from_disk(self):
if dry_run:
return
self.remove_appmenus()
super(QubesHVm, self).remove_from_disk()
def get_disk_utilization_private_img(self):
return 0
def get_private_img_sz(self):
return 0
def resize_private_img(self, size):
raise NotImplementedError("HVM has no private.img")
def get_config_params(self, source_template=None):
params = super(QubesHVm, self).get_config_params(source_template=source_template)
params['volatiledev'] = ''
if self.drive:
type_mode = ":cdrom,r"
drive_path = self.drive
# leave empty to use standard syntax in case of dom0
backend_domain = ""
if drive_path.startswith("hd:"):
type_mode = ",w"
drive_path = drive_path[3:]
elif drive_path.startswith("cdrom:"):
type_mode = ":cdrom,r"
drive_path = drive_path[6:]
backend_split = re.match(r"^([a-zA-Z0-9-]*):(.*)", drive_path)
if backend_split:
backend_domain = "," + backend_split.group(1)
drive_path = backend_split.group(2)
# FIXME: os.stat will work only when backend in dom0...
stat_res = None
if backend_domain == "":
stat_res = os.stat(drive_path)
if stat_res and stat.S_ISBLK(stat_res.st_mode):
params['otherdevs'] = "'phy:%s,xvdc%s%s'," % (drive_path, type_mode, backend_domain)
else:
params['otherdevs'] = "'script:file:%s,xvdc%s%s'," % (drive_path, type_mode, backend_domain)
else:
params['otherdevs'] = ''
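        # Illustrative self.drive values accepted by the parsing above (paths
        # and the "backend-vm" name are hypothetical):
        #   "hd:/path/to/disk.img"              -> writable disk from dom0
        #   "cdrom:/path/to/image.iso"          -> read-only cdrom from dom0
        #   "cdrom:backend-vm:/path/image.iso"  -> cdrom served by another VM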
# Disable currently unused private.img - to be enabled when TemplateHVm done
params['privatedev'] = ''
if self.timezone.lower() == 'localtime':
params['localtime'] = '1'
params['timeoffset'] = '0'
elif self.timezone.isdigit():
params['localtime'] = '0'
params['timeoffset'] = self.timezone
else:
print >>sys.stderr, "WARNING: invalid 'timezone' value: %s" % self.timezone
params['localtime'] = '0'
params['timeoffset'] = '0'
return params
def verify_files(self):
if dry_run:
return
if not os.path.exists (self.dir_path):
raise QubesException (
"VM directory doesn't exist: {0}".\
format(self.dir_path))
if self.is_updateable() and not os.path.exists (self.root_img):
raise QubesException (
"VM root image file doesn't exist: {0}".\
format(self.root_img))
if not os.path.exists (self.private_img):
print >>sys.stderr, "WARNING: Creating empty VM private image file: {0}".\
format(self.private_img)
f_private = open(self.private_img, "w")
f_private.truncate(default_hvm_private_img_size)
f_private.close()
return True
def reset_volatile_storage(self, **kwargs):
pass
@property
def vif(self):
if self.xid < 0:
return None
if self.netvm is None:
return None
return "vif{0}.+".format(self.stubdom_xid)
def run(self, command, **kwargs):
if self.qrexec_installed:
if 'gui' in kwargs and kwargs['gui']==False:
command = "nogui:" + command
return super(QubesHVm, self).run(command, **kwargs)
else:
raise QubesException("Needs qrexec agent installed in VM to use this function. See also qvm-prefs.")
@property
def stubdom_xid(self):
if self.xid < 0:
return -1
stubdom_xid_str = xs.read('', '/local/domain/%d/image/device-model-domid' % self.xid)
if stubdom_xid_str is not None:
return int(stubdom_xid_str)
else:
return -1
def start_guid(self, verbose = True, notify_function = None):
        # If the user forces the guiagent, start_guid will mimic a standard QubesVM
if self.guiagent_installed:
super(QubesHVm, self).start_guid(verbose, notify_function)
else:
if verbose:
print >> sys.stderr, "--> Starting Qubes GUId..."
retcode = subprocess.call ([qubes_guid_path, "-d", str(self.stubdom_xid), "-c", self.label.color, "-i", self.label.icon_path, "-l", str(self.label.index)])
if (retcode != 0) :
raise QubesException("Cannot start qubes_guid!")
def start_qrexec_daemon(self, **kwargs):
if self.qrexec_installed:
super(QubesHVm, self).start_qrexec_daemon(**kwargs)
if self._start_guid_first:
if kwargs.get('verbose'):
print >> sys.stderr, "--> Waiting for user '%s' login..." % self.default_user
self.wait_for_session(notify_function=kwargs.get('notify_function', None))
def pause(self):
if dry_run:
return
xc.domain_pause(self.stubdom_xid)
super(QubesHVm, self).pause()
def unpause(self):
if dry_run:
return
xc.domain_unpause(self.stubdom_xid)
super(QubesHVm, self).unpause()
def is_guid_running(self):
        # If the user forces the guiagent, is_guid_running will mimic a standard QubesVM
if self.guiagent_installed:
return super(QubesHVm, self).is_guid_running()
else:
xid = self.stubdom_xid
if xid < 0:
return False
if not os.path.exists('/var/run/qubes/guid_running.%d' % xid):
return False
return True
register_qubes_vm_class("QubesTemplateVm", QubesTemplateVm)
register_qubes_vm_class("QubesNetVm", QubesNetVm)
register_qubes_vm_class("QubesProxyVm", QubesProxyVm)
register_qubes_vm_class("QubesDisposableVm", QubesDisposableVm)
register_qubes_vm_class("QubesAppVm", QubesAppVm)
register_qubes_vm_class("QubesHVm", QubesHVm)
class QubesVmCollection(dict):
"""
A collection of Qubes VMs indexed by Qubes id (qid)
"""
def __init__(self, store_filename=qubes_store_filename):
super(QubesVmCollection, self).__init__()
self.default_netvm_qid = None
self.default_fw_netvm_qid = None
self.default_template_qid = None
self.default_kernel = None
self.updatevm_qid = None
self.qubes_store_filename = store_filename
self.clockvm_qid = None
def values(self):
for qid in self.keys():
yield self[qid]
def items(self):
for qid in self.keys():
yield (qid, self[qid])
def __iter__(self):
for qid in sorted(super(QubesVmCollection, self).keys()):
yield qid
keys = __iter__
def __setitem__(self, key, value):
if key not in self:
return super(QubesVmCollection, self).__setitem__(key, value)
else:
assert False, "Attempt to add VM with qid that already exists in the collection!"
def add_new_vm(self, vm_type, **kwargs):
if vm_type not in QubesVmClasses.keys():
raise ValueError("Unknown VM type: %s" % vm_type)
qid = self.get_new_unused_qid()
vm = QubesVmClasses[vm_type](qid=qid, collection=self, **kwargs)
if not self.verify_new_vm(vm):
raise QubesException("Wrong VM description!")
self[vm.qid]=vm
# make first created NetVM the default one
if self.default_fw_netvm_qid is None and vm.is_netvm():
self.set_default_fw_netvm(vm)
if self.default_netvm_qid is None and vm.is_proxyvm():
self.set_default_netvm(vm)
# make first created TemplateVM the default one
if self.default_template_qid is None and vm.is_template():
self.set_default_template(vm)
# make first created ProxyVM the UpdateVM
if self.updatevm_qid is None and vm.is_proxyvm():
self.set_updatevm_vm(vm)
# by default ClockVM is the first NetVM
if self.clockvm_qid is None and vm.is_netvm():
self.set_clockvm_vm(vm)
return vm
def add_new_appvm(self, name, template,
dir_path = None, conf_file = None,
private_img = None,
label = None):
warnings.warn("Call to deprecated function, use add_new_vm instead",
DeprecationWarning, stacklevel=2)
return self.add_new_vm("QubesAppVm", name=name, template=template,
dir_path=dir_path, conf_file=conf_file,
private_img=private_img,
netvm = self.get_default_netvm(),
kernel = self.get_default_kernel(),
uses_default_kernel = True,
label=label)
def add_new_hvm(self, name, label = None):
warnings.warn("Call to deprecated function, use add_new_vm instead",
DeprecationWarning, stacklevel=2)
return self.add_new_vm("QubesHVm", name=name, label=label)
def add_new_disposablevm(self, name, template, dispid,
label = None, netvm = None):
warnings.warn("Call to deprecated function, use add_new_vm instead",
DeprecationWarning, stacklevel=2)
return self.add_new_vm("QubesDisposableVm", name=name, template=template,
netvm = netvm,
label=label, dispid=dispid)
def add_new_templatevm(self, name,
dir_path = None, conf_file = None,
root_img = None, private_img = None,
installed_by_rpm = True):
warnings.warn("Call to deprecated function, use add_new_vm instead",
DeprecationWarning, stacklevel=2)
return self.add_new_vm("QubesTemplateVm", name=name,
dir_path=dir_path, conf_file=conf_file,
root_img=root_img, private_img=private_img,
installed_by_rpm=installed_by_rpm,
netvm = self.get_default_netvm(),
kernel = self.get_default_kernel(),
uses_default_kernel = True)
def add_new_netvm(self, name, template,
dir_path = None, conf_file = None,
private_img = None, installed_by_rpm = False,
label = None):
warnings.warn("Call to deprecated function, use add_new_vm instead",
DeprecationWarning, stacklevel=2)
return self.add_new_vm("QubesNetVm", name=name, template=template,
label=label,
private_img=private_img, installed_by_rpm=installed_by_rpm,
uses_default_kernel = True,
dir_path=dir_path, conf_file=conf_file)
def add_new_proxyvm(self, name, template,
dir_path = None, conf_file = None,
private_img = None, installed_by_rpm = False,
label = None):
warnings.warn("Call to deprecated function, use add_new_vm instead",
DeprecationWarning, stacklevel=2)
return self.add_new_vm("QubesProxyVm", name=name, template=template,
label=label,
private_img=private_img, installed_by_rpm=installed_by_rpm,
dir_path=dir_path, conf_file=conf_file,
uses_default_kernel = True,
netvm = self.get_default_fw_netvm())
def set_default_template(self, vm):
assert vm.is_template(), "VM {0} is not a TemplateVM!".format(vm.name)
self.default_template_qid = vm.qid
def get_default_template(self):
if self.default_template_qid is None:
return None
else:
return self[self.default_template_qid]
def set_default_netvm(self, vm):
assert vm.is_netvm(), "VM {0} does not provide network!".format(vm.name)
self.default_netvm_qid = vm.qid
def get_default_netvm(self):
if self.default_netvm_qid is None:
return None
else:
return self[self.default_netvm_qid]
def set_default_kernel(self, kernel):
        assert os.path.exists(qubes_kernels_base_dir + '/' + kernel), "Kernel {0} not installed!".format(kernel)
self.default_kernel = kernel
def get_default_kernel(self):
return self.default_kernel
def set_default_fw_netvm(self, vm):
assert vm.is_netvm(), "VM {0} does not provide network!".format(vm.name)
self.default_fw_netvm_qid = vm.qid
def get_default_fw_netvm(self):
if self.default_fw_netvm_qid is None:
return None
else:
return self[self.default_fw_netvm_qid]
def set_updatevm_vm(self, vm):
self.updatevm_qid = vm.qid
def get_updatevm_vm(self):
if self.updatevm_qid is None:
return None
else:
return self[self.updatevm_qid]
def set_clockvm_vm(self, vm):
self.clockvm_qid = vm.qid
def get_clockvm_vm(self):
if self.clockvm_qid is None:
return None
else:
return self[self.clockvm_qid]
def get_vm_by_name(self, name):
for vm in self.values():
if (vm.name == name):
return vm
return None
def get_qid_by_name(self, name):
vm = self.get_vm_by_name(name)
return vm.qid if vm is not None else None
def get_vms_based_on(self, template_qid):
vms = set([vm for vm in self.values()
if (vm.template and vm.template.qid == template_qid)])
return vms
def get_vms_connected_to(self, netvm_qid):
new_vms = [ netvm_qid ]
dependend_vms_qid = []
# Dependency resolving only makes sense on NetVM (or derivative)
if not self[netvm_qid].is_netvm():
return set([])
while len(new_vms) > 0:
cur_vm = new_vms.pop()
for vm in self[cur_vm].connected_vms.values():
if vm.qid not in dependend_vms_qid:
dependend_vms_qid.append(vm.qid)
if vm.is_netvm():
new_vms.append(vm.qid)
vms = [vm for vm in self.values() if vm.qid in dependend_vms_qid]
return vms
def verify_new_vm(self, new_vm):
# Verify that qid is unique
for vm in self.values():
if vm.qid == new_vm.qid:
print >> sys.stderr, "ERROR: The qid={0} is already used by VM '{1}'!".\
format(vm.qid, vm.name)
return False
# Verify that name is unique
for vm in self.values():
if vm.name == new_vm.name:
print >> sys.stderr, "ERROR: The name={0} is already used by other VM with qid='{1}'!".\
format(vm.name, vm.qid)
return False
return True
def get_new_unused_qid(self):
used_ids = set([vm.qid for vm in self.values()])
for id in range (1, qubes_max_qid):
if id not in used_ids:
return id
raise LookupError ("Cannot find unused qid!")
def get_new_unused_netid(self):
used_ids = set([vm.netid for vm in self.values() if vm.is_netvm()])
for id in range (1, qubes_max_netid):
if id not in used_ids:
return id
raise LookupError ("Cannot find unused netid!")
def check_if_storage_exists(self):
try:
f = open (self.qubes_store_filename, 'r')
except IOError:
return False
f.close()
return True
def create_empty_storage(self):
self.qubes_store_file = open (self.qubes_store_filename, 'w')
self.clear()
self.save()
def lock_db_for_reading(self):
self.qubes_store_file = open (self.qubes_store_filename, 'r')
fcntl.lockf (self.qubes_store_file, fcntl.LOCK_SH)
def lock_db_for_writing(self):
self.qubes_store_file = open (self.qubes_store_filename, 'r+')
fcntl.lockf (self.qubes_store_file, fcntl.LOCK_EX)
def unlock_db(self):
fcntl.lockf (self.qubes_store_file, fcntl.LOCK_UN)
self.qubes_store_file.close()
def save(self):
root = lxml.etree.Element(
"QubesVmCollection",
default_template=str(self.default_template_qid) \
if self.default_template_qid is not None else "None",
default_netvm=str(self.default_netvm_qid) \
if self.default_netvm_qid is not None else "None",
default_fw_netvm=str(self.default_fw_netvm_qid) \
if self.default_fw_netvm_qid is not None else "None",
updatevm=str(self.updatevm_qid) \
if self.updatevm_qid is not None else "None",
clockvm=str(self.clockvm_qid) \
if self.clockvm_qid is not None else "None",
default_kernel=str(self.default_kernel) \
if self.default_kernel is not None else "None",
)
for vm in self.values():
element = vm.create_xml_element()
if element is not None:
root.append(element)
tree = lxml.etree.ElementTree(root)
try:
# We need to manually truncate the file, as we open the
# file as "r+" in the lock_db_for_writing() function
self.qubes_store_file.seek (0, os.SEEK_SET)
self.qubes_store_file.truncate()
tree.write(self.qubes_store_file, encoding="UTF-8", pretty_print=True)
except EnvironmentError as err:
print("{0}: export error: {1}".format(
os.path.basename(sys.argv[0]), err))
return False
return True
def set_netvm_dependency(self, element):
kwargs = {}
attr_list = ("qid", "uses_default_netvm", "netvm_qid")
for attribute in attr_list:
kwargs[attribute] = element.get(attribute)
vm = self[int(kwargs["qid"])]
if "uses_default_netvm" not in kwargs:
vm.uses_default_netvm = True
else:
vm.uses_default_netvm = True if kwargs["uses_default_netvm"] == "True" else False
if vm.uses_default_netvm is True:
if vm.is_proxyvm():
netvm = self.get_default_fw_netvm()
else:
netvm = self.get_default_netvm()
kwargs.pop("netvm_qid")
else:
if kwargs["netvm_qid"] == "none" or kwargs["netvm_qid"] is None:
netvm = None
kwargs.pop("netvm_qid")
else:
netvm_qid = int(kwargs.pop("netvm_qid"))
if netvm_qid not in self:
netvm = None
else:
netvm = self[netvm_qid]
# directly set internal attr to not call setters...
vm._netvm = netvm
if netvm:
netvm.connected_vms[vm.qid] = vm
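    # Illustrative qubes.xml element consumed by set_netvm_dependency() above;
    # attribute values are hypothetical (cf. QubesVm.get_xml_attrs()):
    #
    #   <QubesAppVm qid="3" name="work" netvm_qid="2"
    #               uses_default_netvm="True" label="green" ... />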
def load_globals(self, element):
default_template = element.get("default_template")
self.default_template_qid = int(default_template) \
if default_template.lower() != "none" else None
default_netvm = element.get("default_netvm")
if default_netvm is not None:
self.default_netvm_qid = int(default_netvm) \
if default_netvm != "None" else None
#assert self.default_netvm_qid is not None
default_fw_netvm = element.get("default_fw_netvm")
if default_fw_netvm is not None:
self.default_fw_netvm_qid = int(default_fw_netvm) \
if default_fw_netvm != "None" else None
#assert self.default_netvm_qid is not None
updatevm = element.get("updatevm")
if updatevm is not None:
self.updatevm_qid = int(updatevm) \
if updatevm != "None" else None
#assert self.default_netvm_qid is not None
clockvm = element.get("clockvm")
if clockvm is not None:
self.clockvm_qid = int(clockvm) \
if clockvm != "None" else None
self.default_kernel = element.get("default_kernel")
def load(self):
self.clear()
dom0vm = QubesDom0NetVm (collection=self)
self[dom0vm.qid] = dom0vm
self.default_netvm_qid = 0
global dom0_vm
dom0_vm = dom0vm
try:
tree = lxml.etree.parse(self.qubes_store_file)
except (EnvironmentError,
xml.parsers.expat.ExpatError) as err:
print("{0}: import error: {1}".format(
os.path.basename(sys.argv[0]), err))
return False
self.load_globals(tree.getroot())
for (vm_class_name, vm_class) in sorted(QubesVmClasses.items(),
key=lambda _x: _x[1].load_order):
for element in tree.findall(vm_class_name):
try:
vm = vm_class(xml_element=element, collection=self)
self[vm.qid] = vm
except (ValueError, LookupError) as err:
print("{0}: import error ({1}): {2}".format(
os.path.basename(sys.argv[0]), vm_class_name, err))
                raise
# After importing all VMs, set netvm references, in the same order
for (vm_class_name, vm_class) in sorted(QubesVmClasses.items(),
key=lambda _x: _x[1].load_order):
for element in tree.findall(vm_class_name):
try:
self.set_netvm_dependency(element)
except (ValueError, LookupError) as err:
print("{0}: import error2 ({}): {}".format(
os.path.basename(sys.argv[0]), vm_class_name, err))
return False
# if there was no clockvm entry in qubes.xml, try to determine default:
# root of default NetVM chain
if tree.getroot().get("clockvm") is None:
if self.default_netvm_qid is not None:
clockvm = self[self.default_netvm_qid]
# Find root of netvm chain
while clockvm.netvm is not None:
clockvm = clockvm.netvm
self.clockvm_qid = clockvm.qid
# Disable ntpd in ClockVM - to not conflict with ntpdate (both are using 123/udp port)
if self.clockvm_qid is not None:
self[self.clockvm_qid].services['ntpd'] = False
return True
def pop(self, qid):
if self.default_netvm_qid == qid:
self.default_netvm_qid = None
if self.default_fw_netvm_qid == qid:
self.default_fw_netvm_qid = None
if self.clockvm_qid == qid:
self.clockvm_qid = None
if self.updatevm_qid == qid:
self.updatevm_qid = None
if self.default_template_qid == qid:
self.default_template_qid = None
return super(QubesVmCollection, self).pop(qid)
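# Typical QubesVmCollection lifecycle (a sketch, not lifted from any qvm-*
# tool; assumes qubes.xml exists and a default template has been set):
#
#   qvm_collection = QubesVmCollection()
#   qvm_collection.lock_db_for_writing()
#   qvm_collection.load()
#   vm = qvm_collection.add_new_vm("QubesAppVm", name="work",
#                                  template=qvm_collection.get_default_template(),
#                                  label=default_template_label)
#   qvm_collection.save()
#   qvm_collection.unlock_db()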
class QubesDaemonPidfile(object):
def __init__(self, name):
self.name = name
self.path = "/var/run/qubes/" + name + ".pid"
def create_pidfile(self):
f = open (self.path, 'w')
f.write(str(os.getpid()))
f.close()
def pidfile_exists(self):
return os.path.exists(self.path)
def read_pid(self):
f = open (self.path)
pid = f.read ().strip()
f.close()
return int(pid)
def pidfile_is_stale(self):
if not self.pidfile_exists():
return False
# check if the pid file is valid...
proc_path = "/proc/" + str(self.read_pid()) + "/cmdline"
if not os.path.exists (proc_path):
print >> sys.stderr, "Path {0} doesn't exist, assuming stale pidfile.".format(proc_path)
return True
f = open (proc_path)
cmdline = f.read ()
f.close()
# The following doesn't work with python -- one would have to get argv[1] and compare it with self.name...
# if not cmdline.strip().endswith(self.name):
# print >> sys.stderr, "{0} = {1} doesn't seem to point to our process ({2}), assuming stale pidile.".format(proc_path, cmdline, self.name)
# return True
return False # It's a good pidfile
def remove_pidfile(self):
os.remove (self.path)
def __enter__ (self):
# assumes the pidfile doesn't exist -- you should ensure it before opening the context
self.create_pidfile()
def __exit__ (self, exc_type, exc_val, exc_tb):
self.remove_pidfile()
return False
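# Minimal QubesDaemonPidfile usage sketch; the daemon name and main_loop()
# are hypothetical:
#
#   pidfile = QubesDaemonPidfile("qmemman")
#   if pidfile.pidfile_exists():
#       if pidfile.pidfile_is_stale():
#           pidfile.remove_pidfile()
#       else:
#           sys.exit("Daemon already running")
#   with pidfile:
#       main_loop()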
# vim:sw=4:et:
| gpl-2.0 | 6,587,904,509,160,187,000 | 36.293164 | 222 | 0.560292 | false |
SimenB/thefuck | tests/rules/test_yarn_command_replaced.py | 5 | 1026 | import pytest
from thefuck.types import Command
from thefuck.rules.yarn_command_replaced import match, get_new_command
output = ('error `install` has been replaced with `add` to add new '
'dependencies. Run "yarn add {}" instead.').format
@pytest.mark.parametrize('command', [
Command('yarn install redux', output('redux')),
Command('yarn install moment', output('moment')),
Command('yarn install lodash', output('lodash'))])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command('yarn install', '')])
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('yarn install redux', output('redux')),
'yarn add redux'),
(Command('yarn install moment', output('moment')),
'yarn add moment'),
(Command('yarn install lodash', output('lodash')),
'yarn add lodash')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
| mit | -5,044,299,526,262,103,000 | 31.0625 | 70 | 0.680312 | false |
altf4/cpu-level-11 | Tactics/infinite.py | 1 | 6247 | import melee
import Chains
from melee.enums import Action
from Tactics.tactic import Tactic
from Chains.smashattack import SMASH_DIRECTION
from Tactics.punish import Punish
from melee.enums import Character
class Infinite(Tactic):
def __init__(self, logger, controller, framedata, difficulty):
Tactic.__init__(self, logger, controller, framedata, difficulty)
def killpercent(opponent_state):
character = opponent_state.character
if character == Character.CPTFALCON:
return 113
if character == Character.FALCO:
return 103
if character == Character.FOX:
return 96
if character == Character.SHEIK:
return 92
if character == Character.PIKACHU:
return 73
if character == Character.PEACH:
return 80
if character == Character.ZELDA:
return 70
if character == Character.MARTH:
return 89
if character == Character.JIGGLYPUFF:
return 55
if character == Character.SAMUS:
return 89
return 100
def caninfinite(smashbot_state, opponent_state, gamestate, framedata, difficulty):
isroll = framedata.is_roll(opponent_state.character, opponent_state.action)
if opponent_state.action in [Action.SHIELD_START, Action.SHIELD, \
Action.SHIELD_STUN, Action.SHIELD_REFLECT]:
return False
# Don't try to infinite if we're on a platform
if smashbot_state.y > 2 or opponent_state.y > 2:
return False
# Should we try a waveshine infinite?
# They need to have high friction and not fall down
if opponent_state.action in [Action.STANDING, Action.TURNING, Action.DASHING, Action.RUNNING, \
Action.WALK_SLOW, Action.WALK_MIDDLE, Action.WALK_FAST]:
return False
framesleft = Punish.framesleft(opponent_state, framedata, smashbot_state)
# This is off by one for hitstun
framesleft -= 1
# Give up the infinite if we're in our last dashing frame, and are getting close to the edge
# We are at risk of running off the edge when this happens
if (smashbot_state.action == Action.DASHING and smashbot_state.action_frame >= 11):
if (smashbot_state.speed_ground_x_self > 0) == (smashbot_state.x > 0):
edge_x = melee.stages.EDGE_GROUND_POSITION[gamestate.stage]
if opponent_state.x < 0:
edge_x = -edge_x
edgedistance = abs(edge_x - smashbot_state.x)
if edgedistance < 16:
return False
# If opponent is attacking, don't infinite
if framedata.is_attack(opponent_state.character, opponent_state.action):
return False
# If opponent is going to slide to the edge, then we have to stop
endposition = opponent_state.x + framedata.slide_distance(opponent_state, opponent_state.speed_x_attack, framesleft)
if abs(endposition)+5 > melee.stages.EDGE_GROUND_POSITION[gamestate.stage]:
return False
if framedata.characterdata[opponent_state.character]["Friction"] >= 0.06 and \
opponent_state.hitstun_frames_left > 1 and not isroll and opponent_state.on_ground \
and opponent_state.percent < Infinite.killpercent(opponent_state):
return True
return False
def step(self, gamestate, smashbot_state, opponent_state):
self._propagate = (gamestate, smashbot_state, opponent_state)
#If we can't interrupt the chain, just continue it
if self.chain != None and not self.chain.interruptible:
self.chain.step(gamestate, smashbot_state, opponent_state)
return
framesleft = Punish.framesleft(opponent_state, self.framedata, smashbot_state)
# This is off by one for hitstun
framesleft -= 1
shinerange = 9.9
if smashbot_state.action == Action.RUNNING:
shinerange = 12.8
if smashbot_state.action == Action.DASHING:
shinerange = 9.5
# If we shine too close to the edge while accelerating horizontally, we can slide offstage and get into trouble
distance_from_edge = melee.stages.EDGE_GROUND_POSITION[gamestate.stage] - abs(smashbot_state.x)
edgetooclose = smashbot_state.action == Action.EDGE_TEETERING_START or distance_from_edge < 5
# Try to do the shine
if gamestate.distance < shinerange and not edgetooclose:
# Emergency backup shine. If we don't shine now, they'll get out of the combo
if framesleft == 1:
self.chain = None
self.pickchain(Chains.Waveshine)
return
# Cut the run short and just shine now. Don't wait for the cross-up
# This is here to prevent running too close to the edge and sliding off
if smashbot_state.action in [Action.RUNNING, Action.RUN_BRAKE, Action.CROUCH_START] and distance_from_edge < 16:
self.chain = None
self.pickchain(Chains.Waveshine)
return
# We always want to try to shine our opponent towards the center of the stage
# If we are lined up right now, do the shine
if (smashbot_state.x < opponent_state.x < 0) or (0 < opponent_state.x < smashbot_state.x):
self.chain = None
self.pickchain(Chains.Waveshine)
return
# If we are running away from our opponent, just shine now
onright = opponent_state.x < smashbot_state.x
if (smashbot_state.speed_ground_x_self > 0) == onright:
self.chain = None
self.pickchain(Chains.Waveshine)
return
if smashbot_state.action == Action.LANDING_SPECIAL and smashbot_state.action_frame < 28:
self.pickchain(Chains.Nothing)
return
if not (smashbot_state.action == Action.DOWN_B_GROUND_START and smashbot_state.action_frame in [1,2]):
self.pickchain(Chains.Run, [opponent_state.x > smashbot_state.x])
return
return
| gpl-3.0 | -267,456,652,151,708,000 | 42.685315 | 124 | 0.622859 | false |
HieronymusCH/RedNotebook | win/cross-compile-exe.py | 2 | 1801 | #! /usr/bin/env python
import argparse
import logging
import os
import shutil
import sys
from utils import run
logging.basicConfig(level=logging.INFO)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('wine_tarball')
parser.add_argument('build_dir')
return parser.parse_args()
args = parse_args()
DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(DIR)
WINE_DIR = os.path.abspath(args.build_dir)
DRIVE_C = os.path.join(WINE_DIR, 'drive_c')
WINE_TARBALL = os.path.abspath(args.wine_tarball)
assert os.path.exists(WINE_TARBALL), WINE_TARBALL
WINE_RN_DIR = os.path.join(DRIVE_C, 'rednotebook')
WINE_RN_WIN_DIR = os.path.join(WINE_RN_DIR, 'win')
PYINSTALLER = os.path.join(DRIVE_C, 'PyInstaller-2.1', 'pyinstaller.py')
SPEC = os.path.join(BASE_DIR, 'win', 'rednotebook.spec')
WINE_SPEC = os.path.join(WINE_RN_WIN_DIR, 'rednotebook.spec')
WINE_BUILD = os.path.join(DRIVE_C, 'build')
WINE_DIST = os.path.join(DRIVE_C, 'dist')
LOCALE_DIR = os.path.join(WINE_DIST, 'share', 'locale')
WINE_RN_EXE = os.path.join(WINE_DIST, 'rednotebook.exe')
WINE_PYTHON = os.path.join(DRIVE_C, 'Python27', 'python.exe')
if os.path.exists(WINE_DIR):
answer = raw_input('The build dir exists. Overwrite it? (Y/n): ').strip()
if answer and answer.lower() != 'y':
sys.exit('Aborting')
shutil.rmtree(WINE_DIR)
os.environ['WINEPREFIX'] = WINE_DIR
os.mkdir(WINE_DIR)
run(['tar', '-xzf', WINE_TARBALL, '--directory', WINE_DIR])
run(['bzr', 'co', '--lightweight', BASE_DIR, WINE_RN_DIR])
shutil.copy2(SPEC, WINE_SPEC)
run(['wine', WINE_PYTHON, PYINSTALLER, '--workpath', WINE_BUILD,
'--distpath', DRIVE_C, WINE_SPEC]) # will be built at ...DRIVE_C/dist
run(['./build-translations.py', LOCALE_DIR], cwd=DIR)
#run(['wine', WINE_RN_EXE])
| gpl-2.0 | 8,815,186,155,191,652,000 | 32.351852 | 77 | 0.686841 | false |
alangenfeld/cloud-nfs | pyCloud/cloudnfs.py | 1 | 3369 | import boto
import os
import tempfile
import pickle
#bucketName = "cloudnfs"
bucketName = "cs699wisc_samanas"
def download(srcName, dstName) :
"Download the files."
src_uri = boto.storage_uri(bucketName + "/" + srcName, "gs")
dst_uri = boto.storage_uri(dstName, "file")
"Append the object name to the directory name."
dst_key_name = dst_uri.object_name# + os.sep + src_uri.object_name
"Use the new destination key name to create a new destination URI."
new_dst_uri = dst_uri.clone_replace_name(dst_key_name)
print new_dst_uri
"Create a new destination key object."
dst_key = new_dst_uri.new_key()
"Retrieve the source key and create a source key object."
src_key = src_uri.get_key()
"Create a temporary file to hold our copy operation."
tmp = tempfile.TemporaryFile()
src_key.get_file(tmp)
tmp.seek(0)
"Download the object."
dst_key.set_contents_from_file(tmp)
return
def send_file(srcName, dstName) :
# "Create source and destination URIs."
src_uri = boto.storage_uri(srcName, "file")
dst_uri = boto.storage_uri(bucketName, "gs")
# "Create a new destination URI with the source file name as the object name."
new_dst_uri = dst_uri.clone_replace_name(dstName)
# "Create a new destination key object."
dst_key = new_dst_uri.new_key()
# "Retrieve the source key and create a source key object."
src_key = src_uri.get_key()
# "Create a temporary file to hold your copy operation."
tmp = tempfile.TemporaryFile()
src_key.get_file(tmp)
tmp.seek(0)
# "Upload the file."
dst_key.set_contents_from_file(tmp)
return
class entry :
def __init__(self, name):
self.name = name
self.versions = 1
self.version_map = dict({1:get_curr_bucket()})
self.active = True
# This assumes that different versions will always be in a different bucket
# Thus, versioning not supported yet.
    def update(self):
self.versions += 1
self.version_map[self.versions] = get_curr_bucket()
def __str__(self):
return self.name
def print_e(self):
print '%s; Version: %d' % (self.name, self.versions)
print self.active
print self.version_map
def get_curr_bucket():
return bucketName
def print_meta():
meta_list = get_list()
for e in meta_list :
print e
def create_entry(name, entry_list):
entry_list.append(entry(name))
# Creates list if doesn't already exist. If it does, save it.
def store_list(new_list):
meta_list = open('meta.pkl', 'wb')
pickle.dump(new_list, meta_list)
meta_list.close()
send_file('meta.pkl', 'meta.pkl')
def get_list():
try:
# meta.pkl shall be stored in the cloud, we will attempt to download
download('meta.pkl', 'meta.pkl')
except boto.exception.InvalidUriError:
# If it isn't there, create locally and instantiate new list.
meta_list = open('meta.pkl', 'wb')
new_list = []
else:
# meta.pkl exists, lets load it and return.
        meta_list = open('meta.pkl', 'rb')  # read-only is enough; we only unpickle from it
try:
new_list = pickle.load(meta_list)
except EOFError:
# The list we downloaded is empty. Return empty list.
new_list = []
meta_list.close()
return new_list
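# Example round trip (a sketch; assumes working boto credentials and that the
# bucket above exists; 'report.txt' is a hypothetical local file):
#
#   entries = get_list()
#   create_entry('report.txt', entries)
#   store_list(entries)
#   send_file('report.txt', 'report.txt')
#   download('report.txt', 'report_copy.txt')
#   print_meta()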
| lgpl-3.0 | -6,613,355,761,184,902,000 | 28.814159 | 81 | 0.627189 | false |
whn09/tensorflow | tensorflow/python/util/nest_test.py | 34 | 14870 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class NestTest(test.TestCase):
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
point = collections.namedtuple("Point", ["x", "y"])
structure = (point(x=4, y=2), ((point(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, "flat_sequence"):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
def testIsSequence(self):
self.assertFalse(nest.is_sequence("1234"))
self.assertTrue(nest.is_sequence([1, 3, [4, 5]]))
self.assertTrue(nest.is_sequence(((7, 8), (5, 6))))
self.assertTrue(nest.is_sequence([]))
self.assertFalse(nest.is_sequence(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_sequence(ones))
self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))
self.assertFalse(nest.is_sequence(np.ones((4, 5))))
def testFlattenDictItems(self):
dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
flat = {4: "a", 5: "b", 6: "c", 8: "d"}
self.assertEqual(nest.flatten_dict_items(dictionary), flat)
with self.assertRaises(TypeError):
nest.flatten_dict_items(4)
bad_dictionary = {(4, 5, (4, 8)): ("a", "b", ("c", "d"))}
with self.assertRaisesRegexp(ValueError, "not unique"):
nest.flatten_dict_items(bad_dictionary)
another_bad_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))}
with self.assertRaisesRegexp(
ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
nest.flatten_dict_items(another_bad_dictionary)
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same number of elements"):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(ValueError,
"don't have the same number of elements"):
nest.assert_same_structure([0, 1], np.array([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same number of elements"):
nest.assert_same_structure(0, [0, 1])
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(structure1, structure_different_nesting)
named_type_0 = collections.namedtuple("named_0", ("a", "b"))
named_type_1 = collections.namedtuple("named_1", ("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
named_type_0("a", "b"))
nest.assert_same_structure(named_type_0(3, 4), named_type_0("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
named_type_0(3, 4), named_type_1(3, 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(named_type_0(3, 4), named_type_0([3], 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure([[3], 4], [3, [4]])
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, structure1, structure1_list)
nest.map_structure(lambda x, y: None, structure1, structure1_list,
check_types=False)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
def testAssertShallowStructure(self):
inp_ab = ["a", "b"]
inp_abc = ["a", "b", "c"]
expected_message = (
"The two structures don't have the same sequence length. Input "
"structure has length 2, while shallow structure has length 3.")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = [(1, 1), (2, 2)]
inp_ab2 = [[1, 1], [2, 2]]
expected_message = (
"The two structures don't have the same sequence type. Input structure "
"has type <(type|class) 'tuple'>, while shallow structure has type "
"<(type|class) 'list'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
def testFlattenUpTo(self):
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
def testMapStructureUpTo(self):
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ["evens", ["odds", "primes"]]
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]])
if __name__ == "__main__":
test.main()
| apache-2.0 | -609,341,527,215,267,500 | 43.520958 | 80 | 0.637929 | false |
nitzmahone/ansible | lib/ansible/modules/network/f5/bigiq_application_fastl4_udp.py | 8 | 21580 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigiq_application_fastl4_udp
short_description: Manages BIG-IQ FastL4 UDP applications
description:
- Manages BIG-IQ applications used for load balancing a UDP-based application
with a FastL4 profile.
version_added: 2.6
options:
name:
description:
- Name of the new application.
required: True
description:
description:
- Description of the application.
servers:
description:
- A list of servers that the application is hosted on.
      - If you are familiar with other BIG-IP settings, you might also refer to this
list as the list of pool members.
- When creating a new application, at least one server is required.
suboptions:
address:
description:
- The IP address of the server.
required: True
port:
description:
- The port of the server.
- When creating a new application and specifying a server, if this parameter
is not provided, the default of C(8000) will be used.
default: 8000
inbound_virtual:
description:
- Settings to configure the virtual which will receive the inbound connection.
suboptions:
address:
description:
- Specifies destination IP address information to which the virtual server
sends traffic.
- This parameter is required when creating a new application.
required: True
netmask:
description:
- Specifies the netmask to associate with the given C(destination).
- This parameter is required when creating a new application.
required: True
port:
description:
- The port that the virtual listens for connections on.
- When creating a new application, if this parameter is not specified, the
default value of C(53) will be used.
default: 53
service_environment:
description:
      - Specifies the name of the service environment that the application will be
deployed to.
- When creating a new application, this parameter is required.
- The service environment type will be discovered by this module automatically.
Therefore, it is crucial that you maintain unique names for items in the
different service environment types.
- SSGs are not supported for this type of application.
add_analytics:
description:
- Collects statistics of the BIG-IP that the application is deployed to.
- This parameter is only relevant when specifying a C(service_environment) which
is a BIG-IP; not an SSG.
type: bool
default: no
state:
description:
- The state of the resource on the system.
- When C(present), guarantees that the resource exists with the provided attributes.
- When C(absent), removes the resource from the system.
default: present
choices:
- absent
- present
wait:
description:
- If the module should wait for the application to be created, deleted or updated.
type: bool
default: yes
extends_documentation_fragment: f5
notes:
- This module does not support updating of your application (whether deployed or not).
If you need to update the application, the recommended practice is to remove and
re-create.
- Requires BIG-IQ version 6.0 or greater.
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Load balance a UDP-based application with a FastL4 profile
bigiq_application_fastl4_udp:
name: my-app
description: My description
service_environment: my-bigip-device
servers:
- address: 1.2.3.4
port: 8080
- address: 5.6.7.8
port: 8080
inbound_virtual:
name: foo
address: 2.2.2.2
netmask: 255.255.255.255
port: 53
provider:
password: secret
server: lb.mydomain.com
user: admin
state: present
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the application of the resource.
returned: changed
type: string
sample: My application
service_environment:
description: The environment which the service was deployed to.
returned: changed
type: string
sample: my-ssg1
inbound_virtual_destination:
description: The destination of the virtual that was created.
returned: changed
type: string
sample: 6.7.8.9
inbound_virtual_netmask:
description: The network mask of the provided inbound destination.
returned: changed
type: string
sample: 255.255.255.0
inbound_virtual_port:
description: The port the inbound virtual address listens on.
returned: changed
type: int
sample: 80
servers:
description: List of servers, and their ports, that make up the application.
type: complex
returned: changed
contains:
address:
description: The IP address of the server.
returned: changed
type: string
sample: 2.3.4.5
port:
description: The port that the server listens on.
returned: changed
type: int
sample: 8080
sample: hash/dictionary of values
'''
import time
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigiq import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigiq import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
api_map = {
'templateReference': 'template_reference',
'subPath': 'sub_path',
'configSetName': 'config_set_name',
'defaultDeviceReference': 'default_device_reference',
'addAnalytics': 'add_analytics'
}
api_attributes = [
'resources', 'description', 'configSetName', 'subPath', 'templateReference',
'defaultDeviceReference', 'addAnalytics'
]
returnables = [
'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
'default_device_reference', 'servers', 'inbound_virtual', 'add_analytics'
]
updatables = [
'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
'default_device_reference', 'servers', 'add_analytics'
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def http_profile(self):
return "profile_http"
@property
def config_set_name(self):
return self.name
@property
def sub_path(self):
return self.name
@property
def template_reference(self):
filter = "name+eq+'Default-f5-FastL4-UDP-lb-template'"
uri = "https://{0}:{1}/mgmt/cm/global/templates/?$filter={2}&$top=1&$select=selfLink".format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"No default HTTP LB template was found."
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
                raise F5ModuleError(resp.content)
result = dict(
link=response['items'][0]['selfLink']
)
return result
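    # Resolve the managed device that the application is deployed to.
    # service_environment may be given as either an IP address or a hostname.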
@property
def default_device_reference(self):
if is_valid_ip(self.service_environment):
# An IP address was specified
filter = "address+eq+'{0}'".format(self.service_environment)
else:
# Assume a hostname was specified
filter = "hostname+eq+'{0}'".format(self.service_environment)
uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-adccore-allbigipDevices/devices/?$filter={2}&$top=1&$select=selfLink".format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"The specified service_environment '{0}' was found.".format(self.service_environment)
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
                raise F5ModuleError(resp.content)
result = dict(
link=response['items'][0]['selfLink']
)
return result
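# Changes gathers the computed parameters; _filter_params() (inherited from
# AnsibleF5Parameters) drops keys whose value is None before reporting.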
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
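# UsableChanges assembles the 'resources' section of the apply-template
# payload. The hard-coded identifiers (e.g. 'ltm:virtual:c2e739ba116f')
# appear to be resource IDs defined by the default FastL4 UDP template itself.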
class UsableChanges(Changes):
@property
def resources(self):
result = dict()
result.update(self.udp_monitor)
result.update(self.virtual)
result.update(self.pool)
result.update(self.nodes)
return result
@property
def virtual(self):
result = dict()
result['ltm:virtual:c2e739ba116f'] = [
dict(
parameters=dict(
name='virtual',
destinationAddress=self.inbound_virtual['address'],
mask=self.inbound_virtual['netmask'],
destinationPort=self.inbound_virtual.get('port', 53)
),
subcollectionResources=self.profiles
)
]
return result
@property
def profiles(self):
result = {
'profiles:53f9b3028d90': [
dict(
parameters=dict()
)
]
}
return result
@property
def pool(self):
result = dict()
result['ltm:pool:e6879775458c'] = [
dict(
parameters=dict(
name='pool_0'
),
subcollectionResources=self.pool_members
)
]
return result
@property
def pool_members(self):
result = dict()
result['members:b19842fe713a'] = []
for x in self.servers:
member = dict(
parameters=dict(
port=x.get('port', 8000),
nodeReference=dict(
link='#/resources/ltm:node:b19842fe713a/{0}'.format(x['address']),
fullPath='# {0}'.format(x['address'])
)
)
)
result['members:b19842fe713a'].append(member)
return result
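    # A UDP health monitor resource is included as well; only its name is
    # set here (the template presumably wires it to the pool).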
@property
def udp_monitor(self):
result = dict()
result['ltm:monitor:udp:22cdcfda0a40'] = [
dict(
parameters=dict(
name='monitor-udp'
)
)
]
return result
@property
def nodes(self):
result = dict()
result['ltm:node:b19842fe713a'] = []
for x in self.servers:
tmp = dict(
parameters=dict(
name=x['address'],
address=x['address']
)
)
result['ltm:node:b19842fe713a'].append(tmp)
return result
@property
def node_addresses(self):
result = [x['address'] for x in self.servers]
return result
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
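# ModuleManager drives the module: it computes the desired configuration,
# checks whether the application already exists on the BIG-IQ, and issues
# apply-template tasks to create or delete it.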
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.want.client = self.client
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
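    # Entry point invoked from main(): dispatches on the desired state and
    # reports the resulting changes back to Ansible.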
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return False
else:
return self.create()
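    # Existence is determined via the BIG-IQ reporting API, filtering the
    # list of all applications by the configured name.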
def exists(self):
uri = "https://{0}:{1}/mgmt/ap/query/v1/tenants/default/reports/AllApplicationsList?$filter=name+eq+'{2}'".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and 'result' in response and 'totalItems' in response['result'] and response['result']['totalItems'] == 0:
return False
return True
def remove(self):
if self.module.check_mode:
return True
self_link = self.remove_from_device()
if self.want.wait:
self.wait_for_apply_template_task(self_link)
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
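    # Creating a new application requires a service environment, at least
    # one server, and an inbound virtual; all three feed the template payload.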
def create(self):
if self.want.service_environment is None:
raise F5ModuleError(
"A 'service_environment' must be specified when creating a new application."
)
if self.want.servers is None:
raise F5ModuleError(
"At least one 'servers' item is needed when creating a new application."
)
if self.want.inbound_virtual is None:
raise F5ModuleError(
"An 'inbound_virtual' must be specified when creating a new application."
)
self._set_changed_options()
if self.module.check_mode:
return True
self_link = self.create_on_device()
if self.want.wait:
self.wait_for_apply_template_task(self_link)
if not self.exists():
raise F5ModuleError(
"Failed to deploy application."
)
return True
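    # Creation and deletion both go through the same apply-template task
    # endpoint; the 'mode' key selects the operation.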
def create_on_device(self):
params = self.changes.api_params()
params['mode'] = 'CREATE'
uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
                raise F5ModuleError(resp.content)
return response['selfLink']
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
params = dict(
configSetName=self.want.name,
mode='DELETE'
)
uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
                raise F5ModuleError(resp.content)
return response['selfLink']
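    # Poll the task's self-link until the apply-template task completes.
    # Note that there is no timeout, so a task that never finishes (and
    # never reports an errorMessage) would block the module indefinitely.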
def wait_for_apply_template_task(self, self_link):
host = 'https://{0}:{1}'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
uri = self_link.replace('https://localhost', host)
while True:
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if response['status'] == 'FINISHED' and response.get('currentStep', None) == 'DONE':
return True
elif 'errorMessage' in response:
raise F5ModuleError(response['errorMessage'])
time.sleep(5)
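# ArgumentSpec layers the module-specific options on top of the shared F5
# argument spec (provider/connection parameters).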
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
description=dict(),
servers=dict(
type='list',
options=dict(
address=dict(required=True),
port=dict(default=8000)
)
),
inbound_virtual=dict(
type='dict',
options=dict(
address=dict(required=True),
netmask=dict(required=True),
port=dict(default=53)
)
),
service_environment=dict(),
add_analytics=dict(type='bool', default='no'),
state=dict(
default='present',
choices=['present', 'absent']
),
wait=dict(type='bool', default='yes')
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
exit_json(module, results, client)
except F5ModuleError as ex:
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 | 2,328,987,658,050,901,000 | 30.457726 | 146 | 0.582762 | false |