repo_name (string, len 5–92) | path (string, len 4–221) | copies (string, 19 classes) | size (string, len 4–6) | content (string, len 766–896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
arsfeld/conduit | conduit/modules/ShutterflyModule/ShutterflyModule.py | 1 | 3532 | """
Shutterfly Data Sink
"""
import logging
log = logging.getLogger("modules.Shutterfly")
import conduit
import conduit.utils as Utils
from conduit.datatypes import Rid
import conduit.dataproviders.Image as Image
import conduit.Exceptions as Exceptions
import conduit.datatypes.Photo as Photo
Utils.dataprovider_add_dir_to_path(__file__, "shutterfly")
from shutterfly import Shutterfly
from gettext import gettext as _
MODULES = {
"ShutterflySink" : {"type" : "dataprovider"},
}
class ShutterflySink(Image.ImageSink):
_name_ = _("Shutterfly")
_description_ = _("Synchronize your Shutterfly photos")
_module_type_ = "sink"
_icon_ = "shutterfly"
_configurable_ = True
def __init__(self, *args):
Image.ImageSink.__init__(self)
self.username = ""
self.password = ""
self.album = ""
self.sapi = None
self.salbum = None
self.sphotos = None
def _get_raw_photo_url(self, photoInfo):
return photoInfo.url
def _get_photo_info(self, id):
if id in self.sphotos:
return self.sphotos[id]
else:
return None
def _get_photo_formats(self):
return ("image/jpeg", )
def refresh(self):
Image.ImageSink.refresh(self)
self.sapi = Shutterfly(self.username, self.password)
albums = self.sapi.getAlbums()
if self.album not in albums:
self.salbum = self.sapi.createAlbum(self.album)
else:
self.salbum = albums[self.album]
self.sphotos = self.salbum.getPhotos()
def get_all(self):
return self.sphotos.keys()
def get(self, LUID):
#Image.ImageSink.get(self, LUID)
sphoto = self.sphotos[LUID]
f = Photo.Photo(URI=sphoto.url)
f.set_open_URI(sphoto.url)
f.set_UID(LUID)
return f
def delete(self, LUID):
"""
Delete a photo by ID
Deleting a photo invalidates album length and photo index values.
We must reload the photos (or do something else...)
"""
if LUID not in self.sphotos:
log.warn("Photo does not exist")
return
try:
self.salbum.deletePhoto(self.sphotos[LUID])
except Exception as e:
raise Exceptions.SyncronizeError("Shutterfly Delete Error - Try Again.")
self.sphotos = self.salbum.getPhotos()
def _upload_photo(self, uploadInfo):
"""
Upload to album
"""
try:
ret = self.salbum.uploadPhoto(uploadInfo.url, uploadInfo.mimeType, uploadInfo.name)
return Rid(ret.id)
except Exception as e:
raise Exceptions.SyncronizeError("Shutterfly Upload Error.")
def configure(self, window):
"""
Configures the ShutterflySink
"""
widget = Utils.dataprovider_glade_get_widget(
__file__,
"shutterfly.glade",
"ShutterflySinkConfigDialog")
# Get configuration widgets
username = widget.get_widget("username")
password = widget.get_widget("password")
album = widget.get_widget("album")
# Load the widgets with presets
username.set_text(self.username)
password.set_text(self.password)
album.set_text(self.album)
dlg = widget.get_widget("ShutterflySinkConfigDialog")
response = Utils.run_dialog(dlg, window)
if response == True:
self.username = username.get_text()
self.password = password.get_text()
self.album = album.get_text()
dlg.destroy()
def get_configuration(self):
return {
"username" : self.username,
"password" : self.password,
"album" : self.album
}
def is_configured(self, isSource, isTwoWay):
if len(self.username) < 1:
return False
if len(self.password) < 1:
return False
if len(self.album) < 1:
return False
return True
def get_UID(self):
return self.username+":"+self.album
| gpl-2.0 | -7,614,566,365,720,435,000 | 22.084967 | 86 | 0.693658 | false | 2.95071 | true | false | false |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/declarative/scrolling.py | 1 | 2311 | #!/usr/bin/env python
# Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved.
# Contact: PySide Team ([email protected])
#
# This file is part of the examples of PySide: Python for Qt.
#
# You may use this file under the terms of the BSD license as follows:
#
# "Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
from PySide.QtCore import QUrl
from PySide.QtGui import QPushButton, QApplication
from PySide.QtDeclarative import QDeclarativeView
# This example uses a QML file to show a scrolling list containing
# all the items listed into dataList.
dataList = ["Item 1", "Item 2", "Item 3", "Item 4"]
app = QApplication([])
view = QDeclarativeView()
ctxt = view.rootContext()
ctxt.setContextProperty("myModel", dataList)
url = QUrl('view.qml')
view.setSource(url)
view.show()
app.exec_()
| epl-1.0 | 1,676,431,145,130,090,000 | 39.54386 | 72 | 0.762441 | false | 4.068662 | false | false | false |
j-marjanovic/myhdl | myhdl/_always_comb.py | 1 | 4522 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2009 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module with the always_comb function. """
from __future__ import absolute_import
import sys
import inspect
from types import FunctionType
import re
import ast
from myhdl import AlwaysCombError
from myhdl._Signal import _Signal, _isListOfSigs
from myhdl._util import _isGenFunc, _dedent
from myhdl._Waiter import _Waiter, _SignalWaiter, _SignalTupleWaiter
from myhdl._instance import _Instantiator
from myhdl._always import _Always
from myhdl._resolverefs import _AttrRefTransformer
from myhdl._visitors import _SigNameVisitor
class _error:
pass
_error.ArgType = "always_comb argument should be a classic function"
_error.NrOfArgs = "always_comb argument should be a function without arguments"
_error.Scope = "always_comb argument should be a local function"
_error.SignalAsInout = "signal (%s) used as inout in always_comb function argument"
_error.EmbeddedFunction = "embedded functions in always_comb function argument not supported"
_error.EmptySensitivityList= "sensitivity list is empty"
def always_comb(func):
if not isinstance( func, FunctionType):
raise AlwaysCombError(_error.ArgType)
if _isGenFunc(func):
raise AlwaysCombError(_error.ArgType)
if func.__code__.co_argcount > 0:
raise AlwaysCombError(_error.NrOfArgs)
c = _AlwaysComb(func)
return c
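# Illustrative usage sketch (not part of the original module; the signal
# names a, b and z are assumed). always_comb infers the sensitivity list
# from the signals the decorated function reads:
#
#     >>> @always_comb
#     ... def logic():
#     ...     z.next = a and not b
#
# Reading a or b adds them to the sensitivity list automatically; reading
# and driving the same signal raises SignalAsInout.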
# class _AlwaysComb(_Instantiator):
class _AlwaysComb(_Always):
# def __init__(self, func, symdict):
# self.func = func
# self.symdict = symdict
# s = inspect.getsource(func)
# # remove decorators
# s = re.sub(r"@.*", "", s)
# s = s.lstrip()
# tree = compiler.parse(s)
# v = _SigNameVisitor(symdict)
# compiler.walk(tree, v)
# self.inputs = v.inputs
# self.outputs = v.outputs
# senslist = []
# for n in self.inputs:
# s = self.symdict[n]
# if isinstance(s, Signal):
# senslist.append(s)
# else: # list of sigs
# senslist.extend(s)
# self.senslist = tuple(senslist)
# self.gen = self.genfunc()
# if len(self.senslist) == 0:
# raise AlwaysCombError(_error.EmptySensitivityList)
# if len(self.senslist) == 1:
# W = _SignalWaiter
# else:
# W = _SignalTupleWaiter
# self.waiter = W(self.gen)
def __init__(self, func):
senslist = []
super(_AlwaysComb, self).__init__(func, senslist)
s = inspect.getsource(func)
s = _dedent(s)
tree = ast.parse(s)
# print ast.dump(tree)
v = _AttrRefTransformer(self)
v.visit(tree)
v = _SigNameVisitor(self.symdict)
v.visit(tree)
self.inputs = v.results['input']
self.outputs = v.results['output']
inouts = v.results['inout'] | self.inputs.intersection(self.outputs)
if inouts:
raise AlwaysCombError(_error.SignalAsInout % inouts)
if v.results['embedded_func']:
raise AlwaysCombError(_error.EmbeddedFunction)
for n in self.inputs:
s = self.symdict[n]
if isinstance(s, _Signal):
senslist.append(s)
elif _isListOfSigs(s):
senslist.extend(s)
self.senslist = tuple(senslist)
if len(self.senslist) == 0:
raise AlwaysCombError(_error.EmptySensitivityList)
def genfunc(self):
senslist = self.senslist
if len(senslist) == 1:
senslist = senslist[0]
func = self.func
while 1:
func()
yield senslist
| lgpl-2.1 | -1,478,581,630,403,665,000 | 33 | 93 | 0.638655 | false | 3.574704 | false | false | false |
enricobacis/cineca-scopus | src/cineca3.py | 1 | 3928 | #!/usr/bin/env python
#coding: utf-8
from contextlib import closing
from operator import itemgetter
from datetime import datetime
from argparse import ArgumentParser
from unicodecsv import DictWriter
from utils import read_cineca_file, csv_to_db
import sqlite3
import json
import re
FIELDS = ['Ateneo', 'Facoltà', 'Fascia', 'Genere', 'S.C.',
'Servizio prestato in altro ateneo', 'Struttura di afferenza',
'author', 'identifier', 'eid', 'title', 'aggregationType',
'citedby-count', 'publicationName', 'isbn', 'issn', 'volume',
'issueIdentifier', 'pageRange', 'pageNum', 'coverDate',
'coverDisplayDate', 'doi', 'numAuthors']
QUERY = 'SELECT entries FROM articles WHERE author = ? AND ateneo = ?'
def pagenum(pageRange):
try:
page = list(map(int, pageRange.split('-')))
return 1 if len(page) == 1 else page[1] - page[0]
except:
return None
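# Hypothetical examples of the behaviour above (not in the original source):
#     >>> pagenum('12-15')   # closed range -> difference of the bounds
#     3
#     >>> pagenum('7')       # single page -> 1
#     1
#     >>> pagenum(None)      # unparseable input -> None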
def process(entry):
for key, value in list(entry.items()):
if ':' in key:
del entry[key]
key = key.partition(':')[2]
entry[key] = value
    match = re.match(r'Author list of (\d+)', entry.get('message', ''))
if match: entry['numAuthors'] = int(match.group(1))
else: entry['numAuthors'] = len(entry.get('author', [])) or None
# eid and identifier default to 0
entry['eid'] = entry.get('eid', 0)
entry['identifier'] = entry.get('identifier', 0)
# validate coverDate (or default to 1900-01-01)
date = entry.get('coverDate', '')
try:
datesplit = list(map(int, date.split('-')))
if len(datesplit) == 3 and datesplit[1] == 0:
date = '%d-%d-%s' % (datesplit[0], datesplit[1]+1, datesplit[2])
datetime.strptime(date, '%Y-%m-%d')
except: entry['coverDate'] = '1900-01-01'
entry['author'] = entry['Cognome e Nome']
entry['pageNum'] = pagenum(entry.get('pageRange', None))
return entry
def mergedicts(*dicts):
return {k:v for d in dicts for k,v in d.items()}
if __name__ == '__main__':
from config import FILENAME, DBFILE, OUTFILE, PRODUCTSDB
parser = ArgumentParser('convert scopus db to csv')
parser.add_argument('--input', default=FILENAME, help='cineca input file')
parser.add_argument('--db', default=DBFILE, help='database file')
parser.add_argument('--output', default=OUTFILE, help='output csv file')
parser.add_argument('--outdb', default=PRODUCTSDB, help='output db file')
args = parser.parse_args()
with open(args.output, 'wb') as outfile:
csvreader = [row.to_dict() for row in read_cineca_file(args.input)]
authors = [(row['Cognome e Nome'], row['Ateneo'], row) for row in csvreader]
authors.sort(key=itemgetter(0, 1))
csvwriter = DictWriter(outfile, FIELDS, extrasaction='ignore', encoding='utf-8')
csvwriter.writeheader()
with sqlite3.connect(args.db) as connection:
with closing(connection.cursor()) as cursor:
for author, ateneo, authordata in authors:
entries = cursor.execute(QUERY, (author,ateneo)).fetchall()
if not entries:
print('Empty entry added for %s' % author)
csvwriter.writerow(process(authordata))
else:
inserted = set()
for entry in json.loads(entries[0][0]):
ID = entry.get('dc:identifier', '')
print('%s\t%s' % (author, ID))
if ID in inserted:
print(' * duplicate ignored *')
else:
inserted.add(ID)
csvwriter.writerow(process(mergedicts(authordata, entry)))
print('\n[*] Converting csv to sqlite3db ...')
csv_to_db(args.output, args.outdb, 'products')
| mit | -8,302,143,502,810,306,000 | 37.881188 | 90 | 0.578813 | false | 3.729345 | false | false | false |
j-rock/cs598ps | src/py/main.py | 1 | 4059 | import sys
import time
from cssigps.offlineclassifier import *
from cssigps.dataset import *
from cssigps.feature import *
from cssigps.experiments import *
from get_dropbox_path import *
def print_usage():
"""
Print the usage for the main script.
"""
print("USAGE: use the run.sh or the main.py directly.")
print("")
print(" run.sh <EXPERIMENT_NUMBER>")
print(" python main.py <EXPERIMENT_NUMBER>")
if __name__ == '__main__':
# decide which experiment to run based on the command line or user-input
response = ""
if len(sys.argv) >= 2:
response=sys.argv[1]
if response in ["-h","--help"]:
print_usage()
quit()
else:
prompt = "Which experiment would you like to run? [0-2]"
response = raw_input(prompt)
# run experiment
if response == "0":
path=get_dropbox_path()+"old-test/"
run_experiment_0(path)
elif response == "1":
run_experiment_1(include_none=True)
elif response == "2":
run_experiment_2()
elif response == "3":
run_experiment_3()
elif response == "4":
run_experiment_4()
elif response == "5":
path=get_dropbox_path()+"vowels-test/"
run_offline_svm(path)
elif response == "S":
# run single class classifier
c = sys.argv[2]
f = sys.argv[3]
classes=["NONE"]
path=get_dropbox_path()+"yes-no-test/"
factory = FBankFeature()
# select the class
if c == "Y":
path=get_dropbox_path()+"yes-no-test/"
classes.append("Y")
elif c=="N":
path=get_dropbox_path()+"yes-no-test/"
classes.append("N")
elif c=="A":
path=get_dropbox_path()+"vowels-test/"
classes=["A","NONE"]
elif c=="E":
path=get_dropbox_path()+"vowels-test/"
classes=["E","NONE"]
elif c=="I":
path=get_dropbox_path()+"vowels-test/"
classes=["I","NONE"]
elif c=="O":
path=get_dropbox_path()+"vowels-test/"
classes=["O","NONE"]
elif c=="U":
path=get_dropbox_path()+"vowels-test/"
classes=["U","NONE"]
else:
print("class argument invalid")
quit()
# select the feature
if f == "fbank":
factory=FBankFeature()
elif f == "m" or f == "magnitude":
factory=MagnitudeFeature()
elif f == "t" or f == "template":
factory=MultiTemplateFeature(SampleSet(find_testsamples(path),classes=classes).class_rep())
else:
print("feature argument invalid")
samples = find_testsamples(path)
sample_set = SampleSet(samples,classes=classes)
sample_set.stats()
run_sample_experiment(sample_set,feat_factory=factory)
elif response == "M":
# run multi class classifier
c = sys.argv[2]
f = sys.argv[3]
classes=["NONE"]
path=get_dropbox_path()+"yes-no-test/"
factory = FBankFeature()
# select the class
if c == "Y":
path=get_dropbox_path()+"yes-no-test/"
classes=["Y","N","NONE"]
elif c=="A":
path=get_dropbox_path()+"vowels-test/"
classes=["A","E","I","O","U","NONE"]
else:
print("class argument invalid")
quit()
samples = find_testsamples(path)
sample_set = SampleSet(samples,classes=classes)
sample_set.stats()
# select the feature
if f == "fbank":
factory=FBankFeature()
elif f == "m" or f == "magnitude":
factory=MagnitudeFeature()
elif f == "t" or f == "template":
factory=MultiTemplateFeature(SampleSet(find_testsamples(path),classes=classes).class_rep())
else:
print("feature argument invalid")
run_sample_experiment(sample_set,feat_factory=factory)
else:
print("Invalid option. Aborting..")
| mit | -7,437,332,897,349,050,000 | 29.75 | 103 | 0.537078 | false | 3.854701 | true | false | false |
jtaghiyar/kronos | setup.py | 1 | 1513 | '''
Created on Jul 10, 2014
@author: jtaghiyar
'''
import codecs
import os
import re
from setuptools import setup
def read(*paths):
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, *paths)) as f:
return f.read()
def get_version():
version_file = read("kronos", "kronos_version.py")
version_match = re.search(r"^kronos_version = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
long_description = read('README.md')
setup(name='kronos_pipeliner',
version=get_version(),
description='A workflow assembler for genome analytics and informatics',
long_description=long_description,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
keywords='pipeline workflow bioinformatics kronos',
author='M. Jafar Taghiyar',
author_email='[email protected]',
url='https://github.com/jtaghiyar/kronos',
license='MIT',
packages=['kronos', 'templates'],
entry_points={'console_scripts':['kronos=kronos:main']},
install_requires = ['pyyaml>=3.11', 'ruffus==2.4.1']
)
| mit | -666,503,168,781,725,600 | 30.520833 | 78 | 0.621943 | false | 3.526807 | false | false | false |
wdv4758h/ZipPy | edu.uci.python.benchmark/src/micro/boolean-logic.py | 1 | 1621 | # zwei 04/24/2014
# micro benchmark: polymorphic method calls, inspired by the Richards benchmark
import time
iteration = 50000
class Task(object):
def __init__(self, p, w, h):
self.packet_pending = p
self.task_waiting = w
self.task_holding = h
self.link = None
def isTaskHoldingOrWaiting(self):
return self.task_holding or (not self.packet_pending and self.task_waiting)
def isTaskHoldingOrWaiting(task_holding, packet_pending, task_waiting):
return task_holding or (not packet_pending and task_waiting)
TASK_LIST = [Task(False, False, True),
Task(False, True, False),
Task(True, True, False),
Task(True, False, True)]
def setupTaskQueue():
prev = None
for t in TASK_LIST:
t.link = prev
prev = t
return t
TASK_QUEUE = setupTaskQueue()
def dostuff():
total = 0
for i in range(iteration):
t = TASK_QUEUE
while t is not None:
if (t.isTaskHoldingOrWaiting()):
total += 1
t = t.link
return total
def noObjectDoStuff():
p = True
w = False
h = True
total = 0
for i in range(iteration):
h = isTaskHoldingOrWaiting(h, p, w)
if (isTaskHoldingOrWaiting(h, p, w)):
total += 1
return total
def measure(num):
print("Start timing...")
start = time.time()
    for i in range(num):
result = dostuff()
print(result)
duration = "%.3f\n" % (time.time() - start)
print("boolean-logic: " + duration)
# warm up
for i in range(500):
dostuff()
measure(1000) | bsd-3-clause | -691,052,674,666,857,300 | 21.527778 | 83 | 0.586675 | false | 3.448936 | false | false | false |
pelson/conda-build | tests/test_misc.py | 4 | 2003 | import json
from os.path import join
import pytest
from conda_build.utils import on_win
import conda_build._link as _link
from conda_build.conda_interface import PathType, EntityEncoder, CrossPlatformStLink
def test_pyc_f_2():
assert _link.pyc_f('sp/utils.py', (2, 7, 9)) == 'sp/utils.pyc'
def test_pyc_f_3():
for f, r in [
('sp/utils.py',
'sp/__pycache__/utils.cpython-34.pyc'),
('sp/foo/utils.py',
'sp/foo/__pycache__/utils.cpython-34.pyc'),
]:
assert _link.pyc_f(f, (3, 4, 2)) == r
def test_pathtype():
hardlink = PathType("hardlink")
assert str(hardlink) == "hardlink"
assert hardlink.__json__() == 'hardlink'
softlink = PathType("softlink")
assert str(softlink) == "softlink"
assert softlink.__json__() == "softlink"
def test_entity_encoder(tmpdir):
test_file = join(str(tmpdir), "test-file")
test_json = {"a": PathType("hardlink"), "b": 1}
with open(test_file, "w") as f:
json.dump(test_json, f, cls=EntityEncoder)
with open(test_file, "r") as f:
json_file = json.load(f)
assert json_file == {"a": "hardlink", "b": 1}
@pytest.mark.skipif(on_win, reason="link not available on win/py2.7")
def test_crossplatform_st_link(tmpdir):
from os import link
test_file = join(str(tmpdir), "test-file")
test_file_linked = join(str(tmpdir), "test-file-linked")
test_file_link = join(str(tmpdir), "test-file-link")
open(test_file, "a").close()
open(test_file_link, "a").close()
link(test_file_link, test_file_linked)
assert 1 == CrossPlatformStLink.st_nlink(test_file)
assert 2 == CrossPlatformStLink.st_nlink(test_file_link)
assert 2 == CrossPlatformStLink.st_nlink(test_file_linked)
@pytest.mark.skipif(not on_win, reason="already tested")
def test_crossplatform_st_link_on_win(tmpdir):
test_file = join(str(tmpdir), "test-file")
open(test_file, "a").close()
assert 1 == CrossPlatformStLink.st_nlink(test_file)
| bsd-3-clause | -808,085,511,508,831,500 | 30.296875 | 84 | 0.636046 | false | 2.994021 | true | false | false |
BiznetGIO/horizon | openstack_dashboard/api/base.py | 1 | 12067 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
    from collections.abc import Sequence  # Python 3.3+
except ImportError:
    from collections import Sequence  # Python 2 fallback
import functools
from django.conf import settings
import semantic_version
import six
from horizon import exceptions
__all__ = ('APIResourceWrapper', 'APIDictWrapper',
'get_service_from_catalog', 'url_for',)
@functools.total_ordering
class Version(object):
def __init__(self, version):
self.version = semantic_version.Version(str(version), partial=True)
def __eq__(self, other):
return self.version == Version(other).version
def __lt__(self, other):
return self.version < Version(other).version
def __repr__(self):
return "Version('%s')" % self.version
def __str__(self):
return str(self.version)
def __hash__(self):
return hash(str(self.version))
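# Illustrative sketch (not part of the original module): total_ordering plus
# __eq__/__lt__ lets Version compare directly against plain strings/numbers:
#     >>> Version('2.0') == '2.0'
#     True
#     >>> Version('2.0') < 3
#     True
#     >>> sorted([Version(3), Version('2.0')])[0]
#     Version('2.0')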
class APIVersionManager(object):
"""Object to store and manage API versioning data and utility methods."""
SETTINGS_KEY = "OPENSTACK_API_VERSIONS"
def __init__(self, service_type, preferred_version=None):
self.service_type = service_type
self.preferred = preferred_version
self._active = None
self.supported = {}
# As a convenience, we can drop in a placeholder for APIs that we
# have not yet needed to version. This is useful, for example, when
# panels such as the admin metadata_defs wants to check the active
# version even though it's not explicitly defined. Previously
# this caused a KeyError.
if self.preferred:
self.supported[self.preferred] = {"version": self.preferred}
@property
def active(self):
if self._active is None:
self.get_active_version()
return self._active
def load_supported_version(self, version, data):
version = Version(version)
self.supported[version] = data
def get_active_version(self):
if self._active is not None:
return self.supported[self._active]
key = getattr(settings, self.SETTINGS_KEY, {}).get(self.service_type)
if key is None:
# TODO(gabriel): support API version discovery here; we'll leave
# the setting in as a way of overriding the latest available
# version.
key = self.preferred
version = Version(key)
# Provide a helpful error message if the specified version isn't in the
# supported list.
if version not in self.supported:
choices = ", ".join(str(k) for k in six.iterkeys(self.supported))
msg = ('%s is not a supported API version for the %s service, '
' choices are: %s' % (version, self.service_type, choices))
raise exceptions.ConfigurationError(msg)
self._active = version
return self.supported[self._active]
def clear_active_cache(self):
self._active = None
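# Example of the Django setting this manager reads (values assumed for
# illustration; deployments define their own mapping):
#
#     OPENSTACK_API_VERSIONS = {
#         "identity": 3,
#         "volume": 2,
#     }
#
# get_active_version() falls back to `preferred` when the service type has
# no entry, and raises ConfigurationError for an unsupported version.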
class APIResourceWrapper(object):
"""Simple wrapper for api objects.
Define _attrs on the child class and pass in the
api object as the only argument to the constructor
"""
_attrs = []
_apiresource = None # Make sure _apiresource is there even in __init__.
def __init__(self, apiresource):
self._apiresource = apiresource
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._attrs:
raise
# __getattr__ won't find properties
return getattr(self._apiresource, attr)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
dict((attr, getattr(self, attr))
for attr in self._attrs
if hasattr(self, attr)))
def to_dict(self):
obj = {}
for key in self._attrs:
obj[key] = getattr(self._apiresource, key, None)
return obj
class APIDictWrapper(object):
"""Simple wrapper for api dictionaries
Some api calls return dictionaries. This class provides identical
behavior as APIResourceWrapper, except that it will also behave as a
dictionary, in addition to attribute accesses.
Attribute access is the preferred method of access, to be
consistent with api resource objects from novaclient.
"""
_apidict = {} # Make sure _apidict is there even in __init__.
def __init__(self, apidict):
self._apidict = apidict
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._apidict:
raise
return self._apidict[attr]
def __getitem__(self, item):
try:
return getattr(self, item)
except (AttributeError, TypeError) as e:
# caller is expecting a KeyError
raise KeyError(e)
def __contains__(self, item):
try:
return hasattr(self, item)
except TypeError:
return False
def get(self, item, default=None):
try:
return getattr(self, item)
except (AttributeError, TypeError):
return default
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._apidict)
def to_dict(self):
return self._apidict
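# Illustrative usage sketch (keys assumed, not from the original module):
#     >>> w = APIDictWrapper({'name': 'demo', 'size': 3})
#     >>> w.name                      # attribute access (preferred)
#     'demo'
#     >>> w['size']                   # dict-style access also works
#     3
#     >>> w.get('missing', 'fallback')
#     'fallback'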
class Quota(object):
"""Wrapper for individual limits in a quota."""
def __init__(self, name, limit):
self.name = name
self.limit = limit
def __repr__(self):
return "<Quota: (%s, %s)>" % (self.name, self.limit)
class QuotaSet(Sequence):
"""Wrapper for client QuotaSet objects.
This turns the individual quotas into Quota objects
for easier handling/iteration.
`QuotaSet` objects support a mix of `list` and `dict` methods; you can use
the bracket notation (`qs["my_quota"] = 0`) to add new quota values, and
use the `get` method to retrieve a specific quota, but otherwise it
behaves much like a list or tuple, particularly in supporting iteration.
"""
def __init__(self, apiresource=None):
self.items = []
if apiresource:
if hasattr(apiresource, '_info'):
items = apiresource._info.items()
else:
items = apiresource.items()
for k, v in items:
if k == 'id':
continue
self[k] = v
def __setitem__(self, k, v):
v = int(v) if v is not None else v
q = Quota(k, v)
self.items.append(q)
def __getitem__(self, index):
return self.items[index]
def __add__(self, other):
"""Merge another QuotaSet into this one.
Existing quotas are not overridden.
"""
if not isinstance(other, QuotaSet):
msg = "Can only add QuotaSet to QuotaSet, " \
"but received %s instead" % type(other)
raise ValueError(msg)
for item in other:
if self.get(item.name).limit is None:
self.items.append(item)
return self
def __len__(self):
return len(self.items)
def __repr__(self):
return repr(self.items)
def get(self, key, default=None):
match = [quota for quota in self.items if quota.name == key]
return match.pop() if len(match) else Quota(key, default)
def add(self, other):
return self.__add__(other)
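# Illustrative usage sketch (quota names assumed):
#     >>> qs = QuotaSet()
#     >>> qs['instances'] = 10            # bracket notation appends a Quota
#     >>> qs.get('instances').limit
#     10
#     >>> qs.get('cores').limit is None   # missing quotas default to None
#     True
# Adding two QuotaSets keeps existing limits and only fills in the gaps.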
def get_service_from_catalog(catalog, service_type):
if catalog:
for service in catalog:
if 'type' not in service:
continue
if service['type'] == service_type:
return service
return None
def get_version_from_service(service):
if service and service.get('endpoints'):
endpoint = service['endpoints'][0]
if 'interface' in endpoint:
return 3
else:
return 2.0
return 2.0
# Mapping of V2 Catalog Endpoint_type to V3 Catalog Interfaces
ENDPOINT_TYPE_TO_INTERFACE = {
'publicURL': 'public',
'internalURL': 'internal',
'adminURL': 'admin',
}
def get_url_for_service(service, region, endpoint_type):
if 'type' not in service:
return None
identity_version = get_version_from_service(service)
service_endpoints = service.get('endpoints', [])
available_endpoints = [endpoint for endpoint in service_endpoints
if region == _get_endpoint_region(endpoint)]
"""if we are dealing with the identity service and there is no endpoint
in the current region, it is okay to use the first endpoint for any
identity service endpoints and we can assume that it is global
"""
if service['type'] == 'identity' and not available_endpoints:
available_endpoints = [endpoint for endpoint in service_endpoints]
for endpoint in available_endpoints:
try:
if identity_version < 3:
return endpoint.get(endpoint_type)
else:
interface = \
ENDPOINT_TYPE_TO_INTERFACE.get(endpoint_type, '')
if endpoint.get('interface') == interface:
return endpoint.get('url')
except (IndexError, KeyError):
"""it could be that the current endpoint just doesn't match the
type, continue trying the next one
"""
pass
return None
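# For illustration (catalog shapes assumed): a Keystone V2 endpoint keys the
# URL by type, e.g. {'region': 'RegionOne', 'publicURL': 'http://...'},
# while a V3 endpoint uses {'region_id': 'RegionOne', 'interface': 'public',
# 'url': 'http://...'}; ENDPOINT_TYPE_TO_INTERFACE bridges the two namings.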
def url_for(request, service_type, endpoint_type=None, region=None):
endpoint_type = endpoint_type or getattr(settings,
'OPENSTACK_ENDPOINT_TYPE',
'publicURL')
fallback_endpoint_type = getattr(settings, 'SECONDARY_ENDPOINT_TYPE', None)
catalog = request.user.service_catalog
service = get_service_from_catalog(catalog, service_type)
if service:
if not region:
region = request.user.services_region
url = get_url_for_service(service,
region,
endpoint_type)
if not url and fallback_endpoint_type:
url = get_url_for_service(service,
region,
fallback_endpoint_type)
if url:
return url
raise exceptions.ServiceCatalogException(service_type)
def is_service_enabled(request, service_type):
service = get_service_from_catalog(request.user.service_catalog,
service_type)
if service:
region = request.user.services_region
for endpoint in service.get('endpoints', []):
if 'type' not in service:
continue
# ignore region for identity
if service['type'] == 'identity' or \
_get_endpoint_region(endpoint) == region:
return True
return False
def _get_endpoint_region(endpoint):
"""Common function for getting the region from endpoint.
In Keystone V3, region has been deprecated in favor of
region_id.
This method provides a way to get region that works for
both Keystone V2 and V3.
"""
return endpoint.get('region_id') or endpoint.get('region')
| apache-2.0 | 6,872,752,707,314,681,000 | 31.790761 | 79 | 0.59642 | false | 4.405622 | false | false | false |
ozmartian/tvlinker | tvlinker/threads.py | 1 | 11150 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
from datetime import datetime, timedelta
from tzlocal import get_localzone
import pytz
import requests
from PyQt5.QtCore import QObject, QSettings, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QMessageBox, qApp
from bs4 import BeautifulSoup
from requests.exceptions import HTTPError
import cloudscraper
from tvlinker.filesize import alternative, size
try:
# noinspection PyPackageRequirements
import simplejson as json
except ImportError:
import json
class ShadowSocks:
config = {
'ssocks': {
'procs': ['ss-qt5', 'sslocal'],
'proxies': {
'http': 'socks5://127.0.0.1:1080',
'https': 'socks5://127.0.0.1:1080'
},
},
'v2ray': {
'procs': ['v2ray'],
'proxies': {
'http': 'socks5://127.0.0.1:10808',
'https': 'socks5://127.0.0.1:10808'
}
}
}
@staticmethod
def detect() -> str:
if sys.platform.startswith('linux'):
ptypes = ShadowSocks.config.keys()
ps = os.popen('ps -Af').read()
for ptype in ptypes:
procs = ShadowSocks.config[ptype]['procs']
for p in procs:
if ps.count(p):
return ptype
return None
@staticmethod
def proxies() -> dict:
proxy_type = ShadowSocks.detect()
return ShadowSocks.config[proxy_type]['proxies'] if proxy_type is not None else {}
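# Illustrative usage sketch (not part of the original module): the mapping
# returned here plugs straight into requests/cloudscraper and is empty when
# no known proxy process is running:
#     >>> requests.get('https://example.com', proxies=ShadowSocks.proxies())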
class ScrapeWorker(QObject):
addRow = pyqtSignal(list)
workFinished = pyqtSignal()
def __init__(self, source_url: str, useragent: str, maxpages: int):
super(ScrapeWorker, self).__init__()
self.maxpages = maxpages
self.source_url = source_url
self.user_agent = useragent
self.scraper = cloudscraper.create_scraper()
self.scraper.proxies = ShadowSocks.proxies()
self.tz_format = '%b %d %Y %H:%M'
self.tz_local = get_localzone()
self.complete = False
def scrape(self, pagenum: int) -> None:
try:
url = self.source_url.format(pagenum + 1)
req = self.scraper.get(url)
bs = BeautifulSoup(req.text, 'lxml')
posts = bs('div', class_='post')
for post in posts:
dt_utc = datetime.strptime(post.find('div', class_='p-c p-c-time').get_text().strip(), self.tz_format)
# TODO: fix hardcoded DST adjustment
dt_local = dt_utc.replace(tzinfo=pytz.utc).astimezone(self.tz_local) - timedelta(hours=2)
dlsize = post.find('h2').get_text().strip()
table_row = [
dt_local.strftime(self.tz_format),
post.find('a', class_='p-title').get('href').strip(),
post.find('a', class_='p-title').get_text().strip(),
dlsize[dlsize.rfind('(') + 1:len(dlsize) - 1]
]
self.addRow.emit(table_row)
except HTTPError:
sys.stderr.write(str(sys.exc_info()[0]))
# noinspection PyTypeChecker
QMessageBox.critical(None, 'ERROR NOTIFICATION', str(sys.exc_info()[0]))
# self.exit()
@pyqtSlot()
def begin(self):
for page in range(self.maxpages):
if QThread.currentThread().isInterruptionRequested():
return
self.scrape(page)
self.complete = True
self.workFinished.emit()
class HostersThread(QThread):
setHosters = pyqtSignal(list)
noLinks = pyqtSignal()
def __init__(self, link_url: str, useragent: str):
QThread.__init__(self)
self.link_url = link_url
self.user_agent = useragent
self.scraper = cloudscraper.create_scraper()
self.scraper.proxies = ShadowSocks.proxies()
def __del__(self) -> None:
self.wait()
def get_hoster_links(self) -> None:
try:
req = self.scraper.get(self.link_url)
bs = BeautifulSoup(req.text, 'lxml')
links = bs.select('div.post h2[style="text-align: center;"]')
self.setHosters.emit(links)
except HTTPError:
print(sys.exc_info()[0])
# noinspection PyTypeChecker
QMessageBox.critical(None, 'ERROR NOTIFICATION', str(sys.exc_info()[0]))
QThread.currentThread().quit()
except IndexError:
self.noLinks.emit()
QThread.currentThread().quit()
def run(self) -> None:
self.get_hoster_links()
class RealDebridThread(QThread):
unrestrictedLink = pyqtSignal(str)
supportedHosts = pyqtSignal(dict)
hostStatus = pyqtSignal(dict)
errorMsg = pyqtSignal(list)
class RealDebridAction:
UNRESTRICT_LINK = 0,
SUPPORTED_HOSTS = 1,
HOST_STATUS = 2
def __init__(self,
settings: QSettings,
api_url: str,
link_url: str,
action: RealDebridAction = RealDebridAction.UNRESTRICT_LINK,
check_host: str = None):
QThread.__init__(self)
self.api_url = api_url
self.api_token = settings.value('realdebrid_apitoken')
self.api_proxy = settings.value('realdebrid_apiproxy', False, bool)
self.link_url = link_url
self.action = action
self.check_host = check_host
self.proxies = ShadowSocks.proxies() if self.api_proxy else {}
def __del__(self):
self.wait()
def post(self, endpoint: str, payload: object = None) -> dict:
try:
res = requests.post('{0}{1}?auth_token={2}'.format(self.api_url, endpoint, self.api_token),
data=payload, proxies=self.proxies)
return res.json()
except HTTPError as e:
print(sys.exc_info())
self.errorMsg.emit([
'ERROR NOTIFICATION',
'<h3>Real-Debrid API Error</h3>'
'A problem occurred whilst communicating with Real-Debrid. Please check your '
'Internet connection.<br/><br/>'
'<b>ERROR LOG:</b><br/>%s: %s' % (qApp.applicationName(), e)
])
# self.exit()
def unrestrict_link(self) -> None:
jsonres = self.post(endpoint='/unrestrict/link', payload={'link': self.link_url})
if 'download' in jsonres.keys():
self.unrestrictedLink.emit(jsonres['download'])
else:
self.errorMsg.emit([
'REALDEBRID ERROR',
'<h3>Could not unrestrict link</h3>The hoster is most likely '
'down, please try again later.<br/><br/>{}'.format(jsonres)
])
def supported_hosts(self) -> None:
jsonres = self.post(endpoint='/hosts')
self.supportedHosts.emit(jsonres)
# def host_status(self, host: str) -> None:
# jsonres = self.post(endpoint='/hosts/status')
# self.hostStatus.emit(jsonres)
def run(self) -> None:
if self.action == RealDebridThread.RealDebridAction.UNRESTRICT_LINK:
self.unrestrict_link()
elif self.action == RealDebridThread.RealDebridAction.SUPPORTED_HOSTS:
self.supported_hosts()
# elif self.action == RealDebridThread.HOST_STATUS:
# self.host_status(self.check_host)
class Aria2Thread(QThread):
aria2Confirmation = pyqtSignal(bool)
def __init__(self, settings: QSettings, link_url: str):
QThread.__init__(self)
self.rpc_host = settings.value('aria2_rpc_host')
self.rpc_port = settings.value('aria2_rpc_port')
self.rpc_secret = settings.value('aria2_rpc_secret')
self.rpc_username = settings.value('aria2_rpc_username')
self.rpc_password = settings.value('aria2_rpc_password')
self.link_url = link_url
def __del__(self) -> None:
self.wait()
def add_uri(self) -> None:
user, passwd = '', ''
if len(self.rpc_username) > 0 and len(self.rpc_password) > 0:
user = self.rpc_username
passwd = self.rpc_password
elif len(self.rpc_secret) > 0:
user = 'token'
passwd = self.rpc_secret
aria2_endpoint = '%s:%s/jsonrpc' % (self.rpc_host, self.rpc_port)
headers = {'Content-Type': 'application/json'}
payload = json.dumps(
{
'jsonrpc': '2.0',
'id': 1,
'method': 'aria2.addUri',
'params': ['%s:%s' % (user, passwd), [self.link_url]]
},
sort_keys=False).encode('utf-8')
try:
from urllib.parse import urlencode
from urllib.request import Request, urlopen
req = Request(aria2_endpoint, headers=headers, data=payload)
res = urlopen(req).read().decode('utf-8')
jsonres = json.loads(res)
# res = requests.post(aria2_endpoint, headers=headers, data=payload)
# jsonres = res.json()
self.aria2Confirmation.emit('result' in jsonres.keys())
except HTTPError:
print(sys.exc_info())
# noinspection PyTypeChecker
QMessageBox.critical(None, 'ERROR NOTIFICATION', str(sys.exc_info()[1]), QMessageBox.Ok)
self.aria2Confirmation.emit(False)
# self.exit()
def run(self) -> None:
self.add_uri()
class DownloadThread(QThread):
dlComplete = pyqtSignal()
dlProgress = pyqtSignal(int)
dlProgressTxt = pyqtSignal(str)
def __init__(self, link_url: str, dl_path: str):
QThread.__init__(self)
self.download_link = link_url
self.download_path = dl_path
self.cancel_download = False
self.proxies = ShadowSocks.proxies()
def __del__(self) -> None:
self.wait()
def download_file(self) -> None:
req = requests.get(self.download_link, stream=True, proxies=self.proxies)
filesize = int(req.headers['Content-Length'])
filename = os.path.basename(self.download_path)
downloadedChunk = 0
blockSize = 8192
start = time.perf_counter()  # time.clock() was removed in Python 3.8
with open(self.download_path, 'wb') as f:
for chunk in req.iter_content(chunk_size=blockSize):
if self.cancel_download or not chunk:
req.close()
break
f.write(chunk)
downloadedChunk += len(chunk)
progress = float(downloadedChunk) / filesize
self.dlProgress.emit(progress * 100)
dlspeed = downloadedChunk // (time.perf_counter() - start) / 1000
progressTxt = '<b>Downloading {0}</b>:<br/>{1} of <b>{3}</b> [{2:.2%}] [{4} kbps]' \
.format(filename, downloadedChunk, progress, size(filesize, system=alternative), dlspeed)
self.dlProgressTxt.emit(progressTxt)
self.dlComplete.emit()
def run(self) -> None:
self.download_file()
| gpl-3.0 | 5,962,978,612,205,332,000 | 34.623003 | 118 | 0.562332 | false | 3.804162 | false | false | false |
xunilrj/sandbox | courses/course-edx-dat2031x/Simulation.py | 1 | 2680 | # -*- coding: utf-8 -*-
def sim_normal(nums, mean = 600, sd = 30):
import numpy as np
import numpy.random as nr
for n in nums:
dist = nr.normal(loc = mean, scale = sd, size = n)
titl = 'Normal distribution with ' + str(n) + ' values'
print('Summary for ' + str(n) + ' samples')
print(dist_summary(dist, titl))
print('Empirical 95% CIs')
print(np.percentile(dist, [2.5, 97.5]))
print(' ')
return('Done!')
def sim_poisson(nums, mean = 600):
import numpy as np
import numpy.random as nr
for n in nums:
dist = nr.poisson(lam = mean, size = n)
titl = 'Poisson distribution with ' + str(n) + ' values'
print(dist_summary(dist, titl))
print('Empirical 95% CIs')
print(np.percentile(dist, [2.5, 97.5]))
print(' ')
return('Done!')
def dist_summary(dist, names = 'dist_name'):
import pandas as pd
import matplotlib.pyplot as plt
ser = pd.Series(dist)
fig = plt.figure(1, figsize=(9, 6))
ax = fig.gca()
ser.hist(ax = ax, bins = 120)
ax.set_title('Frequency distribution of ' + names)
ax.set_ylabel('Frequency')
plt.show()
return(ser.describe())
def gen_profits(num):
import numpy.random as nr
unif = nr.uniform(size = num)
out = [5 if x < 0.3 else (3.5 if x < 0.6 else 4) for x in unif]
return(out)
def gen_tips(num):
import numpy.random as nr
unif = nr.uniform(size = num)
out = [0 if x < 0.5 else (0.25 if x < 0.7
else (1.0 if x < 0.9 else 2.0)) for x in unif]
return(out)
def sim_lemonade(num, mean = 600, sd = 30, pois = False):
## Simulate the profits and tips for
## a lemonade stand.
import numpy.random as nr
## number of customer arrivals
if pois:
arrivals = nr.poisson(lam = mean, size = num)
else:
arrivals = nr.normal(loc = mean, scale = sd, size = num)
print(dist_summary(arrivals, 'customer arrivals per day'))
## Compute distribution of average profit per arrival
proft = gen_profits(num)
print(dist_summary(proft, 'profit per arrival'))
## Total profits are profit per arrival
## times number of arrivals.
total_profit = arrivals * proft
print(dist_summary(total_profit, 'total profit per day'))
## Compute distribution of average tips per arrival
tps = gen_tips(num)
print(dist_summary(tps, 'tips per arrival'))
## Compute average tips per day
total_tips = arrivals * tps
print(dist_summary(total_tips, 'total tips per day'))
## Compute total profits plus total tips.
total_take = total_profit + total_tips
return(dist_summary(total_take, 'total net per day'))
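# Illustrative call (arguments assumed): simulate 10,000 days with
# Poisson-distributed arrivals instead of the Normal default:
#     sim_lemonade(10000, mean=600, pois=True)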
| apache-2.0 | 8,129,955,515,365,953,000 | 29.804598 | 67 | 0.614179 | false | 3.179122 | false | false | false |
samdroid-apps/sugar-toolkit-gtk3 | src/sugar3/bundle/activitybundle.py | 1 | 14091 | # Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""Sugar activity bundles
UNSTABLE.
"""
from ConfigParser import ConfigParser
from locale import normalize
import os
import shutil
import tempfile
import logging
from sugar3 import env
from sugar3.bundle.bundle import Bundle, \
MalformedBundleException, NotInstalledException
from sugar3.bundle.bundleversion import NormalizedVersion
from sugar3.bundle.bundleversion import InvalidVersionError
def _expand_lang(locale):
# Private method from gettext.py
locale = normalize(locale)
COMPONENT_CODESET = 1 << 0
COMPONENT_TERRITORY = 1 << 1
COMPONENT_MODIFIER = 1 << 2
# split up the locale into its base components
mask = 0
pos = locale.find('@')
if pos >= 0:
modifier = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_MODIFIER
else:
modifier = ''
pos = locale.find('.')
if pos >= 0:
codeset = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_CODESET
else:
codeset = ''
pos = locale.find('_')
if pos >= 0:
territory = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_TERRITORY
else:
territory = ''
language = locale
ret = []
for i in range(mask + 1):
if not (i & ~mask): # if all components for this combo exist ...
val = language
if i & COMPONENT_TERRITORY:
val += territory
if i & COMPONENT_CODESET:
val += codeset
if i & COMPONENT_MODIFIER:
val += modifier
ret.append(val)
ret.reverse()
return ret
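# For illustration: a locale such as 'fr_FR.UTF-8' expands to candidates
# ordered from most to least specific (modulo locale.normalize aliasing):
#     ['fr_FR.UTF-8', 'fr_FR', 'fr.UTF-8', 'fr']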
class ActivityBundle(Bundle):
"""A Sugar activity bundle
See http://wiki.laptop.org/go/Activity_bundles for details
"""
MIME_TYPE = 'application/vnd.olpc-sugar'
_zipped_extension = '.xo'
_unzipped_extension = '.activity'
_infodir = 'activity'
def __init__(self, path, translated=True):
Bundle.__init__(self, path)
self.activity_class = None
self.bundle_exec = None
self._name = None
self._icon = None
self._bundle_id = None
self._mime_types = None
self._show_launcher = True
self._tags = None
self._activity_version = '0'
self._summary = None
self._single_instance = False
info_file = self.get_file('activity/activity.info')
if info_file is None:
raise MalformedBundleException('No activity.info file')
self._parse_info(info_file)
if translated:
linfo_file = self._get_linfo_file()
if linfo_file:
self._parse_linfo(linfo_file)
def _parse_info(self, info_file):
cp = ConfigParser()
cp.readfp(info_file)
section = 'Activity'
if cp.has_option(section, 'bundle_id'):
self._bundle_id = cp.get(section, 'bundle_id')
else:
if cp.has_option(section, 'service_name'):
self._bundle_id = cp.get(section, 'service_name')
logging.error('ATTENTION: service_name property in the '
'activity.info file is deprecated, should be '
' changed to bundle_id')
else:
raise MalformedBundleException(
'Activity bundle %s does not specify a bundle id' %
self._path)
if cp.has_option(section, 'name'):
self._name = cp.get(section, 'name')
else:
raise MalformedBundleException(
'Activity bundle %s does not specify a name' % self._path)
if cp.has_option(section, 'exec'):
self.bundle_exec = cp.get(section, 'exec')
else:
raise MalformedBundleException(
'Activity bundle %s must specify either class or exec' %
self._path)
if cp.has_option(section, 'mime_types'):
mime_list = cp.get(section, 'mime_types').strip(';')
self._mime_types = [mime.strip() for mime in mime_list.split(';')]
if cp.has_option(section, 'show_launcher'):
if cp.get(section, 'show_launcher') == 'no':
self._show_launcher = False
if cp.has_option(section, 'tags'):
tag_list = cp.get(section, 'tags').strip(';')
self._tags = [tag.strip() for tag in tag_list.split(';')]
if cp.has_option(section, 'icon'):
self._icon = cp.get(section, 'icon')
if cp.has_option(section, 'activity_version'):
version = cp.get(section, 'activity_version')
try:
NormalizedVersion(version)
except InvalidVersionError:
raise MalformedBundleException(
'Activity bundle %s has invalid version number %s' %
(self._path, version))
self._activity_version = version
if cp.has_option(section, 'summary'):
self._summary = cp.get(section, 'summary')
if cp.has_option(section, 'single_instance'):
if cp.get(section, 'single_instance') == 'yes':
self._single_instance = True
def _get_linfo_file(self):
# Using method from gettext.py, first find languages from environ
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
# Next, normalize and expand the languages
nelangs = []
for lang in languages:
for nelang in _expand_lang(lang):
if nelang not in nelangs:
nelangs.append(nelang)
# Finally, select a language
for lang in nelangs:
linfo_path = os.path.join('locale', lang, 'activity.linfo')
linfo_file = self.get_file(linfo_path)
if linfo_file is not None:
return linfo_file
return None
def _parse_linfo(self, linfo_file):
cp = ConfigParser()
cp.readfp(linfo_file)
section = 'Activity'
if cp.has_option(section, 'name'):
self._name = cp.get(section, 'name')
if cp.has_option(section, 'summary'):
self._summary = cp.get(section, 'summary')
if cp.has_option(section, 'tags'):
tag_list = cp.get(section, 'tags').strip(';')
self._tags = [tag.strip() for tag in tag_list.split(';')]
def get_locale_path(self):
"""Get the locale path inside the (installed) activity bundle."""
if self._zip_file is not None:
raise NotInstalledException
return os.path.join(self._path, 'locale')
def get_icons_path(self):
"""Get the icons path inside the (installed) activity bundle."""
if self._zip_file is not None:
raise NotInstalledException
return os.path.join(self._path, 'icons')
def get_name(self):
"""Get the activity user-visible name."""
return self._name
def get_bundle_id(self):
"""Get the activity bundle id"""
return self._bundle_id
def get_icon(self):
"""Get the activity icon name"""
# FIXME: this should return the icon data, not a filename, so that
# we don't need to create a temp file in the zip case
icon_path = os.path.join('activity', self._icon + '.svg')
if self._zip_file is None:
return os.path.join(self._path, icon_path)
else:
icon_data = self.get_file(icon_path).read()
temp_file, temp_file_path = tempfile.mkstemp(prefix=self._icon,
suffix='.svg')
os.write(temp_file, icon_data)
os.close(temp_file)
return temp_file_path
def get_activity_version(self):
"""Get the activity version"""
return self._activity_version
def get_command(self):
"""Get the command to execute to launch the activity factory"""
if self.bundle_exec:
command = os.path.expandvars(self.bundle_exec)
else:
command = 'sugar-activity ' + self.activity_class
return command
def get_mime_types(self):
"""Get the MIME types supported by the activity"""
return self._mime_types
def get_tags(self):
"""Get the tags that describe the activity"""
return self._tags
def get_summary(self):
"""Get the summary that describe the activity"""
return self._summary
def get_single_instance(self):
"""Get whether there should be a single instance for the activity"""
return self._single_instance
def get_show_launcher(self):
"""Get whether there should be a visible launcher for the activity"""
return self._show_launcher
def install(self):
install_dir = env.get_user_activities_path()
self._unzip(install_dir)
install_path = os.path.join(install_dir, self._zip_root_dir)
self.install_mime_type(install_path)
return install_path
def install_mime_type(self, install_path):
""" Update the mime type database and install the mime type icon
"""
xdg_data_home = os.getenv('XDG_DATA_HOME',
os.path.expanduser('~/.local/share'))
mime_path = os.path.join(install_path, 'activity', 'mimetypes.xml')
if os.path.isfile(mime_path):
mime_dir = os.path.join(xdg_data_home, 'mime')
mime_pkg_dir = os.path.join(mime_dir, 'packages')
if not os.path.isdir(mime_pkg_dir):
os.makedirs(mime_pkg_dir)
installed_mime_path = os.path.join(mime_pkg_dir,
'%s.xml' % self._bundle_id)
self._symlink(mime_path, installed_mime_path)
os.spawnlp(os.P_WAIT, 'update-mime-database',
'update-mime-database', mime_dir)
mime_types = self.get_mime_types()
if mime_types is not None:
installed_icons_dir = \
os.path.join(xdg_data_home,
'icons/sugar/scalable/mimetypes')
if not os.path.isdir(installed_icons_dir):
os.makedirs(installed_icons_dir)
for mime_type in mime_types:
mime_icon_base = os.path.join(install_path, 'activity',
mime_type.replace('/', '-'))
svg_file = mime_icon_base + '.svg'
info_file = mime_icon_base + '.icon'
self._symlink(svg_file,
os.path.join(installed_icons_dir,
os.path.basename(svg_file)))
self._symlink(info_file,
os.path.join(installed_icons_dir,
os.path.basename(info_file)))
def _symlink(self, src, dst):
if not os.path.isfile(src):
return
if not os.path.islink(dst) and os.path.exists(dst):
raise RuntimeError('Do not remove %s if it was not installed by sugar' % dst)
logging.debug('Link resource %s to %s', src, dst)
if os.path.lexists(dst):
logging.debug('Relink %s', dst)
os.unlink(dst)
os.symlink(src, dst)
def uninstall(self, force=False, delete_profile=False):
install_path = self.get_path()
if os.path.islink(install_path):
# Don't remove the actual activity dir if it's a symbolic link
# because we may be removing user data.
os.unlink(install_path)
return
xdg_data_home = os.getenv('XDG_DATA_HOME',
os.path.expanduser('~/.local/share'))
mime_dir = os.path.join(xdg_data_home, 'mime')
installed_mime_path = os.path.join(mime_dir, 'packages',
'%s.xml' % self._bundle_id)
if os.path.exists(installed_mime_path):
os.remove(installed_mime_path)
os.spawnlp(os.P_WAIT, 'update-mime-database',
'update-mime-database', mime_dir)
mime_types = self.get_mime_types()
if mime_types is not None:
installed_icons_dir = \
os.path.join(xdg_data_home,
'icons/sugar/scalable/mimetypes')
if os.path.isdir(installed_icons_dir):
for f in os.listdir(installed_icons_dir):
path = os.path.join(installed_icons_dir, f)
if os.path.islink(path) and \
os.readlink(path).startswith(install_path):
os.remove(path)
if delete_profile:
bundle_profile_path = env.get_profile_path(self._bundle_id)
if os.path.exists(bundle_profile_path):
os.chmod(bundle_profile_path, 0o775)
shutil.rmtree(bundle_profile_path, ignore_errors=True)
self._uninstall(install_path)
def is_user_activity(self):
return self.get_path().startswith(env.get_user_activities_path())
| lgpl-2.1 | 4,325,474,794,642,606,600 | 34.583333 | 78 | 0.560145 | false | 4.063149 | false | false | false |
uclouvain/osis | learning_unit/ddd/domain/description_fiche.py | 1 | 2627 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
import attr
@attr.s(slots=True)
class DescriptionFiche:
resume = attr.ib(type=str, default=None)
resume_en = attr.ib(type=str, default=None)
teaching_methods = attr.ib(type=str, default=None)
teaching_methods_en = attr.ib(type=str, default=None)
evaluation_methods = attr.ib(type=str, default=None)
evaluation_methods_en = attr.ib(type=str, default=None)
other_informations = attr.ib(type=str, default=None)
other_informations_en = attr.ib(type=str, default=None)
online_resources = attr.ib(type=str, default=None)
online_resources_en = attr.ib(type=str, default=None)
bibliography = attr.ib(type=str, default=None)
mobility = attr.ib(type=str, default=None)
last_update = attr.ib(type=datetime.datetime, default=None)
author = attr.ib(type=str, default=None)
@attr.s(slots=True)
class DescriptionFicheForceMajeure:
teaching_methods = attr.ib(type=str, default=None)
teaching_methods_en = attr.ib(type=str, default=None)
evaluation_methods = attr.ib(type=str, default=None)
evaluation_methods_en = attr.ib(type=str, default=None)
other_informations = attr.ib(type=str, default=None)
other_informations_en = attr.ib(type=str, default=None)
last_update = attr.ib(type=datetime.datetime, default=None)
author = attr.ib(type=str, default=None)
| agpl-3.0 | -6,738,978,772,855,739,000 | 45.070175 | 87 | 0.680883 | false | 3.558266 | false | false | false |
Aplopio/document-converter | converters/utilities.py | 1 | 4138 | import sys
import re
import os
import shutil
import logging as log
sys.path.append('..')
from config import OUTPUT_FOLDER, UPLOAD_FOLDER
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
TMP_DIR = os.path.join(PARENT_DIR, UPLOAD_FOLDER)
from html_pdf import HtmlPdf
from html_txt import HtmlTxt
from pdf_html import PdfHtml
from txt_html import TxtHtml
from doc_pdf import DocPdf
from ppt_pdf import PptPdf
from rtf_pdf import RtfPdf
from utils import get_file_extension
from file_manager import FileManager
AVAILABLE_CONVERTERS = [(HtmlPdf, 'htmlpdf'), (HtmlTxt, 'htmltxt'),
(PdfHtml, 'pdfhtml'), (TxtHtml, 'txthtml'),
(DocPdf, 'docpdf'), (PptPdf, 'pptpdf'),
(RtfPdf, 'rtfpdf'), ]
def class_selector(input_format, output_format, result=None):
result = result or []
if input_format == output_format:
return result
direct_converter = get_direct_converter(input_format, output_format)
if direct_converter:
result.append(direct_converter)
return result
input_regex = make_regex(input_format)
input_matches = get_input_matches(input_regex)
for input_match in input_matches:
converter, converter_expression = input_match
intermediate_format = get_intermediate_format(converter_expression,
input_format)
result.append(input_match)
converter_list = class_selector(intermediate_format, output_format,
result)
if converter_list:
return converter_list
else:
result.pop()
def get_intermediate_format(converter_expression, input_format):
return re.sub(input_format, '', converter_expression)
def get_input_matches(input_regex):
return [(converter, expression)
for converter, expression in AVAILABLE_CONVERTERS
if input_regex.match(expression)]
def make_regex(format_string):
return re.compile('^%s'%format_string)
def get_direct_converter(input_format, output_format):
converter_expression = '%s%s'%(input_format, output_format)
for converter, expression in AVAILABLE_CONVERTERS:
if re.match(converter_expression, expression):
return (converter, expression)
def get_input_format(input_files_objects):
sample_input_file = input_files_objects[0].get_input_file_path()
input_format = get_file_extension(sample_input_file)
return input_format
def set_flags_of_file_objects(input_files_objects, output_files_objects):
for input_file_object, output_file_object in zip(input_files_objects,
output_files_objects):
if (not output_file_object) or output_file_object == input_file_object:
input_file_object.converted = False
else:
output_file_name = os.path.basename(
output_file_object.get_input_file_path())
os.system('mv %s %s' % (
output_file_object.get_input_file_path(), OUTPUT_FOLDER))
input_file_object.set_output_file_path(
os.path.join(OUTPUT_FOLDER, output_file_name))
input_file_object.converted = True
return input_files_objects
def get_files_objects(files_paths):
files_objects = []
for file_path in files_paths:
if file_path:
file_object = FileManager(None, input_file_path=file_path)
files_objects.append(file_object)
else:
files_objects.append(None)
return files_objects
def handle_failed_conversion(input_file):
if not input_file or not os.path.isfile(input_file):
return
failed_conversion_dir = os.path.join(TMP_DIR, 'failed_conversions')
if not os.path.isdir(failed_conversion_dir):
os.makedirs(failed_conversion_dir)
filename = os.path.basename(input_file)
try:
shutil.copyfile(input_file, os.path.join(failed_conversion_dir,
filename))
except IOError, ie:
log.error(ie)
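# Usage sketch (illustration only): class_selector() chains converters when
# no direct one exists. With the registry above, doc -> html resolves through
# the intermediate pdf format:
#
#     >>> class_selector('doc', 'html')
#     [(DocPdf, 'docpdf'), (PdfHtml, 'pdfhtml')]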
| mit | 4,393,052,712,470,979,600 | 33.483333 | 79 | 0.639198 | false | 3.803309 | false | false | false |
codilime/veles | python/veles/scli/client.py | 1 | 13837 | # Copyright 2017 CodiLime
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import socket
import ssl
import msgpack
from veles.proto import messages, msgpackwrap
from veles.proto.messages import PROTO_VERSION
from veles.schema import nodeid
from veles.util import helpers
class Client(object):
def __init__(self, sock, key, name='scli', version='1.0',
description='', type='scli', quit_on_close=False):
self.sock = sock
wrapper = msgpackwrap.MsgpackWrapper()
self.unpacker = wrapper.unpacker
self.packer = wrapper.packer
self.client_name = name
self.client_version = version
self.client_description = description
self.client_type = type
self.quit_on_close = quit_on_close
self._authorize(helpers.prepare_auth_key(key))
def _authorize(self, key):
self.sock.sendall(key)
self.send_msg(messages.MsgConnect(
proto_version=PROTO_VERSION,
client_name=self.client_name,
client_version=self.client_version,
client_description=self.client_description,
client_type=self.client_type,
quit_on_close=self.quit_on_close,
))
pkt = self.getpkt()
if isinstance(pkt, messages.MsgConnected):
print('Connected to server: {}'.format(pkt.server_name))
elif isinstance(pkt, messages.MsgConnectionError):
raise pkt.err
else:
print(pkt)
raise Exception('weird reply when attempting to connect')
def getpkt(self):
while True:
try:
return messages.MsgpackMsg.load(self.unpacker.unpack())
except msgpack.OutOfData:
pass
data = self.sock.recv(1024)
if not data:
raise Exception("end of file")
self.unpacker.feed(data)
def send_msg(self, msg):
self.sock.sendall(self.packer.pack(msg.dump()))
def request(self, msg):
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgRequestAck) and pkt.rid == 0:
return msg.id
elif isinstance(pkt, messages.MsgRequestError) and pkt.rid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to request')
def create(self, parent, tags=set(), attr={}, data={}, bindata={},
pos=(None, None)):
msg = messages.MsgCreate(
id=nodeid.NodeID(),
parent=parent,
pos_start=pos[0],
pos_end=pos[1],
tags=tags,
attr=attr,
data=data,
bindata=bindata,
rid=0,
)
self.request(msg)
return msg.id
def delete(self, obj):
msg = messages.MsgDelete(
id=obj,
rid=0
)
self.request(msg)
def set_parent(self, obj, parent):
msg = messages.MsgSetParent(
id=obj,
parent=parent,
rid=0
)
self.request(msg)
def set_pos(self, obj, start, end):
msg = messages.MsgSetPos(
id=obj,
pos_start=start,
pos_end=end,
rid=0
)
self.request(msg)
def add_tag(self, obj, tag):
msg = messages.MsgAddTag(
id=obj,
tag=tag,
rid=0
)
self.request(msg)
def del_tag(self, obj, tag):
msg = messages.MsgDelTag(
id=obj,
tag=tag,
rid=0
)
self.request(msg)
def set_attr(self, obj, key, data):
msg = messages.MsgSetAttr(
id=obj,
key=key,
data=data,
rid=0
)
self.request(msg)
def set_data(self, obj, key, data):
msg = messages.MsgSetData(
id=obj,
rid=0,
key=key,
data=data,
)
self.request(msg)
def set_bindata(self, obj, key, start, data, truncate=False):
msg = messages.MsgSetBinData(
id=obj,
rid=0,
key=key,
start=start,
data=data,
truncate=truncate,
)
self.request(msg)
def get(self, obj):
msg = messages.MsgGet(
id=obj,
qid=0,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetReply) and pkt.qid == 0:
return pkt.obj
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get')
def get_sub(self, obj):
msg = messages.MsgGet(
id=obj,
qid=0,
sub=True,
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetReply) and pkt.qid == 0:
yield pkt.obj
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get')
def get_data(self, obj, key):
msg = messages.MsgGetData(
id=obj,
qid=0,
key=key,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetDataReply) and pkt.qid == 0:
return pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_data')
def get_data_sub(self, obj, key):
msg = messages.MsgGetData(
id=obj,
qid=0,
key=key,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetDataReply) and pkt.qid == 0:
yield pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_data')
def get_bindata(self, obj, key, start=0, end=None):
msg = messages.MsgGetBinData(
id=obj,
qid=0,
key=key,
start=start,
end=end,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetBinDataReply) and pkt.qid == 0:
return pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_bindata')
def get_bindata_sub(self, obj, key, start=0, end=None):
msg = messages.MsgGetBinData(
id=obj,
qid=0,
key=key,
start=start,
end=end,
sub=True,
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetBinDataReply) and pkt.qid == 0:
yield pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_bindata')
def list(self, obj):
msg = messages.MsgGetList(
qid=0,
parent=obj,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetListReply) and pkt.qid == 0:
return pkt.objs
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list')
def list_sub(self, obj):
msg = messages.MsgGetList(
qid=0,
parent=obj,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetListReply) and pkt.qid == 0:
yield pkt
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list')
def query(self, obj, sig, params, checks=None):
params = sig.params.dump(params)
msg = messages.MsgGetQuery(
qid=0,
node=obj,
query=sig.name,
params=params,
trace=checks is not None
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetQueryReply) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
return sig.result.load(pkt.result)
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to get_query')
def query_sub(self, obj, sig, params, checks=None):
params = sig.params.dump(params)
msg = messages.MsgGetQuery(
qid=0,
node=obj,
query=sig.name,
params=params,
trace=checks is not None,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetQueryReply) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
yield sig.result.load(pkt.result)
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to get_query')
def run_method(self, obj, sig, params):
params = sig.params.dump(params)
msg = messages.MsgMethodRun(
mid=0,
node=obj,
method=sig.name,
params=params
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgMethodResult) and pkt.mid == 0:
return sig.result.load(pkt.result)
elif isinstance(pkt, messages.MsgMethodError) and pkt.mid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to run_method')
def run_broadcast(self, sig, params):
params = sig.params.dump(params)
msg = messages.MsgBroadcastRun(
bid=0,
broadcast=sig.name,
params=params
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgBroadcastResult) and pkt.bid == 0:
return [sig.result.load(result) for result in pkt.results]
else:
print(pkt)
raise Exception('weird reply to run_broadcast')
def list_connections(self):
msg = messages.MsgListConnections(
qid=0,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgConnectionsReply) and pkt.qid == 0:
return pkt.connections
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list_connections')
def list_connections_sub(self):
msg = messages.MsgListConnections(
qid=0,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgConnectionsReply) and pkt.qid == 0:
yield pkt
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list_connections')
class UnixClient(Client):
def __init__(self, path, key, **kwargs):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(path)
super(UnixClient, self).__init__(sock, key, **kwargs)
class TcpClient(Client):
def __init__(self, ip, port, key, **kwargs):
sock = socket.create_connection((ip, port))
super(TcpClient, self).__init__(sock, key, **kwargs)
class SslClient(Client):
def __init__(self, ip, port, key, fingerprint, **kwargs):
sock = socket.create_connection((ip, port))
sc = ssl.SSLContext()
sock = sc.wrap_socket(sock)
cert = sock.getpeercert(True)
helpers.validate_cert(cert, fingerprint)
super(SslClient, self).__init__(sock, key, **kwargs)
def create_client(url):
url = helpers.parse_url(url)
if url.scheme == helpers.UrlScheme.UNIX_SCHEME:
return UnixClient(url.path, url.auth_key)
elif url.scheme == helpers.UrlScheme.TCP_SCHEME:
return TcpClient(url.host, url.port, url.auth_key)
elif url.scheme == helpers.UrlScheme.SSL_SCHEME:
return SslClient(url.host, url.port, url.auth_key, url.fingerprint)
else:
raise ValueError('Wrong scheme provided!')
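# Usage sketch (illustration only; the exact URL grammar is whatever
# helpers.parse_url accepts for the unix/tcp/ssl schemes handled above):
#
#     client = create_client(url)
#     for conn in client.list_connections():
#         print(conn)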
| apache-2.0 | -1,839,075,806,865,339,100 | 30.094382 | 78 | 0.539206 | false | 3.984164 | false | false | false |
askin/GNazar | GNazar/gnazar.py | 1 | 6314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under GPL v2
# Copyright 2010, Aşkın Yollu <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import pygtk
pygtk.require('2.0')
import gtk
import gettext
import pynotify
import time
import os
import sys
import locale
import random
import platform
gtk.gdk.threads_init()
#Translation stuff
localedir = "/usr/share/gnazar/locale"
gettext.bindtextdomain('gnazar', localedir)
gettext.textdomain('gnazar')
sharedirs = '/usr/share'
_ = gettext.gettext
class GNazar():
def __init__(self):
# create a new Status Icon
self.gnazar = gtk.StatusIcon()
self.gnazar.set_from_file(
'%s/icons/hicolor/22x22/apps/gnazar-deactive.png' % sharedirs)
self.gnazar.set_tooltip(
_("GNazar - You are completely demilitarized..."))
self.gnazar.set_visible(True)
self.status = False
# create menu
self.menu = gtk.Menu()
self.gnazar.connect("popup_menu", self.show_menu)
# connect
_quit = gtk.ImageMenuItem(gtk.STOCK_QUIT)
_quit.connect("activate", self.destroy)
_about = gtk.ImageMenuItem(gtk.STOCK_ABOUT)
_about.connect("activate", self.show_about)
_protect = gtk.ImageMenuItem(gtk.STOCK_OK)
_protect.connect("activate", self.protect)
_protect.set_label(_("Protect"))
_release = gtk.ImageMenuItem(gtk.STOCK_CANCEL)
_release.set_label(_("Release"))
_release.connect("activate", self.release)
# add to menu
self.menu.add(_protect)
self.menu.add(_release)
self.menu.add(_about)
self.menu.add(_quit)
self.menu.show_all()
# notification
pynotify.init(_("GNazar Application"))
# init attack
self.total_attack = 0
        self.defeated_attack = 0
self.running = True
import thread
thread.start_new_thread(self._notification, ())
def main(self):
# gtk main
gtk.main()
'''
show popup menu
'''
def show_menu(self, status_icon, button, activate_time):
self.menu.popup(None, None, gtk.status_icon_position_menu,
button, activate_time, status_icon)
# random notification
def _notification(self):
while(self.running):
time.sleep(random.randrange(3600, 18000))
#time.sleep(4) # testing
self.notification()
'''
show about
'''
def show_about(self, widget):
about = gtk.AboutDialog()
about.set_program_name("GNazar")
about.set_icon_from_file("%s/icons/hicolor/22x22/apps/gnazar.png"
% sharedirs)
about.set_version("0.1")
about.set_copyright("(c) Aşkın Yollu")
# FIXME: make it generic (mac, bsd, win etc..)
dist_name = platform.dist()[0]
        about.set_comments(_("GNazar is a useful part of the %s") % dist_name)
about.set_website("http://www.askin.ws")
about.set_logo(gtk.gdk.pixbuf_new_from_file(
"%s/icons/hicolor/32x32/apps/gnazar.png" % sharedirs))
about.set_translator_credits(_("TRANSLATORS"))
about.set_artists([_("THANKSFORICONS")])
about.run()
about.destroy()
# destroy callback
def destroy(self, widget):
self.gnazar.set_visible(False)
self.running = False
gtk.main_quit()
# popup callback
def protect(self, widget):
if self.status == False:
dialog = gtk.MessageDialog(
parent=None,
flags=gtk.DIALOG_DESTROY_WITH_PARENT,
type=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_OK,
message_format=_("GNazar is starting to protect your "
"computer from harmful looks...")
)
dialog.set_title(_("GNazar Application"))
dialog.connect('response', self.dialog_destroyer)
dialog.show()
self.status = True
self.gnazar.set_tooltip(_("GNazar - No harmful look allowed!"))
self.gnazar.set_from_file("%s/icons/hicolor/22x22/apps/gnazar.png"
% sharedirs)
def release(self, widget):
if self.status == True:
dialog = gtk.MessageDialog(
parent=None,
flags=gtk.DIALOG_DESTROY_WITH_PARENT,
type=gtk.MESSAGE_WARNING,
buttons=gtk.BUTTONS_OK,
message_format=_("GNazar is stopping to protect your computer"
" from harmful looks...")
)
dialog.set_title(_("GNazar Application"))
dialog.connect('response', self.dialog_destroyer)
dialog.show()
self.status = False
self.gnazar.set_tooltip(
_("GNazar - You are completely demilitarized..."))
self.gnazar.set_from_file(
"%s/icons/hicolor/22x22/apps/gnazar-deactive.png" % sharedirs)
def notification(self):
self.total_attack += 1
if self.status == True:
            self.defeated_attack += 1
title = _("Nazar eliminated")
body = _("Nazar Received and eliminated successfuly")
icon = "gtk-apply"
else:
title = _("Nazar harmed")
body = _("Nazar Received and it HARMED!")
icon = "dialog-warning"
self.gnazar.set_tooltip(
_("GNazar - %s attacks received so far, %s"
" are defated and %s are received...") %
(self.total_attack,
self.defated_attack,
self.total_attack - self.defated_attack))
notify = pynotify.Notification(title, body, icon)
notify.set_urgency(pynotify.URGENCY_NORMAL)
notify.set_timeout(pynotify.EXPIRES_NEVER)
notify.show()
def dialog_destroyer(self, dialog, widget):
dialog.destroy()
def main():
si = GNazar()
si.main()
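# Entry-point guard (added for illustration so the module runs standalone):
if __name__ == '__main__':
    main()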
| gpl-2.0 | 1,321,648,834,766,344,000 | 31.525773 | 79 | 0.572266 | false | 3.778443 | false | false | false |
laurmurclar/mitmproxy | mitmproxy/tools/console/flowview.py | 1 | 23737 | import math
import os
import sys
from functools import lru_cache
from typing import Optional, Union # noqa
import urwid
from mitmproxy import contentviews
from mitmproxy import exceptions
from mitmproxy import export
from mitmproxy import http
from mitmproxy.net.http import Headers
from mitmproxy.net.http import status_codes
from mitmproxy.tools.console import common
from mitmproxy.tools.console import flowdetailview
from mitmproxy.tools.console import grideditor
from mitmproxy.tools.console import searchable
from mitmproxy.tools.console import signals
from mitmproxy.tools.console import tabs
class SearchError(Exception):
pass
def _mkhelp():
text = []
keys = [
("A", "accept all intercepted flows"),
("a", "accept this intercepted flow"),
("b", "save request/response body"),
("C", "export flow to clipboard"),
("D", "duplicate flow"),
("d", "delete flow"),
("e", "edit request/response"),
("f", "load full body data"),
("m", "change body display mode for this entity\n(default mode can be changed in the options)"),
(None,
common.highlight_key("automatic", "a") +
[("text", ": automatic detection")]
),
(None,
common.highlight_key("hex", "e") +
[("text", ": Hex")]
),
(None,
common.highlight_key("html", "h") +
[("text", ": HTML")]
),
(None,
common.highlight_key("image", "i") +
[("text", ": Image")]
),
(None,
common.highlight_key("javascript", "j") +
[("text", ": JavaScript")]
),
(None,
common.highlight_key("json", "s") +
[("text", ": JSON")]
),
(None,
common.highlight_key("urlencoded", "u") +
[("text", ": URL-encoded data")]
),
(None,
common.highlight_key("raw", "r") +
[("text", ": raw data")]
),
(None,
common.highlight_key("xml", "x") +
[("text", ": XML")]
),
("E", "export flow to file"),
("r", "replay request"),
("V", "revert changes to request"),
("v", "view body in external viewer"),
("w", "save all flows matching current view filter"),
("W", "save this flow"),
("x", "delete body"),
("z", "encode/decode a request/response"),
("tab", "next tab"),
("h, l", "previous tab, next tab"),
("space", "next flow"),
("|", "run script on this flow"),
("/", "search (case sensitive)"),
("n", "repeat search forward"),
("N", "repeat search backwards"),
]
text.extend(common.format_keyvals(keys, key="key", val="text", indent=4))
return text
help_context = _mkhelp()
footer = [
('heading_key', "?"), ":help ",
('heading_key', "q"), ":back ",
]
class FlowViewHeader(urwid.WidgetWrap):
    def __init__(self, master: "mitmproxy.tools.console.master.ConsoleMaster", f: http.HTTPFlow):
self.master = master
self.flow = f
self._w = common.format_flow(
f,
False,
extended=True,
hostheader=self.master.options.showhost
)
signals.flow_change.connect(self.sig_flow_change)
def sig_flow_change(self, sender, flow):
if flow == self.flow:
self._w = common.format_flow(
flow,
False,
extended=True,
hostheader=self.master.options.showhost
)
TAB_REQ = 0
TAB_RESP = 1
class FlowView(tabs.Tabs):
highlight_color = "focusfield"
def __init__(self, master, view, flow, tab_offset):
self.master, self.view, self.flow = master, view, flow
super().__init__(
[
(self.tab_request, self.view_request),
(self.tab_response, self.view_response),
(self.tab_details, self.view_details),
],
tab_offset
)
self.show()
self.last_displayed_body = None
signals.flow_change.connect(self.sig_flow_change)
def tab_request(self):
if self.flow.intercepted and not self.flow.response:
return "Request intercepted"
else:
return "Request"
def tab_response(self):
if self.flow.intercepted and self.flow.response:
return "Response intercepted"
else:
return "Response"
def tab_details(self):
return "Detail"
def view_request(self):
return self.conn_text(self.flow.request)
def view_response(self):
return self.conn_text(self.flow.response)
def view_details(self):
return flowdetailview.flowdetails(self.view, self.flow)
def sig_flow_change(self, sender, flow):
if flow == self.flow:
self.show()
def content_view(self, viewmode, message):
if message.raw_content is None:
msg, body = "", [urwid.Text([("error", "[content missing]")])]
return msg, body
else:
s = self.view.settings[self.flow]
full = s.get((self.tab_offset, "fullcontents"), False)
if full:
limit = sys.maxsize
else:
limit = contentviews.VIEW_CUTOFF
flow_modify_cache_invalidation = hash((
message.raw_content,
message.headers.fields,
getattr(message, "path", None),
))
# we need to pass the message off-band because it's not hashable
self._get_content_view_message = message
return self._get_content_view(viewmode, limit, flow_modify_cache_invalidation)
@lru_cache(maxsize=200)
def _get_content_view(self, viewmode, max_lines, _):
message = self._get_content_view_message
self._get_content_view_message = None
description, lines, error = contentviews.get_message_content_view(
viewmode, message
)
if error:
signals.add_log(error, "error")
# Give hint that you have to tab for the response.
if description == "No content" and isinstance(message, http.HTTPRequest):
description = "No request content (press tab to view response)"
        # If the user has a wide terminal, they get fewer lines; this should not be an issue.
chars_per_line = 80
max_chars = max_lines * chars_per_line
total_chars = 0
text_objects = []
for line in lines:
txt = []
for (style, text) in line:
if total_chars + len(text) > max_chars:
text = text[:max_chars - total_chars]
txt.append((style, text))
total_chars += len(text)
if total_chars == max_chars:
break
# round up to the next line.
total_chars = int(math.ceil(total_chars / chars_per_line) * chars_per_line)
text_objects.append(urwid.Text(txt))
if total_chars == max_chars:
text_objects.append(urwid.Text([
("highlight", "Stopped displaying data after %d lines. Press " % max_lines),
("key", "f"),
("highlight", " to load all data.")
]))
break
return description, text_objects
def viewmode_get(self):
override = self.view.settings[self.flow].get(
(self.tab_offset, "prettyview"),
None
)
return self.master.options.default_contentview if override is None else override
def conn_text(self, conn):
if conn:
txt = common.format_keyvals(
[(h + ":", v) for (h, v) in conn.headers.items(multi=True)],
key = "header",
val = "text"
)
viewmode = self.viewmode_get()
msg, body = self.content_view(viewmode, conn)
cols = [
urwid.Text(
[
("heading", msg),
]
),
urwid.Text(
[
" ",
('heading', "["),
('heading_key', "m"),
('heading', (":%s]" % viewmode)),
],
align="right"
)
]
title = urwid.AttrWrap(urwid.Columns(cols), "heading")
txt.append(title)
txt.extend(body)
else:
txt = [
urwid.Text(""),
urwid.Text(
[
("highlight", "No response. Press "),
("key", "e"),
("highlight", " and edit any aspect to add one."),
]
)
]
return searchable.Searchable(self.view, txt)
def set_method_raw(self, m):
if m:
self.flow.request.method = m
signals.flow_change.send(self, flow = self.flow)
def edit_method(self, m):
if m == "e":
signals.status_prompt.send(
prompt = "Method",
text = self.flow.request.method,
callback = self.set_method_raw
)
else:
for i in common.METHOD_OPTIONS:
if i[1] == m:
self.flow.request.method = i[0].upper()
signals.flow_change.send(self, flow = self.flow)
def set_url(self, url):
request = self.flow.request
try:
request.url = str(url)
except ValueError:
return "Invalid URL."
signals.flow_change.send(self, flow = self.flow)
def set_resp_status_code(self, status_code):
try:
status_code = int(status_code)
except ValueError:
return None
self.flow.response.status_code = status_code
if status_code in status_codes.RESPONSES:
self.flow.response.reason = status_codes.RESPONSES[status_code]
signals.flow_change.send(self, flow = self.flow)
def set_resp_reason(self, reason):
self.flow.response.reason = reason
signals.flow_change.send(self, flow = self.flow)
def set_headers(self, fields, conn):
conn.headers = Headers(fields)
signals.flow_change.send(self, flow = self.flow)
def set_query(self, lst, conn):
conn.query = lst
signals.flow_change.send(self, flow = self.flow)
def set_path_components(self, lst, conn):
conn.path_components = lst
signals.flow_change.send(self, flow = self.flow)
def set_form(self, lst, conn):
conn.urlencoded_form = lst
signals.flow_change.send(self, flow = self.flow)
def edit_form(self, conn):
self.master.view_grideditor(
grideditor.URLEncodedFormEditor(
self.master,
conn.urlencoded_form.items(multi=True),
self.set_form,
conn
)
)
def edit_form_confirm(self, key, conn):
if key == "y":
self.edit_form(conn)
def set_cookies(self, lst, conn):
conn.cookies = lst
signals.flow_change.send(self, flow = self.flow)
def set_setcookies(self, data, conn):
conn.cookies = data
signals.flow_change.send(self, flow = self.flow)
def edit(self, part):
if self.tab_offset == TAB_REQ:
message = self.flow.request
else:
if not self.flow.response:
self.flow.response = http.HTTPResponse.make(200, b"")
message = self.flow.response
self.flow.backup()
if message == self.flow.request and part == "c":
self.master.view_grideditor(
grideditor.CookieEditor(
self.master,
message.cookies.items(multi=True),
self.set_cookies,
message
)
)
if message == self.flow.response and part == "c":
self.master.view_grideditor(
grideditor.SetCookieEditor(
self.master,
message.cookies.items(multi=True),
self.set_setcookies,
message
)
)
if part == "r":
# Fix an issue caused by some editors when editing a
# request/response body. Many editors make it hard to save a
# file without a terminating newline on the last line. When
# editing message bodies, this can cause problems. For now, I
# just strip the newlines off the end of the body when we return
# from an editor.
c = self.master.spawn_editor(message.get_content(strict=False) or b"")
message.content = c.rstrip(b"\n")
elif part == "f":
if not message.urlencoded_form and message.raw_content:
signals.status_prompt_onekey.send(
prompt = "Existing body is not a URL-encoded form. Clear and edit?",
keys = [
("yes", "y"),
("no", "n"),
],
callback = self.edit_form_confirm,
args = (message,)
)
else:
self.edit_form(message)
elif part == "h":
self.master.view_grideditor(
grideditor.HeaderEditor(
self.master,
message.headers.fields,
self.set_headers,
message
)
)
elif part == "p":
p = message.path_components
self.master.view_grideditor(
grideditor.PathEditor(
self.master,
p,
self.set_path_components,
message
)
)
elif part == "q":
self.master.view_grideditor(
grideditor.QueryEditor(
self.master,
message.query.items(multi=True),
self.set_query, message
)
)
elif part == "u":
signals.status_prompt.send(
prompt = "URL",
text = message.url,
callback = self.set_url
)
elif part == "m" and message == self.flow.request:
signals.status_prompt_onekey.send(
prompt = "Method",
keys = common.METHOD_OPTIONS,
callback = self.edit_method
)
elif part == "o":
signals.status_prompt.send(
prompt = "Code",
text = str(message.status_code),
callback = self.set_resp_status_code
)
elif part == "m" and message == self.flow.response:
signals.status_prompt.send(
prompt = "Message",
text = message.reason,
callback = self.set_resp_reason
)
signals.flow_change.send(self, flow = self.flow)
def view_flow(self, flow):
signals.pop_view_state.send(self)
self.master.view_flow(flow, self.tab_offset)
def _view_nextprev_flow(self, idx, flow):
if not self.view.inbounds(idx):
signals.status_message.send(message="No more flows")
return
self.view_flow(self.view[idx])
def view_next_flow(self, flow):
return self._view_nextprev_flow(self.view.index(flow) + 1, flow)
def view_prev_flow(self, flow):
return self._view_nextprev_flow(self.view.index(flow) - 1, flow)
def change_this_display_mode(self, t):
view = contentviews.get_by_shortcut(t)
if view:
self.view.settings[self.flow][(self.tab_offset, "prettyview")] = view.name
else:
self.view.settings[self.flow][(self.tab_offset, "prettyview")] = None
signals.flow_change.send(self, flow=self.flow)
def keypress(self, size, key):
conn = None # type: Optional[Union[http.HTTPRequest, http.HTTPResponse]]
if self.tab_offset == TAB_REQ:
conn = self.flow.request
elif self.tab_offset == TAB_RESP:
conn = self.flow.response
key = super().keypress(size, key)
# Special case: Space moves over to the next flow.
# We need to catch that before applying common.shortcuts()
if key == " ":
self.view_next_flow(self.flow)
return
key = common.shortcuts(key)
if key in ("up", "down", "page up", "page down"):
# Pass scroll events to the wrapped widget
self._w.keypress(size, key)
elif key == "a":
self.flow.resume()
self.master.view.update(self.flow)
elif key == "A":
for f in self.view:
if f.intercepted:
f.resume()
self.master.view.update(self.flow)
elif key == "d":
if self.flow.killable:
self.flow.kill()
self.view.remove(self.flow)
if not self.view.focus.flow:
self.master.view_flowlist()
else:
self.view_flow(self.view.focus.flow)
elif key == "D":
cp = self.flow.copy()
self.master.view.add(cp)
self.master.view.focus.flow = cp
self.view_flow(cp)
signals.status_message.send(message="Duplicated.")
elif key == "p":
self.view_prev_flow(self.flow)
elif key == "r":
try:
self.master.replay_request(self.flow)
except exceptions.ReplayException as e:
signals.add_log("Replay error: %s" % e, "warn")
signals.flow_change.send(self, flow = self.flow)
elif key == "V":
if self.flow.modified():
self.flow.revert()
signals.flow_change.send(self, flow = self.flow)
signals.status_message.send(message="Reverted.")
else:
signals.status_message.send(message="Flow not modified.")
elif key == "W":
signals.status_prompt_path.send(
prompt = "Save this flow",
callback = self.master.save_one_flow,
args = (self.flow,)
)
elif key == "|":
signals.status_prompt_path.send(
prompt = "Send flow to script",
callback = self.master.run_script_once,
args = (self.flow,)
)
elif key == "e":
if self.tab_offset == TAB_REQ:
signals.status_prompt_onekey.send(
prompt="Edit request",
keys=(
("cookies", "c"),
("query", "q"),
("path", "p"),
("url", "u"),
("header", "h"),
("form", "f"),
("raw body", "r"),
("method", "m"),
),
callback=self.edit
)
elif self.tab_offset == TAB_RESP:
signals.status_prompt_onekey.send(
prompt="Edit response",
keys=(
("cookies", "c"),
("code", "o"),
("message", "m"),
("header", "h"),
("raw body", "r"),
),
callback=self.edit
)
else:
signals.status_message.send(
message="Tab to the request or response",
expire=1
)
elif key in set("bfgmxvzEC") and not conn:
signals.status_message.send(
message = "Tab to the request or response",
expire = 1
)
return
elif key == "b":
if self.tab_offset == TAB_REQ:
common.ask_save_body("q", self.flow)
else:
common.ask_save_body("s", self.flow)
elif key == "f":
self.view.settings[self.flow][(self.tab_offset, "fullcontents")] = True
signals.flow_change.send(self, flow = self.flow)
signals.status_message.send(message="Loading all body data...")
elif key == "m":
p = list(contentviews.view_prompts)
p.insert(0, ("Clear", "C"))
signals.status_prompt_onekey.send(
self,
prompt = "Display mode",
keys = p,
callback = self.change_this_display_mode
)
elif key == "E":
if self.tab_offset == TAB_REQ:
scope = "q"
else:
scope = "s"
signals.status_prompt_onekey.send(
self,
prompt = "Export to file",
keys = [(e[0], e[1]) for e in export.EXPORTERS],
callback = common.export_to_clip_or_file,
args = (scope, self.flow, common.ask_save_path)
)
elif key == "C":
if self.tab_offset == TAB_REQ:
scope = "q"
else:
scope = "s"
signals.status_prompt_onekey.send(
self,
prompt = "Export to clipboard",
keys = [(e[0], e[1]) for e in export.EXPORTERS],
callback = common.export_to_clip_or_file,
args = (scope, self.flow, common.copy_to_clipboard_or_prompt)
)
elif key == "x":
conn.content = None
signals.flow_change.send(self, flow=self.flow)
elif key == "v":
if conn.raw_content:
t = conn.headers.get("content-type")
if "EDITOR" in os.environ or "PAGER" in os.environ:
self.master.spawn_external_viewer(conn.get_content(strict=False), t)
else:
signals.status_message.send(
message = "Error! Set $EDITOR or $PAGER."
)
elif key == "z":
self.flow.backup()
e = conn.headers.get("content-encoding", "identity")
if e != "identity":
try:
conn.decode()
except ValueError:
signals.status_message.send(
message = "Could not decode - invalid data?"
)
else:
signals.status_prompt_onekey.send(
prompt = "Select encoding: ",
keys = (
("gzip", "z"),
("deflate", "d"),
("brotli", "b"),
),
callback = self.encode_callback,
args = (conn,)
)
signals.flow_change.send(self, flow = self.flow)
else:
# Key is not handled here.
return key
def encode_callback(self, key, conn):
encoding_map = {
"z": "gzip",
"d": "deflate",
"b": "br",
}
conn.encode(encoding_map[key])
signals.flow_change.send(self, flow = self.flow)
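# Illustrative sketch (not part of mitmproxy): the encoding_map above pairs
# the one-key prompt with HTTP content-codings; encode()/decode() round-trip
# a message body, which is what the 'z' keypress handler relies on.
def _demo_encode_roundtrip():
    resp = http.HTTPResponse.make(200, b"hello")
    resp.encode("gzip")
    assert resp.headers.get("content-encoding") == "gzip"
    resp.decode()
    assert resp.raw_content == b"hello"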
| mit | 642,012,547,737,181,300 | 33.551674 | 104 | 0.487256 | false | 4.237993 | false | false | false |
kuzmoyev/Google-Calendar-Simple-API | tests/test_attachment.py | 1 | 3788 | from unittest import TestCase
from gcsa.attachment import Attachment
from gcsa.serializers.attachment_serializer import AttachmentSerializer
DOC_URL = 'https://docs.google.com/document/d/1uDvwcxOsXkzl2Bod0YIfrIQ5MqfBhnc1jusYdH1xCZo/edit?usp=sharing'
class TestAttachment(TestCase):
def test_create(self):
attachment = Attachment('My doc',
file_url=DOC_URL,
mime_type="application/vnd.google-apps.document")
self.assertEqual(attachment.title, 'My doc')
with self.assertRaises(ValueError):
Attachment('My doc',
file_url=DOC_URL,
mime_type="application/vnd.google-apps.something")
class TestAttachmentSerializer(TestCase):
def test_to_json(self):
attachment = Attachment('My doc',
file_url=DOC_URL,
mime_type="application/vnd.google-apps.document")
attachment_json = {
'title': 'My doc',
'fileUrl': DOC_URL,
'mimeType': "application/vnd.google-apps.document"
}
self.assertDictEqual(AttachmentSerializer.to_json(attachment), attachment_json)
attachment = Attachment('My doc2',
file_url=DOC_URL,
mime_type="application/vnd.google-apps.drawing",
icon_link="https://some_link.com",
file_id='abc123')
attachment_json = {
'title': 'My doc2',
'fileUrl': DOC_URL,
'mimeType': "application/vnd.google-apps.drawing",
'iconLink': "https://some_link.com",
'fileId': 'abc123'
}
serializer = AttachmentSerializer(attachment)
self.assertDictEqual(serializer.get_json(), attachment_json)
def test_to_object(self):
attachment_json = {
'title': 'My doc',
'fileUrl': DOC_URL,
'mimeType': "application/vnd.google-apps.document"
}
attachment = AttachmentSerializer.to_object(attachment_json)
self.assertEqual(attachment.title, 'My doc')
self.assertEqual(attachment.file_url, DOC_URL)
self.assertEqual(attachment.mime_type, "application/vnd.google-apps.document")
self.assertIsNone(attachment.icon_link)
self.assertIsNone(attachment.file_id)
attachment_json = {
'title': 'My doc2',
'fileUrl': DOC_URL,
'mimeType': "application/vnd.google-apps.drawing",
'iconLink': "https://some_link.com",
'fileId': 'abc123'
}
serializer = AttachmentSerializer(attachment_json)
attachment = serializer.get_object()
self.assertEqual(attachment.title, 'My doc2')
self.assertEqual(attachment.file_url, DOC_URL)
self.assertEqual(attachment.mime_type, "application/vnd.google-apps.drawing")
self.assertEqual(attachment.icon_link, "https://some_link.com")
self.assertEqual(attachment.file_id, 'abc123')
attachment_json_str = """{
"title": "My doc3",
"fileUrl": "%s",
"mimeType": "application/vnd.google-apps.drawing",
"iconLink": "https://some_link.com",
"fileId": "abc123"
}
""" % DOC_URL
attachment = AttachmentSerializer.to_object(attachment_json_str)
self.assertEqual(attachment.title, 'My doc3')
self.assertEqual(attachment.file_url, DOC_URL)
self.assertEqual(attachment.mime_type, "application/vnd.google-apps.drawing")
self.assertEqual(attachment.icon_link, "https://some_link.com")
self.assertEqual(attachment.file_id, 'abc123')
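# To run these tests directly (illustrative; standard unittest invocation):
#
#     python -m unittest tests.test_attachment -v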
| mit | 5,226,027,302,797,445,000 | 38.873684 | 108 | 0.587381 | false | 4.029787 | true | false | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_web_application_firewall_policies_operations.py | 1 | 20908 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class WebApplicationFirewallPoliciesOperations(object):
"""WebApplicationFirewallPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"]
"""Lists all of the protection policies within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"]
"""Gets all the WAF policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
def get(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.WebApplicationFirewallPolicy"
"""Retrieve protection policy with specified name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
policy_name, # type: str
parameters, # type: "_models.WebApplicationFirewallPolicy"
**kwargs # type: Any
):
# type: (...) -> "_models.WebApplicationFirewallPolicy"
"""Creates or update policy with specified rule set name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:param parameters: Policy to be created.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
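# Usage sketch (illustration only; assumes azure-identity is installed and
# that the aggregate NetworkManagementClient exposes this operation group as
# `web_application_firewall_policies`, per the SDK's naming convention):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     network_client = NetworkManagementClient(DefaultAzureCredential(), sub_id)
#     for policy in network_client.web_application_firewall_policies.list(rg_name):
#         print(policy.name)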
| mit | -6,169,865,617,561,209,000 | 48.079812 | 215 | 0.643486 | false | 4.363105 | true | false | false |
abhinavsingh/proxy.py | examples/websocket_client.py | 1 | 1465 | # -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import time
from proxy.http.websocket import WebsocketClient, WebsocketFrame, websocketOpcodes
# globals
client: WebsocketClient
last_dispatch_time: float
static_frame = memoryview(WebsocketFrame.text(b'hello'))
num_echos = 10
def on_message(frame: WebsocketFrame) -> None:
"""WebsocketClient on_message callback."""
global client, num_echos, last_dispatch_time
print('Received %r after %d millisec' %
(frame.data, (time.time() - last_dispatch_time) * 1000))
assert(frame.data == b'hello' and frame.opcode ==
websocketOpcodes.TEXT_FRAME)
if num_echos > 0:
client.queue(static_frame)
last_dispatch_time = time.time()
num_echos -= 1
else:
client.close()
if __name__ == '__main__':
# Constructor establishes socket connection
client = WebsocketClient(
b'echo.websocket.org',
80,
b'/',
on_message=on_message)
# Perform handshake
client.handshake()
# Queue some data for client
client.queue(static_frame)
last_dispatch_time = time.time()
# Start event loop
client.run()
| bsd-3-clause | -4,967,191,948,487,152,000 | 28.18 | 86 | 0.654558 | false | 3.665829 | false | false | false |
sebalas/fake-useragent | fake_useragent/utils.py | 1 | 2970 | import os
import re
from . import settings
try: # Python 2
from urllib import urlopen, quote_plus
except ImportError: # Python 3
from urllib.request import urlopen
from urllib.parse import quote_plus
try:
import json
except ImportError:
import simplejson as json
def get(url, annex=None):
if annex is not None:
url = url % (quote_plus(annex), )
return urlopen(url).read()
def get_browsers():
"""
very very hardcoded/dirty re/split stuff, but no dependencies
"""
html = get(settings.BROWSERS_STATS_PAGE)
html = html.decode('windows-1252')
html = html.split('<table class="reference notranslate">')[1]
html = html.split('</table>')[0]
browsers = re.findall(r'\.asp">(.+?)<', html, re.UNICODE)
for value, override in settings.OVERRIDES:
browsers = [
value if browser == override else browser
for browser in browsers
]
browsers_statistics = re.findall(
r'td\sclass="right">(.+?)\s', html, re.UNICODE
)
# TODO: ensure encoding
return list(zip(browsers, browsers_statistics))
def get_browser_versions(browser):
"""
very very hardcoded/dirty re/split stuff, but no dependencies
"""
html = get(settings.BROWSER_BASE_PAGE, browser)
html = html.decode('iso-8859-1')
html = html.split('<div id=\'liste\'>')[1]
html = html.split('</div>')[0]
browsers_iter = re.finditer(r'\.php\'>(.+?)</a', html, re.UNICODE)
count = 0
browsers = []
for browser in browsers_iter:
if 'more' in browser.group(1).lower():
continue
# TODO: ensure encoding
browsers.append(browser.group(1))
count += 1
if count == settings.BROWSERS_COUNT_LIMIT:
break
return browsers
def load():
browsers_dict = {}
randomize_dict = {}
for item in get_browsers():
browser, percent = item
browser_key = browser
for replacement in settings.REPLACEMENTS:
browser_key = browser_key.replace(replacement, '')
browser_key = browser_key.lower()
browsers_dict[browser_key] = get_browser_versions(browser)
for counter in range(int(float(percent))):
randomize_dict[str(len(randomize_dict))] = browser_key
db = {}
db['browsers'] = browsers_dict
db['randomize'] = randomize_dict
return db
def write(data):
data = json.dumps(data, ensure_ascii=False)
# no codecs\with for python 2.5
f = open(settings.DB, 'w+')
f.write(data)
f.close()
def read():
# no codecs\with for python 2.5
f = open(settings.DB, 'r')
data = f.read()
f.close()
return json.loads(data)
def exist():
return os.path.isfile(settings.DB)
def rm():
if exist():
os.remove(settings.DB)
def update():
if exist():
rm()
write(load())
def load_cached():
if not exist():
update()
return read()
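# Usage sketch (network access is required on first run; the cache is written
# to settings.DB, and the 'chrome' key below is an assumed example):
#
#     db = load_cached()                   # builds the cache file if missing
#     versions = db['browsers']['chrome']  # list of user-agent version strings
#     weighted = db['randomize']           # index -> browser key, weighted by share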
| apache-2.0 | 6,406,799,067,328,781,000 | 19.915493 | 70 | 0.6 | false | 3.69863 | false | false | false |
Daeinar/norx-py | norx.py | 1 | 7942 | """
Python2 implementation of NORX.
------
:author: Philipp Jovanovic <[email protected]>, 2014-2015.
:license: CC0, see LICENSE for more details.
"""
from struct import pack, unpack
class NORX(object):
def __init__(self, w=64, r=4, d=1, t=256):
assert w in [32, 64]
assert r >= 1
assert d >= 0
assert 10 * w >= t >= 0
self.NORX_W = w
self.NORX_R = r
self.NORX_D = d
self.NORX_T = t
self.NORX_N = w * 2
self.NORX_K = w * 4
self.NORX_B = w * 16
self.NORX_C = w * 6
self.RATE = self.NORX_B - self.NORX_C
self.HEADER_TAG = 1 << 0
self.PAYLOAD_TAG = 1 << 1
self.TRAILER_TAG = 1 << 2
self.FINAL_TAG = 1 << 3
self.BRANCH_TAG = 1 << 4
self.MERGE_TAG = 1 << 5
self.BYTES_WORD = w / 8
self.BYTES_TAG = t / 8
self.WORDS_RATE = self.RATE / w
self.BYTES_RATE = self.WORDS_RATE * self.BYTES_WORD
if w == 32:
self.R = (8, 11, 16, 31)
self.U = (0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344, 0x254F537A,
0x38531D48, 0x839C6E83, 0xF97A3AE5, 0x8C91D88C, 0x11EAFB59)
self.M = 0xffffffff
self.fmt = '<L'
elif w == 64:
self.R = (8, 19, 40, 63)
self.U = (0x243F6A8885A308D3, 0x13198A2E03707344, 0xA4093822299F31D0, 0x082EFA98EC4E6C89, 0xAE8858DC339325A1,
0x670A134EE52D7FA6, 0xC4316D80CD967541, 0xD21DFBF8B630B762, 0x375A18D261E7F892, 0x343D1F187D92285B)
self.M = 0xffffffffffffffff
self.fmt = '<Q'
def load(self, x):
return unpack(self.fmt, x)[0]
def store(self, x):
return pack(self.fmt, x)
def ROTR(self, a, r):
return ((a >> r) | (a << (self.NORX_W - r))) & self.M
def H(self, a, b):
return ((a ^ b) ^ ((a & b) << 1)) & self.M
def G(self, a, b, c, d):
a = self.H(a, b)
d = self.ROTR(a ^ d, self.R[0])
c = self.H(c, d)
b = self.ROTR(b ^ c, self.R[1])
a = self.H(a, b)
d = self.ROTR(a ^ d, self.R[2])
c = self.H(c, d)
b = self.ROTR(b ^ c, self.R[3])
return a, b, c, d
def F(self, S):
# Column step
S[0], S[4], S[8], S[12] = self.G(S[0], S[4], S[8], S[12])
S[1], S[5], S[9], S[13] = self.G(S[1], S[5], S[9], S[13])
S[2], S[6], S[10], S[14] = self.G(S[2], S[6], S[10], S[14])
S[3], S[7], S[11], S[15] = self.G(S[3], S[7], S[11], S[15])
# Diagonal step
S[0], S[5], S[10], S[15] = self.G(S[0], S[5], S[10], S[15])
S[1], S[6], S[11], S[12] = self.G(S[1], S[6], S[11], S[12])
S[2], S[7], S[8], S[13] = self.G(S[2], S[7], S[8], S[13])
S[3], S[4], S[9], S[14] = self.G(S[3], S[4], S[9], S[14])
def permute(self, S):
for i in xrange(self.NORX_R):
self.F(S)
def pad(self, x):
y = bytearray(self.BYTES_RATE)
y[:len(x)] = x
y[len(x)] = 0x01
y[self.BYTES_RATE-1] |= 0x80
return y
def init(self, S, n, k):
b = self.BYTES_WORD
K = [self.load(k[b*i:b*(i+1)]) for i in xrange(self.NORX_K / self.NORX_W)]
N = [self.load(n[b*i:b*(i+1)]) for i in xrange(self.NORX_N / self.NORX_W)]
U = self.U
S[0], S[1], S[2], S[3] = U[0], N[0], N[1], U[1]
S[4], S[5], S[6], S[7] = K[0], K[1], K[2], K[3]
S[8], S[9], S[10], S[11] = U[2], U[3], U[4], U[5]
S[12], S[13], S[14], S[15] = U[6], U[7], U[8], U[9]
S[12] ^= self.NORX_W
S[13] ^= self.NORX_R
S[14] ^= self.NORX_D
S[15] ^= self.NORX_T
self.permute(S)
def inject_tag(self, S, tag):
S[15] ^= tag
def process_header(self, S, x):
return self.absorb_data(S, x, self.HEADER_TAG)
def process_trailer(self, S, x):
return self.absorb_data(S, x, self.TRAILER_TAG)
def absorb_data(self, S, x, tag):
inlen = len(x)
if inlen > 0:
i, n = 0, self.BYTES_RATE
while inlen >= n:
self.absorb_block(S, x[n*i:n*(i+1)], tag)
inlen -= n
i += 1
self.absorb_lastblock(S, x[n*i:n*i+inlen], tag)
def absorb_block(self, S, x, tag):
b = self.BYTES_WORD
self.inject_tag(S, tag)
self.permute(S)
for i in xrange(self.WORDS_RATE):
S[i] ^= self.load(x[b*i:b*(i+1)])
def absorb_lastblock(self, S, x, tag):
y = self.pad(x)
self.absorb_block(S, y, tag)
def encrypt_data(self, S, x):
c = bytearray()
inlen = len(x)
if inlen > 0:
i, n = 0, self.BYTES_RATE
while inlen >= n:
c += self.encrypt_block(S, x[n*i:n*(i+1)])
inlen -= n
i += 1
c += self.encrypt_lastblock(S, x[n*i:n*i+inlen])
return c
def encrypt_block(self, S, x):
c = bytearray()
b = self.BYTES_WORD
self.inject_tag(S, self.PAYLOAD_TAG)
self.permute(S)
for i in xrange(self.WORDS_RATE):
S[i] ^= self.load(x[b*i:b*(i+1)])
c += self.store(S[i])
return c[:self.BYTES_RATE]
def encrypt_lastblock(self, S, x):
y = self.pad(x)
c = self.encrypt_block(S, y)
return c[:len(x)]
def decrypt_data(self, S, x):
m = bytearray()
inlen = len(x)
if inlen > 0:
i, n = 0, self.BYTES_RATE
while inlen >= n:
m += self.decrypt_block(S, x[n*i:n*(i+1)])
inlen -= n
i += 1
m += self.decrypt_lastblock(S, x[n*i:n*i+inlen])
return m
def decrypt_block(self, S, x):
m = bytearray()
b = self.BYTES_WORD
self.inject_tag(S, self.PAYLOAD_TAG)
self.permute(S)
for i in xrange(self.WORDS_RATE):
c = self.load(x[b*i:b*(i+1)])
m += self.store(S[i] ^ c)
S[i] = c
return m[:self.BYTES_RATE]
def decrypt_lastblock(self, S, x):
m = bytearray()
y = bytearray()
b = self.BYTES_WORD
self.inject_tag(S, self.PAYLOAD_TAG)
self.permute(S)
for i in xrange(self.WORDS_RATE):
y += self.store(S[i])
y[:len(x)] = bytearray(x)
y[len(x)] ^= 0x01
y[self.BYTES_RATE-1] ^= 0x80
for i in xrange(self.WORDS_RATE):
c = self.load(y[b*i:b*(i+1)])
m += self.store(S[i] ^ c)
S[i] = c
return m[:len(x)]
def generate_tag(self, S):
t = bytearray()
self.inject_tag(S, self.FINAL_TAG)
self.permute(S)
self.permute(S)
for i in xrange(self.WORDS_RATE):
t += self.store(S[i])
return t[:self.BYTES_TAG]
def verify_tag(self, t0, t1):
acc = 0
for i in xrange(self.BYTES_TAG):
acc |= t0[i] ^ t1[i]
return (((acc - 1) >> 8) & 1) - 1
def aead_encrypt(self, h, m, t, n, k):
assert len(k) == self.NORX_K / 8
assert len(n) == self.NORX_N / 8
c = bytearray()
S = [0] * 16
self.init(S, n, k)
self.process_header(S, h)
c += self.encrypt_data(S, m)
self.process_trailer(S, t)
c += self.generate_tag(S)
return str(c)
def aead_decrypt(self, h, c, t, n, k):
assert len(k) == self.NORX_K / 8
assert len(n) == self.NORX_N / 8
assert len(c) >= self.BYTES_TAG
m = bytearray()
c = bytearray(c)
S = [0] * 16
d = len(c)-self.BYTES_TAG
c, t0 = c[:d], c[d:]
self.init(S, n, k)
self.process_header(S, h)
m += self.decrypt_data(S, c)
self.process_trailer(S, t)
t1 = self.generate_tag(S)
if self.verify_tag(t0, t1) != 0:
m = ''
return str(m)
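# Minimal round-trip sketch (Python 2; the defaults are NORX64-4-1, and the
# constant key/nonce below are for demonstration only -- never reuse them):
#
#     norx = NORX()                        # w=64, r=4, d=1, t=256
#     key = '\x00' * (norx.NORX_K / 8)     # 32-byte key
#     nonce = '\x01' * (norx.NORX_N / 8)   # 16-byte nonce
#     c = norx.aead_encrypt('', 'hello', '', nonce, key)
#     assert norx.aead_decrypt('', c, '', nonce, key) == 'hello'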
| cc0-1.0 | -3,776,232,993,985,714,000 | 30.515873 | 121 | 0.467137 | false | 2.640293 | false | false | false |
timfreund/pycontrol-shed | pycontrolshed/model.py | 1 | 13233 | # Copyright (C) 2011 Tim Freund and contributors.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from functools import wraps
from pycontrol import pycontrol
import logging
import pycontrolshed
import socket
# In [1]: route_domains = bigip.Networking.RouteDomain.get_list()
# In [2]: route_domains
# Out[2]: [2220L]
log = logging.getLogger('pycontrolshed.model')
def partitioned(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
partition = kwargs.get('partition', None)
if partition:
orig_partition = self.bigip.Management.Partition.get_active_partition()
self.bigip.active_partition = partition
rc = f(self, *args, **kwargs)
self.bigip.active_partition = orig_partition
return rc
else:
return f(self, *args, **kwargs)
return wrapper
class NodeAssistant(object):
def __init__(self, bigip):
self.bigip = bigip
def disable(self, nodes, partition=None):
self.enable_disable_nodes(nodes, 'STATE_DISABLED', partition=partition)
def enable(self, nodes, partition=None):
self.enable_disable_nodes(nodes, 'STATE_ENABLED', partition=partition)
@partitioned
def enable_disable_nodes(self, nodes, target_state, partition=None):
if isinstance(nodes, basestring):
nodes = [nodes]
targets = []
states = []
for node in nodes:
targets.append(self.bigip.host_to_node(node))
states.append(target_state)
self.bigip.LocalLB.NodeAddress.set_session_enabled_state(node_addresses=targets,
states=states)
return self.status(nodes)
@partitioned
def status(self, nodes, partition=None):
if isinstance(nodes, basestring):
nodes = [nodes]
targets = [self.bigip.host_to_node(node) for node in nodes]
statuses = self.bigip.LocalLB.NodeAddress.get_session_enabled_state(node_addresses=targets)
rc = []
for node, status in zip(targets, statuses):
rc.append({'node': node,
'fqdn': self.bigip.node_to_host(node),
'status': status})
return rc
class VirtualAssistant(object):
def __init__(self, bigip):
self.bigip = bigip
@partitioned
def servers(self, partition=None):
return self.bigip.LocalLB.VirtualServer.get_list()
@partitioned
def all_server_statistics(self, partition=None):
return self.bigip.LocalLB.VirtualServer.get_all_statistics()
@partitioned
def addresses(self, partition=None):
return self.bigip.LocalLB.VirtualAddress.get_list()
@partitioned
def all_address_statistics(self, partition=None):
return self.bigip.LocalLB.VirtualAddress.get_all_statistics()
class PoolAssistant(object):
def __init__(self, bigip):
self.bigip = bigip
def create_type(self, type_name):
return self.bigip.LocalLB.PoolMember.typefactory.create(type_name)
@partitioned
def pools(self, partition=None):
return self.bigip.LocalLB.Pool.get_list()
@partitioned
def members(self, pools, partition=None):
if isinstance(pools, basestring):
pools = [pools]
session_status_list = self.bigip.LocalLB.PoolMember.get_session_enabled_state(pools)
monitor_status_list = self.bigip.LocalLB.PoolMember.get_monitor_status(pools)
rc = {}
for pool, sessions, monitors in zip(pools, session_status_list, monitor_status_list):
members = []
for session, monitor in zip(sessions, monitors):
members.append({'address': session.member.address,
'port': session.member.port,
'monitor': monitor,
'session': session})
rc[pool] = {'members': members}
return rc
@partitioned
def multi_member_statistics(self, pools, members, partition=None):
seq_members = []
ippd_seq_seq = self.create_type('Common.IPPortDefinitionSequenceSequence')
ippd_seq_seq.item = seq_members
empty_pools = []
if isinstance(members, list):
pass
elif isinstance(members, dict):
mlist = []
for k in pools:
if len(members[k]['members']) == 0:
empty_pools.append(k)
else:
mlist.append(members[k]['members'])
for ep in empty_pools:
pools.remove(ep)
members = mlist
for member_list in members:
seq_members.append(self.pool_members_to_ippd_seq(member_list))
stats = self.bigip.LocalLB.PoolMember.get_statistics(pool_names=pools, members=ippd_seq_seq)
rc = {}
for p, s in zip(pools, stats):
s = self.collapse_member_statistics(s)
rc[p] = s
return rc
@partitioned
def member_statistics(self, pool, member, partition=None):
# TODO refactor this to be a special case of multi_member_statistics
pools = [pool]
if isinstance(member, basestring):
ipp_member = self.bigip.host_port_to_ipportdef(*member.split(':'))
member = ipp_member
ippd_seq_seq = self.create_type('Common.IPPortDefinitionSequenceSequence')
ippd_seq = self.create_type('Common.IPPortDefinitionSequence')
ippd_seq_seq.item = ippd_seq
ippd_seq.item = member
# this is kind of garbage too... see TODO above
stats = self.bigip.LocalLB.PoolMember.get_statistics(pool_names=pools, members=ippd_seq_seq)[0].statistics[0]
return stats
def disable_member(self, pool_name, members, partition=None):
return self.enable_disable_members(pool_name, members, 'STATE_DISABLED', partition=partition)
def enable_member(self, pool_name, members, partition=None):
return self.enable_disable_members(pool_name, members, 'STATE_ENABLED', partition=partition)
@partitioned
def enable_disable_members(self, pool_name, members, target_state, partition=None):
pools = [pool_name]
if isinstance(members, basestring) or members.__class__.__name__.count('IPPortDefinition'):
members = [members]
session_states = self.create_type('LocalLB.PoolMember.MemberSessionStateSequence')
session_states.item = []
for member in members:
if isinstance(member, basestring):
ipp_member = self.bigip.host_port_to_ipportdef(*member.split(':'))
member = ipp_member
state = self.create_type('LocalLB.PoolMember.MemberSessionState')
state.member = member
state.session_state = target_state
session_states.item.append(state)
self.bigip.LocalLB.PoolMember.set_session_enabled_state(pool_names=pools,
session_states=[session_states])
return self.members(pools, partition=partition)
def pool_members_to_ippd_seq(self, members):
ippd_seq = self.create_type('Common.IPPortDefinitionSequence')
ippd_members = []
ippd_seq.item = ippd_members
for member in members:
address = None
port = None
if isinstance(member, dict):
address = member['address']
port = member['port']
elif isinstance(member, basestring):
address, port = member.split(':')
else:
raise Exception("Unknown member type")
ippd_members.append(self.bigip.host_port_to_ipportdef(address, port))
return ippd_seq
def collapse_member_statistics(self, pool_stats):
stats = {}
# LocalLB.PoolMember.MemberStatisticEntry
for mse in pool_stats.statistics:
member_id = "%s:%d" % (mse.member.address,
mse.member.port)
stats[member_id] = {}
for stat in mse.statistics:
stats[member_id][stat.type] = {'high': stat.value.high,
'low': stat.value.low}
return stats
class PyCtrlShedBIGIP(pycontrol.BIGIP):
def __init__(self, *args, **kwargs):
pycontrol.BIGIP.__init__(self, *args, **kwargs)
self.nodes = NodeAssistant(self)
self.pools = PoolAssistant(self)
self.virtual = VirtualAssistant(self)
self._active_partition = None
@property
def active_partition(self):
if self._active_partition:
return self._active_partition
self._active_partition = str(self.Management.Partition.get_active_partition())
return self._active_partition
@active_partition.setter
def active_partition(self, partition):
self.Management.Partition.set_active_partition(partition)
self._active_partition = partition
self._route_domains = self.Networking.RouteDomain.get_list()
def host_port_to_ipportdef(self, host, port):
ipp = self.LocalLB.PoolMember.typefactory.create('Common.IPPortDefinition')
ipp.address = self.host_to_node(host)
ipp.port = int(port)
return ipp
def host_to_node(self, host):
# If someone provides us with a route domain, we're going to trust
# that they know what route domain to use.
if host.count('%'):
host, route_domain = host.split('%', 1)
return "%s%%%s" % (socket.gethostbyname(host), route_domain)
node = socket.gethostbyname(host)
if (len(self.route_domains) == 1) and self.route_domains[0] != 0:
node += "%%%d" % self.route_domains[0]
return node
def node_to_ip(self, node):
if node.count('%'):
return node.split('%')[0]
return node
def node_to_host(self, node):
return socket.getfqdn(self.node_to_ip(node))
@property
def route_domains(self):
if hasattr(self, '_route_domains'):
return self._route_domains
self._route_domains = self.Networking.RouteDomain.get_list()
return self._route_domains
@property
def partitions(self):
partitions = []
for partition in self.Management.Partition.get_partition_list():
partitions.append({
'name': partition['partition_name'],
'description': partition["description"]
})
return partitions
class Environment(object):
def __init__(self, name, hosts=[], wsdls=None, username=None):
self.name = name
self.hosts = hosts
self.bigips = {}
self.username = username
self.wsdls = wsdls
if self.wsdls is None:
self.wsdls = [
'LocalLB.NodeAddress', 'LocalLB.Pool', 'LocalLB.PoolMember',
'LocalLB.Rule', 'LocalLB.VirtualAddress', 'LocalLB.VirtualServer',
'Management.Partition', 'Networking.RouteDomain',
'System.Failover',
]
for host in self.hosts:
self.connect_to_bigip(host)
def __setattr__(self, name, value):
if name in ['hosts', 'wsdls']:
if isinstance(value, str) or isinstance(value, unicode):
object.__setattr__(self, name, [host.strip() for host in value.split(',')])
else:
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, value)
def configure(self, config):
for k, v in config.items(self.name):
setattr(self, k, v)
@property
def all_bigip_connections(self):
return [self.bigips[bigip] for bigip in self.bigips]
@property
def active_bigip_connection(self):
for host in self.hosts:
bigip = self.connect_to_bigip(host)
if bigip.System.Failover.get_failover_state() == 'FAILOVER_STATE_ACTIVE':
return bigip
raise Exception('No active BIGIP devices were found in this environment (%s)' % self.name)
def connect_to_bigip(self, host, wsdls=None, force_reconnect=False):
if not(wsdls):
wsdls = self.wsdls
if not hasattr(self, 'password'):
log.debug('No password has been set, attempting to retrive via keychain capabilities')
password = pycontrolshed.get_password(self.name, self.username)
if password:
log.debug('Password retrived from the keychain')
self.password = password
else:
log.error('No password is available')
if host not in self.bigips or force_reconnect:
self.bigips[host] = PyCtrlShedBIGIP(host,
self.username,
self.password,
fromurl=True,
wsdls=wsdls)
return self.bigips[host]
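# Usage sketch (host, partition, and credentials are placeholders; the
# `partitioned` decorator above temporarily switches the active partition):
#
#     env = Environment('prod', hosts=['bigip1.example.com'], username='admin')
#     env.password = 'secret'              # or rely on the keychain lookup
#     bigip = env.active_bigip_connection  # first ACTIVE device in the list
#     bigip.nodes.disable('web01.example.com', partition='app1')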
| gpl-2.0 | 8,453,095,348,816,026,000 | 35.555249 | 117 | 0.590796 | false | 4.154788 | false | false | false |
Scratchcat1/AATC | flask_app/Flask_Test_App.py | 1 | 5816 | from flask import Flask, flash, redirect, render_template, request, session, abort
import random,os,ast,prettytable
from flask_app import forms
import AATC_Server_002 as AATC_Server
import HedaBot
COMMANDS = HedaBot.CreateCommandDictionary()
COMMANDS["AddFlight"][2]["Type"] = lambda x: HedaBot.SplitWaypoints(x,":")
COMMANDS["AddFlight"][2]["Query"] = COMMANDS["AddFlight"][2]["Query"].replace("returns","colons")
app = Flask(__name__)
app.config.from_object('flask_app.config')
@app.route("/")
def home():
## session["UserID"] = random.randint(0,1000)
return render_template("base.html",user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},Commands = COMMANDS)
@app.route("/help")
def help_page():
return render_template("help.html",name = session.get("UserID"),user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})
@app.route("/base")
def base():
return render_template("base.html",user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})
@app.route("/quote")
def quote():
quotes = ObtainQuote(3)
return render_template("quote.html", quotes = quotes,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})
@app.route("/login", methods=['GET', 'POST'])
def login():
form = forms.LoginForm()
if form.validate_on_submit():
print("Loggin in ...")
if form.Username.data == form.Password.data:
session["UserID"] = form.Username.data
else:
session["UserID"] = -1
return render_template("LoginForm.html",title = "Login",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")})
@app.route("/dyno", methods=['GET', 'POST'])
def dyno():
items = [{"name":"Username"},{"name":"Password"}]
fields = [{"name":"Username","form":forms.wtforms.StringField('Username', validators=[forms.DataRequired()])},
{"name":"Password","form":forms.wtforms.StringField('Password', validators=[forms.DataRequired()])}]
#form = forms.DynoForm(fields = items)
form = forms.update_form(fields)
print(form.__dict__)
if form.validate_on_submit():
print("Loggin in ...")
print(form.fields.data)
if form.Username.data == form.Password.data:
session["UserID"] = form.Username.data
else:
session["UserID"] = -1
#print(form.fields.__dict__)
return render_template("DynamicForm.html",title = "Login",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},fields = fields)
@app.route("/command/<string:command>",methods=['GET', 'POST'])
def Dynamic_Form(command):
if command not in COMMANDS:
return "FAILURE COMMAND DOES NOT EXIST"
Fields = Generate_Fields(command)
form = forms.update_form(Fields)
if form.validate_on_submit():
packet = Evaluate_Form(command,form)
WebConnection = AATC_Server.WebConnection(session.get("UserID",-1))
        Success, Message, Data = WebConnection.Main(packet)
if command == "Login":
session["UserID"] = Data
Data = []
        rendered = RenderResults(Success, Message, Data)
print(rendered)
return render_template("DynamicForm2.html",title = "Output",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},fields = Fields ,Commands = COMMANDS, OUTPUT = True, rendered_result = rendered)
return render_template("DynamicForm2.html",title = "command",form = form,user = {"Username":session.get("UserID"), "UserID":session.get("UserID")},fields = Fields,Commands = COMMANDS)
def Generate_Fields(command):
Queries = COMMANDS[command]
Fields = []
for x in range(1,len(Queries)+1):
query_name = Queries[x]["Query"]
field = {"name":query_name ,"form":forms.wtforms.StringField(query_name, validators=[forms.DataRequired()])}
Fields.append(field)
return Fields
def Evaluate_Form(command,form):
Queries = COMMANDS[command]
Arguments = []
for x in range(1,len(Queries)+1):
Arguments.append( Queries[x]["Type"](form.__dict__[Queries[x]["Query"]].data))
packet = (command,Arguments)
return packet
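# Flow sketch for the dynamic command forms above ("Login" is one of the
# HedaBot commands; field names come from the command's query definitions):
#
#     Fields = Generate_Fields("Login")      # one StringField per command query
#     form = forms.update_form(Fields)       # WTForms class built at runtime
#     packet = Evaluate_Form("Login", form)  # -> ("Login", [typed arguments])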
def RenderResults(Success, Message, Data = None):
    render = ""
    render += "Success >>"+str(Success)+"\n"
    render += "Message >>"+str(Message) +"\n"
    if Data not in [None,[]]:
        try:
            Columns = ast.literal_eval(Message)
            Table = prettytable.PrettyTable(Columns)
            for row in Data:
                Table.add_row(row)
            render += str(Table)
        except Exception as e:
            render += "Error creating aesthetic table: "+str(e) +"\n"
for row in Data:
render += str(row)+"\n"
render += ""
rendered = render.split("\n")
return rendered
def ObtainQuote(number = 1):
    with open(os.path.join(os.path.abspath(os.path.join(os.getcwd(), os.pardir)),"SkyrimDialogue.txt"),"r") as f:
        for i,line in enumerate(f):
            pass
    responses = []
    for _ in range(number):
        lineNum = random.randint(0,i+1)
        with open(os.path.join(os.path.abspath(os.path.join(os.getcwd(), os.pardir)),"SkyrimDialogue.txt"),"r") as f:
            for x in range(lineNum):
                line = f.readline()
            responses.append( line.rstrip().split("\t")[-1:][0])
    return responses
def main_app(app):
app.secret_key = "abcewhfuhiwuhef"
app.run(host = "0.0.0.0")
if __name__ == "__main__":
main_app(app)
| gpl-3.0 | 1,400,593,260,159,722,000 | 31.813953 | 234 | 0.597146 | false | 3.576876 | false | false | false |
ragupta-git/ImcSdk | imcsdk/mometa/comm/CommSnmp.py | 1 | 8759 | """This module contains the general information for CommSnmp ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class CommSnmpConsts:
ADMIN_STATE_DISABLED = "disabled"
ADMIN_STATE_ENABLED = "enabled"
COM2_SEC_NONE = "None"
COM2_SEC_DISABLED = "disabled"
COM2_SEC_FULL = "full"
COM2_SEC_LIMITED = "limited"
PROTO_ALL = "all"
PROTO_NONE = "none"
PROTO_TCP = "tcp"
PROTO_UDP = "udp"
class CommSnmp(ManagedObject):
"""This is CommSnmp class."""
consts = CommSnmpConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("CommSnmp", "commSnmp", "snmp-svc", VersionMeta.Version151f, "InputOutput", 0xfff, [], ["admin", "read-only", "user"], [u'commSvcEp'], [u'commSnmpTrap', u'commSnmpUser'], ["Get", "Set"]),
"modular": MoMeta("CommSnmp", "commSnmp", "snmp-svc", VersionMeta.Version2013e, "InputOutput", 0xfff, [], ["admin", "read-only", "user"], [u'commSvcEp'], [u'commSnmpTrap', u'commSnmpUser'], ["Get", "Set"])
}
prop_meta = {
"classic": {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"com2_sec": MoPropertyMeta("com2_sec", "com2Sec", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["None", "disabled", "full", "limited"], []),
"community": MoPropertyMeta("community", "community", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []),
"engine_id_key": MoPropertyMeta("engine_id_key", "engineIdKey", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 27, r"""[^#!&]{0,27}""", [], []),
"port": MoPropertyMeta("port", "port", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["1-65535"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"sys_contact": MoPropertyMeta("sys_contact", "sysContact", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x200, 0, 64, None, [], []),
"sys_location": MoPropertyMeta("sys_location", "sysLocation", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x400, 0, 64, None, [], []),
"trap_community": MoPropertyMeta("trap_community", "trapCommunity", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x800, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"engine_id": MoPropertyMeta("engine_id", "engineId", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, 0, 255, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["all", "none", "tcp", "udp"], []),
},
"modular": {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"com2_sec": MoPropertyMeta("com2_sec", "com2Sec", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["None", "disabled", "full", "limited"], []),
"community": MoPropertyMeta("community", "community", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []),
"engine_id_key": MoPropertyMeta("engine_id_key", "engineIdKey", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 27, r"""[^#!&]{0,27}""", [], []),
"port": MoPropertyMeta("port", "port", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["1-65535"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"sys_contact": MoPropertyMeta("sys_contact", "sysContact", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x200, 0, 64, None, [], []),
"sys_location": MoPropertyMeta("sys_location", "sysLocation", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x400, 0, 64, None, [], []),
"trap_community": MoPropertyMeta("trap_community", "trapCommunity", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x800, None, None, r"""[!#$%\(\)\*\+,\-\./:<=\[\]\^_\{\}~a-zA-Z0-9]{0,18}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"engine_id": MoPropertyMeta("engine_id", "engineId", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 255, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["all", "none", "tcp", "udp"], []),
},
}
prop_map = {
"classic": {
"adminState": "admin_state",
"com2Sec": "com2_sec",
"community": "community",
"dn": "dn",
"engineIdKey": "engine_id_key",
"port": "port",
"rn": "rn",
"status": "status",
"sysContact": "sys_contact",
"sysLocation": "sys_location",
"trapCommunity": "trap_community",
"childAction": "child_action",
"descr": "descr",
"engineId": "engine_id",
"name": "name",
"proto": "proto",
},
"modular": {
"adminState": "admin_state",
"com2Sec": "com2_sec",
"community": "community",
"dn": "dn",
"engineIdKey": "engine_id_key",
"port": "port",
"rn": "rn",
"status": "status",
"sysContact": "sys_contact",
"sysLocation": "sys_location",
"trapCommunity": "trap_community",
"childAction": "child_action",
"descr": "descr",
"engineId": "engine_id",
"name": "name",
"proto": "proto",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.admin_state = None
self.com2_sec = None
self.community = None
self.engine_id_key = None
self.port = None
self.status = None
self.sys_contact = None
self.sys_location = None
self.trap_community = None
self.child_action = None
self.descr = None
self.engine_id = None
self.name = None
self.proto = None
ManagedObject.__init__(self, "CommSnmp", parent_mo_or_dn, **kwargs)
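# Usage sketch (assumes an authenticated ImcHandle named `handle`; the
# property values are illustrative):
#
#     mo = CommSnmp(parent_mo_or_dn="sys/svc-ext", admin_state="enabled",
#                   community="public", sys_contact="[email protected]")
#     handle.set_mo(mo)  # push the SNMP service configuration to the IMC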
| apache-2.0 | 1,721,278,884,973,908,700 | 63.881481 | 230 | 0.574837 | false | 3.296575 | false | false | false |
wevote/WebAppPublic | apis_v1/documentation_source/positions_count_for_one_ballot_item_doc.py | 1 | 2560 | # apis_v1/documentation_source/positions_count_for_one_ballot_item_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def positions_count_for_one_ballot_item_doc_template_values(url_root):
"""
Show documentation about positionsCountForOneBallotItem
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
{
'name': 'ballot_item_we_vote_id',
'value': 'string', # boolean, integer, long, string
'description': 'The unique identifier for one ballot item.',
},
]
optional_query_parameter_list = [
]
potential_status_codes_list = [
]
try_now_link_variables_dict = {
}
api_response = '{\n' \
' "success": boolean,\n' \
' "status": string,\n' \
' "ballot_item_we_vote_id: string,\n' \
' "ballot_item_list": list ' \
'(we return a list so this API can be consumed like positionsCountForAllBallotItems)\n' \
' [\n' \
' "ballot_item_we_vote_id": string,\n' \
' "support_count": integer,\n' \
' "oppose_count": integer,\n' \
' ],\n' \
'}'
template_values = {
'api_name': 'positionsCountForOneBallotItem',
'api_slug': 'positionsCountForOneBallotItem',
'api_introduction':
"Retrieve all positions held by this voter in one list.",
'try_now_link': 'apis_v1:positionsCountForOneBallotItemView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
| bsd-3-clause | 5,725,262,911,566,859,000 | 38.384615 | 115 | 0.5375 | false | 3.803863 | false | false | false |
longde123/MultiversePlatform | server/config/common/character_factory.py | 1 | 4399 | #
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
from multiverse.mars import *
from multiverse.mars.objects import *
from multiverse.mars.core import *
from multiverse.mars.events import *
from multiverse.mars.util import *
from multiverse.mars.plugins import *
from multiverse.server.plugins import *
from multiverse.server.math import *
from multiverse.server.events import *
from multiverse.server.objects import *
from multiverse.server.engine import *
from java.lang import *
displayContext = DisplayContext("human_female.mesh")
displayContext.addSubmesh(DisplayContext.Submesh("bodyShape-lib.0",
"human_female.skin_material"))
displayContext.addSubmesh(DisplayContext.Submesh("head_aShape-lib.0",
"human_female.head_a_material"))
displayContext.addSubmesh(DisplayContext.Submesh("hair_bShape-lib.0",
"human_female.hair_b_material"))
# default player template
player = Template("DefaultPlayer")
player.put(WorldManagerClient.NAMESPACE,
WorldManagerClient.TEMPL_DISPLAY_CONTEXT,
displayContext)
player.put(WorldManagerClient.NAMESPACE,
WorldManagerClient.TEMPL_OBJECT_TYPE,
ObjectTypes.player)
player.put(InventoryClient.NAMESPACE,
InventoryClient.TEMPL_ITEMS,
"")
ObjectManagerClient.registerTemplate(player)
# character factory
class SampleFactory (CharacterFactory):
def createCharacter(self, worldName, uid, properties):
name = properties.get("characterName");
# Player start location
loc = Point(-135343, 0, -202945)
# Player start instance; assumes you have an instance named "default"
instanceOid = InstanceClient.getInstanceOid("default")
overrideTemplate = Template()
if name:
overrideTemplate.put(WorldManagerClient.NAMESPACE,
WorldManagerClient.TEMPL_NAME, name)
overrideTemplate.put(WorldManagerClient.NAMESPACE,
WorldManagerClient.TEMPL_INSTANCE, Long(instanceOid))
overrideTemplate.put(WorldManagerClient.NAMESPACE,
WorldManagerClient.TEMPL_LOC, loc)
# Initialize the player's instance restore stack
restorePoint = InstanceRestorePoint("default", loc)
restorePoint.setFallbackFlag(True)
restoreStack = LinkedList()
restoreStack.add(restorePoint)
        overrideTemplate.put(Namespace.OBJECT_MANAGER,
                             ObjectManagerClient.TEMPL_INSTANCE_RESTORE_STACK, restoreStack)
        overrideTemplate.put(Namespace.OBJECT_MANAGER,
                             ObjectManagerClient.TEMPL_CURRENT_INSTANCE_NAME, "default")
# Make the player persistent (will be saved in database)
overrideTemplate.put(Namespace.OBJECT_MANAGER,
ObjectManagerClient.TEMPL_PERSISTENT, Boolean(True));
# Create the player object
objOid = ObjectManagerClient.generateObject(
"DefaultPlayer", overrideTemplate)
Log.debug("SampleFactory: generated obj oid=" + str(objOid))
return objOid
sampleFactory = SampleFactory()
LoginPlugin.getCharacterGenerator().setCharacterFactory(sampleFactory);
| mit | 2,857,242,688,523,992,600 | 39.357798 | 81 | 0.711525 | false | 4.246139 | false | false | false |
shoopio/shoop | shuup/importer/admin_module/import_views.py | 1 | 7325 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import logging
import os
from datetime import datetime
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.transaction import atomic
from django.http.response import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView, TemplateView, View
from shuup.admin.shop_provider import get_shop
from shuup.importer.admin_module.forms import ImportForm, ImportSettingsForm
from shuup.importer.transforms import transform_file
from shuup.importer.utils import (
get_import_file_path, get_importer, get_importer_choices
)
from shuup.utils.excs import Problem
logger = logging.getLogger(__name__)
class ImportProcessView(TemplateView):
template_name = "shuup/importer/admin/import_process.jinja"
importer = None
def dispatch(self, request, *args, **kwargs):
self.importer_cls = get_importer(request.GET.get("importer"))
self.model_str = request.GET.get("importer")
self.lang = request.GET.get("lang")
return super(ImportProcessView, self).dispatch(request, *args, **kwargs)
def _transform_request_file(self):
try:
filename = get_import_file_path(self.request.GET.get("n"))
if not os.path.isfile(filename):
raise ValueError(_("%s is not a file") % self.request.GET.get("n"))
        except Exception:
raise Problem(_("File missing."))
try:
mode = "xls"
if filename.endswith("xlsx"):
mode = "xlsx"
if filename.endswith("csv"):
mode = "csv"
if self.importer_cls.custom_file_transformer:
return self.importer_cls.transform_file(mode, filename)
return transform_file(mode, filename)
        except Exception as e:
messages.error(self.request, e)
def prepare(self):
self.data = self._transform_request_file()
if self.data is None:
return False
self.importer = self.importer_cls(self.data, get_shop(self.request), self.lang)
self.importer.process_data()
if self.request.method == "POST":
# check if mapping was done
for field in self.importer.unmatched_fields:
key = "remap[%s]" % field
vals = self.request.POST.getlist(key)
if len(vals):
self.importer.manually_match(field, vals[0])
self.importer.do_remap()
self.settings_form = ImportSettingsForm(data=self.request.POST if self.request.POST else None)
if self.settings_form.is_bound:
self.settings_form.is_valid()
return True
def post(self, request, *args, **kwargs):
prepared = self.prepare()
if not prepared:
return redirect(reverse("shuup_admin:importer.import"))
try:
with atomic():
self.importer.do_import(self.settings_form.cleaned_data["import_mode"])
except Exception:
logger.exception("Failed to import data")
messages.error(request, _("Failed to import the file."))
return redirect(reverse("shuup_admin:importer.import"))
self.template_name = "shuup/importer/admin/import_process_complete.jinja"
return self.render_to_response(self.get_context_data(**kwargs))
def get_context_data(self, **kwargs):
context = super(ImportProcessView, self).get_context_data(**kwargs)
context["data"] = self.data
context["importer"] = self.importer
context["form"] = self.settings_form
context["model_fields"] = self.importer.get_fields_for_mapping()
context["visible_rows"] = self.data.rows[1:5]
return context
def get(self, request, *args, **kwargs):
prepared = self.prepare()
if not prepared:
return redirect(reverse("shuup_admin:importer.import"))
return self.render_to_response(self.get_context_data(**kwargs))
class ImportView(FormView):
template_name = "shuup/importer/admin/import.jinja"
form_class = ImportForm
def post(self, request, *args, **kwargs):
file = self.request.FILES["file"]
basename, ext = os.path.splitext(file.name)
import_name = "%s%s" % (hashlib.sha256(("%s" % datetime.now()).encode("utf-8")).hexdigest(), ext)
full_path = get_import_file_path(import_name)
if not os.path.isdir(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
with open(full_path, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
next_url = request.POST.get("next")
importer = request.POST.get("importer")
lang = request.POST.get("language")
return redirect("%s?n=%s&importer=%s&lang=%s" % (next_url, import_name, importer, lang))
def get_form_kwargs(self):
kwargs = super(ImportView, self).get_form_kwargs()
initial = kwargs.get("initial", {})
initial["importer"] = self.request.GET.get("importer", initial.get("initial"))
kwargs.update({
"request": self.request,
"initial": initial
})
return kwargs
def get_context_data(self, **kwargs):
context = super(ImportView, self).get_context_data(**kwargs)
# check whether the importer has a example file template
# if so, we also add a url to download the example file
importer = self.request.GET.get("importer")
# no importer passed, get the first choice available
if not importer:
importers = list(get_importer_choices())
if importers:
importer = importers[0][0]
if importer:
importer_cls = get_importer(importer)
context.update(importer_cls.get_help_context_data(self.request))
context["importer"] = importer_cls
return context
class ExampleFileDownloadView(View):
def get(self, request, *args, **kwargs):
importer = request.GET.get("importer")
file_name = request.GET.get("file_name")
if not importer or not file_name:
return HttpResponseBadRequest(_("Invalid parameters"))
importer_cls = get_importer(importer)
if not importer_cls or not importer_cls.has_example_file():
raise Http404(_("Invalid importer"))
example_file = importer_cls.get_example_file(file_name)
if not example_file:
raise Http404(_("Invalid file name"))
response = HttpResponse(content_type=example_file.content_type)
response['Content-Disposition'] = 'attachment; filename=%s' % example_file.file_name
data = importer_cls.get_example_file_content(example_file, request)
if not data:
raise Http404(_("File not found"))
data.seek(0)
response.write(data.getvalue())
return response
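# Request flow sketch (grounded in the views above; URL names follow the
# shuup_admin:importer.* redirects used in the code):
#   1. POST to ImportView stores the upload under a hashed name and redirects
#      with ?n=<name>&importer=<id>&lang=<lang>.
#   2. ImportProcessView.prepare() transforms the file, runs process_data(),
#      and applies any remap[<field>] values posted from the mapping UI.
#   3. POST to ImportProcessView calls do_import() inside an atomic() block,
#      so a failed import rolls back completely.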
| agpl-3.0 | -5,630,403,041,267,978,000 | 37.151042 | 105 | 0.632628 | false | 4.020307 | false | false | false |
PeridotYouClod/gRPC-Makerboards | generated/proto_out/sensors_pb2_grpc.py | 1 | 20413 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import generated.proto_out.sensors_pb2 as sensors__pb2
class FrontEndStub(object):
"""http://www.grpc.io/docs/guides/concepts.html is good reference for #tags
#FrontEnd #Simple
The FrontEnd server is the endpoint that most client interactions
should use. These are public facing and used by servers in the outside
world.
Note: Currently there is no security in place so this should only be used
  for localhost applications or behind a firewall.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetLux = channel.unary_unary(
'/FrontEnd/GetLux',
request_serializer=sensors__pb2.GetLuxRequest.SerializeToString,
response_deserializer=sensors__pb2.GetLuxReply.FromString,
)
self.GetTemperature = channel.unary_unary(
'/FrontEnd/GetTemperature',
request_serializer=sensors__pb2.GetTemperatureRequest.SerializeToString,
response_deserializer=sensors__pb2.GetTemperatureReply.FromString,
)
self.GetSound = channel.unary_unary(
'/FrontEnd/GetSound',
request_serializer=sensors__pb2.GetSoundRequest.SerializeToString,
response_deserializer=sensors__pb2.GetSoundReply.FromString,
)
self.GetIrButtonPressed = channel.unary_unary(
'/FrontEnd/GetIrButtonPressed',
request_serializer=sensors__pb2.GetIrButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetIrButtonPressedReply.FromString,
)
self.GetSonar = channel.unary_unary(
'/FrontEnd/GetSonar',
request_serializer=sensors__pb2.GetSonarRequest.SerializeToString,
response_deserializer=sensors__pb2.GetSonarReply.FromString,
)
self.SetLedStrip = channel.unary_unary(
'/FrontEnd/SetLedStrip',
request_serializer=sensors__pb2.SetLedStripRequest.SerializeToString,
response_deserializer=sensors__pb2.SetLedStripReply.FromString,
)
self.GetButtonPressed = channel.unary_unary(
'/FrontEnd/GetButtonPressed',
request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetButtonPressedReply.FromString,
)
self.SendToRfBlaster = channel.unary_unary(
'/FrontEnd/SendToRfBlaster',
request_serializer=sensors__pb2.SendToRfBlasterRequest.SerializeToString,
response_deserializer=sensors__pb2.SendToRfBlasterReply.FromString,
)
class FrontEndServicer(object):
"""http://www.grpc.io/docs/guides/concepts.html is good reference for #tags
#FrontEnd #Simple
The FrontEnd server is the endpoint that most client interactions
should use. These are public facing and used by servers in the outside
world.
Note: Currently there is no security in place so this should only be used
  for localhost applications or behind a firewall.
"""
def GetLux(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTemperature(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSound(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIrButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSonar(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetLedStrip(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendToRfBlaster(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FrontEndServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetLux': grpc.unary_unary_rpc_method_handler(
servicer.GetLux,
request_deserializer=sensors__pb2.GetLuxRequest.FromString,
response_serializer=sensors__pb2.GetLuxReply.SerializeToString,
),
'GetTemperature': grpc.unary_unary_rpc_method_handler(
servicer.GetTemperature,
request_deserializer=sensors__pb2.GetTemperatureRequest.FromString,
response_serializer=sensors__pb2.GetTemperatureReply.SerializeToString,
),
'GetSound': grpc.unary_unary_rpc_method_handler(
servicer.GetSound,
request_deserializer=sensors__pb2.GetSoundRequest.FromString,
response_serializer=sensors__pb2.GetSoundReply.SerializeToString,
),
'GetIrButtonPressed': grpc.unary_unary_rpc_method_handler(
servicer.GetIrButtonPressed,
request_deserializer=sensors__pb2.GetIrButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetIrButtonPressedReply.SerializeToString,
),
'GetSonar': grpc.unary_unary_rpc_method_handler(
servicer.GetSonar,
request_deserializer=sensors__pb2.GetSonarRequest.FromString,
response_serializer=sensors__pb2.GetSonarReply.SerializeToString,
),
'SetLedStrip': grpc.unary_unary_rpc_method_handler(
servicer.SetLedStrip,
request_deserializer=sensors__pb2.SetLedStripRequest.FromString,
response_serializer=sensors__pb2.SetLedStripReply.SerializeToString,
),
'GetButtonPressed': grpc.unary_unary_rpc_method_handler(
servicer.GetButtonPressed,
request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString,
),
'SendToRfBlaster': grpc.unary_unary_rpc_method_handler(
servicer.SendToRfBlaster,
request_deserializer=sensors__pb2.SendToRfBlasterRequest.FromString,
response_serializer=sensors__pb2.SendToRfBlasterReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'FrontEnd', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
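# Client-side sketch (the channel target is an assumption; request messages
# are constructed empty here since their fields live in sensors.proto):
#
#     channel = grpc.insecure_channel('localhost:50051')
#     stub = FrontEndStub(channel)
#     reply = stub.GetLux(sensors__pb2.GetLuxRequest())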
class PushFrontEndStub(object):
"""#FrontEnd #ServerStreaming
The FrontEnd server is the endpoint that most client interactions
should use. These are public facing and used by servers in the outside
world. This server is for streaming events.
Note: Currently there is no security in place so this should only be used
  for localhost applications or behind a firewall.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Subscribe = channel.unary_unary(
'/PushFrontEnd/Subscribe',
request_serializer=sensors__pb2.SubscribeRequest.SerializeToString,
response_deserializer=sensors__pb2.SubscribeReply.FromString,
)
self.StreamButtonPressed = channel.unary_stream(
'/PushFrontEnd/StreamButtonPressed',
request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetButtonPressedReply.FromString,
)
class PushFrontEndServicer(object):
"""#FrontEnd #ServerStreaming
The FrontEnd server is the endpoint that most client interactions
should use. These are public facing and used by servers in the outside
world. This server is for streaming events.
Note: Currently there is no security in place so this should only be used
  for localhost applications or behind a firewall.
"""
def Subscribe(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PushFrontEndServicer_to_server(servicer, server):
rpc_method_handlers = {
'Subscribe': grpc.unary_unary_rpc_method_handler(
servicer.Subscribe,
request_deserializer=sensors__pb2.SubscribeRequest.FromString,
response_serializer=sensors__pb2.SubscribeReply.SerializeToString,
),
'StreamButtonPressed': grpc.unary_stream_rpc_method_handler(
servicer.StreamButtonPressed,
request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'PushFrontEnd', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class ArduinoStub(object):
"""#Backend #Simple
Arduino server handles interactions between Arduino brand devices & other
servers. (New to Arduino: https://www.arduino.cc/en/Guide/Introduction)
  Note: Do not have clients depend on this; it should be behind a FrontEnd.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetIrButtonPressed = channel.unary_unary(
'/Arduino/GetIrButtonPressed',
request_serializer=sensors__pb2.GetIrButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetIrButtonPressedReply.FromString,
)
self.GetSonar = channel.unary_unary(
'/Arduino/GetSonar',
request_serializer=sensors__pb2.GetSonarRequest.SerializeToString,
response_deserializer=sensors__pb2.GetSonarReply.FromString,
)
self.SendToRfBlaster = channel.unary_unary(
'/Arduino/SendToRfBlaster',
request_serializer=sensors__pb2.SendToRfBlasterRequest.SerializeToString,
response_deserializer=sensors__pb2.SendToRfBlasterReply.FromString,
)
class ArduinoServicer(object):
"""#Backend #Simple
Arduino server handles interactions between Arduino brand devices & other
servers. (New to Arduino: https://www.arduino.cc/en/Guide/Introduction)
  Note: Do not have clients depend on this; it should be behind a FrontEnd.
"""
def GetIrButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSonar(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendToRfBlaster(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ArduinoServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetIrButtonPressed': grpc.unary_unary_rpc_method_handler(
servicer.GetIrButtonPressed,
request_deserializer=sensors__pb2.GetIrButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetIrButtonPressedReply.SerializeToString,
),
'GetSonar': grpc.unary_unary_rpc_method_handler(
servicer.GetSonar,
request_deserializer=sensors__pb2.GetSonarRequest.FromString,
response_serializer=sensors__pb2.GetSonarReply.SerializeToString,
),
'SendToRfBlaster': grpc.unary_unary_rpc_method_handler(
servicer.SendToRfBlaster,
request_deserializer=sensors__pb2.SendToRfBlasterRequest.FromString,
response_serializer=sensors__pb2.SendToRfBlasterReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Arduino', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class WioLinkStub(object):
"""#Backend #Simple
WioLink server handles interactions between Wio Link brand devices & other
servers. (New to Wio Link: http://wiki.seeed.cc/Wio_Link/)
    Note: Do not have clients depend on this; it should be behind a FrontEnd.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetLux = channel.unary_unary(
'/WioLink/GetLux',
request_serializer=sensors__pb2.GetLuxRequest.SerializeToString,
response_deserializer=sensors__pb2.GetLuxReply.FromString,
)
self.GetTemperature = channel.unary_unary(
'/WioLink/GetTemperature',
request_serializer=sensors__pb2.GetTemperatureRequest.SerializeToString,
response_deserializer=sensors__pb2.GetTemperatureReply.FromString,
)
self.GetSound = channel.unary_unary(
'/WioLink/GetSound',
request_serializer=sensors__pb2.GetSoundRequest.SerializeToString,
response_deserializer=sensors__pb2.GetSoundReply.FromString,
)
self.SetLedStrip = channel.unary_unary(
'/WioLink/SetLedStrip',
request_serializer=sensors__pb2.SetLedStripRequest.SerializeToString,
response_deserializer=sensors__pb2.SetLedStripReply.FromString,
)
self.GetButtonPressed = channel.unary_unary(
'/WioLink/GetButtonPressed',
request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetButtonPressedReply.FromString,
)
class WioLinkServicer(object):
"""#Backend #Simple
WioLink server handles interactions between Wio Link brand devices & other
servers. (New to Wio Link: http://wiki.seeed.cc/Wio_Link/)
    Note: Do not have clients depend on this; it should be behind a FrontEnd.
"""
def GetLux(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTemperature(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSound(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetLedStrip(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_WioLinkServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetLux': grpc.unary_unary_rpc_method_handler(
servicer.GetLux,
request_deserializer=sensors__pb2.GetLuxRequest.FromString,
response_serializer=sensors__pb2.GetLuxReply.SerializeToString,
),
'GetTemperature': grpc.unary_unary_rpc_method_handler(
servicer.GetTemperature,
request_deserializer=sensors__pb2.GetTemperatureRequest.FromString,
response_serializer=sensors__pb2.GetTemperatureReply.SerializeToString,
),
'GetSound': grpc.unary_unary_rpc_method_handler(
servicer.GetSound,
request_deserializer=sensors__pb2.GetSoundRequest.FromString,
response_serializer=sensors__pb2.GetSoundReply.SerializeToString,
),
'SetLedStrip': grpc.unary_unary_rpc_method_handler(
servicer.SetLedStrip,
request_deserializer=sensors__pb2.SetLedStripRequest.FromString,
response_serializer=sensors__pb2.SetLedStripReply.SerializeToString,
),
'GetButtonPressed': grpc.unary_unary_rpc_method_handler(
servicer.GetButtonPressed,
request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'WioLink', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class PushStub(object):
"""#ServerStreaming #Backend
Push server pushes data when a sensor event occurs for the client to react
to.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Subscribe = channel.unary_unary(
'/Push/Subscribe',
request_serializer=sensors__pb2.SubscribeRequest.SerializeToString,
response_deserializer=sensors__pb2.SubscribeReply.FromString,
)
self.StreamButtonPressed = channel.unary_stream(
'/Push/StreamButtonPressed',
request_serializer=sensors__pb2.GetButtonPressedRequest.SerializeToString,
response_deserializer=sensors__pb2.GetButtonPressedReply.FromString,
)
class PushServicer(object):
"""#ServerStreaming #Backend
Push server pushes data when a sensor event occurs for the client to react
to.
"""
def Subscribe(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamButtonPressed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PushServicer_to_server(servicer, server):
rpc_method_handlers = {
'Subscribe': grpc.unary_unary_rpc_method_handler(
servicer.Subscribe,
request_deserializer=sensors__pb2.SubscribeRequest.FromString,
response_serializer=sensors__pb2.SubscribeReply.SerializeToString,
),
'StreamButtonPressed': grpc.unary_stream_rpc_method_handler(
servicer.StreamButtonPressed,
request_deserializer=sensors__pb2.GetButtonPressedRequest.FromString,
response_serializer=sensors__pb2.GetButtonPressedReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Push', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
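# --- Illustrative usage sketch (not part of the generated file) ---------------
# How a servicer is typically wired into a running gRPC server. The port is an
# assumption, and a real implementation would subclass PushServicer and
# override the RPCs rather than serve the UNIMPLEMENTED defaults.
def _demo_serve_push(port=50051):
    from concurrent import futures
    import grpc
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_PushServicer_to_server(PushServicer(), server)
    server.add_insecure_port('[::]:%d' % port)
    server.start()
    return server   # caller must keep the process alive while serving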
| mit | -6,543,290,364,991,223,000 | 38.483559 | 85 | 0.725371 | false | 4.157434 | false | false | false |
skirpichev/omg | diofant/vector/dyadic.py | 1 | 8076 | from ..core import AtomicExpr, Integer, Pow
from ..matrices import ImmutableMatrix
from .basisdependent import (BasisDependent, BasisDependentAdd,
BasisDependentMul, BasisDependentZero)
class Dyadic(BasisDependent):
"""
Super class for all Dyadic-classes.
References
==========
* https://en.wikipedia.org/wiki/Dyadic_tensor
* Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill
"""
_op_priority = 13.0
@property
def components(self):
"""
Returns the components of this dyadic in the form of a
Python dictionary mapping BaseDyadic instances to the
corresponding measure numbers.
"""
# The '_components' attribute is defined according to the
# subclass of Dyadic the instance belongs to.
return self._components
def dot(self, other):
"""
Returns the dot product(also called inner product) of this
Dyadic, with another Dyadic or Vector.
If 'other' is a Dyadic, this returns a Dyadic. Else, it returns
a Vector (unless an error is encountered).
Parameters
==========
other : Dyadic/Vector
The other Dyadic or Vector to take the inner product with
Examples
========
>>> from diofant.vector import CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> D1 = N.i.outer(N.j)
>>> D2 = N.j.outer(N.j)
>>> D1.dot(D2)
(N.i|N.j)
>>> D1.dot(N.j)
N.i
"""
from .vector import Vector
if isinstance(other, BasisDependentZero):
return Vector.zero
elif isinstance(other, Vector):
outvec = Vector.zero
for k, v in self.components.items():
vect_dot = k.args[1].dot(other)
outvec += vect_dot * v * k.args[0]
return outvec
elif isinstance(other, Dyadic):
outdyad = Dyadic.zero
for k1, v1 in self.components.items():
for k2, v2 in other.components.items():
vect_dot = k1.args[1].dot(k2.args[0])
outer_product = k1.args[0].outer(k2.args[1])
outdyad += vect_dot * v1 * v2 * outer_product
return outdyad
else:
raise TypeError('Inner product is not defined for ' +
str(type(other)) + ' and Dyadics.')
def __and__(self, other):
return self.dot(other)
__and__.__doc__ = dot.__doc__
def cross(self, other):
"""
Returns the cross product between this Dyadic, and a Vector, as a
Vector instance.
Parameters
==========
other : Vector
The Vector that we are crossing this Dyadic with
Examples
========
>>> from diofant.vector import CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> d = N.i.outer(N.i)
>>> d.cross(N.j)
(N.i|N.k)
"""
from .vector import Vector
if other == Vector.zero:
return Dyadic.zero
elif isinstance(other, Vector):
outdyad = Dyadic.zero
for k, v in self.components.items():
cross_product = k.args[1].cross(other)
outer = k.args[0].outer(cross_product)
outdyad += v * outer
return outdyad
else:
raise TypeError(str(type(other)) + ' not supported for ' +
'cross with dyadics')
def __xor__(self, other):
return self.cross(other)
__xor__.__doc__ = cross.__doc__
def to_matrix(self, system, second_system=None):
"""
Returns the matrix form of the dyadic with respect to one or two
coordinate systems.
Parameters
==========
system : CoordSysCartesian
The coordinate system that the rows and columns of the matrix
correspond to. If a second system is provided, this
only corresponds to the rows of the matrix.
second_system : CoordSysCartesian, optional, default=None
The coordinate system that the columns of the matrix correspond
to.
Examples
========
>>> from diofant.vector import CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> v = N.i + 2*N.j
>>> d = v.outer(N.i)
>>> d.to_matrix(N)
Matrix([
[1, 0, 0],
[2, 0, 0],
[0, 0, 0]])
>>> q = Symbol('q')
>>> P = N.orient_new_axis('P', q, N.k)
>>> d.to_matrix(N, P)
Matrix([
[ cos(q), -sin(q), 0],
[2*cos(q), -2*sin(q), 0],
[ 0, 0, 0]])
"""
if second_system is None:
second_system = system
return ImmutableMatrix([i.dot(self).dot(j) for i in system for j in
second_system]).reshape(3, 3)
class BaseDyadic(Dyadic, AtomicExpr):
"""Class to denote a base dyadic tensor component."""
def __new__(cls, vector1, vector2):
from .vector import Vector, BaseVector, VectorZero
# Verify arguments
if not isinstance(vector1, (BaseVector, VectorZero)) or \
not isinstance(vector2, (BaseVector, VectorZero)):
raise TypeError('BaseDyadic cannot be composed of non-base ' +
'vectors')
# Handle special case of zero vector
elif vector1 == Vector.zero or vector2 == Vector.zero:
return Dyadic.zero
# Initialize instance
obj = super().__new__(cls, vector1, vector2)
obj._base_instance = obj
obj._measure_number = 1
obj._components = {obj: Integer(1)}
obj._sys = vector1._sys
obj._pretty_form = ('(' + vector1._pretty_form + '|' +
vector2._pretty_form + ')')
obj._latex_form = ('(' + vector1._latex_form + '{|}' +
vector2._latex_form + ')')
return obj
def __str__(self, printer=None):
return '(' + str(self.args[0]) + '|' + str(self.args[1]) + ')'
_diofantstr = __str__
_diofantrepr = _diofantstr
class DyadicMul(BasisDependentMul, Dyadic):
"""Products of scalars and BaseDyadics."""
def __new__(cls, *args, **options):
obj = BasisDependentMul.__new__(cls, *args, **options)
return obj
@property
def base_dyadic(self):
"""The BaseDyadic involved in the product."""
return self._base_instance
@property
def measure_number(self):
"""The scalar expression involved in the definition of
this DyadicMul.
"""
return self._measure_number
class DyadicAdd(BasisDependentAdd, Dyadic):
"""Class to hold dyadic sums."""
def __new__(cls, *args, **options):
obj = BasisDependentAdd.__new__(cls, *args, **options)
return obj
def __str__(self, printer=None):
ret_str = ''
items = list(self.components.items())
items.sort(key=lambda x: x[0].__str__())
for k, v in items:
temp_dyad = k * v
ret_str += temp_dyad.__str__(printer) + ' + '
return ret_str[:-3]
__repr__ = __str__
_diofantstr = __str__
class DyadicZero(BasisDependentZero, Dyadic):
"""Class to denote a zero dyadic."""
_op_priority = 13.1
_pretty_form = '(0|0)'
_latex_form = r'(\mathbf{\hat{0}}|\mathbf{\hat{0}})'
def __new__(cls):
obj = BasisDependentZero.__new__(cls)
return obj
def _dyad_div(one, other):
"""Helper for division involving dyadics."""
if isinstance(other, Dyadic):
raise TypeError('Cannot divide two dyadics')
else:
return DyadicMul(one, Pow(other, -1))
Dyadic._expr_type = Dyadic
Dyadic._mul_func = DyadicMul
Dyadic._add_func = DyadicAdd
Dyadic._zero_func = DyadicZero
Dyadic._base_func = BaseDyadic
Dyadic._div_helper = _dyad_div
Dyadic.zero = DyadicZero()
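# --- Illustrative usage (not part of the original module) ---------------------
# A short sketch of the API defined above, assuming CoordSysCartesian is
# available as shown in the docstrings.
def _demo_dyadic():
    from diofant.vector import CoordSysCartesian
    N = CoordSysCartesian('N')
    d = N.i.outer(N.j) + N.j.outer(N.j)   # DyadicAdd of two BaseDyadics
    assert d.dot(N.j) == N.i + N.j        # inner product collapses to a Vector
    return d.to_matrix(N)                 # 3x3 ImmutableMatrix representation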
| bsd-3-clause | -3,575,280,719,085,110,000 | 28.911111 | 79 | 0.541605 | false | 3.768549 | false | false | false |
UCBerkeleySETI/blimpy | blimpy/plotting/plot_time_series.py | 1 | 1628 | from .config import *
from ..utils import rebin, db
from .plot_utils import calc_extent
def plot_time_series(wf, f_start=None, f_stop=None, if_id=0, logged=True, orientation='h', MJD_time=False, **kwargs):
""" Plot the time series.
    Args:
        wf (Waterfall): Waterfall object holding the data to plot
        f_start (float): start frequency, in MHz
        f_stop (float): stop frequency, in MHz
        if_id (int): IF index passed through to grab_data()
        logged (bool): plot in linear (False) or dB units (True)
        orientation (str): 'h' for a horizontal time axis, 'v' for vertical
        MJD_time (bool): label the time axis in MJD instead of seconds
        kwargs: keyword args to be passed to matplotlib plot()
"""
ax = plt.gca()
plot_f, plot_data = wf.grab_data(f_start, f_stop, if_id)
# Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1
if len(plot_data.shape) > 1:
plot_data = np.nanmean(plot_data, axis=1)
else:
plot_data = np.nanmean(plot_data)
if logged and wf.header['nbits'] >= 8:
plot_data = db(plot_data)
# Make proper time axis for plotting (but only for plotting!). Note that this makes the values inclusive.
extent = calc_extent(wf, plot_f=plot_f, plot_t=wf.timestamps, MJD_time=MJD_time)
plot_t = np.linspace(extent[2], extent[3], len(wf.timestamps))
if MJD_time:
tlabel = "Time [MJD]"
else:
tlabel = "Time [s]"
if logged:
plabel = "Power [dB]"
else:
plabel = "Power [counts]"
    # Reverse order if vertical orientation.
if 'v' in orientation:
plt.plot(plot_data, plot_t, **kwargs)
plt.xlabel(plabel)
else:
plt.plot(plot_t, plot_data, **kwargs)
plt.xlabel(tlabel)
plt.ylabel(plabel)
ax.autoscale(axis='both', tight=True)
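# --- Illustrative usage (not part of the original module) ---------------------
# Driving the helper from a Waterfall object; the filterbank filename is a
# placeholder assumption.
def _demo_plot_time_series(path='observation.fil'):
    from blimpy import Waterfall
    wf = Waterfall(path)                  # load a filterbank/HDF5 file
    plot_time_series(wf, logged=True)     # dB power vs. time
    plt.show()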
| bsd-3-clause | -8,949,116,032,728,782,000 | 30.921569 | 117 | 0.616093 | false | 3.211045 | false | false | false |
openstack/horizon | openstack_dashboard/dashboards/admin/volumes/forms.py | 1 | 10388 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators as utils_validators
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.snapshots.forms \
import populate_status_choices
from openstack_dashboard.dashboards.project.volumes \
import forms as project_forms
from openstack_dashboard.dashboards.project.volumes.tables \
import VolumesTableBase as volumes_table
# This set of states was pulled from cinder's admin_actions.py
SETTABLE_STATUSES = (
'attaching', 'available', 'creating', 'deleting', 'detaching', 'error',
'error_deleting', 'in-use', 'maintenance', 'reserved')
STATUS_CHOICES = tuple(
status for status in volumes_table.STATUS_DISPLAY_CHOICES
if status[0] in SETTABLE_STATUSES
)
class ManageVolume(forms.SelfHandlingForm):
identifier = forms.CharField(
max_length=255,
label=_("Identifier"),
help_text=_("Name or other identifier for existing volume"))
id_type = forms.ThemableChoiceField(
label=_("Identifier Type"),
help_text=_("Type of backend device identifier provided"))
host = forms.CharField(
max_length=255,
label=_("Host"),
help_text=_("Cinder host on which the existing volume resides; "
"takes the form: host@backend-name#pool"))
name = forms.CharField(
max_length=255,
label=_("Volume Name"),
required=False,
help_text=_("Volume name to be assigned"))
description = forms.CharField(max_length=255, widget=forms.Textarea(
attrs={'rows': 4}),
label=_("Description"), required=False)
metadata = forms.CharField(max_length=255, widget=forms.Textarea(
attrs={'rows': 2}),
label=_("Metadata"), required=False,
help_text=_("Comma-separated key=value pairs"),
validators=[utils_validators.validate_metadata])
volume_type = forms.ThemableChoiceField(
label=_("Volume Type"),
required=False)
availability_zone = forms.ThemableChoiceField(
label=_("Availability Zone"),
required=False)
bootable = forms.BooleanField(
label=_("Bootable"),
required=False,
help_text=_("Specifies that the newly created volume "
"should be marked as bootable"))
def __init__(self, request, *args, **kwargs):
super().__init__(request, *args, **kwargs)
self.fields['id_type'].choices = [("source-name", _("Name"))] + \
[("source-id", _("ID"))]
volume_types = cinder.volume_type_list(request)
self.fields['volume_type'].choices = [("", _("No volume type"))] + \
[(type.name, type.name)
for type in volume_types]
self.fields['availability_zone'].choices = \
project_forms.availability_zones(request)
def handle(self, request, data):
try:
az = data.get('availability_zone')
# assume user enters metadata with "key1=val1,key2=val2"
# convert to dictionary
metadataDict = {}
metadata = data.get('metadata')
if metadata:
                metadata = metadata.replace(" ", "")
for item in metadata.split(','):
key, value = item.split('=')
metadataDict[key] = value
cinder.volume_manage(request,
host=data['host'],
identifier=data['identifier'],
id_type=data['id_type'],
name=data['name'],
description=data['description'],
volume_type=data['volume_type'],
availability_zone=az,
metadata=metadataDict,
bootable=data['bootable'])
# for success message, use identifier if user does not
# provide a volume name
volume_name = data['name']
if not volume_name:
volume_name = data['identifier']
messages.success(
request,
_('Successfully sent the request to manage volume: %s')
% volume_name)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Unable to manage volume."),
redirect=redirect)
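# A standalone restatement of the comma-separated "key=value" metadata parsing
# performed in ManageVolume.handle above, shown for clarity; illustration only.
def _parse_metadata_example(metadata):
    metadata_dict = {}
    if metadata:
        metadata = metadata.replace(" ", "")
        for item in metadata.split(','):
            key, value = item.split('=')
            metadata_dict[key] = value
    return metadata_dict   # _parse_metadata_example("k1=v1, k2=v2") -> {'k1': 'v1', 'k2': 'v2'}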
class UnmanageVolume(forms.SelfHandlingForm):
name = forms.CharField(label=_("Volume Name"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.CharField(label=_("Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
volume_id = forms.CharField(label=_("ID"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
def handle(self, request, data):
try:
cinder.volume_unmanage(request, self.initial['volume_id'])
messages.success(
request,
_('Successfully sent the request to unmanage volume: %s')
% data['name'])
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Unable to unmanage volume."),
redirect=redirect)
class MigrateVolume(forms.SelfHandlingForm):
name = forms.CharField(label=_("Volume Name"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
current_host = forms.CharField(label=_("Current Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.ThemableChoiceField(
label=_("Destination Host"),
help_text=_("Choose a Host to migrate to."))
force_host_copy = forms.BooleanField(label=_("Force Host Copy"),
initial=False, required=False)
def __init__(self, request, *args, **kwargs):
super().__init__(request, *args, **kwargs)
initial = kwargs.get('initial', {})
self.fields['host'].choices = self.populate_host_choices(request,
initial)
def populate_host_choices(self, request, initial):
hosts = initial.get('hosts')
current_host = initial.get('current_host')
host_list = [(host.name, host.name)
for host in hosts
if host.name != current_host]
if host_list:
host_list.insert(0, ("", _("Select a new host")))
else:
host_list.insert(0, ("", _("No other hosts available")))
return sorted(host_list)
def handle(self, request, data):
try:
cinder.volume_migrate(request,
self.initial['volume_id'],
data['host'],
data['force_host_copy'])
messages.success(
request,
_('Successfully sent the request to migrate volume: %s')
% data['name'])
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Failed to migrate volume."),
redirect=redirect)
class UpdateStatus(forms.SelfHandlingForm):
status = forms.ThemableChoiceField(label=_("Status"))
def __init__(self, request, *args, **kwargs):
# Initial values have to be operated before super() otherwise the
# initial values will get overwritten back to the raw value
current_status = kwargs['initial']['status']
kwargs['initial'].pop('status')
super().__init__(request, *args, **kwargs)
self.fields['status'].choices = populate_status_choices(
current_status, STATUS_CHOICES)
def handle(self, request, data):
# Obtain the localized status for including in the message
for choice in self.fields['status'].choices:
if choice[0] == data['status']:
new_status = choice[1]
break
else:
new_status = data['status']
try:
cinder.volume_reset_state(request,
self.initial['volume_id'],
data['status'])
messages.success(request,
_('Successfully updated volume status to "%s".') %
new_status)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to update volume status to "%s".') %
new_status, redirect=redirect)
| apache-2.0 | 679,940,254,032,079,700 | 40.386454 | 79 | 0.545148 | false | 4.958473 | false | false | false |
ijmarshall/robotreviewer3 | robotreviewer/robots/rationale_robot.py | 1 | 13649 | """
The BiasRobot class takes the full text of a clinical trial as
input as a robotreviewer.data_structures.MultiDict, and returns
bias information in the same format, which can easily be converted
to JSON.
There are multiple ways to build a MultiDict; the most common
way used in this project is from a PDF binary:
pdf_binary = ...
pdfr = PDFReader()
data = pdfr.convert(pdf_binary)
robot = BiasRobot()
annotations = robot.annotate(data)
"""
# Authors: Iain Marshall <[email protected]>
# Joel Kuiper <[email protected]>
# Byron Wallace <[email protected]>
import uuid
import operator
import pickle
import numpy as np
from collections import OrderedDict, defaultdict
import robotreviewer
import logging
log = logging.getLogger(__name__)
import sys
sys.path.append('robotreviewer/ml') # need this for loading the rationale_CNN module
from celery.contrib import rdb
__version__ = {"name": "Risk of bias (CNN/SVM ensemble)",
"version_number": "3",
"publication_url": "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5300751/",
"cite_bibtex": """@inproceedings{zhang2016rationale,
title={Rationale-augmented convolutional neural networks for text classification},
author={Zhang, Ye and Marshall, Iain and Wallace, Byron C},
booktitle={Proceedings of the Conference on Empirical Methods in Natural Language Processing. Conference on Empirical Methods in Natural Language Processing},
volume={2016},
pages={795},
year={2016},
organization={NIH Public Access}
}""", "cite_text": "Zhang, Ye, Iain J Marshall, and Byron C. Wallace. “Rationale-Augmented Convolutional Neural Networks for Text Classification.” Proceedings of Empirical Methods in Natural Language Processing (EMNLP), 2016."
}
class BiasRobot:
def __init__(self, top_k=3):
"""
`top_k` refers to 'top-k recall'.
top-1 recall will return the single most relevant sentence
in the document, and top-3 recall the 3 most relevant.
The validation study assessed the accuracy of top-3 and top-1
        and we suggest top-3 as the default.
"""
self.bias_domains = ['Random sequence generation']
self.top_k = top_k
self.bias_domains = {'RSG': 'Random sequence generation',
'AC': 'Allocation concealment',
'BPP': 'Blinding of participants and personnel',
'BOA': 'Blinding of outcome assessment',
'IOD': 'Incomplete outcome data',
'SR': 'Selective reporting'
}
###
# Here we take a simple ensembling approach in which we combine the
# predictions made by our rationaleCNN model and the JAMIA (linear)
# multi task variant.
###
self.all_domains = ['RSG', 'AC', 'BPP', 'BOA']
from robotreviewer.ml.classifier import MiniClassifier
from robotreviewer.ml.vectorizer import ModularVectorizer
from robotreviewer.ml.rationale_CNN import RationaleCNN, Document
global RationaleCNN, Document, MiniClassifier, ModularVectorizer
# CNN domains
vectorizer_str = 'robotreviewer/data/keras/vectorizers/{}.pickle'
arch_str = 'robotreviewer/data/keras/models/{}.json'
weight_str = 'robotreviewer/data/keras/models/{}.hdf5'
self.CNN_models = OrderedDict()
for bias_domain in ['RSG', 'AC', 'BPP', 'BOA']:
# Load vectorizer and keras model
vectorizer_loc = vectorizer_str.format(bias_domain)
arch_loc = arch_str.format(bias_domain)
weight_loc = weight_str.format(bias_domain)
preprocessor = pickle.load(open(vectorizer_loc, 'rb'))
preprocessor.tokenizer.oov_token = None # TODO check with Byron
self.CNN_models[bias_domain] = RationaleCNN(preprocessor,
document_model_architecture_path=arch_loc,
document_model_weights_path=weight_loc)
# Linear domains (these are joint models!)
self.linear_sent_clf = MiniClassifier(robotreviewer.get_data('bias/bias_sent_level.npz'))
self.linear_doc_clf = MiniClassifier(robotreviewer.get_data('bias/bias_doc_level.npz'))
self.linear_vec = ModularVectorizer(norm=None, non_negative=True, binary=True, ngram_range=(1, 2),
n_features=2**26)
def simple_borda_count(self, a, b, weights=None):
'''
Basic Borda count implementation for just two lists.
Assumes that a and b are lists of indices sorted
in *increasing* preference (so top-ranked sentence
should be the last element).
'''
rank_scores_dict = defaultdict(int)
if weights is None:
weights = np.ones(2)
        # Ensure list sizes are equal. Note that the CNN
        # model will always assume/force 200 sentences,
        # whereas the BoW model will not. So here we trim if
# necessary, effectively taking the max_index
# top sentences from each model and pooling these.
a_n, b_n = len(a), len(b)
max_index = min(a_n, b_n)
a = a[-max_index:]
b = b[-max_index:]
for i in range(max_index):
score = i+1 # 1 ... m
rank_scores_dict[a[i]] += weights[0]*score
rank_scores_dict[b[i]] += weights[1]*score
sorted_indices = sorted(rank_scores_dict.items(), key=operator.itemgetter(1), reverse=True)
return [index[0] for index in sorted_indices]
def annotate(self, doc_text, top_k=None, threshold=0.5):
"""
Annotate full text of clinical trial report
`top_k` can be overridden here, else defaults to the class
default set in __init__
"""
log.info('getting top k')
top_k = self.top_k if not top_k else top_k
doc_len = len(doc_text.text)
doc_sents = [sent.text for sent in doc_text.sents]
doc_sent_start_i = [sent.start_char for sent in doc_text.sents]
doc_sent_end_i = [sent.end_char for sent in doc_text.sents]
structured_data = []
#for domain, model in self.models.items():
log.info('starting modeling')
for domain in self.all_domains:
log.info('STARTING DOMAIN {}'.format(domain))
###
# linear model predictions (all domains)
#if type(model) == tuple: # linear model
log.info('doing linear predictions')
(vec, sent_clf, doc_clf) = (self.linear_vec, self.linear_sent_clf, self.linear_doc_clf)
doc_domains = [self.bias_domains[domain]] * len(doc_sents)
doc_X_i = zip(doc_sents, doc_domains)
vec.builder_clear()
vec.builder_add_docs(doc_sents)
vec.builder_add_docs(doc_X_i)
doc_sents_X = vec.builder_transform()
doc_sents_preds = sent_clf.decision_function(doc_sents_X)
linear_high_prob_sent_indices = np.argsort(doc_sents_preds)
###
# CNN predictions
log.info('doing cnn predictions')
bias_prob_CNN = None
if domain in self.CNN_models:
model = self.CNN_models[domain]
log.info('model selected for {}'.format(domain))
doc = Document(doc_id=None, sentences=doc_sents) # make consumable for RA-CNN
log.info('Doc done {}'.format(domain))
# this never comes back
bias_prob_CNN, high_prob_sent_indices_CNN = model.predict_and_rank_sentences_for_doc(doc, num_rationales=len(doc), return_rationale_indices=True)
log.info('got probs {}'.format(domain))
high_prob_sent_indices = self.simple_borda_count(high_prob_sent_indices_CNN,
linear_high_prob_sent_indices)[:top_k]
# and now the overall (doc-level) prediction from the CNN model.
# bias_prob = 1 --> low risk
# from riskofbias2:
# doc_y[mapped_domain] = 1 if domain["RATING"] == "YES" else -1
# # simplifying to LOW risk of bias = 1 *v* HIGH/UNKNOWN risk = -1
####
bias_pred = int(bias_prob_CNN >= threshold) # low risk if True and high/unclear otherwise
else:
# no aggregation here (since no CNN model for this domain)
                high_prob_sent_indices = linear_high_prob_sent_indices[-top_k:]
                high_prob_sent_indices = high_prob_sent_indices[::-1]  # put highest prob sentence first
#if domain == "BOA":
# high_prob_sents_CNN = [doc_sents[i] for i in high_prob_sent_indices_CNN]
# Find high probability sentences
#from celery.contrib import rdb
#rdb.set_trace()
high_prob_sents = [doc_sents[i] for i in high_prob_sent_indices]
high_prob_start_i = [doc_sent_start_i[i] for i in high_prob_sent_indices]
high_prob_end_i = [doc_sent_end_i[i] for i in high_prob_sent_indices]
high_prob_prefixes = [doc_text.text[max(0, offset-20):offset] for offset in high_prob_start_i]
high_prob_suffixes = [doc_text.text[offset: min(doc_len, offset+20)] for offset in high_prob_end_i]
high_prob_sents_j = " ".join(high_prob_sents)
# overall pred from linear model
vec.builder_clear()
vec.builder_add_docs([doc_text.text])
vec.builder_add_docs([(doc_text.text, self.bias_domains[domain])])
sent_domain_interaction = "-s-" + self.bias_domains[domain]
vec.builder_add_docs([(high_prob_sents_j, sent_domain_interaction)])
X = vec.builder_transform()
bias_prob_linear = doc_clf.predict_proba(X)[0]
# if we have a CNN pred, too, then average; otherwise
# rely on linear model.
bias_prob = bias_prob_linear
if bias_prob_CNN is not None:
bias_prob = (bias_prob_CNN + bias_prob_linear) / 2.0
bias_pred = int(bias_prob >= threshold)
bias_class = ["high/unclear", "low"][bias_pred] # prediction
annotation_metadata = []
for sent in zip(high_prob_sents, high_prob_start_i, high_prob_prefixes, high_prob_suffixes):
sent_metadata = {"content": sent[0],
"position": sent[1],
"uuid": str(uuid.uuid1()),
"prefix": sent[2],
"suffix": sent[3]}
annotation_metadata.append(sent_metadata)
structured_data.append({"domain": self.bias_domains[domain],
"judgement": bias_class,
"annotations": annotation_metadata})
return structured_data
def pdf_annotate(self, data):
log.info('retrieving text')
doc_text = data.get('parsed_text')
if not doc_text:
return data # we've got to know the text at least..
structured_data = self.annotate(doc_text)
data.ml["bias"] = structured_data
log.info('done predictions, ready to return answers')
return data
def api_annotate(self, articles):
if not all(('parsed_fullText' in article for article in articles)):
raise Exception('Bias model requires full text to be able to complete annotation')
annotations = []
for article in articles:
if article.get('skip_annotation'):
annotations.append([])
else:
annotations.append(self.annotate(article['parsed_fullText']))
# reformat annotations to API formatting
api_domain_titles = {
'Random sequence generation': 'random_sequence_generation',
'Allocation concealment': 'allocation_concealment',
'Blinding of participants and personnel': 'blinding_participants_personnel',
'Blinding of outcome assessment': 'blinding_outcome_assessment'}
out = []
for r in annotations:
row = {}
for b in r:
row[api_domain_titles[b['domain']]] = {
"judgement": b['judgement'],
"annotations": [{"text": an['content'], "start_index":an['position'] } for an in b['annotations']]
}
out.append(row)
return out
@staticmethod
def get_marginalia(data):
"""
Get marginalia formatted for Spa from structured data
"""
marginalia = []
for row in data['bias']:
marginalia.append({
"type": "Risk of Bias",
"title": row['domain'],
"annotations": row['annotations'],
"description": "**Overall risk of bias prediction**: {}".format(row['judgement'])
})
return marginalia
@staticmethod
def get_domains():
return [u'Random sequence generation',
u'Allocation concealment',
u'Blinding of participants and personnel',
u'Blinding of outcome assessment']
#u'Incomplete outcome data',
#u'Selective reporting']
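# --- Illustrative example (not part of the original module) -------------------
# A standalone demonstration of the Borda merge in simple_borda_count. The
# instance is created with __new__ because the method needs no model state;
# input lists run from least to most preferred.
def _demo_borda_count():
    robot = BiasRobot.__new__(BiasRobot)
    a = [2, 0, 1]   # linear model ranking: sentence 1 preferred
    b = [0, 2, 1]   # CNN model ranking: sentence 1 preferred
    return robot.simple_borda_count(a, b)   # -> [1, 2, 0] (ties keep insertion order)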
| gpl-3.0 | -8,401,132,343,254,432,000 | 38.781341 | 236 | 0.577281 | false | 3.963114 | false | false | false |
zork9/pygame-pyMM | bombertoad.py | 1 | 3050 |
# Copyright (c) 2013 Johan Ceuppens.
# All rights reserved.
# Redistribution and use in source and binary forms are permitted
# provided that the above copyright notice and this paragraph are
# duplicated in all such forms and that any documentation,
# advertising materials, and other materials related to such
# distribution and use acknowledge that the software was developed
# by the Johan Ceuppens. The name of the
# Johan Ceuppens may not be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
# Copyright (C) Johan Ceuppens 2010
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pygame
from pygame.locals import *
from gameobject import *
from bullet import *
from stateimagelibrary import *
import random
from time import *
from math import *
from random import *
from rng import *
class BomberToad(Gameobject):
"Dude on Toad throwing Bombs"
def __init__(self,xx,yy):
Gameobject.__init__(self, xx, yy)
self.w = 100
self.h = 100
self.hitpoints = 2
self.yy = yy
self.stimlib = Stateimagelibrary()
image = pygame.image.load('./pics/bomber-left-1.bmp').convert()
image.set_colorkey((0,0,0))
self.stimlib.addpicture(image)
image = pygame.image.load('./pics/bomber-left-2.bmp').convert()
image.set_colorkey((0,0,0))
self.stimlib.addpicture(image)
image = pygame.image.load('./pics/bomber-left-3.bmp').convert()
image.set_colorkey((0,0,0))
self.stimlib.addpicture(image)
image = pygame.image.load('./pics/bomber-left-4.bmp').convert()
image.set_colorkey((0,0,0))
self.stimlib.addpicture(image)
self.counter = 0
def draw(self, screen, room):
if randint(0,100) != 100 and self.counter == 0:
self.counter = 0
self.stimlib.drawstatic(screen, self.x-40+room.relativex,self.y+room.relativey, 0)
else:
self.counter += 1
self.stimlib.drawstatic(screen, self.x-40+room.relativex,self.y+room.relativey, self.counter)
if self.counter >= 3:
self.counter = 0
room.gameobjects.append(Bullet(self.x+room.relativex,self.y+room.relativey, "left"))
    def update(self, room, player):
        pass
    def fight(self, room, player, keydown=-1):
        pass
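# --- Illustrative sketch (not part of the original game code) -----------------
# draw() implements a probabilistic animation trigger: most frames show the
# idle picture, but roughly 1 frame in 101 starts a four-frame throw cycle
# that ends by spawning a Bullet. The same logic, isolated for clarity:
def _demo_animation_step(counter):
    if randint(0, 100) != 100 and counter == 0:
        return 0, False                  # stay idle
    counter += 1
    if counter >= 3:
        return 0, True                   # cycle finished: reset and throw a bomb
    return counter, False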
| gpl-2.0 | 8,758,164,902,192,934,000 | 34.882353 | 95 | 0.71377 | false | 3.497706 | false | false | false |
MadsJensen/agency_connectivity | make_df_hilbert_data.py | 1 | 1383 | import numpy as np
import pandas as pd
import scipy.io as sio
from my_settings import *
data = sio.loadmat("/home/mje/Projects/agency_connectivity/Data/data_all.mat")[
"data_all"]
column_keys = ["subject", "trial", "condition", "shift"]
result_df = pd.DataFrame(columns=column_keys)
for k, subject in enumerate(subjects):
p8_invol_shift = data[k, 3] - np.mean(data[k, 0])
p8_vol_shift = data[k, 2] - np.mean(data[k, 0])
p8_vol_bs_shift = data[k, 1] - np.mean(data[k, 0])
for j in range(89):
row = pd.DataFrame([{"trial": int(j),
"subject": subject,
"condition": "vol_bs",
"shift": p8_vol_bs_shift[j + 1][0]}])
result_df = result_df.append(row, ignore_index=True)
for j in range(89):
row = pd.DataFrame([{"trial": int(j),
"subject": subject,
"condition": "vol",
"shift": p8_vol_shift[j + 1][0]}])
result_df = result_df.append(row, ignore_index=True)
for j in range(89):
row = pd.DataFrame([{"trial": int(j),
"subject": subject,
"condition": "invol",
"shift": p8_invol_shift[j][0]}])
result_df = result_df.append(row, ignore_index=True)
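# --- Illustrative alternative (not part of the original script) ---------------
# The same long-format table built in one pass from a list of dicts, the
# idiomatic replacement for the deprecated DataFrame.append pattern above.
# Note the offsets that mirror the original [j + 1] / [j] indexing.
def _build_df_in_one_pass():
    rows = []
    for k, subject in enumerate(subjects):
        baseline = np.mean(data[k, 0])
        for condition, column, offset in (("vol_bs", 1, 1),
                                          ("vol", 2, 1),
                                          ("invol", 3, 0)):
            shifts = data[k, column] - baseline
            for j in range(89):
                rows.append({"trial": int(j), "subject": subject,
                             "condition": condition,
                             "shift": shifts[j + offset][0]})
    return pd.DataFrame(rows, columns=column_keys)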
| bsd-3-clause | 5,829,760,773,845,085,000 | 32.731707 | 79 | 0.501808 | false | 3.440299 | false | false | false |
jdf76/plugin.video.youtube | resources/lib/youtube_plugin/kodion/utils/http_server.py | 1 | 21426 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2018-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
from six.moves import BaseHTTPServer
from six.moves.urllib.parse import parse_qs, urlparse
from six.moves import range
import json
import os
import re
import requests
import socket
import xbmc
import xbmcaddon
import xbmcgui
from .. import logger
class YouTubeRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.addon_id = 'plugin.video.youtube'
addon = xbmcaddon.Addon(self.addon_id)
whitelist_ips = addon.getSetting('kodion.http.ip.whitelist')
whitelist_ips = ''.join(whitelist_ips.split())
self.whitelist_ips = whitelist_ips.split(',')
self.local_ranges = ('10.', '172.16.', '192.168.', '127.0.0.1', 'localhost', '::1')
self.chunk_size = 1024 * 64
try:
self.base_path = xbmc.translatePath('special://temp/%s' % self.addon_id).decode('utf-8')
except AttributeError:
self.base_path = xbmc.translatePath('special://temp/%s' % self.addon_id)
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def connection_allowed(self):
client_ip = self.client_address[0]
log_lines = ['HTTPServer: Connection from |%s|' % client_ip]
conn_allowed = client_ip.startswith(self.local_ranges)
log_lines.append('Local range: |%s|' % str(conn_allowed))
if not conn_allowed:
conn_allowed = client_ip in self.whitelist_ips
log_lines.append('Whitelisted: |%s|' % str(conn_allowed))
if not conn_allowed:
logger.log_debug('HTTPServer: Connection from |%s| not allowed' % client_ip)
else:
if self.path != '/ping':
logger.log_debug(' '.join(log_lines))
return conn_allowed
# noinspection PyPep8Naming
def do_GET(self):
addon = xbmcaddon.Addon('plugin.video.youtube')
dash_proxy_enabled = addon.getSetting('kodion.mpd.videos') == 'true' and addon.getSetting('kodion.video.quality.mpd') == 'true'
api_config_enabled = addon.getSetting('youtube.api.config.page') == 'true'
if self.path == '/client_ip':
client_json = json.dumps({"ip": "{ip}".format(ip=self.client_address[0])})
self.send_response(200)
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.send_header('Content-Length', len(client_json))
self.end_headers()
            self.wfile.write(client_json.encode('utf-8'))
            return  # response complete; avoid falling through to the 501 handler
if self.path != '/ping':
logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path))
if not self.connection_allowed():
self.send_error(403)
else:
if dash_proxy_enabled and self.path.endswith('.mpd'):
file_path = os.path.join(self.base_path, self.path.strip('/').strip('\\'))
file_chunk = True
logger.log_debug('HTTPServer: Request file path |{file_path}|'.format(file_path=file_path.encode('utf-8')))
try:
with open(file_path, 'rb') as f:
self.send_response(200)
self.send_header('Content-Type', 'application/xml+dash')
self.send_header('Content-Length', os.path.getsize(file_path))
self.end_headers()
while file_chunk:
file_chunk = f.read(self.chunk_size)
if file_chunk:
self.wfile.write(file_chunk)
except IOError:
response = 'File Not Found: |{proxy_path}| -> |{file_path}|'.format(proxy_path=self.path, file_path=file_path.encode('utf-8'))
self.send_error(404, response)
elif api_config_enabled and self.path == '/api':
html = self.api_config_page()
html = html.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.send_header('Content-Length', len(html))
self.end_headers()
for chunk in self.get_chunks(html):
self.wfile.write(chunk)
elif api_config_enabled and self.path.startswith('/api_submit'):
addon = xbmcaddon.Addon('plugin.video.youtube')
i18n = addon.getLocalizedString
xbmc.executebuiltin('Dialog.Close(addonsettings,true)')
old_api_key = addon.getSetting('youtube.api.key')
old_api_id = addon.getSetting('youtube.api.id')
old_api_secret = addon.getSetting('youtube.api.secret')
query = urlparse(self.path).query
params = parse_qs(query)
api_key = params.get('api_key', [None])[0]
api_id = params.get('api_id', [None])[0]
api_secret = params.get('api_secret', [None])[0]
if api_key and api_id and api_secret:
footer = i18n(30638)
else:
footer = u''
if re.search(r'api_key=(?:&|$)', query):
api_key = ''
if re.search(r'api_id=(?:&|$)', query):
api_id = ''
if re.search(r'api_secret=(?:&|$)', query):
api_secret = ''
updated = []
if api_key is not None and api_key != old_api_key:
addon.setSetting('youtube.api.key', api_key)
updated.append(i18n(30201))
if api_id is not None and api_id != old_api_id:
addon.setSetting('youtube.api.id', api_id)
updated.append(i18n(30202))
if api_secret is not None and api_secret != old_api_secret:
updated.append(i18n(30203))
addon.setSetting('youtube.api.secret', api_secret)
if addon.getSetting('youtube.api.key') and addon.getSetting('youtube.api.id') and \
addon.getSetting('youtube.api.secret'):
enabled = i18n(30636)
else:
enabled = i18n(30637)
if not updated:
updated = i18n(30635)
else:
updated = i18n(30631) % u', '.join(updated)
html = self.api_submit_page(updated, enabled, footer)
html = html.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.send_header('Content-Length', len(html))
self.end_headers()
for chunk in self.get_chunks(html):
self.wfile.write(chunk)
elif self.path == '/ping':
self.send_error(204)
else:
self.send_error(501)
# noinspection PyPep8Naming
def do_HEAD(self):
logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path))
if not self.connection_allowed():
self.send_error(403)
else:
addon = xbmcaddon.Addon('plugin.video.youtube')
dash_proxy_enabled = addon.getSetting('kodion.mpd.videos') == 'true' and addon.getSetting('kodion.video.quality.mpd') == 'true'
if dash_proxy_enabled and self.path.endswith('.mpd'):
file_path = os.path.join(self.base_path, self.path.strip('/').strip('\\'))
if not os.path.isfile(file_path):
response = 'File Not Found: |{proxy_path}| -> |{file_path}|'.format(proxy_path=self.path, file_path=file_path.encode('utf-8'))
self.send_error(404, response)
else:
self.send_response(200)
self.send_header('Content-Type', 'application/xml+dash')
self.send_header('Content-Length', os.path.getsize(file_path))
self.end_headers()
else:
self.send_error(501)
# noinspection PyPep8Naming
def do_POST(self):
logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path))
if not self.connection_allowed():
self.send_error(403)
elif self.path.startswith('/widevine'):
license_url = xbmcgui.Window(10000).getProperty('plugin.video.youtube-license_url')
license_token = xbmcgui.Window(10000).getProperty('plugin.video.youtube-license_token')
if not license_url:
self.send_error(404)
return
if not license_token:
self.send_error(403)
return
size_limit = None
length = int(self.headers['Content-Length'])
post_data = self.rfile.read(length)
li_headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Bearer %s' % license_token
}
result = requests.post(url=license_url, headers=li_headers, data=post_data, stream=True)
response_length = int(result.headers.get('content-length'))
content = result.raw.read(response_length)
content_split = content.split('\r\n\r\n'.encode('utf-8'))
response_header = content_split[0].decode('utf-8', 'ignore')
response_body = content_split[1]
response_length = len(response_body)
match = re.search(r'^Authorized-Format-Types:\s*(?P<authorized_types>.+?)\r*$', response_header, re.MULTILINE)
if match:
authorized_types = match.group('authorized_types').split(',')
logger.log_debug('HTTPServer: Found authorized formats |{authorized_fmts}|'.format(authorized_fmts=authorized_types))
fmt_to_px = {'SD': (1280 * 528) - 1, 'HD720': 1280 * 720, 'HD': 7680 * 4320}
if 'HD' in authorized_types:
size_limit = fmt_to_px['HD']
elif 'HD720' in authorized_types:
if xbmc.getCondVisibility('system.platform.android') == 1:
size_limit = fmt_to_px['HD720']
else:
size_limit = fmt_to_px['SD']
elif 'SD' in authorized_types:
size_limit = fmt_to_px['SD']
self.send_response(200)
if size_limit:
self.send_header('X-Limit-Video', 'max={size_limit}px'.format(size_limit=str(size_limit)))
for d in list(result.headers.items()):
if re.match('^[Cc]ontent-[Ll]ength$', d[0]):
self.send_header(d[0], response_length)
else:
self.send_header(d[0], d[1])
self.end_headers()
for chunk in self.get_chunks(response_body):
self.wfile.write(chunk)
else:
self.send_error(501)
# noinspection PyShadowingBuiltins
def log_message(self, format, *args):
return
def get_chunks(self, data):
for i in range(0, len(data), self.chunk_size):
yield data[i:i + self.chunk_size]
@staticmethod
def api_config_page():
addon = xbmcaddon.Addon('plugin.video.youtube')
i18n = addon.getLocalizedString
api_key = addon.getSetting('youtube.api.key')
api_id = addon.getSetting('youtube.api.id')
api_secret = addon.getSetting('youtube.api.secret')
html = Pages().api_configuration.get('html')
css = Pages().api_configuration.get('css')
html = html.format(css=css, title=i18n(30634), api_key_head=i18n(30201), api_id_head=i18n(30202),
api_secret_head=i18n(30203), api_id_value=api_id, api_key_value=api_key,
api_secret_value=api_secret, submit=i18n(30630), header=i18n(30634))
return html
@staticmethod
def api_submit_page(updated_keys, enabled, footer):
addon = xbmcaddon.Addon('plugin.video.youtube')
i18n = addon.getLocalizedString
html = Pages().api_submit.get('html')
css = Pages().api_submit.get('css')
html = html.format(css=css, title=i18n(30634), updated=updated_keys, enabled=enabled, footer=footer, header=i18n(30634))
return html
class Pages(object):
api_configuration = {
'html':
u'<!doctype html>\n<html>\n'
u'<head>\n\t<meta charset="utf-8">\n'
u'\t<title>{title}</title>\n'
u'\t<style>\n{css}\t</style>\n'
u'</head>\n<body>\n'
u'\t<div class="center">\n'
u'\t<h5>{header}</h5>\n'
u'\t<form action="/api_submit" class="config_form">\n'
u'\t\t<label for="api_key">\n'
u'\t\t<span>{api_key_head}</span><input type="text" name="api_key" value="{api_key_value}" size="50"/>\n'
u'\t\t</label>\n'
u'\t\t<label for="api_id">\n'
u'\t\t<span>{api_id_head}</span><input type="text" name="api_id" value="{api_id_value}" size="50"/>\n'
u'\t\t</label>\n'
u'\t\t<label for="api_secret">\n'
u'\t\t<span>{api_secret_head}</span><input type="text" name="api_secret" value="{api_secret_value}" size="50"/>\n'
u'\t\t</label>\n'
u'\t\t<input type="submit" value="{submit}">\n'
u'\t</form>\n'
u'\t</div>\n'
u'</body>\n</html>',
'css':
u'body {\n'
u' background: #141718;\n'
u'}\n'
u'.center {\n'
u' margin: auto;\n'
u' width: 600px;\n'
u' padding: 10px;\n'
u'}\n'
u'.config_form {\n'
u' width: 575px;\n'
u' height: 145px;\n'
u' font-size: 16px;\n'
u' background: #1a2123;\n'
u' padding: 30px 30px 15px 30px;\n'
u' border: 5px solid #1a2123;\n'
u'}\n'
u'h5 {\n'
u' font-family: Arial, Helvetica, sans-serif;\n'
u' font-size: 16px;\n'
u' color: #fff;\n'
u' font-weight: 600;\n'
u' width: 575px;\n'
u' height: 20px;\n'
u' background: #0f84a5;\n'
u' padding: 5px 30px 5px 30px;\n'
u' border: 5px solid #0f84a5;\n'
u' margin: 0px;\n'
u'}\n'
u'.config_form input[type=submit],\n'
u'.config_form input[type=button],\n'
u'.config_form input[type=text],\n'
u'.config_form textarea,\n'
u'.config_form label {\n'
u' font-family: Arial, Helvetica, sans-serif;\n'
u' font-size: 16px;\n'
u' color: #fff;\n'
u'}\n'
u'.config_form label {\n'
u' display:block;\n'
u' margin-bottom: 10px;\n'
u'}\n'
u'.config_form label > span {\n'
u' display: inline-block;\n'
u' float: left;\n'
u' width: 150px;\n'
u'}\n'
u'.config_form input[type=text] {\n'
u' background: transparent;\n'
u' border: none;\n'
u' border-bottom: 1px solid #147a96;\n'
u' width: 400px;\n'
u' outline: none;\n'
u' padding: 0px 0px 0px 0px;\n'
u'}\n'
u'.config_form input[type=text]:focus {\n'
u' border-bottom: 1px dashed #0f84a5;\n'
u'}\n'
u'.config_form input[type=submit],\n'
u'.config_form input[type=button] {\n'
u' width: 150px;\n'
u' background: #141718;\n'
u' border: none;\n'
u' padding: 8px 0px 8px 10px;\n'
u' border-radius: 5px;\n'
u' color: #fff;\n'
u' margin-top: 10px\n'
u'}\n'
u'.config_form input[type=submit]:hover,\n'
u'.config_form input[type=button]:hover {\n'
u' background: #0f84a5;\n'
u'}\n'
}
api_submit = {
'html':
u'<!doctype html>\n<html>\n'
u'<head>\n\t<meta charset="utf-8">\n'
u'\t<title>{title}</title>\n'
u'\t<style>\n{css}\t</style>\n'
u'</head>\n<body>\n'
u'\t<div class="center">\n'
u'\t<h5>{header}</h5>\n'
u'\t<div class="content">\n'
u'\t\t<span>{updated}</span>\n'
u'\t\t<span>{enabled}</span>\n'
u'\t\t<span> </span>\n'
u'\t\t<span> </span>\n'
u'\t\t<span> </span>\n'
u'\t\t<span> </span>\n'
u'\t\t<div class="textcenter">\n'
u'\t\t\t<span><small>{footer}</small></span>\n'
u'\t\t</div>\n'
u'\t</div>\n'
u'\t</div>\n'
u'</body>\n</html>',
'css':
u'body {\n'
u' background: #141718;\n'
u'}\n'
u'.center {\n'
u' margin: auto;\n'
u' width: 600px;\n'
u' padding: 10px;\n'
u'}\n'
u'.textcenter {\n'
u' margin: auto;\n'
u' width: 600px;\n'
u' padding: 10px;\n'
u' text-align: center;\n'
u'}\n'
u'.content {\n'
u' width: 575px;\n'
u' height: 145px;\n'
u' background: #1a2123;\n'
u' padding: 30px 30px 15px 30px;\n'
u' border: 5px solid #1a2123;\n'
u'}\n'
u'h5 {\n'
u' font-family: Arial, Helvetica, sans-serif;\n'
u' font-size: 16px;\n'
u' color: #fff;\n'
u' font-weight: 600;\n'
u' width: 575px;\n'
u' height: 20px;\n'
u' background: #0f84a5;\n'
u' padding: 5px 30px 5px 30px;\n'
u' border: 5px solid #0f84a5;\n'
u' margin: 0px;\n'
u'}\n'
u'span {\n'
u' font-family: Arial, Helvetica, sans-serif;\n'
u' font-size: 16px;\n'
u' color: #fff;\n'
u' display: block;\n'
u' float: left;\n'
u' width: 575px;\n'
u'}\n'
u'small {\n'
u' font-family: Arial, Helvetica, sans-serif;\n'
u' font-size: 12px;\n'
u' color: #fff;\n'
u'}\n'
}
def get_http_server(address=None, port=None):
addon_id = 'plugin.video.youtube'
addon = xbmcaddon.Addon(addon_id)
address = address if address else addon.getSetting('kodion.http.listen')
address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '0.0.0.0'
port = int(port) if port else 50152
try:
server = BaseHTTPServer.HTTPServer((address, port), YouTubeRequestHandler)
return server
except socket.error as e:
logger.log_debug('HTTPServer: Failed to start |{address}:{port}| |{response}|'.format(address=address, port=port, response=str(e)))
xbmcgui.Dialog().notification(addon.getAddonInfo('name'), str(e),
xbmc.translatePath('special://home/addons/{0!s}/icon.png'.format(addon.getAddonInfo('id'))),
5000, False)
return None
def is_httpd_live(address=None, port=None):
addon_id = 'plugin.video.youtube'
addon = xbmcaddon.Addon(addon_id)
address = address if address else addon.getSetting('kodion.http.listen')
address = '127.0.0.1' if address == '0.0.0.0' else address
address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '127.0.0.1'
port = int(port) if port else 50152
url = 'http://{address}:{port}/ping'.format(address=address, port=port)
try:
response = requests.get(url)
result = response.status_code == 204
if not result:
logger.log_debug('HTTPServer: Ping |{address}:{port}| |{response}|'.format(address=address, port=port, response=response.status_code))
return result
except:
logger.log_debug('HTTPServer: Ping |{address}:{port}| |{response}|'.format(address=address, port=port, response='failed'))
return False
def get_client_ip_address(address=None, port=None):
addon_id = 'plugin.video.youtube'
addon = xbmcaddon.Addon(addon_id)
address = address if address else addon.getSetting('kodion.http.listen')
address = '127.0.0.1' if address == '0.0.0.0' else address
address = address if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', address) else '127.0.0.1'
port = int(port) if port else 50152
url = 'http://{address}:{port}/client_ip'.format(address=address, port=port)
response = requests.get(url)
ip_address = None
if response.status_code == 200:
response_json = response.json()
if response_json:
ip_address = response_json.get('ip')
return ip_address
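# --- Illustrative usage (not part of the original module) ---------------------
# A sketch of how a Kodi service add-on is expected to drive these helpers;
# the xbmc/xbmcaddon modules only exist inside Kodi, so this is not runnable
# standalone. Address and port are the module defaults.
def _demo_start_proxy(address='127.0.0.1', port=50152):
    import threading
    server = get_http_server(address=address, port=port)
    if server is None:
        return None
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    return server if is_httpd_live(address=address, port=port) else None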
| gpl-2.0 | 4,763,203,937,845,970,000 | 41.511905 | 146 | 0.519882 | false | 3.521118 | true | false | false |
InnovArul/codesmart | Assignments/Jul-Nov-2017/reinforcement_learning_udemy/rl/monte_carlo_soft_epsilon.py | 1 | 3861 | from __future__ import print_function
import numpy as np
from grid import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
import matplotlib.pyplot as plt
EPS = 1e-4
GAMMA = 0.9
ALL_POSSIBLE_ACTIONS = {'U', 'D', 'L', 'R'}
def random_action(a, eps=0.1):
p = np.random.random()
if(p < 1 - eps):
return a
else:
return np.random.choice(list(ALL_POSSIBLE_ACTIONS))
# monte carlo sampling - finding out optimal policy (policy iteration)
def play_game(grid, policy):
all_states = list(grid.actions.keys())
state = (2, 0)
# instead of taking random action at first step, consider the action which is probabilistic with the policy
a = random_action(policy[state])
grid.set_state(state)
states_actions_rewards = [(state, a, 0)] # action is corresponding to the one which is going to be taken
while True:
r = grid.move(a)
state = grid.current_state()
#print(prev_state)
# if game over, break the loop
if grid.game_over():
            states_actions_rewards.append((state, None, r)) # terminal state reached: record it with no outgoing action
break
else:
# collect the next action that we are gonna take and insert into the trace
a = random_action(policy[state])
states_actions_rewards.append((state, a, r))
# calculate the returns by working backwards from terminal state
G = 0
states_actions_returns = []
for i, state_action_reward in enumerate(reversed(states_actions_rewards)):
state, action, reward = state_action_reward
if i != 0:
states_actions_returns.append((state, action, G))
G = reward + GAMMA * G
states_actions_returns.reverse()
return states_actions_returns
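# A tiny standalone check of the backward return computation used in
# play_game: G_t = r_{t+1} + GAMMA * G_{t+1}. Illustration only.
def _demo_returns():
    rewards = [0, 0, 1]      # reward observed after each step; last is terminal
    G, returns = 0.0, []
    for r in reversed(rewards):
        G = r + GAMMA * G
        returns.append(G)
    returns.reverse()
    return returns           # -> [0.81, 0.9, 1.0]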
def max_dict(hash):
max_key = None
max_val = float('-inf')
for k in hash:
if(hash[k] > max_val):
max_key, max_val = k, hash[k]
return max_key, max_val
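# Quick sanity checks for the helpers above; illustration only.
def _demo_policy_helpers():
    assert max_dict({'U': 0.1, 'D': 0.5}) == ('D', 0.5)
    assert random_action('U', eps=0.0) == 'U'   # eps=0 -> always greedy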
if __name__ == '__main__':
#grid = standard_grid()
grid = negative_grid(-0.1)
print('grid')
print_values(grid.rewards, grid)
# init random policy
policy = {}
for s in grid.actions:
policy[s] = np.random.choice(list(ALL_POSSIBLE_ACTIONS))
print('policy')
print_policy(policy, grid)
# initialioze Q(s, a)
Q = {}
returns = {} # buffer to hold all the returns for a state during monte-carlo game plays
for s in grid.actions: # if state is non terminal
Q[s] = {}
for a in ALL_POSSIBLE_ACTIONS:
# for all the possible actions, initialize Q(s,a)
Q[s][a] = 0
returns[(s, a)] = []
# deltas
deltas = []
for sample in range(5000):
if sample % 500 == 0:
print(sample)
biggest_change = 0
# generate an episode and adapt Q(s, a)
states_actions_returns = play_game(grid, policy)
seen_states_actions = set()
for s, a, G in states_actions_returns:
key = (s, a)
            if key not in seen_states_actions:  # first-visit check on the (state, action) pair
old_q = Q[s][a]
returns[key].append(G)
Q[s][a] = np.mean(returns[key])
seen_states_actions.add(key)
biggest_change = max(biggest_change, abs(G - old_q))
deltas.append(biggest_change)
# policy improvement
for s in Q:
policy[s] = max_dict(Q[s])[0]
plt.plot(deltas)
plt.show()
V = {}
# policy improvement
for s in Q:
V[s] = max_dict(Q[s])[1]
print('grid')
print_values(V, grid)
print('policy')
print_policy(policy, grid)
| gpl-2.0 | -5,864,667,669,027,085,000 | 29.164063 | 121 | 0.573168 | false | 3.645892 | false | false | false |
naturalness/sensibility | sensibility/language/java/__init__.py | 1 | 6245 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import os
import sys
import token
from io import BytesIO
from keyword import iskeyword
from pathlib import Path
from typing import (
Any, AnyStr, Callable, IO, Iterable, Optional, Tuple, Union,
overload, cast
)
import javac_parser
from .. import Language, SourceSummary
from ...lexical_analysis import Lexeme, Location, Position, Token
from ...vocabulary import NoSourceRepresentationError, Vocabulary, Vind
here = Path(__file__).parent
class JavaVocabulary(Vocabulary):
"""
    A vocabulary that also stores each entry's source representation, so
    indices can be mapped back to source text.
"""
first_entry_num = len(Vocabulary.SPECIAL_ENTRIES)
def __init__(self, entries: Iterable[str], reprs: Iterable[str]) -> None:
super().__init__(entries)
# Create a look-up table for source representations.
# The special tokens <unk>, <s>, </s> have NO reprs, thus are not
# stored.
self._index2repr = tuple(reprs)
assert len(self._index2text) == self.first_entry_num + len(self._index2repr)
def to_source_text(self, idx: Vind) -> str:
if idx < self.first_entry_num:
raise NoSourceRepresentationError(idx)
return self._index2repr[idx - self.first_entry_num]
@staticmethod
def load() -> 'JavaVocabulary':
entries = []
reprs = []
# Load from a tab-separated-values file
with open(here / 'vocabulary.tsv') as vocab_file:
first_entry = JavaVocabulary.first_entry_num
for expected_num, line in enumerate(vocab_file, start=first_entry):
# src_repr -- source representation
num, entry, src_repr = line.rstrip().split()
assert expected_num == int(num)
entries.append(entry)
reprs.append(src_repr)
return JavaVocabulary(entries, reprs)
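# Expected shape of vocabulary.tsv (whitespace-separated; the rows below are
# illustrative, not the real file contents). Numbering starts at
# len(Vocabulary.SPECIAL_ENTRIES) because <unk>, <s> and </s> are not stored:
#
#   3   LPAREN      (
#   4   RPAREN      )
#   5   IDENTIFIER  id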
def to_str(source: Union[str, bytes, IO[bytes]]) -> str:
"""
Coerce an input format to a Unicode string.
"""
if isinstance(source, str):
return source
elif isinstance(source, bytes):
# XXX: Assume it's UTF-8 encoded!
return source.decode('UTF-8')
    else:
        # Assume a binary file-like object (the IO[bytes] case) and decode
        # its full contents
        return to_str(source.read())
class LazyVocabulary:
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, value):
if not hasattr(self, 'value'):
self.value = self.fn()
return self.value
class JavaToken(Token):
"""
HACK: javac_parser has some... interesting ideas about normalization.
so add a `_raw` field to the token.
"""
# TODO: fix with upstream (javac_parser) to return a sensible value for the normalized value
__slots__ = ('_raw',)
def __init__(self, *, _raw: str, name: str, value: str, start: Position, end: Position) -> None:
super().__init__(name=name, value=value, start=start, end=end)
self._raw = _raw
def __repr__(self) -> str:
cls = type(self).__name__
return (f"{cls}(_raw={self._raw!r}"
f"name={self.name!r}, value={self.value!r}, "
f"start={self.start!r}, end={self.end!r})")
class Java(Language):
"""
Defines the Java 8 programming language.
"""
extensions = {'.java'}
vocabulary = cast(Vocabulary, LazyVocabulary(JavaVocabulary.load))
@property
def java(self):
"""
Lazily start up the Java server. This decreases the chances of things
going horribly wrong when two seperate process initialize
the Java language instance around the same time.
"""
if not hasattr(self, '_java_server'):
self._java_server = javac_parser.Java()
# Py4j usually crashes as Python is cleaning up after exit() so
# decrement the servers' reference count to lessen the chance of
# that happening.
@atexit.register
def remove_reference():
del self._java_server
return self._java_server
def tokenize(self, source: Union[str, bytes, IO[bytes]]) -> Iterable[Token]:
tokens = self.java.lex(to_str(source))
# Each token is a tuple with the following structure
# (reproduced from javac_parser.py):
# 1. Lexeme type
# 2. Value (as it appears in the source file)
# 3. A 2-tuple of start line, start column
# 4. A 2-tuple of end line, end column
# 5. A whitespace-free representation of the value
for name, raw_value, start, end, normalized in tokens:
# Omit the EOF token, as it's only useful for the parser.
if name == 'EOF':
continue
# Take the NORMALIZED value, as Java allows unicode escapes in
# ARBITRARY tokens and then things get hairy here.
yield JavaToken(_raw=raw_value,
name=name, value=normalized,
start=Position(line=start[0], column=start[1]),
end=Position(line=end[0], column=end[1]))
def check_syntax(self, source: Union[str, bytes]) -> bool:
return self.java.get_num_parse_errors(to_str(source)) == 0
def summarize_tokens(self, source: Iterable[Token]) -> SourceSummary:
toks = [tok for tok in source if tok.name != 'EOF']
slines = set(line for tok in toks for line in tok.lines)
return SourceSummary(n_tokens=len(toks), sloc=len(slines))
def vocabularize_tokens(self, source: Iterable[Token]) -> Iterable[Tuple[Location, str]]:
for token in source:
yield token.location, token.name
java: Language = Java()
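# Minimal usage sketch (assumes the javac_parser Java server can start on
# this machine):
#
#     for tok in java.tokenize("class A { int x; }"):
#         print(tok.name, tok.value, tok.start)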
| apache-2.0 | 170,523,511,138,746,530 | 33.888268 | 100 | 0.620657 | false | 3.883706 | false | false | false |
postlund/pyatv | tests/support/test_dns.py | 1 | 10011 | """Unit tests for pyatv.support.dns"""
import io
import typing
import pytest
from pyatv.support import dns
@pytest.mark.parametrize(
"name,expected",
(
("_http._tcp.local", (None, "_http._tcp", "local")),
("foo._http._tcp.local", ("foo", "_http._tcp", "local")),
("foo.bar._http._tcp.local", ("foo.bar", "_http._tcp", "local")),
),
ids=("ptr", "no_dot", "with_dot"),
)
def test_happy_service_instance_names(name, expected):
assert dns.ServiceInstanceName.split_name(name) == expected
@pytest.mark.parametrize(
"name",
(
"_http.local",
"._tcp.local",
"_http.foo._tcp.local",
"_tcp._http.local",
),
ids=("no_proto", "no_service", "split", "reversed"),
)
def test_sad_service_instance_names(name):
with pytest.raises(ValueError):
dns.ServiceInstanceName.split_name(name)
# mapping is test_id: tuple(name, expected_raw)
encode_domain_names = {
"root": (".", b"\x00"),
"empty": ("", b"\x00"),
"example.com": ("example.com", b"\x07example\x03com\x00"),
"example.com_list": (["example", "com"], b"\x07example\x03com\x00"),
"unicode": ("Bücher.example", b"\x07B\xc3\xbccher\x07example\x00"),
"dotted_instance": (
"Dot.Within._http._tcp.example.local",
b"\x0aDot.Within\x05_http\x04_tcp\x07example\x05local\x00",
),
"dotted_instance_list": (
["Dot.Within", "_http", "_tcp", "example", "local"],
b"\x0aDot.Within\x05_http\x04_tcp\x07example\x05local\x00",
),
"truncated_ascii": (
(
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
".test"
),
(
b"\x3fabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijk"
b"\x04test"
b"\x00"
),
),
"truncated_unicode": (
(
# The 'a' is at the beginning to force the codepoints to be split at 63
# bytes. The next line is also at the right length to be below 88 characters
# even if each kana is counted as a double-width character. Additionally,
# this sequence is NF*D* normalized, not NFC (which is what is used for
# Net-Unicode).
"aがあいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめも"
".test"
),
(
b"\x3d"
b"a\xe3\x81\x8c\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a"
b"\xe3\x81\x8b\xe3\x81\x8d\xe3\x81\x8f\xe3\x81\x91\xe3\x81\x93\xe3\x81\x95"
b"\xe3\x81\x97\xe3\x81\x99\xe3\x81\x9b\xe3\x81\x9d\xe3\x81\x9f\xe3\x81\xa1"
b"\xe3\x81\xa4\xe3\x81\xa6"
b"\x04test"
b"\x00"
),
),
}
@pytest.mark.parametrize(
"name,expected_raw",
[pytest.param(*value, id=key) for key, value in encode_domain_names.items()],
)
def test_qname_encode(name, expected_raw):
assert dns.qname_encode(name) == expected_raw
# mapping is test_id: tuple(raw_name, offset, expected_name, expected_offset)
# If expected offset is None, it means len(raw_name), otherwise it's like an array index
# (positive is from the beginning, negative from the end)
decode_domain_names = {
"simple": (b"\x03foo\x07example\x03com\x00", 0, "foo.example.com", None),
"null": (b"\00", 0, "", None),
"compressed": (b"aaaa\x04test\x00\x05label\xC0\x04\xAB\xCD", 10, "label.test", -2),
# This case has two levels of compression
"multi_compressed": (
b"aaaa\x04test\x00\x05label\xC0\x04\x03foo\xC0\x0A\xAB\xCD",
18,
"foo.label.test",
-2,
),
# Taken straight from the Internationalized Domain name Wikipedia page
"idna": (b"\x0Dxn--bcher-kva\x07example\x00", 0, "bücher.example", None),
# Taken from issue #919. Apple puts a non-breaking space between "Apple" and "TV".
"nbsp": (
b"\x10Apple\xc2\xa0TV (4167)\x05local\x00",
0,
"Apple\xa0TV (4167).local",
None,
),
# This is a doozy of a test case; it's covering a couple different areas of Unicode,
# as well as exercising that DNS-SD allows dots in instance names.
"unicode": (
(
b"\x1d\xe5\xb1\x85\xe9\x96\x93 Apple\xc2\xa0TV. En Espa\xc3\xb1ol"
b"\x05local"
b"\x00"
),
0,
"居間 Apple TV. En Español.local",
None,
),
}
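# Byte-level walk-through of the "compressed" case above (explanatory note,
# not an extra test). Parsing starts at offset 10:
#
#   \x05 label  -> label "label"
#   \xC0 \x04   -> pointer (top two bits set) to offset 0x04, where
#                  "\x04test\x00" encodes "test" followed by the root
#
# so the decoded name is "label.test" and the stream position ends just
# after the two pointer bytes (hence the expected offset of -2).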
@pytest.mark.parametrize(
"raw_name,offset,expected_name,expected_offset",
[pytest.param(*value, id=key) for key, value in decode_domain_names.items()],
)
def test_domain_name_parsing(
raw_name: bytes,
offset: int,
expected_name: str,
expected_offset: typing.Optional[int],
):
with io.BytesIO(raw_name) as buffer:
buffer.seek(offset)
name = dns.parse_domain_name(buffer)
assert name == expected_name
if expected_offset is None:
assert buffer.tell() == len(raw_name)
else:
# if expected_offset is positive, this will wrap around to the beginning, if
# it's negative it won't.
raw_len = len(raw_name)
assert buffer.tell() == (raw_len + expected_offset) % raw_len
# mapping is test_id: tuple(encoded_data, expected_data, expected_offset)
# If expected offset is None, it means len(raw_name), otherwise it's like an array index
# (positive is from the beginning, negative from the end)
decode_strings = {
"null": (b"\x00", b"", None),
# 63 is significant because that's the max length for a domain label, but not a
# character-string (they have similar encodings).
"len_63": (b"\x3F" + (63 * b"0"), (63 * b"0"), None),
# For similar reasons as 63, 64 is significant because it would set only one of the
# flag bits for name compression if domain-name encoding is assumed.
"len_64": (b"\x40" + (64 * b"0"), (64 * b"0"), None),
# Ditto for 128, but the other flag
"len_128": (b"\x80" + (128 * b"0"), (128 * b"0"), None),
# ...and 192 is both flags
"len_192": (b"\xC0" + (192 * b"0"), (192 * b"0"), None),
# 255 is the max length a character-string can be
"len_255": (b"\xFF" + (255 * b"0"), (255 * b"0"), None),
"trailing": (b"\x0A" + (10 * b"2") + (17 * b"9"), (10 * b"2"), -17),
}
@pytest.mark.parametrize(
"encoded_data,expected_data,expected_offset",
[pytest.param(*value, id=key) for key, value in decode_strings.items()],
)
def test_string_parsing(
encoded_data: bytes,
expected_data: bytes,
expected_offset: typing.Optional[int],
):
with io.BytesIO(encoded_data) as buffer:
name = dns.parse_string(buffer)
assert name == expected_data
if expected_offset is None:
assert buffer.tell() == len(encoded_data)
else:
# if expected_offset is positive, this will wrap around to the beginning, if
# it's negative it won't.
data_len = len(encoded_data)
assert buffer.tell() == (data_len + expected_offset) % data_len
def test_dns_sd_txt_parse_single():
"""Test that a TXT RDATA section with one key can be parsed properly."""
data = b"\x07foo=bar"
extra_data = data + b"\xDE\xAD\xBE\xEF" * 3
with io.BytesIO(extra_data) as buffer:
txt_dict = dns.parse_txt_dict(buffer, len(data))
assert buffer.tell() == len(data)
assert txt_dict == {"foo": b"bar"}
def test_dns_sd_txt_parse_multiple():
"""Test that a TXT RDATA section with multiple keys can be parsed properly."""
data = b"\x07foo=bar\x09spam=eggs"
extra_data = data + b"\xDE\xAD\xBE\xEF" * 2
with io.BytesIO(extra_data) as buffer:
txt_dict = dns.parse_txt_dict(buffer, len(data))
assert buffer.tell() == len(data)
assert txt_dict == {"foo": b"bar", "spam": b"eggs"}
def test_dns_sd_txt_parse_binary():
"""Test that a TXT RDATA section with a binary value can be parsed properly."""
# 0xfeed can't be decoded as UTF-8 or ASCII, so it'll thrown an error if it's not
# being treated as binary data.
data = b"\x06foo=\xFE\xED"
extra_data = data + b"\xDE\xAD\xBE\xEF" * 3
with io.BytesIO(extra_data) as buffer:
txt_dict = dns.parse_txt_dict(buffer, len(data))
assert buffer.tell() == len(data)
assert txt_dict == {"foo": b"\xFE\xED"}
def test_dns_sd_txt_parse_long():
"""Test that a TXT RDATA section with a long value can be parsed properly."""
# If TXT records are being parsed the same way domain names are, this won't work as
# the data is too long to fit in a label.
data = b"\xCCfoo=" + b"\xCA\xFE" * 100
extra_data = data + b"\xDE\xAD\xBE\xEF" * 3
with io.BytesIO(extra_data) as buffer:
txt_dict = dns.parse_txt_dict(buffer, len(data))
assert buffer.tell() == len(data)
assert txt_dict == {"foo": b"\xCA\xFE" * 100}
@pytest.mark.parametrize(
"record_type,data,expected",
[
(dns.QueryType.A, b"\x0A\x00\x00\x2A", "10.0.0.42"),
(dns.QueryType.PTR, b"\x03foo\x07example\x03com\x00", "foo.example.com"),
(dns.QueryType.TXT, b"\x07foo=bar", {"foo": b"bar"}),
(
dns.QueryType.SRV,
b"\x00\x0A\x00\x00\x00\x50\x03foo\x07example\x03com\x00",
{
"priority": 10,
"weight": 0,
"port": 80,
"target": "foo.example.com",
},
),
],
# Use the name of the record type as the test id
ids=(
t.name
for t in (
dns.QueryType.A,
dns.QueryType.PTR,
dns.QueryType.TXT,
dns.QueryType.SRV,
)
),
)
def test_parse_rdata(
record_type: dns.QueryType,
data: bytes,
expected: typing.Any,
):
with io.BytesIO(data) as buffer:
assert record_type.parse_rdata(buffer, len(data)) == expected
assert buffer.tell() == len(data)
| mit | 8,634,286,590,532,209,000 | 34.715827 | 88 | 0.591903 | false | 3.068294 | true | false | false |
Ale-/civics | apps/models/migrations/0028_auto_20170924_1153.py | 1 | 1318 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-09-24 11:53
from __future__ import unicode_literals
import apps.models.utils
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('models', '0027_auto_20170922_1554'),
]
operations = [
migrations.AlterField(
model_name='event',
name='image',
field=models.ImageField(blank=True, help_text='Sube una imagen representativa del evento haciendo click en la imagen inferior. La imagen ha de tener ancho mínimo de 300 píxeles y máximo de 1920, y altura mínima de 300 píxeles y máxima de 1280. Formatos permitidos: PNG, JPG, JPEG.', upload_to=apps.models.utils.RenameCivicsImage('images/events/'), verbose_name='Imagen'),
),
migrations.AlterField(
model_name='initiative',
name='image',
field=models.ImageField(blank=True, help_text='Sube una imagen representativa de la iniciativa haciendo click en la imagen inferior. La imagen ha de tener ancho mínimo de 300 píxeles y máximo de 1920, y altura mínima de 300 píxeles y máxima de 1280. Formatos permitidos: PNG, JPG, JPEG.', upload_to=apps.models.utils.RenameCivicsImage('images/initiatives/'), verbose_name='Imagen'),
),
]
| gpl-3.0 | -8,622,433,261,067,904,000 | 49.230769 | 394 | 0.688361 | false | 3.306329 | false | false | false |
baixuexue123/djmo | utils/csv_response_.py | 1 | 2274 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import csv
import codecs
import cStringIO
from django.http import StreamingHttpResponse
from django.views.generic import View
"""
StreamingHttpResponse can produce a large file quickly and with little
memory overhead by streaming it to the client.
"""
class Echo(object):
"""An object that implements just the write method of the file-like interface."""
def write(self, value):
"""Write the value by returning it, instead of storing in a buffer."""
return value
class UnicodeWriter(object):
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwargs):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwargs)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([handle_column(s) for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
value = self.stream.write(data)
# empty queue
self.queue.truncate(0)
return value
def writerows(self, rows):
for row in rows:
self.writerow(row)
class ExampleView(View):
    headers = ('一些', '表头')  # intentionally non-ASCII ("some", "headers") to exercise UnicodeWriter
def get(self, request):
result = (
('第一行', '数据1'),
('第二行', '数据2')
)
echoer = Echo()
writer = UnicodeWriter(echoer)
def csv_iterator():
yield codecs.BOM_UTF8
yield writer.writerow(self.headers)
for column in result:
yield writer.writerow(column)
response = StreamingHttpResponse(
(row for row in csv_iterator()),
content_type="text/csv;charset=utf-8"
)
response['Content-Disposition'] = 'attachment;filename="example.csv"'
return response
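# A minimal sketch of wiring the view up (assumed URLconf, not part of this
# module):
#
#     from django.conf.urls import url
#     urlpatterns = [
#         url(r'^export/example\.csv$', ExampleView.as_view()),
#     ]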
| mit | -4,467,090,691,717,978,600 | 27.25641 | 85 | 0.599819 | false | 3.985533 | false | false | false |
YaoQ/faceplusplus-demo | hello.py | 1 | 2906 | #!/usr/bin/env python2
# Import system libraries and define helper functions
import time
import sys
import os
import os.path
from pprint import pformat
# First import the API class from the SDK
from facepp import API
from facepp import File
def print_result(hint, result):
def encode(obj):
if type(obj) is unicode:
return obj.encode('utf-8')
if type(obj) is dict:
return {encode(k): encode(v) for (k, v) in obj.iteritems()}
if type(obj) is list:
return [encode(i) for i in obj]
return obj
print hint
result = encode(result)
print '\n'.join([' ' + i for i in pformat(result, width = 75).split('\n')])
def init():
fdir = os.path.dirname(__file__)
with open(os.path.join(fdir, 'apikey.cfg')) as f:
exec(f.read())
srv = locals().get('SERVER')
return API(API_KEY, API_SECRET, srv = srv)
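# init() executes apikey.cfg as Python, so a minimal config file is assumed
# to look like this (placeholder values):
#
#     API_KEY = 'your_api_key'
#     API_SECRET = 'your_api_secret'
#     SERVER = 'https://api.example.com/v2'  # optional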
# In this tutorial, you will learn how to call Face ++ APIs and implement a
# simple App which could recognize a face image in 3 candidates.
api = init()
# Here are the person names and their face images
IMAGE_DIR = 'http://cn.faceplusplus.com/static/resources/python_demo/'
PERSONS = [
('Jim Parsons', IMAGE_DIR + '1.jpg'),
('Leonardo DiCaprio', IMAGE_DIR + '2.jpg'),
('Andy Liu', IMAGE_DIR + '3.jpg')
]
TARGET_IMAGE = IMAGE_DIR + '4.jpg'
# Step 1: Detect faces in the 3 pictures and find out their positions and
# attributes
FACES = {name: api.detection.detect(url = url)
for name, url in PERSONS}
for name, face in FACES.iteritems():
print_result(name, face)
# Step 2: create persons using the face_id
for name, face in FACES.iteritems():
rst = api.person.create(
person_name = name, face_id = face['face'][0]['face_id'])
print_result('create person {}'.format(name), rst)
# Step 3: create a new group and add those persons in it
rst = api.group.create(group_name = 'standard')
print_result('create group', rst)
rst = api.group.add_person(group_name = 'standard', person_name = FACES.iterkeys())
print_result('add these persons to group', rst)
# Step 4: train the model
rst = api.train.identify(group_name = 'standard')
print_result('train', rst)
# wait for training to complete
rst = api.wait_async(rst['session_id'])
print_result('wait async', rst)
# Step 5: recognize face in a new image
rst = api.recognition.identify(group_name = 'standard', url = TARGET_IMAGE)
print_result('recognition result', rst)
print '=' * 60
print 'The person with highest confidence:', \
rst['face'][0]['candidate'][0]['person_name']
# Finally, delete the persons and group because they are no longer needed
api.group.delete(group_name = 'standard')
api.person.delete(person_name = FACES.iterkeys())
# Congratulations! You have finished this tutorial, and you can continue
# reading our API document and start writing your own App using Face++ API!
# Enjoy :)
| gpl-2.0 | -6,944,040,991,084,908,000 | 31.288889 | 83 | 0.67309 | false | 3.390898 | false | false | false |
noskill/virt-manager | virtManager/connect.py | 1 | 15892 | #
# Copyright (C) 2006, 2013 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import os
import logging
import socket
from gi.repository import Gio
from gi.repository import GObject
from gi.repository import Gtk
from . import uiutil
from .baseclass import vmmGObjectUI
(HV_QEMU,
HV_XEN,
HV_LXC,
HV_QEMU_SESSION,
HV_BHYVE) = range(5)
(CONN_SSH,
CONN_TCP,
CONN_TLS) = range(3)
def current_user():
try:
import getpass
return getpass.getuser()
except:
return ""
def default_conn_user(conn):
if conn == CONN_SSH:
return "root"
return current_user()
class vmmConnect(vmmGObjectUI):
__gsignals__ = {
"completed": (GObject.SignalFlags.RUN_FIRST, None, [str, bool]),
"cancelled": (GObject.SignalFlags.RUN_FIRST, None, []),
}
def __init__(self):
vmmGObjectUI.__init__(self, "connect.ui", "vmm-open-connection")
self.builder.connect_signals({
"on_hypervisor_changed": self.hypervisor_changed,
"on_transport_changed": self.transport_changed,
"on_hostname_combo_changed": self.hostname_combo_changed,
"on_connect_remote_toggled": self.connect_remote_toggled,
"on_username_entry_changed": self.username_changed,
"on_hostname_changed": self.hostname_changed,
"on_cancel_clicked": self.cancel,
"on_connect_clicked": self.open_conn,
"on_vmm_open_connection_delete_event": self.cancel,
})
self.browser = None
self.browser_sigs = []
# Set this if we can't resolve 'hostname.local': means avahi
        # probably isn't configured correctly, and we should strip .local
self.can_resolve_local = None
# Plain hostname resolve failed, means we should just use IP addr
self.can_resolve_hostname = None
self.set_initial_state()
self.dbus = None
self.avahiserver = None
try:
self.dbus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None)
self.avahiserver = Gio.DBusProxy.new_sync(self.dbus, 0, None,
"org.freedesktop.Avahi", "/",
"org.freedesktop.Avahi.Server", None)
except Exception, e:
logging.debug("Couldn't contact avahi: %s", str(e))
self.reset_state()
@staticmethod
def default_uri(always_system=False):
if os.path.exists('/var/lib/xen'):
if (os.path.exists('/dev/xen/evtchn') or
os.path.exists("/proc/xen")):
return 'xen:///'
if (os.path.exists("/usr/bin/qemu") or
os.path.exists("/usr/bin/qemu-kvm") or
os.path.exists("/usr/bin/kvm") or
os.path.exists("/usr/libexec/qemu-kvm")):
if always_system or os.geteuid() == 0:
return "qemu:///system"
else:
return "qemu:///session"
return None
def cancel(self, ignore1=None, ignore2=None):
logging.debug("Cancelling open connection")
self.close()
self.emit("cancelled")
return 1
def close(self, ignore1=None, ignore2=None):
logging.debug("Closing open connection")
self.topwin.hide()
if self.browser:
for obj, sig in self.browser_sigs:
obj.disconnect(sig)
self.browser_sigs = []
self.browser = None
def show(self, parent, reset_state=True):
logging.debug("Showing open connection")
if reset_state:
self.reset_state()
self.topwin.set_transient_for(parent)
self.topwin.present()
self.start_browse()
def _cleanup(self):
pass
def set_initial_state(self):
self.widget("connect").grab_default()
combo = self.widget("hypervisor")
model = Gtk.ListStore(str)
model.append(["QEMU/KVM"])
model.append(["Xen"])
model.append(["LXC (Linux Containers)"])
model.append(["QEMU/KVM user session"])
if self.config.with_bhyve:
model.append(["Bhyve"])
combo.set_model(model)
uiutil.set_combo_text_column(combo, 0)
combo = self.widget("transport")
model = Gtk.ListStore(str)
model.append(["SSH"])
model.append(["TCP (SASL, Kerberos)"])
model.append(["SSL/TLS with certificates"])
combo.set_model(model)
uiutil.set_combo_text_column(combo, 0)
# Hostname combo box entry
hostListModel = Gtk.ListStore(str, str, str)
host = self.widget("hostname")
host.set_model(hostListModel)
host.set_entry_text_column(2)
hostListModel.set_sort_column_id(2, Gtk.SortType.ASCENDING)
def reset_state(self):
self.set_default_hypervisor()
self.widget("transport").set_active(0)
self.widget("autoconnect").set_sensitive(True)
self.widget("autoconnect").set_active(True)
self.widget("hostname").get_model().clear()
self.widget("hostname").get_child().set_text("")
self.widget("connect-remote").set_active(False)
self.widget("username-entry").set_text("")
self.connect_remote_toggled(self.widget("connect-remote"))
self.populate_uri()
def is_remote(self):
# Whether user is requesting a remote connection
return self.widget("connect-remote").get_active()
def set_default_hypervisor(self):
default = self.default_uri(always_system=True)
if not default or default.startswith("qemu"):
self.widget("hypervisor").set_active(HV_QEMU)
elif default.startswith("xen"):
self.widget("hypervisor").set_active(HV_XEN)
def add_service(self, interface, protocol, name, typ, domain, flags):
ignore = flags
try:
# Async service resolving
res = self.avahiserver.ServiceResolverNew("(iisssiu)",
interface, protocol,
name, typ, domain, -1, 0)
resint = Gio.DBusProxy.new_sync(self.dbus, 0, None,
"org.freedesktop.Avahi", res,
"org.freedesktop.Avahi.ServiceResolver",
None)
def cb(proxy, sender, signal, args):
ignore = proxy
ignore = sender
if signal == "Found":
self.add_conn_to_list(*args)
sig = resint.connect("g-signal", cb)
self.browser_sigs.append((resint, sig))
except Exception, e:
logging.exception(e)
def remove_service(self, interface, protocol, name, typ, domain, flags):
ignore = domain
ignore = protocol
ignore = flags
ignore = interface
ignore = typ
try:
model = self.widget("hostname").get_model()
name = str(name)
for row in model:
if row[0] == name:
model.remove(row.iter)
except Exception, e:
logging.exception(e)
def add_conn_to_list(self, interface, protocol, name, typ, domain,
host, aprotocol, address, port, text, flags):
ignore = domain
ignore = protocol
ignore = flags
ignore = interface
ignore = typ
ignore = text
ignore = aprotocol
ignore = port
try:
model = self.widget("hostname").get_model()
for row in model:
if row[2] == str(name):
# Already present in list
return
host = self.sanitize_hostname(str(host))
model.append([str(address), str(host), str(name)])
except Exception, e:
logging.exception(e)
def start_browse(self):
if self.browser or not self.avahiserver:
return
# Call method to create new browser, and get back an object path for it.
interface = -1 # physical interface to use? -1 is unspec
protocol = 0 # 0 = IPv4, 1 = IPv6, -1 = Unspecified
service = '_libvirt._tcp' # Service name to poll for
flags = 0 # Extra option flags
domain = "" # Domain to browse in. NULL uses default
bpath = self.avahiserver.ServiceBrowserNew("(iissu)",
interface, protocol,
service, domain, flags)
# Create browser interface for the new object
self.browser = Gio.DBusProxy.new_sync(self.dbus, 0, None,
"org.freedesktop.Avahi", bpath,
"org.freedesktop.Avahi.ServiceBrowser",
None)
def cb(proxy, sender, signal, args):
ignore = proxy
ignore = sender
if signal == "ItemNew":
self.add_service(*args)
elif signal == "ItemRemove":
self.remove_service(*args)
self.browser_sigs.append((self.browser,
self.browser.connect("g-signal", cb)))
def hostname_combo_changed(self, src):
model = src.get_model()
txt = src.get_child().get_text()
row = None
for currow in model:
if currow[2] == txt:
row = currow
break
if not row:
return
ip = row[0]
host = row[1]
entry = host
if not entry:
entry = ip
self.widget("hostname").get_child().set_text(entry)
def hostname_changed(self, src_ignore):
self.populate_uri()
def hypervisor_changed(self, src):
is_session = (src.get_active() == HV_QEMU_SESSION)
uiutil.set_grid_row_visible(
self.widget("session-warning-box"), is_session)
uiutil.set_grid_row_visible(
self.widget("connect-remote"), not is_session)
uiutil.set_grid_row_visible(
self.widget("username-entry"), not is_session)
uiutil.set_grid_row_visible(
self.widget("hostname"), not is_session)
uiutil.set_grid_row_visible(
self.widget("transport"), not is_session)
if is_session:
self.widget("connect-remote").set_active(False)
self.populate_uri()
def username_changed(self, src_ignore):
self.populate_uri()
def connect_remote_toggled(self, src_ignore):
is_remote = self.is_remote()
self.widget("hostname").set_sensitive(is_remote)
self.widget("transport").set_sensitive(is_remote)
self.widget("autoconnect").set_active(not is_remote)
self.widget("username-entry").set_sensitive(is_remote)
self.populate_default_user()
self.populate_uri()
def transport_changed(self, src_ignore):
self.populate_default_user()
self.populate_uri()
def populate_uri(self):
uri = self.generate_uri()
self.widget("uri-entry").set_text(uri)
def populate_default_user(self):
conn = self.widget("transport").get_active()
default_user = default_conn_user(conn)
self.widget("username-entry").set_text(default_user)
def generate_uri(self):
hv = self.widget("hypervisor").get_active()
conn = self.widget("transport").get_active()
host = self.widget("hostname").get_child().get_text().strip()
user = self.widget("username-entry").get_text()
is_remote = self.is_remote()
hvstr = ""
if hv == HV_XEN:
hvstr = "xen"
elif hv == HV_QEMU or hv == HV_QEMU_SESSION:
hvstr = "qemu"
elif hv == HV_BHYVE:
hvstr = "bhyve"
else:
hvstr = "lxc"
addrstr = ""
if user:
addrstr += user + "@"
addrstr += host
hoststr = ""
if not is_remote:
hoststr = ":///"
else:
if conn == CONN_TLS:
hoststr = "+tls://"
if conn == CONN_SSH:
hoststr = "+ssh://"
if conn == CONN_TCP:
hoststr = "+tcp://"
hoststr += addrstr + "/"
uri = hvstr + hoststr
if hv in (HV_QEMU, HV_BHYVE):
uri += "system"
elif hv == HV_QEMU_SESSION:
uri += "session"
return uri
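    # Illustrative outputs of generate_uri() (derived from the logic above):
    #   local QEMU/KVM system instance -> qemu:///system
    #   remote QEMU/KVM over SSH       -> qemu+ssh://root@host/system
    #   remote Xen over TLS            -> xen+tls://user@host/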
def validate(self):
is_remote = self.is_remote()
host = self.widget("hostname").get_child().get_text()
if is_remote and not host:
return self.err.val_err(_("A hostname is required for "
"remote connections."))
return True
def open_conn(self, ignore):
if not self.validate():
return
auto = False
if self.widget("autoconnect").get_sensitive():
auto = self.widget("autoconnect").get_active()
uri = self.generate_uri()
logging.debug("Generate URI=%s, auto=%s", uri, auto)
self.close()
self.emit("completed", uri, auto)
def sanitize_hostname(self, host):
if host == "linux" or host == "localhost":
host = ""
if host.startswith("linux-"):
tmphost = host[6:]
try:
long(tmphost)
host = ""
except ValueError:
pass
if host:
host = self.check_resolve_host(host)
return host
def check_resolve_host(self, host):
# Try to resolve hostname
#
# Avahi always uses 'hostname.local', but for some reason
# fedora 12 out of the box can't resolve '.local' names
# Attempt to resolve the name. If it fails, remove .local
# if present, and try again
if host.endswith(".local"):
if self.can_resolve_local is False:
host = host[:-6]
elif self.can_resolve_local is None:
try:
socket.getaddrinfo(host, None)
except:
logging.debug("Couldn't resolve host '%s'. Stripping "
"'.local' and retrying.", host)
self.can_resolve_local = False
host = self.check_resolve_host(host[:-6])
else:
self.can_resolve_local = True
else:
if self.can_resolve_hostname is False:
host = ""
elif self.can_resolve_hostname is None:
try:
socket.getaddrinfo(host, None)
except:
logging.debug("Couldn't resolve host '%s'. Disabling "
"host name resolution, only using IP addr",
host)
self.can_resolve_hostname = False
else:
self.can_resolve_hostname = True
return host
| gpl-2.0 | 1,945,841,404,944,853,200 | 32.527426 | 80 | 0.543418 | false | 4.083248 | false | false | false |
sravangottapu/Ip_Scanner | ip_scanner.py | 1 | 1187 | import threading
import time
import os
import re
import shlex
import _thread
import sys
import subprocess
alive = True
f = open("list.txt","w")
class myThread(threading.Thread):
def __init__(self,var,ip):
threading.Thread.__init__(self)
self.var = var
self.ip = ip
def run(self):
if(alive):
ping_ip(self.var,self.ip)
#self._stop.set()
print("Thread Exited")
def ping_ip(cmd,ip):
try:
output = subprocess.check_output(cmd)
f.write(ip)
f.write("\n")
print(ip + "Reachable")
except:
print(ip + "Not Reachable")
first = input("Enter the first Ip")
second = input("Enter the second Ip")
first = int(first)
second = int(second)
ping = "ping "
c1 = "-c1 "
start = time.time()
cmd_no_ip = ping + c1
t_end = time.time() + 2
for i in range(first,second):
ip = "172.16.114."+str(i)
cmd = cmd_no_ip + ip
cmd = shlex.split(cmd)
try:
thread1 = myThread(cmd,ip)
thread1.start()
thread1.join(1)
except:
print("Not thread" + ip)
end = time.time()
end = end - start
alive = False
f.close()
print("Total time: " + str(end))
sys.exit()
| gpl-3.0 | 6,060,178,347,985,288,000 | 21.396226 | 45 | 0.581297 | false | 3.131926 | false | false | false |
mbj4668/pyang | pyang/plugins/threegpp.py | 1 | 11115 | """3GPP usage guidelines plugin
See 3GPP TS 32.160 clause 6.2
Copyright Ericsson 2020
Author [email protected]
Revision 2020-11-25
Checks implemented
6.2.1.2 Module name starts with _3gpp-
6.2.1.3 namespace pattern urn:3gpp:sa5:<module-name>
6.2.1.4-a prefix ends with 3gpp
6.2.1.4-b prefix.length <= 10 char
6.2.1.5 yang 1.1 missing
6.2.1.5 yang 1.1 incorrect
6.2.1.6-a anydata
6.2.1.6-b anyxml
6.2.1.6-c rpc
6.2.1.6-d deviation
6.2.1.9 description not needed for enum, bit, choice, container,
leaf-list, leaf, typedef, grouping, augment, uses
6.2.1.b-a module-description-missing
6.2.1.b-b module-organization-missing
6.2.1.b-c module-organization includes 3gpp
6.2.1.b-d module-contact-missing
6.2.1.b-d module-contact-incorrect
6.2.1.c module-reference-missing
6.2.1.c module-reference-incorrect
6.2.1.d-a module-revision-missing
6.2.1.d-a module-revision-reference-missing
6.2.1.e default meaning
6.2.1.f-a linelength > 80
6.2.1.f-b no-tabs
6.2.1.f-c no-strange-chars
6.2.1.f-d no-CR-chars
6.2-a no-containers
"""
import optparse
import re
import io
import sys
from pyang import plugin
from pyang import statements
from pyang import error
from pyang.error import err_add
from pyang.plugins import lint
def pyang_plugin_init():
plugin.register_plugin(THREEGPPlugin())
class THREEGPPlugin(lint.LintPlugin):
def __init__(self):
lint.LintPlugin.__init__(self)
self.modulename_prefixes = ['_3gpp']
def add_opts(self, optparser):
optlist = [
optparse.make_option("--3gpp",
dest="threegpp",
action="store_true",
help="Validate the module(s) according to " \
"3GPP rules."),
]
optparser.add_options(optlist)
def setup_ctx(self, ctx):
if not ctx.opts.threegpp:
return
self._setup_ctx(ctx)
error.add_error_code(
'3GPP_BAD_NAMESPACE_VALUE', 3,
'3GPP: the namespace should be urn:3gpp:sa5:%s')
statements.add_validation_fun(
'grammar', ['namespace'],
lambda ctx, s: self.v_chk_namespace(ctx, s))
error.add_error_code(
'3GPP_BAD_PREFIX_VALUE', 3,
'3GPP: the prefix should end with 3gpp')
error.add_error_code(
'3GPP_TOO_LONG_PREFIX', 3,
'3GPP: the prefix should not be longer than 13 characters')
statements.add_validation_fun(
'grammar', ['prefix'],
lambda ctx, s: self.v_chk_prefix(ctx, s))
error.add_error_code(
'3GPP_BAD_YANG_VERSION', 3,
'3GPP: the yang-version should be 1.1')
statements.add_validation_fun(
'grammar', ['yang-version'],
lambda ctx, s: self.v_chk_yang_version(ctx, s))
# check that yang-version is present. If not,
# it defaults to 1. which is bad for 3GPP
statements.add_validation_fun(
'grammar', ['module'],
lambda ctx, s: self.v_chk_yang_version_present(ctx, s))
error.add_error_code(
'3GPP_STATEMENT_NOT_ALLOWED', 3,
('3GPP: YANG statements anydata, anyxml, deviation, rpc '
'should not be used'))
statements.add_validation_fun(
'grammar', ['anydata' , 'anyxml' , 'deviation' , 'rpc'],
lambda ctx, s: self.v_chk_not_allowed_statements(ctx, s))
error.add_error_code(
'3GPP_BAD_ORGANIZATION', 3,
'3GPP: organization statement must include 3GPP')
statements.add_validation_fun(
'grammar', ['organization'],
lambda ctx, s: self.v_chk_organization(ctx, s))
error.add_error_code(
'3GPP_BAD_CONTACT', 3,
'3GPP: incorrect contact statement')
statements.add_validation_fun(
'grammar', ['contact'],
lambda ctx, s: self.v_chk_contact(ctx, s))
error.add_error_code(
'3GPP_MISSING_MODULE_REFERENCE', 3,
'3GPP: the module should have a reference substatement')
statements.add_validation_fun(
'grammar', ['module'],
lambda ctx, s: self.v_chk_module_reference_present(ctx, s))
error.add_error_code(
'3GPP_BAD_MODULE_REFERENCE', 3,
'3GPP: the module\'s reference substatement is incorrect')
statements.add_validation_fun(
'grammar', ['reference'],
lambda ctx, s: self.v_chk_module_reference(ctx, s))
error.add_error_code(
'3GPP_TAB_IN_FILE', 3,
'3GPP: tab characters should not be used in YANG modules')
error.add_error_code(
'3GPP_WHITESPACE_AT_END_OF_LINE', 3,
'3GPP: extra whitespace should not be added at the end of the line')
error.add_error_code(
'3GPP_LONG_LINE', 3,
'3GPP: line longer than 80 characters')
error.add_error_code(
'3GPP_CR_IN_FILE', 3,
('3GPP: Carriage-return characters should not be used. '
'End-of-line should be just one LF character'))
error.add_error_code(
'3GPP_NON_ASCII', 4,
'3GPP: the module should only use ASCII characters')
statements.add_validation_fun(
'grammar', ['module'],
lambda ctx, s: self.v_chk_3gpp_format(ctx, s))
error.add_error_code(
'3GPP_LIMITED_CONTAINER_USE', 4,
('3GPP: containers should only be used to contain the attributes '
'of a class'))
statements.add_validation_fun(
'grammar', ['container'],
lambda ctx, s: self.v_chk_limited_container_use(ctx, s))
def pre_validate_ctx(self, ctx, modules):
if ctx.opts.threegpp:
ctx.canonical = False
return
def v_chk_namespace(self, ctx, stmt):
r = 'urn:3gpp:sa5:' + stmt.i_module.arg +'$'
if re.match(r, stmt.arg) is None:
err_add(ctx.errors, stmt.pos, '3GPP_BAD_NAMESPACE_VALUE',
stmt.i_module.arg)
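    # Example of the expected pattern (illustrative module name): a module
    # called _3gpp-common-top must declare exactly
    #     urn:3gpp:sa5:_3gpp-common-top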
def v_chk_prefix(self, ctx, stmt):
if stmt.parent.keyword != 'module' :
return
r = '.+3gpp$'
if re.match(r, stmt.arg) is None:
err_add(ctx.errors, stmt.pos, '3GPP_BAD_PREFIX_VALUE',())
if len(stmt.arg) > 13 :
err_add(ctx.errors, stmt.pos, '3GPP_TOO_LONG_PREFIX',())
def v_chk_yang_version_present(self, ctx, stmt):
        yang_version_present = False
        # use a separate loop variable so the module statement's position is
        # still available for the error report
        for s in stmt.substmts:
            if s.keyword == 'yang-version':
                yang_version_present = True
        if not yang_version_present:
            err_add(ctx.errors, stmt.pos, '3GPP_BAD_YANG_VERSION', ())
def v_chk_yang_version(self, ctx, stmt):
r = '1.1'
if re.match(r, stmt.arg) is None:
err_add(ctx.errors, stmt.pos, '3GPP_BAD_YANG_VERSION',())
def v_chk_not_allowed_statements(self, ctx, stmt):
err_add(ctx.errors, stmt.pos, '3GPP_STATEMENT_NOT_ALLOWED',())
def v_chk_organization(self, ctx, stmt):
r = '3GPP'
if re.search(r, stmt.arg, re.IGNORECASE) is None:
err_add(ctx.errors, stmt.pos, '3GPP_BAD_ORGANIZATION',())
def v_chk_contact(self, ctx, stmt):
if stmt.arg != ('https://www.3gpp.org/DynaReport/'
'TSG-WG--S5--officials.htm?Itemid=464'):
err_add(ctx.errors, stmt.pos, '3GPP_BAD_CONTACT',())
def v_chk_module_reference_present(self, ctx, stmt):
        module_reference_present = False
        for s in stmt.substmts:
            if s.keyword == 'reference':
                module_reference_present = True
        if not module_reference_present:
            err_add(ctx.errors, stmt.pos, '3GPP_MISSING_MODULE_REFERENCE', ())
def v_chk_module_reference(self, ctx, stmt):
if stmt.parent.keyword != 'module' :
return
if not(stmt.arg.startswith('3GPP TS ')) :
err_add(ctx.errors, stmt.pos, '3GPP_BAD_MODULE_REFERENCE',())
def v_chk_3gpp_format(self, ctx, stmt):
if (not(stmt.arg.startswith("_3gpp"))):
return
filename = stmt.pos.ref
try:
fd = io.open(filename, "r", encoding="utf-8", newline='')
pos = error.Position(stmt.pos.ref)
pos.top = stmt
lineno = 0
for line in fd:
lineno += 1
pos.line = lineno
# no tabs
if (line.find('\t') != -1 ):
err_add(ctx.errors, pos, '3GPP_TAB_IN_FILE',())
# no whitespace after the line
# removed for now as there are just too many of these
# errors
# if (re.search('.*\s+\n',line) != None ):
# err_add(ctx.errors, self.pos,
# '3GPP_WHITESPACE_AT_END_OF_LINE',())
                # lines no longer than 80 characters (82 allows for the
                # line-ending characters)
                if (len(line) > 82):
err_add(ctx.errors, pos, '3GPP_LONG_LINE',())
# EOL should be just NL no CR
if (line.find('\r') != -1 ):
err_add(ctx.errors, pos, '3GPP_CR_IN_FILE',())
# only us-ascii chars
try:
line.encode('ascii')
except UnicodeEncodeError:
err_add(ctx.errors, pos, '3GPP_NON_ASCII',())
except IOError as ex:
sys.stderr.write("error %s: %s\n" % (filename, ex))
sys.exit(1)
except UnicodeDecodeError as ex:
s = str(ex).replace('utf-8', 'utf8')
sys.stderr.write("%s: unicode error: %s\n" % (filename, s))
sys.exit(1)
def v_chk_limited_container_use(self, ctx, stmt):
if stmt.arg != 'attributes' or stmt.parent.keyword != 'list' :
err_add(ctx.errors, stmt.pos, '3GPP_LIMITED_CONTAINER_USE',())
def post_validate_ctx(self, ctx, modules):
if not ctx.opts.threegpp:
return
"""Remove some lint errors that 3GPP considers acceptable"""
for ctx_error in ctx.errors[:]:
if ((ctx_error[1] == "LINT_MISSING_REQUIRED_SUBSTMT"
or ctx_error[1] == "LINT_MISSING_RECOMMENDED_SUBSTMT")
and ctx_error[2][2] == 'description'
and (ctx_error[2][1] == 'enum'
or ctx_error[2][1] == 'bit'
or ctx_error[2][1] == 'choice'
or ctx_error[2][1] == 'container'
or ctx_error[2][1] == 'leaf-list'
or ctx_error[2][1] == 'leaf'
or ctx_error[2][1] == 'typedef'
or ctx_error[2][1] == 'grouping'
or ctx_error[2][1] == 'augment'
or ctx_error[2][1] == 'uses')):
# remove error from ctx
ctx.errors.remove(ctx_error)
return
| isc | 1,029,402,971,902,015,200 | 34.970874 | 79 | 0.544939 | false | 3.382532 | false | false | false |
silvio/elbe | elbepack/xmldefaults.py | 1 | 4019 |
import random
import string
import sys
armel_defaults = {
"arch": "armel",
"size": "20G",
"mem": "256",
"interpreter": "qemu-system-arm",
"userinterpr": "qemu-arm-static",
"console": "ttyAMA0,115200n1",
"machine": "versatilepb",
"nicmodel": "smc91c111"
}
armel_virtio_defaults = {
"arch": "armel",
"size": "20G",
"mem": "256",
"interpreter": "qemu-system-arm-virtio",
"userinterpr": "qemu-arm-static",
"console": "ttyAMA0,115200n1",
"machine": "versatilepb",
"nicmodel": "smc91c111"
}
armhf_defaults = {
"arch": "armhf",
"size": "20G",
"mem": "256",
"interpreter": "qemu-system-arm",
"userinterpr": "qemu-arm-static",
"console": "ttyAMA0,115200n1",
"machine": "versatilepb -cpu cortex-a9",
"nicmodel": "smc91c111"
}
armhf_virtio_defaults = {
"arch": "armhf",
"size": "20G",
"mem": "256",
"interpreter": "qemu-system-arm-virtio",
"userinterpr": "qemu-arm-static",
"console": "ttyAMA0,115200n1",
"machine": "versatilepb -cpu cortex-a9",
"nicmodel": "virtio"
}
ppc_defaults = {
"arch": "powerpc",
"size": "20G",
"mem": "256",
"interpreter": "qemu-system-ppc",
"userinterpr": "qemu-ppc-static",
"console": "ttyPZ0,115200n1",
"machine": "mac99",
"nicmodel": "rtl8139"
}
amd64_defaults = {
"arch": "amd64",
"size": "20G",
"mem": "1024",
"interpreter": "kvm",
"console": "ttyS0,115200n1",
"machine": "pc",
"nicmodel": "virtio"
}
i386_defaults = {
"arch": "i386",
"size": "20G",
"mem": "1024",
"interpreter": "kvm",
"console": "ttyS0,115200n1",
"machine": "pc",
"nicmodel": "virtio"
}
defaults = { "armel": armel_defaults,
"armel-virtio": armel_virtio_defaults,
"armhf": armhf_defaults,
"armhf-virtio": armhf_virtio_defaults,
"ppc": ppc_defaults,
"amd64": amd64_defaults,
"i386": i386_defaults,
"nodefaults": {} }
xml_field_path = {
"arch": "project/buildimage/arch",
"size": "project/buildimage/size",
"mem": "project/buildimage/mem",
"interpreter": "project/buildimage/interpreter",
"console": "project/buildimage/console",
"machine": "project/buildimage/machine",
"nicmodel": "project/buildimage/NIC/model"
}
def get_random_mac():
    binaddr = [random.randint(0, 255) for i in range(6)]  # randint is inclusive
    binaddr[0] &= 0xfe  # clear the multicast bit
    binaddr[0] |= 0x02  # set the locally administered bit
s = map( lambda x: "%02x" % x, binaddr )
return string.join( s, ":" )
class ElbeDefaults(object):
def __init__(self, build_type):
if not defaults.has_key(build_type):
print "Please specify a valid buildtype."
print "Valid buildtypes:"
print defaults.keys()
sys.exit(20)
self.defaults = defaults[build_type]
self.defaults["nicmac"] = get_random_mac()
def __getitem__( self, key ):
if self.defaults.has_key( key ):
return self.defaults[key]
print "No Default value has been Provided"
print "Either use a valid buildtype, or provide the field in the xml File."
print "The location in the xml is here:"
print xml_field_path[key]
sys.exit(20)
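# Minimal usage sketch (hypothetical, not part of elbe itself):
#
#     d = ElbeDefaults("armel")
#     print d["mem"]     # -> "256"
#     print d["nicmac"]  # randomly generated, locally administered MAC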
| gpl-3.0 | 1,458,241,890,226,960,400 | 28.123188 | 91 | 0.464543 | false | 3.497824 | false | false | false |
shaarli/python-shaarli-client | setup.py | 1 | 2012 | #!/usr/bin/env python3
"""Setup script for shaarli-client"""
import codecs
import os
import re
from setuptools import find_packages, setup
def get_long_description():
"""Reads the main README.rst to get the program's long description"""
with codecs.open('README.rst', 'r', 'utf-8') as f_readme:
return f_readme.read()
def get_package_metadata(attribute):
"""Reads metadata from the main package's __init__"""
with open(os.path.join('shaarli_client', '__init__.py'), 'r') as f_init:
return re.search(
r'^__{attr}__\s*=\s*[\'"]([^\'"]*)[\'"]'.format(attr=attribute),
f_init.read(), re.MULTILINE
).group(1)
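# The regex above matches simple module-level metadata assignments such as
# (illustrative values):
#
#     __title__ = 'shaarli-client'
#     __version__ = '0.1'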
setup(
name=get_package_metadata('title'),
version=get_package_metadata('version'),
description=get_package_metadata('brief'),
long_description=get_long_description(),
author=get_package_metadata('author'),
maintainer='VirtualTam',
maintainer_email='[email protected]',
license='MIT',
url='https://github.com/shaarli/python-shaarli-client',
keywords='bookmark bookmarking shaarli social',
packages=find_packages(exclude=['tests.*', 'tests']),
entry_points={
'console_scripts': [
'shaarli = shaarli_client.main:main',
],
},
install_requires=[
'requests >= 2.25',
'pyjwt == 2.0.1'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Utilities',
]
)
| mit | -5,212,573,942,387,458,000 | 31.451613 | 76 | 0.598907 | false | 3.839695 | false | false | false |
de-tour/detour | server/handling.py | 1 | 6094 | import cherrypy
from cherrypy.lib.static import serve_file
from cherrypy.process.plugins import SimplePlugin
from queue import Queue, Empty
from collections import namedtuple
from concurrent import Crawler
import parsing
import json
import traceback
import random
from urllib.parse import unquote
from ws4py.websocket import WebSocket
from ws4py.messaging import TextMessage
PoolItem = namedtuple('PoolItem', ['verb', 'args', 'output'])
class Search:
def __init__(self):
self.engines_suggest = []
self.engines_search = []
self.add_engines(parsing.sites)
self.pool_suggest = Crawler(cls_list=self.engines_suggest)
self.pool_search = Crawler(cls_list=self.engines_search)
def start(self):
self.pool_suggest.start()
self.pool_search.start()
def add_engines(self, engines):
for Engine in engines:
if parsing.is_balancer(Engine):
self.add_engines(Engine.balance())
else:
if parsing.can_suggest(Engine):
self.engines_suggest.append(Engine)
if parsing.can_search(Engine):
self.engines_search.append(Engine)
def stop(self):
self.pool_suggest.stop()
self.pool_search.stop()
def suggest(self, keyword):
if not keyword:
yield []
return
output = Queue()
k = len(self.engines_suggest) // 2
for engine in random.sample(self.engines_suggest, k):
self.pool_suggest.put(engine, PoolItem('suggest', (keyword,), output))
failure = 0
result_set = set()
while failure < 1:
try:
result_set.update(output.get(timeout=1))
except Empty:
failure += 1
ordered_results = parsing.rank_list(result_set, keyword)[0:10]
result_set = set(ordered_results)
yield ordered_results
def search(self, keyword, from_id):
if not keyword:
yield []
return
output = Queue()
for engine in self.engines_search:
if not parsing.is_meta(engine):
self.pool_search.put(engine, PoolItem('search', (keyword, from_id + 1, None), output))
else:
for site in parsing.domains:
filtered = engine.site_filter(site, keyword)
self.pool_search.put(engine, PoolItem('search', (filtered, from_id + 1, None), output))
failure = 0
result_set = set()
while failure < 5:
try:
new_results = set(output.get(timeout=1))
print('Search %s: %d unique results' % (repr(keyword), len(result_set)))
yield parsing.rank_list(new_results - result_set, keyword)
result_set.update(new_results)
except Empty:
failure += 1
class WSHandler(WebSocket):
def opened(self):
cherrypy.engine.log('WebSocket opened')
def received_message(self, msg):
cherrypy.engine.log('Received ' + str(msg))
try:
params = json.loads(str(msg))
verb = params['verb']
if verb == 'suggest':
self.ws_suggest(unquote(params['keyword']))
elif verb == 'search':
self.ws_search(unquote(params['keyword']), params['from_id'])
else:
raise ValueError('Unknown verb. (suggest, serach)')
except (KeyError, AttributeError, TypeError, ValueError) as e:
cherrypy.engine.log('Handler Exception - %s' % repr(e))
cherrypy.engine.log(traceback.format_exc())
def closed(self, code, reason):
cherrypy.engine.log('A client left')
def ws_suggest(self, keyword):
results = Queue()
cherrypy.engine.publish('detour_suggest', keyword, results)
generator = results.get()
for item in generator:
if item:
msg = json.dumps({'from': keyword, 'results': item})
cherrypy.engine.publish('websocket-broadcast', msg)
def ws_search(self, keyword, from_id):
results = Queue()
cherrypy.engine.publish('detour_search', keyword, from_id, results)
generator = results.get()
for r_list in generator:
if r_list:
d = {
'results': [r.items() for r in r_list],
'keyword': keyword,
'from_id': from_id,
}
cherrypy.engine.publish('websocket-broadcast', json.dumps(d))
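    # Shape of the JSON messages exchanged over the socket (illustrative
    # values; the fields match received_message/ws_search above):
    #   client -> server: {"verb": "search", "keyword": "cats", "from_id": 0}
    #   server -> client: {"results": [...], "keyword": "cats", "from_id": 0}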
class Daemon(SimplePlugin):
def __init__(self, bus):
SimplePlugin.__init__(self, bus)
def start(self):
self.bus.log('Daemon plugin starts')
self.priority = 70
self.search_daemon = Search()
self.search_daemon.start()
self.bus.subscribe('detour_suggest', self.suggest_handler)
self.bus.subscribe('detour_search', self.search_handler)
def stop(self):
self.bus.unsubscribe('detour_suggest', self.suggest_handler)
self.bus.unsubscribe('detour_search', self.search_handler)
self.search_daemon.stop()
self.bus.log('Daemon plugin stops')
def suggest_handler(self, keyword, bucket):
self.bus.log('Suggest ' + repr(keyword))
generator = self.search_daemon.suggest(keyword)
print("suggest_handler: got generator")
bucket.put(generator)
def search_handler(self, keyword, from_id, bucket):
self.bus.log('Search ' + repr(keyword) + ' from ID ' + repr(from_id))
generator = self.search_daemon.search(keyword, from_id)
print("search_handler: got generator")
bucket.put(generator)
class Detour:
def __init__(self, public):
self.public = public
@cherrypy.expose
def index(self, q=None):
return serve_file(self.public + '/index.html')
@cherrypy.expose
def ws(self):
handler = cherrypy.request.ws_handler
cherrypy.log("Handler created: %s" % repr(handler))
| gpl-3.0 | 3,814,767,568,741,064,000 | 32.119565 | 107 | 0.582212 | false | 4.154056 | false | false | false |
niklasberglund/freesprints | source/freesprints/__init__.py | 1 | 9191 | import pygame, sys
import pygame.font
from pygame.locals import *
import logging
import fs_menu
import helpers as helpers
import plugins
import os.path
import race
import hardware
import defaults
import logging
from rainbow_logging_handler import RainbowLoggingHandler
DISPLAY_RESOLUTION = (1024, 768)
# platform-specific imports
if helpers.is_running_on_rpi():# running on Raspberry Pi
import RPi.GPIO
import os
print "ON RASPBERRY PI"
#os.environ['SDL_VIDEODRIVER']="fbcon"
#os.environ["SDL_FBDEV"] = "/dev/fb1"
print "SET DRIVER"
else: # running on computer
import FakeRPi.GPIO
class Application(object):
instance = None
state = None
# application state constants
STATE_MAINMENU = 0
STATE_INGAME = 1
# member variables
window_surface = None
menu_surface = None
menu = None
state = STATE_MAINMENU
plugin_loader = None
roller_controller = None
race_options = None
race_object = None
selected_plugin_index = 0 # 0 by default. this should ideally be restored from stored settings
def __init__(self):
print "Application.__init__"
pygame.font.init()
menu_options_dict = {
"font_path": "fonts/Cave-Story.ttf",
"font_size": 42,
"color_background": (0, 0, 0),
"color_text": (255, 255, 255),
"color_text_highlight": (100, 20, 45)
}
menu_structure = [
{
"title": "New race",
"callback": self.start_race,
"submenu": [
{
"title": "Start",
"callback": self.start_race
},
{
"title": "Race visualizer",
"callback": None,
"submenu_populator_callback": self.populate_visualizers,
"identifier": "race_visualizer_selection"
},
{
"title": "Number of rollers",
"input": {
"type": "int",
"verifier": None,
"value": "2"
},
"callback": self.start_race
},
{
"title": "Roller diameter(mm)",
"input": {
"type": "int",
"verifier": None,
"value": "200"
}
}
]
},
{
"title": "Options",
"callback": self.show_options
},
{
"title": "Exit",
"callback": self.exit
}
]
#self.window_surface = pygame.display.set_mode((500, 400), pygame.FULLSCREEN, 32)
pygame.display.init()
self.window_surface = pygame.display.set_mode(defaults.RESOLUTION, 0, 32)
menu_options = fs_menu.MenuOptions(menu_options_dict)
self.menu = fs_menu.Menu(self.window_surface, menu_structure, menu_options)
self.roller_controller = hardware.RollerController()
def load_plugins(self):
self.plugin_loader = plugins.PluginLoader()
def start_race(self):
print "start game"
self.state = self.STATE_INGAME
race_options = race.Options()
race_participants = ([
race.Participant("Niklas", 7, Color("red")),
race.Participant("Some loser", 11, Color("blue"))
])
self.race_object = race.Race(race_options, race_participants)
plugins = self.plugin_loader.getAvailablePlugins()
self.race_object.start()
plugins[self.selected_plugin_index].start(self.race_object)
def show_options(self):
print "show options"
def populate_visualizers(self):
print "populate_visualizers"
submenu = []
pluginIndex = 0
for plugin in self.plugin_loader.getAvailablePlugins():
submenu.append({
"title": plugin.name,
"callback": self.select_plugin,
"tag": pluginIndex
})
pluginIndex = pluginIndex + 1
return submenu
def select_plugin(self, plugin_index):
print "selected plugin with index " + str(plugin_index)
self.selected_plugin_index = plugin_index
def exit(self):
pygame.quit()
sys.exit()
def hide(self):
pass
def get_window_surface(self):
return self.window_surface
def game_loop(self):
# run the game loop
while True:
for event in pygame.event.get():
if event.type == pygame.locals.QUIT:
self.exit()
elif event.type == pygame.locals.KEYUP:
if self.state == self.STATE_MAINMENU:
self.menu.registerKeypress(event.key)
elif event.key == pygame.locals.K_ESCAPE:
self.exit()
def start(self):
# set up pygame
pygame.init()
pygame.font.init()
if helpers.is_running_on_rpi():
disp_no = os.getenv("DISPLAY")
if disp_no:
print "I'm running under X display = {0}".format(disp_no)
# Check which frame buffer drivers are available
# Start with fbcon since directfb hangs with composite output
drivers = ['fbcon', 'directfb', 'svgalib']
found = False
for driver in drivers:
# Make sure that SDL_VIDEODRIVER is set
if not os.getenv('SDL_VIDEODRIVER'):
os.putenv('SDL_VIDEODRIVER', driver)
try:
pygame.display.init()
except pygame.error:
print 'Driver: {0} failed.'.format(driver)
continue
found = True
break
if not found:
raise Exception('No suitable video driver found!')
size = (pygame.display.Info().current_w, pygame.display.Info().current_h)
print "Framebuffer size: %d x %d" % (size[0], size[1])
#self.window_surface = pygame.display.set_mode(size, pygame.FULLSCREEN)
# set up the window
pygame.display.set_caption('Freesprints')
# set up the colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# set up fonts
#availableFonts = pygame.font.get_fonts()
font_path = "./fonts/Cave-Story.ttf"
#basicFont = pygame.font.SysFont(None, 30)
basicFont = pygame.font.Font(font_path, 48)
# set up the text
#text = basicFont.render('asdasd', True, WHITE, BLUE)
#textRect = text.get_rect()
#textRect.centerx = self.window_surface.get_rect().centerx
#textRect.centery = self.window_surface.get_rect().centery
# draw the white background onto the surface
self.window_surface.fill(BLACK)
# draw a green polygon onto the surface
#pygame.draw.polygon(self.window_surface, GREEN, ((146, 0), (291, 106), (236, 277), (56, 277), (0, 106)))
# draw some blue lines onto the surface
#pygame.draw.line(self.window_surface, BLUE, (60, 60), (120, 60), 4)
#pygame.draw.line(self.window_surface, BLUE, (120, 60), (60, 120))
#pygame.draw.line(self.window_surface, BLUE, (60, 120), (120, 120), 4)
# draw a blue circle onto the surface
#pygame.draw.circle(self.window_surface, BLUE, (300, 50), 20, 0)
# draw a red ellipse onto the surface
#pygame.draw.ellipse(self.window_surface, RED, (450, 160, 40, 80), 1)
# menu background
background = pygame.image.load('images/menu_background.png').convert()
backgroundRect = background.get_rect()
backgroundRect.x = 0
backgroundRect.y = 0
self.window_surface.blit(background, backgroundRect)
# draw the window onto the screen
pygame.display.update()
self.menu.render()
self.game_loop()
app = None
logger = None
def get_app():
global app
    if app is None:
app = Application()
return app
def get_logger():
global logger
    if logger is None:
logger = logging.getLogger('freesprints')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("[%(asctime)s] %(name)s %(funcName)s():%(lineno)d\t%(message)s") # same as default
# setup colored logging
handler = RainbowLoggingHandler(sys.stderr, color_funcName=('black', 'yellow', True))
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def init():
global app
print "start"
app = get_app()
app.load_plugins()
app.start()
| mit | -8,094,526,841,021,716,000 | 29.842282 | 121 | 0.527581 | false | 4.20064 | false | false | false |
pburdet/hyperspy | hyperspy/_signals/eds.py | 1 | 21939 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import numpy as np
from hyperspy import utils
from hyperspy._signals.spectrum import Spectrum
from hyperspy.misc.elements import elements as elements_db
from hyperspy.misc.eds import utils as utils_eds
from hyperspy.misc.utils import isiterable
class EDSSpectrum(Spectrum):
_signal_type = "EDS"
def __init__(self, *args, **kwards):
Spectrum.__init__(self, *args, **kwards)
if self.metadata.Signal.signal_type == 'EDS':
print('The microscope type is not set. Use '
'set_signal_type(\'EDS_TEM\') or set_signal_type(\'EDS_SEM\')')
self.metadata.Signal.binned = True
def _get_line_energy(self, Xray_line, FWHM_MnKa=None):
"""
        Get the line energy and the energy resolution of an X-ray line.
        The return values are in the same units as the signal axis
Parameters
----------
        Xray_line : string
            A valid element X-ray line, e.g. Fe_Kb.
FWHM_MnKa: {None, float, 'auto'}
The energy resolution of the detector in eV
            if 'auto', use the one in
'self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa'
        Returns
        -------
        float: the line energy, if FWHM_MnKa is None
        (float, float): the line energy and the energy resolution, if FWHM_MnKa is not None
"""
units_name = self.axes_manager.signal_axes[0].units
if FWHM_MnKa == 'auto':
if self.metadata.Signal.signal_type == 'EDS_SEM':
FWHM_MnKa = self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa
elif self.metadata.Signal.signal_type == 'EDS_TEM':
FWHM_MnKa = self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa
else:
raise NotImplementedError(
"This method only works for EDS_TEM or EDS_SEM signals. "
"You can use `set_signal_type(\"EDS_TEM\")` or"
"`set_signal_type(\"EDS_SEM\")` to convert to one of these"
"signal types.")
line_energy = utils_eds._get_energy_xray_line(Xray_line)
if units_name == 'eV':
line_energy *= 1000
if FWHM_MnKa is not None:
line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa,
line_energy / 1000) * 1000
elif units_name == 'keV':
if FWHM_MnKa is not None:
line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa,
line_energy)
else:
raise ValueError(
"%s is not a valid units for the energy axis. "
"Only `eV` and `keV` are supported. "
"If `s` is the variable containing this EDS spectrum:\n "
">>> s.axes_manager.signal_axes[0].units = \'keV\' \n"
% (units_name))
if FWHM_MnKa is None:
return line_energy
else:
return line_energy, line_FWHM
def _get_beam_energy(self):
"""
Get the beam energy.
        The return value is in the same units as the signal axis
"""
if "Acquisition_instrument.SEM.beam_energy" in self.metadata:
beam_energy = self.metadata.Acquisition_instrument.SEM.beam_energy
elif "Acquisition_instrument.TEM.beam_energy" in self.metadata:
beam_energy = self.metadata.Acquisition_instrument.TEM.beam_energy
else:
raise AttributeError(
"To use this method the beam energy `Acquisition_instrument.TEM.beam_energy` "
"or `Acquisition_instrument.SEM.beam_energy` must be defined in "
"`metadata`.")
units_name = self.axes_manager.signal_axes[0].units
if units_name == 'eV':
beam_energy = beam_energy * 1000
return beam_energy
def sum(self, axis):
"""Sum the data over the given axis.
Parameters
----------
axis : {int, string}
The axis can be specified using the index of the axis in
`axes_manager` or the axis name.
Returns
-------
s : Signal
See also
--------
sum_in_mask, mean
Examples
--------
>>> import numpy as np
>>> s = Signal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.sum(-1).data.shape
(64,64)
        # If we just want to plot the result of the operation
        >>> s.sum(-1).plot()
"""
        # modify the time spent per spectrum
if "Acquisition_instrument.SEM" in self.metadata:
mp = self.metadata.Acquisition_instrument.SEM
else:
mp = self.metadata.Acquisition_instrument.TEM
if mp.has_item('Detector.EDS.live_time'):
mp.Detector.EDS.live_time = mp.Detector.EDS.live_time * \
self.axes_manager.shape[axis]
return super(EDSSpectrum, self).sum(axis)
def rebin(self, new_shape):
"""Rebins the data to the new shape
Parameters
----------
new_shape: tuple of ints
The new shape must be a divisor of the original shape
"""
new_shape_in_array = []
for axis in self.axes_manager._axes:
new_shape_in_array.append(
new_shape[axis.index_in_axes_manager])
factors = (np.array(self.data.shape) /
np.array(new_shape_in_array))
s = super(EDSSpectrum, self).rebin(new_shape)
# modify time per spectrum
if "Acquisition_instrument.SEM.Detector.EDS.live_time" in s.metadata:
for factor in factors:
s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time *= factor
if "Acquisition_instrument.TEM.Detector.EDS.live_time" in s.metadata:
for factor in factors:
s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time *= factor
return s
def set_elements(self, elements):
"""Erase all elements and set them.
Parameters
----------
elements : list of strings
A list of chemical element symbols.
See also
--------
        add_elements, set_lines, add_lines.
Examples
--------
>>> s = signals.EDSSEMSpectrum(np.arange(1024))
        >>> s.set_elements(['Ni', 'O'])
Adding Ni_Ka Line
Adding O_Ka Line
        >>> s.metadata.Acquisition_instrument.SEM.beam_energy = 10
>>> s.set_elements(['Ni', 'O'])
Adding Ni_La Line
Adding O_Ka Line
"""
# Erase previous elements and X-ray lines
if "Sample.elements" in self.metadata:
del self.metadata.Sample.elements
self.add_elements(elements)
def add_elements(self, elements):
"""Add elements and the corresponding X-ray lines.
The list of elements is stored in `metadata.Sample.elements`
Parameters
----------
elements : list of strings
            The symbols of the elements.
See also
--------
set_elements, add_lines, set_lines.
"""
if not isiterable(elements) or isinstance(elements, basestring):
raise ValueError(
"Input must be in the form of a list. For example, "
"if `s` is the variable containing this EDS spectrum:\n "
">>> s.add_elements(('C',))\n"
"See the docstring for more information.")
if "Sample.elements" in self.metadata:
elements_ = set(self.metadata.Sample.elements)
else:
elements_ = set()
for element in elements:
if element in elements_db:
elements_.add(element)
else:
raise ValueError(
"%s is not a valid chemical element symbol." % element)
if not hasattr(self.metadata, 'Sample'):
self.metadata.add_node('Sample')
self.metadata.Sample.elements = sorted(list(elements_))
def set_lines(self,
lines,
only_one=True,
only_lines=("Ka", "La", "Ma")):
"""Erase all Xrays lines and set them.
See add_lines for details.
Parameters
----------
lines : list of strings
A list of valid element X-ray lines to add e.g. Fe_Kb.
Additionally, if `metadata.Sample.elements` is
            defined, add the lines of those elements that were not
given in this list.
only_one: bool
If False, add all the lines of each element in
            `metadata.Sample.elements` that have no line
defined in lines. If True (default),
only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be added.
See also
--------
        add_lines, add_elements, set_elements.
"""
if "Sample.xray_lines" in self.metadata:
del self.metadata.Sample.xray_lines
self.add_lines(lines=lines,
only_one=only_one,
only_lines=only_lines)
def add_lines(self,
lines=(),
only_one=True,
only_lines=("Ka", "La", "Ma")):
"""Add X-rays lines to the internal list.
Although most functions do not require an internal list of
X-ray lines because they can be calculated from the internal
        list of elements, occasionally it might be useful to customize the
        X-ray lines to be used by all functions by default using this method.
The list of X-ray lines is stored in
`metadata.Sample.xray_lines`
Parameters
----------
lines : list of strings
A list of valid element X-ray lines to add e.g. Fe_Kb.
Additionally, if `metadata.Sample.elements` is
            defined, add the lines of those elements that were not
given in this list. If the list is empty (default), and
`metadata.Sample.elements` is
defined, add the lines of all those elements.
only_one: bool
If False, add all the lines of each element in
            `metadata.Sample.elements` that have no line
defined in lines. If True (default),
only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be added.
See also
--------
set_lines, add_elements, set_elements.
"""
if "Sample.xray_lines" in self.metadata:
xray_lines = set(self.metadata.Sample.xray_lines)
else:
xray_lines = set()
        # Record the elements whose X-ray lines have been customized
# So that we don't attempt to add new lines automatically
elements = set()
for line in xray_lines:
elements.add(line.split("_")[0])
end_energy = self.axes_manager.signal_axes[0].high_value
for line in lines:
try:
element, subshell = line.split("_")
except ValueError:
raise ValueError(
"Invalid line symbol. "
"Please provide a valid line symbol e.g. Fe_Ka")
if element in elements_db:
elements.add(element)
if subshell in elements_db[element]['Atomic_properties']['Xray_lines']:
lines_len = len(xray_lines)
xray_lines.add(line)
if lines_len != len(xray_lines):
print("%s line added," % line)
else:
print("%s line already in." % line)
if (self._get_line_energy(element + '_' + subshell) > end_energy):
print("Warning: %s %s is above the data energy range."
% (element, subshell))
else:
raise ValueError(
"%s is not a valid line of %s." % (line, element))
else:
raise ValueError(
"%s is not a valid symbol of an element." % element)
if "Sample.elements" in self.metadata:
extra_elements = (set(self.metadata.Sample.elements) -
elements)
if extra_elements:
new_lines = self._get_lines_from_elements(
extra_elements,
only_one=only_one,
only_lines=only_lines)
if new_lines:
self.add_lines(list(new_lines) + list(lines))
self.add_elements(elements)
if not hasattr(self.metadata, 'Sample'):
self.metadata.add_node('Sample')
if "Sample.xray_lines" in self.metadata:
xray_lines = xray_lines.union(
self.metadata.Sample.xray_lines)
self.metadata.Sample.xray_lines = sorted(list(xray_lines))
def _get_lines_from_elements(self,
elements,
only_one=False,
only_lines=("Ka", "La", "Ma")):
"""Returns the X-ray lines of the given elements in spectral range
of the data.
Parameters
----------
elements : list of strings
A list containing the symbol of the chemical elements.
only_one : bool
If False, add all the lines of each element in the data spectral
range. If True only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be returned.
Returns
-------
"""
beam_energy = self._get_beam_energy()
end_energy = self.axes_manager.signal_axes[0].high_value
if beam_energy < end_energy:
end_energy = beam_energy
lines = []
for element in elements:
# Possible line (existing and excited by electron)
element_lines = []
for subshell in elements_db[element]['Atomic_properties']['Xray_lines'].keys():
if only_lines and subshell not in only_lines:
continue
if (self._get_line_energy(element + '_' + subshell) < end_energy):
element_lines.append(element + "_" + subshell)
if only_one and element_lines:
# Choose the best line
select_this = -1
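                # Default to the last listed line, then take the first line
                # lying below half the beam energy (overvoltage above 2) if
                # one exists.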
for i, line in enumerate(element_lines):
if (self._get_line_energy(line) < beam_energy / 2):
select_this = i
break
element_lines = [element_lines[select_this], ]
if not element_lines:
print(("There is not X-ray line for element %s " % element) +
"in the data spectral range")
else:
lines.extend(element_lines)
return lines
def get_lines_intensity(self,
xray_lines=None,
plot_result=False,
integration_window_factor=2.,
only_one=True,
only_lines=("Ka", "La", "Ma"),
**kwargs):
"""Return the intensity map of selected Xray lines.
        The intensities, i.e. the number of X-ray counts, are computed by
        summing the spectrum over the
        different X-ray lines. The sum window width
        is calculated from the energy resolution of the detector
        as defined in
        `self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa` or
        `self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa`.
Parameters
----------
xray_lines: {None, "best", list of string}
            If None,
            if `metadata.Sample.xray_lines` contains a
            list of lines use those.
            If `metadata.Sample.xray_lines` is undefined
            or empty but `metadata.Sample.elements` is defined,
            use the same syntax as `add_lines` to select a subset of lines
            for the operation.
Alternatively, provide an iterable containing
a list of valid X-ray lines symbols.
plot_result : bool
If True, plot the calculated line intensities. If the current
object is a single spectrum it prints the result instead.
integration_window_factor: Float
The integration window is centered at the center of the X-ray
line and its width is defined by this factor (2 by default)
times the calculated FWHM of the line.
only_one : bool
If False, use all the lines of each element in the data spectral
range. If True use only the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, use only the given lines.
kwargs
The extra keyword arguments for plotting. See
`utils.plot.plot_signals`
Returns
-------
intensities : list
A list containing the intensities as Signal subclasses.
Examples
--------
>>> specImg.get_lines_intensity(["C_Ka", "Ta_Ma"])
See also
--------
set_elements, add_elements.
"""
if xray_lines is None:
if 'Sample.xray_lines' in self.metadata:
xray_lines = self.metadata.Sample.xray_lines
elif 'Sample.elements' in self.metadata:
xray_lines = self._get_lines_from_elements(
self.metadata.Sample.elements,
only_one=only_one,
only_lines=only_lines)
else:
raise ValueError(
"Not X-ray line, set them with `add_elements`")
intensities = []
# test 1D Spectrum (0D problem)
#signal_to_index = self.axes_manager.navigation_dimension - 2
for Xray_line in xray_lines:
line_energy, line_FWHM = self._get_line_energy(Xray_line,
FWHM_MnKa='auto')
det = integration_window_factor * line_FWHM / 2.
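            # Integrate over a window of total width
            # integration_window_factor * FWHM centred on the line energy.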
img = self[..., line_energy - det:line_energy + det
].integrate1D(-1)
img.metadata.General.title = (
'Intensity of %s at %.2f %s from %s' %
(Xray_line,
line_energy,
self.axes_manager.signal_axes[0].units,
self.metadata.General.title))
if img.axes_manager.navigation_dimension >= 2:
img = img.as_image([0, 1])
elif img.axes_manager.navigation_dimension == 1:
img.axes_manager.set_signal_dimension(1)
if plot_result and img.axes_manager.signal_dimension == 0:
print("%s at %s %s : Intensity = %.2f"
% (Xray_line,
line_energy,
self.axes_manager.signal_axes[0].units,
img.data))
intensities.append(img)
if plot_result and img.axes_manager.signal_dimension != 0:
utils.plot.plot_signals(intensities, **kwargs)
return intensities
def get_take_off_angle(self):
"""Calculate the take-off-angle (TOA).
TOA is the angle with which the X-rays leave the surface towards
        the detector. The parameters are read from
        'Acquisition_instrument.SEM.tilt_stage',
        'Acquisition_instrument.SEM.Detector.EDS.azimuth_angle' and
        'Acquisition_instrument.SEM.Detector.EDS.elevation_angle'
        (or their 'Acquisition_instrument.TEM' equivalents) in 'metadata'.
Returns
-------
take_off_angle: float (Degree)
See also
--------
utils.eds.take_off_angle
Notes
-----
Defined by M. Schaffer et al., Ultramicroscopy 107(8), pp 587-597 (2007)
"""
if self.metadata.Signal.signal_type == 'EDS_SEM':
mp = self.metadata.Acquisition_instrument.SEM
elif self.metadata.Signal.signal_type == 'EDS_TEM':
mp = self.metadata.Acquisition_instrument.TEM
tilt_stage = mp.tilt_stage
azimuth_angle = mp.Detector.EDS.azimuth_angle
elevation_angle = mp.Detector.EDS.elevation_angle
TOA = utils.eds.take_off_angle(tilt_stage, azimuth_angle,
elevation_angle)
return TOA
| gpl-3.0 | 1,777,743,600,678,881,300 | 37.489474 | 104 | 0.548749 | false | 4.252568 | false | false | false |
stevedh/queryutils | queryutils/user.py | 1 | 2181 |
from json import JSONEncoder
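# NOTE: SessionEncoder and QueryEncoder are referenced below but are not
# imported here; they are assumed to come from this package's session and
# query modules.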
class User(object):
def __init__(self, name):
self.name = name
self.sessions = {}
self.queries = []
class VerboseUserEncoder(JSONEncoder):
def encode(self, obj):
user_dict = {}
user_dict['name'] = obj.name
session_dict = {}
for (session_id, session) in obj.sessions.iteritems():
session_dict[session_id] = SessionEncoder().default(session)
query_list = []
for query in obj.queries:
query_list.append(QueryEncoder().default(query))
        user_dict['queries'] = query_list
        user_dict['sessions'] = session_dict
        return user_dict
def default(self, obj):
if isinstance(obj, User):
return self.encode(obj)
return JSONEncoder.default(self, obj)
class UserEncoder(JSONEncoder):
def encode(self, obj):
user_dict = {}
user_dict['name'] = obj.name
session_dict = {}
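        # NOTE: each pass through this loop rewrites the same session_dict,
        # so only the last session survives in the encoded output.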
for (session_id, session) in obj.sessions.iteritems():
session_dict['id'] = session_id
query_list = []
for query in session.queries:
query_dict = {}
query_dict['delta'] = query.delta
query_dict['time'] = query.time
query_dict['text'] = query.text
query_list.append(query_dict)
session_dict['queries'] = query_list
session_dict['user'] = obj.name
try:
autorecurring_query_list = []
for query in obj.autorecurring_queries:
query_dict = {}
query_dict['repeat_delta'] = query.repeat_delta
query_dict['time'] = query.time
query_dict['text'] = query.text
autorecurring_query_list.append(query_dict)
user_dict['autorecurring_queries'] = autorecurring_query_list
except AttributeError:
print "Not encoding autorecurring queries. No such attribute."
user_dict['sessions'] = session_dict
return user_dict
def default(self, obj):
if isinstance(obj, User):
return self.encode(obj)
return JSONEncoder.default(self, obj)
| bsd-3-clause | 8,245,894,538,224,595,000 | 33.078125 | 74 | 0.558459 | false | 4.218569 | false | false | false |
ajhager/copycat | copycat/workspace/string.py | 1 | 12784 | # Copyright (c) 2007-2017 Joseph Hager.
#
# Copycat is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License,
# as published by the Free Software Foundation.
#
# Copycat is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Copycat; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""String"""
import random
import copycat.toolbox as toolbox
class String(object):
"""String is a letter string in the workspace.
This could be the initial string, modified string or target string.
    Each object in a string has a unique string number that distinguishes
it from other objects in the string."""
def __init__(self, workspace, string):
self.workspace = workspace
self.slipnet = self.workspace.slipnet
self.name = string
self.highest_string_number = -1
self.length = len(string)
self.letters = {}
self.groups = {}
self.proposed_groups = {}
self.object_positions = {}
self.left_right_bonds = {}
self.from_to_bonds = {}
self.proposed_bonds = {}
self.intra_string_unhappiness = 0
self.bonds_to_scan_distribution = range(self.length)
def add_to_object_positions(self, obj, position):
"""Add an object to the object positions."""
if position in self.object_positions:
self.object_positions[position].append(obj)
else:
self.object_positions[position] = [obj]
def remove_from_object_positions(self, obj, position):
"""Remove an object from the object positions."""
if obj in self.object_positions[position]:
self.object_positions[position].remove(obj)
def add_letter(self, letter):
"""Add a letter to the string."""
self.highest_string_number += 1
letter.string_number = self.highest_string_number
position = letter.left_string_position
self.letters[position] = letter
self.add_to_object_positions(letter, position)
def get_letters(self):
"""Return a list of letters in the string."""
return [self.letters[index] for index in sorted(self.letters.keys())]
def get_letter(self, position):
"""Return the letter at the given position in the string."""
return self.letters.get(position)
def get_random_letter(self):
"""Return a random letter from the string."""
return random.choice(self.get_letters())
def get_leftmost_letter(self):
"""Return the leftmost letter in the string."""
return self.letters.get(0)
def get_rightmost_letter(self):
"""Return the rightmost letter in the string."""
return self.letters.get(len(self.letters) - 1)
def add_group(self, group):
"""Add a group to the string."""
self.highest_string_number += 1
group.string_number = self.highest_string_number
self.groups[group.left_object.string_number] = group
self.add_to_object_positions(group, group.left_string_position)
self.add_to_object_positions(group, group.right_string_position)
def remove_group(self, group):
"""Remove a group from the string."""
if group.left_object.string_number in self.groups:
del self.groups[group.left_object.string_number]
self.remove_from_object_positions(group, group.left_string_position)
self.remove_from_object_positions(group, group.right_string_position)
def get_groups(self):
"""Return a list of groups in the string."""
return list(self.groups.values())
def get_group(self, position):
"""Return the group at the given position in letters.
Positions start at 0 and refer to the position of the leftmost object
in the group."""
return self.get_letter(position).group
def get_existing_group(self, group):
"""Return the group in the string if it has the same properties as
the given group."""
existing_group = self.groups.get(group.left_object.string_number)
if existing_group:
if existing_group.length == group.length and \
existing_group.group_category == group.group_category and \
existing_group.direction_category == group.direction_category:
return existing_group
def add_proposed_group(self, group):
"""Add a proposed group to the string."""
position = (group.left_object.string_number,
group.right_object.string_number)
if position in self.proposed_groups:
self.proposed_groups[position].append(group)
else:
self.proposed_groups[position] = [group]
def remove_proposed_group(self, group):
"""Remove a proposed group from the string."""
position = (group.left_object.string_number,
group.right_object.string_number)
items = self.proposed_groups.get(position, [])
if group in items:
self.proposed_groups[position].remove(group)
def get_proposed_groups(self):
"""Return a list of the proposed groups in the string."""
return list(set(toolbox.flatten(self.proposed_groups.values())))
def get_proposed_group(self, first, second):
"""Return the proposed group at first, second position."""
return self.proposed_groups.get((first, second))
def add_bond(self, bond):
"""Add a bond to the string, sameness bonds in both directions."""
left_number = bond.left_object.string_number
right_number = bond.right_object.string_number
self.left_right_bonds[(left_number, right_number)] = bond
from_number = bond.from_object.string_number
to_number = bond.to_object.string_number
self.from_to_bonds[(from_number, to_number)] = bond
if bond.bond_category == self.slipnet.plato_sameness:
self.left_right_bonds[(right_number, left_number)] = bond
self.from_to_bonds[(to_number, from_number)] = bond
def remove_bond(self, bond):
"""Remove a built bond from the string."""
left_number = bond.left_object.string_number
right_number = bond.right_object.string_number
if (left_number, right_number) in self.left_right_bonds:
del self.left_right_bonds[(left_number, right_number)]
from_number = bond.from_object.string_number
to_number = bond.to_object.string_number
if (from_number, to_number) in self.from_to_bonds:
del self.from_to_bonds[(from_number, to_number)]
if bond.bond_category == self.slipnet.plato_sameness:
if (right_number, left_number) in self.left_right_bonds:
del self.left_right_bonds[(right_number, left_number)]
if (to_number, from_number) in self.from_to_bonds:
del self.from_to_bonds[(to_number, from_number)]
def get_bonds(self):
"""Return a list of the built bonds in the string."""
return list(set(self.from_to_bonds.values()))
def get_bond(self, from_object, to_object):
"""Return the bond between the two objects, if any."""
return self.from_to_bonds.get((from_object.string_number,
to_object.string_number))
def get_existing_bond(self, bond):
"""Return the bond in the string if it has the same properties as
the given bond."""
existing_bond = self.get_bond(bond.from_object, bond.to_object)
if existing_bond:
if existing_bond.bond_category == bond.bond_category and \
existing_bond.direction_category == bond.direction_category:
return existing_bond
def add_proposed_bond(self, bond):
"""Add the proposed bond to the string."""
position = (bond.from_object.string_number,
bond.to_object.string_number)
if position in self.proposed_bonds:
self.proposed_bonds[position].append(bond)
else:
self.proposed_bonds[position] = [bond]
def remove_proposed_bond(self, bond):
"""Add the proposed bond to the string."""
position = (bond.from_object.string_number,
bond.to_object.string_number)
if position in self.proposed_bonds:
items = self.proposed_bonds[position]
if bond in items:
self.proposed_bonds[position].remove(bond)
def get_proposed_bonds(self):
"""Return a list of proposed bonds in the string."""
return list(set(toolbox.flatten(self.proposed_bonds.values())))
def get_proposed_bond(self, first, second):
"""Return a proposed bonds at first, second in the string."""
return self.proposed_bonds.get((first, second))
def get_objects(self, category=None):
"""Return the list of objects of the given object category.
If no category is given, return all objects."""
if category == self.slipnet.plato_letter:
return self.get_letters()
elif category == self.slipnet.plato_group:
return self.get_groups()
return self.get_letters() + self.get_groups()
def get_non_string_spanning_objects(self):
"""Return all objects that do not span the entire string."""
return [o for o in self.get_objects() if not o.spans_whole_string()]
def get_random_object(self, method=None):
"""Return a random object from the string."""
if method:
objects = self.get_objects()
values = [getattr(obj, method) for obj in objects]
values = self.workspace.temperature_adjusted_values(values)
return objects[toolbox.weighted_index(values)]
return random.choice(self.get_objects())
def get_random_leftmost_object(self):
"""Return a random leftmost object from the string."""
leftmost_objects = []
category = self.slipnet.plato_string_position_category
for obj in self.get_objects():
if obj.get_descriptor(category) == self.slipnet.plato_leftmost:
leftmost_objects.append(obj)
if leftmost_objects:
values = [obj.relative_importance for obj in leftmost_objects]
return toolbox.weighted_select(values, leftmost_objects)
def update_relative_importances(self):
"""Update the relative, normalized importances of all the objects in
the string."""
raw_importance = sum([o.raw_importance for o in self.get_objects()])
for obj in self.get_objects():
if raw_importance == 0:
importance = 0
else:
quot = obj.raw_importance / float(raw_importance)
importance = round(100 * quot)
obj.relative_importance = importance
def update_intra_string_unhappiness(self):
"""Calculate the average of the intra-string unhappiness of all the
objects in the string."""
unhappiness = [o.intra_string_unhappiness for o in self.get_objects()]
self.intra_string_unhappiness = round(toolbox.average(*unhappiness))
def local_bond_category_relevance(self, bond_category):
"""A function of how many bonds in the string have the given bond
category. This function is not perfect; it gives just a rough
estimate of the relevance of this bond category."""
objects = self.get_non_string_spanning_objects()
if len(objects) == 1:
return 0
bond_count = 0
for obj in objects:
if obj.right_bond:
if obj.right_bond.bond_category == bond_category:
bond_count += 1
return 100 * (float(bond_count) / (len(objects) - 1))
def local_direction_category_relevance(self, direction_category):
"""A function of how many bonds in the string have the given direction
category. This function is not perfect; it gives just a rough estimate
of the relevance of this direction category."""
objects = self.get_non_string_spanning_objects()
if len(objects) == 1:
return 0
bond_count = 0
for obj in objects:
if obj.right_bond:
if obj.right_bond.direction_category == direction_category:
bond_count += 1
return 100 * (float(bond_count) / (len(objects) - 1))
| gpl-2.0 | -2,942,944,997,608,735,000 | 41.471761 | 80 | 0.632431 | false | 4.076531 | false | false | false |
noahlittle/noahlittle.github.io | iCTRL/var/mobile/pentest/exploits/iCTRL/cupp/cupp.py | 1 | 55986 | #!/usr/bin/python
#
# [Program]
#
# CUPP 3.1
# Common User Passwords Profiler
#
#
#
# [Author]
#
# Muris Kurgas aka j0rgan
# j0rgan [at] remote-exploit [dot] org
# http://www.remote-exploit.org
# http://www.azuzi.me
#
#
#
# [License]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See 'docs/LICENSE' for more information.
import sys
import os
import ftplib
import ConfigParser
import urllib
import gzip
import csv
# Reading configuration file...
config = ConfigParser.ConfigParser()
config.read('cupp.cfg')
years = config.get('years', 'years').split(',')
chars = config.get('specialchars', 'chars').split(',')
numfrom = config.getint('nums','from')
numto = config.getint('nums','to')
wcfrom = config.getint('nums','wcfrom')
wcto = config.getint('nums','wcto')
threshold = config.getint('nums','threshold')
# 1337 mode configs, well you can add more lines if you add it to config file too.
# You will need to add more lines in two places in cupp.py code as well...
a = config.get('leet','a')
i = config.get('leet','i')
e = config.get('leet','e')
t = config.get('leet','t')
o = config.get('leet','o')
s = config.get('leet','s')
g = config.get('leet','g')
z = config.get('leet','z')
# for concatenations...
def concats(seq, start, stop):
for mystr in seq:
for num in xrange(start, stop):
yield mystr + str(num)
# for sorting and making combinations...
def komb(seq, start):
for mystr in seq:
for mystr1 in start:
yield mystr + mystr1
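# Illustrative examples (hypothetical inputs):
#   list(concats(['jo'], 1, 3))  ->  ['jo1', 'jo2']
#   list(komb(['jo'], ['84']))   ->  ['jo84']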
if len(sys.argv) < 2 or sys.argv[1] == '-h':
print " ___________ "
print " \033[07m cupp.py! \033[27m # Common"
print " \ # User"
print " \ \033[1;31m,__,\033[1;m # Passwords"
print " \ \033[1;31m(\033[1;moo\033[1;31m)____\033[1;m # Profiler"
print " \033[1;31m(__) )\ \033[1;m "
print " \033[1;31m ||--|| \033[1;m\033[05m*\033[25m\033[1;m [ Muris Kurgas | [email protected] ]\r\n\r\n"
print " [ Options ]\r\n"
print " -h You are looking at it baby! :)"
print " For more help take a look in docs/README"
print " Global configuration file is cupp.cfg\n"
print " -i Interactive questions for user password profiling\r\n"
print " -w Use this option to improve existing dictionary,"
print " or WyD.pl output to make some pwnsauce\r\n"
print " -l Download huge wordlists from repository\r\n"
print " -a Parse default usernames and passwords directly from Alecto DB."
print " Project Alecto uses purified databases of Phenoelit and CIRT"
print " which where merged and enhanced.\r\n"
print " -v Version of the program\r\n"
exit()
elif sys.argv[1] == '-v':
print "\r\n \033[1;31m[ cupp.py ] v3.1\033[1;m\r\n"
print " * Hacked up by j0rgan - [email protected]"
print " * http://www.remote-exploit.org\r\n"
print " Take a look docs/README file for more info about the program\r\n"
exit()
elif sys.argv[1] == '-w':
if len(sys.argv) < 3:
print "\r\n[Usage]: "+sys.argv[0]+" -w [FILENAME]\r\n"
exit()
fajl = open(sys.argv[2], "r")
listic = fajl.readlines()
linije = 0
for line in listic:
linije += 1
listica = []
for x in listic:
listica += x.split()
print "\r\n *************************************************"
print " * \033[1;31mWARNING!!!\033[1;m *"
print " * Using large wordlists in some *"
print " * options bellow is NOT recommended! *"
print " *************************************************\r\n"
conts = raw_input("[>] Do you want to concatenate all words from wordlist? Y/[N]: ").lower()
if conts == "y" and linije > threshold:
print "\r\n[-] Maximum number of words for concatenation is "+str(threshold)
print "[-] Check configuration file for increasing this number.\r\n"
conts = raw_input("[>] Do you want to concatenate all words from wordlist? Y/[N]: ").lower()
conts = conts
cont = ['']
if conts == "y":
for cont1 in listica:
for cont2 in listica:
if listica.index(cont1) != listica.index(cont2):
cont.append(cont1+cont2)
spechars = ['']
spechars1 = raw_input("[>] Do you want to add special chars at the end of words? Y/[N]: ").lower()
if spechars1 == "y":
for spec1 in chars:
spechars.append(spec1)
for spec2 in chars:
spechars.append(spec1+spec2)
for spec3 in chars:
spechars.append(spec1+spec2+spec3)
    randnum = raw_input("[>] Do you want to add some random numbers at the end of words? Y/[N]: ").lower()
    leetmode = raw_input("[>] Leet mode? (i.e. leet = 1337) Y/[N]: ").lower()
kombinacija1 = list(komb(listica, years))
kombinacija2 = ['']
if conts == "y":
kombinacija2 = list(komb(cont, years))
kombinacija3 = ['']
kombinacija4 = ['']
if spechars1 == "y":
kombinacija3 = list(komb(listica, spechars))
if conts == "y":
kombinacija4 = list(komb(cont, spechars))
kombinacija5 = ['']
kombinacija6 = ['']
if randnum == "y":
kombinacija5 = list(concats(listica, numfrom, numto))
if conts == "y":
kombinacija6 = list(concats(cont, numfrom, numto))
print "\r\n[+] Now making a dictionary..."
print "[+] Sorting list and removing duplicates..."
komb_unique1 = dict.fromkeys(kombinacija1).keys()
komb_unique2 = dict.fromkeys(kombinacija2).keys()
komb_unique3 = dict.fromkeys(kombinacija3).keys()
komb_unique4 = dict.fromkeys(kombinacija4).keys()
komb_unique5 = dict.fromkeys(kombinacija5).keys()
komb_unique6 = dict.fromkeys(kombinacija6).keys()
komb_unique7 = dict.fromkeys(listica).keys()
komb_unique8 = dict.fromkeys(cont).keys()
uniqlist = komb_unique1+komb_unique2+komb_unique3+komb_unique4+komb_unique5+komb_unique6+komb_unique7+komb_unique8
unique_lista = dict.fromkeys(uniqlist).keys()
unique_leet = []
if leetmode == "y":
for x in unique_lista: # if you want to add more leet chars, you will need to add more lines in cupp.cfg too...
x = x.replace('a',a)
x = x.replace('i',i)
x = x.replace('e',e)
x = x.replace('t',t)
x = x.replace('o',o)
x = x.replace('s',s)
x = x.replace('g',g)
x = x.replace('z',z)
unique_leet.append(x)
unique_list = unique_lista + unique_leet
unique_list_finished = []
for x in unique_list:
if len(x) > wcfrom and len(x) < wcto:
unique_list_finished.append(x)
f = open ( sys.argv[2]+'.cupp.txt', 'w' )
unique_list_finished.sort()
f.write (os.linesep.join(unique_list_finished))
f = open ( sys.argv[2]+'.cupp.txt', 'r' )
lines = 0
for line in f:
lines += 1
f.close()
print "[+] Saving dictionary to \033[1;31m"+sys.argv[2]+".cupp.txt\033[1;m, counting \033[1;31m"+str(lines)+" words.\033[1;m"
print "[+] Now load your pistolero with \033[1;31m"+sys.argv[2]+".cupp.txt\033[1;m and shoot! Good luck!"
fajl.close()
exit()
elif sys.argv[1] == '-i':
print "\r\n[+] Insert the informations about the victim to make a dictionary"
print "[+] If you don't know all the info, just hit enter when asked! ;)\r\n"
# We need some informations first!
name = raw_input("[>] Name: ").lower()
    while len(name.strip()) == 0:
print "\r\n[-] You must enter a name at least!"
name = raw_input("[>] Name: ").lower()
name = str(name)
surname = raw_input("[>] Surname: ").lower()
nick = raw_input("[>] Nickname: ").lower()
birthdate = raw_input("[>] Birthdate (DDMMYYYY): ")
while len(birthdate) != 0 and len(birthdate) != 8:
print "\r\n[-] You must enter 8 digits for birthday!"
birthdate = raw_input("[>] Birthdate (DDMMYYYY): ")
birthdate = str(birthdate)
print "\r\n"
wife = raw_input("[>] Wife's(husband's) name: ").lower()
wifen = raw_input("[>] Wife's(husband's) nickname: ").lower()
wifeb = raw_input("[>] Wife's(husband's) birthdate (DDMMYYYY): ")
while len(wifeb) != 0 and len(wifeb) != 8:
print "\r\n[-] You must enter 8 digits for birthday!"
wifeb = raw_input("[>] Wife's(husband's) birthdate (DDMMYYYY): ")
wifeb = str(wifeb)
print "\r\n"
kid = raw_input("[>] Child's name: ").lower()
kidn = raw_input("[>] Child's nickname: ").lower()
kidb = raw_input("[>] Child's birthdate (DDMMYYYY): ")
while len(kidb) != 0 and len(kidb) != 8:
print "\r\n[-] You must enter 8 digits for birthday!"
kidb = raw_input("[>] Child's birthdate (DDMMYYYY): ")
kidb = str(kidb)
print "\r\n"
pet = raw_input("[>] Pet's name: ").lower()
company = raw_input("[>] Company name: ").lower()
print "\r\n"
words = ['']
oth = raw_input("[>] Do you want to add some key words about the victim? Y/[N]: ").lower()
if oth == "y":
words = raw_input("[>] Please enter the words, separated by comma. [i.e. hacker, juice, black]: ").lower().split(", ")
spechars = ['']
spechars1 = raw_input("[>] Do you want to add special chars at the end of words? Y/[N]: ").lower()
if spechars1 == "y":
for spec1 in chars:
spechars.append(spec1)
for spec2 in chars:
spechars.append(spec1+spec2)
for spec3 in chars:
spechars.append(spec1+spec2+spec3)
    randnum = raw_input("[>] Do you want to add some random numbers at the end of words? Y/[N]: ").lower()
leetmode = raw_input("[>] Leet mode? (i.e. leet = 1337) Y/[N]: ").lower()
print "\r\n[+] Now making a dictionary..."
# Now me must do some string modifications...
# Birthdays first
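    # birthdate is DDMMYYYY: the _yy/_yyy/_yyyy slices keep the trailing year
    # digits, _xd/_xm keep the second digit of the day/month, and _dd/_mm
    # keep the full two-digit day and month.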
birthdate_yy = birthdate[-2:]
birthdate_yyy = birthdate[-3:]
birthdate_yyyy = birthdate[-4:]
birthdate_xd = birthdate[1:2]
birthdate_xm = birthdate[3:4]
birthdate_dd = birthdate[:2]
birthdate_mm = birthdate[2:4]
wifeb_yy = wifeb[-2:]
wifeb_yyy = wifeb[-3:]
wifeb_yyyy = wifeb[-4:]
wifeb_xd = wifeb[1:2]
wifeb_xm = wifeb[3:4]
wifeb_dd = wifeb[:2]
wifeb_mm = wifeb[2:4]
kidb_yy = kidb[-2:]
kidb_yyy = kidb[-3:]
kidb_yyyy = kidb[-4:]
kidb_xd = kidb[1:2]
kidb_xm = kidb[3:4]
kidb_dd = kidb[:2]
kidb_mm = kidb[2:4]
# Convert first letters to uppercase...
nameup = name.title()
surnameup = surname.title()
nickup = nick.title()
wifeup = wife.title()
wifenup = wifen.title()
kidup = kid.title()
kidnup = kidn.title()
petup = pet.title()
companyup = company.title()
wordsup = []
for words1 in words:
wordsup.append(words1.title())
word = words+wordsup
# reverse a name
rev_name = name[::-1]
rev_nameup = nameup[::-1]
rev_nick = nick[::-1]
rev_nickup = nickup[::-1]
rev_wife = wife[::-1]
rev_wifeup = wifeup[::-1]
rev_kid = kid[::-1]
rev_kidup = kidup[::-1]
reverse = [rev_name, rev_nameup, rev_nick, rev_nickup, rev_wife, rev_wifeup, rev_kid, rev_kidup]
rev_n = [rev_name, rev_nameup, rev_nick, rev_nickup]
rev_w = [rev_wife, rev_wifeup]
rev_k = [rev_kid, rev_kidup]
# Let's do some serious work! This will be a mess of code, but... who cares? :)
# Birthdays combinations
bds = [birthdate_yy, birthdate_yyy, birthdate_yyyy, birthdate_xd, birthdate_xm, birthdate_dd, birthdate_mm]
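    # Build every ordered combination of one, two or three distinct birthday
    # fragments (the same pattern is repeated below for the spouse's and
    # child's dates).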
bdss = []
for bds1 in bds:
bdss.append(bds1)
for bds2 in bds:
if bds.index(bds1) != bds.index(bds2):
bdss.append(bds1+bds2)
for bds3 in bds:
if bds.index(bds1) != bds.index(bds2) and bds.index(bds2) != bds.index(bds3) and bds.index(bds1) != bds.index(bds3):
bdss.append(bds1+bds2+bds3)
# For a woman...
wbds = [wifeb_yy, wifeb_yyy, wifeb_yyyy, wifeb_xd, wifeb_xm, wifeb_dd, wifeb_mm]
wbdss = []
for wbds1 in wbds:
wbdss.append(wbds1)
for wbds2 in wbds:
if wbds.index(wbds1) != wbds.index(wbds2):
wbdss.append(wbds1+wbds2)
for wbds3 in wbds:
if wbds.index(wbds1) != wbds.index(wbds2) and wbds.index(wbds2) != wbds.index(wbds3) and wbds.index(wbds1) != wbds.index(wbds3):
wbdss.append(wbds1+wbds2+wbds3)
# and a child...
kbds = [kidb_yy, kidb_yyy, kidb_yyyy, kidb_xd, kidb_xm, kidb_dd, kidb_mm]
kbdss = []
for kbds1 in kbds:
kbdss.append(kbds1)
for kbds2 in kbds:
if kbds.index(kbds1) != kbds.index(kbds2):
kbdss.append(kbds1+kbds2)
for kbds3 in kbds:
if kbds.index(kbds1) != kbds.index(kbds2) and kbds.index(kbds2) != kbds.index(kbds3) and kbds.index(kbds1) != kbds.index(kbds3):
kbdss.append(kbds1+kbds2+kbds3)
# string combinations....
kombinaac = [pet, petup, company, companyup]
kombina = [name, surname, nick, nameup, surnameup, nickup]
kombinaw = [wife, wifen, wifeup, wifenup, surname, surnameup]
kombinak = [kid, kidn, kidup, kidnup, surname, surnameup]
kombinaa = []
for kombina1 in kombina:
kombinaa.append(kombina1)
for kombina2 in kombina:
if kombina.index(kombina1) != kombina.index(kombina2) and kombina.index(kombina1.title()) != kombina.index(kombina2.title()):
kombinaa.append(kombina1+kombina2)
kombinaaw = []
for kombina1 in kombinaw:
kombinaaw.append(kombina1)
for kombina2 in kombinaw:
if kombinaw.index(kombina1) != kombinaw.index(kombina2) and kombinaw.index(kombina1.title()) != kombinaw.index(kombina2.title()):
kombinaaw.append(kombina1+kombina2)
kombinaak = []
for kombina1 in kombinak:
kombinaak.append(kombina1)
for kombina2 in kombinak:
if kombinak.index(kombina1) != kombinak.index(kombina2) and kombinak.index(kombina1.title()) != kombinak.index(kombina2.title()):
kombinaak.append(kombina1+kombina2)
komb1 = list(komb(kombinaa, bdss))
komb2 = list(komb(kombinaaw, wbdss))
komb3 = list(komb(kombinaak, kbdss))
komb4 = list(komb(kombinaa, years))
komb5 = list(komb(kombinaac, years))
komb6 = list(komb(kombinaaw, years))
komb7 = list(komb(kombinaak, years))
komb8 = list(komb(word, bdss))
komb9 = list(komb(word, wbdss))
komb10 = list(komb(word, kbdss))
komb11 = list(komb(word, years))
komb12 = ['']
komb13 = ['']
komb14 = ['']
komb15 = ['']
komb16 = ['']
komb21 = ['']
if randnum == "y":
komb12 = list(concats(word, numfrom, numto))
komb13 = list(concats(kombinaa, numfrom, numto))
komb14 = list(concats(kombinaac, numfrom, numto))
komb15 = list(concats(kombinaaw, numfrom, numto))
komb16 = list(concats(kombinaak, numfrom, numto))
komb21 = list(concats(reverse, numfrom, numto))
komb17 = list(komb(reverse, years))
komb18 = list(komb(rev_w, wbdss))
komb19 = list(komb(rev_k, kbdss))
komb20 = list(komb(rev_n, bdss))
komb001 = ['']
komb002 = ['']
komb003 = ['']
komb004 = ['']
komb005 = ['']
komb006 = ['']
if spechars1 == "y":
komb001 = list(komb(kombinaa, spechars))
komb002 = list(komb(kombinaac, spechars))
komb003 = list(komb(kombinaaw , spechars))
komb004 = list(komb(kombinaak , spechars))
komb005 = list(komb(word, spechars))
komb006 = list(komb(reverse, spechars))
print "[+] Sorting list and removing duplicates..."
komb_unique1 = dict.fromkeys(komb1).keys()
komb_unique2 = dict.fromkeys(komb2).keys()
komb_unique3 = dict.fromkeys(komb3).keys()
komb_unique4 = dict.fromkeys(komb4).keys()
komb_unique5 = dict.fromkeys(komb5).keys()
komb_unique6 = dict.fromkeys(komb6).keys()
komb_unique7 = dict.fromkeys(komb7).keys()
komb_unique8 = dict.fromkeys(komb8).keys()
komb_unique9 = dict.fromkeys(komb9).keys()
komb_unique10 = dict.fromkeys(komb10).keys()
komb_unique11 = dict.fromkeys(komb11).keys()
komb_unique12 = dict.fromkeys(komb12).keys()
komb_unique13 = dict.fromkeys(komb13).keys()
komb_unique14 = dict.fromkeys(komb14).keys()
komb_unique15 = dict.fromkeys(komb15).keys()
komb_unique16 = dict.fromkeys(komb16).keys()
komb_unique17 = dict.fromkeys(komb17).keys()
komb_unique18 = dict.fromkeys(komb18).keys()
komb_unique19 = dict.fromkeys(komb19).keys()
komb_unique20 = dict.fromkeys(komb20).keys()
komb_unique21 = dict.fromkeys(komb21).keys()
komb_unique01 = dict.fromkeys(kombinaa).keys()
komb_unique02 = dict.fromkeys(kombinaac).keys()
komb_unique03 = dict.fromkeys(kombinaaw).keys()
komb_unique04 = dict.fromkeys(kombinaak).keys()
komb_unique05 = dict.fromkeys(word).keys()
komb_unique07 = dict.fromkeys(komb001).keys()
komb_unique08 = dict.fromkeys(komb002).keys()
komb_unique09 = dict.fromkeys(komb003).keys()
komb_unique010 = dict.fromkeys(komb004).keys()
komb_unique011 = dict.fromkeys(komb005).keys()
komb_unique012 = dict.fromkeys(komb006).keys()
uniqlist = bdss+wbdss+kbdss+reverse+komb_unique01+komb_unique02+komb_unique03+komb_unique04+komb_unique05+komb_unique1+komb_unique2+komb_unique3+komb_unique4+komb_unique5+komb_unique6+komb_unique7+komb_unique8+komb_unique9+komb_unique10+komb_unique11+komb_unique12+komb_unique13+komb_unique14+komb_unique15+komb_unique16+komb_unique17+komb_unique18+komb_unique19+komb_unique20+komb_unique21+komb_unique07+komb_unique08+komb_unique09+komb_unique010+komb_unique011+komb_unique012
unique_lista = dict.fromkeys(uniqlist).keys()
unique_leet = []
if leetmode == "y":
for x in unique_lista: # if you want to add more leet chars, you will need to add more lines in cupp.cfg too...
x = x.replace('a',a)
x = x.replace('i',i)
x = x.replace('e',e)
x = x.replace('t',t)
x = x.replace('o',o)
x = x.replace('s',s)
x = x.replace('g',g)
x = x.replace('z',z)
unique_leet.append(x)
unique_list = unique_lista + unique_leet
unique_list_finished = []
for x in unique_list:
if len(x) > wcfrom and len(x) < wcto:
unique_list_finished.append(x)
unique_list_finished.sort()
f = open ( name+'.txt', 'w' )
f.write (os.linesep.join(unique_list_finished))
f = open ( name+'.txt', 'r' )
lines = 0
for line in f:
lines += 1
f.close()
print "[+] Saving dictionary to \033[1;31m"+name+".txt\033[1;m, counting \033[1;31m"+str(lines)+"\033[1;m words."
print "[+] Now load your pistolero with \033[1;31m"+name+".txt\033[1;m and shoot! Good luck!"
exit()
elif sys.argv[1] == '-a':
url = config.get('alecto','alectourl')
print "\r\n[+] Checking if alectodb is not present..."
if os.path.isfile('alectodb.csv.gz') == 0:
print "[+] Downloading alectodb.csv.gz..."
webFile = urllib.urlopen(url)
localFile = open(url.split('/')[-1], 'w')
localFile.write(webFile.read())
webFile.close()
localFile.close()
f = gzip.open('alectodb.csv.gz', 'rb')
data = csv.reader(f)
usernames = []
passwords = []
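    # Assumption: in the alectodb CSV layout, column 5 holds the default
    # username and column 6 the default password.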
for row in data:
usernames.append(row[5])
passwords.append(row[6])
gus = list(set(usernames))
gpa = list(set(passwords))
gus.sort()
gpa.sort()
print "\r\n[+] Exporting to alectodb-usernames.txt and alectodb-passwords.txt\r\n[+] Done."
f = open ( 'alectodb-usernames.txt', 'w' )
f.write (os.linesep.join(gus))
f.close()
f = open ( 'alectodb-passwords.txt', 'w' )
f.write (os.linesep.join(gpa))
f.close()
f.close()
sys.exit()
elif sys.argv[1] == '-l':
ftpname = config.get('downloader','ftpname')
ftpurl = config.get('downloader','ftpurl')
ftppath = config.get('downloader','ftppath')
ftpuser = config.get('downloader','ftpuser')
ftppass = config.get('downloader','ftppass')
if os.path.isdir('dictionaries') == 0:
os.mkdir('dictionaries')
print " \r\n Choose the section you want to download:\r\n"
print " 1 Moby 14 french 27 places"
print " 2 afrikaans 15 german 28 polish"
    print " 3 american 16 hindi 29 random"
print " 4 aussie 17 hungarian 30 religion"
print " 5 chinese 18 italian 31 russian"
print " 6 computer 19 japanese 32 science"
print " 7 croatian 20 latin 33 spanish"
print " 8 czech 21 literature 34 swahili"
print " 9 danish 22 movieTV 35 swedish"
print " 10 databases 23 music 36 turkish"
print " 11 dictionaries 24 names 37 yiddish"
print " 12 dutch 25 net 38 exit program"
print " 13 finnish 26 norwegian \r\n"
print " \r\n Files will be downloaded from "+ftpname+" repository"
print " \r\n Tip: After downloading wordlist, you can improve it with -w option\r\n"
    filedown = raw_input("[>] Enter number: ")
    while not filedown.isdigit() or int(filedown) > 38 or int(filedown) < 1:
        print "\r\n[-] Wrong choice. "
        filedown = raw_input("[>] Enter number: ")
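    # FTP helpers: handleDownload streams each received block to the open
    # file (printing a dot as a progress tick), downloader logs in and moves
    # to the wordlist directory, and filequitter closes the current file.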
def handleDownload(block):
file.write(block)
print ".",
def downloader():
ftp.login(ftpuser, ftppass)
ftp.cwd(ftppath)
def filequitter():
file.close()
print ' done.'
if filedown == "1":
print "\r\n[+] connecting...\r\n"
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('Moby')
if os.path.isdir('dictionaries/Moby/') == 0:
os.mkdir('dictionaries/Moby/')
dire = 'dictionaries/Moby/'
file = open(dire+'mhyph.tar.gz', 'wb')
print "\r\n[+] downloading mhyph.tar.gz..."
ftp.retrbinary('RETR ' + 'mhyph.tar.gz', handleDownload)
filequitter()
file = open(dire+'mlang.tar.gz', 'wb')
print "\r\n[+] downloading mlang.tar.gz..."
ftp.retrbinary('RETR ' + 'mlang.tar.gz', handleDownload)
filequitter()
file = open(dire+'moby.tar.gz', 'wb')
print "\r\n[+] downloading moby.tar.gz..."
ftp.retrbinary('RETR ' + 'moby.tar.gz', handleDownload)
filequitter()
file = open(dire+'mpos.tar.gz', 'wb')
print "\r\n[+] downloading mpos.tar.gz..."
ftp.retrbinary('RETR ' + 'mpos.tar.gz', handleDownload)
filequitter()
file = open(dire+'mpron.tar.gz', 'wb')
print "\r\n[+] downloading mpron.tar.gz..."
ftp.retrbinary('RETR ' + 'mpron.tar.gz', handleDownload)
filequitter()
file = open(dire+'mthes.tar.gz', 'wb')
print "\r\n[+] downloading mthes.tar.gz..."
ftp.retrbinary('RETR ' + 'mthes.tar.gz', handleDownload)
filequitter()
file = open(dire+'mwords.tar.gz', 'wb')
print "\r\n[+] downloading mwords.tar.gz..."
ftp.retrbinary('RETR ' + 'mwords.tar.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "2":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('afrikaans')
if os.path.isdir('dictionaries/afrikaans/') == 0:
os.mkdir('dictionaries/afrikaans/')
dire = 'dictionaries/afrikaans/'
file = open(dire+'afr_dbf.zip', 'wb')
print "\r\n[+] downloading afr_dbf.zip..."
ftp.retrbinary('RETR ' + 'afr_dbf.zip', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "3":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('american')
if os.path.isdir('dictionaries/american/') == 0:
os.mkdir('dictionaries/american/')
dire = 'dictionaries/american/'
file = open(dire+'dic-0294.tar.gz', 'wb')
print "\r\n[+] downloading dic-0294.tar.gz..."
ftp.retrbinary('RETR ' + 'dic-0294.tar.gz', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "4":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('aussie')
if os.path.isdir('dictionaries/aussie/') == 0:
os.mkdir('dictionaries/aussie/')
dire = 'dictionaries/aussie/'
file = open(dire+'oz.Z', 'wb')
print "\r\n[+] downloading oz.Z..."
ftp.retrbinary('RETR ' + 'oz.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "5":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('chinese')
if os.path.isdir('dictionaries/chinese/') == 0:
os.mkdir('dictionaries/chinese/')
dire = 'dictionaries/chinese/'
file = open(dire+'chinese.Z', 'wb')
print "\r\n[+] downloading chinese.Z..."
ftp.retrbinary('RETR ' + 'chinese.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "6":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('computer')
if os.path.isdir('dictionaries/computer/') == 0:
os.mkdir('dictionaries/computer/')
dire = 'dictionaries/computer/'
file = open(dire+'Domains.Z', 'wb')
print "\r\n[+] downloading Domains.Z..."
ftp.retrbinary('RETR ' + 'Domains.Z', handleDownload)
filequitter()
file = open(dire+'Dosref.Z', 'wb')
print "\r\n[+] downloading Dosref.Z..."
ftp.retrbinary('RETR ' + 'Dosref.Z', handleDownload)
filequitter()
file = open(dire+'Ftpsites.Z', 'wb')
print "\r\n[+] downloading Ftpsites.Z..."
ftp.retrbinary('RETR ' + 'Ftpsites.Z', handleDownload)
filequitter()
file = open(dire+'Jargon.Z', 'wb')
print "\r\n[+] downloading Jargon.Z..."
ftp.retrbinary('RETR ' + 'Jargon.Z', handleDownload)
filequitter()
file = open(dire+'common-passwords.txt.Z', 'wb')
print "\r\n[+] downloading common-passwords.txt.Z..."
ftp.retrbinary('RETR ' + 'common-passwords.txt.Z', handleDownload)
filequitter()
file = open(dire+'etc-hosts.Z', 'wb')
print "\r\n[+] downloading etc-hosts.Z..."
ftp.retrbinary('RETR ' + 'etc-hosts.Z', handleDownload)
filequitter()
file = open(dire+'foldoc.gz', 'wb')
print "\r\n[+] downloading foldoc.gz..."
ftp.retrbinary('RETR ' + 'foldoc.gz', handleDownload)
filequitter()
file = open(dire+'language-list.Z', 'wb')
print "\r\n[+] downloading language-list.Z..."
ftp.retrbinary('RETR ' + 'language-list.Z', handleDownload)
filequitter()
file = open(dire+'unix.Z', 'wb')
print "\r\n[+] downloading unix.Z..."
ftp.retrbinary('RETR ' + 'unix.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "7":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('croatian')
if os.path.isdir('dictionaries/croatian/') == 0:
os.mkdir('dictionaries/croatian/')
dire = 'dictionaries/croatian/'
file = open(dire+'croatian.gz', 'wb')
print "\r\n[+] downloading croatian.gz..."
ftp.retrbinary('RETR ' + 'croatian.gz', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "8":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('czech')
if os.path.isdir('dictionaries/czech/') == 0:
os.mkdir('dictionaries/czech/')
dire = 'dictionaries/czech/'
file = open(dire+'czech-wordlist-ascii-cstug-novak.Z', 'wb')
print "\r\n[+] downloading czech-wordlist-ascii-cstug-novak.Z..."
ftp.retrbinary('RETR ' + 'czech-wordlist-ascii-cstug-novak.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "9":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('danish')
if os.path.isdir('dictionaries/danish/') == 0:
os.mkdir('dictionaries/danish/')
dire = 'dictionaries/danish/'
file = open(dire+'danish.words.Z', 'wb')
print "\r\n[+] downloading danish.words.Z..."
ftp.retrbinary('RETR ' + 'danish.words.Z', handleDownload)
filequitter()
file = open(dire+'dansk.zip', 'wb')
print "\r\n[+] downloading dansk.zip..."
ftp.retrbinary('RETR ' + 'dansk.zip', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "10":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('databases')
if os.path.isdir('dictionaries/databases/') == 0:
os.mkdir('dictionaries/databases/')
dire = 'dictionaries/databases/'
file = open(dire+'acronyms.Z', 'wb')
print "\r\n[+] downloading acronyms.Z..."
ftp.retrbinary('RETR ' + 'acronyms.Z', handleDownload)
filequitter()
file = open(dire+'att800.Z', 'wb')
print "\r\n[+] downloading att800.Z..."
ftp.retrbinary('RETR ' + 'att800.Z', handleDownload)
filequitter()
file = open(dire+'computer-companies.Z', 'wb')
print "\r\n[+] downloading computer-companies.Z..."
ftp.retrbinary('RETR ' + 'computer-companies.Z', handleDownload)
filequitter()
file = open(dire+'world_heritage.Z', 'wb')
print "\r\n[+] downloading world_heritage.Z..."
ftp.retrbinary('RETR ' + 'world_heritage.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "11":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('dictionaries')
if os.path.isdir('dictionaries/dictionaries/') == 0:
os.mkdir('dictionaries/dictionaries/')
dire = 'dictionaries/dictionaries/'
file = open(dire+'Antworth.gz', 'wb')
print "\r\n[+] downloading Antworth.gz..."
ftp.retrbinary('RETR ' + 'Antworth.gz', handleDownload)
filequitter()
file = open(dire+'CRL.words.gz', 'wb')
print "\r\n[+] downloading CRL.words.gz..."
ftp.retrbinary('RETR ' + 'CRL.words.gz', handleDownload)
filequitter()
file = open(dire+'Roget.words.gz', 'wb')
print "\r\n[+] downloading Roget.words.gz..."
ftp.retrbinary('RETR ' + 'Roget.words.gz', handleDownload)
filequitter()
file = open(dire+'Unabr.dict.gz', 'wb')
print "\r\n[+] downloading Unabr.dict.gz..."
ftp.retrbinary('RETR ' + 'Unabr.dict.gz', handleDownload)
filequitter()
file = open(dire+'Unix.dict.gz', 'wb')
print "\r\n[+] downloading Unix.dict.gz..."
ftp.retrbinary('RETR ' + 'Unix.dict.gz', handleDownload)
filequitter()
file = open(dire+'englex-dict.gz', 'wb')
print "\r\n[+] downloading englex-dict.gz..."
ftp.retrbinary('RETR ' + 'englex-dict.gz', handleDownload)
filequitter()
file = open(dire+'knuth_britsh.gz', 'wb')
print "\r\n[+] downloading knuth_britsh.gz..."
ftp.retrbinary('RETR ' + 'knuth_britsh.gz', handleDownload)
filequitter()
file = open(dire+'knuth_words.gz', 'wb')
print "\r\n[+] downloading knuth_words.gz..."
ftp.retrbinary('RETR ' + 'knuth_words.gz', handleDownload)
filequitter()
file = open(dire+'pocket-dic.gz', 'wb')
print "\r\n[+] downloading pocket-dic.gz..."
ftp.retrbinary('RETR ' + 'pocket-dic.gz', handleDownload)
filequitter()
file = open(dire+'shakesp-glossary.gz', 'wb')
print "\r\n[+] downloading shakesp-glossary.gz..."
ftp.retrbinary('RETR ' + 'shakesp-glossary.gz', handleDownload)
filequitter()
file = open(dire+'special.eng.gz', 'wb')
print "\r\n[+] downloading special.eng.gz..."
ftp.retrbinary('RETR ' + 'special.eng.gz', handleDownload)
filequitter()
file = open(dire+'words-english.gz', 'wb')
print "\r\n[+] downloading words-english.gz..."
ftp.retrbinary('RETR ' + 'words-english.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "12":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('dutch')
if os.path.isdir('dictionaries/dutch/') == 0:
os.mkdir('dictionaries/dutch/')
dire = 'dictionaries/dutch/'
file = open(dire+'words.dutch.Z', 'wb')
print "\r\n[+] downloading words.dutch.Z..."
ftp.retrbinary('RETR ' + 'words.dutch.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "13":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('finnish')
if os.path.isdir('dictionaries/finnish/') == 0:
os.mkdir('dictionaries/finnish/')
dire = 'dictionaries/finnish/'
file = open(dire+'finnish.gz', 'wb')
print "\r\n[+] downloading finnish.gz..."
ftp.retrbinary('RETR ' + 'finnish.gz', handleDownload)
filequitter()
file = open(dire+'firstnames.finnish.gz', 'wb')
print "\r\n[+] downloading firstnames.finnish.gz..."
ftp.retrbinary('RETR ' + 'firstnames.finnish.gz', handleDownload)
filequitter()
file = open(dire+'words.finnish.FAQ.gz', 'wb')
print "\r\n[+] downloading words.finnish.FAQ.gz..."
ftp.retrbinary('RETR ' + 'words.finnish.FAQ.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "14":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('french')
if os.path.isdir('dictionaries/french/') == 0:
os.mkdir('dictionaries/french/')
dire = 'dictionaries/french/'
file = open(dire+'dico.Z', 'wb')
print "\r\n[+] downloading dico.Z..."
ftp.retrbinary('RETR ' + 'dico.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "15":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('german')
if os.path.isdir('dictionaries/german/') == 0:
os.mkdir('dictionaries/german/')
dire = 'dictionaries/german/'
file = open(dire+'deutsch.dic.Z', 'wb')
print "\r\n[+] downloading deutsch.dic.Z..."
ftp.retrbinary('RETR ' + 'deutsch.dic.Z', handleDownload)
filequitter()
file = open(dire+'germanl.Z', 'wb')
print "\r\n[+] downloading germanl.Z..."
ftp.retrbinary('RETR ' + 'germanl.Z', handleDownload)
filequitter()
file = open(dire+'words.german.Z', 'wb')
print "\r\n[+] downloading words.german.Z..."
ftp.retrbinary('RETR ' + 'words.german.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "16":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('hindi')
if os.path.isdir('dictionaries/hindi/') == 0:
os.mkdir('dictionaries/hindi/')
dire = 'dictionaries/hindi/'
file = open(dire+'hindu-names.Z', 'wb')
print "\r\n[+] downloading hindu-names.Z..."
ftp.retrbinary('RETR ' + 'hindu-names.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "17":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('hungarian')
if os.path.isdir('dictionaries/hungarian/') == 0:
os.mkdir('dictionaries/hungarian/')
dire = 'dictionaries/hungarian/'
file = open(dire+'hungarian.gz', 'wb')
print "\r\n[+] downloading hungarian.gz..."
ftp.retrbinary('RETR ' + 'hungarian.gz', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "18":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('italian')
if os.path.isdir('dictionaries/italian/') == 0:
os.mkdir('dictionaries/italian/')
dire = 'dictionaries/italian/'
file = open(dire+'words.italian.Z', 'wb')
print "\r\n[+] downloading words.italian.Z..."
ftp.retrbinary('RETR ' + 'words.italian.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "19":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('japanese')
if os.path.isdir('dictionaries/japanese/') == 0:
os.mkdir('dictionaries/japanese/')
dire = 'dictionaries/japanese/'
file = open(dire+'words.japanese.Z', 'wb')
print "\r\n[+] downloading words.japanese.Z..."
ftp.retrbinary('RETR ' + 'words.japanese.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "20":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('latin')
if os.path.isdir('dictionaries/latin/') == 0:
os.mkdir('dictionaries/latin/')
dire = 'dictionaries/latin/'
file = open(dire+'wordlist.aug.Z', 'wb')
print "\r\n[+] downloading wordlist.aug.Z..."
ftp.retrbinary('RETR ' + 'wordlist.aug.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "21":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('literature')
if os.path.isdir('dictionaries/literature/') == 0:
os.mkdir('dictionaries/literature/')
dire = 'dictionaries/literature/'
file = open(dire+'LCarrol.gz', 'wb')
print "\r\n[+] downloading LCarrol.gz..."
ftp.retrbinary('RETR ' + 'LCarrol.gz', handleDownload)
filequitter()
file = open(dire+'Paradise.Lost.gz', 'wb')
print "\r\n[+] downloading Paradise.Lost.gz..."
ftp.retrbinary('RETR ' + 'Paradise.Lost.gz', handleDownload)
filequitter()
file = open(dire+'aeneid.gz', 'wb')
print "\r\n[+] downloading aeneid.gz..."
ftp.retrbinary('RETR ' + 'aeneid.gz', handleDownload)
filequitter()
file = open(dire+'arthur.gz', 'wb')
print "\r\n[+] downloading arthur.gz..."
ftp.retrbinary('RETR ' + 'arthur.gz', handleDownload)
filequitter()
file = open(dire+'cartoon.gz', 'wb')
print "\r\n[+] downloading cartoon.gz..."
ftp.retrbinary('RETR ' + 'cartoon.gz', handleDownload)
filequitter()
file = open(dire+'cartoons-olivier.gz', 'wb')
print "\r\n[+] downloading cartoons-olivier.gz..."
ftp.retrbinary('RETR ' + 'cartoons-olivier.gz', handleDownload)
filequitter()
file = open(dire+'charlemagne.gz', 'wb')
print "\r\n[+] downloading charlemagne.gz..."
ftp.retrbinary('RETR ' + 'charlemagne.gz', handleDownload)
filequitter()
file = open(dire+'fable.gz', 'wb')
print "\r\n[+] downloading fable.gz..."
ftp.retrbinary('RETR ' + 'fable.gz', handleDownload)
filequitter()
file = open(dire+'iliad.gz', 'wb')
print "\r\n[+] downloading iliad.gz..."
ftp.retrbinary('RETR ' + 'iliad.gz', handleDownload)
filequitter()
file = open(dire+'myths-legends.gz', 'wb')
print "\r\n[+] downloading myths-legends.gz..."
ftp.retrbinary('RETR ' + 'myths-legends.gz', handleDownload)
filequitter()
file = open(dire+'odyssey.gz', 'wb')
print "\r\n[+] downloading odyssey.gz..."
ftp.retrbinary('RETR ' + 'odyssey.gz', handleDownload)
filequitter()
file = open(dire+'sf.gz', 'wb')
print "\r\n[+] downloading sf.gz..."
ftp.retrbinary('RETR ' + 'sf.gz', handleDownload)
filequitter()
file = open(dire+'shakespeare.gz', 'wb')
print "\r\n[+] downloading shakespeare.gz..."
ftp.retrbinary('RETR ' + 'shakespeare.gz', handleDownload)
filequitter()
file = open(dire+'tolkien.words.gz', 'wb')
print "\r\n[+] downloading tolkien.words.gz..."
ftp.retrbinary('RETR ' + 'tolkien.words.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "22":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('movieTV')
if os.path.isdir('dictionaries/movieTV/') == 0:
os.mkdir('dictionaries/movieTV/')
dire = 'dictionaries/movieTV/'
file = open(dire+'Movies.Z', 'wb')
print "\r\n[+] downloading Movies.Z..."
ftp.retrbinary('RETR ' + 'Movies.Z', handleDownload)
filequitter()
file = open(dire+'Python.Z', 'wb')
print "\r\n[+] downloading Python.Z..."
ftp.retrbinary('RETR ' + 'Python.Z', handleDownload)
filequitter()
file = open(dire+'Trek.Z', 'wb')
print "\r\n[+] downloading Trek.Z..."
ftp.retrbinary('RETR ' + 'Trek.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "23":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('music')
if os.path.isdir('dictionaries/music/') == 0:
os.mkdir('dictionaries/music/')
dire = 'dictionaries/music/'
file = open(dire+'music-classical.gz', 'wb')
print "\r\n[+] downloading music-classical.gz..."
ftp.retrbinary('RETR ' + 'music-classical.gz', handleDownload)
filequitter()
file = open(dire+'music-country.gz', 'wb')
print "\r\n[+] downloading music-country.gz..."
ftp.retrbinary('RETR ' + 'music-country.gz', handleDownload)
filequitter()
file = open(dire+'music-jazz.gz', 'wb')
print "\r\n[+] downloading music-jazz.gz..."
ftp.retrbinary('RETR ' + 'music-jazz.gz', handleDownload)
filequitter()
file = open(dire+'music-other.gz', 'wb')
print "\r\n[+] downloading music-other.gz..."
ftp.retrbinary('RETR ' + 'music-other.gz', handleDownload)
filequitter()
file = open(dire+'music-rock.gz', 'wb')
print "\r\n[+] downloading music-rock.gz..."
ftp.retrbinary('RETR ' + 'music-rock.gz', handleDownload)
filequitter()
file = open(dire+'music-shows.gz', 'wb')
print "\r\n[+] downloading music-shows.gz..."
ftp.retrbinary('RETR ' + 'music-shows.gz', handleDownload)
filequitter()
file = open(dire+'rock-groups.gz', 'wb')
print "\r\n[+] downloading rock-groups.gz..."
ftp.retrbinary('RETR ' + 'rock-groups.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "24":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('names')
if os.path.isdir('dictionaries/names/') == 0:
os.mkdir('dictionaries/names/')
dire = 'dictionaries/names/'
file = open(dire+'ASSurnames.gz', 'wb')
print "\r\n[+] downloading ASSurnames.gz..."
ftp.retrbinary('RETR ' + 'ASSurnames.gz', handleDownload)
filequitter()
file = open(dire+'Congress.gz', 'wb')
print "\r\n[+] downloading Congress.gz..."
ftp.retrbinary('RETR ' + 'Congress.gz', handleDownload)
filequitter()
file = open(dire+'Family-Names.gz', 'wb')
print "\r\n[+] downloading Family-Names.gz..."
ftp.retrbinary('RETR ' + 'Family-Names.gz', handleDownload)
filequitter()
file = open(dire+'Given-Names.gz', 'wb')
print "\r\n[+] downloading Given-Names.gz..."
ftp.retrbinary('RETR ' + 'Given-Names.gz', handleDownload)
filequitter()
file = open(dire+'actor-givenname.gz', 'wb')
print "\r\n[+] downloading actor-givenname.gz..."
ftp.retrbinary('RETR ' + 'actor-givenname.gz', handleDownload)
filequitter()
file = open(dire+'actor-surname.gz', 'wb')
print "\r\n[+] downloading actor-surname.gz..."
ftp.retrbinary('RETR ' + 'actor-surname.gz', handleDownload)
filequitter()
file = open(dire+'cis-givenname.gz', 'wb')
print "\r\n[+] downloading cis-givenname.gz..."
ftp.retrbinary('RETR ' + 'cis-givenname.gz', handleDownload)
filequitter()
file = open(dire+'cis-surname.gz', 'wb')
print "\r\n[+] downloading cis-surname.gz..."
ftp.retrbinary('RETR ' + 'cis-surname.gz', handleDownload)
filequitter()
file = open(dire+'crl-names.gz', 'wb')
print "\r\n[+] downloading crl-names.gz..."
ftp.retrbinary('RETR ' + 'crl-names.gz', handleDownload)
filequitter()
file = open(dire+'famous.gz', 'wb')
print "\r\n[+] downloading famous.gz..."
ftp.retrbinary('RETR ' + 'famous.gz', handleDownload)
filequitter()
file = open(dire+'fast-names.gz', 'wb')
print "\r\n[+] downloading fast-names.gz..."
ftp.retrbinary('RETR ' + 'fast-names.gz', handleDownload)
filequitter()
file = open(dire+'female-names-kantr.gz', 'wb')
print "\r\n[+] downloading female-names-kantr.gz..."
ftp.retrbinary('RETR ' + 'female-names-kantr.gz', handleDownload)
filequitter()
file = open(dire+'female-names.gz', 'wb')
print "\r\n[+] downloading female-names.gz..."
ftp.retrbinary('RETR ' + 'female-names.gz', handleDownload)
filequitter()
file = open(dire+'givennames-ol.gz', 'wb')
print "\r\n[+] downloading givennames-ol.gz..."
ftp.retrbinary('RETR ' + 'givennames-ol.gz', handleDownload)
filequitter()
file = open(dire+'male-names-kantr.gz', 'wb')
print "\r\n[+] downloading male-names-kantr.gz..."
ftp.retrbinary('RETR ' + 'male-names-kantr.gz', handleDownload)
filequitter()
file = open(dire+'male-names.gz', 'wb')
print "\r\n[+] downloading male-names.gz..."
ftp.retrbinary('RETR ' + 'male-names.gz', handleDownload)
filequitter()
file = open(dire+'movie-characters.gz', 'wb')
print "\r\n[+] downloading movie-characters.gz..."
ftp.retrbinary('RETR ' + 'movie-characters.gz', handleDownload)
filequitter()
file = open(dire+'names.french.gz', 'wb')
print "\r\n[+] downloading names.french.gz..."
ftp.retrbinary('RETR ' + 'names.french.gz', handleDownload)
filequitter()
file = open(dire+'names.hp.gz', 'wb')
print "\r\n[+] downloading names.hp.gz..."
ftp.retrbinary('RETR ' + 'names.hp.gz', handleDownload)
filequitter()
file = open(dire+'other-names.gz', 'wb')
print "\r\n[+] downloading other-names.gz..."
ftp.retrbinary('RETR ' + 'other-names.gz', handleDownload)
filequitter()
file = open(dire+'shakesp-names.gz', 'wb')
print "\r\n[+] downloading shakesp-names.gz..."
ftp.retrbinary('RETR ' + 'shakesp-names.gz', handleDownload)
filequitter()
file = open(dire+'surnames-ol.gz', 'wb')
print "\r\n[+] downloading surnames-ol.gz..."
ftp.retrbinary('RETR ' + 'surnames-ol.gz', handleDownload)
filequitter()
file = open(dire+'surnames.finnish.gz', 'wb')
print "\r\n[+] downloading surnames.finnish.gz..."
ftp.retrbinary('RETR ' + 'surnames.finnish.gz', handleDownload)
filequitter()
file = open(dire+'usenet-names.gz', 'wb')
print "\r\n[+] downloading usenet-names.gz..."
ftp.retrbinary('RETR ' + 'usenet-names.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "25":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('net')
if os.path.isdir('dictionaries/net/') == 0:
os.mkdir('dictionaries/net/')
dire = 'dictionaries/net/'
file = open(dire+'hosts-txt.Z', 'wb')
print "\r\n[+] downloading hosts-txt.Z..."
ftp.retrbinary('RETR ' + 'hosts-txt.Z', handleDownload)
filequitter()
file = open(dire+'inet-machines.Z', 'wb')
print "\r\n[+] downloading inet-machines.Z..."
ftp.retrbinary('RETR ' + 'inet-machines.Z', handleDownload)
filequitter()
file = open(dire+'usenet-loginids.Z', 'wb')
print "\r\n[+] downloading usenet-loginids.Z..."
ftp.retrbinary('RETR ' + 'usenet-loginids.Z', handleDownload)
filequitter()
file = open(dire+'usenet-machines.Z', 'wb')
print "\r\n[+] downloading usenet-machines.Z..."
ftp.retrbinary('RETR ' + 'usenet-machines.Z', handleDownload)
filequitter()
file = open(dire+'uunet-sites.Z', 'wb')
print "\r\n[+] downloading uunet-sites.Z..."
ftp.retrbinary('RETR ' + 'uunet-sites.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "26":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('norwegian')
if os.path.isdir('dictionaries/norwegian/') == 0:
os.mkdir('dictionaries/norwegian/')
dire = 'dictionaries/norwegian/'
file = open(dire+'words.norwegian.Z', 'wb')
print "\r\n[+] downloading words.norwegian.Z..."
ftp.retrbinary('RETR ' + 'words.norwegian.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "27":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('places')
if os.path.isdir('dictionaries/places/') == 0:
os.mkdir('dictionaries/places/')
dire = 'dictionaries/places/'
file = open(dire+'Colleges.Z', 'wb')
print "\r\n[+] downloading Colleges.Z..."
ftp.retrbinary('RETR ' + 'Colleges.Z', handleDownload)
filequitter()
file = open(dire+'US-counties.Z', 'wb')
print "\r\n[+] downloading US-counties.Z..."
ftp.retrbinary('RETR ' + 'US-counties.Z', handleDownload)
filequitter()
file = open(dire+'World.factbook.Z', 'wb')
print "\r\n[+] downloading World.factbook.Z..."
ftp.retrbinary('RETR ' + 'World.factbook.Z', handleDownload)
filequitter()
file = open(dire+'Zipcodes.Z', 'wb')
print "\r\n[+] downloading Zipcodes.Z..."
ftp.retrbinary('RETR ' + 'Zipcodes.Z', handleDownload)
filequitter()
file = open(dire+'places.Z', 'wb')
print "\r\n[+] downloading places.Z..."
ftp.retrbinary('RETR ' + 'places.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "28":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('polish')
if os.path.isdir('dictionaries/polish/') == 0:
os.mkdir('dictionaries/polish/')
dire = 'dictionaries/polish/'
file = open(dire+'words.polish.Z', 'wb')
print "\r\n[+] downloading words.polish.Z..."
ftp.retrbinary('RETR ' + 'words.polish.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "29":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('random')
if os.path.isdir('dictionaries/random/') == 0:
os.mkdir('dictionaries/random/')
dire = 'dictionaries/random/'
file = open(dire+'Ethnologue.gz', 'wb')
print "\r\n[+] downloading Ethnologue.gz..."
ftp.retrbinary('RETR ' + 'Ethnologue.gz', handleDownload)
filequitter()
file = open(dire+'abbr.gz', 'wb')
print "\r\n[+] downloading abbr.gz..."
ftp.retrbinary('RETR ' + 'abbr.gz', handleDownload)
filequitter()
file = open(dire+'chars.gz', 'wb')
print "\r\n[+] downloading chars.gz..."
ftp.retrbinary('RETR ' + 'chars.gz', handleDownload)
filequitter()
file = open(dire+'dogs.gz', 'wb')
print "\r\n[+] downloading dogs.gz..."
ftp.retrbinary('RETR ' + 'dogs.gz', handleDownload)
filequitter()
file = open(dire+'drugs.gz', 'wb')
print "\r\n[+] downloading drugs.gz..."
ftp.retrbinary('RETR ' + 'drugs.gz', handleDownload)
filequitter()
file = open(dire+'junk.gz', 'wb')
print "\r\n[+] downloading junk.gz..."
ftp.retrbinary('RETR ' + 'junk.gz', handleDownload)
filequitter()
file = open(dire+'numbers.gz', 'wb')
print "\r\n[+] downloading numbers.gz..."
ftp.retrbinary('RETR ' + 'numbers.gz', handleDownload)
filequitter()
file = open(dire+'phrases.gz', 'wb')
print "\r\n[+] downloading phrases.gz..."
ftp.retrbinary('RETR ' + 'phrases.gz', handleDownload)
filequitter()
file = open(dire+'sports.gz', 'wb')
print "\r\n[+] downloading sports.gz..."
ftp.retrbinary('RETR ' + 'sports.gz', handleDownload)
filequitter()
file = open(dire+'statistics.gz', 'wb')
print "\r\n[+] downloading statistics.gz..."
ftp.retrbinary('RETR ' + 'statistics.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "30":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('religion')
if os.path.isdir('dictionaries/religion/') == 0:
os.mkdir('dictionaries/religion/')
dire = 'dictionaries/religion/'
file = open(dire+'Koran.Z', 'wb')
print "\r\n[+] downloading Koran.Z..."
ftp.retrbinary('RETR ' + 'Koran.Z', handleDownload)
filequitter()
file = open(dire+'kjbible.Z', 'wb')
print "\r\n[+] downloading kjbible.Z..."
ftp.retrbinary('RETR ' + 'kjbible.Z', handleDownload)
filequitter()
file = open(dire+'norse.Z', 'wb')
print "\r\n[+] downloading norse.Z..."
ftp.retrbinary('RETR ' + 'norse.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "31":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('russian')
if os.path.isdir('dictionaries/russian/') == 0:
os.mkdir('dictionaries/russian/')
dire = 'dictionaries/russian/'
file = open(dire+'russian.lst.Z', 'wb')
print "\r\n[+] downloading russian.lst.Z..."
ftp.retrbinary('RETR ' + 'russian.lst.Z', handleDownload)
filequitter()
file = open(dire+'russian_words.koi8.Z', 'wb')
print "\r\n[+] downloading russian_words.koi8.Z..."
ftp.retrbinary('RETR ' + 'russian_words.koi8.Z', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "32":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('science')
if os.path.isdir('dictionaries/science/') == 0:
os.mkdir('dictionaries/science/')
dire = 'dictionaries/science/'
file = open(dire+'Acr-diagnosis.gz', 'wb')
print "\r\n[+] downloading Acr-diagnosis.gz..."
ftp.retrbinary('RETR ' + 'Acr-diagnosis.gz', handleDownload)
filequitter()
file = open(dire+'Algae.gz', 'wb')
print "\r\n[+] downloading Algae.gz..."
ftp.retrbinary('RETR ' + 'Algae.gz', handleDownload)
filequitter()
file = open(dire+'Bacteria.gz', 'wb')
print "\r\n[+] downloading Bacteria.gz..."
ftp.retrbinary('RETR ' + 'Bacteria.gz', handleDownload)
filequitter()
file = open(dire+'Fungi.gz', 'wb')
print "\r\n[+] downloading Fungi.gz..."
ftp.retrbinary('RETR ' + 'Fungi.gz', handleDownload)
filequitter()
file = open(dire+'Microalgae.gz', 'wb')
print "\r\n[+] downloading Microalgae.gz..."
ftp.retrbinary('RETR ' + 'Microalgae.gz', handleDownload)
filequitter()
file = open(dire+'Viruses.gz', 'wb')
print "\r\n[+] downloading Viruses.gz..."
ftp.retrbinary('RETR ' + 'Viruses.gz', handleDownload)
filequitter()
file = open(dire+'asteroids.Z', 'wb')
print "\r\n[+] downloading asteroids.Z..."
ftp.retrbinary('RETR ' + 'asteroids.Z', handleDownload)
filequitter()
file = open(dire+'biology.Z', 'wb')
print "\r\n[+] downloading biology.Z..."
ftp.retrbinary('RETR ' + 'biology.Z', handleDownload)
filequitter()
file = open(dire+'tech.gz', 'wb')
print "\r\n[+] downloading tech.gz..."
ftp.retrbinary('RETR ' + 'tech.gz', handleDownload)
filequitter()
print '[+] files saved to '+ dire
ftp.quit()
exit()
if filedown == "33":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('spanish')
if os.path.isdir('dictionaries/spanish/') == 0:
os.mkdir('dictionaries/spanish/')
dire = 'dictionaries/spanish/'
file = open(dire+'words.spanish.Z', 'wb')
print "\r\n[+] downloading words.spanish.Z..."
ftp.retrbinary('RETR ' + 'words.spanish.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "34":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('swahili')
if os.path.isdir('dictionaries/swahili/') == 0:
os.mkdir('dictionaries/swahili/')
dire = 'dictionaries/swahili/'
file = open(dire+'swahili.gz', 'wb')
print "\r\n[+] downloading swahili.gz..."
ftp.retrbinary('RETR ' + 'swahili.gz', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "35":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('swedish')
if os.path.isdir('dictionaries/swedish/') == 0:
os.mkdir('dictionaries/swedish/')
dire = 'dictionaries/swedish/'
file = open(dire+'words.swedish.Z', 'wb')
print "\r\n[+] downloading words.swedish.Z..."
ftp.retrbinary('RETR ' + 'words.swedish.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "36":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('turkish')
if os.path.isdir('dictionaries/turkish/') == 0:
os.mkdir('dictionaries/turkish/')
dire = 'dictionaries/turkish/'
file = open(dire+'turkish.dict.gz', 'wb')
print "\r\n[+] downloading turkish.dict.gz..."
ftp.retrbinary('RETR ' + 'turkish.dict.gz', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
if filedown == "37":
print "[+] connecting..."
ftp = ftplib.FTP(ftpurl)
downloader()
ftp.cwd('yiddish')
if os.path.isdir('dictionaries/yiddish/') == 0:
os.mkdir('dictionaries/yiddish/')
dire = 'dictionaries/yiddish/'
file = open(dire+'yiddish.Z', 'wb')
print "\r\n[+] downloading yiddish.Z..."
ftp.retrbinary('RETR ' + 'yiddish.Z', handleDownload)
filequitter()
print '[+] file saved to '+ dire
ftp.quit()
exit()
else:
print '[-] leaving.'
exit()
else:
print "\r\n[Usage]: "+sys.argv[0] +" [OPTIONS] \r\n"
print "[Help]: "+sys.argv[0] +" -h\r\n"
exit() | apache-2.0 | -5,743,812,412,542,314,000 | 29.003751 | 478 | 0.633408 | false | 2.578812 | true | false | false |
ISCDtoolbox/FaciLe | pipeline/createDatabase.py | 1 | 2873 | import os
import sys
import numpy as np
from copy import deepcopy
import argparse
#Parallel
import subprocess as sp
import multiprocessing as mp
sys.path.append(os.path.join(os.path.dirname(__file__),"../projects/tools"))
import msh
import executable_paths as exe
def parse():
parser = argparse.ArgumentParser(description="Creates mandible and masseter files for the database creation")
parser.add_argument("-i", "--inputDir", help="input directory", type=str, required=True)
parser.add_argument("-o", "--outputDir", help="output directory", type=str, required=True)
return parser.parse_args()
def checkArgs(args):
if not os.path.exists(args.inputDir):
        print args.inputDir + " is not a valid directory"
sys.exit()
    if len([f for f in os.listdir(args.inputDir) if f[0] != "."]) == 0:
print args.inputDir + " is an empty directory"
sys.exit()
if not os.path.exists(args.outputDir):
print args.outputDir + " does not exist, creating"
os.system("mkdir " + args.outputDir)
args.inputDir = os.path.abspath(args.inputDir)
args.outputDir = os.path.abspath(args.outputDir)
def command(cmd, displayOutput=False):
err = 1
print "Running the command '" + cmd + "'"
if displayOutput:
err = os.system(cmd)
else:
err = os.system(cmd + " > tmp_out.txt 2>tmp_err.txt")
if err:
print "An error happened while executing:\n"+cmd+"\nLook in tmp_out.txt or tmp_err.txt for info\nExiting..."
sys.exit()
else:
os.system("rm tmp_out.txt tmp_err.txt >/dev/null 2>&1")
def work(in_file):
"""Defines the work unit on an input file"""
root = '.'.join(in_file.split("/")[-1].split(".")[:-1])
if not os.path.exists("tmp_"+root):
os.mkdir("tmp_"+root)
os.chdir("tmp_"+root)
os.system("cp /home/norgeot/dev/own/FaciLe/projects/warping/demo/sphere.o1.mesh ./sphere.mesh")
cmd = " ".join([exe.processSkull, "-i " + in_file, "-t ../../OsTemplate.mesh",">",root+"_OUT.txt"])
print "Starting the skull processing for " + in_file
#os.system(cmd)
print "Skull processing finished for " + in_file
#clean the working directories
for ext in [".warped.mesh", ".box.1.o.", "mat","_OUT.txt"]:
for f in os.listdir("."):
if ext in f:
os.rename(f, os.path.join(args.outputDir,f))
for f in os.listdir("."):
if ".mesh" in f or ".sol" in f:
            # Cleanup intentionally left disabled so intermediate .mesh/.sol
            # files can be inspected; re-enable os.remove(f) to clean up.
            #os.remove(f)
            #print f + " was successfully removed"
            pass
return 0
if __name__=="__main__":
args = parse()
checkArgs(args)
files = [os.path.join(args.inputDir,f) for f in os.listdir(args.inputDir) if ".mesh" in f]
#Set up the parallel task pool to use all available processors
count = mp.cpu_count()
pool = mp.Pool(processes=count)
pool.map(work, files)
| gpl-3.0 | 9,033,572,557,116,844,000 | 31.647727 | 116 | 0.622346 | false | 3.348485 | false | false | false |
iain-peddie/well-behaved-python | tests/WellBehavedPythonTests/Discovery/ModuleExaminerTests.py | 1 | 3114 | #!/usr/bin/env python3
# Copyright 2013 Iain Peddie [email protected]
#
# This file is part of WellBehavedPython
#
# WellBehavedPython is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WellBehavedPython is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WellBehavedPython. If not, see <http://www.gnu.org/licenses/>.
from WellBehavedPython.api import *
from WellBehavedPython.Engine.TestCase import TestCase
from WellBehavedPython.Discovery.ModuleExaminer import ModuleExaminer
class ModuleExaminerTests(TestCase):
    def test_examiner_can_find_only_class_in_simple_module(self):
# Where
examiner = ModuleExaminer('WellBehavedPythonTests.Samples.SampleModule');
# When
classes = examiner.listAllClasses()
# The classes have been imported
# Then
from ..Samples import SampleModule
expect(classes).toEqual([SampleModule.SampleTests])
def test_examiner_can_find_all_classes_in_complex_module(self):
# Where
examiner = ModuleExaminer('WellBehavedPythonTests.Samples.SampleComplexModule');
# When
classes = examiner.listAllClasses()
# The classes have been imported
# Then
from ..Samples import SampleComplexModule
expect(classes).toContain(SampleComplexModule.SampleFirstTests)
expect(classes).toContain(SampleComplexModule.SampleSecondTests)
expect(classes).toContain(SampleComplexModule.StandaloneClass)
def test_examiner_can_find_all_modules(self):
# Where
examiner = ModuleExaminer('WellBehavedPythonTests.Samples');
# When
modules = examiner.listAllModules();
# Then
from ..Samples import SampleModule
from ..Samples import SampleComplexModule
expect(modules).toContain('WellBehavedPythonTests.Samples.SampleModule');
expect(modules).toContain('WellBehavedPythonTests.Samples.SampleComplexModule');
def test_examiner_is_not_recursive_for_modules(self):
# Where
examiner = ModuleExaminer('WellBehavedPythonTests');
# When
modules = examiner.listAllModules();
# Then
expect(modules).toContain('WellBehavedPythonTests.BackwardsCompatibilityTests');
expect(modules).Not.toContain('WellBehavedPythonTests.Discovery.Samples.SampleModule');
def test_examining_can_find_subpackages(self):
# Where
examiner = ModuleExaminer('WellBehavedPythonTests')
# When
packages = examiner.listAllPackages()
# Then
expect(packages).toContain('WellBehavedPythonTests.Discovery')
| gpl-3.0 | 8,893,566,164,321,204,000 | 35.635294 | 95 | 0.706487 | false | 3.977011 | true | false | false |
QualiSystems/Azure-Shell | package/cloudshell/cp/azure/domain/common/vm_details_provider.py | 1 | 5896 | from azure.mgmt.compute.models import StorageAccountTypes
from cloudshell.cp.core.models import VmDetailsProperty, VmDetailsData, VmDetailsNetworkInterface
from cloudshell.cp.azure.domain.vm_management.operations.deploy_operation import get_ip_from_interface_name
class VmDetailsProvider(object):
def __init__(self, network_service, resource_id_parser):
"""
:param cloudshell.cp.azure.domain.services.network_service.NetworkService network_service:
:param AzureResourceIdParser resource_id_parser:
:return:
"""
self.network_service = network_service
self.resource_id_parser = resource_id_parser
def create(self, instance, is_market_place, logger, network_client, group_name):
"""
:param group_name:
:param network_client:
:param instance: azure.mgmt.compute.models.VirtualMachine
:param is_market_place: bool
:param logging.Logger logger:
:return:
"""
vm_instance_data = None
vm_network_data = None
if is_market_place:
vm_instance_data = self._get_vm_instance_data_for_market_place(instance)
vm_network_data = self._get_vm_network_data(instance, network_client, group_name, logger)
logger.info("VM {} was created via market place.".format(instance.name))
else:
vm_instance_data = self._get_vm_instance_data_for_custom_image(instance)
vm_network_data = self._get_vm_network_data(instance, network_client, group_name, logger)
logger.info("VM {} was created via custom image.".format(instance.name))
return VmDetailsData(vmInstanceData=vm_instance_data, vmNetworkData=vm_network_data)
@staticmethod
def _get_vm_instance_data_for_market_place(instance):
data = [
VmDetailsProperty(key='Image Publisher',value= instance.storage_profile.image_reference.publisher),
VmDetailsProperty(key='Image Offer',value= instance.storage_profile.image_reference.offer),
VmDetailsProperty(key='Image SKU',value= instance.storage_profile.image_reference.sku),
VmDetailsProperty(key='VM Size',value= instance.hardware_profile.vm_size),
VmDetailsProperty(key='Operating System',value= instance.storage_profile.os_disk.os_type.name),
VmDetailsProperty(key='Disk Type',value=
'HDD' if instance.storage_profile.os_disk.managed_disk.storage_account_type == StorageAccountTypes.standard_lrs else 'SSD')
]
return data
def _get_vm_instance_data_for_custom_image(self, instance):
image_name = self.resource_id_parser.get_image_name(resource_id=instance.storage_profile.image_reference.id)
resource_group = self.resource_id_parser.get_resource_group_name(resource_id=instance.storage_profile.image_reference.id)
data = [
VmDetailsProperty(key='Image',value= image_name),
VmDetailsProperty(key='Image Resource Group',value= resource_group),
VmDetailsProperty(key='VM Size',value= instance.hardware_profile.vm_size),
VmDetailsProperty(key='Operating System',value= instance.storage_profile.os_disk.os_type.name),
VmDetailsProperty(key='Disk Type',value=
'HDD' if instance.storage_profile.os_disk.managed_disk.storage_account_type == StorageAccountTypes.standard_lrs else 'SSD')
]
return data
def _get_vm_network_data(self, instance, network_client, group_name, logger):
network_interface_objects = []
for network_interface in instance.network_profile.network_interfaces:
nic_name = self.resource_id_parser.get_name_from_resource_id(network_interface.id)
nic = network_client.network_interfaces.get(group_name, nic_name)
ip_configuration = nic.ip_configurations[0]
private_ip = ip_configuration.private_ip_address
public_ip = ''
network_data = [VmDetailsProperty(key="IP", value=ip_configuration.private_ip_address)]
subnet_name = ip_configuration.subnet.id.split('/')[-1]
            if ip_configuration.public_ip_address:
                public_ip_name = get_ip_from_interface_name(nic_name)
                public_ip_object = self.network_service.get_public_ip(network_client=network_client,
                                                                      group_name=group_name,
                                                                      ip_name=public_ip_name)
                public_ip = public_ip_object.ip_address
                network_data.append(VmDetailsProperty(key="Public IP", value=public_ip))
                network_data.append(
                    VmDetailsProperty(key="Public IP Type", value=public_ip_object.public_ip_allocation_method))
                logger.info("VM {} was created with public IP '{}'.".format(instance.name, public_ip))
            network_data.append(VmDetailsProperty(key="MAC Address", value=nic.mac_address))
            # Build the interface only after the public IP (if any) has been
            # resolved: strings are immutable, so constructing it earlier
            # would freeze publicIpAddress at the initial empty value.
            current_interface = VmDetailsNetworkInterface(interfaceId=nic.resource_guid,
                                                          networkId=subnet_name,
                                                          isPrimary=nic.primary,
                                                          networkData=network_data,
                                                          privateIpAddress=private_ip,
                                                          publicIpAddress=public_ip)
            network_interface_objects.append(current_interface)
return network_interface_objects | apache-2.0 | 8,312,419,053,083,852,000 | 53.100917 | 150 | 0.61652 | false | 4.232592 | true | false | false |
fengjian/libinjection | src/sqlparse2c.py | 3 | 3800 | #!/usr/bin/env python
#
# Copyright 2012, 2013 Nick Galbreath
# [email protected]
# BSD License -- see COPYING.txt for details
#
"""
Converts a libinjection JSON data file to a C header (.h) file
"""
import sys
def toc(obj):
""" main routine """
print """
#ifndef LIBINJECTION_SQLI_DATA_H
#define LIBINJECTION_SQLI_DATA_H
#include "libinjection.h"
#include "libinjection_sqli.h"
typedef struct {
const char *word;
char type;
} keyword_t;
static size_t parse_money(sfilter * sf);
static size_t parse_other(sfilter * sf);
static size_t parse_white(sfilter * sf);
static size_t parse_operator1(sfilter *sf);
static size_t parse_char(sfilter *sf);
static size_t parse_hash(sfilter *sf);
static size_t parse_dash(sfilter *sf);
static size_t parse_slash(sfilter *sf);
static size_t parse_backslash(sfilter * sf);
static size_t parse_operator2(sfilter *sf);
static size_t parse_string(sfilter *sf);
static size_t parse_word(sfilter * sf);
static size_t parse_var(sfilter * sf);
static size_t parse_number(sfilter * sf);
static size_t parse_tick(sfilter * sf);
static size_t parse_ustring(sfilter * sf);
static size_t parse_qstring(sfilter * sf);
static size_t parse_nqstring(sfilter * sf);
static size_t parse_xstring(sfilter * sf);
static size_t parse_bstring(sfilter * sf);
static size_t parse_estring(sfilter * sf);
static size_t parse_bword(sfilter * sf);
"""
#
# Mapping of character to function
#
fnmap = {
'CHAR_WORD' : 'parse_word',
'CHAR_WHITE': 'parse_white',
'CHAR_OP1' : 'parse_operator1',
'CHAR_UNARY': 'parse_operator1',
'CHAR_OP2' : 'parse_operator2',
'CHAR_BANG' : 'parse_operator2',
'CHAR_BACK' : 'parse_backslash',
'CHAR_DASH' : 'parse_dash',
'CHAR_STR' : 'parse_string',
'CHAR_HASH' : 'parse_hash',
'CHAR_NUM' : 'parse_number',
'CHAR_SLASH': 'parse_slash',
'CHAR_SEMICOLON' : 'parse_char',
'CHAR_COMMA': 'parse_char',
'CHAR_LEFTPARENS': 'parse_char',
'CHAR_RIGHTPARENS': 'parse_char',
'CHAR_LEFTBRACE': 'parse_char',
'CHAR_RIGHTBRACE': 'parse_char',
'CHAR_VAR' : 'parse_var',
'CHAR_OTHER': 'parse_other',
'CHAR_MONEY': 'parse_money',
'CHAR_TICK' : 'parse_tick',
'CHAR_UNDERSCORE': 'parse_underscore',
'CHAR_USTRING' : 'parse_ustring',
'CHAR_QSTRING' : 'parse_qstring',
'CHAR_NQSTRING' : 'parse_nqstring',
'CHAR_XSTRING' : 'parse_xstring',
'CHAR_BSTRING' : 'parse_bstring',
'CHAR_ESTRING' : 'parse_estring',
'CHAR_BWORD' : 'parse_bword'
}
print
print "typedef size_t (*pt2Function)(sfilter *sf);"
print "static const pt2Function char_parse_map[] = {"
pos = 0
for character in obj['charmap']:
print " &%s, /* %d */" % (fnmap[character], pos)
pos += 1
print "};"
print
# keywords
# load them
keywords = obj['keywords']
for fingerprint in list(obj[u'fingerprints']):
fingerprint = '0' + fingerprint.upper()
keywords[fingerprint] = 'F'
needhelp = []
for key in keywords.iterkeys():
if key != key.upper():
needhelp.append(key)
for key in needhelp:
tmpv = keywords[key]
del keywords[key]
keywords[key.upper()] = tmpv
print "static const keyword_t sql_keywords[] = {"
for k in sorted(keywords.keys()):
if len(k) > 31:
sys.stderr.write("ERROR: keyword greater than 32 chars\n")
sys.exit(1)
print " {\"%s\", '%s'}," % (k, keywords[k])
print "};"
print "static const size_t sql_keywords_sz = %d;" % (len(keywords), )
print "#endif"
return 0
if __name__ == '__main__':
import json
sys.exit(toc(json.load(sys.stdin)))
| bsd-3-clause | -4,710,446,708,618,401,000 | 27.787879 | 73 | 0.604211 | false | 3.071948 | false | false | false |
inconvergent/differential-cloud | modules/helpers.py | 1 | 1866 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
def get_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--procs',
type=int,
default=4,
help='number of processors.'
)
parser.add_argument(
'--nearl',
type=float,
default=0.003
)
parser.add_argument(
'--midl',
type=float,
default=0.008
)
parser.add_argument(
'--farl',
type=float,
default=0.05
)
parser.add_argument(
'--stp',
type=float,
default=1.0e-7
)
parser.add_argument(
'--reject',
type=float,
default=1.0
)
parser.add_argument(
'--attract',
type=float,
default=0.3
)
parser.add_argument(
'--nmax',
type=int,
default=1000000
)
parser.add_argument(
'--itt',
type=int,
default=10000000000
)
parser.add_argument(
'--vnum',
type=int,
default=10000000000
)
parser.add_argument(
'--stat',
type=int,
default=100
)
parser.add_argument(
'--export',
type=int,
default=1000
)
parser.add_argument(
'--out',
type=str,
default='./res/res'
)
parser.add_argument(
'--startRad',
type=float,
default=0.01
)
parser.add_argument(
'--startNum',
type=int,
default=100
)
return parser.parse_args()
def make_info_str(args):
s = ''
for k in vars(args):
s += '# ' + str(k) + ': ' + str(getattr(args,k)) + '\n'
return s
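# e.g. make_info_str(get_args()) yields one commented line per option, such as
# "# nearl: 0.003" -- a convenient header for exported result files.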
def print_stats(steps,dm, meta=False):
from time import strftime
from time import time
if isinstance(meta, str):
meta = ' | {:s}'.format(meta)
else:
meta = ''
print(
'{:s} | stp: {:d} sec: {:.2f} v: {:d}{:s}'
.format(
strftime('%d/%m/%y %H:%M:%S'),
steps,
time()-dm.get_start_time(),
dm.get_vnum(),
meta
)
)
return
| mit | -6,956,966,956,868,832,000 | 14.55 | 59 | 0.546088 | false | 3.094527 | false | false | false |
apacha/OMR-Datasets | omrdatasettools/OmrDataset.py | 1 | 11026 | from enum import Enum, auto
from typing import Dict
class OmrDataset(Enum):
"""
The available OMR datasets that can be automatically downloaded with Downloader.py
"""
#: The Audiveris OMR dataset from https://github.com/Audiveris/omr-dataset-tools, Copyright 2017 by Hervé Bitteur under AGPL-3.0 license
Audiveris = auto()
#: The Baro Single Stave dataset from http://www.cvc.uab.es/people/abaro/datasets.html, Copyright 2019 Arnau Baró, Pau Riba, Jorge Calvo-Zaragoza, and Alicia Fornés under CC-BY-NC-SA 4.0 license
Baro = auto()
#: The Capitan dataset from http://grfia.dlsi.ua.es/, License unspecified, free for research purposes
Capitan = auto()
#: Custom version of the CVC-MUSCIMA dataset that contains all images in grayscale, binary and with the
#: following staff-line augmentations: interrupted, kanungo, thickness-variation-v1/2, y-variation-v1/2
#: typeset-emulation and whitespeckles. (all data augmentations that could be aligned automatically).
#: The grayscale images are different from the WriterIdentification dataset, in such a way, that they were aligned
#: to the images from the Staff-Removal dataset. This is the recommended dataset for object detection, as the
#: MUSCIMA++ annotations can be used with a variety of underlying images.
#: See https://github.com/apacha/CVC-MUSCIMA to learn more.
CvcMuscima_MultiConditionAligned = auto()
#: The larger version of the CVC-MUSCIMA dataset for staff removal in black and white with augmentations
#: from http://www.cvc.uab.es/cvcmuscima/index_database.html,
#: Copyright 2012 Alicia Fornés, Anjan Dutta, Albert Gordo and Josep Lladós under CC-BY-NC-SA 4.0 license
CvcMuscima_StaffRemoval = auto()
#: The smaller version of the CVC-MUSCIMA dataset for writer identification in grayscale
#: from http://www.cvc.uab.es/cvcmuscima/index_database.html,
#: Copyright 2012 Alicia Fornés, Anjan Dutta, Albert Gordo and Josep Lladós under CC-BY-NC-SA 4.0 license
CvcMuscima_WriterIdentification = auto()
#: Edirom dataset. All rights reserved
Edirom_Bargheer = auto()
#: Edirom datasets on Freischuetz from https://freischuetz-digital.de/edition.html. All rights reserved.
Edirom_FreischuetzDigital = auto()
#: The Fornes Music Symbols dataset from http://www.cvc.uab.es/~afornes/, License unspecified - citation requested
Fornes = auto()
#: The official HOMUS dataset from http://grfia.dlsi.ua.es/homus/, License unspecified.
Homus_V1 = auto()
#: The improved version of the HOMUS dataset with several bugs-fixed from https://github.com/apacha/Homus
Homus_V2 = auto()
#: The MUSCIMA++ dataset from https://ufal.mff.cuni.cz/muscima, Copyright 2017 Jan Hajic jr. under CC-BY-NC-SA 4.0 license
MuscimaPlusPlus_V1 = auto()
#: The second version of the MUSCIMA++ dataset from https://github.com/OMR-Research/muscima-pp
MuscimaPlusPlus_V2 = auto()
#: A sub-set of the MUSCIMA++ annotations that contains bounding-box annotations for staves, staff measures and system measures. It was semi-automatically constructed from existing annotations and manually verified for correctness. The annotations are available in a plain JSON format as well as in the COCO format.
MuscimaPlusPlus_MeasureAnnotations = auto()
#: The OpenOMR Symbols dataset from https://sourceforge.net/projects/openomr/, Copyright 2013 by Arnaud F. Desaedeleer under GPL license
OpenOmr = auto()
#: The Printed Music Symbols dataset from https://github.com/apacha/PrintedMusicSymbolsDataset, Copyright 2017 by Alexander Pacha under MIT license
Printed = auto()
#: The Rebelo dataset (part 1) with music symbols from http://www.inescporto.pt/~arebelo/index.php, Copyright 2017 by Ana Rebelo under CC BY-SA 4.0 license
Rebelo1 = auto()
#: The Rebelo dataset (part 2) with music symbols from http://www.inescporto.pt/~arebelo/index.php, Copyright 2017 by Ana Rebelo under CC BY-SA 4.0 license
Rebelo2 = auto()
#: The DeepScore dataset (version 1) with extended vocabulary from https://tuggeluk.github.io/downloads/, License unspecified.
DeepScores_V1_Extended = auto()
#: The AudioLabs v1 dataset (aka. Measure Bounding Box Annotation) from https://www.audiolabs-erlangen.de/resources/MIR/2019-ISMIR-LBD-Measures, Copyright 2019 by Frank Zalkow, Angel Villar Corrales, TJ Tsai, Vlora Arifi-Müller, and Meinard Müller under CC BY-NC-SA 4.0 license
AudioLabs_v1 = auto()
#: The AudioLabs v2 dataset, enhanced with staves, staff measures and the original system measures. The annotations are available in csv, JSON and COCO format.
AudioLabs_v2 = auto()
#: The Accidentals detection dataset by Kwon-Young Choi from https://www-intuidoc.irisa.fr/en/choi_accidentals/, License unspecified.
ChoiAccidentals = auto()
def get_dataset_download_url(self) -> str:
""" Returns the url of the selected dataset.
Example usage: OmrDataset.Fornes.get_dataset_download_url() """
return self.dataset_download_urls()[self.name]
def get_dataset_filename(self) -> str:
""" Returns the name of the downloaded zip file of a dataset.
Example usage: OmrDataset.Fornes.get_dataset_filename() """
return self.dataset_file_names()[self.name]
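    # Minimal usage sketch (member chosen arbitrarily, mirroring the
    # docstring examples above):
    #   url = OmrDataset.Fornes.get_dataset_download_url()
    #   zip_name = OmrDataset.Fornes.get_dataset_filename()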
def dataset_download_urls(self) -> Dict[str, str]:
""" Returns a mapping with all URLs, mapped from their enum keys """
return {
# Official URL: https://github.com/Audiveris/omr-dataset-tools/tree/master/data/input-images
"Audiveris": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/AudiverisOmrDataset.zip",
# Official URL: http://www.cvc.uab.es/people/abaro/datasets/MUSCIMA_ABARO.zip
"Baro": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/BaroMuscima.zip",
# Official URL: http://grfia.dlsi.ua.es/cm/projects/timul/databases/BimodalHandwrittenSymbols.zip
"Capitan": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/BimodalHandwrittenSymbols.zip",
# Official URL: http://www.cvc.uab.es/cvcmuscima/CVCMUSCIMA_WI.zip
"CvcMuscima_WriterIdentification": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVCMUSCIMA_WI.zip",
# Official URL: http://www.cvc.uab.es/cvcmuscima/CVCMUSCIMA_SR.zip
"CvcMuscima_StaffRemoval": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVCMUSCIMA_SR.zip",
# Official URL: https://github.com/apacha/CVC-MUSCIMA
"CvcMuscima_MultiConditionAligned": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVCMUSCIMA_MCA.zip",
"Edirom_Bargheer": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Bargheer.zip",
"Edirom_FreischuetzDigital": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/FreischuetzDigital.zip",
# Official URL: http://www.cvc.uab.es/cvcmuscima/datasets/Music_Symbols.zip
"Fornes": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Music_Symbols.zip",
# Official URL: http://grfia.dlsi.ua.es/homus/HOMUS.zip
"Homus_V1": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/HOMUS.zip",
# Official URL: https://github.com/apacha/Homus
"Homus_V2": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/HOMUS-2.0.zip",
# Official URL: https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11372/LRT-2372/MUSCIMA-pp_v1.0.zip?sequence=1&isAllowed=y
"MuscimaPlusPlus_V1": "https://github.com/OMR-Research/muscima-pp/releases/download/v1.0/MUSCIMA-pp_v1.0.zip",
# Official URL: https://github.com/OMR-Research/muscima-pp
"MuscimaPlusPlus_V2": "https://github.com/OMR-Research/muscima-pp/releases/download/v2.0/MUSCIMA-pp_v2.0.zip",
"MuscimaPlusPlus_Images": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVC_MUSCIMA_PP_Annotated-Images.zip",
"MuscimaPlusPlus_MeasureAnnotations": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/MUSCIMA-pp_v1.0-measure-annotations.zip",
# Official URL: https://sourceforge.net/projects/openomr/
"OpenOmr": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/OpenOMR-Dataset.zip",
"Printed": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/PrintedMusicSymbolsDataset.zip",
"Rebelo1": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Rebelo-Music-Symbol-Dataset1.zip",
"Rebelo2": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Rebelo-Music-Symbol-Dataset2.zip",
"DeepScores_V1_Extended": "https://repository.cloudlab.zhaw.ch/artifactory/deepscores/ds_extended.zip",
# Official URL: https://www.audiolabs-erlangen.de/resources/MIR/2019-ISMIR-LBD-Measures
"AudioLabs_v1": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/AudioLabs_v1.zip",
"AudioLabs_v2": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/AudioLabs_v2.zip",
# Official URL: https://www-intuidoc.irisa.fr/en/choi_accidentals/
"ChoiAccidentals": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/choi_accidentals_dataset.zip"
}
def dataset_file_names(self) -> Dict[str, str]:
""" Returns a map of all file_names, mapped from their enum keys """
return {
"Audiveris": "AudiverisOmrDataset.zip",
"Baro": "BaroMuscima.zip",
"Capitan": "BimodalHandwrittenSymbols.zip",
"CvcMuscima_WriterIdentification": "CVCMUSCIMA_WI.zip",
"CvcMuscima_StaffRemoval": "CVCMUSCIMA_SR.zip",
"CvcMuscima_MultiConditionAligned": "CVCMUSCIMA_MCA.zip",
"Edirom_Bargheer": "Bargheer.zip",
"Edirom_FreischuetzDigital": "FreischuetzDigital.zip",
"Fornes": "Music_Symbols.zip",
"Homus_V1": "HOMUS.zip",
"Homus_V2": "HOMUS-2.0.zip",
"MuscimaPlusPlus_V1": "MUSCIMA-pp_v1.0.zip",
"MuscimaPlusPlus_V2": "MUSCIMA-pp_v2.0.zip",
"MuscimaPlusPlus_Images": "CVC_MUSCIMA_PP_Annotated-Images.zip",
"MuscimaPlusPlus_MeasureAnnotations": "MUSCIMA-pp_v1.0-measure-annotations.zip",
"OpenOmr": "OpenOMR-Dataset.zip",
"Printed": "PrintedMusicSymbolsDataset.zip",
"Rebelo1": "Rebelo-Music-Symbol-Dataset1.zip",
"Rebelo2": "Rebelo-Music-Symbol-Dataset2.zip",
"DeepScores_V1_Extended": "ds_extended.zip",
"AudioLabs_v1": "AudioLabs_v1.zip",
"AudioLabs_v2": "AudioLabs_v2.zip",
"ChoiAccidentals": "choi_accidentals_dataset.zip"
}
| mit | -4,851,323,977,944,946,000 | 58.551351 | 319 | 0.699737 | false | 3.070513 | false | false | false |
khrapovs/datastorage | datastorage/compustat.py | 1 | 2589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Short interest dynamics
"""
from __future__ import print_function, division
import os
import zipfile
import datetime as dt
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
path = os.getenv("HOME") + '/Dropbox/Research/data/Compustat/data/'
# __location__ = os.path.realpath(os.path.join(os.getcwd(),
# os.path.dirname(__file__)))
# path = os.path.join(__location__, path + 'Compustat/data/')
def date_convert(string):
return dt.datetime.strptime(string, '%d-%m-%Y')
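# e.g. date_convert('31-12-2004') -> datetime.datetime(2004, 12, 31, 0, 0)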
def import_data():
"""Import data and save it to the disk.
"""
zf = zipfile.ZipFile(path + 'short_int.zip', 'r')
name = zf.namelist()[0]
short_int = pd.read_csv(zf.open(name),
converters={'datadate': date_convert})
columns = {'datadate': 'date',
'SHORTINTADJ': 'short_int',
'GVKEY': 'gvkey'}
short_int.rename(columns=columns, inplace=True)
short_int.set_index(['gvkey', 'date'], inplace=True)
short_int.sort_index(inplace=True)
short_int.to_hdf(path + 'short_int.h5', key='short_int')
print(short_int.head())
print(short_int.dtypes)
print('Number of unique companies: ',
short_int.index.get_level_values('gvkey').nunique())
print('Number of unique dates: ',
short_int.index.get_level_values('date').nunique())
print('Min and Max date: ',
short_int.index.get_level_values('date').min().date(), ',',
short_int.index.get_level_values('date').max().date())
def load_data():
"""Load data from disk and check for sanity.
"""
return pd.read_hdf(path + 'short_int.h5', 'short_int')
def count_companies(short_int):
"""Plot number of companies over time.
"""
df = short_int.reset_index().groupby('date')['gvkey'].nunique()
sns.set_context('paper')
df.plot(figsize=(10, 3))
plt.show()
    data = df.loc[dt.date(2006, 1, 1):dt.date(2007, 6, 30)]
data.plot(figsize=(10, 3))
plt.show()
def mean_short_int(short_int):
"""Mean short interest on each date.
"""
df = short_int.groupby(level='date')['short_int'].mean()
sns.set_context('paper')
df.plot(figsize=(10, 3))
plt.show()
    df.loc[:dt.date(2004, 12, 31)].plot(figsize=(10, 3))
plt.show()
    df.loc[dt.date(2006, 1, 1):dt.date(2007, 6, 30)].plot(figsize=(10, 3))
plt.show()
if __name__ == '__main__':
import_data()
short_int = load_data()
count_companies(short_int)
mean_short_int(short_int)
| mit | 2,111,269,302,578,816,300 | 24.382353 | 73 | 0.596756 | false | 3.172794 | false | false | false |
Yethiel/re-volt-addon | io_revolt/parameters_in.py | 1 | 4567 | """
Name: parameters_in
Purpose: Importing cars using the parameters.txt file
Description:
Imports entire cars using the carinfo module.
"""
if "bpy" in locals():
import imp
imp.reload(common)
imp.reload(carinfo)
imp.reload(prm_in)
import os
import bpy
import bmesh
from mathutils import Vector
from . import common
from . import carinfo
from . import prm_in
from .common import *
def import_file(filepath, scene):
"""
Imports a parameters.txt file and loads car body and wheels.
"""
PARAMETERS[filepath] = carinfo.read_parameters(filepath)
# Imports the car with all supported files
import_car(scene, PARAMETERS[filepath], filepath)
# Removes parameters from dict so they can be reloaded next time
PARAMETERS.pop(filepath)
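# Example call (path is hypothetical):
#   import_file("/path/to/car/parameters.txt", bpy.context.scene)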
def import_car(scene, params, filepath):
body = params["model"][params["body"]["modelnum"]]
body_loc = to_blender_coord(params["body"]["offset"])
wheel0loc = to_blender_coord(params["wheel"][0]["offset1"])
wheel1loc = to_blender_coord(params["wheel"][1]["offset1"])
wheel2loc = to_blender_coord(params["wheel"][2]["offset1"])
wheel3loc = to_blender_coord(params["wheel"][3]["offset1"])
folder = os.sep.join(filepath.split(os.sep)[:-1])
# Checks if the wheel models exist
wheel0_modelnum = int(params["wheel"][0]["modelnum"])
if wheel0_modelnum >= 0:
wheel0 = params["model"][wheel0_modelnum]
        if wheel0.split(os.sep)[-1] in os.listdir(folder):
            wheel0path = os.sep.join([folder, wheel0.split(os.sep)[-1]])
        else:
            # Model referenced but file missing next to parameters.txt:
            # fall back to the empty placeholder below.
            wheel0 = None
    else:
        wheel0 = None
wheel1_modelnum = int(params["wheel"][1]["modelnum"])
if wheel1_modelnum >= 0:
wheel1 = params["model"][wheel1_modelnum]
        if wheel1.split(os.sep)[-1] in os.listdir(folder):
            wheel1path = os.sep.join([folder, wheel1.split(os.sep)[-1]])
        else:
            wheel1 = None
    else:
        wheel1 = None
wheel2_modelnum = int(params["wheel"][2]["modelnum"])
if wheel2_modelnum >= 0:
wheel2 = params["model"][wheel2_modelnum]
        if wheel2.split(os.sep)[-1] in os.listdir(folder):
            wheel2path = os.sep.join([folder, wheel2.split(os.sep)[-1]])
        else:
            wheel2 = None
    else:
        wheel2 = None
wheel3_modelnum = int(params["wheel"][3]["modelnum"])
if wheel3_modelnum >= 0:
wheel3 = params["model"][wheel3_modelnum]
        if wheel3.split(os.sep)[-1] in os.listdir(folder):
            wheel3path = os.sep.join([folder, wheel3.split(os.sep)[-1]])
        else:
            wheel3 = None
    else:
        wheel3 = None
# Checks if the body is in the same folder
if body.split(os.sep)[-1] in os.listdir(folder):
bodypath = os.sep.join([folder, body.split(os.sep)[-1]])
# Creates the car body and sets the offset
body_obj = prm_in.import_file(bodypath, scene)
body_obj.location = body_loc
# Creates the wheel objects or an empty if the wheel file is not present
if wheel0:
wheel = prm_in.import_file(wheel0path, scene)
else:
wheel = bpy.data.objects.new("wheel 0", None)
scene.objects.link(wheel)
wheel.empty_draw_type = "SPHERE"
wheel.empty_draw_size = 0.1
wheel.location = wheel0loc
wheel.parent = body_obj
if wheel1:
wheel = prm_in.import_file(wheel1path, scene)
else:
wheel = bpy.data.objects.new("wheel 1", None)
scene.objects.link(wheel)
wheel.empty_draw_type = "SPHERE"
wheel.empty_draw_size = 0.1
wheel.location = wheel1loc
wheel.parent = body_obj
if wheel2:
wheel = prm_in.import_file(wheel2path, scene)
else:
wheel = bpy.data.objects.new("wheel 2", None)
scene.objects.link(wheel)
wheel.empty_draw_type = "SPHERE"
wheel.empty_draw_size = 0.1
wheel.location = wheel2loc
wheel.parent = body_obj
if wheel3:
wheel = prm_in.import_file(wheel3path, scene)
else:
wheel = bpy.data.objects.new("wheel 3", None)
scene.objects.link(wheel)
wheel.empty_draw_type = "SPHERE"
wheel.empty_draw_size = 0.1
wheel.location = wheel3loc
wheel.parent = body_obj
# Aerial representation
aerial_loc = to_blender_coord(params["aerial"]["offset"])
aerial = bpy.data.objects.new( "aerial", None )
scene.objects.link(aerial)
aerial.location = aerial_loc
aerial.empty_draw_size = 0.1
aerial.empty_draw_type = 'PLAIN_AXES'
aerial.parent = body_obj
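# Hedged refactor sketch (not used by the importer above): the four wheel
# blocks are near-identical and could be collapsed into one helper like this.
# Names here are illustrative only.
def _import_wheel(scene, body_obj, wheel_path, location, index):
    """Import a wheel mesh or create an empty placeholder at location."""
    if wheel_path:
        wheel = prm_in.import_file(wheel_path, scene)
    else:
        wheel = bpy.data.objects.new("wheel {}".format(index), None)
        scene.objects.link(wheel)
        wheel.empty_draw_type = "SPHERE"
        wheel.empty_draw_size = 0.1
    wheel.location = location
    wheel.parent = body_obj
    return wheel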
| gpl-3.0 | 5,301,716,076,450,188,000 | 30.390071 | 76 | 0.611999 | false | 3.413303 | false | false | false |
wolcomm/rptk | rptk/base.py | 1 | 3743 | # Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk base module."""
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import logging
class BaseObject(object):
"""BaseObject class providing generic logging functionality."""
def __init__(self):
"""Initialise object."""
self._log = logging.getLogger(self.__module__)
def __repr__(self):
"""Provide generic string representation."""
return "{}() object".format(self.cls_name)
def __enter__(self):
"""Log context manager entry."""
self.log_ready_start()
self.log_ready_done()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Log context manager exit."""
self.log_exit_start()
self.log_exit_done()
@property
def opts(self):
"""Get self.opts if it exists."""
return getattr(self, "_opts", None)
@property
def log(self):
"""Get the current logger."""
return self._log
@property
def cls_name(self):
"""Get the class name of self."""
return self.__class__.__name__
@property
def current_method(self):
"""Get the currently executing method name."""
return inspect.currentframe().f_back.f_code.co_name
def log_init(self):
"""Log entry into the __init__ method."""
self.log.debug(msg="initialising {} instance".format(self.cls_name))
def log_init_done(self):
"""Log exit from an __init__ method."""
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller == '__init__':
self.log.debug(msg="still initialising {} instance"
.format(self.cls_name))
else:
self.log.debug(msg="{} instance initialised".format(self.cls_name))
def log_method_enter(self, method=None):
"""Log entry into a class method."""
self.log.debug(msg="entering method {}.{}"
.format(self.cls_name, method))
def log_method_exit(self, method=None):
"""Log exit from a class method."""
self.log.debug(msg="leaving method {}.{}"
.format(self.cls_name, method))
def log_ready_start(self):
"""Log start of object initialisation."""
self.log.debug(msg="preparing {} for use".format(self))
def log_ready_done(self):
"""Log end of object initialisation."""
self.log.debug(msg="{} ready for use".format(self))
def log_exit_start(self):
"""Log start of object cleanup."""
self.log.debug(msg="cleaning up {}".format(self))
def log_exit_done(self):
"""Log end of object cleanup."""
self.log.debug(msg="finished cleaning up {}".format(self))
def raise_type_error(self, arg=None, cls=None):
"""Raise a TypeError with useful logging."""
msg = "argument {} ({}) not of type {}".format(arg.__name__, arg, cls)
self.log.error(msg=msg)
raise TypeError(msg)
def raise_runtime_error(self, msg=None):
"""Raise a RuntimeError with useful logging."""
self.log.error(msg=msg)
raise RuntimeError(msg)
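# Hedged usage sketch (not part of the original module): BaseObject is meant
# to be subclassed; the names below are illustrative only.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    class _Demo(BaseObject):
        def __init__(self):
            """Initialise the demo object with logging."""
            super(_Demo, self).__init__()
            self.log_init()
            self.log_init_done()
    # The context manager hooks log setup and teardown automatically.
    with _Demo() as demo:
        demo.log.debug(msg="inside the context manager")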
| apache-2.0 | 8,040,617,203,420,518,000 | 32.720721 | 79 | 0.606733 | false | 4.064061 | false | false | false |
platinhom/DailyTools | scripts/ESES_ElementArea.py | 1 | 3679 | #! /usr/bin/env python
# -*- coding: utf8 -*-
# Author: Platinhom; Last Updated: 2015-09-10
# Calculate each element surface area by MS_Intersection and also match the atomic area results to the pqr file.
# Usage: python ESES_ElementArea.py file.pqr
#
# Need: MS_Intersection (partition version)
# Note: Only for PQR format input.
# Custom: ESES parameters.
import os,sys
# Modify the ESES program parameter here.
# You can modify to command line input parameter as you like
probe=1.4
grid=0.2
buffer=4.0
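# Hedged addition (not in the original script): optionally override the three
# ESES parameters from the command line, e.g.
#   python ESES_ElementArea.py file.pqr 1.4 0.2 4.0
if len(sys.argv) >= 5:
    probe, grid, buffer = (float(v) for v in sys.argv[2:5])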
if (__name__ == '__main__'):
fname=sys.argv[1]
fnamelist=os.path.splitext(fname)
fxyzr=open(fnamelist[0]+".xyzr",'w')
fr=open(fname)
inlines=fr.readlines();
fr.close();
# All elements/types of input atoms, used in element area summary.
atomtypes=[];
# Write out the corresponding xyzr file.
for line in inlines:
# Each atom
if (line[:4]=="ATOM" or line[:6]=="HETATM"):
# Atom element here
tmp=line.split();
element=tmp[-1].upper();
atomtypes.append(element);
# Extract x, y, z, r from pqr to xyzr file
radius="%10.5f" % float(line[62:70].strip());
xcoor="%10.5f" % float(line[30:38].strip());
ycoor="%10.5f" % float(line[38:46].strip());
zcoor="%10.5f" % float(line[46:54].strip());
xyzrstr=xcoor+ycoor+zcoor+radius+"\n";
fxyzr.write(xyzrstr);
fxyzr.close()
# Use external ESES program to generate surface and calculate atom area
## So you have to put the ESES program in the same directory
# Output a "partition_area.txt" file saving atom area
#os.system('./MS_Intersection_Area '+fnamelist[0]+".xyzr "+str(probe)+" "+str(grid)+" "+str(buffer));
p=os.popen('./MS_Intersection '+fnamelist[0]+".xyzr "+str(probe)+" "+str(grid)+" "+str(buffer),'r')
totalArea="0"
totalVolume="0"
while 1:
line=p.readline();
if "area:" in line: totalArea=line.split(':')[1].split()[0]
if "volume:" in line: totalVolume=line.split(':')[1].split()[0]
if not line:break
# Analyze output atom area file
fa=open("partition_area.txt")
atomareas=[];# tmp save atom area by atom number
typedefault=["H","C","N","O","F","S","P","CL","BR","I"];
typeareas={"H":0.0,"C":0.0,"N":0.0,"O":0.0,"F":0.0,"S":0.0,"P":0.0,"CL":0.0,"BR":0.0,"I":0.0};
atomnum=0;
for line in fa:
tmp=line.split();
atomarea="%12.6f" % float(tmp[1]);
atomareas.append(atomarea);
atype=atomtypes[atomnum];
typeareas[atype]=typeareas.setdefault(atype,0.0)+float(tmp[1]);
atomnum=atomnum+1;
fa.close()
# Write out pqra file saving atom area
fwname=fnamelist[0]+"_area.pqra"
fw=open(fwname,'w')
# Write the total area for each element.
## Notice that here just write out the default elements.
## If you want all elements, use "typeused" for iteration.
typeused=["H","C","N","O","F","S","P","CL","BR","I"];
for i in typeareas.iterkeys():
if i not in typeused:typeused.append(i);
# For print out the atom area summary
outputelearea=fnamelist[0]+" Areas: "+totalArea+" Volumes: "+totalVolume+" ";
fw.write("REMARK AREAS "+totalArea+"\n");
fw.write("REMARK VOLUMES "+totalVolume+"\n");
for element in typedefault:
# If you want all elements, need to comment the above line and uncomment the following line.
#for element in typeused:
fw.write("REMARK AREA "+"%2s"%element+" "+"%20.6f"%typeareas.get(element,0.0)+"\n");
outputelearea=outputelearea+element+": "+str(typeareas[element])+" ";
print outputelearea
fr=open(fname)
atomnum=0;
for line in fr:
if (line[:4]=="ATOM" or line[:6]=="HETATM"):
tmp=line.split();
element=tmp[-1].upper();
newline=line.strip('\n')+atomareas[atomnum]+"\n";
fw.write(newline);
atomnum=atomnum+1;
else:
fw.write(line);
fr.close();
fw.close()
#end main
| gpl-2.0 | -1,598,104,058,107,988,000 | 32.144144 | 112 | 0.65969 | false | 2.693265 | false | false | false |
ea4gja/mrig | mrig/mrig.py | 1 | 1349 | #!/usr/bin/env python
#
# File: mrig.py
# Version: 1.0
#
# mrig: main program
# Copyright (c) 2016 German EA4GJA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from mrig_config import *
from gui_tkinter import *
import sys
import socket
import os
from Tkinter import Tk
import multiprocessing
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
tcp.connect((REMOTE_SERVER, REMOTE_SERVER_TCP_PORT))
tcp.setblocking(1)
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp.bind(("", LOCAL_UDP_PORT))
udp.setblocking(0)
root = Tk()
gui = gui_Tkinter(root, tcp=tcp, udp=udp)
root.mainloop()
tcp.close()
udp.close()
| gpl-2.0 | -2,723,161,411,879,875,600 | 27.104167 | 67 | 0.749444 | false | 3.380952 | false | false | false |
Lujeni/ansible | lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py | 1 | 7817 | #!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: aws_ssm_parameter_store
short_description: Manage key-value pairs in aws parameter store.
description:
- Manage key-value pairs in aws parameter store.
version_added: "2.5"
options:
name:
description:
- Parameter key name.
required: true
type: str
description:
description:
- Parameter key description.
required: false
type: str
value:
description:
- Parameter value.
required: false
type: str
state:
description:
- Creates or modifies an existing parameter.
- Deletes a parameter.
required: false
choices: ['present', 'absent']
default: present
type: str
string_type:
description:
- Parameter String type.
required: false
choices: ['String', 'StringList', 'SecureString']
default: String
type: str
decryption:
description:
      - Work with SecureString type to get plain text secrets.
type: bool
required: false
default: true
key_id:
description:
- AWS KMS key to decrypt the secrets.
- The default key (C(alias/aws/ssm)) is automatically generated the first
time it's requested.
required: false
default: alias/aws/ssm
type: str
overwrite_value:
description:
- Option to overwrite an existing value if it already exists.
required: false
version_added: "2.6"
choices: ['never', 'changed', 'always']
default: changed
type: str
author:
- Nathan Webster (@nathanwebsterdotme)
- Bill Wang (@ozbillwang) <[email protected]>
- Michael De La Rue (@mikedlr)
extends_documentation_fragment:
- aws
- ec2
requirements: [ botocore, boto3 ]
'''
EXAMPLES = '''
- name: Create or update key/value pair in aws parameter store
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
value: "World"
- name: Delete the key
aws_ssm_parameter_store:
name: "Hello"
state: absent
- name: Create or update secure key/value pair with default kms key (aws/ssm)
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
string_type: "SecureString"
value: "World"
- name: Create or update secure key/value pair with nominated kms key
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
string_type: "SecureString"
key_id: "alias/demo"
value: "World"
- name: Always update a parameter store value and create a new version
aws_ssm_parameter_store:
name: "overwrite_example"
description: "This example will always overwrite the value"
string_type: "String"
value: "Test1234"
overwrite_value: "always"
- name: recommend to use with aws_ssm lookup plugin
debug: msg="{{ lookup('aws_ssm', 'hello') }}"
'''
RETURN = '''
put_parameter:
description: Add one or more parameters to the system.
returned: success
type: dict
delete_parameter:
description: Delete a parameter from the system.
returned: success
type: dict
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info
try:
from botocore.exceptions import ClientError
except ImportError:
pass # will be captured by imported HAS_BOTO3
def update_parameter(client, module, args):
changed = False
response = {}
try:
response = client.put_parameter(**args)
changed = True
except ClientError as e:
module.fail_json_aws(e, msg="setting parameter")
return changed, response
def create_update_parameter(client, module):
changed = False
existing_parameter = None
response = {}
args = dict(
Name=module.params.get('name'),
Value=module.params.get('value'),
Type=module.params.get('string_type')
)
if (module.params.get('overwrite_value') in ("always", "changed")):
args.update(Overwrite=True)
else:
args.update(Overwrite=False)
if module.params.get('description'):
args.update(Description=module.params.get('description'))
if module.params.get('string_type') == 'SecureString':
args.update(KeyId=module.params.get('key_id'))
try:
existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True)
except Exception:
pass
if existing_parameter:
if (module.params.get('overwrite_value') == 'always'):
(changed, response) = update_parameter(client, module, args)
elif (module.params.get('overwrite_value') == 'changed'):
if existing_parameter['Parameter']['Type'] != args['Type']:
(changed, response) = update_parameter(client, module, args)
if existing_parameter['Parameter']['Value'] != args['Value']:
(changed, response) = update_parameter(client, module, args)
if args.get('Description'):
# Description field not available from get_parameter function so get it from describe_parameters
describe_existing_parameter = None
try:
describe_existing_parameter_paginator = client.get_paginator('describe_parameters')
describe_existing_parameter = describe_existing_parameter_paginator.paginate(
Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result()
except ClientError as e:
module.fail_json_aws(e, msg="getting description value")
if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']:
(changed, response) = update_parameter(client, module, args)
else:
(changed, response) = update_parameter(client, module, args)
return changed, response
def delete_parameter(client, module):
response = {}
try:
response = client.delete_parameter(
Name=module.params.get('name')
)
except ClientError as e:
if e.response['Error']['Code'] == 'ParameterNotFound':
return False, {}
module.fail_json_aws(e, msg="deleting parameter")
return True, response
def setup_client(module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ssm', region=region, endpoint=ec2_url, **aws_connect_params)
return connection
def setup_module_object():
argument_spec = dict(
name=dict(required=True),
description=dict(),
value=dict(required=False, no_log=True),
state=dict(default='present', choices=['present', 'absent']),
string_type=dict(default='String', choices=['String', 'StringList', 'SecureString']),
decryption=dict(default=True, type='bool'),
key_id=dict(default="alias/aws/ssm"),
overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']),
)
return AnsibleAWSModule(
argument_spec=argument_spec,
)
def main():
module = setup_module_object()
state = module.params.get('state')
client = setup_client(module)
invocations = {
"present": create_update_parameter,
"absent": delete_parameter,
}
(changed, response) = invocations[state](client, module)
module.exit_json(changed=changed, response=response)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,053,639,846,693,307,000 | 28.609848 | 126 | 0.643214 | false | 3.961987 | false | false | false |
SanPen/GridCal | src/GridCal/Engine/Simulations/LinearFactors/linear_analysis_ts_driver.py | 1 | 10126 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import json
import pandas as pd
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve, factorized
import time
from GridCal.Engine.Simulations.result_types import ResultTypes
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Simulations.PowerFlow.power_flow_options import PowerFlowOptions
from GridCal.Engine.Simulations.LinearFactors.linear_analysis import LinearAnalysis
from GridCal.Engine.Simulations.LinearFactors.linear_analysis_driver import LinearAnalysisOptions
from GridCal.Engine.Simulations.results_model import ResultsModel
from GridCal.Engine.Core.time_series_pf_data import compile_time_circuit
from GridCal.Engine.Simulations.driver_types import SimulationTypes
from GridCal.Engine.Simulations.results_template import ResultsTemplate
from GridCal.Engine.Simulations.driver_template import TSDriverTemplate
class LinearAnalysisTimeSeriesResults(ResultsTemplate):
def __init__(self, n, m, time_array, bus_names, bus_types, branch_names):
"""
TimeSeriesResults constructor
@param n: number of buses
@param m: number of branches
        @param time_array: array of time steps
        @param bus_names: array of bus names
        @param bus_types: array of bus types
        @param branch_names: array of branch names
"""
ResultsTemplate.__init__(self,
name='Linear Analysis time series',
available_results=[ResultTypes.BusActivePower,
ResultTypes.BranchActivePowerFrom,
ResultTypes.BranchLoading
],
data_variables=['bus_names',
'bus_types',
'time',
'branch_names',
'voltage',
'S',
'Sf',
'loading',
'losses'])
self.nt = len(time_array)
self.m = m
self.n = n
self.time = time_array
self.bus_names = bus_names
self.bus_types = bus_types
self.branch_names = branch_names
self.voltage = np.ones((self.nt, n), dtype=float)
self.S = np.zeros((self.nt, n), dtype=float)
self.Sf = np.zeros((self.nt, m), dtype=float)
self.loading = np.zeros((self.nt, m), dtype=float)
self.losses = np.zeros((self.nt, m), dtype=float)
def apply_new_time_series_rates(self, nc: "TimeCircuit"):
rates = nc.Rates.T
self.loading = self.Sf / (rates + 1e-9)
def get_results_dict(self):
"""
Returns a dictionary with the results sorted in a dictionary
:return: dictionary of 2D numpy arrays (probably of complex numbers)
"""
data = {'V': self.voltage.tolist(),
'P': self.S.real.tolist(),
'Q': self.S.imag.tolist(),
'Sbr_real': self.Sf.real.tolist(),
'Sbr_imag': self.Sf.imag.tolist(),
'loading': np.abs(self.loading).tolist()}
return data
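    # Illustrative (not part of the original class; `driver` is an assumed
    # LinearAnalysisTimeSeries instance that has already run):
    #
    #     import json
    #     with open('linear_ts_results.json', 'w') as f:
    #         json.dump(driver.results.get_results_dict(), f)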
def mdl(self, result_type: ResultTypes) -> "ResultsModel":
"""
Get ResultsModel instance
:param result_type:
:return: ResultsModel instance
"""
if result_type == ResultTypes.BusActivePower:
labels = self.bus_names
data = self.S
y_label = '(MW)'
title = 'Bus active power '
elif result_type == ResultTypes.BranchActivePowerFrom:
labels = self.branch_names
data = self.Sf.real
y_label = '(MW)'
title = 'Branch power '
elif result_type == ResultTypes.BranchLoading:
labels = self.branch_names
data = self.loading * 100
y_label = '(%)'
title = 'Branch loading '
elif result_type == ResultTypes.BranchLosses:
labels = self.branch_names
data = self.losses
y_label = '(MVA)'
title = 'Branch losses'
elif result_type == ResultTypes.BusVoltageModule:
labels = self.bus_names
data = self.voltage
y_label = '(p.u.)'
title = 'Bus voltage'
else:
            raise Exception('Result type not understood: ' + str(result_type))
if self.time is not None:
index = self.time
else:
index = list(range(data.shape[0]))
# assemble model
return ResultsModel(data=data, index=index, columns=labels, title=title, ylabel=y_label, units=y_label)
class LinearAnalysisTimeSeries(TSDriverTemplate):
name = 'Linear analysis time series'
tpe = SimulationTypes.LinearAnalysis_TS_run
def __init__(self, grid: MultiCircuit, options: LinearAnalysisOptions, start_=0, end_=None):
"""
TimeSeries constructor
@param grid: MultiCircuit instance
@param options: LinearAnalysisOptions instance
"""
TSDriverTemplate.__init__(self, grid=grid, start_=start_, end_=end_)
self.options = options
self.results = LinearAnalysisTimeSeriesResults(n=0,
m=0,
time_array=[],
bus_names=[],
bus_types=[],
branch_names=[])
self.ptdf_driver = LinearAnalysis(grid=self.grid, distributed_slack=self.options.distribute_slack)
def get_steps(self):
"""
Get time steps list of strings
"""
return [l.strftime('%d-%m-%Y %H:%M') for l in self.indices]
def run(self):
"""
Run the time series simulation
@return:
"""
self.__cancel__ = False
a = time.time()
if self.end_ is None:
self.end_ = len(self.grid.time_profile)
time_indices = np.arange(self.start_, self.end_ + 1)
ts_numeric_circuit = compile_time_circuit(self.grid)
self.results = LinearAnalysisTimeSeriesResults(n=ts_numeric_circuit.nbus,
m=ts_numeric_circuit.nbr,
time_array=ts_numeric_circuit.time_array[time_indices],
bus_names=ts_numeric_circuit.bus_names,
bus_types=ts_numeric_circuit.bus_types,
branch_names=ts_numeric_circuit.branch_names)
self.indices = pd.to_datetime(ts_numeric_circuit.time_array[time_indices])
self.progress_text.emit('Computing PTDF...')
linear_analysis = LinearAnalysis(grid=self.grid,
distributed_slack=self.options.distribute_slack,
correct_values=self.options.correct_values
)
linear_analysis.run()
self.progress_text.emit('Computing branch flows...')
Pbus_0 = ts_numeric_circuit.Sbus.real[:, time_indices]
self.results.Sf = linear_analysis.get_flows_time_series(Pbus_0)
# compute post process
self.results.loading = self.results.Sf / (ts_numeric_circuit.Rates[:, time_indices].T + 1e-9)
self.results.S = Pbus_0.T
self.elapsed = time.time() - a
        # send the finish signal
self.progress_signal.emit(0.0)
self.progress_text.emit('Done!')
self.done_signal.emit()
if __name__ == '__main__':
from matplotlib import pyplot as plt
from GridCal.Engine import *
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/grid_2_islands.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
main_circuit = FileOpen(fname).open()
options_ = LinearAnalysisOptions()
ptdf_driver = LinearAnalysisTimeSeries(grid=main_circuit, options=options_)
ptdf_driver.run()
pf_options_ = PowerFlowOptions(solver_type=SolverType.NR)
ts_driver = TimeSeries(grid=main_circuit, options=pf_options_)
ts_driver.run()
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.set_title('Newton-Raphson based flow')
ax1.plot(ts_driver.results.Sf.real)
ax2 = fig.add_subplot(222)
ax2.set_title('PTDF based flow')
ax2.plot(ptdf_driver.results.Sf.real)
ax3 = fig.add_subplot(223)
ax3.set_title('Difference')
diff = ts_driver.results.Sf.real - ptdf_driver.results.Sf.real
ax3.plot(diff)
fig2 = plt.figure()
ax1 = fig2.add_subplot(221)
ax1.set_title('Newton-Raphson based voltage')
ax1.plot(np.abs(ts_driver.results.voltage))
ax2 = fig2.add_subplot(222)
ax2.set_title('PTDF based voltage')
ax2.plot(ptdf_driver.results.voltage)
ax3 = fig2.add_subplot(223)
ax3.set_title('Difference')
diff = np.abs(ts_driver.results.voltage) - ptdf_driver.results.voltage
ax3.plot(diff)
plt.show()
| gpl-3.0 | -3,750,878,709,547,375,600 | 36.783582 | 111 | 0.567351 | false | 4.12969 | false | false | false |
thesgc/cbh_datastore_ws | runtests.py | 1 | 1190 | import sys
try:
from django.conf import settings
from django.test.utils import get_runner
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="cbh_datastore_ws.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"cbh_datastore_ws",
],
SITE_ID=1,
MIDDLEWARE_CLASSES=(),
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
except ImportError:
import traceback
traceback.print_exc()
raise ImportError(
"To fix this error, run: pip install -r requirements-test.txt")
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(bool(failures))
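# Illustrative (not part of the original runner): extra labels are passed
# straight through to the Django test runner, e.g.
#
#     python runtests.py tests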
if __name__ == '__main__':
run_tests(*sys.argv[1:])
| mit | -6,751,464,929,567,952,000 | 20.25 | 71 | 0.553782 | false | 4.006734 | true | false | false |
kylelutz/chemkit | tests/auto/plugins/mmff/analyze.py | 1 | 3504 | #!/usr/bin/python
# this script compares the mmff94.expected and mmff94.actual files
# and outputs the differences
import os
import sys
import xml.dom.minidom
COLORS_ENABLED = False
class AtomResults:
def __init__(self, type, charge):
self.type = type
self.charge = charge
class MoleculeResults:
def __init__(self, name, energy):
self.name = name
self.energy = energy
self.atoms = []
class ResultsFile:
def __init__(self, fileName):
self.fileName = fileName
self.molecules = []
def read(self):
doc = xml.dom.minidom.parse(self.fileName)
for moleculeElem in doc.getElementsByTagName('molecule'):
name = moleculeElem.getAttribute('name')
energy = float(moleculeElem.getAttribute('energy'))
moleculeResults = MoleculeResults(name, energy)
for atomElem in moleculeElem.getElementsByTagName('atom'):
type = atomElem.getAttribute('type')
charge = float(atomElem.getAttribute('charge'))
moleculeResults.atoms.append(AtomResults(type, charge))
self.molecules.append(moleculeResults)
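# Illustrative (not part of the original script): the parser can also be used
# on its own, e.g.
#
#     results = ResultsFile('mmff94.actual')
#     results.read()
#     print results.molecules[0].name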
if __name__ == '__main__':
actualResultsFile = 'mmff94.actual'
expectedResultsFile = 'mmff94.expected'
if not os.path.exists(actualResultsFile):
print 'could not find actual results file (%s)' % actualResultsFile
sys.exit(-1)
if not os.path.exists(expectedResultsFile):
print 'could not find expected results file (%s)' % expectedResultsFile
sys.exit(-1)
actualResults = ResultsFile(actualResultsFile)
actualResults.read()
expectedResults = ResultsFile(expectedResultsFile)
expectedResults.read()
# escape codes to color text
RED_COLOR = '\033[91m'
END_COLOR = '\033[0m'
if not COLORS_ENABLED:
RED_COLOR = ''
END_COLOR = ''
ATOMS_FAILED = 0
MOLECULES_FAILED = 0
# compare files
expectedMoleculeIndex = 0
for i, actualMolecule in enumerate(actualResults.molecules):
expectedMolecule = expectedResults.molecules[expectedMoleculeIndex]
while expectedMolecule.name != actualMolecule.name:
expectedMoleculeIndex += 1
expectedMolecule = expectedResults.molecules[expectedMoleculeIndex]
print '%i. %s' % (expectedMoleculeIndex+1, actualMolecule.name)
for j in range(len(actualMolecule.atoms)):
actualAtom = actualMolecule.atoms[j]
expectedAtom = expectedMolecule.atoms[j]
expectedTypeText = ''
colorCode = ''
if(actualAtom.type != expectedAtom.type or
(abs(actualAtom.charge - expectedAtom.charge) > 0.01)):
ATOMS_FAILED += 1
colorCode = RED_COLOR
expectedTypeText = '%s[%s, %s] -- FAILED%s' % (colorCode, expectedAtom.type, expectedAtom.charge, END_COLOR)
print ' %i. %s, %s %s' % (j+1, actualAtom.type, actualAtom.charge, expectedTypeText)
colorCode = ''
if(int(actualMolecule.energy) != int(expectedMolecule.energy)):
MOLECULES_FAILED += 1
colorCode = RED_COLOR
print 'energy: %f %s[%f]%s' % (actualMolecule.energy, colorCode, expectedMolecule.energy, END_COLOR)
# print some statistics
print >> sys.stderr, ''
print >> sys.stderr, 'atoms: %i failed' % ATOMS_FAILED
print >> sys.stderr, 'molecules: %i failed' % MOLECULES_FAILED
| bsd-3-clause | 8,729,999,212,681,322,000 | 32.371429 | 124 | 0.631849 | false | 3.977299 | false | false | false |
MAECProject/maec-to-oval | cybox_oval_mappings.py | 1 | 10331 | #MAEC -> OVAL Translator
#v0.94 BETA
#Generic mappings class
#Generates OVAL tests/objects/states from a CybOX Defined Object
import oval57 as oval
class cybox_oval_mappings(object):
def __init__(self, id_namespace):
self.test_id_base = 0
self.obj_id_base = 0
self.ste_id_base = 0
self.def_id_base = 0
self.id_namespace = id_namespace
#Mappings
#CybOX Condition to OVAL operation mappings
self.operator_condition_mappings = {'Equals':'equals','DoesNotEqual':'not equal','Contains':'pattern match',\
'GreaterThan':'greater than', 'GreaterThanOrEqual':'greater than or equal',\
'LessThan':'less than','LessThanOrEqual':'less than or equal','FitsPattern':'pattern match',\
'BitwiseAnd':'bitwise and', 'BitwiseOr':'bitwise or'}
#CybOX Object Type to OVAL object mappings
self.object_mappings = {'WinRegistryKeyObj:WindowsRegistryKeyObjectType':'registry_object', 'FileObj:FileObjectType':'file_object',
'WinFileObj:WindowsFileObjectType':'file_object', 'WinExecutableFileObj:WindowsExecutableFileObjectType':'file_object'}
#CybOX FileObject to OVAL file_object mappings (CybOX element name : {OVAL element name, OVAL element datatype})
self.file_object_mappings = {'File_Path':{'name':'path','datatype':'string'},'Full_Path':{'name':'filepath','datatype':'string'},
'File_Name':{'name':'filename', 'datatype':'string'}}
#CybOX FileObject to OVAL file_state mappings
self.file_state_mappings = {'Size_In_Bytes':{'name':'size','datatype':'int'},'Accessed_Time':{'name':'a_time','datatype':'int'},\
'Modified_Time':{'name':'m_time','datatype':'int'},'Created_Time':{'name':'c_time','datatype':'int'}}
#CybOX WinRegistryObject to OVAL registry_object mappings
self.registry_object_mappings = {'Key':{'name':'key','datatype':'string'},'Hive':{'name':'hive','datatype':'string'},'Name':{'name':'name','datatype':'string'}}
#CybOX WinRegistryObject Values to OVAL registry_state mappings
self.registry_state_mappings = {'Name':{'name':'name','datatype':'string'},'Data':{'name':'value','datatype':'string'},'Datatype':{'name':'type','datatype':'string'}}
#Creates and returns a dictionary of OVAL test, object, and state (if applicable)
def create_oval(self, cybox_defined_object, reference):
oval_entities = {}
oval_states = []
object_type = cybox_defined_object._XSI_NS + ':' + cybox_defined_object._XSI_TYPE
if object_type in self.object_mappings.keys():
oval_object = self.create_oval_object(object_type, cybox_defined_object)
if oval_object is not None:
if object_type == 'WinRegistryKeyObj:WindowsRegistryKeyObjectType':
self.process_registry_values(cybox_defined_object, oval_object, oval_states)
else:
state = self.create_oval_state(object_type, cybox_defined_object)
if state is not None:
oval_states.append(self.create_oval_state(object_type, cybox_defined_object))
oval_test = self.create_oval_test(object_type, oval_object, oval_entities, oval_states, reference)
oval_entities['test'] = oval_test
oval_entities['object'] = oval_object
if oval_states is not None and len(oval_states) > 0:
oval_entities['state'] = oval_states
return oval_entities
else:
return None
#Create the OVAL object
def create_oval_object(self, object_type, cybox_defined_object):
oval_object_type = self.object_mappings.get(object_type)
oval_object_mappings = self.object_mappings.get(object_type) + '_mappings'
oval_object = getattr(oval,oval_object_type)()
oval_object.set_id(self.generate_obj_id())
oval_object.set_version(1)
object_fields = cybox_defined_object._fields
# File Object related corner cases
if "File" in object_type:
if object_fields["Full_Path"]:
del object_fields["File_Name"]
del object_fields["File_Path"]
# Corner case where file_path is meant to be used as the full path to the file
elif object_fields["File_Path"] and (not object_fields["Full_Path"] and not object_fields["File_Name"]):
object_fields["Full_Path"] = object_fields["File_Path"]
del object_fields["File_Path"]
for element, value in object_fields.items():
if value is not None:
if element in getattr(getattr(self,oval_object_mappings),'keys')():
element_dictionary = getattr(getattr(self,oval_object_mappings),'get')(element)
element_name = element_dictionary.get('name')
element_datatype = element_dictionary.get('datatype')
method = 'set_' + element_name
getattr(oval_object,method)(oval.EntityBaseType(datatype = element_datatype, operation = self.operator_condition_mappings.get(value), valueOf_=value))
#Do some basic object sanity checking for certain objects
if object_type == 'WinRegistryKeyObj:WindowsRegistryKeyObjectType' and (oval_object.hive is None or oval_object.key is None):
return None
elif 'FileObjectType' in object_type and (oval_object.filepath is None and (oval_object.path is None or oval_object.filename is None)):
return None
return oval_object
#Create any OVAL states
def create_oval_state(self, object_type, cybox_defined_object):
oval_state_type = self.object_mappings.get(object_type).split('_')[0] + '_state'
oval_state_mappings = oval_state_type + '_mappings'
        oval_state = getattr(oval, oval_state_type)(version=1, id=self.generate_ste_id())
object_fields = cybox_defined_object._fields
for element, value in object_fields.items():
if value is not None:
if element in getattr(getattr(self,oval_state_mappings),'keys')():
element_dictionary = getattr(getattr(self,oval_state_mappings),'get')(element)
element_name = element_dictionary.get('name')
element_datatype = element_dictionary.get('datatype')
method = 'set_' + element_name
getattr(oval_state,method)(oval.EntityBaseType(datatype = element_datatype, operation = self.operator_condition_mappings.get(value), valueOf_=value.value))
if oval_state.hasContent_():
return oval_state
#Create the OVAL test
def create_oval_test(self, object_type, oval_object, oval_entities, oval_states, reference = None):
oval_test_type = self.object_mappings.get(object_type).split('_')[0] + '_test'
#Create the test
comment = 'OVAL Test created from MAEC Action ' + reference
oval_test = getattr(oval,oval_test_type)(id = self.generate_test_id(), check = 'at least one', version=1.0, comment = comment)
oval_test.set_object(oval.ObjectRefType(object_ref = oval_object.get_id()))
if len(oval_states) > 0:
for state in oval_states:
if state is not None:
oval_test.add_state(oval.StateRefType(state_ref = state.get_id()))
return oval_test
#Handle any Values inside a Registry object
def process_registry_values(self, cybox_defined_object, oval_object, oval_states):
#Special registry Values handling
if cybox_defined_object.values is not None:
name_set = False
for reg_value in cybox_defined_object.values:
oval_state = oval.registry_state(version = 1, id = self.generate_ste_id())
for element, value in reg_value._fields.items():
if value is not None:
#Corner case for handling multiple name/value pairs in the OVAL object
if len(cybox_defined_object.values) == 1 and not name_set:
if element in self.registry_object_mappings.keys():
oval_element = self.registry_object_mappings.get(element)
method = 'set_' + oval_element.get('name')
getattr(oval_object,method)(oval.EntityBaseType(datatype = 'string', operation = self.operator_condition_mappings.get(value), valueOf_=value.value))
name_set = True
elif len(cybox_defined_object.values) > 1 and not name_set:
oval_object.set_name(oval.EntityBaseType(datatype = 'string', operation = 'pattern match', valueOf_='.*'))
name_set = True
if element in self.registry_state_mappings.keys():
oval_element = self.registry_state_mappings.get(element)
method = 'set_' + oval_element.get('name')
getattr(oval_state,method)(oval.EntityBaseType(datatype = 'string', operation = self.operator_condition_mappings.get(value), valueOf_=value.value))
if oval_state.hasContent_():
oval_states.append(oval_state)
def generate_test_id(self):
self.test_id_base += 1
test_id = 'oval:' + self.id_namespace + ':tst:' + str(self.test_id_base)
return test_id
def generate_obj_id(self):
self.obj_id_base += 1
obj_id = 'oval:' + self.id_namespace + ':obj:' + str(self.obj_id_base)
return obj_id
def generate_ste_id(self):
self.ste_id_base += 1
ste_id = 'oval:' + self.id_namespace + ':ste:' + str(self.ste_id_base)
return ste_id
def generate_def_id(self):
self.def_id_base += 1
def_id = 'oval:' + self.id_namespace + ':def:' + str(self.def_id_base)
return def_id
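# Hedged usage sketch (not part of the original module; the names below are
# illustrative only):
#
#     mappings = cybox_oval_mappings('example.namespace')
#     entities = mappings.create_oval(defined_object, 'maec-action-1')
#     if entities:
#         print entities['test'].get_id()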
| bsd-3-clause | -8,493,995,175,343,858,000 | 58.37931 | 180 | 0.597425 | false | 3.971934 | true | false | false |
enckse/system-viewer | bottle.py | 1 | 146747 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
# The gevent and eventlet server adapters need to patch some modules before
# they are imported. This is why we parse the commandline parameters here but
# handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server:
if _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
elif _cmd_options.server.startswith('eventlet'):
import eventlet; eventlet.monkey_patch()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (resticts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
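# Illustrative example (not part of the original source; the helper name is
# hypothetical):
#
#     class Page(object):
#         @cached_property
#         def content(self):
#             return compute_expensive_value()  # hypothetical helper
#
# The first access computes and caches the value; ``del page.content``
# resets it.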
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf:
(_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
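    # Illustrative use (adapted from the Bottle documentation; ``app`` is an
    # assumed Bottle instance):
    #
    #     def list_filter(config):
    #         ''' Matches a comma separated list of numbers. '''
    #         delimiter = config or ','
    #         regexp = r'\d+(%s\d+)*' % re.escape(delimiter)
    #         def to_python(match):
    #             return map(int, match.split(delimiter))
    #         def to_url(numbers):
    #             return delimiter.join(map(str, numbers))
    #         return regexp, to_python, to_url
    #
    #     app.router.add_filter('list', list_filter)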
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x+maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
""" Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
                allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
        turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
        for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
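Example (a minimal sketch, assuming an ``app = Bottle()`` instance)::

    def log_request():
        print('Handling %s %s' % (request.method, request.path))

    app.add_hook('before_request', log_request)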
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
"""
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
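Example (a sketch of a plugin written as a plain decorator; assumes
an ``app = Bottle()`` instance)::

    def stopwatch(callback):
        def wrapper(*args, **kwargs):
            start = time.time()
            result = callback(*args, **kwargs)
            response.set_header('X-Exec-Time', str(time.time() - start))
            return result
        return wrapper

    app.install(stopwatch)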
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
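Example (a sketch; ``current_user()`` is a hypothetical helper)::

    request.user = current_user()   # stored in the environ dict
    print(request.user)             # read back via __getattr__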
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters". They
should not be confused with the "URL wildcards" provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
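Example (a sketch; assumes a form field named ``upload``)::

    upload = request.files.get('upload')
    if upload is not None:
        print(upload.filename, upload.content_type)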
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. """
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
return json_loads(b)
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
body_iter = self._iter_chunked if self.chunked else self._iter_body
read_func = self.environ['wsgi.input'].read
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
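
Example (a sketch)::

    # before: SCRIPT_NAME='/a', PATH_INFO='/b/c'
    request.path_shift(1)
    # after:  SCRIPT_NAME='/a/b', PATH_INFO='/c'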
"""
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This only
works if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
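
Example (a sketch)::

    rs = BaseResponse('<b>Hi</b>', 200, content_type='text/html')
    rs.set_header('X-Custom', 'value')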
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for name, vals in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty('Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevent client-side JavaScript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
safe, not to store secret information on the client side.
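
Example (a sketch of a signed-cookie round trip; the secret is made up)::

    response.set_cookie('account', 'alice', secret='s3cr3t')
    # ... in a later request:
    account = request.get_cookie('account', secret='s3cr3t')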
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value): ls.var = value
def fdel(_): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None,
**options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
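
Example::

    >>> d = MultiDict(a=1)
    >>> d['a'] = 2          # appends rather than overwrites
    >>> d['a']
    2
    >>> d.getall('a')
    [1, 2]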
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
into a specific type. Exception are suppressed and result in
the default value to be returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_on_change')
def __init__(self):
self._meta = {}
self._on_change = lambda name, value: None
def load_config(self, filename):
""" Load values from an ``*.ini`` style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
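
Example (a sketch; ``app.ini`` is a made-up file)::

    # app.ini contents:
    #   [sqlite]
    #   db = /tmp/test.db
    c = ConfigDict().load_config('app.ini')
    # c['sqlite.db'] -> '/tmp/test.db'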
"""
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, str):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` """
prefix = ''
if a and isinstance(a[0], str):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix+key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. """
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
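
Example (a sketch; the path and file name are made up)::

    res = ResourceManager()
    res.add_path('./static/')
    with res.open('logo.png', mode='rb') as fp:
        data = fp.read()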
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2**16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2**16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
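
Example (a sketch of saving an upload inside a route; the target
directory is made up and must already exist)::

    upload = request.files.get('upload')
    if upload is not None:
        upload.save('/tmp/uploads', overwrite=False)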
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
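
Example (a sketch, assuming an ``app = Bottle()`` instance)::

    @app.route('/static/<filepath:path>')
    def serve(filepath):
        return static_file(filepath, root='/var/www/static')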
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
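# Editor's note: hedged usage sketch, not part of the original source. Serving
# files below a fixed root; the routes and root paths are illustrative
# assumptions. The :path filter allows slashes in the wildcard.
def _example_static_routes():  # pragma: no cover
    @route('/static/<filepath:path>')
    def serve_static(filepath):
        return static_file(filepath, root='/var/www/static')
    @route('/download/<filename>')
    def force_download(filename):
        return static_file(filename, root='/var/www/files', download=True)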
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
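# Editor's note: hedged worked example, not part of the original source.
def _example_parse_auth():  # pragma: no cover
    header = 'Basic ' + touni(base64.b64encode(tob('alice:secret')))
    return parse_auth(header)  # -> ('alice', 'secret')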
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
            elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
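# Editor's note: hedged worked example, not part of the original source. For a
# 1000-byte resource the header below yields half-open (start, end) pairs.
def _example_parse_range_header():  # pragma: no cover
    return list(parse_range_header('bytes=0-499,500-,-100', 1000))
    # -> [(0, 500), (500, 1000), (900, 1000)]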
def _parse_qsl(qs):
r = []
for pair in qs.replace(';','&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
""" Encode and sign a pickle-able object. Return a (byte) string """
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
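# Editor's note: hedged worked example, not part of the original source. The
# secret key is an illustrative assumption.
def _example_signed_cookie_roundtrip():  # pragma: no cover
    blob = cookie_encode({'user_id': 42}, 'assumed-secret-key')
    assert cookie_is_encoded(blob)
    return cookie_decode(blob, 'assumed-secret-key')  # -> {'user_id': 42}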
def html_escape(string):
    """ Escape HTML special characters ``&<>`` and quotes ``'"``. """
    return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
                 .replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
    """ Escape and quote a string to be used as an HTTP attribute."""
    return '"%s"' % html_escape(string).replace('\n','&#10;')\
                    .replace('\r','&#13;').replace('\t','&#9;')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param script_name: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
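# Editor's note: hedged worked example, not part of the original source.
def _example_path_shift():  # pragma: no cover
    # Move one fragment from PATH_INFO to SCRIPT_NAME:
    return path_shift('/app', '/blog/2014/01', shift=1)
    # -> ('/app/blog', '/2014/01')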
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
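# Editor's note: hedged usage sketch, not part of the original source. The
# credential check below is an illustrative assumption; real code should
# verify against a user store.
def _example_auth_basic():  # pragma: no cover
    def check(user, password):
        return user == 'admin' and password == 'assumed-password'
    @route('/admin')
    @auth_basic(check, realm='admin area')
    def admin_page():
        return 'hello %s' % request.auth[0]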
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls, handler_cls)
        self.port = self.srv.server_port # update to the actual port (0 means a random free port)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port,address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
server = wsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self,handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO':GeventSocketIOServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
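# Editor's note: hedged usage sketch, not part of the original source. A
# typical development entry point; host and port values are the defaults.
def _example_run_dev_server():  # pragma: no cover
    run(host='localhost', port=8080, reloader=True, debug=True)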
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=None, encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.', True) #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.', True) #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are no longer supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env), '_rebase': None,
'_str': self._str, '_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__ })
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}; stdout = []
for dictarg in args: env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError): pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 5: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 6: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=$))'
# 7: And finally, a single newline. The 8th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))'
# Match inline statements (may contain python strings)
_re_inl = '%%(inline_start)s((?:%s|[^\'"\n]+?)*?)%%(inline_end)s' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
offs = self.offset
self.offset += m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[offs+m.start():offs+m.start(1)]+m.group(2)+line+sep)
self.offset += len(line+sep)
continue
self.flush_text()
self.read_code(multiline=bool(m.group(4)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment = '', ''
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
if code_line and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
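# Editor's note: hedged usage sketch, not part of the original source. The
# template file name is an illustrative assumption.
def _example_template_calls():  # pragma: no cover
    inline = template('Hello {{name}}!', name='World')  # inline source string
    page = template('index', title='Home')              # file via TEMPLATE_PATH
    return inline, page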
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
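# Editor's note: hedged usage sketch, not part of the original source. The
# template name is an illustrative assumption.
def _example_view_handler():  # pragma: no cover
    @route('/hello')
    @view('hello_template', title='Default title')
    def hello():
        return dict(name='World')  # dict keys become template variables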
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application entry point specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0], host=host, port=int(port), server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END | mit | -874,936,976,201,851,400 | 38.50483 | 108 | 0.566328 | false | 4.216868 | false | false | false |
# --- src/sentry/models/user.py (from ifduyue/sentry) ---
"""
sentry.models.user
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import warnings
from bitfield import BitField
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.core.urlresolvers import reverse
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import BaseManager, BaseModel, BoundedAutoField
from sentry.models import LostPasswordHash
from sentry.utils.http import absolute_uri
audit_logger = logging.getLogger('sentry.audit.user')
class UserManager(BaseManager, UserManager):
pass
class User(BaseModel, AbstractBaseUser):
__core__ = True
id = BoundedAutoField(primary_key=True)
username = models.CharField(_('username'), max_length=128, unique=True)
# this column is called first_name for legacy reasons, but it is the entire
# display name
name = models.CharField(_('name'), max_length=200, blank=True, db_column='first_name')
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin '
'site.')
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'
)
)
is_superuser = models.BooleanField(
_('superuser status'),
default=False,
help_text=_(
'Designates that this user has all permissions without '
'explicitly assigning them.'
)
)
is_managed = models.BooleanField(
_('managed'),
default=False,
help_text=_(
'Designates whether this user should be treated as '
'managed. Select this to disallow the user from '
'modifying their account (username, password, etc).'
)
)
is_sentry_app = models.NullBooleanField(
_('is sentry app'),
null=True,
default=None,
help_text=_(
            'Designates whether this user is the entity used for Permissions '
            'on behalf of a Sentry App. Cannot login or use Sentry like a '
            'normal User would.'
)
)
is_password_expired = models.BooleanField(
_('password expired'),
default=False,
help_text=_(
'If set to true then the user needs to change the '
'password on next sign in.'
)
)
last_password_change = models.DateTimeField(
_('date of last password change'),
null=True,
help_text=_('The date the password was changed last.')
)
flags = BitField(
flags=(
(
'newsletter_consent_prompt',
'Do we need to ask this user for newsletter consent?'
),
),
default=0,
null=True,
)
session_nonce = models.CharField(max_length=12, null=True)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
last_active = models.DateTimeField(_('last active'), default=timezone.now, null=True)
objects = UserManager(cache_fields=['pk'])
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
app_label = 'sentry'
db_table = 'auth_user'
verbose_name = _('user')
verbose_name_plural = _('users')
def delete(self):
if self.username == 'sentry':
raise Exception('You cannot delete the "sentry" user as it is required by Sentry.')
avatar = self.avatar.first()
if avatar:
avatar.delete()
return super(User, self).delete()
def save(self, *args, **kwargs):
if not self.username:
self.username = self.email
return super(User, self).save(*args, **kwargs)
def has_perm(self, perm_name):
warnings.warn('User.has_perm is deprecated', DeprecationWarning)
return self.is_superuser
def has_module_perms(self, app_label):
warnings.warn('User.has_module_perms is deprecated', DeprecationWarning)
return self.is_superuser
def get_unverified_emails(self):
return self.emails.filter(is_verified=False)
def get_verified_emails(self):
return self.emails.filter(is_verified=True)
def has_unverified_emails(self):
return self.get_unverified_emails().exists()
def get_label(self):
return self.email or self.username or self.id
def get_display_name(self):
return self.name or self.email or self.username
def get_full_name(self):
return self.name
def get_short_name(self):
return self.username
def get_salutation_name(self):
name = self.name or self.username.split('@', 1)[0].split('.', 1)[0]
first_name = name.split(' ', 1)[0]
return first_name.capitalize()
def get_avatar_type(self):
avatar = self.avatar.first()
if avatar:
return avatar.get_avatar_type_display()
return 'letter_avatar'
def send_confirm_email_singular(self, email, is_new_user=False):
from sentry import options
from sentry.utils.email import MessageBuilder
if not email.hash_is_valid():
email.set_hash()
email.save()
        context = {
            'user': self,
            'url': absolute_uri(
                reverse('sentry-account-confirm-email', args=[self.id, email.validation_hash])
            ),
            'confirm_email': email.email,
            'is_new_user': is_new_user,
        }
msg = MessageBuilder(
subject='%sConfirm Email' % (options.get('mail.subject-prefix'), ),
template='sentry/emails/confirm_email.txt',
html_template='sentry/emails/confirm_email.html',
type='user.confirm_email',
context=context,
)
msg.send_async([email.email])
def send_confirm_emails(self, is_new_user=False):
email_list = self.get_unverified_emails()
for email in email_list:
self.send_confirm_email_singular(email, is_new_user)
def merge_to(from_user, to_user):
# TODO: we could discover relations automatically and make this useful
from sentry import roles
from sentry.models import (
Activity, AuditLogEntry, AuthIdentity, Authenticator, GroupAssignee, GroupBookmark, GroupSeen,
GroupShare, GroupSubscription, OrganizationMember, OrganizationMemberTeam, UserAvatar,
UserEmail, UserOption,
)
audit_logger.info(
'user.merge', extra={
'from_user_id': from_user.id,
'to_user_id': to_user.id,
}
)
for obj in OrganizationMember.objects.filter(user=from_user):
try:
with transaction.atomic():
obj.update(user=to_user)
except IntegrityError:
pass
# identify the highest priority membership
to_member = OrganizationMember.objects.get(
organization=obj.organization_id,
user=to_user,
)
if roles.get(obj.role).priority > roles.get(to_member.role).priority:
to_member.update(role=obj.role)
for team in obj.teams.all():
try:
with transaction.atomic():
OrganizationMemberTeam.objects.create(
organizationmember=to_member,
team=team,
)
except IntegrityError:
pass
model_list = (
Authenticator, GroupAssignee, GroupBookmark, GroupSeen, GroupShare,
GroupSubscription, UserAvatar, UserEmail, UserOption,
)
for model in model_list:
for obj in model.objects.filter(user=from_user):
try:
with transaction.atomic():
obj.update(user=to_user)
except IntegrityError:
pass
Activity.objects.filter(
user=from_user,
).update(user=to_user)
AuditLogEntry.objects.filter(
actor=from_user,
).update(actor=to_user)
AuditLogEntry.objects.filter(
target_user=from_user,
).update(target_user=to_user)
# remove any duplicate identities that exist on the current user that
# might conflict w/ the new users existing SSO
AuthIdentity.objects.filter(
user=from_user,
auth_provider__organization__in=AuthIdentity.objects.filter(
user=to_user,
).values('auth_provider__organization')
).delete()
AuthIdentity.objects.filter(
user=from_user,
).update(user=to_user)
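    # Editor's note: hedged usage sketch, not part of the original model.
    # Folding a duplicate account into a primary one (both rows assumed to
    # exist); memberships, emails, options and audit rows move over:
    #
    #     duplicate = User.objects.get(username='old@example.com')
    #     primary = User.objects.get(username='new@example.com')
    #     duplicate.merge_to(primary)
    #     duplicate.delete()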
def set_password(self, raw_password):
super(User, self).set_password(raw_password)
self.last_password_change = timezone.now()
self.is_password_expired = False
def refresh_session_nonce(self, request=None):
from django.utils.crypto import get_random_string
self.session_nonce = get_random_string(12)
if request is not None:
request.session['_nonce'] = self.session_nonce
def get_orgs(self):
from sentry.models import (Organization, OrganizationMember, OrganizationStatus)
return Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
id__in=OrganizationMember.objects.filter(
user=self,
).values('organization'),
)
def get_orgs_require_2fa(self):
from sentry.models import (Organization, OrganizationStatus)
return Organization.objects.filter(
flags=models.F('flags').bitor(Organization.flags.require_2fa),
status=OrganizationStatus.VISIBLE,
member_set__user=self,
)
def clear_lost_passwords(self):
LostPasswordHash.objects.filter(user=self).delete()
# HACK(dcramer): last_login needs nullable for Django 1.8
User._meta.get_field('last_login').null = True
# --- src/calibre/gui2/dbus_export/tray.py (from jelly/calibre) ---
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
# Implement the StatusNotifierItem spec for creating a system tray icon in
# modern linux desktop environments. See
# http://www.notmart.org/misc/statusnotifieritem/index.html#introduction
# This is not an actual standard, but is apparently used by GNOME, KDE and
# Unity, which makes it necessary enough to implement.
import os
import dbus
from PyQt5.Qt import (
QApplication, QObject, pyqtSignal, Qt, QPoint, QRect, QMenu, QSystemTrayIcon)
from calibre.gui2.dbus_export.menu import DBusMenu
from calibre.gui2.dbus_export.utils import icon_cache
from calibre.utils.dbus_service import (
Object, method as dbus_method, BusName, dbus_property, signal as dbus_signal)
_sni_count = 0
class StatusNotifierItem(QObject):
IFACE = 'org.kde.StatusNotifierItem'
activated = pyqtSignal(object)
show_menu = pyqtSignal(int, int)
def __init__(self, **kw):
global _sni_count
QObject.__init__(self, parent=kw.get('parent'))
self.context_menu = None
self.is_visible = True
self.tool_tip = ''
self._icon = QApplication.instance().windowIcon()
self.show_menu.connect(self._show_menu, type=Qt.QueuedConnection)
_sni_count += 1
kw['num'] = _sni_count
self.dbus_api = StatusNotifierItemAPI(self, **kw)
def _show_menu(self, x, y):
m = self.contextMenu()
if m is not None:
m.exec_(QPoint(x, y))
def isVisible(self):
return self.is_visible
def setVisible(self, visible):
if self.is_visible != visible:
self.is_visible = visible
self.dbus_api.NewStatus(self.dbus_api.Status)
def show(self):
self.setVisible(True)
def hide(self):
self.setVisible(False)
def toggle(self):
self.setVisible(not self.isVisible())
def contextMenu(self):
return self.context_menu
def setContextMenu(self, menu):
self.context_menu = menu
self.dbus_api.publish_new_menu()
def geometry(self):
return QRect()
def toolTip(self):
return self.tool_tip
def setToolTip(self, val):
self.tool_tip = val or ''
self.dbus_api.NewToolTip()
def setIcon(self, icon):
self._icon = icon
self.dbus_api.NewIcon()
def icon(self):
return self._icon
@classmethod
def supportsMessages(cls):
return False
def emit_activated(self):
self.activated.emit(QSystemTrayIcon.Trigger)
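# Editor's note: hedged usage sketch, not part of the original module. A
# minimal tray icon with one menu entry; the names are illustrative
# assumptions.
def _example_create_tray_icon(parent=None):  # pragma: no cover
    tray = StatusNotifierItem(parent=parent, app_id='example-app', title='Example')
    menu = QMenu()
    menu.addAction('Quit', QApplication.instance().quit)
    tray.setContextMenu(menu)
    tray.setToolTip('Example tray icon')
    tray.show()
    return tray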
_status_item_menu_count = 0
class StatusNotifierItemAPI(Object):
'See http://www.notmart.org/misc/statusnotifieritem/statusnotifieritem.html'
IFACE = 'org.kde.StatusNotifierItem'
def __init__(self, notifier, **kw):
global _status_item_menu_count
self.notifier = notifier
bus = kw.get('bus')
if bus is None:
bus = kw['bus'] = dbus.SessionBus()
self.name = '%s-%s-%s' % (self.IFACE, os.getpid(), kw.get('num', 1))
self.dbus_name = BusName(self.name, bus=bus, do_not_queue=True)
self.app_id = kw.get('app_id') or QApplication.instance().applicationName() or 'unknown_application'
self.category = kw.get('category') or 'ApplicationStatus'
self.title = kw.get('title') or self.app_id
Object.__init__(self, bus, '/' + self.IFACE.split('.')[-1])
_status_item_menu_count += 1
self.dbus_menu = DBusMenu('/StatusItemMenu/%d' % _status_item_menu_count, bus=bus, parent=kw.get('parent'))
def publish_new_menu(self):
menu = self.notifier.contextMenu()
if menu is None:
menu = QMenu()
if len(menu.actions()) == 0:
menu.addAction(self.notifier.icon(), _('Show/hide %s') % self.title, self.notifier.emit_activated)
# The menu must have at least one entry, namely the show/hide entry.
# This is necessary as Canonical in their infinite wisdom decided to
# force all tray icons to show their popup menus when clicked.
self.dbus_menu.publish_new_menu(menu)
@dbus_property(IFACE, signature='s')
def IconName(self):
return icon_cache().name_for_icon(self.notifier.icon())
@dbus_property(IFACE, signature='s')
def IconThemePath(self):
return icon_cache().icon_theme_path
@dbus_property(IFACE, signature='a(iiay)')
def IconPixmap(self):
return dbus.Array(signature='(iiay)')
@dbus_property(IFACE, signature='s')
def OverlayIconName(self):
return ''
@dbus_property(IFACE, signature='(sa(iiay)ss)')
def ToolTip(self):
        # This is ignored on Unity; Canonical believes in user interfaces
        # that are so functionality-free that they don't need tooltips
return self.IconName, self.IconPixmap, self.Title, self.notifier.toolTip()
@dbus_property(IFACE, signature='a(iiay)')
def OverlayIconPixmap(self):
return dbus.Array(signature='(iiay)')
@dbus_property(IFACE, signature='s')
def AttentionIconName(self):
return ''
@dbus_property(IFACE, signature='a(iiay)')
def AttentionIconPixmap(self):
return dbus.Array(signature='(iiay)')
@dbus_property(IFACE, signature='s')
def Category(self):
return self.category
@dbus_property(IFACE, signature='s')
def Id(self):
return self.app_id
@dbus_property(IFACE, signature='s')
def Title(self):
return self.title
@dbus_property(IFACE, signature='s')
def Status(self):
return 'Active' if self.notifier.isVisible() else 'Passive'
@dbus_property(IFACE, signature='o')
def Menu(self):
return dbus.ObjectPath(self.dbus_menu.object_path)
@dbus_property(IFACE, signature='i')
def WindowId(self):
return 0
@dbus_method(IFACE, in_signature='ii', out_signature='')
def ContextMenu(self, x, y):
self.notifier.show_menu.emit(x, y)
@dbus_method(IFACE, in_signature='ii', out_signature='')
def Activate(self, x, y):
self.notifier.activated.emit(QSystemTrayIcon.Trigger)
@dbus_method(IFACE, in_signature='u', out_signature='')
def XAyatanaSecondaryActivate(self, timestamp):
# This is called when the user middle clicks the icon in Unity
self.notifier.activated.emit(QSystemTrayIcon.MiddleClick)
@dbus_method(IFACE, in_signature='ii', out_signature='')
def SecondaryActivate(self, x, y):
self.notifier.activated.emit(QSystemTrayIcon.MiddleClick)
@dbus_method(IFACE, in_signature='is', out_signature='')
def Scroll(self, delta, orientation):
pass
@dbus_signal(IFACE, '')
def NewTitle(self):
pass
@dbus_signal(IFACE, '')
def NewIcon(self):
pass
@dbus_signal(IFACE, '')
def NewAttentionIcon(self):
pass
@dbus_signal(IFACE, '')
def NewOverlayIcon(self):
pass
@dbus_signal(IFACE, '')
def NewToolTip(self):
pass
@dbus_signal(IFACE, 's')
def NewStatus(self, status):
pass
| gpl-3.0 | -3,887,124,722,416,218,000 | 29.407563 | 115 | 0.637833 | false | 3.531967 | false | false | false |
# --- Deprecated_Unsupported/Python_Client/galvanostat_with_connect.py (from kjiang8/Ardustat) ---
import numpy
import ardustat_library_simple as ard
import time
import subprocess
import os
import glob
import sys
##Guess a serial port
port = ""
if os.name == "posix":
#try os x
if len(glob.glob("/dev/tty.u*")) > 0:
port = glob.glob("/dev/tty.u*")[0]
elif len(glob.glob("/dev/ttyUSB*")) > 0:
port = glob.glob("/dev/ttyUSB*")[0]
else:
print "can't see any ardustats. PEACE."
sys.exit()
print port
#start a serial forwarder
p = subprocess.Popen(("python tcp_serial_redirect.py "+port+" 57600").split())
print "waiting"
time.sleep(5)
print "going"
#set parameters
read_delay = .5 #second
ardustat_id = 21
file_name = "galvanostat_test"
ardustat_socket = 7777
debug = False
pulse_time = 60*60*10
#Below here no touchy
#connect to to ardustat and setup resistance table
a = ard.ardustat()
a.connect(ardustat_socket)
a.debug = debug
a.load_resistance_table(ardustat_id)
a.ocv()
a.groundvalue = 4
a.moveground()
time.sleep(.2)
a.ocv()
#create arrays + a function for logging data
times = []
potential = []
current = []
time_start = time.time()
cycle = 0
file_name = file_name+"_"+str(int(time_start))+".dat"
def appender(reading):
	if reading['valid']:
		print reading['cell_ADC'],reading['current']
		tdiff = str(time.time()-time_start)
		out = tdiff+","+str(reading['cell_ADC'])+","+str(reading['current'])+","+str(cycle)+"\n"
		open(file_name,"a").write(out)
	else:
		print "bad read"
#Step through values
output = 0
a.ocv()
for i in range(0,20):
time.sleep(.1)
read = a.parsedread()
appender(read)
start_pulse = time.time()
a.galvanostat(-0.001)
while (time.time()- start_pulse) < pulse_time:
time.sleep(read_delay)
read = a.parsedread()
appender(read)
start_pulse = time.time()
a.ocv()
while (time.time()- start_pulse) < 600:
time.sleep(read_delay)
read = a.parsedread()
appender(read)
p.kill()
| bsd-2-clause | 1,281,744,079,879,438,600 | 17.938144 | 87 | 0.672836 | false | 2.609375 | false | false | false |
manhg/tokit | tokit/postgres.py | 1 | 4462 | import logging
import shortuuid
import uuid
import momoko
import momoko.exceptions
import psycopg2
from psycopg2.extras import DictCursor, DictRow, register_uuid
import psycopg2.extensions
from tornado.gen import coroutine, sleep
from tornado.web import HTTPError
import tokit
logger = tokit.logger
class DictLogCursor(DictCursor):
def execute(self, sql, args=None):
logger.debug('Excute SQL: %s', self.mogrify(sql, args).decode())
return super().execute(sql, args)
@tokit.on('init')
def pg_init(app):
""" Hook to init Postgres momoko driver.
    dsn config is required, with the same syntax as a Psycopg2 DSN.
Sample env.ini::
[postgres]
dsn=dbname=[APP_NAME]
size=2
"""
env = app.config.env['postgres']
if env.getboolean('log_momoko'):
logging.getLogger('momoko').setLevel(logger.getEffectiveLevel())
momoko_opts = dict(
dsn=env['dsn'],
size=int(env['size']),
max_size=int(env['max_size']),
auto_shrink=env.getboolean('auto_shrink'),
cursor_factory=(DictLogCursor if env.getboolean('log') else DictCursor),
# connection_factory=env.get('connection_factory', None),
)
register_uuid()
app.pg_db = momoko.Pool(**momoko_opts)
try:
app.pg_db.connect()
except momoko.PartiallyConnectedError:
logger.error('Cannot connect')
class PgMixin:
DbIntegrityError = psycopg2.IntegrityError
DbError = psycopg2.Error
@property
def db(self):
return self.application.pg_db
@coroutine
def pg_insert(self, table, fields=None, **data):
"""
Postgres shorcut to insert data
:return int new row's id
Example::
user_id = yield self.pg_insert('users', {"username": "foo", "password": "secret"})
"""
if fields:
data = self.get_request_dict(*fields)
else:
fields = list(data.keys())
assert len(data) > 0 # check data
values = list(data.values())
sql = 'INSERT INTO {} ({}) VALUES ({}) RETURNING id ' \
.format(table,
','.join(fields),
','.join(['%s'] * len(fields))
)
cursor = yield self.pg_query(sql, *values)
return cursor.fetchone()[0]
@coroutine
def pg_getconn(self):
try:
connection = yield self.db.getconn()
return connection
except psycopg2.OperationalError:
yield self.db.connect()
yield sleep(0.5)
try:
connection = yield self.db.getconn()
return connection
except:
raise HTTPError(503, "Database unavailable")
except (momoko.Pool.DatabaseNotAvailable, momoko.exceptions.PartiallyConnectedError):
raise HTTPError(503, "Database unavailable")
@coroutine
def pg_update(self, table, data):
id_value = data.pop('id')
changes = [field + ' = %s' for field in data.keys()]
sql = 'UPDATE {} SET {} WHERE id = %s'.format(table, ','.join(changes))
values = list(data.values()) + [id_value]
cursor = yield self.pg_query(sql, *values)
return cursor
@coroutine
def pg_query(self, query, *params):
""" Low level execuation """
connection = yield self.pg_getconn()
with self.db.manage(connection):
cursor = yield connection.execute(query, params)
return cursor
def pg_serialize(self, row):
if not row:
return
ret = dict(row) if isinstance(row, DictRow) else row
return ret
@coroutine
def pg_select(self, query, *params):
"""
Query and convert each returned row
:return generator
"""
result = yield self.pg_query(query, *params)
return (self.pg_serialize(row) for row in result.fetchall())
@coroutine
def pg_one(self, query, *params):
result = yield self.pg_query(query, *params)
row = result.fetchone()
if row:
return self.pg_serialize(row)
db_insert = pg_insert
db_update = pg_update
db_query = pg_query
db_select = pg_select
db_one = pg_one
class UidMixin:
def pg_serialize(self, row):
ret = PgMixin.pg_serialize(self, row)
if 'id' in ret:
ret['short_id'] = shortuuid.encode(ret['id'])
return ret
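
# Minimal usage sketch. The handler is assumed to be a tokit Request subclass
# mixing in PgMixin (so `self.application.pg_db` exists); the table and column
# names below are illustrative only.
@coroutine
def _example_usage(handler):
    # INSERT ... RETURNING id, via the pg_insert shortcut above
    new_id = yield handler.pg_insert('users', username='foo', password='secret')
    # parameterised single-row SELECT, returned as a plain dict
    user = yield handler.pg_one('SELECT * FROM users WHERE id = %s', new_id)
    return user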
| mit | 4,795,081,817,502,400,000 | 27.240506 | 94 | 0.586732 | false | 3.88676 | false | false | false |
sserrot/champion_relationships | venv/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py | 1 | 27666 |
import pytest
import networkx as nx
from networkx.utils import pairwise
def validate_path(G, s, t, soln_len, path):
assert path[0] == s
assert path[-1] == t
if not G.is_multigraph():
computed = sum(G[u][v].get('weight', 1) for u, v in pairwise(path))
assert soln_len == computed
else:
computed = sum(min(e.get('weight', 1) for e in G[u][v].values())
for u, v in pairwise(path))
assert soln_len == computed
def validate_length_path(G, s, t, soln_len, length, path):
assert soln_len == length
validate_path(G, s, t, length, path)
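
def _demo_validators():
    # Illustrative only: the two helpers above on a concrete graph.
    # path_graph(4) has edges 0-1, 1-2, 2-3 with implicit weight 1.
    G = nx.path_graph(4)
    validate_path(G, 0, 3, 3, [0, 1, 2, 3])
    validate_length_path(G, 0, 3, 3, 3, [0, 1, 2, 3])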
class WeightedTestBase(object):
"""Base class for test classes that test functions for computing
shortest paths in weighted graphs.
"""
def setup(self):
"""Creates some graphs for use in the unit tests."""
cnlti = nx.convert_node_labels_to_integers
self.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1,
ordering="sorted")
self.cycle = nx.cycle_graph(7)
self.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
self.XG = nx.DiGraph()
self.XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
self.MXG = nx.MultiDiGraph(self.XG)
self.MXG.add_edge('s', 'u', weight=15)
self.XG2 = nx.DiGraph()
self.XG2.add_weighted_edges_from([[1, 4, 1], [4, 5, 1],
[5, 6, 1], [6, 3, 1],
[1, 3, 50], [1, 2, 100],
[2, 3, 100]])
self.XG3 = nx.Graph()
self.XG3.add_weighted_edges_from([[0, 1, 2], [1, 2, 12],
[2, 3, 1], [3, 4, 5],
[4, 5, 1], [5, 0, 10]])
self.XG4 = nx.Graph()
self.XG4.add_weighted_edges_from([[0, 1, 2], [1, 2, 2],
[2, 3, 1], [3, 4, 1],
[4, 5, 1], [5, 6, 1],
[6, 7, 1], [7, 0, 1]])
self.MXG4 = nx.MultiGraph(self.XG4)
self.MXG4.add_edge(0, 1, weight=3)
self.G = nx.DiGraph() # no weights
self.G.add_edges_from([('s', 'u'), ('s', 'x'),
('u', 'v'), ('u', 'x'),
('v', 'y'), ('x', 'u'),
('x', 'v'), ('x', 'y'),
('y', 's'), ('y', 'v')])
class TestWeightedPath(WeightedTestBase):
def test_dijkstra(self):
(D, P) = nx.single_source_dijkstra(self.XG, 's')
validate_path(self.XG, 's', 'v', 9, P['v'])
assert D['v'] == 9
validate_path(
self.XG, 's', 'v', 9, nx.single_source_dijkstra_path(self.XG, 's')['v'])
assert dict(
nx.single_source_dijkstra_path_length(self.XG, 's'))['v'] == 9
validate_path(
self.XG, 's', 'v', 9, nx.single_source_dijkstra(self.XG, 's')[1]['v'])
validate_path(
self.MXG, 's', 'v', 9, nx.single_source_dijkstra_path(self.MXG, 's')['v'])
GG = self.XG.to_undirected()
# make sure we get lower weight
# to_undirected might choose either edge with weight 2 or weight 3
GG['u']['x']['weight'] = 2
(D, P) = nx.single_source_dijkstra(GG, 's')
validate_path(GG, 's', 'v', 8, P['v'])
assert D['v'] == 8 # uses lower weight of 2 on u<->x edge
validate_path(GG, 's', 'v', 8, nx.dijkstra_path(GG, 's', 'v'))
assert nx.dijkstra_path_length(GG, 's', 'v') == 8
validate_path(self.XG2, 1, 3, 4, nx.dijkstra_path(self.XG2, 1, 3))
validate_path(self.XG3, 0, 3, 15, nx.dijkstra_path(self.XG3, 0, 3))
assert nx.dijkstra_path_length(self.XG3, 0, 3) == 15
validate_path(self.XG4, 0, 2, 4, nx.dijkstra_path(self.XG4, 0, 2))
assert nx.dijkstra_path_length(self.XG4, 0, 2) == 4
validate_path(self.MXG4, 0, 2, 4, nx.dijkstra_path(self.MXG4, 0, 2))
validate_path(
self.G, 's', 'v', 2, nx.single_source_dijkstra(self.G, 's', 'v')[1])
validate_path(
self.G, 's', 'v', 2, nx.single_source_dijkstra(self.G, 's')[1]['v'])
validate_path(self.G, 's', 'v', 2, nx.dijkstra_path(self.G, 's', 'v'))
assert nx.dijkstra_path_length(self.G, 's', 'v') == 2
# NetworkXError: node s not reachable from moon
pytest.raises(nx.NetworkXNoPath, nx.dijkstra_path, self.G, 's', 'moon')
pytest.raises(
nx.NetworkXNoPath, nx.dijkstra_path_length, self.G, 's', 'moon')
validate_path(self.cycle, 0, 3, 3, nx.dijkstra_path(self.cycle, 0, 3))
validate_path(self.cycle, 0, 4, 3, nx.dijkstra_path(self.cycle, 0, 4))
assert nx.single_source_dijkstra(self.cycle, 0, 0) == (0, [0])
def test_bidirectional_dijkstra(self):
validate_length_path(
self.XG, 's', 'v', 9, *nx.bidirectional_dijkstra(self.XG, 's', 'v'))
validate_length_path(
self.G, 's', 'v', 2, *nx.bidirectional_dijkstra(self.G, 's', 'v'))
validate_length_path(
self.cycle, 0, 3, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 3))
validate_length_path(
self.cycle, 0, 4, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 4))
validate_length_path(
self.XG3, 0, 3, 15, *nx.bidirectional_dijkstra(self.XG3, 0, 3))
validate_length_path(
self.XG4, 0, 2, 4, *nx.bidirectional_dijkstra(self.XG4, 0, 2))
# need more tests here
P = nx.single_source_dijkstra_path(self.XG, 's')['v']
validate_path(self.XG, 's', 'v', sum(self.XG[u][v]['weight'] for u, v in zip(
P[:-1], P[1:])), nx.dijkstra_path(self.XG, 's', 'v'))
# check absent source
G = nx.path_graph(2)
pytest.raises(nx.NodeNotFound, nx.bidirectional_dijkstra, G, 3, 0)
def test_bidirectional_dijkstra_no_path(self):
with pytest.raises(nx.NetworkXNoPath):
G = nx.Graph()
nx.add_path(G, [1, 2, 3])
nx.add_path(G, [4, 5, 6])
path = nx.bidirectional_dijkstra(G, 1, 6)
def test_absent_source(self):
# the check is in _dijkstra_multisource, but this will provide
# regression testing against later changes to any of the "client"
# Dijkstra or Bellman-Ford functions
G = nx.path_graph(2)
for fn in (nx.dijkstra_path,
nx.dijkstra_path_length,
nx.single_source_dijkstra_path,
nx.single_source_dijkstra_path_length,
nx.single_source_dijkstra,
nx.dijkstra_predecessor_and_distance,):
pytest.raises(nx.NodeNotFound, fn, G, 3, 0)
def test_dijkstra_predecessor1(self):
G = nx.path_graph(4)
assert (nx.dijkstra_predecessor_and_distance(G, 0) ==
({0: [], 1: [0], 2: [1], 3: [2]}, {0: 0, 1: 1, 2: 2, 3: 3}))
def test_dijkstra_predecessor2(self):
# 4-cycle
G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)])
pred, dist = nx.dijkstra_predecessor_and_distance(G, (0))
assert pred[0] == []
assert pred[1] == [0]
assert pred[2] in [[1, 3], [3, 1]]
assert pred[3] == [0]
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
def test_dijkstra_predecessor3(self):
XG = nx.DiGraph()
XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
(P, D) = nx.dijkstra_predecessor_and_distance(XG, 's')
assert P['v'] == ['u']
assert D['v'] == 9
(P, D) = nx.dijkstra_predecessor_and_distance(XG, 's', cutoff=8)
assert not 'v' in D
def test_single_source_dijkstra_path_length(self):
pl = nx.single_source_dijkstra_path_length
assert dict(pl(self.MXG4, 0))[2] == 4
spl = pl(self.MXG4, 0, cutoff=2)
assert not 2 in spl
def test_bidirectional_dijkstra_multigraph(self):
G = nx.MultiGraph()
G.add_edge('a', 'b', weight=10)
G.add_edge('a', 'b', weight=100)
dp = nx.bidirectional_dijkstra(G, 'a', 'b')
assert dp == (10, ['a', 'b'])
def test_dijkstra_pred_distance_multigraph(self):
G = nx.MultiGraph()
G.add_edge('a', 'b', key='short', foo=5, weight=100)
G.add_edge('a', 'b', key='long', bar=1, weight=110)
p, d = nx.dijkstra_predecessor_and_distance(G, 'a')
assert p == {'a': [], 'b': ['a']}
assert d == {'a': 0, 'b': 100}
def test_negative_edge_cycle(self):
G = nx.cycle_graph(5, create_using=nx.DiGraph())
assert nx.negative_edge_cycle(G) == False
G.add_edge(8, 9, weight=-7)
G.add_edge(9, 8, weight=3)
graph_size = len(G)
assert nx.negative_edge_cycle(G) == True
assert graph_size == len(G)
pytest.raises(ValueError, nx.single_source_dijkstra_path_length, G, 8)
pytest.raises(ValueError, nx.single_source_dijkstra, G, 8)
pytest.raises(ValueError, nx.dijkstra_predecessor_and_distance, G, 8)
G.add_edge(9, 10)
pytest.raises(ValueError, nx.bidirectional_dijkstra, G, 8, 10)
def test_weight_function(self):
"""Tests that a callable weight is interpreted as a weight
function instead of an edge attribute.
"""
# Create a triangle in which the edge from node 0 to node 2 has
# a large weight and the other two edges have a small weight.
G = nx.complete_graph(3)
G.adj[0][2]['weight'] = 10
G.adj[0][1]['weight'] = 1
G.adj[1][2]['weight'] = 1
# The weight function will take the multiplicative inverse of
# the weights on the edges. This way, weights that were large
# before now become small and vice versa.
def weight(u, v, d): return 1 / d['weight']
# The shortest path from 0 to 2 using the actual weights on the
# edges should be [0, 1, 2].
distance, path = nx.single_source_dijkstra(G, 0, 2)
assert distance == 2
assert path == [0, 1, 2]
# However, with the above weight function, the shortest path
# should be [0, 2], since that has a very small weight.
distance, path = nx.single_source_dijkstra(G, 0, 2, weight=weight)
assert distance == 1 / 10
assert path == [0, 2]
def test_all_pairs_dijkstra_path(self):
cycle = nx.cycle_graph(7)
p = dict(nx.all_pairs_dijkstra_path(cycle))
assert p[0][3] == [0, 1, 2, 3]
cycle[1][2]['weight'] = 10
p = dict(nx.all_pairs_dijkstra_path(cycle))
assert p[0][3] == [0, 6, 5, 4, 3]
def test_all_pairs_dijkstra_path_length(self):
cycle = nx.cycle_graph(7)
pl = dict(nx.all_pairs_dijkstra_path_length(cycle))
assert pl[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
cycle[1][2]['weight'] = 10
pl = dict(nx.all_pairs_dijkstra_path_length(cycle))
assert pl[0] == {0: 0, 1: 1, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1}
def test_all_pairs_dijkstra(self):
cycle = nx.cycle_graph(7)
out = dict(nx.all_pairs_dijkstra(cycle))
assert out[0][0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert out[0][1][3] == [0, 1, 2, 3]
cycle[1][2]['weight'] = 10
out = dict(nx.all_pairs_dijkstra(cycle))
assert out[0][0] == {0: 0, 1: 1, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1}
assert out[0][1][3] == [0, 6, 5, 4, 3]
class TestDijkstraPathLength(object):
"""Unit tests for the :func:`networkx.dijkstra_path_length`
function.
"""
def test_weight_function(self):
"""Tests for computing the length of the shortest path using
Dijkstra's algorithm with a user-defined weight function.
"""
# Create a triangle in which the edge from node 0 to node 2 has
# a large weight and the other two edges have a small weight.
G = nx.complete_graph(3)
G.adj[0][2]['weight'] = 10
G.adj[0][1]['weight'] = 1
G.adj[1][2]['weight'] = 1
# The weight function will take the multiplicative inverse of
# the weights on the edges. This way, weights that were large
# before now become small and vice versa.
def weight(u, v, d): return 1 / d['weight']
# The shortest path from 0 to 2 using the actual weights on the
# edges should be [0, 1, 2]. However, with the above weight
# function, the shortest path should be [0, 2], since that has a
# very small weight.
length = nx.dijkstra_path_length(G, 0, 2, weight=weight)
assert length == 1 / 10
class TestMultiSourceDijkstra(object):
"""Unit tests for the multi-source dialect of Dijkstra's shortest
path algorithms.
"""
def test_no_sources(self):
with pytest.raises(ValueError):
nx.multi_source_dijkstra(nx.Graph(), {})
def test_path_no_sources(self):
with pytest.raises(ValueError):
nx.multi_source_dijkstra_path(nx.Graph(), {})
def test_path_length_no_sources(self):
with pytest.raises(ValueError):
nx.multi_source_dijkstra_path_length(nx.Graph(), {})
def test_absent_source(self):
G = nx.path_graph(2)
for fn in (nx.multi_source_dijkstra_path,
nx.multi_source_dijkstra_path_length,
nx.multi_source_dijkstra,):
pytest.raises(nx.NodeNotFound, fn, G, [3], 0)
def test_two_sources(self):
edges = [(0, 1, 1), (1, 2, 1), (2, 3, 10), (3, 4, 1)]
G = nx.Graph()
G.add_weighted_edges_from(edges)
sources = {0, 4}
distances, paths = nx.multi_source_dijkstra(G, sources)
expected_distances = {0: 0, 1: 1, 2: 2, 3: 1, 4: 0}
expected_paths = {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [4, 3], 4: [4]}
assert distances == expected_distances
assert paths == expected_paths
def test_simple_paths(self):
G = nx.path_graph(4)
lengths = nx.multi_source_dijkstra_path_length(G, [0])
assert lengths == {n: n for n in G}
paths = nx.multi_source_dijkstra_path(G, [0])
assert paths == {n: list(range(n + 1)) for n in G}
class TestBellmanFordAndGoldbergRadzik(WeightedTestBase):
def test_single_node_graph(self):
G = nx.DiGraph()
G.add_node(0)
assert nx.single_source_bellman_ford_path(G, 0) == {0: [0]}
assert nx.single_source_bellman_ford_path_length(G, 0) == {0: 0}
assert nx.single_source_bellman_ford(G, 0) == ({0: 0}, {0: [0]})
assert nx.bellman_ford_predecessor_and_distance(G, 0) == ({0: []}, {0: 0})
assert nx.goldberg_radzik(G, 0) == ({0: None}, {0: 0})
def test_absent_source_bellman_ford(self):
# the check is in _bellman_ford; this provides regression testing
# against later changes to "client" Bellman-Ford functions
G = nx.path_graph(2)
for fn in (nx.bellman_ford_predecessor_and_distance,
nx.bellman_ford_path,
nx.bellman_ford_path_length,
nx.single_source_bellman_ford_path,
nx.single_source_bellman_ford_path_length,
nx.single_source_bellman_ford,):
pytest.raises(nx.NodeNotFound, fn, G, 3, 0)
def test_absent_source_goldberg_radzik(self):
with pytest.raises(nx.NodeNotFound):
G = nx.path_graph(2)
nx.goldberg_radzik(G, 3, 0)
def test_negative_weight_cycle(self):
G = nx.cycle_graph(5, create_using=nx.DiGraph())
G.add_edge(1, 2, weight=-7)
for i in range(5):
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i)
G = nx.cycle_graph(5) # undirected Graph
G.add_edge(1, 2, weight=-3)
for i in range(5):
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i)
G = nx.DiGraph([(1, 1, {'weight': -1})])
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, 1)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, 1)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, 1)
pytest.raises(nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1)
# no negative cycle but negative weight
G = nx.cycle_graph(5, create_using=nx.DiGraph())
G.add_edge(1, 2, weight=-3)
assert (nx.single_source_bellman_ford_path(G, 0) ==
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3], 4: [0, 1, 2, 3, 4]})
assert (nx.single_source_bellman_ford_path_length(G, 0) ==
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0})
assert (nx.single_source_bellman_ford(G, 0) ==
({0: 0, 1: 1, 2: -2, 3: -1, 4: 0},
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3], 4: [0, 1, 2, 3, 4]}))
assert (nx.bellman_ford_predecessor_and_distance(G, 0) ==
({0: [], 1: [0], 2: [1], 3: [2], 4: [3]},
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0}))
assert (nx.goldberg_radzik(G, 0) ==
({0: None, 1: 0, 2: 1, 3: 2, 4: 3},
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0}))
def test_not_connected(self):
G = nx.complete_graph(6)
G.add_edge(10, 11)
G.add_edge(10, 12)
assert (nx.single_source_bellman_ford_path(G, 0) ==
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]})
assert (nx.single_source_bellman_ford_path_length(G, 0) ==
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1})
assert (nx.single_source_bellman_ford(G, 0) ==
({0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}))
assert (nx.bellman_ford_predecessor_and_distance(G, 0) ==
({0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
assert (nx.goldberg_radzik(G, 0) ==
({0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
# not connected, with a component not containing the source that
# contains a negative cost cycle.
G = nx.complete_graph(6)
G.add_edges_from([('A', 'B', {'load': 3}),
('B', 'C', {'load': -10}),
('C', 'A', {'load': 2})])
assert (nx.single_source_bellman_ford_path(G, 0, weight='load') ==
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]})
assert (nx.single_source_bellman_ford_path_length(G, 0, weight='load') ==
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1})
assert (nx.single_source_bellman_ford(G, 0, weight='load') ==
({0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}))
assert (nx.bellman_ford_predecessor_and_distance(G, 0, weight='load') ==
({0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
assert (nx.goldberg_radzik(G, 0, weight='load') ==
({0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
def test_multigraph(self):
assert nx.bellman_ford_path(self.MXG, 's', 'v') == ['s', 'x', 'u', 'v']
assert nx.bellman_ford_path_length(self.MXG, 's', 'v') == 9
assert nx.single_source_bellman_ford_path(self.MXG, 's')['v'] == ['s', 'x', 'u', 'v']
assert nx.single_source_bellman_ford_path_length(self.MXG, 's')['v'] == 9
D, P = nx.single_source_bellman_ford(self.MXG, 's', target='v')
assert D == 9
assert P == ['s', 'x', 'u', 'v']
P, D = nx.bellman_ford_predecessor_and_distance(self.MXG, 's')
assert P['v'] == ['u']
assert D['v'] == 9
P, D = nx.goldberg_radzik(self.MXG, 's')
assert P['v'] == 'u'
assert D['v'] == 9
assert nx.bellman_ford_path(self.MXG4, 0, 2) == [0, 1, 2]
assert nx.bellman_ford_path_length(self.MXG4, 0, 2) == 4
assert nx.single_source_bellman_ford_path(self.MXG4, 0)[2] == [0, 1, 2]
assert nx.single_source_bellman_ford_path_length(self.MXG4, 0)[2] == 4
D, P = nx.single_source_bellman_ford(self.MXG4, 0, target=2)
assert D == 4
assert P == [0, 1, 2]
P, D = nx.bellman_ford_predecessor_and_distance(self.MXG4, 0)
assert P[2] == [1]
assert D[2] == 4
P, D = nx.goldberg_radzik(self.MXG4, 0)
assert P[2] == 1
assert D[2] == 4
def test_others(self):
assert nx.bellman_ford_path(self.XG, 's', 'v') == ['s', 'x', 'u', 'v']
assert nx.bellman_ford_path_length(self.XG, 's', 'v') == 9
assert nx.single_source_bellman_ford_path(self.XG, 's')['v'] == ['s', 'x', 'u', 'v']
assert nx.single_source_bellman_ford_path_length(self.XG, 's')['v'] == 9
D, P = nx.single_source_bellman_ford(self.XG, 's', target='v')
assert D == 9
assert P == ['s', 'x', 'u', 'v']
(P, D) = nx.bellman_ford_predecessor_and_distance(self.XG, 's')
assert P['v'] == ['u']
assert D['v'] == 9
(P, D) = nx.goldberg_radzik(self.XG, 's')
assert P['v'] == 'u'
assert D['v'] == 9
def test_path_graph(self):
G = nx.path_graph(4)
assert (nx.single_source_bellman_ford_path(G, 0) ==
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3]})
assert (nx.single_source_bellman_ford_path_length(G, 0) ==
{0: 0, 1: 1, 2: 2, 3: 3})
assert (nx.single_source_bellman_ford(G, 0) ==
({0: 0, 1: 1, 2: 2, 3: 3}, {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3]}))
assert (nx.bellman_ford_predecessor_and_distance(G, 0) ==
({0: [], 1: [0], 2: [1], 3: [2]}, {0: 0, 1: 1, 2: 2, 3: 3}))
assert (nx.goldberg_radzik(G, 0) ==
({0: None, 1: 0, 2: 1, 3: 2}, {0: 0, 1: 1, 2: 2, 3: 3}))
assert (nx.single_source_bellman_ford_path(G, 3) ==
{0: [3, 2, 1, 0], 1: [3, 2, 1], 2: [3, 2], 3: [3]})
assert (nx.single_source_bellman_ford_path_length(G, 3) ==
{0: 3, 1: 2, 2: 1, 3: 0})
assert (nx.single_source_bellman_ford(G, 3) ==
({0: 3, 1: 2, 2: 1, 3: 0}, {0: [3, 2, 1, 0], 1: [3, 2, 1], 2: [3, 2], 3: [3]}))
assert (nx.bellman_ford_predecessor_and_distance(G, 3) ==
({0: [1], 1: [2], 2: [3], 3: []}, {0: 3, 1: 2, 2: 1, 3: 0}))
assert (nx.goldberg_radzik(G, 3) ==
({0: 1, 1: 2, 2: 3, 3: None}, {0: 3, 1: 2, 2: 1, 3: 0}))
def test_4_cycle(self):
# 4-cycle
G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)])
dist, path = nx.single_source_bellman_ford(G, 0)
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
assert path[0] == [0]
assert path[1] == [0, 1]
assert path[2] in [[0, 1, 2], [0, 3, 2]]
assert path[3] == [0, 3]
pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0)
assert pred[0] == []
assert pred[1] == [0]
assert pred[2] in [[1, 3], [3, 1]]
assert pred[3] == [0]
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
pred, dist = nx.goldberg_radzik(G, 0)
assert pred[0] == None
assert pred[1] == 0
assert pred[2] in [1, 3]
assert pred[3] == 0
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
def test_negative_weight(self):
G = nx.DiGraph()
G.add_nodes_from('abcd')
G.add_edge('a','d', weight = 0)
G.add_edge('a','b', weight = 1)
G.add_edge('b','c', weight = -3)
G.add_edge('c','d', weight = 1)
assert nx.bellman_ford_path(G, 'a', 'd') == ['a', 'b', 'c', 'd']
assert nx.bellman_ford_path_length(G, 'a', 'd') == -1
class TestJohnsonAlgorithm(WeightedTestBase):
def test_single_node_graph(self):
with pytest.raises(nx.NetworkXError):
G = nx.DiGraph()
G.add_node(0)
nx.johnson(G)
def test_negative_cycle(self):
G = nx.DiGraph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5), ('1', '0', -5),
('0', '2', 2), ('1', '2', 4),
('2', '3', 1)])
pytest.raises(nx.NetworkXUnbounded, nx.johnson, G)
G = nx.Graph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5), ('1', '0', -5),
('0', '2', 2), ('1', '2', 4),
('2', '3', 1)])
pytest.raises(nx.NetworkXUnbounded, nx.johnson, G)
def test_negative_weights(self):
G = nx.DiGraph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5),
('0', '2', 2), ('1', '2', 4),
('2', '3', 1)])
paths = nx.johnson(G)
assert paths == {'1': {'1': ['1'], '3': ['1', '2', '3'],
'2': ['1', '2']}, '0': {'1': ['0', '1'],
'0': ['0'], '3': ['0', '1', '2', '3'],
'2': ['0', '1', '2']}, '3': {'3': ['3']},
'2': {'3': ['2', '3'], '2': ['2']}}
def test_unweighted_graph(self):
with pytest.raises(nx.NetworkXError):
G = nx.path_graph(5)
nx.johnson(G)
def test_graphs(self):
validate_path(self.XG, 's', 'v', 9, nx.johnson(self.XG)['s']['v'])
validate_path(self.MXG, 's', 'v', 9, nx.johnson(self.MXG)['s']['v'])
validate_path(self.XG2, 1, 3, 4, nx.johnson(self.XG2)[1][3])
validate_path(self.XG3, 0, 3, 15, nx.johnson(self.XG3)[0][3])
validate_path(self.XG4, 0, 2, 4, nx.johnson(self.XG4)[0][2])
validate_path(self.MXG4, 0, 2, 4, nx.johnson(self.MXG4)[0][2])
| mit | -4,608,623,554,818,168,000 | 44.279869 | 100 | 0.493855 | false | 2.915586 | true | false | false |
bumper-app/bumper-bianca | bianca/analyzer/bugfinder.py | 1 | 3624 | """
file: bugfinder.py
author: Christoffer Rosen <[email protected]>
date: November 2013
description: Links changes that introduce bugs by identifying changes
that fix problems.
"""
import re
from orm.commit import *
from caslogging import logging
from analyzer.git_commit_linker import *
import json
class BugFinder:
"""
BugFinder():
	description: Links changes that introduce bugs.
"""
def __init__(self, allCommits, correctiveCommits, issueTracker):
"""
Constructor
		@param allCommits: All commits in ascending order by date
@param correctiveCommits: All commits/changes which are identified
as fixing problems.
@param issueTracker: Issue tracker (e.g., GitHub Issues)
"""
self.allCommits = allCommits
self.correctiveCommits = correctiveCommits
self.issueTracker = issueTracker
def findIssueOpened(self, correctiveCommit):
"""
		findIssueOpened()
		If the corrective change/commit links to an issue in the issue tracker,
		returns the date of the oldest open issue found; otherwise returns None.
"""
issue_opened = None
		if self.issueTracker is None or not hasattr(self.issueTracker, "getDateOpened"):
return None
		idMatch = re.compile(r'#[\d]+')
issue_ids = idMatch.findall(correctiveCommit.commit_message)
issue_ids = [issue_id.strip('#') for issue_id in issue_ids] # Remove the '#' from ids
if len(issue_ids) > 0:
issue_opened = self.issueTracker.getDateOpened(issue_ids[0])
# Use the oldest open bug
for issue_id in issue_ids:
logging.info('Searching for issue id: ' + issue_id)
curr_issue_opened = self.issueTracker.getDateOpened(issue_id)
# Verify that an issue was found.
if curr_issue_opened is not None:
					if issue_opened is None or int(curr_issue_opened) < int(issue_opened):
issue_opened = curr_issue_opened
return issue_opened
def searchForBuggyCommit(self, correctiveCommit):
"""
Finds the buggy commit based on the bug fixing commit
		Helper method for markBuggyCommits. If the commit links to an
		issue tracker, we check files changed prior to that date.
		Otherwise, we only check dates prior to the fix.
		@param correctiveCommit: the bug fixing commit
"""
bug_introduced_prior = correctiveCommit.author_date_unix_timestamp
issue_opened = self.findIssueOpened(correctiveCommit)
if issue_opened is not None:
bug_introduced_prior = issue_opened
if 'CAS_DELIMITER' in correctiveCommit.fileschanged:
# Provide legacy support for the previous fileschanged format
correctiveFiles = correctiveCommit.fileschanged.split(",CAS_DELIMITER,")
else:
correctiveFiles = json.loads(correctiveCommit.fileschanged)
for commit in self.allCommits:
if int(commit.author_date_unix_timestamp) < int(bug_introduced_prior):
if 'CAS_DELIMITER' in commit.fileschanged:
# Provide legacy support for the previous fileschanged format
commitFiles = commit.fileschanged.split(",CAS_DELIMITER,")
else:
commitFiles = json.loads(commit.fileschanged)
for commitFile in commitFiles:
					# This introduced the bug!
if commitFile in correctiveFiles:
return commit
return -1 # Not found
def markBuggyCommits(self):
"""
Finds bug inducing commits based on those that are
bug fixing. It checks commits prior to this and determines
it to be bug inducing if it changes the same file in a bug fixing
commit
"""
for correctiveCommit in self.correctiveCommits:
buggyCommit = self.searchForBuggyCommit(correctiveCommit)
			if buggyCommit != -1:
buggyCommit.contains_bug = True
#else:
#print("Cound not find the bug inducing commit for: " +
# correctiveCommit.commit_message)
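
def _demo_markup(all_commits, corrective_commits, tracker):
	# Minimal usage sketch of how a caller drives this class; the commit
	# lists are assumed to come from the ORM, ordered oldest to newest.
	finder = BugFinder(all_commits, corrective_commits, tracker)
	finder.markBuggyCommits()  # sets contains_bug = True on suspects
	return [c for c in all_commits if getattr(c, 'contains_bug', False)]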
| mit | 9,136,406,733,710,434,000 | 30.241379 | 88 | 0.737307 | false | 3.43833 | false | false | false |
danielpalomino/gem5 | configs/common/Options.py | 1 | 10060 | # Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
import m5
from m5.defines import buildEnv
from m5.objects import *
from Benchmarks import *
def addCommonOptions(parser):
# system options
parser.add_option("--cpu-type", type="choice", default="atomic",
choices = ["atomic", "timing", "detailed", "inorder",
"arm_detailed"],
help = "type of cpu to run with")
parser.add_option("--checker", action="store_true");
parser.add_option("-n", "--num-cpus", type="int", default=1)
parser.add_option("--caches", action="store_true")
parser.add_option("--l2cache", action="store_true")
parser.add_option("--fastmem", action="store_true")
parser.add_option("--clock", action="store", type="string", default='2GHz')
parser.add_option("--num-dirs", type="int", default=1)
parser.add_option("--num-l2caches", type="int", default=1)
parser.add_option("--num-l3caches", type="int", default=1)
parser.add_option("--l1d_size", type="string", default="64kB")
parser.add_option("--l1i_size", type="string", default="32kB")
parser.add_option("--l2_size", type="string", default="2MB")
parser.add_option("--l3_size", type="string", default="16MB")
parser.add_option("--l1d_assoc", type="int", default=2)
parser.add_option("--l1i_assoc", type="int", default=2)
parser.add_option("--l2_assoc", type="int", default=8)
parser.add_option("--l3_assoc", type="int", default=16)
parser.add_option("--cacheline_size", type="int", default=64)
parser.add_option("--ruby", action="store_true")
# Run duration options
parser.add_option("-m", "--maxtick", type="int", default=m5.MaxTick,
metavar="T", help="Stop after T ticks")
parser.add_option("--maxtime", type="float")
parser.add_option("-I", "--maxinsts", action="store", type="int",
default=None, help="""Total number of instructions to
simulate (default: run forever)""")
parser.add_option("--work-item-id", action="store", type="int",
help="the specific work id for exit & checkpointing")
parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
help="exit when work starts on the specified cpu")
parser.add_option("--work-end-exit-count", action="store", type="int",
help="exit at specified work end count")
parser.add_option("--work-begin-exit-count", action="store", type="int",
help="exit at specified work begin count")
parser.add_option("--init-param", action="store", type="int", default=0,
help="""Parameter available in simulation with m5
initparam""")
# Checkpointing options
###Note that performing checkpointing via python script files will override
###checkpoint instructions built into binaries.
parser.add_option("--take-checkpoints", action="store", type="string",
help="<M,N> will take checkpoint at cycle M and every N cycles thereafter")
parser.add_option("--max-checkpoints", action="store", type="int",
help="the maximum number of checkpoints to drop", default=5)
parser.add_option("--checkpoint-dir", action="store", type="string",
help="Place all checkpoints in this absolute directory")
parser.add_option("-r", "--checkpoint-restore", action="store", type="int",
help="restore from checkpoint <N>")
parser.add_option("--checkpoint-at-end", action="store_true",
help="take a checkpoint at end of run")
parser.add_option("--work-begin-checkpoint-count", action="store", type="int",
help="checkpoint at specified work begin count")
parser.add_option("--work-end-checkpoint-count", action="store", type="int",
help="checkpoint at specified work end count")
parser.add_option("--work-cpus-checkpoint-count", action="store", type="int",
help="checkpoint and exit when active cpu count is reached")
parser.add_option("--restore-with-cpu", action="store", type="choice",
default="atomic", choices = ["atomic", "timing",
"detailed", "inorder"],
help = "cpu type for restoring from a checkpoint")
# CPU Switching - default switch model goes from a checkpoint
# to a timing simple CPU with caches to warm up, then to detailed CPU for
# data measurement
parser.add_option("--repeat-switch", action="store", type="int",
default=None,
help="switch back and forth between CPUs with period <N>")
parser.add_option("-s", "--standard-switch", action="store", type="int",
default=None,
help="switch from timing to Detailed CPU after warmup period of <N>")
parser.add_option("-p", "--prog-interval", type="int",
help="CPU Progress Interval")
# Fastforwarding and simpoint related materials
parser.add_option("-W", "--warmup-insts", action="store", type="int",
default=None,
help="Warmup period in total instructions (requires --standard-switch)")
parser.add_option("--bench", action="store", type="string", default=None,
help="base names for --take-checkpoint and --checkpoint-restore")
parser.add_option("-F", "--fast-forward", action="store", type="string",
default=None,
help="Number of instructions to fast forward before switching")
parser.add_option("-S", "--simpoint", action="store_true", default=False,
help="""Use workload simpoints as an instruction offset for
--checkpoint-restore or --take-checkpoint.""")
parser.add_option("--at-instruction", action="store_true", default=False,
help="""Treat value of --checkpoint-restore or --take-checkpoint as a
number of instructions.""")
def addSEOptions(parser):
# Benchmark options
parser.add_option("-c", "--cmd", default="",
help="The binary to run in syscall emulation mode.")
parser.add_option("-o", "--options", default="",
help="""The options to pass to the binary, use " "
around the entire string""")
parser.add_option("-i", "--input", default="",
help="Read stdin from a file.")
parser.add_option("--output", default="",
help="Redirect stdout to a file.")
parser.add_option("--errout", default="",
help="Redirect stderr to a file.")
def addFSOptions(parser):
# Simulation options
parser.add_option("--timesync", action="store_true",
help="Prevent simulated time from getting ahead of real time")
# System options
parser.add_option("--kernel", action="store", type="string")
parser.add_option("--script", action="store", type="string")
parser.add_option("--frame-capture", action="store_true",
help="Stores changed frame buffers from the VNC server to compressed "\
"files in the gem5 output directory")
if buildEnv['TARGET_ISA'] == "arm":
parser.add_option("--bare-metal", action="store_true",
help="Provide the raw system without the linux specific bits")
parser.add_option("--machine-type", action="store", type="choice",
choices=ArmMachineType.map.keys(), default="RealView_PBX")
# Benchmark options
parser.add_option("--dual", action="store_true",
help="Simulate two systems attached with an ethernet link")
parser.add_option("-b", "--benchmark", action="store", type="string",
dest="benchmark",
help="Specify the benchmark to run. Available benchmarks: %s"\
% DefinedBenchmarks)
# Metafile options
parser.add_option("--etherdump", action="store", type="string", dest="etherdump",
help="Specify the filename to dump a pcap capture of the" \
"ethernet traffic")
# Disk Image Options
parser.add_option("--disk-image", action="store", type="string", default=None,
help="Path to the disk image to use.")
# Memory Size Options
parser.add_option("--mem-size", action="store", type="string", default=None,
help="Specify the physical memory size (single memory)")
| bsd-3-clause | -2,901,236,696,301,935,600 | 54.274725 | 85 | 0.633996 | false | 4.293641 | false | false | false |
BILS/agda | agda/species_geo_coder/views.py | 1 | 2551 | import time
#from django.shortcuts import render
from django.views.generic import TemplateView, FormView
from django.shortcuts import redirect, render
from django.db import transaction
from agda.views import package_template_dict
from jobs.models import (JOB_STATUS_LEVEL_ACCEPTED,
JOB_STATUS_LEVEL_FINISHED,
get_job_or_404)
from species_geo_coder.models import app_package, SpeciesGeoCoderJob
from species_geo_coder.forms import SpeciesGeoCoderForm
from species_geo_coder.models import tool_1
# Create your views here.
class IndexView(TemplateView):
template_name = 'species_geo_coder/index.html'
def get_context_data(self, *args, **kw):
context = super(IndexView, self).get_context_data(**kw)
return package_template_dict(self.request, app_package, *args, **kw)
class ToolView(FormView):
template_name = 'species_geo_coder/speciesgeocoder.html'
form_class = SpeciesGeoCoderForm
@transaction.atomic
def form_valid(self, form):
request = self.request
## These are all generic, should be extracted to a main class
job = SpeciesGeoCoderJob(status=JOB_STATUS_LEVEL_ACCEPTED)
job.save()
job = SpeciesGeoCoderJob.objects.select_for_update().get(pk=job.id)
job.log_create(request.user, 'Created in web interface.')
verbose = form.cleaned_data['verbose']
occurences = form.cleaned_data['occurences']
plot = form.cleaned_data['plot']
job.submit(request.user, request.META['REMOTE_ADDR'], form.cleaned_data['name'],
self.request.FILES, occurences, verbose, plot)
return redirect('jobs.views.show_results', job.slug)
@transaction.atomic
def tool_1_results(request, slug):
job = get_job_or_404(slug=slug, select_for_update=True)
job.update_status(request.user)
params = dict(job=job, tool=tool_1)
if job.is_alive:
reload_time, interval = request.session.setdefault('mdrscan', dict()).pop(job.slug, (0, 5))
if reload_time <= time.time():
reload_time = max(time.time() + 5, reload_time + interval)
interval *= 2
request.session['mdrscan'][job.slug] = (reload_time, interval)
request.session.modified = True
params.update(timeout=reload_time - time.time())
params.update(reload_time=reload_time, interval=interval)
return render(request, 'species_geo_coder/results.html', params)
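
def _reload_schedule(first_interval=5, n=5):
    # Illustrative restatement of the backoff implemented above: each page
    # load doubles the polling interval, so while a job is alive the client
    # reloads after roughly 5, 10, 20, 40, ... second gaps.
    times, t, interval = [], 0, first_interval
    for _ in range(n):
        t += interval
        times.append(t)
        interval *= 2
    return times  # [5, 15, 35, 75, 155] with the defaults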
#class ToolResultView(TemplateView):
# template_name = '<app>/tool_result.html'
| mit | 1,003,078,452,284,598,300 | 38.246154 | 99 | 0.681693 | false | 3.567832 | false | false | false |
lowRISC/manticore | util/license-checker.py | 2 | 17721 | #!/usr/bin/env python3
#
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import fnmatch
import logging
import re
import subprocess
from pathlib import Path
from types import SimpleNamespace
import hjson
from tabulate import tabulate
class LicenceHeader(object):
"""Represents the licence header we want to insert"""
def __init__(self, text):
self._lines = text.strip().splitlines()
def __getitem__(self, idx):
return self._lines.__getitem__(idx)
def __len__(self):
return self._lines.__len__()
def numbered_lines(self, skip=0):
"""Returns an iterator of (line_no, line_text).
`line_no` counts from 1, and is for humans to count line numbers with.
        Use `skip` to skip enumerating the first few lines.
"""
return enumerate(self._lines[skip:], start=1 + skip)
@property
def first_word(self):
(first_word, _) = self._lines[0].split(' ', 1)
return first_word
class CommentStyle:
'''Base class for comment style objects'''
def __init__(self, first_line_prefix, comment_prefix):
self.first_line_prefix = first_line_prefix
self.comment_prefix = comment_prefix
def search_line_pattern(self, licence_first_word):
return re.compile(
re.escape(self.comment_prefix + ' ' + licence_first_word))
def full_line_parts(self, licence_line):
return [re.escape(self.comment_prefix), licence_line]
def full_line_pattern(self, licence_line):
'''Returns a regex pattern which matches one line of licence text.'''
return re.compile(' '.join(self.full_line_parts(licence_line)))
class LineCommentStyle(CommentStyle):
"""Helpers for line-style comments."""
def __init__(self, prefix):
super().__init__(prefix, prefix)
class DifferentFirstLineCommentStyle(CommentStyle):
"""Some files have a different allowable prefix for their first line."""
def __init__(self, first_line_prefix, prefix):
super().__init__(first_line_prefix, prefix)
class BlockCommentStyle(CommentStyle):
"""Helpers for block-style comments."""
def __init__(self, prefix, suffix):
super().__init__(prefix, prefix)
self.comment_suffix = str(suffix)
def full_line_parts(self, licence_line):
return [
re.escape(self.comment_prefix), licence_line,
re.escape(self.comment_suffix)
]
SLASH_SLASH = '//'
HASH = '#'
SLASH_STAR = '/*'
COMMENT_STYLES = {
SLASH_SLASH: LineCommentStyle("//"),
HASH: LineCommentStyle("#"),
SLASH_STAR: BlockCommentStyle("/*", "*/"),
'corefile': DifferentFirstLineCommentStyle("CAPI=2", "#")
}
# (Prioritised) Mapping of file name suffixes to comment style. If the suffix
# of your file does not match one of these, it will not be checked.
#
# Each entry is a pair (suffixes, styles). suffixes is a list of file suffixes:
# if a filename matches one of these suffixes, we'll use the styles in styles.
# styles is either a string or a list of strings. If there is one or more
# strings, these strings must all be keys of COMMENT_STYLES and they give the
# different comment styles that are acceptable for the file type.
#
# These rules are given in priority order. Tuples higher in the list are
# matched before those later in the list, on purpose.
#
# Files that either do not match any extension or that have an empty list of
# styles are not checked for a licence.
COMMENT_CHARS = [
# Hardware Files
([".svh", ".sv", ".sv.tpl"], SLASH_SLASH), # SystemVerilog
# Hardware Build Systems
([".tcl", ".sdc"], HASH), # tcl
([".core", ".core.tpl"], 'corefile'), # FuseSoC Core Files
(["Makefile", ".mk"], HASH), # Makefiles
([".ys"], HASH), # Yosys script
([".waiver"], HASH), # AscentLint waiver files
([".vlt"], SLASH_SLASH), # Verilator configuration (waiver) files
([".vbl"], HASH), # Verible configuration files
([".el", ".el.tpl"], SLASH_SLASH), # Exclusion list
([".cfg", ".cfg.tpl"], [SLASH_SLASH,
HASH]), # Kinds of configuration files
([".f"], []), # File lists (not checked)
# The following two rules will inevitably bite us.
(["riviera_run.do"], HASH), # Riviera dofile
([".do"], SLASH_SLASH), # Cadence LEC dofile
# Software Files
([".c", ".c.tpl", ".h", ".h.tpl", ".cc", ".cpp"], SLASH_SLASH), # C, C++
([".def"], SLASH_SLASH), # C, C++ X-Include List Declaration Files
([".S"], [SLASH_SLASH, SLASH_STAR]), # Assembly (With Preprocessing)
([".s"], SLASH_STAR), # Assembly (Without Preprocessing)
([".ld", ".ld.tpl"], SLASH_STAR), # Linker Scripts
([".rs", ".rs.tpl"], SLASH_SLASH), # Rust
# Software Build Systems
(["meson.build", "toolchain.txt", "meson_options.txt"], HASH), # Meson
# General Tooling
([".py"], HASH), # Python
([".sh"], HASH), # Shell Scripts
(["Dockerfile"], HASH), # Dockerfiles
# Configuration
([".hjson", ".hjson.tpl"], SLASH_SLASH), # hjson
([".yml", ".yaml"], HASH), # YAML
([".toml"], HASH), # TOML
(["-requirements.txt"], HASH), # Apt and Python requirements files
(["redirector.conf"], HASH), # nginx config
# Documentation
([".md", ".md.tpl", ".html"], []), # Markdown and HTML (not checked)
([".css"], SLASH_STAR), # CSS
([".scss"], SLASH_SLASH), # SCSS
# Templates (Last because there are overlaps with extensions above)
([".tpl"], HASH), # Mako templates
]
class LicenceMatcher:
'''An object to match a given licence at the start of a file'''
def __init__(self, comment_style, licence, match_regex):
self.style = comment_style
self.expected_lines = list()
# In case we are using regex matching we can pass the full line "as is"
if match_regex:
for i, ll in enumerate(licence):
try:
self.expected_lines.append(
comment_style.full_line_pattern(ll))
# Catch any regex error here and raise a runtime error.
except re.error as e:
raise RuntimeError(
"Can't compile line {} of the licence as a regular expression. Saw `{}`: {}"
.format(i, e.pattern[e.pos], e.msg))
# use the "first line" as a licence marker
self.search_marker = self.expected_lines[0]
# For non-regex matching we need to escape everything.
# This can never throw an exception as everything has been escaped and
# therefore is always a legal regex.
else:
self.search_marker = comment_style.search_line_pattern(
licence.first_word)
self.expected_lines = [
comment_style.full_line_pattern(re.escape(ll))
for ll in licence
]
self.lines_left = []
def looks_like_first_line_comment(self, line):
return line.startswith(self.style.first_line_prefix)
def looks_like_comment(self, line):
return line.startswith(self.style.comment_prefix)
def looks_like_first_line(self, line):
return self.search_marker.match(line) is not None
def start(self):
'''Reset lines_left, to match at the start of the licence'''
self.lines_left = self.expected_lines
def take_line(self, line):
'''Check whether line matches the next line of the licence.
Returns a pair (matched, done). matched is true if the line matched. If
this was the last line of the licence, done is true. On a match, this
increments an internal counter, so the next call to take_line will
match against the next line of the licence.
'''
# If we have no more lines to match, claim a match and that we're done.
# This shouldn't happen in practice, except if the configuration has an
# empty licence.
if not self.lines_left:
return (True, True)
next_expected = self.lines_left[0]
matched = next_expected.fullmatch(line)
if not matched:
return (False, False)
if matched:
self.lines_left = self.lines_left[1:]
return (True, not self.lines_left)
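
def _demo_take_line(matcher, lines):
    # Illustrative sketch of the protocol used by check_file_with_matcher()
    # below: reset with start(), then feed candidate lines one at a time.
    matcher.start()
    for line in lines:
        matched, done = matcher.take_line(line)
        if not matched:
            return False   # diverged from the licence text on this line
        if done:
            return True    # every licence line matched, in order
    return False           # ran out of lines before the licence ended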
def detect_comment_char(all_matchers, filename):
'''Find zero or more LicenceMatcher objects for filename
all_matchers should be a dict like COMMENT_STYLES, but where the values are
the corresponding LicenceMatcher objects.
'''
found = None
for (suffixes, keys) in COMMENT_CHARS:
if found is not None:
break
for suffix in suffixes:
if filename.endswith(suffix):
found = keys
break
if found is None:
return []
if not isinstance(found, list):
assert isinstance(found, str)
found = [found]
return [all_matchers[key] for key in found]
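
def _demo_detect_comment_char(all_matchers):
    # Illustrative only: how the suffix table above resolves file names.
    sv = detect_comment_char(all_matchers, 'top.sv')        # '//' style
    mk = detect_comment_char(all_matchers, 'Makefile')      # '#' style
    md = detect_comment_char(all_matchers, 'README.md')     # [] -> skipped
    return sv, mk, md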
def git_find_repo_toplevel():
git_output = subprocess.check_output(
['git', 'rev-parse', '--show-toplevel'])
return Path(git_output.decode().strip()).resolve()
def git_find_all_file_paths(top_level, search_paths):
git_output = subprocess.check_output(
["git", "-C",
str(top_level), "ls-files", "-z", "--", *search_paths])
for path in git_output.rstrip(b"\0").split(b"\0"):
yield Path(top_level, path.decode())
class ResultsTracker(object):
"""Helper for tracking results"""
def __init__(self, base_dir):
self.base_dir = base_dir
passed_count = 0
failed_count = 0
excluded_count = 0
skipped_count = 0
failing_paths = set()
@property
def total_count(self):
return (self.passed_count + self.failed_count + self.skipped_count +
self.excluded_count)
def passed(self, path, line_no, reason):
rel_path = path.relative_to(self.base_dir)
logging.debug("%s:%d PASSED: %s", str(rel_path), line_no, reason)
self.passed_count += 1
def failed(self, path, line_no, reason):
rel_path = path.relative_to(self.base_dir)
logging.error("%s:%d FAILED: %s", str(rel_path), line_no, reason)
self.failing_paths.add(rel_path)
self.failed_count += 1
def skipped(self, path, reason):
rel_path = path.relative_to(self.base_dir)
logging.info("%s: SKIPPED: %s", str(rel_path), reason)
self.skipped_count += 1
def excluded(self, path, reason):
rel_path = path.relative_to(self.base_dir)
logging.debug("%s: EXCLUDED: %s", str(rel_path), reason)
self.excluded_count += 1
def any_failed(self):
return self.failed_count > 0
def display_nicely(self):
headers = ["Results:", "Files"]
results = [["Passed", self.passed_count],
["Failed", self.failed_count],
["Skipped", self.skipped_count],
["Excluded", self.excluded_count],
["Total", self.total_count]]
return tabulate(results, headers, tablefmt="simple")
def matches_exclude_pattern(config, file_path):
rel_path = str(file_path.relative_to(config.base_dir))
for exclude_pattern in config.exclude_paths:
if fnmatch.fnmatch(rel_path, exclude_pattern):
return True
return False
def check_paths(config, git_paths):
results = ResultsTracker(config.base_dir)
try:
all_matchers = {
key: LicenceMatcher(style, config.licence, config.match_regex)
for key, style in COMMENT_STYLES.items()
}
except RuntimeError as e:
exit(e)
for filepath in git_find_all_file_paths(config.base_dir, git_paths):
# Skip symlinks (with message)
if filepath.is_symlink():
results.excluded(filepath, "File is a symlink")
continue
# Skip non-file
if not filepath.is_file():
continue
# Skip exclude patterns
if matches_exclude_pattern(config, filepath):
results.excluded(filepath, "Path matches exclude pattern")
continue
check_file_for_licence(all_matchers, results, filepath)
return results
def check_file_for_licence(all_matchers, results, filepath):
matchers = detect_comment_char(all_matchers, filepath.name)
if not matchers:
results.skipped(filepath, "Unknown comment style")
return
if filepath.stat().st_size == 0:
results.skipped(filepath, "Empty file")
return
problems = []
for matcher in matchers:
good, line_num, msg = check_file_with_matcher(matcher, filepath)
if good:
results.passed(filepath, line_num, msg)
return
else:
problems.append((line_num, msg))
# If we get here, we didn't find a matching licence
for line_num, msg in problems:
results.failed(filepath, line_num, msg)
def check_file_with_matcher(matcher, filepath):
'''Check the file at filepath against matcher.
Returns a tuple (is_good, line_number, msg). is_good is True on success;
False on failure. line_number is the position where the licence was found
(on success) or where we gave up searching for it (on failure). msg is the
associated success or error message.
'''
def next_line(file, line_no):
return (next(file).rstrip(), line_no + 1)
with filepath.open() as f:
licence_assumed_start = None
# Get first line
try:
line, line_no = next_line(f, 0)
except StopIteration:
return (False, 1, "Empty file")
# Check first line against the first word of licence, or against a
# possible different first line.
if not matcher.looks_like_first_line(line):
if not matcher.looks_like_first_line_comment(line):
return (False, line_no, "File does not start with comment")
try:
line, line_no = next_line(f, line_no)
except StopIteration:
return (False, line_no,
"Reached end of file before finding licence")
# Skip lines that don't seem to be the first line of the licence
while not matcher.looks_like_first_line(line):
try:
line, line_no = next_line(f, line_no)
except StopIteration:
return (False, line_no,
"Reached end of file before finding licence")
if not matcher.looks_like_comment(line):
return (False, line_no,
"First comment ended before licence notice")
# We found the marker, so we found the first line of the licence. The
# current line is in the first comment, so check the line matches the
# expected first line:
licence_assumed_start = line_no
matcher.start()
matched, done = matcher.take_line(line)
if not matched:
return (False, line_no, "Licence does not match")
while not done:
try:
line, line_no = next_line(f, line_no)
except StopIteration:
return (False, line_no,
"Reached end of file before finding licence")
# Check against full expected line.
matched, done = matcher.take_line(line)
if not matched:
return (False, line_no, "Licence did not match")
return (True, licence_assumed_start, "Licence found")
def main():
desc = "A tool to check the lowRISC licence header is in each source file"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--config",
metavar="config.hjson",
type=argparse.FileType('r', encoding='UTF-8'),
required=True,
help="HJSON file to read for licence configuration.")
parser.add_argument("paths",
metavar="path",
nargs='*',
default=["."],
help="Paths to check for licence headers.")
parser.add_argument('-v',
"--verbose",
action='store_true',
dest='verbose',
help="Verbose output")
options = parser.parse_args()
if options.verbose:
logging.basicConfig(format="%(levelname)s: %(message)s",
level=logging.INFO)
else:
logging.basicConfig(format="%(levelname)s: %(message)s")
config = SimpleNamespace()
config.base_dir = git_find_repo_toplevel()
parsed_config = hjson.load(options.config)
config.licence = LicenceHeader(parsed_config['licence'])
config.exclude_paths = set(parsed_config['exclude_paths'])
# Check whether we should use regex matching or full string matching.
match_regex = parsed_config.get('match_regex', 'false')
if match_regex not in ['true', 'false']:
print('Invalid value for match_regex: {!r}. '
'Should be "true" or "false".'.format(match_regex))
exit(1)
config.match_regex = match_regex == 'true'
results = check_paths(config, options.paths)
print(results.display_nicely())
if results.any_failed():
print("Failed:")
for path in results.failing_paths:
print(" {}".format(str(path)))
print("")
exit(1)
else:
exit(0)
if __name__ == '__main__':
main()
| apache-2.0 | -6,870,868,201,439,118,000 | 33.276596 | 100 | 0.596524 | false | 3.853229 | true | false | false |
jithinbp/vLabtool-v0 | v0/utilitiesClass.py | 1 | 2985 | import time,random,functools
import numpy as np
from PyQt4 import QtCore, QtGui
import pyqtgraph as pg
import pyqtgraph.opengl as gl
class utilitiesClass():
"""
This class contains methods that simplify setting up and running
an experiment.
"""
timers=[]
viewBoxes=[]
plots3D=[]
plots2D=[]
axisItems=[]
total_plot_areas=0
def __init__(self):
pass
def updateViews(self,plot):
for a in plot.viewBoxes:
a.setGeometry(plot.getViewBox().sceneBoundingRect())
a.linkedViewChanged(plot.plotItem.vb, a.XAxis)
def random_color(self):
c=QtGui.QColor(random.randint(20,255),random.randint(20,255),random.randint(20,255))
if np.average(c.getRgb())<150:
c=self.random_color()
return c
def add2DPlot(self,plot_area):
plot=pg.PlotWidget()
plot.setMinimumHeight(250)
plot_area.addWidget(plot)
plot.viewBoxes=[]
self.plots2D.append(plot)
return plot
def add3DPlot(self,plot_area):
plot3d = gl.GLViewWidget()
#gx = gl.GLGridItem();gx.rotate(90, 0, 1, 0);gx.translate(-10, 0, 0);self.plot.addItem(gx)
#gy = gl.GLGridItem();gy.rotate(90, 1, 0, 0);gy.translate(0, -10, 0);self.plot.addItem(gy)
gz = gl.GLGridItem();#gz.translate(0, 0, -10);
plot3d.addItem(gz);
plot3d.opts['distance'] = 40
plot3d.opts['elevation'] = 5
plot3d.opts['azimuth'] = 20
plot3d.setMinimumHeight(250)
plot_area.addWidget(plot3d)
self.plots3D.append(plot3d)
plot3d.plotLines3D=[]
return plot3d
def addCurve(self,plot,name='',col=(255,255,255),axis='left'):
#if(len(name)):curve = plot.plot(name=name)
#else:curve = plot.plot()
if(len(name)):curve = pg.PlotCurveItem(name=name)
else:curve = pg.PlotCurveItem()
plot.addItem(curve)
curve.setPen(color=col, width=1)
return curve
def rebuildLegend(self,plot):
return plot.addLegend(offset=(-10,30))
def addAxis(self,plot,**args):
p3 = pg.ViewBox()
ax3 = pg.AxisItem('right')
plot.plotItem.layout.addItem(ax3, 2, 3+len(self.axisItems))
plot.plotItem.scene().addItem(p3)
ax3.linkToView(p3)
p3.setXLink(plot.plotItem)
ax3.setZValue(-10000)
if args.get('label',False):
ax3.setLabel(args.get('label',False), color=args.get('color','#ffffff'))
plot.viewBoxes.append(p3)
p3.setGeometry(plot.plotItem.vb.sceneBoundingRect())
p3.linkedViewChanged(plot.plotItem.vb, p3.XAxis)
## Handle view resizing
Callback = functools.partial(self.updateViews,plot)
plot.getViewBox().sigStateChanged.connect(Callback)
self.axisItems.append(ax3)
return p3
def loopTask(self,interval,func,*args):
timer = QtCore.QTimer()
timerCallback = functools.partial(func,*args)
timer.timeout.connect(timerCallback)
timer.start(interval)
self.timers.append(timer)
return timer
def delayedTask(self,interval,func,*args):
timer = QtCore.QTimer()
timerCallback = functools.partial(func,*args)
timer.singleShot(interval,timerCallback)
self.timers.append(timer)
def displayDialog(self,txt=''):
QtGui.QMessageBox.about(self, 'Message', txt)
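# Minimal usage sketch (illustrative names; assumes a running QApplication
# and a QVBoxLayout named plot_area):
#
#   util = utilitiesClass()
#   plot = util.add2DPlot(plot_area)
#   curve = util.addCurve(plot, name='signal', col=(0, 255, 0))
#   util.loopTask(100, curve.setData, x, y)   # refresh every 100 ms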
| gpl-3.0 | -7,986,475,858,903,219,000 | 25.651786 | 92 | 0.710218 | false | 2.748619 | false | false | false |
devilry/devilry-django | devilry/apps/core/tests/test_groupinvite.py | 1 | 24874 | from django.core import mail
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.utils import timezone
from django.utils.timezone import timedelta
from django.urls import reverse
from model_bakery import baker
from devilry.apps.core import devilry_core_baker_factories as core_baker
from devilry.apps.core.models import AssignmentGroup
from devilry.apps.core.models import GroupInvite
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
class TestGroupInviteErrors(TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_user_sending_is_not_part_of_the_group(self):
testgroup = baker.make('core.AssignmentGroup')
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
testgroup2 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup1).relatedstudent.user
sent_to = core_baker.candidate(testgroup2).relatedstudent.user
with self.assertRaisesMessage(
ValidationError,
'The user sending an invite must be a Candiate on the group.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_student_already_member_of_the_group(self):
testgroup = baker.make('core.AssignmentGroup')
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup).relatedstudent.user
with self.assertRaisesMessage(
ValidationError,
'The student is already a member of the group.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_student_already_invited_but_not_responded(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
baker.make('core.GroupInvite', group=testgroup, sent_by=sent_by, sent_to=sent_to)
with self.assertRaisesMessage(
ValidationError,
'The student is already invited to join the group, but they have not responded yet.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_create_groups_expired(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__students_can_create_groups=True,
parentnode__students_can_not_create_groups_after=timezone.now() - timedelta(days=1))
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
with self.assertRaisesMessage(
ValidationError,
'Creating project groups without administrator approval is not '
'allowed on this assignment anymore. Please contact you course '
'administrator if you think this is wrong.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_assignment_does_not_allow_students_to_form_groups(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__students_can_create_groups=False)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
with self.assertRaisesMessage(
ValidationError,
'This assignment does not allow students to form project groups on their own.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_student_sent_to_is_not_registerd_on_assignment(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup')
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
with self.assertRaisesMessage(
ValidationError,
'The invited student is not registered on this assignment.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_student_sent_to_is_already_in_a_group_with_more_than_one_student(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
core_baker.candidate(testgroup1)
with self.assertRaisesMessage(
ValidationError,
'The invited student is already in a project group with more than 1 students.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to,
accepted=True
)
invite.full_clean()
def test_sanity(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
self.assertEqual(invite.sent_to, sent_to)
self.assertEqual(invite.sent_by, sent_by)
self.assertEqual(invite.group, testgroup)
self.assertIsNotNone(invite.sent_datetime)
def test_sanity_accepted(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to,
accepted=True
)
invite.full_clean()
self.assertEqual(invite.sent_to, sent_to)
self.assertEqual(invite.sent_by, sent_by)
self.assertEqual(invite.group, testgroup)
self.assertTrue(invite.accepted)
self.assertIsNotNone(invite.responded_datetime)
class TestGroupInviteQueryset(TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_queryset_sanity(self):
baker.make('core.GroupInvite', id=100)
self.assertEqual(GroupInvite.objects.all().first().id, 100)
def test_filter_accepted(self):
baker.make('core.GroupInvite', accepted=None, id=10)
baker.make('core.GroupInvite', accepted=False, id=11)
baker.make('core.GroupInvite', accepted=True, id=100)
baker.make('core.GroupInvite', accepted=True, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_accepted()),
{100, 101}
)
def test_filter_no_response(self):
baker.make('core.GroupInvite', accepted=None, id=10)
baker.make('core.GroupInvite', accepted=None, id=11)
baker.make('core.GroupInvite', accepted=True, id=100)
baker.make('core.GroupInvite', accepted=False, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_no_response()),
{10, 11}
)
def test_filter_rejected(self):
baker.make('core.GroupInvite', accepted=False, id=10)
baker.make('core.GroupInvite', accepted=False, id=11)
baker.make('core.GroupInvite', accepted=True, id=100)
baker.make('core.GroupInvite', accepted=None, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_rejected()),
{10, 11}
)
def test_filter_unanswered_received_invites(self):
group = baker.make('core.AssignmentGroup')
sent_by = core_baker.candidate(group=group).relatedstudent.user
sent_to = core_baker.candidate(group=group).relatedstudent.user
baker.make('core.GroupInvite', sent_by=sent_by, sent_to=sent_to, accepted=False, id=10)
baker.make('core.GroupInvite', sent_by=sent_by, sent_to=sent_to, accepted=None, id=11)
baker.make('core.GroupInvite', sent_by=sent_by, sent_to=sent_to, accepted=True, id=100)
baker.make('core.GroupInvite', sent_by=sent_by, sent_to=sent_to, accepted=None, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_unanswered_received_invites(sent_to)),
{11, 101}
)
def test_filter_unanswered_sent_invites(self):
group = baker.make('core.AssignmentGroup')
baker.make('core.GroupInvite', group=group, accepted=False, id=10)
baker.make('core.GroupInvite', group=group, accepted=None, id=11)
baker.make('core.GroupInvite', group=group, accepted=True, id=100)
baker.make('core.GroupInvite', group=group, accepted=None, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_unanswered_sent_invites(group)),
{11, 101}
)
def test_filter_allowed_to_create_groups(self):
assignment_expired = baker.make(
'core.Assignment',
students_can_create_groups=True,
students_can_not_create_groups_after=timezone.now() - timedelta(days=1)
)
assignment_not_expired = baker.make(
'core.Assignment',
students_can_create_groups=True,
students_can_not_create_groups_after=timezone.now() + timedelta(days=1)
)
assignment_not_allowed = baker.make('core.Assignment', students_can_create_groups=False)
assignment_allowed = baker.make('core.Assignment', students_can_create_groups=True)
group1 = baker.make('core.AssignmentGroup', parentnode=assignment_expired)
group2 = baker.make('core.AssignmentGroup', parentnode=assignment_not_expired)
group3 = baker.make('core.AssignmentGroup', parentnode=assignment_not_allowed)
group4 = baker.make('core.AssignmentGroup', parentnode=assignment_allowed)
baker.make('core.GroupInvite', group=group1, id=10)
baker.make('core.GroupInvite', group=group2, id=11)
baker.make('core.GroupInvite', group=group3, id=100)
baker.make('core.GroupInvite', group=group4, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_allowed_to_create_groups()),
{11, 101}
)
class FakeRequest(object):
    """A minimal stand-in for a Django request; it only provides
    build_absolute_uri(), which is all these tests need."""
    def build_absolute_uri(self, location):
        return 'http://example.com{}'.format(location)
class GroupInviteRespond(TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def __fake_request(self):
return FakeRequest()
def test_respond_reject(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
student1 = core_baker.candidate(group=group1).relatedstudent.user
student2 = core_baker.candidate(group=group2).relatedstudent.user
invite = baker.make('core.GroupInvite', sent_by=student1, sent_to=student2, group=group1)
invite.respond(False)
self.assertFalse(GroupInvite.objects.get(id=invite.id).accepted)
group = AssignmentGroup.objects.filter_user_is_candidate(student2)
self.assertEqual(group.count(), 1)
self.assertEqual(group.first().id, group2.id)
def test_respond_accept(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
student1 = core_baker.candidate(group=group1).relatedstudent.user
student2 = core_baker.candidate(group=group2).relatedstudent.user
invite = baker.make('core.GroupInvite', sent_by=student1, sent_to=student2, group=group1)
invite.respond(True)
self.assertTrue(GroupInvite.objects.get(id=invite.id).accepted)
group = AssignmentGroup.objects.filter_user_is_candidate(student2)
self.assertEqual(group.count(), 1)
self.assertEqual(group.first().id, group1.id)
self.assertEqual(group.first().cached_data.candidate_count, 2)
self.assertFalse(AssignmentGroup.objects.filter(id=group2.id).exists())
def test_num_queries_accept(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
student1 = core_baker.candidate(group=group1).relatedstudent.user
student2 = core_baker.candidate(group=group2).relatedstudent.user
invite = baker.make('core.GroupInvite', sent_by=student1, sent_to=student2, group=group1)
with self.assertNumQueries(36):
invite.respond(True)
def test_num_queries_reject(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
student1 = core_baker.candidate(group=group1).relatedstudent.user
student2 = core_baker.candidate(group=group2).relatedstudent.user
invite = baker.make('core.GroupInvite', sent_by=student1, sent_to=student2, group=group1)
with self.assertNumQueries(9):
invite.respond(False)
def test_send_invite_mail(self):
assignment = baker.make(
'core.Assignment',
long_name='Assignment 1',
short_name='assignment1',
parentnode__long_name='Spring2017',
parentnode__short_name='s17',
parentnode__parentnode__long_name='DUCK1010 - Object Oriented Programming',
parentnode__parentnode__short_name='Duck1010',
students_can_create_groups=True,
)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=assignment)
sent_by = core_baker.candidate(testgroup, shortname="[email protected]", fullname="April").relatedstudent.user
sent_to = core_baker.candidate(testgroup1, shortname="[email protected]", fullname="Dewey").relatedstudent.user
baker.make('devilry_account.UserEmail', user=sent_to, email="[email protected]")
invite = GroupInvite(group=testgroup, sent_by=sent_by, sent_to=sent_to)
invite.full_clean()
invite.save()
request = self.__fake_request()
invite.send_invite_notification(request)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, '[Devilry] Project group invite for Duck1010.s17.assignment1')
url = request.build_absolute_uri(
reverse('devilry_student_groupinvite_respond', kwargs={'invite_id': invite.id}))
self.assertIn(url, mail.outbox[0].body)
def test_send_reject_mail(self):
assignment = baker.make(
'core.Assignment',
long_name='Assignment 1',
short_name='assignment1',
parentnode__long_name='Spring2017',
parentnode__short_name='s17',
parentnode__parentnode__long_name='DUCK1010 - Object Oriented Programming',
parentnode__parentnode__short_name='Duck1010',
students_can_create_groups=True,
)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=assignment)
sent_by = core_baker.candidate(testgroup, shortname="[email protected]", fullname="April").relatedstudent.user
sent_to = core_baker.candidate(testgroup1, shortname="[email protected]", fullname="Dewey").relatedstudent.user
baker.make('devilry_account.UserEmail', user=sent_to, email="[email protected]")
baker.make('devilry_account.UserEmail', user=sent_by, email="[email protected]")
invite = GroupInvite(
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
invite.save()
invite.send_invite_notification(self.__fake_request())
invite.respond(False)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[1].subject, '[Devilry] Dewey rejected your project group invite')
def test_send_accept_mail(self):
assignment = baker.make(
'core.Assignment',
long_name='Assignment 1',
short_name='assignment1',
parentnode__long_name='Spring2017',
parentnode__short_name='s17',
parentnode__parentnode__long_name='DUCK1010 - Object Oriented Programming',
parentnode__parentnode__short_name='Duck1010',
students_can_create_groups=True,
)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=assignment)
sent_by = core_baker.candidate(testgroup, shortname="[email protected]", fullname="April").relatedstudent.user
sent_to = core_baker.candidate(testgroup1, shortname="[email protected]", fullname="Dewey").relatedstudent.user
baker.make('devilry_account.UserEmail', user=sent_to, email="[email protected]")
baker.make('devilry_account.UserEmail', user=sent_by, email="[email protected]")
invite = GroupInvite(
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
invite.save()
invite.send_invite_notification(self.__fake_request())
invite.respond(True)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[1].subject, '[Devilry] Dewey accepted your project group invite')
def test_send_invite_to_choices_queryset(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
group3 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
group4 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
core_baker.candidate(group=group1, fullname="Louie", shortname="louie")
core_baker.candidate(group=group2, fullname="Huey", shortname="huey")
core_baker.candidate(group=group2, fullname="Donald", shortname="donald")
candidate4 = core_baker.candidate(group=group3, fullname="April", shortname="april")
candidate5 = core_baker.candidate(group=group4, fullname="Dewey", shortname="dewey")
candidates = GroupInvite.send_invite_to_choices_queryset(group1)
self.assertEqual(candidates.count(), 2)
self.assertEqual(
set(candidate.id for candidate in candidates),
{candidate4.id, candidate5.id}
)
def test_send_invite_to_choices_queryset_pending_is_excluded(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
group3 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
group4 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
candidate1 = core_baker.candidate(group=group1, fullname="Louie", shortname="louie")
core_baker.candidate(group=group2, fullname="Huey", shortname="huey")
core_baker.candidate(group=group2, fullname="Donald", shortname="donald")
candidate4 = core_baker.candidate(group=group3, fullname="April", shortname="april")
candidate5 = core_baker.candidate(group=group4, fullname="Dewey", shortname="dewey")
baker.make(
'core.GroupInvite',
group=group1,
sent_to=candidate4.relatedstudent.user,
sent_by=candidate1.relatedstudent.user
)
candidates = GroupInvite.send_invite_to_choices_queryset(group1)
self.assertEqual(candidates.count(), 1)
self.assertEqual(
set(candidate.id for candidate in candidates),
{candidate5.id}
)
def test_validate_user_id_send_to(self):
assignment = baker.make('core.Assignment', students_can_create_groups=True)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=assignment)
core_baker.candidate(testgroup)
sent_to = core_baker.candidate(testgroup1)
with self.assertNumQueries(1):
GroupInvite.validate_candidate_id_sent_to(testgroup, sent_to.id)
def test_validation_user_id_send_to_error_wrong_assignment(self):
assignment = baker.make('core.Assignment', students_can_create_groups=True)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
testgroup1 = baker.make('core.AssignmentGroup')
core_baker.candidate(testgroup)
sent_to = core_baker.candidate(testgroup1)
with self.assertRaisesMessage(ValidationError, 'The selected student is not eligible to join the group.'):
GroupInvite.validate_candidate_id_sent_to(testgroup, sent_to.id)
def test_validation_user_id_send_to_error_already_in_group(self):
assignment = baker.make('core.Assignment', students_can_create_groups=True)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
core_baker.candidate(testgroup)
sent_to = core_baker.candidate(testgroup)
with self.assertRaisesMessage(ValidationError, 'The selected student is not eligible to join the group.'):
GroupInvite.validate_candidate_id_sent_to(testgroup, sent_to.id)
def test_invite_has_already_been_accepted(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
invite = baker.make('core.GroupInvite', group=testgroup, sent_by=sent_by, sent_to=sent_to, accepted=True)
with self.assertRaisesMessage(ValidationError, 'This invite has already been accepted.'):
invite.respond(True)
def test_invite_has_already_been_declined(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
invite = baker.make('core.GroupInvite', group=testgroup, sent_by=sent_by, sent_to=sent_to, accepted=False)
with self.assertRaisesMessage(ValidationError, 'This invite has already been declined.'):
invite.respond(False)
| bsd-3-clause | -7,695,764,086,700,360,000 | 48.947791 | 119 | 0.659564 | false | 3.737641 | true | false | false |
koduj-z-klasa/python101 | docs/webflask/quiz_orm/quiz2_pw/views.py | 1 | 4117 | # -*- coding: utf-8 -*-
# quiz-orm/views.py
from flask import render_template, request, redirect, url_for, abort, flash
from app import app
from models import Pytanie, Odpowiedz
from forms import *
@app.route('/')
def index():
"""Strona główna"""
return render_template('index.html')
@app.route('/lista')
def lista():
"""Pobranie wszystkich pytań z bazy i zwrócenie szablonu z listą pytań"""
pytania = Pytanie().select().annotate(Odpowiedz)
if not pytania.count():
flash('Brak pytań w bazie.', 'kom')
return redirect(url_for('index'))
return render_template('lista.html', pytania=pytania)
@app.route('/quiz', methods=['GET', 'POST'])
def quiz():
"""Wyświetlenie pytań i odpowiedzi w formie quizu oraz ocena poprawności
przesłanych odpowiedzi"""
if request.method == 'POST':
wynik = 0
for pid, odp in request.form.items():
odpok = Pytanie.select(Pytanie.odpok).where(
Pytanie.id == int(pid)).scalar()
if odp == odpok:
wynik += 1
flash('Liczba poprawnych odpowiedzi, to: {0}'.format(wynik), 'sukces')
return redirect(url_for('index'))
    # GET: display the questions
pytania = Pytanie().select().annotate(Odpowiedz)
if not pytania.count():
flash('Brak pytań w bazie.', 'kom')
return redirect(url_for('index'))
return render_template('quiz.html', pytania=pytania)
def flash_errors(form):
"""Odczytanie wszystkich błędów formularza i przygotowanie komunikatów"""
for field, errors in form.errors.items():
for error in errors:
if type(error) is list:
error = error[0]
flash("Błąd: {}. Pole: {}".format(
error,
getattr(form, field).label.text))
@app.route('/dodaj', methods=['GET', 'POST'])
def dodaj():
"""Dodawanie pytań i odpowiedzi"""
form = DodajForm()
if form.validate_on_submit():
odp = form.odpowiedzi.data
p = Pytanie(pytanie=form.pytanie.data, odpok=odp[int(form.odpok.data)])
p.save()
for o in odp:
inst = Odpowiedz(pnr=p.id, odpowiedz=o)
inst.save()
flash("Dodano pytanie: {}".format(form.pytanie.data))
return redirect(url_for("lista"))
elif request.method == 'POST':
flash_errors(form)
return render_template("dodaj.html", form=form, radio=list(form.odpok))
def get_or_404(pid):
"""Pobranie i zwrócenie obiektu z bazy lub wywołanie szablonu 404.html"""
try:
p = Pytanie.select().annotate(Odpowiedz).where(Pytanie.id == pid).get()
return p
except Pytanie.DoesNotExist:
abort(404)
@app.errorhandler(404)
def page_not_found(e):
"""Zwrócenie szablonu 404.html w przypadku nie odnalezienia strony"""
return render_template('404.html'), 404
@app.route('/edytuj/<int:pid>', methods=['GET', 'POST'])
def edytuj(pid):
"""Edycja pytania o identyfikatorze pid i odpowiedzi"""
p = get_or_404(pid)
form = DodajForm()
if form.validate_on_submit():
odp = form.odpowiedzi.data
p.pytanie = form.pytanie.data
p.odpok = odp[int(form.odpok.data)]
p.save()
for i, o in enumerate(p.odpowiedzi):
o.odpowiedz = odp[i]
o.save()
flash("Zaktualizowano pytanie: {}".format(form.pytanie.data))
return redirect(url_for("lista"))
elif request.method == 'POST':
flash_errors(form)
for i in range(3):
if p.odpok == p.odpowiedzi[i].odpowiedz:
p.odpok = i
break
form = DodajForm(obj=p)
return render_template("edytuj.html", form=form, radio=list(form.odpok))
@app.route('/usun/<int:pid>', methods=['GET', 'POST'])
def usun(pid):
"""Usunięcie pytania o identyfikatorze pid"""
p = get_or_404(pid)
if request.method == 'POST':
flash('Usunięto pytanie {0}'.format(p.pytanie), 'sukces')
p.delete_instance(recursive=True)
return redirect(url_for('index'))
return render_template("pytanie_usun.html", pytanie=p)
| mit | 5,689,004,494,325,511,000 | 30.236641 | 79 | 0.611193 | false | 2.873596 | false | false | false |
PyQuake/earthquakemodels | code/runExperiments/histogramMagnitude.py | 1 | 1982 | import matplotlib.pyplot as plt
import models.model as model
import earthquake.catalog as catalog
from collections import OrderedDict
def histogramMagnitude(catalog_, region):
"""
    Creates the histogram of magnitudes for a given region.
    Saves the histogram to the following path: ./code/Zona2/histograms/<region>/Magnitude Histogram of <year> <region>.png
    where region is supplied by the caller and year runs from 2000 to 2011.
"""
definition = model.loadModelDefinition('../params/' + region + '.txt')
catalogFiltred = catalog.filter(catalog_, definition)
year = 2000
while(year < 2012):
data = dict()
for i in range(len(catalogFiltred)):
if catalogFiltred[i]['year'] == year and catalogFiltred[i]['lat'] > 34.8 and catalogFiltred[i][
'lat'] < 37.05 and catalogFiltred[i]['lon'] > 138.8 and catalogFiltred[i]['lon'] < 141.05:
data[catalogFiltred[i]['mag']] = data.get(catalogFiltred[i]['mag'], 0) + 1
b = OrderedDict(sorted(data.items()))
plt.title('Histogram of ' + str(year) + " " + region)
plt.bar(range(len(data)), b.values(), align='center')
plt.xticks(range(len(data)), b.keys(), rotation=25)
# print(b)
        plt.savefig(
            '../Zona2/histograms/' + region + '/Magnitude Histogram of ' +
            str(year) + " " + region + '.png')
        plt.clf()  # clear the current figure so bars do not accumulate across years
        del data
year += 1
def main():
"""
    Calls the function that plots a histogram of magnitudes for each region, based on the JMA catalog
"""
catalog_ = catalog.readFromFile('../data/jmacat_2000_2013.dat')
region = "Kanto"
histogramMagnitude(catalog_, region)
region = "Kansai"
histogramMagnitude(catalog_, region)
region = "Tohoku"
histogramMagnitude(catalog_, region)
region = "EastJapan"
histogramMagnitude(catalog_, region)
if __name__ == "__main__":
main()
| bsd-3-clause | 2,488,482,629,625,943,600 | 35.036364 | 140 | 0.604945 | false | 3.597096 | false | false | false |
Skyscanner/pages | test/utils/hamcrest_matchers.py | 1 | 2357 | ############################################################################
# Copyright 2015 Skyscanner Ltd #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
from hamcrest.core.base_matcher import BaseMatcher
from selenium.common.exceptions import TimeoutException
from pages.page import Page
DEFAULT_POLLING_TIME = 0.5
DEFAULT_TIMEOUT = 25
class PageIsLoaded(BaseMatcher):
def __init__(self, timeout):
BaseMatcher.__init__(self)
self.timeout = timeout
self.polling = DEFAULT_POLLING_TIME
self.page_name = None
def _matches(self, page):
self.page_name = page.name
if isinstance(page, Page):
try:
page.wait_until_loaded(self.timeout, self.polling)
return True
except TimeoutException:
return False
def describe_to(self, description):
description.append_text("Expected page {0} to load within {1} ms".format(self.page_name, str(self.timeout)))
def describe_mismatch(self, item, mismatch_description):
mismatch_description.append_text('page load timed out.')
def with_timeout(self, timeout):
self.timeout = timeout
return self
def with_polling(self, polling):
self.polling = polling
return self
def is_loaded(timeout=30):
return PageIsLoaded(timeout)
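# Minimal usage sketch with PyHamcrest's assert_that (page objects are
# assumed to subclass pages.page.Page; the names below are illustrative):
#
#   from hamcrest import assert_that
#   assert_that(home_page, is_loaded().with_timeout(10).with_polling(0.2))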
| apache-2.0 | -6,174,321,162,460,390,000 | 39.637931 | 116 | 0.517183 | false | 5.079741 | false | false | false |
socrocket/trap-gen | processors/LEON2/LEONCoding.py | 1 | 5555 | ################################################################################
#
# _/_/_/_/_/ _/_/_/ _/ _/_/_/
# _/ _/ _/ _/_/ _/ _/
# _/ _/ _/ _/ _/ _/ _/
# _/ _/_/_/ _/_/_/_/ _/_/_/
# _/ _/ _/ _/ _/ _/
# _/ _/ _/ _/ _/ _/
#
# @file LEONCoding.py
# @brief This file is part of the TRAP example processors.
# @details Instruction coding definition file for the LEON2.
# @author Luca Fossati
# @date 2008-2013 Luca Fossati
# @copyright
#
# This file is part of TRAP.
#
# TRAP is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# or see <http://www.gnu.org/licenses/>.
#
# (c) Luca Fossati, [email protected], [email protected]
#
################################################################################
import trap
#---------------------------------------------------------
# Instruction Encoding
#---------------------------------------------------------
# Let's now start by defining the instructions, i.e. their bitstring and
# mnemonic and their behavior. Note the zero* field: it is a special identifier and it
# means that all those bits have value 0; the same applies for one*
# As stated in page 44 of "The SPARC Architecture Manual V8" there are
# mainly 6 different format types
# Call instruction format
call_format = trap.MachineCode([('op', 2), ('disp30', 30)])
call_format.setBitfield('op', [0, 1])
# Branch and sethi instructions format
b_sethi_format1 = trap.MachineCode([('op', 2), ('rd', 5), ('op2', 3), ('imm22', 22)])
b_sethi_format1.setBitfield('op', [0, 0])
b_sethi_format1.setVarField('rd', ('REGS', 0), 'out')
b_sethi_format2 = trap.MachineCode([('op', 2), ('a', 1), ('cond', 4), ('op2', 3), ('disp22', 22)])
b_sethi_format2.setBitfield('op', [0, 0])
# Memory instruction format
mem_format1 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('zero', 1), ('asi', 8), ('rs2', 5)])
mem_format1.setBitfield('op', [1, 1])
mem_format1.setVarField('rs1', ('REGS', 0), 'in')
mem_format1.setVarField('rs2', ('REGS', 0), 'in')
mem_format2 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('one', 1), ('simm13', 13)])
mem_format2.setBitfield('op', [1, 1])
mem_format2.setVarField('rs1', ('REGS', 0), 'in')
# Store Barrier format
stbar_format = trap.MachineCode([('op', 2), ('zero', 5), ('op3', 6), ('rs1', 5), ('zero', 14)])
stbar_format.setBitfield('op', [1, 0])
stbar_format.setBitfield('op3', [1, 0, 1, 0, 0, 0])
stbar_format.setBitfield('rs1', [0, 1, 1, 1, 1])
# Logical and remaining instructions format
dpi_format1 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('zero', 1), ('asi', 8), ('rs2', 5)])
dpi_format1.setBitfield('op', [1, 0])
dpi_format1.setVarField('rd', ('REGS', 0), 'out')
dpi_format1.setVarField('rs1', ('REGS', 0), 'in')
dpi_format1.setVarField('rs2', ('REGS', 0), 'in')
dpi_format2 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('one', 1), ('simm13', 13)])
dpi_format2.setBitfield('op', [1, 0])
dpi_format2.setVarField('rd', ('REGS', 0), 'out')
dpi_format2.setVarField('rs1', ('REGS', 0), 'in')
# Format for reading special instructions
read_special_format = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('asr', 5), ('zero', 14)])
read_special_format.setBitfield('op', [1, 0])
read_special_format.setVarField('rd', ('REGS', 0), 'out')
# Format for writing special instructions
write_special_format1 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('zero', 9), ('rs2', 5)])
write_special_format1.setBitfield('op', [1, 0])
write_special_format1.setVarField('rs1', ('REGS', 0), 'in')
write_special_format1.setVarField('rs2', ('REGS', 0), 'in')
write_special_format2 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('one', 1), ('simm13', 13)])
write_special_format2.setBitfield('op', [1, 0])
write_special_format2.setVarField('rs1', ('REGS', 0), 'in')
# Trap on integer condition code format
ticc_format1 = trap.MachineCode([('op', 2), ('reserved1', 1), ('cond', 4), ('op3', 6), ('rs1', 5), ('zero', 1), ('asi', 8), ('rs2', 5)])
ticc_format1.setBitfield('op', [1, 0])
ticc_format1.setVarField('rs1', ('REGS', 0), 'in')
ticc_format1.setVarField('rs2', ('REGS', 0), 'in')
ticc_format2 = trap.MachineCode([('op', 2), ('reserved1', 1), ('cond', 4), ('op3', 6), ('rs1', 5), ('one', 1), ('reserved2', 6), ('imm7', 7)])
ticc_format2.setBitfield('op', [1, 0])
ticc_format2.setVarField('rs1', ('REGS', 0), 'in')
# Coprocessor of fpu instruction format
coprocessor_format = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('opf', 9), ('rs2', 5)])
coprocessor_format.setBitfield('op', [1, 0])
coprocessor_format.setVarField('rd', ('REGS', 0), 'out')
coprocessor_format.setVarField('rs1', ('REGS', 0), 'in')
coprocessor_format.setVarField('rs2', ('REGS', 0), 'in')
| gpl-3.0 | 6,424,995,192,659,470,000 | 45.680672 | 142 | 0.576598 | false | 2.847258 | false | false | false |
bdang2012/taiga-back-casting | taiga/projects/milestones/api.py | 1 | 6149 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import apps
from django.db.models import Prefetch
from taiga.base import filters
from taiga.base import response
from taiga.base.decorators import detail_route
from taiga.base.api import ModelCrudViewSet, ModelListViewSet
from taiga.base.api.utils import get_object_or_404
from taiga.base.utils.db import get_object_or_none
from taiga.projects.notifications.mixins import WatchedResourceMixin, WatchersViewSetMixin
from taiga.projects.history.mixins import HistoryResourceMixin
from taiga.projects.votes.utils import attach_total_voters_to_queryset, attach_is_voter_to_queryset
from taiga.projects.notifications.utils import attach_watchers_to_queryset, attach_is_watcher_to_queryset
from . import serializers
from . import models
from . import permissions
import datetime
class MilestoneViewSet(HistoryResourceMixin, WatchedResourceMixin, ModelCrudViewSet):
serializer_class = serializers.MilestoneSerializer
permission_classes = (permissions.MilestonePermission,)
filter_backends = (filters.CanViewMilestonesFilterBackend,)
filter_fields = ("project", "closed")
queryset = models.Milestone.objects.all()
def list(self, request, *args, **kwargs):
res = super().list(request, *args, **kwargs)
self._add_taiga_info_headers()
return res
def _add_taiga_info_headers(self):
try:
project_id = int(self.request.QUERY_PARAMS.get("project", None))
project_model = apps.get_model("projects", "Project")
project = get_object_or_none(project_model, id=project_id)
except TypeError:
project = None
if project:
opened_milestones = project.milestones.filter(closed=False).count()
closed_milestones = project.milestones.filter(closed=True).count()
self.headers["Taiga-Info-Total-Opened-Milestones"] = opened_milestones
self.headers["Taiga-Info-Total-Closed-Milestones"] = closed_milestones
def get_queryset(self):
qs = super().get_queryset()
# Userstories prefetching
UserStory = apps.get_model("userstories", "UserStory")
us_qs = UserStory.objects.prefetch_related("role_points",
"role_points__points",
"role_points__role")
us_qs = us_qs.select_related("milestone",
"project",
"status",
"owner",
"assigned_to",
"generated_from_issue")
us_qs = self.attach_watchers_attrs_to_queryset(us_qs)
if self.request.user.is_authenticated():
us_qs = attach_is_voter_to_queryset(self.request.user, us_qs)
us_qs = attach_is_watcher_to_queryset(self.request.user, us_qs)
qs = qs.prefetch_related(Prefetch("user_stories", queryset=us_qs))
# Milestones prefetching
qs = qs.select_related("project", "owner")
qs = self.attach_watchers_attrs_to_queryset(qs)
qs = qs.order_by("-estimated_start")
return qs
def pre_save(self, obj):
if not obj.id:
obj.owner = self.request.user
super().pre_save(obj)
@detail_route(methods=['get'])
def stats(self, request, pk=None):
milestone = get_object_or_404(models.Milestone, pk=pk)
self.check_permissions(request, "stats", milestone)
total_points = milestone.total_points
milestone_stats = {
'name': milestone.name,
'estimated_start': milestone.estimated_start,
'estimated_finish': milestone.estimated_finish,
'total_points': total_points,
'completed_points': milestone.closed_points.values(),
'total_userstories': milestone.user_stories.count(),
'completed_userstories': len([us for us in milestone.user_stories.all() if us.is_closed]),
'total_tasks': milestone.tasks.all().count(),
'completed_tasks': milestone.tasks.all().filter(status__is_closed=True).count(),
'iocaine_doses': milestone.tasks.filter(is_iocaine=True).count(),
'days': []
}
current_date = milestone.estimated_start
sumTotalPoints = sum(total_points.values())
optimal_points = sumTotalPoints
milestone_days = (milestone.estimated_finish - milestone.estimated_start).days
optimal_points_per_day = sumTotalPoints / milestone_days if milestone_days else 0
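        # Build the per-day burndown series: the actual remaining (open)
        # points versus a linearly decreasing optimal line.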
while current_date <= milestone.estimated_finish:
milestone_stats['days'].append({
'day': current_date,
'name': current_date.day,
'open_points': sumTotalPoints - sum(milestone.closed_points_by_date(current_date).values()),
'optimal_points': optimal_points,
})
current_date = current_date + datetime.timedelta(days=1)
optimal_points -= optimal_points_per_day
return response.Ok(milestone_stats)
class MilestoneWatchersViewSet(WatchersViewSetMixin, ModelListViewSet):
permission_classes = (permissions.MilestoneWatchersPermission,)
resource_model = models.Milestone
| agpl-3.0 | 812,098,269,733,922,800 | 41.986014 | 109 | 0.650887 | false | 3.930307 | false | false | false |
DStauffman/dstauffman | dstauffman/estimation/kalman.py | 1 | 8438 | r"""
Functions related to Kalman Filter analysis.
Notes
-----
#. Written by David C. Stauffer in April 2019.
"""
#%% Imports
import doctest
import unittest
from dstauffman import HAVE_NUMPY
from dstauffman.numba import ncjit
from dstauffman.estimation.linalg import mat_divide
if HAVE_NUMPY:
import numpy as np
#%% Functions - calculate_kalman_gain
def calculate_kalman_gain(P, H, R, *, use_inverse=False, return_innov_cov=False):
r"""
Calculates K, the Kalman Gain matrix.
Parameters
----------
    P : (N, N) ndarray
        Covariance Matrix
    H : (A, N) ndarray
        Measurement Update Matrix
    R : (A, A) ndarray
        Measurement Noise Matrix
    use_inverse : bool, optional
        Whether to explicitly calculate the inverse or not, default is False
    return_innov_cov : bool, optional
        Whether to also return the innovation covariance matrix, default is False
    Returns
    -------
    K : (N, A) ndarray
        Kalman Gain Matrix
    Pz : (A, A) ndarray
        Innovation Covariance Matrix, returned only when return_innov_cov is True
Notes
-----
#. Written by David C Stauffer in December 2018.
Examples
--------
>>> from dstauffman.estimation import calculate_kalman_gain
>>> import numpy as np
>>> P = 1e-3 * np.eye(5)
>>> H = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0.5, 0.5, 0.5], [0, 0, 0.1]]).T
>>> R = 0.5 * np.eye(3)
>>> K = calculate_kalman_gain(P, H, R)
"""
# calculate the innovation covariance
Pz = H @ P @ H.T + R
if use_inverse:
# explicit version with inverse
K = (P @ H.T) @ np.linalg.inv(Pz)
else:
# implicit solver
K = mat_divide(Pz.T, (P @ H.T).T).T
# return desired results
if return_innov_cov:
return (K, Pz)
return K
@ncjit
def calculate_kalman_gain_opt(P, H, R):
r"""Calculate the Kalman gain, in a way optimized for use with numba."""
Pz = H @ P @ H.T + R
K = mat_divide(Pz.T, (P @ H.T).T).T
return (K, Pz)
#%% Functions - calculate_prediction
@ncjit
def calculate_prediction(H, state, const=None):
r"""
Calculates u, the measurement prediction.
Parameters
----------
    H : (A, B) ndarray
        Measurement Update matrix
    state : (B, ) ndarray
        State vector
    const : (B, ) ndarray, optional
        Constant state vector offsets
    Returns
    -------
    (A, ) ndarray
        Measurement prediction vector
Notes
-----
#. Written by David C. Stauffer in September 2020.
Examples
--------
>>> from dstauffman.estimation import calculate_prediction
>>> import numpy as np
>>> H = np.array([[1., 0.], [0., 1.], [0., 0.]])
>>> state = np.array([1e-3, 5e-3])
>>> u_pred = calculate_prediction(H, state)
>>> print(u_pred) # doctest: +NORMALIZE_WHITESPACE
[0.001 0.005 0. ]
"""
if const is None:
return H @ state
return H @ (state + const)
#%% Functions - calculate_innovation
@ncjit
def calculate_innovation(u_meas, u_pred):
r"""
Calculates z, the Kalman Filter innovation.
Parameters
----------
u_meas : (A, ) ndarray
Measured state vector
u_pred : (A, ) ndarray
Predicted state vector
Returns
-------
(A, ) ndarray
Kalman Filter innovation
Notes
-----
#. Written by David C. Stauffer in September 2020.
Examples
--------
>>> from dstauffman.estimation import calculate_innovation
>>> import numpy as np
>>> u_meas = np.array([1., 2.1, -3.])
>>> u_pred = np.array([1.1, 2.0, -3.1])
>>> z = calculate_innovation(u_meas, u_pred)
>>> with np.printoptions(precision=8):
... print(z) # doctest: +NORMALIZE_WHITESPACE
[-0.1 0.1 0.1]
"""
return u_meas - u_pred
#%% Functions - calculate_normalized_innovation
@ncjit
def calculate_normalized_innovation(z, Pz, use_inverse=False):
r"""
Calculates nu, the Normalized Kalman Filter Innovation.
Parameters
----------
z : (A, ) ndarray
Kalman Filter innovation
Pz : (A, A) ndarray
Kalman Filter innovation covariance
use_inverse : bool, optional
Whether to explicitly calculate the inverse or not, default is False
Returns
-------
(A, ) ndarray
Normalized innovation
Notes
-----
#. Written by David C. Stauffer in September 2020.
Examples
--------
>>> from dstauffman.estimation import calculate_normalized_innovation
>>> import numpy as np
>>> z = np.array([0.1, 0.05, -0.2])
>>> Pz = np.array([[0.1, 0.01, 0.001], [0.01, 0.1, 0.001], [0., 0., 0.2]])
>>> nu = calculate_normalized_innovation(z, Pz)
>>> with np.printoptions(precision=8):
... print(nu) # doctest: +NORMALIZE_WHITESPACE
[ 0.96868687 0.41313131 -1. ]
"""
if use_inverse:
return np.linalg.inv(Pz) @ z
return mat_divide(Pz, z)
#%% Functions - calculate_delta_state
@ncjit
def calculate_delta_state(K, z):
r"""
Calculates dx, the delta state for a given measurement.
Parameters
----------
    K : (A, B) ndarray
        Kalman Gain Matrix
    z : (B, ) ndarray
        Kalman Filter innovation
    Returns
    -------
    (A, ) ndarray
        Delta state vector
Notes
-----
#. Written by David C. Stauffer in September 2020.
Examples
--------
>>> from dstauffman.estimation import calculate_delta_state
>>> import numpy as np
>>> K = np.array([[0.1, 0.01, 0.001], [0.01, 0.1, 0.001], [0., 0., 0.2]])
>>> z = np.array([0.1, 0.05, -0.2])
>>> dx = calculate_delta_state(K, z)
>>> with np.printoptions(precision=8):
... print(dx) # doctest: +NORMALIZE_WHITESPACE
[ 0.0103 0.0058 -0.04 ]
"""
return K @ z
#%% Functions - propagate_covariance
def propagate_covariance(P, phi, Q, *, gamma=None, inplace=True):
r"""
Propagates the covariance forward in time.
Parameters
----------
    P : (N, N) ndarray
        Covariance matrix
    phi : (N, N) ndarray
        State transition matrix
    Q : ndarray
        Process noise matrix; (N, N), or (M, M) when gamma is given
    gamma : (N, M) ndarray, optional
        Noise shaping matrix that maps the process noise into the state space
inplace : bool, optional, default is True
Whether to update the value inplace or as a new output
Returns
-------
(N, N) ndarray
Updated covariance matrix
Notes
-----
#. Written by David C. Stauffer in December 2018.
#. Updated by David C. Stauffer in July 2020 to have inplace option.
Examples
--------
>>> from dstauffman.estimation import propagate_covariance
>>> import numpy as np
>>> P = 1e-3 * np.eye(6)
>>> phi = np.diag([1., 1, 1, -1, -1, -1])
>>> Q = np.diag([1e-3, 1e-3, 1e-5, 1e-7, 1e-7, 1e-7])
>>> propagate_covariance(P, phi, Q)
>>> print(P[0, 0])
0.002
"""
if gamma is None:
out = phi @ P @ phi.T + Q
else:
out = phi @ P @ phi.T + gamma @ Q @ gamma.T
if inplace:
P[:] = out
else:
return out
@ncjit
def propagate_covariance_opt(P, phi, Q, gamma=None):
r"""Propagate the covariance in time, in a way optimized for use with numba."""
if gamma is None:
P[:] = phi @ P @ phi.T + Q
else:
P[:] = phi @ P @ phi.T + gamma @ Q @ gamma.T
#%% Functions - update_covariance
def update_covariance(P, K, H, *, inplace=True):
r"""
Updates the covariance for a given measurement.
Parameters
----------
P : (N, N) ndarray
Covariance Matrix
    K : (N, A) ndarray
        Kalman Gain Matrix
H : (A, N) ndarray
Measurement Update Matrix
inplace : bool, optional, default is True
Whether to update the value inplace or as a new output
Returns
-------
P_out : (N, N) ndarray
Updated Covariance Matrix
Notes
-----
#. Written by David C Stauffer in December 2018.
#. Updated by David C. Stauffer in July 2020 to have inplace option.
Examples
--------
>>> from dstauffman.estimation import update_covariance
>>> import numpy as np
>>> P = 1e-3 * np.eye(6)
>>> P[0, -1] = 5e-2
>>> K = np.ones((6, 3))
>>> H = np.hstack((np.eye(3), np.eye(3)))
>>> update_covariance(P, K, H)
>>> print(P[-1, -1])
-0.05
"""
out = (np.eye(*P.shape) - K @ H) @ P
if inplace:
P[:] = out
else:
return out
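# Note: the (I - K @ H) @ P form used above can lose symmetry and positive
# semi-definiteness numerically; when that matters, the Joseph form
# (I - K H) P (I - K H)' + K R K' is the usual, more robust alternative.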
@ncjit
def update_covariance_opt(P, K, H):
r"""Propagate the covariance in time, in a way optimized for use with numba."""
P[:] = (np.eye(*P.shape) - K @ H) @ P
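#%% Example - single measurement update
# A minimal sketch (kept as a comment) tying the functions together; the
# matrix and vector values below are purely illustrative:
#
#     P      = 1e-3 * np.eye(2)               # state covariance
#     H      = np.array([[1., 0.]])           # measurement matrix
#     R      = np.array([[0.5]])              # measurement noise
#     state  = np.array([0.1, -0.2])
#     u_meas = np.array([0.12])
#     (K, Pz) = calculate_kalman_gain(P, H, R, return_innov_cov=True)
#     z  = calculate_innovation(u_meas, calculate_prediction(H, state))
#     dx = calculate_delta_state(K, z)
#     state = state + dx
#     update_covariance(P, K, H)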
#%% Unit Test
if __name__ == '__main__':
unittest.main(module='dstauffman.tests.test_estimation_kalman', exit=False)
doctest.testmod(verbose=False)
| lgpl-3.0 | -4,143,109,021,988,057,000 | 24.263473 | 87 | 0.567907 | false | 3.318128 | true | false | false |
tktrungna/leetcode | Python/range-sum-query-immutable.py | 1 | 1146 | """
QUESTION:
Given an integer array nums, find the sum of the elements between indices i and j (i < j), inclusive.
Example:
Given nums = [-2, 0, 3, -5, 2, -1]
sumRange(0, 2) -> 1
sumRange(2, 5) -> -1
sumRange(0, 5) -> -3
Note:
You may assume that the array does not change.
There are many calls to sumRange function.
ANSWER:
prefix-sum accumulation: precompute the cumulative sums once in O(n); then each
sumRange(i, j) = sum[j] - sum[i] + nums[i] is answered in O(1)
"""
class NumArray(object):
def __init__(self, nums):
"""
initialize your data structure here.
:type nums: List[int]
"""
self.nums = nums
self.sum = [0]*len(self.nums)
for i in xrange(len(nums)):
self.sum[i] = nums[i] if i == 0 else self.sum[i-1]+nums[i]
def sumRange(self, i, j):
"""
sum of elements nums[i..j], inclusive.
:type i: int
:type j: int
:rtype: int
"""
return self.sum[j]-self.sum[i]+self.nums[i]
# Your NumArray object will be instantiated and called as such:
# numArray = NumArray(nums)
# numArray.sumRange(0, 1)
# numArray.sumRange(1, 2)
if __name__ == '__main__':
numArray = NumArray([-2, 0, 3, -5, 2, -1])
print numArray.sumRange(0, 1)
| mit | -799,236,257,874,062,700 | 22.875 | 101 | 0.578534 | false | 3.080645 | false | false | false |
stephenliu1989/HK_DataMiner | hkdataminer/Nystrom_code/microToMacroBySHC_v2.0b.py | 1 | 41931 | #!/usr/bin/env python
#######################################################
#Written by Daniel Silva
#Based on the original SHC code from Yuan YAO and Xuhui Huang:
# Proceedings of the Pacific Symposium on Biocomputing, 15, 228-239, (2010)
#
#Intended to be used in the SimTK project
#Ver. 1.5b 21/Apr/2011
#######################################################
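#Example invocation (illustrative file names; all flags are defined in main()):
#  python microToMacroBySHC_v2.0b.py -m microstateTCM.mtx -p plevels.shc -l 1 -s macrostateMap.map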
import optparse
import sys
import linecache
import scipy.io
import numpy as np
#import colorsys
from pylab import *
from numpy import *
from scipy import *
from scipy.sparse import *
from scipy.sparse.linalg import *
from scipy.linalg import eig
from scipy.interpolate import interp1d
from scipy.sparse.linalg.eigen.arpack import *
def version():
print "(OUTPUT) Python SHC ver 2.01b"
#def licence():
def main():
version()
# licence()
p = optparse.OptionParser()
p.add_option('--outMicroCountMatrixName', '-c', default="microstateCountMatrix.mtx")
p.add_option('--lagstep', '-l', default="1")
p.add_option('--headDir', '-d', default="./")
p.add_option('--trajlist', '-t', default="-1")
p.add_option('--plevelsFile', '-p', default="plevels.shc")
p.add_option('--outMacrostateAssignementsMap', '-s', default="macrostateMap.map")
p.add_option('--writeMacroAssignments', '-w', default="0")
p.add_option('--optimumMacrostateSize', '-o', default="0.01")
p.add_option('--maximumAssignIterations', '-i', default="10")
p.add_option('--removeBarelyConnectedMicros', '-r', default="0")
p.add_option('--writeTCMtxt', '-x', default="0")
p.add_option('--scanModeTopDensity', '-a', default="0.0")
p.add_option('--inputMatrix', '-m', default="")
p.add_option('--outFlowGraphName', '-f', default="macroFlowGraph.dot")
p.add_option('--bJumpWindow', '-j', default="0")
p.add_option('--whichGap', '-g', default="1")
options, arguments = p.parse_args()
outMicroCountMatrixName = (options.outMicroCountMatrixName)
tLag = int(options.lagstep)
headDir = (options.headDir)
trajlistfiles = (options.trajlist)
pLevelsFilename = (options.plevelsFile)
outMacrostateAssignementsMap = (options.outMacrostateAssignementsMap)
optimumMacrostateSize = float(options.optimumMacrostateSize)
writeMAssignments = int(options.writeMacroAssignments)
maximumAssignIterations = int(options.maximumAssignIterations)
numRemoveBarelyConnectedMicros = int(options.removeBarelyConnectedMicros)
writeTCMtxt = int(options.writeTCMtxt)
scanModeTopDensity = float(options.scanModeTopDensity)
inputMatrix = (options.inputMatrix)
outFlowGraphName = (options.outFlowGraphName)
bJumpWindow = int(options.bJumpWindow)
chooseGap = int(options.whichGap)
#if (len(inputMatrix) == 0 ):
# originalMicrosCountM= getMicroTransitionsFromAssignements(tLag, headDir, trajlistfiles, bJumpWindow)
#else:
# print "(OUTPUT) ", ("Reading data from TCM file: \"%s\" ", inputMatrix)
# if ( linecache.getline(inputMatrix, 1).strip() == "%%MatrixMarket matrix coordinate integer general"):
# print "(OUTPUT) ", ("Detected sparce matrix in the Matrix Market format")
# originalMicrosCountM = scipy.io.mmread(inputMatrix)
# else:
# print "(OUTPUT) ", ("Detected matrix in raw txt format")
# originalMicrosCountM = genfromtxt(inputMatrix)
# originalMicrosCountM = lil_matrix(originalMicrosCountM)
originalMicrosCountM = scipy.io.mmread(inputMatrix)
    #The code is written to use a float matrix even if the input (transitions) is integer; this is convenient and avoids errors from losing float precision
originalMicrosCountM = originalMicrosCountM.tocsc()/1.0
writeCountMatrix(originalMicrosCountM, outMicroCountMatrixName, "Writing microstates transition count matrix", writeTCMtxt)
if (numRemoveBarelyConnectedMicros > 0):
originalMicrosCountM = removeBarelyConnectedMicros(originalMicrosCountM, numRemoveBarelyConnectedMicros)
connectedMicrosCountM, connectedMicrosIndex = getConnectedMicrostates(originalMicrosCountM)
writeCountMatrix(connectedMicrosCountM, ("%s_connected" % outMicroCountMatrixName), "Writing connected microstates transition count matrix", 0)
connectedMicrosCountM_X = csc_matrix(connectedMicrosCountM + connectedMicrosCountM.conj().transpose())/2 ;
microstate_size = connectedMicrosCountM.sum(axis=1)
cumulativeSumOfRows = cumulativeDesityFunctionOfHeightFilter(microstate_size)
pLevels=[]
if ( scanModeTopDensity > 0.0 ):
pLevels = scanPlevels(cumulativeSumOfRows, connectedMicrosCountM_X, microstate_size, 0.01, 0.01, scanModeTopDensity, chooseGap)
else:
pLevels = readPlevels(pLevelsFilename, cumulativeSumOfRows)
clusters = zeros(len(pLevels), int)
levels = []
levelsLine=""
for i in range (0, len(pLevels)):
if ((sum(cumulativeSumOfRows<=pLevels[i])) > 2): #Detect and remove density levels with <10 microstate
levels.append(sum(cumulativeSumOfRows<=pLevels[i]))
levelsLine += ("%1.3f " % pLevels[i])
else:
print "(OUTPUT) ", ("Density level at \"%1.3f\" is empty or have to few microstates (<2), it was removed it from the analysis"% (pLevels[i]))
print "(OUTPUT) ", ("**SHC analysis will use %d density levels: %s" % (len(levels), levelsLine))
(aBettis, specGaps) = apBetti(connectedMicrosCountM_X, microstate_size, levels)
for i in range (0, len(levels)):
if (chooseGap < 1):
print "(OUTPUT) ", ("WARNING: The spectral gap choosen (1st, 2nd, etc) cannot have a value less than 1, automaticaly changing the gap (-g) to 1")
chooseGap=1
if (chooseGap > 1):
print "(OUTPUT) ", ("WARNING:You are using an spectral gap ( ) different to the 1st. Is this really what you want to do?")
clusters[i] = aBettis[i][chooseGap-1]
superLevels=superLevelSet(microstate_size, levels, clusters) #0 is IDs and 1 is IGs
(adja, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, levelIdx, ci, csize) = superMapper(connectedMicrosCountM_X, superLevels)
(cptLocalMax, cptGradFlow, cptEquilibriumEQ) = flowGrad(adja, levelIdx, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter)
writeFlowGraph(cptGradFlow, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, levelIdx, superLevels, outFlowGraphName, pLevels)
(ci, csize, fassign, T, Qmax, id_fuzzy) = optimumAssignment(connectedMicrosCountM_X, cptEquilibriumEQ, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, maximumAssignIterations)
writeMacrostateMap(outMacrostateAssignementsMap, originalMicrosCountM.shape[0], ci, connectedMicrosIndex)
if (writeMAssignments ==1):
writeMacroAssignments(tLag, headDir, trajlistfiles, ci, connectedMicrosIndex, originalMicrosCountM.shape[0])
print "(OUTPUT) ", ("Done with SHC!")
def scanPlevels(cumulativeSumOfRows, connectedMicrosCountM_X, microstate_size, start, incr, end, chooseGap):
print "(OUTPUT) ", "Will perform a scan to discover optimum density levels for SHC (EXPERIMENTAL)"
clustersScan = zeros(1, int)
pLevels=[]
pLevelsScan=[]
pLevelsScan.append(0)
pLevelSGQuality=[]
pLevelNumMacro=[]
tmpMaxNumMacro=0
tmpMaxGapQuality=0
testLevels = np.arange(start,end,incr)
for i in testLevels:
levelsScan = []
pLevelsScan[0] = i
specGapQuality=0
print "(OUTPUT) ", ("Testing Density level: \"%1.3f\" " % pLevelsScan[0])
if ((sum(cumulativeSumOfRows<=pLevelsScan[0])) > 1+chooseGap):
levelsScan.append(sum(cumulativeSumOfRows<=pLevelsScan[0]))
(aBettis, specGaps) = apBetti(connectedMicrosCountM_X, microstate_size, levelsScan)
clustersScan[0] = aBettis[0][0]
superLevels=superLevelSet(microstate_size, levelsScan, clustersScan) #0 is IDs and 1 is IGs
(adja, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, levelIdx, ci, csize) = superMapper(connectedMicrosCountM_X, superLevels)
print specGaps
specGapQuality = specGaps[0][chooseGap-1] - specGaps[0][chooseGap]
if ( (len(csize[0])) > tmpMaxNumMacro):
tmpMaxNumMacro = len(csize[0])
tmpMaxGapQuality = specGapQuality
pLevels.append(np.copy(pLevelsScan[0]))
pLevelSGQuality.append(np.copy(specGapQuality))
pLevelNumMacro.append(np.copy(tmpMaxNumMacro))
elif ( ((len(csize[0])) <= tmpMaxNumMacro) and (specGapQuality > tmpMaxGapQuality) ):
tmpMaxNumMacro = len(csize[0])
tmpMaxGapQuality = specGapQuality
pLevels[(len(pLevels)-1)] = pLevelsScan[0]
pLevelSGQuality[(len(pLevels)-1)] = specGapQuality
pLevelNumMacro[(len(pLevels)-1)] = len(csize[0])
else:
print "(OUTPUT) ", ("Skipping density level at \"%1.3f\" because it contains to few microstates ( <2 )" % pLevelsScan[0])
print "(OUTPUT) ", "Optimum density levels identified & num of macrostates per level:"
counter=0
for i in pLevels:
print "(OUTPUT) \t", i, "\t", pLevelNumMacro[counter]
counter+=1
print "(OUTPUT) ", "Sum of the differences of the spectral gaps:"
for i in pLevelSGQuality:
print "(OUTPUT) ", i
print "(OUTPUT) ", "Density levels scan DONE. Proceding to the SHC clustering!"
return pLevels
def removeBarelyConnectedMicros(originalMicrosCountM, numRemoveBarelyConnectedMicros):
print "(OUTPUT) ", ("Removing barely connected microstates with a cut off <%d transitions (in or out) (EXPERIMENTAL)" % numRemoveBarelyConnectedMicros)
counter=0
originalMicrosCountM = originalMicrosCountM.todense()
for i in range (0, originalMicrosCountM.shape[0]):
if (((originalMicrosCountM[i,:].sum() - originalMicrosCountM[i,i] - numRemoveBarelyConnectedMicros) < 0 ) or ((originalMicrosCountM[:,i].sum() - originalMicrosCountM[i,i] - numRemoveBarelyConnectedMicros) < 0 )):
counter+=1
originalMicrosCountM[i,:] = 0
originalMicrosCountM[:,i] = 0
print "(OUTPUT) ", ("Removed %d barely connected microstates (turn to pop 0)..." % counter)
originalMicrosCountM = csc_matrix(originalMicrosCountM)
return(originalMicrosCountM)
def writeMacrostateMap(outName, nMicro, ci, connectedMicrosIndex):
print "(OUTPUT) ", ("Writting macrostate maping file: %s" % outName)
f = open(outName,'w')
micro2macro = zeros((nMicro), int)
micro2macro[connectedMicrosIndex] = ci
for i in range(0, nMicro):
line = (micro2macro[i]-1)
print >>f, line
f.close()
print "(OUTPUT) ", ("Done writting macrostate maping file!")
def writeMacroAssignments(tLag, headDir, trajlistfiles, ci, connectedMicrosIndex, nMicro):
print "(OUTPUT) ", ("Writting macrostate assignments to:")
micro2macro = zeros((nMicro), int)
micro2macro[connectedMicrosIndex] = ci
for filenameInp in file(trajlistfiles):
filenameInp = filenameInp.strip()
filenameInp = "%s/assignments/%s" % (headDir,filenameInp)
tmpLineLen=len(filenameInp)+10
sys.stdout.write('(OUTPUT) %s' % filenameInp)
for i in range (0, tmpLineLen):
sys.stdout.write('\b')
output = []
for line in file(filenameInp):
line=line.strip().split()
if (int(line[0]) > -1):
lineout="%d %d" %( int(line[0]), (micro2macro[int(line[0])] -1))
else:
lineout="%d -1" %( int(line[0]))
output.append(lineout)
f = open(filenameInp,'w')
for line in output:
print >>f, line
f.close()
print "\n", "(OUTPUT) ", ("Done writting macrostate assignments!")
def getMicroTransitionsFromAssignements(tLag, headDir, trajlistfiles,bJumpWindow):
originalNumOfMicros=0
totalCounts=0
numberOfTrajs=0
print "(OUTPUT) ", ("Assesing the number of microstates...")
for filenameInp in file(trajlistfiles):
filenameInp = filenameInp.strip()
filenameInp = "%s/assignments/%s" % (headDir,filenameInp)
tmpLineLen=len(filenameInp)+10
numberOfTrajs+=1
sys.stdout.write('(OUTPUT) %s' % filenameInp)
for i in range (0, tmpLineLen):
sys.stdout.write('\b')
for line in file(filenameInp):
line = line.strip().split()
line = int(line[0])
if (line > originalNumOfMicros):
originalNumOfMicros = line
if (originalNumOfMicros>0):
originalNumOfMicros+=1
print "(OUTPUT) ", ("Found %d microstates in %d trajectories" % (originalNumOfMicros, numberOfTrajs))
elif (originalNumOfMicros==0):
print "(OUTPUT) ", ("Found 0 microstates in %d trajectories, cannot continue!", numberOfTrajs)
exit(0)
print "(OUTPUT) ", ("Reading microstates assignments from files and counting transitions:")
originalMicrosCount= lil_matrix((originalNumOfMicros, originalNumOfMicros))
tmpLineLen=0
for filenameInp in file(trajlistfiles):
filenameInp = filenameInp.strip()
filenameInp = "%s/assignments/%s" % (headDir,filenameInp)
tmpLineLen=len(filenameInp)+10
for i in range (0, tmpLineLen):
sys.stdout.write('\b')
previousm=-1
trajLength = 0
for line in file(filenameInp):
trajLength += 1
###NEXT IS SLIDING WINDOW###
if ( bJumpWindow == 0 ):
trajectory=zeros ((trajLength), int)
for i in range (1, trajLength):
line = linecache.getline(filenameInp, i).strip().split()
trajectory[i] = line[0]
for i in range (0, trajLength-tLag):
if ((trajectory[i] >= 0) & (trajectory[i+tLag]>= 0)) :
originalMicrosCount[trajectory[i], trajectory[i+tLag]]+=1
###END SLIDING WINDOW###
###NEXT IS JUMP WINDOW###
if ( bJumpWindow == 1 ):
trajectory=zeros ((trajLength/tLag)+1, int)
for i in range (0, trajLength): #Qin's Fix (THX)
line = linecache.getline(filenameInp, i+1).strip().split()
if(i%tLag==0): #Qin's Fix (THX)
trajectory[i/tLag]=(int(line[0]))
for i in range(0, (trajLength/tLag)-1):
if ((trajectory[i] >= 0) & (trajectory[i+1]>= 0)) :
originalMicrosCount[trajectory[i], trajectory[i+1]]+=1
###END JUMP WINDOW##
print "\n", "(OUTPUT) ", ("Finished with microstates count!")
print "(OUTPUT) ", ("Total number of microstate transitions: %d" % originalMicrosCount.sum() )
originalMicrosCount = originalMicrosCount.tocsc()
emptyNumber=0
for i in range (0, originalNumOfMicros):
if ((originalMicrosCount[i,:].sum() + originalMicrosCount[:,i].sum()) == 0):
emptyNumber+=1
print("Warning microstate %d is empty!" % i)
if(emptyNumber > 0):
print "(OUTPUT) ", ("Warning, there are %d empty microstates" % emptyNumber)
print "(OUTPUT) ", ("There are %d non-empty microstates" % (originalMicrosCount.shape[0]-emptyNumber))
return (originalMicrosCount)
def writeCountMatrix ( originalMicrosCount, outMicroCountMatrixName, message, doWriteTXT):
print "(OUTPUT) ", (message)
scipy.io.mmwrite(outMicroCountMatrixName, originalMicrosCount, field="integer")
if (doWriteTXT == 1):
print "(OUTPUT) Writing (also) a count matrix in TXT format! (May be very slow, be patient)"
outMicroCountMatrixName="%s.txt"%(outMicroCountMatrixName)
f = open(outMicroCountMatrixName,'w')
advanceCounter=0.0
numMicros=originalMicrosCount.shape[0]
originalMicrosCount=originalMicrosCount.tolil()
outline="0.0% Complete"
sys.stdout.write('(OUTPUT) %s' %outline)
for i in range(0, numMicros):
advanceCounter+=1.0
		# print advanceCounter, numMicros  # debug leftover; printing every row defeats the progress meter below
line=" "
for j in range(0, numMicros):
line+= str(int(originalMicrosCount[i,j])) + " "
print >>f, line
if (advanceCounter >= (numMicros/100.0)):
for k in range (0, len(outline)+10):
sys.stdout.write('\b')
sys.stdout.write('(OUTPUT) %s' % outline)
outline="%.1f%% Complete " % ((i+1)*100/numMicros)
advanceCounter=0
print "\n", "(OUTPUT) ", ("Finished TXT write!")
f.close()
def getConnectedMicrostates (originalMicrosCount):
print "(OUTPUT) ", ("Searching connected microstates using graph theory")
microConnectedComponents=cs_graph_components((originalMicrosCount + originalMicrosCount.conj().transpose()))
componentsSize=zeros((microConnectedComponents[0]+1), int)
emptySize=0
for i in microConnectedComponents[1]:
if (i >= 0):
componentsSize[i+1]+=1
else:
emptySize +=1
indexMaxConnected, sizeMaxConnected = componentsSize.argmax(0), componentsSize.max(0)
lineout = ("Found %d connected microstates, %d disconnected microstates and %d empty microstates" % (sizeMaxConnected, (componentsSize.sum()-sizeMaxConnected), emptySize))
print "(OUTPUT) ", lineout
if ((emptySize > 0) | ((componentsSize.sum()-sizeMaxConnected) > 0)):
print "(OUTPUT) ", "Removing disconnected microstates"
connectedMicrosIndex = where(microConnectedComponents[1] == (indexMaxConnected-1))
connectedMicrosIndex = getIndexFromArray(connectedMicrosIndex[0])
connectedMicros = originalMicrosCount[ix_(connectedMicrosIndex,connectedMicrosIndex)]
else:
connectedMicros = originalMicrosCount
connectedMicrosIndex = range(0,componentsSize.sum())
return connectedMicros, connectedMicrosIndex
def readPlevels(fileName, cumulativeSumOfRows):
print "(OUTPUT) ", ("Reading density levels from file: %s" % fileName)
pLevels=[]
for line in file(fileName):
line = line.strip()
pLevels.append(float(line))
return (pLevels)
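#Assumed input format (inferred from the parser above): the density-levels file
#is plain text with one float per line, e.g.
#	0.10
#	0.25
#	0.40
#Each value is later mapped onto the cumulative density function of the
#microstate sizes (see cumulativeDesityFunctionOfHeightFilter below).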
def cumulativeDesityFunctionOfHeightFilter(x):
total = sum(x)
x = -x
x.ravel().sort()
x = -x
y = x.cumsum(axis=0)/total
return y
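#Worked example (hedged, not from the original code): for microstate sizes
#[5., 3., 2.] the sizes are sorted in descending order and the cumulative
#fraction of all counts is returned, i.e. [0.5, 0.8, 1.0]. A density level p
#then selects the sum(y <= p) largest microstates.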
def getIndexFromMatrix(indexA):
xx = indexA
xxx=[]
for i in range (0, len(xx)):
xxx.append(xx[i,0])
return(xxx)
def getIndexBFromMatrix(indexB):
xx = indexB
xxx=[]
for i in range (0, len(xx)):
xxx.append(xx[0,i])
return(xxx)
def getIndexFromArray(indexA):
xx = indexA
xxx=[]
for i in range (0, len(xx)):
xxx.append(xx[i])
return(xxx)
def IntList2array(listA):
xx = listA
xxx= zeros((len(listA)),int)
for i in range (0, len(xx)):
xxx[i] = xx[i]
return(xxx)
def apBetti(X, filterX, levels):
print "(OUTPUT) ", ("Computing persistent aproximate betti numbers via spectral gaps")
#X = X.tocsc()
ig = filterX/(max(filterX))
#print "PPC",filterX, (max(filterX))
ig = -ig
rk = ig.argsort(axis=0)
ig.sort(axis=0)
ig = -ig
MAXNUMBEREIG = 20;
k = MAXNUMBEREIG
eps = 1e-4
randSurf = 1e-1
N = len(filterX)
revecs = []
revals = []
Components = []
specGaps = []
aBettis = []
for i in range (0, len(levels)):
revecs.append(0)
revals.append(0)
Components.append(0)
specGaps.append(0)
aBettis.append(0)
print "(OUTPUT) ", ("Level\tSize\t#Comp\tB0_1\tGap_1\t\tB0_2\tGap_2\t\tB0_3\tGap_3")
for i in range (0, len(levels)):
if (levels[i] > 1):
n = int(levels[i])
else:
n = int(sum(ig>=levels[i]))
outline= ("%d\t %d\t"%(i,n));
if (n == 1):
Components[i] = 1
specGaps[i] = ones(MAXNUMBEREIG);
aBettis[i] = [1, zeros(MAXNUMBEREIG-1)]
else:
tmpindx = getIndexFromMatrix(rk[0:n])
Y = csc_matrix(((X[ix_(tmpindx,tmpindx)])) + (eps*identity(n)) +(randSurf * ones((n,n), float)/n))
Y2 = zeros((n,n))
tmparray=[]
for j in Y.sum(axis=1):
tmparray.append(j[0,0])
Y2[diag_indices(n)]= tmparray
Y2 = csc_matrix(Y2)
sigma = 1+eps+randSurf
B = Y - sigma*Y2
sigma_solve = dsolve.splu(B)
Y2L = aslinearoperator(Y2)
if ((n-4) > MAXNUMBEREIG):
# revals[i],revecs[i] = ARPACK_gen_eigs( Y2L.matvec, sigma_solve.solve, Y2L.shape[0], sigma, MAXNUMBEREIG, 'LM' )
revals[i],revecs[i] = eigs( Y, MAXNUMBEREIG, Y2, sigma, which='LM', maxiter=10000 )
else:
revals[i],revecs[i] = scipy.linalg.eig( Y.todense(),Y2.todense() )
revals[i]=real(revals[i])
#SORT EIGENVALUES AND EIGENVECTORS
tmpindsort = argsort(-revals[i])
revals[i] = revals[i][tmpindsort]
revecs[i] = revecs[i][:, tmpindsort] # second axis !!
if (n > MAXNUMBEREIG):
revals[i] = revals[i][:MAXNUMBEREIG]
revecs[i] = revecs[i][:, :MAXNUMBEREIG]
#Remove later DASM
# tmplineout=""
# for ii in revals[i]:
# tmplineout+=" "+ str(ii)
# print "(DEBUG) Using a matrix of %ix%i, eigenvalues are:\n(DEBUG) \t" %((n-4),(n-4)), tmplineout
#END REMOVE#
Components[i] = sum(revals[i]>(1-1e-5))
tmpSpecGaps = -(abs(diff(revals[i])))
aBettis[i] = tmpSpecGaps.argsort(axis=0)
for xx in range (1, len(revals[i])): #FIX for eigenvalues = 1.0 on lowlevels
if ((revals[i][xx]+1e-5) >= 1) and (aBettis[i][0] < xx):
aBettis[i][0]+=1
else:
break
tmpSpecGaps.sort(axis=0)
specGaps[i] = -tmpSpecGaps
outline += ('%d\t'% Components[i])
for gaplist in range (0, min(3,len(aBettis[i]))):
outline += ('%d\t %f\t'%(aBettis[i][gaplist], specGaps[i][gaplist]));
print "(OUTPUT) ", outline
print "(OUTPUT) ",("Done with betti numbers!")
return (aBettis, specGaps)
def superLevelSet(filterX, levels, clusters):
ig = -filterX
idd = ig.argsort(axis=0)
ig.sort(axis=0)
ig = -ig
superLevelSetId = []
superLevelSetIg = []
for i in range (0, len (levels)):
superLevelSetId.append(np.copy(idd[0:levels[i]]))
superLevelSetIg.append(np.copy(clusters[i]))
return (superLevelSetId, superLevelSetIg)
def superMapper (X,superLevelSet):
print "(OUTPUT) ", ('Executing the SMapper')
numPoints = X.shape[0]
dim = X.shape[1]
if (dim!=numPoints):
print "(OUTPUT) ", ('ERROR: the input for the mapper must be a symmetric transition count matrix!')
sys.exit()
numLevels = len(superLevelSet[0])
lengthX = []
idxSort = []
for i in range (0, numLevels):
lengthX=concatenate((lengthX,len(superLevelSet[0][i])), axis=None)
tmpReshape = superLevelSet[0][i].reshape(1,lengthX[i])
tmpReshape2 = []
for j in range (0, size(tmpReshape, axis=1)):
tmpReshape2.append(np.copy(tmpReshape[0,j]))
idxSort=concatenate((idxSort,tmpReshape2), axis=None)
Y = X[ix_(idxSort,idxSort)];
print "(OUTPUT) ", ("SMapper:\tnumber of points %d" % numPoints);
print "(OUTPUT) ", ("\t\tnumber of levels %d" % len(superLevelSet[0]));
numGraphNodes = 0
nodeInfoLevel = []
nodeInfoLevelSize = []
nodeInfoSet = []
nodeInfoFilter = []
levelIdx = []
adja = []
ci = []
csize = []
numCluster = []
for level in range (0, len(superLevelSet[0])):
index1= getIndexFromMatrix(superLevelSet[0][level])
data = (X[ix_(index1,index1)])
citmp, csizetmp, specVals, specVecs, specGaps, conduct, cluster_treeData, cluster_treeConduct, cluster_treeLeft, cluster_treeRight = spectralClustering(data,superLevelSet[1][level])
ci.append(np.copy(citmp))
csize.append(np.copy(csizetmp))
numCluster.append(len(csize[level]))
print "(OUTPUT) ", ("Level %d has %d macrostates out of %d microstates" % (level ,numCluster[level], data.shape[0]))
numGraphNodes = len(nodeInfoLevel)
for i in range (0,numCluster[level]):
new_node = i + numGraphNodes
if (i==0):
levelIdx.append(np.copy([new_node]))
else:
levelIdx[level] = concatenate((levelIdx[level],new_node), axis=None)
nodeInfoLevel.append(np.copy(level));
nodeInfoLevelSize.append(data.shape[0])
thisNodeIndex = where(ci[level]==i)
nodeInfoSet.append(np.copy(superLevelSet[0][level][thisNodeIndex]))
nodeInfoFilter.append(np.copy(level))
if(level > 0):
prevLvlIdx = levelIdx[level-1]
thisLvlIdx = levelIdx[level]
for i in range (0,len(prevLvlIdx)):
for j in range (0,len(thisLvlIdx)):
a = prevLvlIdx[i]
b = thisLvlIdx[j]
N_ab = len(intersect1d(getIndexFromMatrix(nodeInfoSet[a]),getIndexFromMatrix(nodeInfoSet[b])));
if (N_ab > 0):
adja.append(np.copy([a,b,N_ab]))
adjaArray = array2matrix(adja, len(nodeInfoLevel))
if (numLevels == 1):
adjaArray = zeros((len(nodeInfoLevel),len(nodeInfoLevel)),int)
print "(OUTPUT) ", ('SMapper done...')
return(adjaArray, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, levelIdx, ci, csize)
def array2matrix(arr, lenArra):
result = zeros((lenArra,lenArra),int)
for i in arr:
result[i[0]][i[1]]= i[2]
result[i[1]][i[0]]= i[2]
return (result)
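#Worked example (hedged): array2matrix([[0, 1, 5]], 3) returns the symmetric
#adjacency matrix [[0,5,0],[5,0,0],[0,0,0]] -- each [a, b, N_ab] triple produced
#by superMapper is mirrored across the diagonal.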
def spectralClustering(X,k):
clusterSizeThreshold=0
n = X.shape[0];
eps = 1e-4
randSurf = 1e-1
MAXCLUSTER = min(50,n)
Y = csc_matrix(X + (eps*eye(n,n)) +(randSurf * ones((n,n), float)/n))
Y2 = zeros((n,n))
tmparray=[]
for j in Y.sum(axis=1):
tmparray.append(np.copy(j[0,0]))
Y2[diag_indices(n)]= tmparray
Y2 = csc_matrix(Y2)
sigma = 1+eps+randSurf
B = Y - sigma*Y2
sigma_solve = dsolve.splu(B)
Y2L = aslinearoperator(Y2)
# printDebug(MAXCLUSTER, "MAXCLUSTER")
printDebug(n, "N")
	#The SPARSE matrix solver HAS SOME PROBLEMS since it can return eigenvalues = 0.0; maybe increase the number of iterations DASM#
if ((n-4) > MAXCLUSTER):
# specVals,specVecs = ARPACK_gen_eigs(Y2L.matvec, sigma_solve.solve, Y2L.shape[0], sigma, MAXCLUSTER-1, 'LM')
specVals,specVecs = eigs( Y, MAXCLUSTER-1, Y2, sigma, which='LM',maxiter=10000 )
printDebug(specVals,"Specvals1a")
else:
specVals,specVecs = scipy.linalg.eig(Y.todense(),Y2.todense())
specVals=real(specVals)
printDebug(specVals,"Specvals1b")
#END#
#NEXT temporary fix
# specVals,specVecs = scipy.linalg.eig(Y.todense(),Y2.todense())
# specVals=real(specVals)
#END fix
# #SORT EIGENVALUES AND EIGENVECTORS
tmpindsort = argsort(-specVals)
specVals = specVals[tmpindsort]
specVecs = specVecs[:, tmpindsort] # second axis !!
# if (n > MAXCLUSTER):
# specVals = specVals[:MAXCLUSTER]
# specVecs = specVecs[:, :MAXCLUSTER]
printDebug(specVals, "SpecvalsSortShort")
specGaps = -(abs(diff(specVals)))
numComponents = sum(specVals>1-(1e-10))
#TODO: add this DASM#
#if numComponents>1,
# cluster_tree{1}.left = 2;
# cluster_tree{1}.right = 3;
# for i=1:numComponents,
# mn = mean(abs(spectrum.vecs(:,i)));
# cluster_tree{i+1}.data = find(abs(spectrum.vecs(:,i))>=mn);
# cluster_tree{i+1}.left = 0;
# cluster_tree{i+1}.right = 0;
# id_complement = find(abs(spectrum.vecs(:,i))<mn);
# cluster_tree{i+1}.conduct = sum(sum(X(cluster_tree{i+1}.data,id_complement)))/sum(sum(X(cluster_tree{i+1}.data,cluster_tree{i+1}.data)));
# end
#end
#END TODO#
cluster_treeData=[]
cluster_treeData.append(range(0,n))
cluster_treeLeft = [0]
cluster_treeRight = [0]
cluster_treeConduct = [0]
printDebug(numComponents,"numComponents")
printDebug(k+1,"k+1")
	for i in range (numComponents, k+1): #k is the number of components identified by Betti numbers
tree_size = len(cluster_treeData)
variation = zeros((tree_size))
for j in range (0, tree_size):
if ((cluster_treeLeft[j] == 0) and (cluster_treeRight[j] == 0)):
tmp = specVecs[cluster_treeData[j], i]
if (len(tmp) > 1):
variation[j] = (var(tmp)*len(tmp)/(len(tmp)-1));
else:
variation[j] = 0;
mx = variation.max(0)
ind = variation.argmax(0)
indices = cluster_treeData[ind]
printDebug(indices,"indices")
printDebug(len(indices),"lenindices")
nn = len(indices)
if (i==1):
Xsplit = csc_matrix(X[ix_(indices,indices)]+eps*eye(nn,nn)+randSurf*ones((nn,nn))/nn)
vecFiedler = specVecs[:,i]
else:
Xsplit = csc_matrix(X[ix_(indices,indices)]+eps*eye(nn,nn)+randSurf*ones((nn,nn))/nn)
Y2 = zeros((nn,nn))
tmparray=[]
for j in Xsplit.sum(axis=1):
tmparray.append(np.copy(j[0,0]))
Y2[diag_indices(nn)]= tmparray
Y2 = csc_matrix(Y2)
B = Xsplit - sigma*Y2
sigma_solve = dsolve.splu(B)
Y2L = aslinearoperator(Y2)
##TODO: maybe somethingWrongHere DASM##
if ((nn-4) > 20):
# splitVals,splitVecs = ARPACK_gen_eigs(Y2L.matvec, sigma_solve.solve, Y2L.shape[0], sigma, 3, 'LM')
splitVals,splitVecs = eigs( Xsplit, 3, Y2, sigma, which='LM',maxiter=10000 )
else:
splitVals,splitVecs = scipy.linalg.eig(Xsplit.todense(),Y2.todense())
splitVals=real(splitVals)
##END ToDo##
##SORT EIGENVALUES AND EIGENVECTORS##
tmpindsort = argsort(-splitVals)
splitVals = splitVals[tmpindsort]
splitVecs = splitVecs[:, tmpindsort] # second axis !!
if (nn > 3):
splitVals = splitVals[:3]
splitVecs = splitVecs[:, :3]
if (len(splitVecs[0]) > 1):
vecFiedler = splitVecs[:,1]
else:
vecFiedler = splitVecs
left_indices = (vecFiedler < vecFiedler.mean()).nonzero()[0]
right_indices = (vecFiedler >= vecFiedler.mean()).nonzero()[0]
if ((min(len(left_indices),len(right_indices))) > 0): #ARPACK needs matrix >=5 to get speigs
lind = tree_size + 1
rind = tree_size + 2
cluster_treeLeft[ind] = lind
cluster_treeRight[ind] = rind
indices = IntList2array(indices)
cluster_treeData.append(indices[left_indices])
cluster_treeData.append(indices[right_indices])
cluster_treeLeft.append(0)
cluster_treeRight.append(0)
cluster_treeLeft.append(0)
cluster_treeRight.append(0)
if (len(left_indices)==1):
left_indices = concatenate((left_indices[0], left_indices[0]), axis=None)
if (len(right_indices)==1):
right_indices = concatenate((right_indices[0], right_indices[0]), axis=None)
cut = Xsplit[ix_(left_indices,right_indices)].sum()
volume_left = Xsplit[ix_(left_indices,left_indices)].sum()
volume_right = Xsplit[ix_(right_indices,right_indices)].sum()
cluster_treeConduct.append(cut/min(volume_left,volume_right))
cluster_treeConduct.append(cut/min(volume_left,volume_right))
leaves = []
leaveSize = []
ci = zeros((n), int)
if ((clusterSizeThreshold > 0) and (clusterSizeThreshold < 1)):
clusterSizeThreshold = around(clusterSizeThreshold*n);
else:
clusterSizeThreshold = around(clusterSizeThreshold);
for i in range (0, len(cluster_treeData)):
if ((cluster_treeLeft[i] == 0) and (cluster_treeRight[i] == 0)):
if (len(leaves) == 0):
leaves = [i]
ci[cluster_treeData[i]] = 1
else:
leaves = concatenate((leaves,i), axis=None)
ci[cluster_treeData[i]] = len(leaves)
	# print leaves #Funny that it makes an extra cycle?
leaveSize = zeros((len(leaves)))
for i in range (0,len(leaves)):
leaveSize[i] = sum(ci == (i+1))
idd = (leaveSize >= clusterSizeThreshold).nonzero()[0]
csize = np.copy(leaveSize[idd])
ci = zeros((n),int)
conduct = zeros((len(idd)));
for i in range (0, len(idd)):
ci[cluster_treeData[leaves[idd[i]]]]=i
conduct[i] = cluster_treeConduct[leaves[idd[i]]]
return(ci, csize, specVals, specVecs, specGaps, conduct, cluster_treeData, cluster_treeConduct, cluster_treeLeft, cluster_treeRight)
def flowGrad(G, levelIdx, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter):
numLevel = len(levelIdx)
dG = triu(G);
y=[]
localMax = (where(dG.sum(axis=0)[0] == 0))[1].transpose()
dd = zeros((len(G)), int);
for i in range (0,len(localMax)):
dd[localMax[i]] = len(nodeInfoSet[localMax[i]]);
dG = dG + diag(dd)
	dG_inf = dG**numLevel # matrix power (repeated flow application), matching MarkovT**numLevel below; '^' would be bitwise XOR
for i in range (0,len(G)):
y.append(where(dG_inf[:,i] > 0))
dGdivsum = getIndexFromMatrix((1.0/dG.sum(axis=0)).transpose())
MarkovT = dG * diag(dGdivsum)
yLocalMax = localMax
yGradFlow = dG
yEquilibriumEQ = MarkovT**numLevel
print "(OUTPUT) ", ("Number of local maxima: %d" % len(localMax))
return(yLocalMax, yGradFlow, yEquilibriumEQ)
def optimumAssignment(X, cptEquilibriumEQ, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, maximumAssignIterations):
print "(OUTPUT) ", ("Finding Optimum Assignments")
numMicro = max(X.shape[0], X.shape[1])
numNode = len(nodeInfoSet)
MacroStates = (where(diag(cptEquilibriumEQ)==1)[0]).transpose()
numMacro = len(MacroStates)
if numMacro == 1:
print "(OUTPUT) ", ("SHC has found only one Macrostate! Noting to optimize...")
ci = ones((numMicro), int)
csize = numMicro
fassign = []
T = []
Qmax = 1
id_fuzzy = []
return(ci, csize, fassign, T, Qmax, id_fuzzy)
print "(OUTPUT) ", ("Optimum assignments Done...")
distEQ = cptEquilibriumEQ[MacroStates,:]
ci = zeros((numMicro), int);
id_macro = []
# deterministic assignment on basins of local maxima
for i in range (0, numMacro):
macroBasin = (where(distEQ[i,:] == 1)[1]).transpose()
for j in range (0, len(macroBasin)):
if (j==0):
id_macro.append(np.copy(nodeInfoSet[macroBasin[j]]))
id_macro[i] = union1d(getIndexFromMatrix(id_macro[i]),getIndexFromMatrix(id_macro[i]))
else:
id_macro[i] = union1d(id_macro[i],getIndexFromMatrix(nodeInfoSet[macroBasin[j]]))
		ci[id_macro[i]] = i+1 #Take care that ci is +1 since it maps microstate numbers; 0 is for fuzzy
# fuzzy microstates on barrier
id_fuzzy = (where(ci==0))[0]
print "(OUTPUT) ", ("Number of barrier microstates: %d" % len(id_fuzzy))
# Construct new transition count matrix from X
T = eye((numMacro+len(id_fuzzy)), (numMacro+len(id_fuzzy)))
T = T.tolil()
Xdense = X.todense()
# for i in range (0, numMacro):
# row_id = where(ci==(i+1))
# for j in range (i, numMacro):
# # print i, j
# col_id = where(ci==(j+1))
# T[i,j] = X[row_id,col_id].sum()
# #print len(id_fuzzy)
# for j in range (1, len(id_fuzzy)):
# # print i, j
# tmpindx=array([id_fuzzy[j]])
# T[i,j+numMacro] = X[row_id,tmpindx].sum()
for i in range (0, numMacro):
row_id = where(ci==(i+1))[0]
for j in range (i, numMacro):
col_id = where(ci==(j+1))[0]
T[i,j] = Xdense[ix_(row_id,col_id)].sum()
#print len(id_fuzzy)
for j in range (1, len(id_fuzzy)):
tmpindx=array([id_fuzzy[j],id_fuzzy[j]])
T[i,j+numMacro] = Xdense[ix_(row_id,tmpindx)].sum()
T = T + (triu(T,1)).transpose()
T = T.todense()
# print "(OUTPUT) SLOW 1"
T[numMacro:(numMacro+len(id_fuzzy)),numMacro:(numMacro+len(id_fuzzy))] = Xdense[ix_(id_fuzzy,id_fuzzy)]
# print "(OUTPUT) SLOW 2"
d = T.sum(axis=1)
n = d.shape[0]
dd = zeros((n,n))
tmparray=[]
# print "(OUTPUT) SLOW 3"
for j in d:
tmparray.append(1.0/j[0,0])
dd[diag_indices(n)]= tmparray
# print "(OUTPUT) SLOW 4"
# dd = lil_matrix((n,n))
# jj=0
# for j in (d):
# if (j[0,0] > 0):
# dd[jj,jj]=(1.0/j[0,0])
# else:
# dd[jj,jj]= 0 #Is this correct? Why this could happen?
# jj+=1
dd = csc_matrix(dd)
T = csc_matrix(T)
# print "(OUTPUT) SLOW 5"
M = T*dd
# print "(OUTPUT) SLOW 6"
Mp = M.todense()
# print "(OUTPUT) SLOW 7"
# print Mp.sum()
eps = 1e-4 # small for tie-breaker
fass = zeros((numMacro, (numMacro+len(id_fuzzy))))
# print "(OUTPUT) SLOW 8"
for i in range(0, numMacro):
# print "(OUTPUT) SLOW 8a", i
fass[i][i]=1
# print "(OUTPUT) SLOW 9"
fass[:,numMacro:] = Mp[:numMacro,numMacro:]
iterN = 0
fassign=[]
id_domore=[]
CI=[]
CSIZ=[]
Q=[]
fassign.append(copy(fass))
fass_sort = -fass
id_fass_sort = fass_sort.argsort(axis=0)
fass_sort.sort(axis=0)
fass_sort= -fass_sort
id_domore.append((where ((fass_sort[0,:] < eps) | ((fass_sort[0,:]-fass_sort[1,:])<eps)))[0])
print "(OUTPUT) ", ("Number of empty assignments: %d" % len(id_domore[iterN]));
CI.append(copy(ci))
CI[iterN][id_fuzzy] = 1+(id_fass_sort[0,numMacro:])
CSIZ.append(hiscMacro(CI[iterN]))
Q.append(metastability(X,CI[iterN]))
numMacro = ci.max(0)
print "(OUTPUT) ", ("Num of macrostates: %d" % ci.max(0))
print "(OUTPUT) ", ("Metastability (Q) = %.3f (%2.2f%%)" % (Q[iterN], (Q[iterN]/numMacro*100)))
Qmax = Q[iterN]
iter_max = iterN
ci = np.copy(CI[iterN])
csize = np.copy(CSIZ[iterN])
fassigne =[]
while ((id_domore[iterN].size>0) and (iterN < maximumAssignIterations)):
iterN = iterN + 1
print "(OUTPUT) ", ("*Iteration %d" % iterN)
numMacro = ci.max(0)
print "(OUTPUT) ", ("Number of macrostates: %d" % ci.max(0))
Mp = Mp*M
fass[:,id_domore[iterN-1]] = Mp[:numMacro,id_domore[iterN-1]]
fass_sort = -fass
id_fass_sort = fass_sort.argsort(axis=0)
fass_sort.sort(axis=0)
fass_sort= -fass_sort
id_domore.append((where ((fass_sort[0,:] < eps) | ((fass_sort[0,:]-fass_sort[1,:])<eps)))[0])
print "(OUTPUT) ", ("Number of empty assignment: %d" % len(id_domore[iterN]));
# Method I (first-reach diffusion): find the optimal assignment
CI.append(copy(ci))
CI[iterN][id_fuzzy] = 1+(id_fass_sort[0,numMacro:])
CSIZ.append(hiscMacro(CI[iterN]))
Q.append(metastability(X,CI[iterN]))
print "(OUTPUT) ", ("(Q) I (first-reach) = \t%.3f (%2.2f%%)" % (Q[iterN], (Q[iterN]/numMacro*100)));
# print Qmax, Q[iterN]
if (Qmax < Q[iterN]):
Qmax = Q[iterN]
iter_max = iterN
ci = np.copy(CI[iterN])
csize = np.copy(CSIZ[iterN])
# print ci
# Method II (all-iteration diffusion): rearrange the fuzzy assignment by the last iteration of Mp
numMacro = ci.max(0)
print "(OUTPUT) ", ("Number of macrostates: %d" % ci.max(0))
fassign.append(copy(fass)) #Copy the array to avoid creating a pointer (THX Raymond)
fassign[iterN][:,numMacro:] = Mp[:numMacro,numMacro:]
fassign[iterN][:,id_domore[iterN]] = (ones((numMacro,len(id_domore[iterN])))/numMacro);
F_rowsort = -fassign[iterN]
id_rowsort = F_rowsort.argsort(axis=0)
F_rowsort.sort(axis=0)
F_rowsort = -F_rowsort
CI[iterN][id_fuzzy] = id_rowsort[0,numMacro:]
CSIZ[iterN]=hiscMacro(CI[iterN])
Q[iterN] = metastability(X,CI[iterN])
print "(OUTPUT) ", ("(Q) II (all-iteration) = \t%.3f (%2.2f%%)" % (Q[iterN], (Q[iterN]/numMacro*100)));
if (Qmax < Q[iterN]):
Qmax = Q[iterN]
iter_max = iterN
ci = np.copy(CI[iterN])
csize = np.copy(CSIZ[iterN])
# print ci
print "(OUTPUT) ", ("---- Maximal metastability reached at iteration %d: %f (%2.2f%%) ----\n" % (iter_max,Qmax,(Qmax/numMacro*100)))
print "(OUTPUT) ", ("---- Final number of macrostates: %d ----\n" % ci.max(0))
print "(OUTPUT) ", ("Optimum assignments Done...")
return(ci, csize, fassign, T, Qmax, id_fuzzy)
def metastability(X,ci):
#Compute the metastability according to macro-clustering ci
numMacro=max(ci);
idX=[]
for i in range(0,numMacro):
idX.append(where(ci==(i+1))[0])
if (len (idX[i]) == 1):
idX[i] = [idX[i][0],idX[i][0]]
QQ = zeros((numMacro,numMacro))
for i in range(0,numMacro):
for j in range(0,numMacro):
QQ[i,j]=(X[ix_(idX[i],idX[j])].sum())
QQ[j,i]=QQ[i,j]
D = QQ.sum(axis=1)
Q = (diag(diag(1./D)*QQ)).sum()
return(Q)
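#Worked example (hedged): for a 2-macrostate split with macro-level count matrix
#QQ = [[8, 2], [2, 8]], the row sums are D = [10, 10], so
#Q = 8/10 + 8/10 = 1.6 out of a maximum of 2; Q/numMacro is the percentage that
#optimumAssignment reports.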
def hiscMacro(arr): #Wrapper emulating MATLAB's histc function: counts the number of elements per class in a histogram
hisc=zeros((max(arr)), int)
for i in (arr):
hisc[i-1]+=1
return (hisc)
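#Worked example (hedged): hiscMacro(array([1, 1, 2, 3, 3, 3])) returns
#array([2, 1, 3]) -- the population of each macrostate label 1..max(arr).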
def writeFlowGraph(cptGradFlow, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, levelIdx, superLevels, outFlowGraphName, pLevels):
print "(OUTPUT) ", ("---- Generating Macrostate flowgraph ---")
# print "(DEBUG) ", scipy.linalg.norm(cptGradFlow - (cptGradFlow.conj().T))
if ( scipy.linalg.norm(cptGradFlow - (cptGradFlow.conj().T))==0 ):
print "(OUTPUT) ", ("error: Input graph is UNDIRECTED! I CANNOT GENERATE THE FLOW GRAPHIC!")
return
numNodes = max(shape(cptGradFlow))
colorParam=[]
sizeParam=[]
for i in range(len(nodeInfoLevel)):
colorParam.append(len(superLevels[1]) - nodeInfoFilter[i] - 1 )
sizeParam.append(100.0*len(nodeInfoSet[i])/nodeInfoLevelSize[i])
# printDebug(sizeParam, "sizeParam")
maxColorParam=max(colorParam)
colorScaleORI = arange(0.0,1.1,0.1)
colorScaleNEW = arange(0.3,.91,0.06)
	# print colorScaleORI, colorScaleNEW  # debug leftover
colorInterpolator = interp1d(colorScaleORI,colorScaleNEW)
for i in range(numNodes):
colorParam[i]= colorInterpolator(float(colorParam[i])/maxColorParam)
# printDebug(colorParam, "colorParam")
sParam = np.copy(sizeParam)
levelColor = []
cm = get_cmap('jet')
for i in range(numNodes):
tmpColor=cm(colorParam[i]) # color will now be an RGBA tuple, THX internet
levelColor.append([int(tmpColor[0]*255),int(tmpColor[1]*255),int(tmpColor[2]*255),int(tmpColor[3]*255)])
for i in range(len(sizeParam)):
sizeParam[i] = 0.1 + sizeParam[i]/max(sizeParam)
outline = 'digraph "G" {\n'
for i in range(numNodes):
outline += ' node%d [label="%d:%2.0f%%", color="#%02x%02x%02x%02x",style=filled, shape=circle, width=%0.2f];\n' % (i, i, sParam[i], levelColor[i][0],levelColor[i][1],levelColor[i][2],levelColor[i][3], sizeParam[i])
# printDebug(cptGradFlow, "cptGradFlow")
for i in range(numNodes):
connNodes = where(cptGradFlow[:,i] > 0)[0]
for j in range(size(connNodes)):
outline += ' node%d -> node%d [label="%d"];\n' % (i, connNodes[0, j],cptGradFlow[connNodes[0,j],i])
levelSizes=[]
for i in range(len(superLevels[1])):
levelSizes.append(len(superLevels[0][i]))
levelSizeInfo = ""
for i in levelSizes:
levelSizeInfo += '%d; ' % i;
l=zeros((len(levelIdx)), int)
for i in range (len(levelIdx)):
l[i] = len(levelIdx[i])
l_end = l.cumsum(axis=0)
tmpNextLevIdxInfo=0
levelIdxInfo=""
for i in range(0,len(l_end)-1):
levelIdxInfo += "%d-%d; " % (tmpNextLevIdxInfo, l_end[i]-1)
tmpNextLevIdxInfo=l_end[i]
levelIdxInfo += "%d-%d; " % (tmpNextLevIdxInfo, l_end[len(l_end)-1])
levelDesity=""
for i in pLevels:
levelDesity += "%2.0f%%; " % (i*100.0)
outline += ' label = " Levels: %d \\l Density Levels: %s \\l Level Sizes: %s \\l Node Index: %s \\l\n' % (len(superLevels[1]), levelDesity, levelSizeInfo, levelIdxInfo)
outline += ' labelloc="b";\nlabeljust="l";\n'
outline += ' center = 1;\n overlap=scale;\n'
outline +='}'
print "(OUTPUT) ", ("Writting Macrostate flowgraph to: %s" % outFlowGraphName)
f = open(outFlowGraphName,'w')
print >>f, outline
f.close()
print "(OUTPUT) ", ("Macrostate flowgraph generated")
return
def printDebug(obj, message):
outline= ("(DEBUG) %s: " % message)
try:
for i in obj:
outline += ", " + str(i)
except TypeError:
outline += " " + str(obj)
print outline.replace("\n", " ")
if __name__ == '__main__':
main()
| apache-2.0 | -28,220,316,076,639,290 | 36.912297 | 229 | 0.655434 | false | 2.817186 | false | false | false |
edx/ease | ease/model_creator.py | 1 | 7903 | #Provides interface functions to create and save models
import numpy
import re
import nltk
import sys
from sklearn.feature_extraction.text import CountVectorizer
import pickle
import os
import sklearn.ensemble
from itertools import chain
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
from .essay_set import EssaySet
from . import util_functions
from . import feature_extractor
import logging
from . import predictor_extractor
log=logging.getLogger()
def read_in_test_data(filename):
"""
Reads in test data file found at filename.
filename must be a tab delimited file with columns id, dummy number column, score, dummy score, text
returns the score and the text
"""
tid, e_set, score, score2, text = [], [], [], [], []
combined_raw = open(filename).read()
raw_lines = combined_raw.splitlines()
for row in range(1, len(raw_lines)):
tid1, set1, score1, score12, text1 = raw_lines[row].strip().split("\t")
tid.append(int(tid1))
text.append(text1)
e_set.append(int(set1))
score.append(int(score1))
score2.append(int(score12))
return score, text
def read_in_test_prompt(filename):
"""
Reads in the prompt from a text file
Returns string
"""
prompt_string = open(filename).read()
return prompt_string
def read_in_test_data_twocolumn(filename,sep=","):
"""
Reads in a two column version of the test data.
Filename must point to a delimited file.
In filename, the first column should be integer score data.
The second column should be string text data.
Sep specifies the type of separator between fields.
"""
score, text = [], []
combined_raw = open(filename).read()
raw_lines = combined_raw.splitlines()
for row in range(1, len(raw_lines)):
        score1, text1 = raw_lines[row].strip().split(sep)
text.append(text1)
score.append(int(score1))
return score, text
def create_essay_set(text, score, prompt_string, generate_additional=True):
"""
Creates an essay set from given data.
Text should be a list of strings corresponding to essay text.
Score should be a list of scores where score[n] corresponds to text[n]
Prompt string is just a string containing the essay prompt.
Generate_additional indicates whether to generate additional essays at the minimum score point or not.
"""
x = EssaySet()
for i in range(0, len(text)):
x.add_essay(text[i], score[i])
if score[i] == min(score) and generate_additional == True:
x.generate_additional_essays(x._clean_text[len(x._clean_text) - 1], score[i])
x.update_prompt(prompt_string)
return x
def get_cv_error(clf,feats,scores):
"""
Gets cross validated error for a given classifier, set of features, and scores
clf - classifier
feats - features to feed into the classified and cross validate over
scores - scores associated with the features -- feature row 1 associates with score 1, etc.
"""
results={'success' : False, 'kappa' : 0, 'mae' : 0}
try:
cv_preds=util_functions.gen_cv_preds(clf,feats,scores)
err=numpy.mean(numpy.abs(numpy.array(cv_preds)-scores))
kappa=util_functions.quadratic_weighted_kappa(list(cv_preds),scores)
results['mae']=err
results['kappa']=kappa
results['success']=True
except ValueError as ex:
# If this is hit, everything is fine. It is hard to explain why the error occurs, but it isn't a big deal.
msg = u"Not enough classes (0,1,etc) in each cross validation fold: {ex}".format(ex=ex)
log.debug(msg)
except:
log.exception("Error getting cv error estimates.")
return results
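# Hedged usage sketch (function names are from this module; data is illustrative):
#   clf, clf2 = get_algorithms(util_functions.AlgorithmTypes.regression)
#   results = get_cv_error(clf2, train_feats, scores)
#   if results['success']:
#       log.debug("cv kappa=%s mae=%s" % (results['kappa'], results['mae']))
# 'kappa' is the quadratic weighted kappa between cross-validated predictions and
# the true scores; values near 1 indicate strong agreement.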
def get_algorithms(algorithm):
"""
Gets two classifiers for each type of algorithm, and returns them. First for predicting, second for cv error.
type - one of util_functions.AlgorithmTypes
"""
if algorithm == util_functions.AlgorithmTypes.classification:
clf = sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
clf2=sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
else:
clf = sklearn.ensemble.GradientBoostingRegressor(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
clf2=sklearn.ensemble.GradientBoostingRegressor(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
return clf, clf2
def extract_features_and_generate_model_predictors(predictor_set, algorithm=util_functions.AlgorithmTypes.regression):
"""
Extracts features and generates predictors based on a given predictor set
predictor_set - a PredictorSet object that has been initialized with data
type - one of util_functions.AlgorithmType
"""
if(algorithm not in [util_functions.AlgorithmTypes.regression, util_functions.AlgorithmTypes.classification]):
algorithm = util_functions.AlgorithmTypes.regression
f = predictor_extractor.PredictorExtractor()
f.initialize_dictionaries(predictor_set)
train_feats = f.gen_feats(predictor_set)
clf,clf2 = get_algorithms(algorithm)
cv_error_results=get_cv_error(clf2,train_feats,predictor_set._target)
try:
set_score = numpy.asarray(predictor_set._target, dtype=numpy.int)
clf.fit(train_feats, set_score)
except ValueError:
log.exception("Not enough classes (0,1,etc) in sample.")
set_score = predictor_set._target
set_score[0]=1
set_score[1]=0
clf.fit(train_feats, set_score)
return f, clf, cv_error_results
def extract_features_and_generate_model(essays, algorithm=util_functions.AlgorithmTypes.regression):
"""
Feed in an essay set to get feature vector and classifier
essays must be an essay set object
    the algorithm is selected automatically: classification when there are few
    distinct scores, regression otherwise
    returns a trained FeatureExtractor object, a trained classifier, and cv error estimates
"""
f = feature_extractor.FeatureExtractor()
f.initialize_dictionaries(essays)
train_feats = f.gen_feats(essays)
set_score = numpy.asarray(essays._score, dtype=numpy.int)
if len(util_functions.f7(list(set_score)))>5:
algorithm = util_functions.AlgorithmTypes.regression
else:
algorithm = util_functions.AlgorithmTypes.classification
clf,clf2 = get_algorithms(algorithm)
cv_error_results=get_cv_error(clf2,train_feats,essays._score)
try:
clf.fit(train_feats, set_score)
except ValueError:
log.exception("Not enough classes (0,1,etc) in sample.")
set_score[0]=1
set_score[1]=0
clf.fit(train_feats, set_score)
return f, clf, cv_error_results
def dump_model_to_file(prompt_string, feature_ext, classifier, text, score, model_path):
"""
Writes out a model to a file.
prompt string is a string containing the prompt
feature_ext is a trained FeatureExtractor object
classifier is a trained classifier
    model_path is the path to write the model file to
"""
model_file = {'prompt': prompt_string, 'extractor': feature_ext, 'model': classifier, 'text' : text, 'score' : score}
    pickle.dump(model_file, open(model_path, "wb"))  # binary mode for pickle compatibility
def create_essay_set_and_dump_model(text,score,prompt,model_path,additional_array=None):
"""
Function that creates essay set, extracts features, and writes out model
See above functions for argument descriptions
"""
    essay_set = create_essay_set(text, score, prompt)
    feature_ext, clf, cv_error_results = extract_features_and_generate_model(essay_set)
    dump_model_to_file(prompt, feature_ext, clf, text, score, model_path)
| agpl-3.0 | 8,545,315,089,119,740,000 | 35.75814 | 121 | 0.690118 | false | 3.715562 | false | false | false |
russellb/nova | nova/ipv6/api.py | 1 | 1342 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova.openstack.common import cfg
from nova import utils
ipv6_backend_opt = cfg.StrOpt('ipv6_backend',
default='rfc2462',
help='Backend to use for IPv6 generation')
FLAGS = flags.FLAGS
FLAGS.register_opt(ipv6_backend_opt)
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable('ipv6_backend',
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
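# Hedged usage sketch (addresses are illustrative; generation is delegated to
# the configured backend, e.g. nova.ipv6.rfc2462):
#   addr = to_global('2001:db8::/64', '02:16:3e:33:44:55', 'project-id')
#   mac = to_mac(addr)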
| apache-2.0 | 3,333,885,616,564,730,000 | 29.5 | 78 | 0.682563 | false | 3.727778 | false | false | false |
guillaume-philippon/aquilon | lib/aquilon/aqdb/model/operating_system.py | 1 | 2257 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013,2014,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Operating System as a high level cfg object """
from datetime import datetime
from sqlalchemy import (Column, Integer, DateTime, Sequence, String, ForeignKey,
UniqueConstraint)
from sqlalchemy.orm import relation, deferred
from aquilon.aqdb.model import Base, Archetype, AssetLifecycle
from aquilon.aqdb.column_types.aqstr import AqStr
_TN = 'operating_system'
class OperatingSystem(Base):
""" Operating Systems """
__tablename__ = _TN
_class_label = 'Operating System'
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
name = Column(AqStr(32), nullable=False)
version = Column(AqStr(16), nullable=False)
archetype_id = Column(ForeignKey(Archetype.id, ondelete="CASCADE"),
nullable=False)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
comments = Column(String(255), nullable=True)
lifecycle_id = Column(ForeignKey(AssetLifecycle.id), nullable=False)
archetype = relation(Archetype, lazy=False, innerjoin=True)
lifecycle = relation(AssetLifecycle, innerjoin=True)
__table_args__ = (UniqueConstraint(archetype_id, name, version),
{'info': {'unique_fields': ['name', 'version',
'archetype']}})
def __format__(self, format_spec):
instance = "%s/%s-%s" % (self.archetype.name, self.name, self.version)
return self.format_helper(format_spec, instance)
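        # Hedged example: with archetype 'aquilon', name 'linux', version '6.1',
        # format(os_instance) produces a label built around "aquilon/linux-6.1";
        # the exact wrapping comes from Base.format_helper (not shown here).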
| apache-2.0 | -5,904,684,791,283,177,000 | 39.303571 | 80 | 0.673017 | false | 3.786913 | false | false | false |
CaliOpen/CaliOpen | src/backend/tools/py.CLI/caliopen_cli/commands/dump_indexes_mappings.py | 1 | 1108 | import json
from caliopen_storage.helpers.json import JSONEncoder
def dump_indexes(**kwargs):
# Discover base core classes
from caliopen_main.user.core import User
from caliopen_main.contact.objects.contact import Contact
from caliopen_main.message.objects.message import Message
from caliopen_main.common.objects.tag import ResourceTag
from caliopen_storage.core import core_registry
_exports = {
'contact': ['Contact'],
'message': ['Message'],
}
for keys in _exports:
for obj in _exports[keys]:
kls = core_registry.get(obj)
if not kls:
raise Exception('core class %s not found in registry' % obj)
output_file = '%s/%s.json' % (kwargs["output_path"], obj.lower())
dump_index_mapping(kls._index_class, output_file)
def dump_index_mapping(kls, output_file):
"""Output the json definition class."""
m = kls.build_mapping().to_dict()
with open(output_file, 'w') as f:
f.write(json.dumps(m, cls=JSONEncoder,
indent=4, sort_keys=True))
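# Hedged usage sketch: invoked from the CLI with an output directory, e.g.
#   dump_indexes(output_path='/tmp/mappings')
# writes contact.json and message.json, each holding the index mapping that the
# corresponding core class builds (build_mapping() suggests elasticsearch-dsl,
# though that is an assumption here).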
| gpl-3.0 | -7,902,594,706,566,011,000 | 34.741935 | 77 | 0.631769 | false | 3.73064 | false | false | false |
Mausy5043/bonediagd | daemon13.py | 1 | 4546 | #!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon13.py measures the network traffic.
# These are all counters, therefore no averaging is needed.
import syslog, traceback
import os, sys, time, math
from libdaemon import Daemon
import ConfigParser
DEBUG = False
IS_SYSTEMD = os.path.isfile('/bin/journalctl')
leaf = os.path.realpath(__file__).split('/')[-2]
class MyDaemon(Daemon):
def run(self):
iniconf = ConfigParser.ConfigParser()
inisection = "13"
home = os.path.expanduser('~')
s = iniconf.read(home + '/' + leaf + '/config.ini')
if DEBUG: print "config file : ", s
if DEBUG: print iniconf.items(inisection)
reportTime = iniconf.getint(inisection, "reporttime")
cycles = iniconf.getint(inisection, "cycles")
samplesperCycle = iniconf.getint(inisection, "samplespercycle")
flock = iniconf.get(inisection, "lockfile")
fdata = iniconf.get(inisection, "resultfile")
samples = samplesperCycle * cycles # total number of samples averaged
sampleTime = reportTime/samplesperCycle # time [s] between samples
cycleTime = samples * sampleTime # time [s] per cycle
data = [] # array for holding sampledata
while True:
try:
startTime = time.time()
result = do_work().split(',')
data = map(int, result)
# report sample average
if (startTime % reportTime < sampleTime):
if DEBUG:print data
averages = data
#averages = sum(data[:]) / len(data)
#if DEBUG:print averages
do_report(averages, flock, fdata)
waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
if (waitTime > 0):
if DEBUG:print "Waiting {0} s".format(waitTime)
time.sleep(waitTime)
except Exception as e:
if DEBUG:
print "Unexpected error:"
print e.message
syslog.syslog(syslog.LOG_ALERT,e.__doc__)
syslog_trace(traceback.format_exc())
raise
def syslog_trace(trace):
# Log a python stack trace to syslog
log_lines = trace.split('\n')
for line in log_lines:
if line:
syslog.syslog(syslog.LOG_ALERT,line)
def cat(filename):
ret = ""
if os.path.isfile(filename):
with open(filename,'r') as f:
ret = f.read().strip('\n')
return ret
def do_work():
  # 6 datapoints gathered here
# Network traffic
wlIn = 0
wlOut = 0
etIn = 0
etOut = 0
loIn = 0
loOut = 0
list = cat("/proc/net/dev").replace(":"," ").splitlines()
for line in range(2,len(list)):
device = list[line].split()[0]
if device == "lo":
loIn = int(list[line].split()[1])
loOut = int(list[line].split()[9])
if device == "eth0":
etIn = int(list[line].split()[1])
etOut = int(list[line].split()[9])
if device == "wlan0":
wlIn = int(list[line].split()[1])
wlOut = int(list[line].split()[9])
if device == "wlan1":
wlIn += int(list[line].split()[1])
wlOut += int(list[line].split()[9])
return '{0}, {1}, {2}, {3}, {4}, {5}'.format(loIn, loOut, etIn, etOut, wlIn, wlOut)
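# Illustrative /proc/net/dev record (assumption; real counter values vary):
#   eth0: 1111111  2222 0 0 0 0 0 0  3333333  4444 0 0 0 0 0 0
# After ':' is replaced by a space and the line is split, field [1] is the
# interface's cumulative RX byte count and field [9] its cumulative TX byte
# count -- exactly the fields do_work() accumulates per device above.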
def do_report(result, flock, fdata):
# Get the time and date in human-readable form and UN*X-epoch...
outDate = time.strftime('%Y-%m-%dT%H:%M:%S, %s')
result = ', '.join(map(str, result))
lock(flock)
with open(fdata, 'a') as f:
f.write('{0}, {1}\n'.format(outDate, result) )
unlock(flock)
def lock(fname):
open(fname, 'a').close()
def unlock(fname):
if os.path.isfile(fname):
os.remove(fname)
if __name__ == "__main__":
daemon = MyDaemon('/tmp/' + leaf + '/13.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print "Debug-mode started. Use <Ctrl>+C to stop."
DEBUG = True
if DEBUG:
logtext = "Daemon logging is ON"
syslog.syslog(syslog.LOG_DEBUG, logtext)
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: {0!s} start|stop|restart|foreground".format(sys.argv[0])
sys.exit(2)
| mit | -1,613,244,532,009,783,600 | 29.510067 | 120 | 0.604707 | false | 3.265805 | false | false | false |
grigorisg9gr/menpo | menpo/image/test/image_basics_test.py | 1 | 4345 | import warnings
import numpy as np
from numpy.testing import assert_allclose
from nose.tools import raises
from pathlib import Path
import menpo
from menpo.image import Image, MaskedImage, BooleanImage
from menpo.shape import PointCloud
from menpo.transform import UniformScale, Translation
def test_image_as_masked():
img = Image(np.random.rand(3, 3, 1), copy=False)
m_img = img.as_masked()
assert(type(m_img) == MaskedImage)
assert_allclose(m_img.pixels, img.pixels)
def test_image_has_nan_values():
img = Image(np.random.rand(1, 3, 3), copy=False)
img.pixels[0, 0, 0] = np.nan
assert img.has_nan_values()
def test_image_no_nan_values():
img = Image(np.random.rand(1, 3, 3), copy=False)
assert not img.has_nan_values()
def test_masked_image_as_unmasked():
m_img = MaskedImage(np.random.rand(1, 3, 3), copy=False)
img = m_img.as_unmasked()
assert(type(img) == Image)
assert_allclose(m_img.pixels, img.pixels)
def test_masked_image_as_unmasked_fill():
m_img = MaskedImage(np.random.rand(1, 3, 3), copy=False)
m_img.mask.pixels[0, 0, 0] = False
img = m_img.as_unmasked(fill=8)
assert(type(img) == Image)
assert_allclose(m_img.pixels[0, 1:, 1:], img.pixels[0, 1:, 1:])
assert_allclose(img.pixels[0, 0, 0], 8.0)
def test_masked_image_as_unmasked_fill_tuple():
m_img = MaskedImage(np.random.rand(3, 3, 3), copy=False)
m_img.mask.pixels[0, 0, 0] = False
img = m_img.as_unmasked(fill=(1, 2, 3))
assert(type(img) == Image)
assert_allclose(m_img.pixels[0, 1:, 1:], img.pixels[0, 1:, 1:])
assert_allclose(img.pixels[:, 0, 0], (1, 2, 3))
@raises(NotImplementedError)
def test_boolean_image_as_masked_raises_not_implemented_error():
b_img = BooleanImage.init_blank((4, 5))
b_img.as_masked()
def test_warp_to_shape_preserves_path():
bb = menpo.io.import_builtin_asset.breakingbad_jpg()
bb2 = bb.rescale(0.1)
assert hasattr(bb2, 'path')
assert bb2.path == bb.path
def test_warp_to_mask_preserves_path():
bb = menpo.io.import_builtin_asset.breakingbad_jpg()
no_op = UniformScale(1.0, n_dims=2)
bb2 = bb.warp_to_mask(BooleanImage.init_blank((10, 10)), no_op)
assert hasattr(bb2, 'path')
assert bb2.path == bb.path
def test_warp_to_shape_boolean_preserves_path():
i1 = BooleanImage.init_blank((10, 10))
i1.path = Path('.')
i2 = i1.rescale(0.8)
assert hasattr(i2, 'path')
assert i2.path == i1.path
def test_init_from_rolled_channels():
p = np.empty([50, 60, 3])
im = Image.init_from_channels_at_back(p)
assert im.n_channels == 3
assert im.height == 50
assert im.width == 60
def test_init_from_channels_at_back_less_dimensions():
p = np.empty([50, 60])
im = Image.init_from_channels_at_back(p)
assert im.n_channels == 1
assert im.height == 50
assert im.width == 60
def test_init_from_pointcloud():
pc = PointCloud.init_2d_grid((10, 10))
im = Image.init_from_pointcloud(pc)
assert im.shape == (9, 9)
def test_init_from_pointcloud_return_transform():
correct_tr = Translation([5, 5])
pc = correct_tr.apply(PointCloud.init_2d_grid((10, 10)))
im, tr = Image.init_from_pointcloud(pc, return_transform=True)
assert im.shape == (9, 9)
assert_allclose(tr.as_vector(), -correct_tr.as_vector())
def test_init_from_pointcloud_attach_group():
pc = PointCloud.init_2d_grid((10, 10))
im = Image.init_from_pointcloud(pc, group='test')
assert im.shape == (9, 9)
assert im.n_landmark_groups == 1
def test_init_from_pointcloud_boundary():
pc = PointCloud.init_2d_grid((10, 10))
im = Image.init_from_pointcloud(pc, boundary=5)
print(im.shape)
assert im.shape == (19, 19)
def test_bounds_2d():
im = Image.init_blank((50, 30))
assert_allclose(im.bounds(), ((0, 0), (49, 29)))
def test_bounds_3d():
im = Image.init_blank((50, 30, 10))
assert_allclose(im.bounds(), ((0, 0, 0), (49, 29, 9)))
def test_constrain_landmarks_to_bounds():
im = Image.init_blank((10, 10))
im.landmarks['test'] = PointCloud.init_2d_grid((20, 20))
with warnings.catch_warnings():
warnings.simplefilter('ignore')
im.constrain_landmarks_to_bounds()
assert not im.has_landmarks_outside_bounds()
assert_allclose(im.landmarks['test'].bounds(), im.bounds())
| bsd-3-clause | -8,169,848,640,593,623,000 | 28.557823 | 67 | 0.650403 | false | 2.85105 | true | false | false |
myfreecomm/fixofx | test/ofxtools_qif_converter.py | 1 | 9183 | # Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../3rdparty')
sys.path.insert(0, '../lib')
import ofxtools
import textwrap
import unittest
from pyparsing import ParseException
from time import localtime, strftime
class QifConverterTests(unittest.TestCase):
def test_bank_stmttype(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.accttype, "CHECKING")
def test_ccard_stmttype(self):
qiftext = textwrap.dedent('''\
!Type:CCard
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.accttype, "CREDITCARD")
def test_no_stmttype(self):
qiftext = textwrap.dedent('''\
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.accttype, "CHECKING")
def test_no_txns(self):
qiftext = textwrap.dedent('''\
!Type:Bank
''')
today = strftime("%Y%m%d", localtime())
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, today)
self.assertEqual(converter.end_date, today)
def test_us_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_uk_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D13/01/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_ambiguous_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D12/01/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20051201"))
def test_mixed_us_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/12/2005
^
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050112"))
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_mixed_uk_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D12/01/2005
^
D13/01/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050112"))
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_slashfree_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D12012005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20051201"))
def test_unparseable_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
DFnargle
^
''')
self.assertRaises(ValueError, ofxtools.QifConverter, qiftext)
def test_len_eight_no_int_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
DAAAAAAAA
^
''')
self.assertRaises(ValueError, ofxtools.QifConverter, qiftext)
def test_asc_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/13/2005
^
D01/27/2005
^
D02/01/2005
^
D02/01/2005
^
D02/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, "20050113")
self.assertEqual(converter.end_date, "20050213")
self.assertEqual(len(converter.txns_by_date.keys()), 4)
def test_desc_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/13/2005
^
D02/01/2005
^
D02/01/2005
^
D01/27/2005
^
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, "20050113")
self.assertEqual(converter.end_date, "20050213")
self.assertEqual(len(converter.txns_by_date.keys()), 4)
def test_mixed_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/01/2005
^
D02/13/2005
^
D01/13/2005
^
D02/01/2005
^
D01/27/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, "20050113")
self.assertEqual(converter.end_date, "20050213")
self.assertEqual(len(converter.txns_by_date.keys()), 4)
def test_default_currency(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
^
''')
converter = ofxtools.QifConverter(qiftext)
ofx102 = converter.to_ofx102()
self.assertTrue(ofx102.find('<CURDEF>USD') != -1)
def test_found_currency(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
^EUR
''')
converter = ofxtools.QifConverter(qiftext)
ofx102 = converter.to_ofx102()
self.assertTrue(ofx102.find('<CURDEF>EUR') != -1)
def test_explicit_currency(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
^
''')
converter = ofxtools.QifConverter(qiftext, curdef='GBP')
ofx102 = converter.to_ofx102()
self.assertTrue(ofx102.find('<CURDEF>GBP') != -1)
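    # Judging from the three currency tests above: CURDEF defaults to USD, a
    # currency code after the record terminator (e.g. "^EUR") is picked up,
    # and a curdef keyword argument sets it directly.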
def test_amount2(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/01/2005
U25.42
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20050201"][0]
self.assertEqual(txn["Amount"], "25.42")
def test_bad_amount_precision(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.930
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn["Amount"], "417.93")
def test_dash_amount(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/01/2005
T25.42
^
D02/01/2005
T-
^
''')
converter = ofxtools.QifConverter(qiftext)
txn_list = converter.txns_by_date["20050201"]
self.assertEqual(len(txn_list), 1)
txn = txn_list[0]
self.assertEqual(txn["Amount"], "25.42")
def test_trailing_minus(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D08/06/2008
T26.24-
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20080806"][0]
self.assertEqual(txn["Amount"], "-26.24")
def test_n_a_number(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
NN/A
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.has_key("Number"), False)
def test_creditcard_number(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
NXXXX-XXXX-XXXX-1234
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.has_key("Number"), False)
def test_creditcard_stmt_number(self):
qiftext = textwrap.dedent('''\
!Type:CCard
D01/25/2007
T417.93
N1234
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.has_key("Number"), False)
def test_check_stmt_number(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
N1234
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.get("Type"), "CHECK")
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -6,239,589,236,136,788,000 | 27.255385 | 74 | 0.556354 | false | 3.546929 | true | false | false |
tuxxi/OpenBurn | openburn/ui/mainwindow.py | 1 | 3263 | from qtpy.QtWidgets import (QWidget, QFrame, QMainWindow, QMenuBar, QStatusBar, QAction, QApplication,
QTabWidget, QVBoxLayout)
from qtpy.QtGui import QIcon
from openburn import RESOURCE_PATH
from openburn.ui.dialogs.about import AboutDialog
from openburn.ui.designtab import DesignTab
class MainWindow(QMainWindow):
"""OpenBurn's main window"""
title = "OpenBurn"
def __init__(self):
super(MainWindow, self).__init__()
self.setWindowTitle(self.title)
self.setGeometry(100, 100, 800, 600)
self.setWindowIcon(QIcon(RESOURCE_PATH + "icons/nakka-finocyl.gif"))
self.create_default_widgets()
self.setup_ui()
def create_default_widgets(self):
"""Creates static widgets such as menubar and statusbar"""
def create_menubar():
"""Create menu bar and populate it with sub menu actions"""
def file_menu():
"""Create a file submenu"""
self.file_sub_menu = self.menubar.addMenu('File')
self.open_action = QAction('Open File', self)
self.open_action.setStatusTip('Open a new design')
self.open_action.setShortcut('CTRL+O')
# self.open_action.triggered.connect(self.open_file)
self.exit_action = QAction('Exit', self)
self.exit_action.setStatusTip('Exit the application.')
self.exit_action.setShortcut('CTRL+Q')
self.exit_action.triggered.connect(QApplication.quit)
self.file_sub_menu.addAction(self.open_action)
self.file_sub_menu.addAction(self.exit_action)
            def edit_menu():
                self.edit_sub_menu = self.menubar.addMenu('Edit')
            def tools_menu():
                self.tools_sub_menu = self.menubar.addMenu('Tools')
def help_menu():
"""Create help submenu"""
self.help_sub_menu = self.menubar.addMenu('Help')
self.about_action = QAction('About', self)
self.about_action.setStatusTip('About the application.')
self.about_action.setShortcut('CTRL+H')
self.about_action.triggered.connect(self.about_dialog.exec_)
self.help_sub_menu.addAction(self.about_action)
self.menubar = QMenuBar(self)
file_menu()
edit_menu()
tools_menu()
help_menu()
def create_statusbar():
self.statusbar = QStatusBar(self)
self.statusbar.showMessage("Ready", 0)
self.about_dialog = AboutDialog(self)
create_menubar()
self.setMenuBar(self.menubar)
create_statusbar()
self.setStatusBar(self.statusbar)
def setup_ui(self):
"""setup the tab widget UI"""
self.tab_widget = QTabWidget()
self.tab_widget.addTab(DesignTab(), "Design")
self.tab_widget.addTab(QWidget(), "Simulation")
self.tab_widget.addTab(QWidget(), "Propellants")
self.layout = QVBoxLayout()
self.layout.addWidget(self.tab_widget)
self.frame = QFrame()
self.frame.setLayout(self.layout)
self.setCentralWidget(self.frame)
| gpl-3.0 | -73,988,300,967,120,670 | 34.086022 | 102 | 0.592706 | false | 3.988998 | false | false | false |
nicproulx/mne-python | mne/time_frequency/tests/test_psd.py | 2 | 7360 | import numpy as np
import os.path as op
from numpy.testing import assert_array_almost_equal, assert_raises
from nose.tools import assert_true
from mne import pick_types, Epochs, read_events
from mne.io import RawArray, read_raw_fif
from mne.utils import requires_version, slow_test, run_tests_if_main
from mne.time_frequency import psd_welch, psd_multitaper
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
@requires_version('scipy', '0.12')
def test_psd():
"""Tests the welch and multitaper PSD."""
raw = read_raw_fif(raw_fname)
picks_psd = [0, 1]
# Populate raw with sinusoids
rng = np.random.RandomState(40)
data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
freqs_sig = [8., 50.]
for ix, freq in zip(picks_psd, freqs_sig):
data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
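    # Each picked channel now carries a dominant sinusoid at a known frequency
    # (8 Hz and 50 Hz), so the argmax checks below expect each PSD peak to
    # land within two frequency bins of the injected frequency.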
first_samp = raw._first_samps[0]
raw = RawArray(data, raw.info)
tmin, tmax = 0, 20 # use a few seconds of data
fmin, fmax = 2, 70 # look at frequencies between 2 and 70Hz
n_fft = 128
# -- Raw --
kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
picks=picks_psd) # Common to all
kws_welch = dict(n_fft=n_fft)
kws_mt = dict(low_bias=True)
funcs = [(psd_welch, kws_welch),
(psd_multitaper, kws_mt)]
for func, kws in funcs:
kws = kws.copy()
kws.update(kws_psd)
psds, freqs = func(raw, proj=False, **kws)
psds_proj, freqs_proj = func(raw, proj=True, **kws)
assert_true(psds.shape == (len(kws['picks']), len(freqs)))
assert_true(np.sum(freqs < 0) == 0)
assert_true(np.sum(psds < 0) == 0)
# Is power found where it should be
ixs_max = np.argmax(psds, axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs))
assert_true(np.abs(ixmax - ixtrue) < 2)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds, psds_proj)
# Array input shouldn't work
assert_raises(ValueError, func, raw[:3, :20][0])
# test n_per_seg in psd_welch (and padding)
psds1, freqs1 = psd_welch(raw, proj=False, n_fft=128, n_per_seg=128,
**kws_psd)
psds2, freqs2 = psd_welch(raw, proj=False, n_fft=256, n_per_seg=128,
**kws_psd)
assert_true(len(freqs1) == np.floor(len(freqs2) / 2.))
assert_true(psds1.shape[-1] == np.floor(psds2.shape[-1] / 2.))
# tests ValueError when n_per_seg=None and n_fft > signal length
kws_psd.update(dict(n_fft=tmax * 1.1 * raw.info['sfreq']))
assert_raises(ValueError, psd_welch, raw, proj=False, n_per_seg=None,
**kws_psd)
# ValueError when n_overlap > n_per_seg
kws_psd.update(dict(n_fft=128, n_per_seg=64, n_overlap=90))
assert_raises(ValueError, psd_welch, raw, proj=False, **kws_psd)
# -- Epochs/Evoked --
events = read_events(event_fname)
events[:, 0] -= first_samp
tmin, tmax, event_id = -0.5, 0.5, 1
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks_psd,
proj=False, preload=True, baseline=None)
evoked = epochs.average()
tmin_full, tmax_full = -1, 1
epochs_full = Epochs(raw, events[:10], event_id, tmin_full, tmax_full,
picks=picks_psd, proj=False, preload=True,
baseline=None)
kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
picks=picks_psd) # Common to all
funcs = [(psd_welch, kws_welch),
(psd_multitaper, kws_mt)]
for func, kws in funcs:
kws = kws.copy()
kws.update(kws_psd)
psds, freqs = func(
epochs[:1], proj=False, **kws)
psds_proj, freqs_proj = func(
epochs[:1], proj=True, **kws)
psds_f, freqs_f = func(
epochs_full[:1], proj=False, **kws)
# this one will fail if you add for example 0.1 to tmin
assert_array_almost_equal(psds, psds_f, 27)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds, psds_proj, 27)
# Is power found where it should be
ixs_max = np.argmax(psds.mean(0), axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs))
assert_true(np.abs(ixmax - ixtrue) < 2)
assert_true(psds.shape == (1, len(kws['picks']), len(freqs)))
assert_true(np.sum(freqs < 0) == 0)
assert_true(np.sum(psds < 0) == 0)
# Array input shouldn't work
assert_raises(ValueError, func, epochs.get_data())
# Testing evoked (doesn't work w/ compute_epochs_psd)
psds_ev, freqs_ev = func(
evoked, proj=False, **kws)
psds_ev_proj, freqs_ev_proj = func(
evoked, proj=True, **kws)
# Is power found where it should be
ixs_max = np.argmax(psds_ev, axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs_ev))
assert_true(np.abs(ixmax - ixtrue) < 2)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds_ev, psds_ev_proj, 27)
assert_true(psds_ev.shape == (len(kws['picks']), len(freqs)))
@slow_test
@requires_version('scipy', '0.12')
def test_compares_psd():
"""Test PSD estimation on raw for plt.psd and scipy.signal.welch."""
raw = read_raw_fif(raw_fname)
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
exclude=exclude)[:2]
    tmin, tmax = 0, 10  # use the first 10s of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70Hz
n_fft = 2048
# Compute psds with the new implementation using Welch
psds_welch, freqs_welch = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, proj=False, picks=picks,
n_fft=n_fft, n_jobs=1)
# Compute psds with plt.psd
start, stop = raw.time_as_index([tmin, tmax])
data, times = raw[picks, start:(stop + 1)]
from matplotlib.pyplot import psd
out = [psd(d, Fs=raw.info['sfreq'], NFFT=n_fft) for d in data]
freqs_mpl = out[0][1]
psds_mpl = np.array([o[0] for o in out])
mask = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
freqs_mpl = freqs_mpl[mask]
psds_mpl = psds_mpl[:, mask]
assert_array_almost_equal(psds_welch, psds_mpl)
assert_array_almost_equal(freqs_welch, freqs_mpl)
assert_true(psds_welch.shape == (len(picks), len(freqs_welch)))
assert_true(psds_mpl.shape == (len(picks), len(freqs_mpl)))
assert_true(np.sum(freqs_welch < 0) == 0)
assert_true(np.sum(freqs_mpl < 0) == 0)
assert_true(np.sum(psds_welch < 0) == 0)
assert_true(np.sum(psds_mpl < 0) == 0)
run_tests_if_main()
| bsd-3-clause | -7,658,759,830,892,655,000 | 37.736842 | 77 | 0.588179 | false | 2.926441 | true | false | false |
ewongbb/stem | stem/prereq.py | 1 | 4914 | # Copyright 2012-2017, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Checks for stem dependencies. We require python 2.6 or greater (including the
3.x series), but note we'll be bumping our requirements to python 2.7 in stem
2.0. Other requirements for complete functionality are...
* cryptography module
* validating descriptor signature integrity
::
check_requirements - checks for minimum requirements for running stem
is_python_3 - checks if python 3.0 or later is available
is_crypto_available - checks if the cryptography module is available
"""
import inspect
import sys
try:
# added in python 3.2
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
CRYPTO_UNAVAILABLE = "Unable to import the cryptography module. Because of this we'll be unable to verify descriptor signature integrity. You can get cryptography from: https://pypi.python.org/pypi/cryptography"
PYNACL_UNAVAILABLE = "Unable to import the pynacl module. Because of this we'll be unable to verify descriptor ed25519 certificate integrity. You can get pynacl from https://pypi.python.org/pypi/PyNaCl/"
def check_requirements():
"""
Checks that we meet the minimum requirements to run stem. If we don't then
this raises an ImportError with the issue.
:raises: **ImportError** with the problem if we don't meet stem's
requirements
"""
major_version, minor_version = sys.version_info[0:2]
if major_version < 2 or (major_version == 2 and minor_version < 6):
raise ImportError('stem requires python version 2.6 or greater')
def _is_python_26():
"""
Checks if we're running python 2.6. This isn't for users as it'll be removed
in stem 2.0 (when python 2.6 support goes away).
:returns: **True** if we're running python 2.6, **False** otherwise
"""
major_version, minor_version = sys.version_info[0:2]
return major_version == 2 and minor_version == 6
def is_python_27():
"""
Checks if we're running python 2.7 or above (including the 3.x series).
.. deprecated:: 1.5.0
Function lacks much utility and will be eventually removed.
:returns: **True** if we meet this requirement and **False** otherwise
"""
major_version, minor_version = sys.version_info[0:2]
return major_version > 2 or (major_version == 2 and minor_version >= 7)
def is_python_3():
"""
Checks if we're in the 3.0 - 3.x range.
:returns: **True** if we meet this requirement and **False** otherwise
"""
return sys.version_info[0] == 3
@lru_cache()
def is_crypto_available():
"""
Checks if the cryptography functions we use are available. This is used for
verifying relay descriptor signatures.
:returns: **True** if we can use the cryptography module and **False**
otherwise
"""
from stem.util import log
try:
from cryptography.utils import int_from_bytes, int_to_bytes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.serialization import load_der_public_key
if not hasattr(rsa.RSAPrivateKey, 'sign'):
raise ImportError()
return True
except ImportError:
log.log_once('stem.prereq.is_crypto_available', log.INFO, CRYPTO_UNAVAILABLE)
return False
@lru_cache()
def is_mock_available():
"""
  Checks if the mock module is available. In python 3.3 and up it is part of
  the builtin unittest module, but before this it needed to be `installed
  separately <https://pypi.python.org/pypi/mock/>`_. Imports should be as
  follows...
::
try:
# added in python 3.3
from unittest.mock import Mock
except ImportError:
from mock import Mock
:returns: **True** if the mock module is available and **False** otherwise
"""
try:
# checks for python 3.3 version
import unittest.mock
return True
except ImportError:
pass
try:
import mock
# check for mock's patch.dict() which was introduced in version 0.7.0
if not hasattr(mock.patch, 'dict'):
raise ImportError()
# check for mock's new_callable argument for patch() which was introduced in version 0.8.0
if 'new_callable' not in inspect.getargspec(mock.patch).args:
raise ImportError()
return True
except ImportError:
return False
@lru_cache()
def _is_pynacl_available():
"""
Checks if the pynacl functions we use are available. This is used for
verifying ed25519 certificates in relay descriptor signatures.
:returns: **True** if we can use pynacl and **False** otherwise
"""
from stem.util import log
try:
from nacl import encoding
from nacl import signing
return True
except ImportError:
log.log_once('stem.prereq._is_pynacl_available', log.INFO, PYNACL_UNAVAILABLE)
return False
| lgpl-3.0 | -1,769,451,857,329,862,700 | 27.241379 | 211 | 0.712658 | false | 3.921788 | false | false | false |
ME-ICA/me-ica | meica.libs/mdp/parallel/pp_support.py | 1 | 13771 | """
Adapters for the Parallel Python library (http://www.parallelpython.com).
The PPScheduler class uses an existing pp scheduler and is a simple adapter.
LocalPPScheduler includes the creation of a local pp scheduler.
NetworkPPScheduler includes the management of the remote slaves via SSH.
"""
from __future__ import with_statement
import sys
import os
import time
import subprocess
import signal
import traceback
import tempfile
import scheduling
import pp
import mdp
TEMPDIR_PREFIX='pp4mdp-monkeypatch.'
def _monkeypatch_pp(container_dir):
"""Apply a hack for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=620551.
Importing numpy fails because the parent directory of the slave
    script (/usr/share/pyshared) is added to the beginning of sys.path.
This is a temporary fix until parallel python or the way it is
packaged in debian is changed.
This function monkey-patches the ppworker module and changes the
path to the slave script. A temporary directory is created and the
worker script is copied there.
The temporary directory should be automatically removed when this
module is destroyed.
XXX: remove this when parallel python or the way it is packaged in debian is changed.
"""
import os.path, shutil
# this part copied from pp.py, should give the same result hopefully
ppworker = os.path.join(os.path.dirname(os.path.abspath(pp.__file__)),
'ppworker.py')
global _ppworker_dir
_ppworker_dir = mdp.utils.TemporaryDirectory(prefix=TEMPDIR_PREFIX, dir=container_dir)
ppworker3 = os.path.join(_ppworker_dir.name, 'ppworker.py')
shutil.copy(ppworker, ppworker3)
mdp._pp_worker_command = pp._Worker.command[:]
try:
pp._Worker.command[pp._Worker.command.index(ppworker)] = ppworker3
except TypeError:
# pp 1.6.0 compatibility
pp._Worker.command = pp._Worker.command.replace(ppworker, ppworker3)
if hasattr(mdp.config, 'pp_monkeypatch_dirname'):
_monkeypatch_pp(mdp.config.pp_monkeypatch_dirname)
class PPScheduler(scheduling.Scheduler):
"""Adaptor scheduler for the parallel python scheduler.
This scheduler is a simple wrapper for a pp server. A pp server instance
has to be provided.
"""
def __init__(self, ppserver, max_queue_length=1,
result_container=None, verbose=False):
"""Initialize the scheduler.
ppserver -- Parallel Python Server instance.
max_queue_length -- How long the queue can get before add_task blocks.
result_container -- ResultContainer used to store the results.
ListResultContainer by default.
verbose -- If True to get progress reports from the scheduler.
"""
if result_container is None:
result_container = scheduling.ListResultContainer()
super(PPScheduler, self).__init__(result_container=result_container,
verbose=verbose)
self.ppserver = ppserver
self.max_queue_length = max_queue_length
def _process_task(self, data, task_callable, task_index):
"""Non-blocking processing of tasks.
Depending on the scheduler state this function is non-blocking or
blocking. One reason for blocking can be a full task-queue.
"""
task = (data, task_callable.fork(), task_index)
def execute_task(task):
"""Call the first args entry and return the return value."""
data, task_callable, task_index = task
task_callable.setup_environment()
return task_callable(data), task_index
while True:
if len(self.ppserver._Server__queue) > self.max_queue_length:
# release lock for other threads and wait
self._lock.release()
time.sleep(0.5)
self._lock.acquire()
else:
# release lock to enable result storage
self._lock.release()
# the inner tuple is a trick to prevent introspection by pp
# this forces pp to simply pickle the object
self.ppserver.submit(execute_task, args=(task,),
callback=self._pp_result_callback)
break
def _pp_result_callback(self, result):
"""Calback method for pp to unpack the result and the task id.
This method then calls the normal _store_result method.
"""
if result is None:
result = (None, None)
self._store_result(*result)
def _shutdown(self):
"""Call destroy on the ppserver."""
self.ppserver.destroy()
class LocalPPScheduler(PPScheduler):
"""Uses a local pp server to distribute the work across cpu cores.
The pp server is created automatically instead of being provided by the
user (in contrast to PPScheduler).
"""
def __init__(self, ncpus="autodetect", max_queue_length=1,
result_container=None, verbose=False):
"""Create an internal pp server and initialize the scheduler.
ncpus -- Integer or 'autodetect', specifies the number of processes
used.
max_queue_length -- How long the queue can get before add_task blocks.
result_container -- ResultContainer used to store the results.
ListResultContainer by default.
verbose -- If True to get progress reports from the scheduler.
"""
ppserver = pp.Server(ncpus=ncpus)
super(LocalPPScheduler, self).__init__(ppserver=ppserver,
max_queue_length=max_queue_length,
result_container=result_container,
verbose=verbose)
# default secret
SECRET = "rosebud"
class NetworkPPScheduler(PPScheduler):
"""Scheduler which can manage pp remote servers (requires SSH).
The remote slave servers are automatically started and killed at the end.
    Since the slaves are started via SSH this scheduler does not work on normal
Windows systems. On such systems you can start the pp slaves
manually and then use the standard PPScheduler.
"""
def __init__(self, max_queue_length=1,
result_container=None,
verbose=False,
remote_slaves=None,
source_paths=None,
port=50017,
secret=SECRET,
nice=-19,
timeout=3600,
n_local_workers=0,
slave_kill_filename=None,
remote_python_executable=None):
"""Initialize the remote slaves and create the internal pp scheduler.
result_container -- ResultContainer used to store the results.
ListResultContainer by default.
verbose -- If True to get progress reports from the scheduler.
remote_slaves -- List of tuples, the first tuple entry is a string
            containing the name or IP address of the slave, the second entry
contains the number of processes (i.e. the pp ncpus parameter).
The second entry can be None to use 'autodetect'.
source_paths -- List of paths that will be appended to sys.path in the
slaves.
n_local_workers -- Value of ncpus for this machine.
secret -- Secret password to secure the remote slaves.
slave_kill_filename -- Filename (including path) where a list of the
remote slave processes should be stored. Together with the
            'kill_slaves' function this makes it possible to quickly kill all
remote slave processes in case something goes wrong.
If None, a tempfile is created.
"""
self._remote_slaves = remote_slaves
self._running_remote_slaves = None # list of strings 'address:port'
# list with processes for the ssh connections to the slaves
self._ssh_procs = None
self._remote_pids = None # list of the pids of the remote servers
self._port = port
        if slave_kill_filename is None:
            slave_kill_filename = tempfile.mkstemp(prefix='MDPtmp-')[1]
        self.slave_kill_file = slave_kill_filename
self._secret = secret
self._slave_nice = nice
self._timeout = timeout
if not source_paths:
self._source_paths = []
else:
self._source_paths = source_paths
if remote_python_executable is None:
remote_python_executable = sys.executable
self._python_executable = remote_python_executable
module_file = os.path.abspath(__file__)
self._script_path = os.path.dirname(module_file)
self.verbose = verbose
# start ppserver
self._start_slaves()
ppslaves = tuple(["%s:%d" % (address, self._port)
for address in self._running_remote_slaves])
ppserver = pp.Server(ppservers=ppslaves,
ncpus=n_local_workers,
secret=self._secret)
super(NetworkPPScheduler, self).__init__(ppserver=ppserver,
max_queue_length=max_queue_length,
result_container=result_container,
verbose=verbose)
def _shutdown(self):
"""Shutdown all slaves."""
for ssh_proc in self._ssh_procs:
os.kill(ssh_proc.pid, signal.SIGQUIT)
super(NetworkPPScheduler, self)._shutdown()
if self.verbose:
print "All slaves shut down."
def start_slave(self, address, ncpus="autodetect"):
"""Start a single remote slave.
The return value is a tuple of the ssh process handle and
the remote pid.
"""
try:
print "starting slave " + address + " ..."
proc = subprocess.Popen(["ssh","-T", "%s" % address],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
proc.stdin.write("cd %s\n" % self._script_path)
cmd = (self._python_executable +
" pp_slave_script.py %d %d %d %s %d" %
(self._slave_nice, self._port, self._timeout, self._secret,
ncpus))
proc.stdin.write(cmd + "\n")
# send additional information to the remote process
proc.stdin.write(self._python_executable + "\n")
for sys_path in self._source_paths:
proc.stdin.write(sys_path + "\n")
proc.stdin.write("_done_" + "\n")
# print status message from slave
sys.stdout.write(address + ": " + proc.stdout.readline())
# get PID for remote slave process
pid = None
if self.verbose:
print "*** output from slave %s ***" % address
while pid is None:
# the slave process might first output some hello message
try:
value = proc.stdout.readline()
if self.verbose:
print value
pid = int(value)
except ValueError:
pass
if self.verbose:
print "*** output end ***"
return (proc, pid)
except:
print "Initialization of slave %s has failed." % address
traceback.print_exc()
return None
def _start_slaves(self):
"""Start remote slaves.
        The slaves that could be started are stored in a textfile, one per
        line, in the form
        address:remote_pid:ssh_pid
"""
with open(self.slave_kill_file, 'w') as slave_kill_file:
self._running_remote_slaves = []
self._remote_pids = []
self._ssh_procs = []
for (address, ncpus) in self._remote_slaves:
ssh_proc, pid = self.start_slave(address, ncpus=ncpus)
if pid is not None:
slave_kill_file.write("%s:%d:%d\n" %
(address, pid, ssh_proc.pid))
self._running_remote_slaves.append(address)
self._remote_pids.append(pid)
self._ssh_procs.append(ssh_proc)
def kill_slaves(slave_kill_filename):
"""Kill all remote slaves which are stored in the given file.
    This function is only meant for emergency situations, when something
went wrong and the slaves have to be killed manually.
"""
with open(slave_kill_filename) as tempfile:
for line in tempfile:
address, pid, ssh_pid = line.split(":")
pid = int(pid)
ssh_pid = int(ssh_pid)
            # open ssh connection to kill the remote slave
proc = subprocess.Popen(["ssh","-T", address],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
proc.stdin.write("kill %d\n" % pid)
proc.stdin.flush()
# kill old ssh connection
try:
os.kill(ssh_pid, signal.SIGKILL)
except:
pass
# a kill might prevent the kill command transmission
# os.kill(proc.pid, signal.SIGQUIT)
print "killed slave " + address + " (pid %d)" % pid
print "all slaves killed."
if __name__ == "__main__":
if len(sys.argv) == 2:
kill_slaves(sys.argv[1])
else:
sys.stderr.write("usage: %s slave_list.txt\n" % __file__)
| lgpl-2.1 | 7,051,554,097,536,873,000 | 39.622419 | 90 | 0.584053 | false | 4.469653 | false | false | false |