# ==== repo: OpenInfoporto/infoporto.odoo.ecommerce | file: infoporto/odoo/ecommerce/lib/odoo.py | license: gpl-2.0 ====
from infoporto.odoo.core.odoo import OdooInstance
class Odoo(object):
# settings
def getCurrency(self):
""" Retrieve currency from Odoo Company settings """
odoo_core = OdooInstance()
# company ID should be dynamic
return odoo_core.read('res.company', 1, ['currency_id'])
# product.category
def getAncestors(self, cid):
""" Retrieve recursively all parents for the given cid """
odoo_core = OdooInstance()
res = []
last_found = cid
while last_found:
category = odoo_core.read('product.category', int(last_found), ['id', 'name', 'parent_id'])
if category['parent_id']:
last_found = category['parent_id'][0]
else:
last_found = False
res.append(dict(id=category['id'], name=category['name']))
return reversed(res)
def getCategory(self, cid):
odoo_core = OdooInstance()
category = odoo_core.read('product.category', [int(cid)], ['id', 'name'])
return category[0]
def getCategories(self, cid=False):
odoo_core = OdooInstance()
if not cid:
args = [('parent_id', '=', False)]
else:
args = [('parent_id', '=', int(cid))]
ids = odoo_core.search('product.category', args)
categories = odoo_core.read('product.category', ids, ['id', 'name'])
return categories
def getProducts(self, cid=False):
odoo_core = OdooInstance()
if not cid:
args = []
else:
args = [('categ_id', '=', int(cid))]
ids = odoo_core.search('product.product', args)
products = odoo_core.read('product.product', ids,
['id', 'name', 'description',
'lst_price', 'image', 'image_medium',
'categ_id', 'taxes_id'])
for product in products:
if product['taxes_id']:
tax = odoo_core.read('account.tax',
int(product['taxes_id'][0]), ['amount'])['amount']
else:
tax = 0.0
product['tax'] = tax
product = self.sanitizeProduct(product)
return products
# product.product
def getProduct(self, pid):
odoo_core = OdooInstance()
product = odoo_core.read('product.product', int(pid),
['id', 'name', 'description',
'lst_price', 'image', 'image_medium',
'categ_id', 'taxes_id'])
if product['taxes_id']:
tax = odoo_core.read('account.tax',
int(product['taxes_id'][0]), ['amount'])['amount']
else:
tax = 0.0
product['tax'] = tax
return self.sanitizeProduct(product)
def getInShowcase(self):
#odoo_core = OdooInstance()
        #TODO: an attribute should be added to Odoo product management
return self.getProducts()
def sanitizeProduct(self, p):
""" Sanitize product for using in templates """
from money import Money
p['price'] = p['lst_price']
p['lst_price'] = Money(amount=p['lst_price'],
currency=self.getCurrency().get('currency_id')[1])
p['price_total'] = Money(amount=p['price'] * (1 + p['tax']),
currency=self.getCurrency().get('currency_id')[1])
p['categ_id'] = p['categ_id'][0]
# Category norm
if p['image']:
p['image'] = ''.join(["data:image/png;base64,", p['image']])
if p['image_medium']:
p['image_medium'] = ''.join(["data:image/png;base64,", p['image_medium']])
return p
def createSalesOrder(self, params, cart):
""" Create a partner if the e-mail weren't found, create a Sales Order
and its Sales Order Line """
odoo_core = OdooInstance()
# check if user exists ...
args = [('email', '=', params['user']['email'])]
ids = odoo_core.search('res.partner', args)
        # ... otherwise create it
        if ids:
            partner_id = ids[0]
        else:
            partner_id = odoo_core.create(
                'res.partner',
                dict(name=params['user']['name'],
                     email=params['user']['email']))
        # build sales order
        so = dict(partner_id=partner_id,
state="manual",
amount_total=params['total'] * 1.22,
amount_tax=params['total'] * 1.22 - params['total'],
amount_untaxed=params['total'])
so_id = odoo_core.create('sale.order', so)
for el in cart:
sol = dict(order_id=so_id,
product_uom=1,
price_unit=float(el['price_total']),
product_uom_qty=1,
state='confirmed',
product_id=el['id'],
                       order_partner_id=partner_id,
tax_id=[1])
sol_id = odoo_core.create('sale.order.line', sol)
#FIXME: taxes?!?
return so_id
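
# Hypothetical usage sketch (assumes a configured OdooInstance backend is
# reachable; the method names below are the ones defined above, the IDs are
# placeholders):
#
#   shop = Odoo()
#   for category in shop.getCategories():      # top-level categories
#       print(category['name'])
#   product = shop.getProduct(42)              # tax-aware, sanitized dict
#   print(product['price_total'])              # Money instance, tax included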
# ==== repo: cgeoffroy/son-analyze | file: son-scikit/tests/unit/son_scikit/hl_prometheus_test.py | license: apache-2.0 ====
# Copyright (c) 2015 SONATA-NFV, Thales Communications & Security
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Thales Communications & Security
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
# pylint: disable=invalid-name,missing-docstring
import copy
import datetime
import typing # noqa pylint: disable=unused-import
from son_analyze.core import prometheus
import son_scikit.hl_prometheus as hl
def test_build_sonata_df(basic_query_01):
x = prometheus.PrometheusData(basic_query_01)
base_entry = x.raw['data']['result'][0]
new_entry1 = copy.deepcopy(base_entry)
new_entry1['metric']['__name__'] = 'uno'
x.add_entry(new_entry1)
new_entry2 = copy.deepcopy(base_entry)
new_entry2['metric']['__name__'] = 'bis'
new_entry2['values'] = [(i[0], 20+i[1]) for i in new_entry2['values']]
x.add_entry(new_entry2)
new_entry3 = copy.deepcopy(base_entry)
new_entry3['metric']['__name__'] = 'ter'
def trans(t): # pylint: disable=missing-docstring,invalid-name
d = hl.convert_timestamp_to_posix(t[0])
d = d + datetime.timedelta(0, 1)
return (d.timestamp(), 30+t[1])
new_entry3['values'] = [trans(i) for i in new_entry3['values']]
x.add_entry(new_entry3)
tmp = hl.build_sonata_df_by_id(x)
for _, elt in tmp.items():
assert elt.index.freq == 'S'
assert any(elt.notnull())
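
# The test above depends on a `basic_query_01` pytest fixture (presumably
# provided by a conftest.py elsewhere in the repository) holding a raw
# Prometheus query result. A minimal sketch of running it, assuming pytest
# is installed:
#
#   pytest son-scikit/tests/unit/son_scikit/hl_prometheus_test.py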
# ==== repo: fnurl/alot | file: docs/source/generate_commands.py | license: gpl-3.0 ====
from __future__ import absolute_import
import sys
import os
HERE = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(HERE, '..', '..'))
from alot.commands import *
from alot.commands import COMMANDS
import alot.buffers
from argparse import HelpFormatter, SUPPRESS, OPTIONAL, ZERO_OR_MORE, ONE_OR_MORE, PARSER, REMAINDER
from alot.utils.argparse import BooleanAction
from gettext import gettext as _
import collections as _collections
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
NOTE = ".. CAUTION: THIS FILE IS AUTO-GENERATED!\n\n\n"
class HF(HelpFormatter):
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def rstify_parser(parser):
#header = parser.format_usage().strip()
#print '\n\n%s\n' % header + '_' * len(header)
parser.formatter_class = HF
#parser.print_help()
#continue
formatter = parser._get_formatter()
out = ""
# usage
usage = formatter._format_usage(None, parser._actions,
parser._mutually_exclusive_groups,
'').strip()
usage = usage.replace('--','---')
# section header
out += '.. describe:: %s\n\n' % parser.prog
# description
out += ' '*4 + parser.description
out += '\n\n'
if len(parser._positionals._group_actions) == 1:
out += " argument\n"
a = parser._positionals._group_actions[0]
out += ' '*8 + str(parser._positionals._group_actions[0].help)
if a.choices:
out += ". valid choices are: %s." % ','.join(['\`%s\`' % s for s
in a.choices])
if a.default:
out += ". defaults to: '%s'." % a.default
out += '\n\n'
elif len(parser._positionals._group_actions) > 1:
out += " positional arguments\n"
for index, a in enumerate(parser._positionals._group_actions):
out += " %s: %s" % (index, a.help)
if a.choices:
out += ". valid choices are: %s." % ','.join(['\`%s\`' % s for s
in a.choices])
if a.default:
out += ". defaults to: '%s'." % a.default
out += '\n'
out += '\n\n'
if parser._optionals._group_actions:
out += " optional arguments\n"
for a in parser._optionals._group_actions:
switches = [s.replace('--','---') for s in a.option_strings]
out += " :%s: %s" % (', '.join(switches), a.help)
if a.choices and not isinstance(a, BooleanAction):
out += ". Valid choices are: %s" % ','.join(['\`%s\`' % s for s
in a.choices])
if a.default:
out += " (Defaults to: '%s')" % a.default
out += '.\n'
out += '\n'
# epilog
#out += formatter.add_text(parser.epilog)
return out
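
# Minimal sketch of what rstify_parser() consumes and emits, using a toy
# argparse parser (the real parsers come from alot.commands.COMMANDS below;
# the names here are illustrative only):
#
#   import argparse
#   toy = argparse.ArgumentParser(prog='bufferlist', description='list buffers')
#   toy.add_argument('--reverse', action='store_true', help='reverse ordering')
#   print(rstify_parser(toy))
#
# This yields a '.. describe:: bufferlist' block with the description and an
# "optional arguments" section, with '--' switches rewritten as '---'.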
def get_mode_docs():
docs = {}
b = alot.buffers.Buffer
for entry in alot.buffers.__dict__.values():
if isinstance(entry, type):
if issubclass(entry, b) and not entry == b:
docs[entry.modename] = entry.__doc__.strip()
return docs
if __name__ == "__main__":
modes = []
for mode, modecommands in COMMANDS.items():
modefilename = mode+'.rst'
modefile = open(os.path.join(HERE, 'usage', 'modes', modefilename), 'w')
modefile.write(NOTE)
if mode != 'global':
modes.append(mode)
header = 'Commands in `%s` mode' % mode
modefile.write('%s\n%s\n' % (header, '-' * len(header)))
modefile.write('The following commands are available in %s mode\n\n' % mode)
else:
header = 'Global Commands'
modefile.write('%s\n%s\n' % (header, '-' * len(header)))
modefile.write('The following commands are available globally\n\n')
for cmdstring,struct in modecommands.items():
cls, parser, forced_args = struct
labelline = '.. _cmd.%s.%s:\n\n' % (mode, cmdstring.replace('_',
'-'))
modefile.write(labelline)
modefile.write(rstify_parser(parser))
modefile.close()
# ==== repo: exhacking/TKinter | file: jam.py | license: gpl-3.0 ====
# file: digital.py
# version: python 2.7
# Digital Clock program with Tkinter
# created by Exhacking.net
# update: 02/08/2012 12.13 AM
# import the Tkinter module
from Tkinter import *
# import the time module (to access the current time)
import time
class JamDigital:
""" Kelas Jam Digital"""
def __init__(self, parent, title):
self.parent = parent
self.parent.title(title)
self.parent.protocol("WM_DELETE_WINDOW", self.onTutup)
self.parent.resizable(False, False)
        # create a StringVar for the clock text
self.teksJam = StringVar()
self.aturKomponen()
        # start the update loop for the clock display
self.update()
def aturKomponen(self):
mainFrame = Frame(self.parent, bd=10)
mainFrame.pack(fill=BOTH, expand=YES)
        # the clock text is built with a Label widget, which can
        # change over time.
self.lblJam = Label(mainFrame, textvariable=self.teksJam,
font=('Helvetica', 40))
self.lblJam.pack(expand=YES)
self.lblInfo = Label(mainFrame, text="http://www.exhacking.net",
fg='red')
self.lblInfo.pack(side=TOP, pady=5)
def update(self):
        # strftime() converts the local time data into
        # the string format we want.
datJam = time.strftime("%H:%M:%S", time.localtime())
        # update the clock text to the current time
self.teksJam.set(datJam)
        # refresh the clock text every second (1000 ms)
self.timer = self.parent.after(1000, self.update)
def onTutup(self, event=None):
self.parent.destroy()
if __name__ == '__main__':
root = Tk()
app = JamDigital(root, "Jam Digital")
    root.mainloop()
# ==== repo: PlanetHunt/satgen | file: config_step.py | license: mit ====
#!/bin/env python
# -*- coding: utf-8 -*-
"""
This class is meant to read step configurations
for the different parameters related to the satellite.
The syntax of the step parameters should be like this:

[Parameter Name]
Start Value = float / int
Step = float / int
End Value = float / int
...

This configuration will be read and put into a matrix
so different satellites will be created.
"""
from logger import Logger
import ConfigParser
import itertools
import ast
import re
class ConfigStep:
def __init__(self, log_level="ERROR"):
self.logger = Logger(log_level)
self.log = self.logger.get_logger()
self.step_conf = dict()
def set_step_conf(self, conf):
"""
Set the step config
"""
        self.step_conf = conf
def get_step_conf(self):
"""
Returns the step conf
"""
return self.step_conf
def get_conf_parser(self):
"""
Generates a ConfigParser instance
"""
return ConfigParser.ConfigParser()
def read_conf(self, address):
"""
Reads the config file contents and
generates a configuration dict
"""
config = self.get_conf_parser()
config.read(address)
sections = config.sections()
for section in sections:
self.get_step_conf()[section] = dict()
for option in config.options(section):
config_value = config.get(section, option, True)
self.get_step_conf()[section][option.title()] = config_value
def add_edge_length(self, a, b):
"""
        Add two same-size edge-length tuples together.
"""
return tuple(sum(x) for x in zip(a, b))
def convert_to_tuple(self, tuple_str):
"""
converts the given tuple string to a tuple python object
"""
return ast.literal_eval(tuple_str)
def do_steps(self):
"""
        Returns all the possible values for the different parameters as lists.
        With the help of these results, the combination matrix will be created.
"""
steps = self.get_step_conf()
all_step_config = dict()
for k, v in steps.items():
tmp_list = list()
all_step_config[k] = tmp_list
start = v["Start Value"]
end = v["End Value"]
# special handling of edge length
if(k == "Edge Length"):
start = self.convert_to_tuple(start)
end = self.convert_to_tuple(end)
tmp_list.append(str(start))
while(start != end):
start = self.add_edge_length(
start, self.convert_to_tuple(v["Step"]))
tmp_list.append(str(start))
else:
tmp_list.append(float(start))
while float(start) < float(end):
start = float(start) + float(v["Step"])
tmp_list.append(start)
return all_step_config
def get_combinations(self):
"""
Returns all the possible combinations from the given dict
it uses product function.
"""
all_steps = self.do_steps()
self.option = [k for k, v in all_steps.items()]
result = itertools.product(*(v for k, v in all_steps.items()))
return result
def get_options(self):
        return self.option
# steps = ConfigStep()
# steps.read_conf("steps.cfg")
# print list(steps.get_combinations())
# print steps.get_options()
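# A minimal steps.cfg sketch matching the syntax documented above (the
# section names and values are illustrative only):
#
# [Inclination]
# Start Value = 0
# Step = 30
# End Value = 90
#
# [Edge Length]
# Start Value = (1.0, 1.0, 1.0)
# Step = (0.5, 0.5, 0.5)
# End Value = (2.0, 2.0, 2.0)
#
# For "Inclination", do_steps() yields [0.0, 30.0, 60.0, 90.0]; for
# "Edge Length" it yields the three tuples as strings, and
# get_combinations() returns the cartesian product of both lists.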
# ==== repo: projectweekend/raspberry-pi-io | file: raspberry_pi_io/io.py | license: mit ====
import yaml
from api import DeviceConfig
from gpio import PinManager
from rabbit import AsyncConsumer
class IOService(object):
def __init__(self, config_file):
with open(config_file) as file:
self.config = yaml.safe_load(file)
self.load_device_config()
self.initialize_pin_manager()
self.initialize_consumer()
@staticmethod
def _error(response):
return {'error': 1, 'response': response}
@staticmethod
def _response(response):
return {'error': 0, 'response': response}
def load_device_config(self):
self.device_config = DeviceConfig(
api=self.config['api'],
user_email=self.config['user_email'],
user_key=self.config['user_key'],
device_id=self.config['device_id']).get()
def initialize_pin_manager(self):
self.pin_manager = PinManager(self.device_config['pinConfig'])
def initialize_consumer(self):
def action(instruction):
response = getattr(self.pin_manager, instruction['action'])(int(instruction['pin']))
return {
'response': response
}
self.consumer = AsyncConsumer(
rabbit_url=self.device_config['rabbitURL'],
queue=self.config['device_id'],
exchange='raspberry-pi-io',
exchange_type='direct',
routing_key=self.config['device_id'],
action=action)
def start(self):
try:
self.consumer.run()
except:
self.consumer.stop()
raise
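
# Hypothetical YAML config sketch (the keys mirror those read in __init__ and
# load_device_config(); all values are placeholders):
#
#   api: https://example.com/api
#   user_email: user@example.com
#   user_key: SECRET-KEY
#   device_id: pi-01
#
# and a matching entry point:
#
#   if __name__ == '__main__':
#       IOService('config.yaml').start()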
# ==== repo: adamwiggins/cocos2d | file: test/test_menu_centered.py | license: bsd-3-clause ====
#
# Cocos
# http://code.google.com/p/los-cocos/
#
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from pyglet import image
from pyglet.gl import *
from pyglet import font
from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *
class MainMenu(Menu):
def __init__( self ):
super( MainMenu, self ).__init__("TITLE" )
self.menu_valign = CENTER
self.menu_halign = CENTER
# then add the items
items = [
( MenuItem('Item 1', self.on_quit ) ),
( MenuItem('Item 2', self.on_quit ) ),
( MenuItem('Item 3', self.on_quit ) ),
( MenuItem('Item 4', self.on_quit ) ),
( MenuItem('Item 5', self.on_quit ) ),
( MenuItem('Item 6', self.on_quit ) ),
( MenuItem('Item 7', self.on_quit ) ),
]
self.create_menu( items, shake(), shake_back() )
def on_quit( self ):
pyglet.app.exit()
if __name__ == "__main__":
pyglet.font.add_directory('.')
director.init( resizable=True)
director.run( Scene( MainMenu() ) )
# ==== repo: durante987/nonogram_solver | file: nonogram_solver.py | license: mit ====
#!/usr/bin/env python3.8
"""
A program that tries to solve nonograms.
"""
import argparse
import logging
import sys
from nonogram.raster import Raster
from nonogram import solver
def main(args):
"""
Read the puzzle from the input file and start solving it.
"""
logging.basicConfig(format='%(message)s',
level=logging.DEBUG if args.debug else logging.WARNING)
with open(args.input_file, 'r') as inp:
raster = Raster.from_file(inp)
solution = solver.solve(raster)
if not solution:
print("Program couldn't find any solution.")
logging.debug(str(raster))
sys.exit(2)
print(str(solution), end='')
if args.bmp_file:
solution.to_bitmap(args.bmp_file)
if __name__ == '__main__':
# pylint: disable=invalid-name
parser = argparse.ArgumentParser(description='Solve nonograms')
parser.add_argument('input_file', help='file specifying the nonogram')
parser.add_argument(
'--bmp', dest='bmp_file', help='write the solution to the specified'
' file in BMP format')
parser.add_argument('--debug', help='enable debug logs',
action='store_true')
main(args=parser.parse_args())
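
# Example invocations (flag names taken from the argparse setup above):
#
#   ./nonogram_solver.py puzzle.txt
#   ./nonogram_solver.py puzzle.txt --bmp solution.bmp --debug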
# ==== repo: Springerle/hovercraft-slides | file: {{cookiecutter.repo_name}}/setup.py | license: cc0-1.0 ====
"""A setup shim for 'rituals'"""
import os
import re
import sys
import subprocess
from datetime import datetime
try:
url = subprocess.check_output('git remote get-url origin',
stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError:
url = '{{ cookiecutter.url }}'
else:
url = url.decode('utf-8').strip()
if url.endswith('.git'):
url = url[:-4]
if url.startswith('ssh://'):
url = url[6:]
url = re.sub(r'git@([^:/]+)[:/]', r'https://\1/', url)
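# The rewrites above normalize SSH remotes to HTTPS, e.g. (illustrative):
#   ssh://git@github.com/user/repo -> git@github.com/user/repo
#                                  -> https://github.com/user/repo
#   git@github.com:user/repo       -> https://github.com/user/repo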
try:
now = '{:%Y%m%d-%H%M}'.format(datetime.now())
version = subprocess.check_output("git describe --long --dirty='-{}' --all --always".format(now),
stderr=subprocess.STDOUT, shell=True)
version = version.decode('utf-8').strip().replace('/', '-')
except subprocess.CalledProcessError:
filedate = os.path.getmtime(os.path.join(os.path.dirname(__file__), 'index.rst'))
version = datetime.fromtimestamp(filedate).isoformat('-')[:16].replace(':', '').replace('-', '.')
project = dict(
name=os.path.basename(os.path.dirname(os.path.abspath(__file__))),
version=version,
url=url,
author='{{ cookiecutter.full_name }}',
author_email='{{ cookiecutter.email }}',
license='{{ cookiecutter.license }}',
)
if __name__ == "__main__":
install = True
for arg in sys.argv[1:]:
if arg.startswith('--') and arg.lstrip('-') in project:
print(project.get(arg.lstrip('-')))
install = False
if install:
subprocess.call("pip install -r requirements.txt", shell=True)
# ==== repo: luskaner/wps-dict | file: wps_dict/wps_dict/interface/gui/bootstrap.py | license: gpl-3.0 ====
from os.path import dirname, abspath
from .SignalHandler import *
from ...providers.online.downloadable.list import online_downloadable_providers
from ...providers.online.queryable.list import online_queryable_providers
from ...providers.offline.list import offline_providers
from ...tools.list import tools
gi.require_version('Gtk', '3.0')
# noinspection PyPep8,PyUnresolvedReferences
from gi.repository import Gtk
# noinspection PyUnusedLocal
def _set_treeview_row(_, cell, *__):
inconsistent = cell.get_property('inconsistent')
enabled = cell.get_property('active')
cell.set_property('inconsistent', inconsistent)
cell.set_active(enabled)
def _get_column(builder, are_tools=False):
column = Gtk.TreeViewColumn()
name = Gtk.CellRendererText()
enabled = Gtk.CellRendererToggle()
if are_tools:
enabled.connect("toggled", SignalHandler(builder).on_cell_toggled_tools)
else:
enabled.connect("toggled", SignalHandler(builder).on_cell_toggled_providers)
column.pack_start(name, True)
column.pack_start(enabled, True)
column.add_attribute(name, "text", 0)
column.add_attribute(enabled, "active", 1)
if not are_tools:
column.add_attribute(enabled, "inconsistent", 2)
column.set_cell_data_func(enabled, _set_treeview_row)
return column
def generate_provider_tree(builder):
providers_list = builder.get_object("providers_list")
item_offline_providers = providers_list.append(None, ['Offline providers', True, False])
item_online_providers = providers_list.append(None, ['Online providers', True, False])
item_online_downloadable_providers = providers_list.append(item_online_providers,
['Downloadable providers', True, False])
item_online_queryable_providers = providers_list.append(item_online_providers, ['Queryable providers', True, False])
for offline_provider in offline_providers.keys():
providers_list.append(item_offline_providers, [offline_provider, True, False])
for online_provider in online_queryable_providers.keys():
providers_list.append(item_online_queryable_providers, [online_provider, True, False])
for online_downloadable_provider in online_downloadable_providers.keys():
providers_list.append(item_online_downloadable_providers, [online_downloadable_provider, True, False])
builder.get_object("providers_tree_view").get_selection().set_mode(Gtk.SelectionMode.NONE)
builder.get_object("providers_tree_view").append_column(_get_column(builder))
def generate_tool_tree(builder):
tools_list = builder.get_object("tools_list")
for tool in tools.keys():
tools_list.append([tool, True, False])
builder.get_object("tools_tree_view").append_column(_get_column(builder, True))
builder.get_object("tools_tree_view").get_selection().set_mode(Gtk.SelectionMode.NONE)
def init():
builder = Gtk.Builder()
builder.add_from_file(dirname(abspath(__file__)) + "/ui.glade")
builder.connect_signals(SignalHandler(builder))
window = builder.get_object("main_window")
generate_provider_tree(builder)
generate_tool_tree(builder)
window.show_all()
Gtk.main()
# ==== repo: ykaneko/quantum | file: quantum/extensions/portbindings.py | license: apache-2.0 ====
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from quantum.api import extensions
from quantum.api.v2 import attributes
# The service will return the vif type for the specific port.
VIF_TYPE = 'binding:vif_type'
# In some cases different implementations may be run on different hosts.
# The host on which the port will be allocated.
HOST_ID = 'binding:host_id'
# The profile will be a dictionary that enables the application running
# on the specific host to pass and receive vif port specific information to
# the plugin.
PROFILE = 'binding:profile'
# The capabilities will be a dictionary that enables passing information about
# functionalities Quantum provides. The following value should be provided.
# - port_filter : Boolean value indicating Quantum provides port filtering
# features such as security group and anti MAC/IP spoofing
CAPABILITIES = 'binding:capabilities'
CAP_PORT_FILTER = 'port_filter'
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_HYPERV = 'hyperv'
VIF_TYPE_OTHER = 'other'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
VIF_TYPE: {'allow_post': False, 'allow_put': False,
'default': attributes.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'is_visible': True},
HOST_ID: {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True,
'enforce_policy': True},
PROFILE: {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'validate': {'type:dict': None},
'is_visible': True},
CAPABILITIES: {'allow_post': False, 'allow_put': False,
'default': attributes.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'is_visible': True},
}
}
class Portbindings(extensions.ExtensionDescriptor):
"""Extension class supporting port bindings.
This class is used by quantum's extension framework to make
metadata about the port bindings available to external applications.
With admin rights one will be able to update and read the values.
"""
@classmethod
def get_name(cls):
return "Port Binding"
@classmethod
def get_alias(cls):
return "binding"
@classmethod
def get_description(cls):
return "Expose port bindings of a virtual port to external application"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/binding/api/v1.0"
@classmethod
def get_updated(cls):
return "2012-11-14T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
# ==== repo: ntymtsiv/tempest | file: tempest/services/compute/v3/json/quotas_client.py | license: apache-2.0 ====
# Copyright 2012 NTT Data
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.common.rest_client import RestClient
from tempest import config
CONF = config.CONF
class QuotasV3ClientJSON(RestClient):
def __init__(self, auth_provider):
super(QuotasV3ClientJSON, self).__init__(auth_provider)
self.service = CONF.compute.catalog_v3_type
def get_quota_set(self, tenant_id):
"""List the quota set for a tenant."""
url = 'os-quota-sets/%s' % str(tenant_id)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['quota_set']
def get_default_quota_set(self, tenant_id):
"""List the default quota set for a tenant."""
url = 'os-quota-sets/%s/defaults' % str(tenant_id)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['quota_set']
def update_quota_set(self, tenant_id, force=None,
metadata_items=None, ram=None, floating_ips=None,
fixed_ips=None, key_pairs=None, instances=None,
security_group_rules=None, cores=None,
security_groups=None):
"""
Updates the tenant's quota limits for one or more resources
"""
post_body = {}
if force is not None:
post_body['force'] = force
if metadata_items is not None:
post_body['metadata_items'] = metadata_items
if ram is not None:
post_body['ram'] = ram
if floating_ips is not None:
post_body['floating_ips'] = floating_ips
if fixed_ips is not None:
post_body['fixed_ips'] = fixed_ips
if key_pairs is not None:
post_body['key_pairs'] = key_pairs
if instances is not None:
post_body['instances'] = instances
if security_group_rules is not None:
post_body['security_group_rules'] = security_group_rules
if cores is not None:
post_body['cores'] = cores
if security_groups is not None:
post_body['security_groups'] = security_groups
post_body = json.dumps({'quota_set': post_body})
resp, body = self.put('os-quota-sets/%s' % str(tenant_id), post_body,
self.headers)
body = json.loads(body)
return resp, body['quota_set']
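
# Hypothetical usage sketch (an auth_provider from tempest's auth machinery
# is assumed; the tenant id is a placeholder):
#
#   client = QuotasV3ClientJSON(auth_provider)
#   resp, quota_set = client.get_quota_set('tenant-uuid')
#   resp, quota_set = client.update_quota_set('tenant-uuid', ram=2048)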
# ==== repo: koss822/misc | file: Linux/MySettings/myvim/vim/bundle/jedi-vim/pythonx/jedi/test/completion/decorators.py | license: gpl-3.0 ====
# -----------------
# normal decorators
# -----------------
def decorator(func):
def wrapper(*args):
return func(1, *args)
return wrapper
@decorator
def decorated(a,b):
return a,b
exe = decorated(set, '')
#? set
exe[1]
#? int()
exe[0]
# more complicated with args/kwargs
def dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
@dec
def fu(a, b, c, *args, **kwargs):
return a, b, c, args, kwargs
exe = fu(list, c=set, b=3, d='')
#? list
exe[0]
#? int()
exe[1]
#? set
exe[2]
#? []
exe[3][0].
#? str()
exe[4]['d']
exe = fu(list, set, 3, '', d='')
#? str()
exe[3][0]
# -----------------
# multiple decorators
# -----------------
def dec2(func2):
def wrapper2(first_arg, *args2, **kwargs2):
return func2(first_arg, *args2, **kwargs2)
return wrapper2
@dec2
@dec
def fu2(a, b, c, *args, **kwargs):
return a, b, c, args, kwargs
exe = fu2(list, c=set, b=3, d='str')
#? list
exe[0]
#? int()
exe[1]
#? set
exe[2]
#? []
exe[3][0].
#? str()
exe[4]['d']
# -----------------
# Decorator is a class
# -----------------
def same_func(func):
return func
class Decorator(object):
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
return self.func(1, *args, **kwargs)
@Decorator
def nothing(a,b,c):
return a,b,c
#? int()
nothing("")[0]
#? str()
nothing("")[1]
@same_func
@Decorator
def nothing(a,b,c):
return a,b,c
#? int()
nothing("")[0]
class MethodDecoratorAsClass():
class_var = 3
@Decorator
def func_without_self(arg, arg2):
return arg, arg2
@Decorator
def func_with_self(self, arg):
return self.class_var
#? int()
MethodDecoratorAsClass().func_without_self('')[0]
#? str()
MethodDecoratorAsClass().func_without_self('')[1]
#?
MethodDecoratorAsClass().func_with_self(1)
class SelfVars():
"""Init decorator problem as an instance, #247"""
@Decorator
def __init__(self):
"""
__init__ decorators should be ignored when looking up variables in the
class.
"""
self.c = list
@Decorator
def shouldnt_expose_var(not_self):
"""
Even though in real Python this shouldn't expose the variable, in this
case Jedi exposes the variable, because these kind of decorators are
normally descriptors, which SHOULD be exposed (at least 90%).
"""
not_self.b = 1.0
def other_method(self):
#? float()
self.b
#? list
self.c
# -----------------
# not found decorators (are just ignored)
# -----------------
@not_found_decorator
def just_a_func():
return 1
#? int()
just_a_func()
#? ['__closure__']
just_a_func.__closure__
class JustAClass:
@not_found_decorator2
def a(self):
return 1
#? ['__call__']
JustAClass().a.__call__
#? int()
JustAClass().a()
#? ['__call__']
JustAClass.a.__call__
#? int()
JustAClass.a()
# -----------------
# illegal decorators
# -----------------
class DecoratorWithoutCall():
def __init__(self, func):
self.func = func
@DecoratorWithoutCall
def f():
return 1
# cannot be resolved - should be ignored
@DecoratorWithoutCall(None)
def g():
return 1
#?
f()
#? int()
g()
class X():
@str
def x(self):
pass
def y(self):
#? str()
self.x
#?
self.x()
def decorator_var_args(function, *args):
return function(*args)
@decorator_var_args
def function_var_args(param):
return param
#? int()
function_var_args(1)
# -----------------
# method decorators
# -----------------
def dec(f):
def wrapper(s):
return f(s)
return wrapper
class MethodDecorators():
_class_var = 1
def __init__(self):
self._method_var = ''
@dec
def constant(self):
return 1.0
@dec
def class_var(self):
return self._class_var
@dec
def method_var(self):
return self._method_var
#? float()
MethodDecorators().constant()
#? int()
MethodDecorators().class_var()
#? str()
MethodDecorators().method_var()
class Base():
@not_existing
def __init__(self):
pass
@not_existing
def b(self):
return ''
@dec
def c(self):
return 1
class MethodDecoratorDoesntExist(Base):
"""#272 github: combination of method decorators and super()"""
def a(self):
#?
super().__init__()
#? str()
super().b()
#? int()
super().c()
#? float()
self.d()
@doesnt_exist
def d(self):
return 1.0
# -----------------
# others
# -----------------
def memoize(function):
def wrapper(*args):
if random.choice([0, 1]):
pass
else:
rv = function(*args)
return rv
return wrapper
@memoize
def follow_statement(stmt):
return stmt
# here we had problems with the else clause, because the parent was not right.
#? int()
follow_statement(1)
# -----------------
# class decorators
# -----------------
# class decorators should just be ignored
@should_ignore
class A():
def ret(self):
return 1
#? int()
A().ret()
# -----------------
# On decorator completions
# -----------------
import abc
#? ['abc']
@abc
#? ['abstractmethod']
@abc.abstractmethod
# ==== repo: odahoda/noisicaa | file: noisicaa/music/project_client.py | license: gpl-2.0 ====
#!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import asyncio
from fractions import Fraction
import functools
import getpass
import logging
import random
import socket
from typing import Any, Dict, List, Tuple, Callable, TypeVar
from noisicaa import audioproc
from noisicaa import core
from noisicaa import lv2
from noisicaa import node_db as node_db_lib
from noisicaa import editor_main_pb2
from noisicaa.core import empty_message_pb2
from noisicaa.core import ipc
from noisicaa.core import session_data_pb2
from . import render_pb2
from . import project as project_lib
from . import writer_client
from . import render
from . import player as player_lib
from . import session_value_store
from . import loadtest_generator
logger = logging.getLogger(__name__)
class ProjectClient(object):
def __init__(
self, *,
event_loop: asyncio.AbstractEventLoop,
server: ipc.Server,
manager: ipc.Stub,
tmp_dir: str,
node_db: node_db_lib.NodeDBClient,
urid_mapper: lv2.ProxyURIDMapper
) -> None:
self.__event_loop = event_loop
self.__server = server
self.__tmp_dir = tmp_dir
self.__manager = manager
self.__node_db = node_db
self.__urid_mapper = urid_mapper
self.__pool = None # type: project_lib.Pool
self.__project = None # type: project_lib.BaseProject
self.__writer_client = None # type: writer_client.WriterClient
self.__writer_address = None # type: str
self.__session_values = None # type: session_value_store.SessionValueStore
self.__session_data_listeners = core.CallbackMap[str, Any]()
self.__players = {} # type: Dict[str, player_lib.Player]
self.__cb_endpoint_name = 'project-%016x' % random.getrandbits(63)
self.__cb_endpoint_address = None # type: str
@property
def project(self) -> project_lib.BaseProject:
return self.__project
async def setup(self) -> None:
cb_endpoint = ipc.ServerEndpoint(self.__cb_endpoint_name)
cb_endpoint.add_handler(
'CONTROL_VALUE_CHANGE', self.__handle_control_value_change,
audioproc.ControlValueChange, empty_message_pb2.EmptyMessage)
cb_endpoint.add_handler(
'PLUGIN_STATE_CHANGE', self.__handle_plugin_state_change,
audioproc.PluginStateChange, empty_message_pb2.EmptyMessage)
self.__cb_endpoint_address = await self.__server.add_endpoint(cb_endpoint)
async def cleanup(self) -> None:
players = list(self.__players.values())
self.__players.clear()
for player in players:
await player.cleanup()
if self.__cb_endpoint_address is not None:
await self.__server.remove_endpoint(self.__cb_endpoint_name)
self.__cb_endpoint_address = None
await self.close()
async def __create_writer(self) -> None:
logger.info("Creating writer process...")
create_writer_response = editor_main_pb2.CreateProcessResponse()
await self.__manager.call(
'CREATE_WRITER_PROCESS', None, create_writer_response)
self.__writer_address = create_writer_response.address
logger.info("Connecting to writer process %r...", self.__writer_address)
self.__writer_client = writer_client.WriterClient(
event_loop=self.__event_loop)
await self.__writer_client.setup()
await self.__writer_client.connect(self.__writer_address)
async def __init_session_data(self) -> None:
session_name = '%s.%s' % (getpass.getuser(), socket.getfqdn())
self.__session_values = session_value_store.SessionValueStore(
self.__event_loop, session_name)
await self.__session_values.init(self.__project.data_dir)
for session_value in self.__session_values.values():
self.__session_data_listeners.call(
session_value.name, self.__session_proto_to_py(session_value))
# def get_object(self, obj_id: int) -> model_base.ObjectBase:
# return self.__pool[obj_id]
async def __handle_control_value_change(
self,
request: audioproc.ControlValueChange,
response: empty_message_pb2.EmptyMessage
) -> None:
assert self.__project is not None
logger.info(
"control_value_change(%s, %s, %s, %f, %d)",
request.realm, request.node_id,
request.value.name, request.value.value, request.value.generation)
node = None
for node in self.__project.nodes:
if node.pipeline_node_id == request.node_id:
break
else:
raise ValueError("Invalid node_id '%s'" % request.node_id)
with self.__project.apply_mutations('Change control value "%s"' % request.value.name):
node.set_control_value(
request.value.name, request.value.value, request.value.generation)
async def __handle_plugin_state_change(
self,
request: audioproc.PluginStateChange,
response: empty_message_pb2.EmptyMessage
) -> None:
assert self.__project is not None
node = None
for node in self.__project.nodes:
if node.pipeline_node_id == request.node_id:
break
else:
raise ValueError("Invalid node_id '%s'" % request.node_id)
with self.__project.apply_mutations('Change plugin state'):
node.set_plugin_state(request.state)
async def create(self, path: str) -> None:
assert self.__project is None
await self.__create_writer()
self.__pool = project_lib.Pool(project_cls=project_lib.Project)
self.__project = await project_lib.Project.create_blank(
path=path,
pool=self.__pool,
writer=self.__writer_client,
node_db=self.__node_db)
self.__project.monitor_model_changes()
await self.__init_session_data()
async def create_loadtest(self, path: str, spec: Dict[str, Any]) -> None:
assert self.__project is None
await self.__create_writer()
self.__pool = project_lib.Pool(project_cls=project_lib.Project)
self.__project = await project_lib.Project.create_blank(
path=path,
pool=self.__pool,
writer=self.__writer_client,
node_db=self.__node_db)
self.__project.monitor_model_changes()
with self.__project.apply_mutations('Fill it with junk'):
loadtest_generator.fill_project(self.__project, spec)
await self.__init_session_data()
async def create_inmemory(self) -> None:
assert self.__project is None
self.__pool = project_lib.Pool()
self.__project = self.__pool.create(
project_lib.BaseProject, node_db=self.__node_db)
self.__pool.set_root(self.__project)
self.__project.monitor_model_changes()
await self.__init_session_data()
async def open(self, path: str) -> None:
assert self.__project is None
await self.__create_writer()
self.__pool = project_lib.Pool(project_cls=project_lib.Project)
self.__project = await project_lib.Project.open(
path=path,
pool=self.__pool,
writer=self.__writer_client,
node_db=self.__node_db)
self.__project.monitor_model_changes()
await self.__init_session_data()
async def close(self) -> None:
if self.__project is not None:
await self.__project.close()
self.__project = None
self.__pool = None
if self.__writer_client is not None:
await self.__writer_client.close()
await self.__writer_client.cleanup()
self.__writer_client = None
if self.__writer_address is not None:
await self.__manager.call(
'SHUTDOWN_PROCESS',
editor_main_pb2.ShutdownProcessRequest(
address=self.__writer_address))
self.__writer_address = None
async def create_player(self, *, audioproc_address: str) -> Tuple[str, str]:
assert self.__project is not None
logger.info("Creating audioproc client...")
audioproc_client = audioproc.AudioProcClient(
self.__event_loop, self.__server, self.__urid_mapper)
await audioproc_client.setup()
logger.info("Connecting audioproc client...")
await audioproc_client.connect(audioproc_address)
realm_name = 'project:%s' % self.__project.id
logger.info("Creating realm '%s'...", realm_name)
await audioproc_client.create_realm(
name=realm_name,
parent='root',
enable_player=True,
callback_address=self.__cb_endpoint_address)
player = player_lib.Player(
project=self.__project,
callback_address=self.__cb_endpoint_address,
event_loop=self.__event_loop,
audioproc_client=audioproc_client,
realm=realm_name,
session_values=self.__session_values)
await player.setup()
self.__players[player.id] = player
return (player.id, player.realm)
async def delete_player(self, player_id: str) -> None:
player = self.__players.pop(player_id)
await player.cleanup()
if player.audioproc_client is not None:
if player.realm is not None:
logger.info("Deleting realm '%s'...", player.realm)
await player.audioproc_client.delete_realm(name=player.realm)
await player.audioproc_client.disconnect()
await player.audioproc_client.cleanup()
async def create_plugin_ui(self, player_id: str, node_id: str) -> Tuple[int, Tuple[int, int]]:
player = self.__players[player_id]
return await player.create_plugin_ui(node_id)
async def delete_plugin_ui(self, player_id: str, node_id: str) -> None:
player = self.__players[player_id]
await player.delete_plugin_ui(node_id)
async def update_player_state(self, player_id: str, state: audioproc.PlayerState) -> None:
player = self.__players[player_id]
await player.update_state(state)
async def dump(self) -> None:
raise NotImplementedError
# await self._stub.call('DUMP')
async def render(
self, callback_address: str, render_settings: render_pb2.RenderSettings
) -> None:
assert self.__project is not None
renderer = render.Renderer(
project=self.__project,
tmp_dir=self.__tmp_dir,
server=self.__server,
manager=self.__manager,
event_loop=self.__event_loop,
callback_address=callback_address,
render_settings=render_settings,
urid_mapper=self.__urid_mapper,
)
await renderer.run()
def add_session_data_listener(
self, key: str, func: Callable[[Any], None]) -> core.Listener:
return self.__session_data_listeners.add(key, func)
def __session_proto_to_py(self, session_value: session_data_pb2.SessionValue) -> Any:
value_type = session_value.WhichOneof('type')
if value_type == 'string_value':
return session_value.string_value
elif value_type == 'bytes_value':
return session_value.bytes_value
elif value_type == 'bool_value':
return session_value.bool_value
elif value_type == 'int_value':
return session_value.int_value
elif value_type == 'double_value':
return session_value.double_value
elif value_type == 'fraction_value':
return Fraction(
session_value.fraction_value.numerator,
session_value.fraction_value.denominator)
elif value_type == 'musical_time_value':
return audioproc.MusicalTime.from_proto(session_value.musical_time_value)
elif value_type == 'musical_duration_value':
return audioproc.MusicalDuration.from_proto(session_value.musical_duration_value)
else:
raise ValueError(session_value)
def set_session_value(self, key: str, value: Any) -> None:
self.set_session_values({key: value})
def set_session_values(self, data: Dict[str, Any]) -> None:
session_values = [] # type: List[session_data_pb2.SessionValue]
for key, value in data.items():
session_value = session_data_pb2.SessionValue()
session_value.name = key
if isinstance(value, str):
session_value.string_value = value
elif isinstance(value, bytes):
session_value.bytes_value = value
elif isinstance(value, bool):
session_value.bool_value = value
elif isinstance(value, int):
session_value.int_value = value
elif isinstance(value, float):
session_value.double_value = value
elif isinstance(value, Fraction):
session_value.fraction_value.numerator = value.numerator
session_value.fraction_value.denominator = value.denominator
elif isinstance(value, audioproc.MusicalTime):
session_value.musical_time_value.numerator = value.numerator
session_value.musical_time_value.denominator = value.denominator
elif isinstance(value, audioproc.MusicalDuration):
session_value.musical_time_value.numerator = value.numerator
session_value.musical_time_value.denominator = value.denominator
else:
raise ValueError("%s: %s" % (key, type(value)))
session_values.append(session_value)
task = self.__event_loop.create_task(self.__session_values.set_values(session_values))
task.add_done_callback(functools.partial(self.__set_session_values_done, data))
def __set_session_values_done(self, data: Dict[str, Any], task: asyncio.Task) -> None:
for key, value in data.items():
self.__session_data_listeners.call(key, value)
T = TypeVar('T')
def get_session_value(self, key: str, default: T) -> T: # pylint: disable=undefined-variable
try:
session_value = self.__session_values.get_value(key)
except KeyError:
return default
else:
return self.__session_proto_to_py(session_value)
# ==== repo: pFernbach/hpp-rbprm-corba | file: src/hpp/corbaserver/rbprm/client.py | license: lgpl-3.0 ====
#!/usr/bin/env python
#
# Copyright (c) 2014 CNRS
# Author: Steve Tonneau
#
# This file is part of hpp-rbprm-corba.
# hpp-rbprm-corba is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-rbprm-corba is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-manipulation-corba. If not, see
# <http://www.gnu.org/licenses/>.
from hpp.corbaserver.client import Client as _Parent
from hpp_idl.hpp.corbaserver.rbprm import RbprmBuilder
class Client (_Parent):
"""
Connect and create clients for hpp-rbprm library.
"""
defaultClients = {
'rbprmbuilder' : RbprmBuilder,
}
def __init__(self, url = None, context = "corbaserver"):
"""
Initialize CORBA and create default clients.
:param url: URL in the IOR, corbaloc, corbalocs, and corbanames formats.
For a remote corba server, use
url = "corbaloc:iiop:<host>:<port>/NameService"
"""
self._initOrb (url)
self._makeClients ("rbprm", self.defaultClients, context)
# self.rbprmbuilder is created by self._makeClients
# The old code stored the object as self.rbprm
# Make it backward compatible.
self.rbprm = self.rbprmbuilder
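
# Hypothetical usage sketch (the URL format is taken from the docstring
# above; the attribute name rbprm is set in __init__):
#
#   client = Client(url="corbaloc:iiop:localhost:2809/NameService")
#   client.rbprm  # RbprmBuilder stub created by _makeClients()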
# ==== repo: alexmogavero/home-assistant | file: homeassistant/components/knx.py | license: apache-2.0 ====
"""
Support for KNX components.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/knx/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_HOST, CONF_PORT)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['knxip==0.4']
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 3671
DOMAIN = 'knx'
EVENT_KNX_FRAME_RECEIVED = 'knx_frame_received'
KNXTUNNEL = None
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the connection to the KNX IP interface."""
global KNXTUNNEL
from knxip.ip import KNXIPTunnel
from knxip.core import KNXException
host = config[DOMAIN].get(CONF_HOST)
port = config[DOMAIN].get(CONF_PORT)
    if host == '0.0.0.0':
_LOGGER.debug("Will try to auto-detect KNX/IP gateway")
KNXTUNNEL = KNXIPTunnel(host, port)
try:
res = KNXTUNNEL.connect()
_LOGGER.debug("Res = %s", res)
if not res:
_LOGGER.error("Could not connect to KNX/IP interface %s", host)
return False
except KNXException as ex:
_LOGGER.exception("Can't connect to KNX/IP interface: %s", ex)
KNXTUNNEL = None
return False
_LOGGER.info("KNX IP tunnel to %s:%i established", host, port)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, close_tunnel)
return True
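
# A minimal configuration.yaml sketch matching CONFIG_SCHEMA above (the host
# is auto-detected when left at the 0.0.0.0 default; values are placeholders):
#
#   knx:
#     host: 192.168.1.10
#     port: 3671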
def close_tunnel(_data):
"""Close the NKX tunnel connection on shutdown."""
global KNXTUNNEL
KNXTUNNEL.disconnect()
KNXTUNNEL = None
class KNXConfig(object):
"""Handle the fetching of configuration from the config file."""
def __init__(self, config):
"""Initialize the configuration."""
from knxip.core import parse_group_address
self.config = config
self.should_poll = config.get('poll', True)
if config.get('address'):
self._address = parse_group_address(config.get('address'))
else:
self._address = None
if self.config.get('state_address'):
self._state_address = parse_group_address(
self.config.get('state_address'))
else:
self._state_address = None
@property
def name(self):
"""Return the name given to the entity."""
return self.config['name']
@property
def address(self):
"""Return the address of the device as an integer value.
3 types of addresses are supported:
integer - 0-65535
2 level - a/b
3 level - a/b/c
"""
return self._address
@property
def state_address(self):
"""Return the group address the device sends its current state to.
        Some KNX devices can send the current state to a separate
        group address. This makes sense e.g. when an actuator can
        be switched but also has a timer functionality.
"""
return self._state_address
class KNXGroupAddress(Entity):
"""Representation of devices connected to a KNX group address."""
def __init__(self, hass, config):
"""Initialize the device."""
self._config = config
self._state = False
self._data = None
_LOGGER.debug(
"Initalizing KNX group address for %s (%s)",
self.name, self.address
)
def handle_knx_message(addr, data):
"""Handle an incoming KNX frame.
Handle an incoming frame and update our status if it contains
information relating to this device.
"""
if (addr == self.state_address) or (addr == self.address):
self._state = data[0]
self.schedule_update_ha_state()
KNXTUNNEL.register_listener(self.address, handle_knx_message)
if self.state_address:
KNXTUNNEL.register_listener(self.state_address, handle_knx_message)
@property
def name(self):
"""Return the entity's display name."""
return self._config.name
@property
def config(self):
"""Return the entity's configuration."""
return self._config
@property
def should_poll(self):
"""Return the state of the polling, if needed."""
return self._config.should_poll
@property
def is_on(self):
"""Return True if the value is not 0 is on, else False."""
return self._state != 0
@property
def address(self):
"""Return the KNX group address."""
return self._config.address
@property
def state_address(self):
"""Return the KNX group address."""
return self._config.state_address
@property
def cache(self):
"""Return the name given to the entity."""
return self._config.config.get('cache', True)
def group_write(self, value):
"""Write to the group address."""
KNXTUNNEL.group_write(self.address, [value])
def update(self):
"""Get the state from KNX bus or cache."""
from knxip.core import KNXException
try:
if self.state_address:
res = KNXTUNNEL.group_read(
self.state_address, use_cache=self.cache)
else:
res = KNXTUNNEL.group_read(self.address, use_cache=self.cache)
if res:
self._state = res[0]
self._data = res
else:
_LOGGER.debug(
"%s: unable to read from KNX address: %s (None)",
self.name, self.address
)
except KNXException:
_LOGGER.exception(
"%s: unable to read from KNX address: %s",
self.name, self.address
)
return False
class KNXMultiAddressDevice(Entity):
"""Representation of devices connected to a multiple KNX group address.
This is needed for devices like dimmers or shutter actuators as they have
to be controlled by multiple group addresses.
"""
def __init__(self, hass, config, required, optional=None):
"""Initialize the device.
The namelist argument lists the required addresses. E.g. for a dimming
actuators, the namelist might look like:
onoff_address: 0/0/1
brightness_address: 0/0/2
"""
from knxip.core import parse_group_address, KNXException
self.names = {}
self.values = {}
self._config = config
self._state = False
self._data = None
_LOGGER.debug(
"%s: initalizing KNX multi address device",
self.name
)
settings = self._config.config
if config.address:
_LOGGER.debug(
"%s: base address: address=%s",
self.name, settings.get('address')
)
self.names[config.address] = 'base'
if config.state_address:
_LOGGER.debug(
"%s, state address: state_address=%s",
self.name, settings.get('state_address')
)
self.names[config.state_address] = 'state'
# parse required addresses
for name in required:
paramname = '{}{}'.format(name, '_address')
addr = settings.get(paramname)
if addr is None:
_LOGGER.error(
"%s: Required KNX group address %s missing",
self.name, paramname
)
raise KNXException(
"%s: Group address for {} missing in "
"configuration for {}".format(
self.name, paramname
)
)
_LOGGER.debug(
"%s: (required parameter) %s=%s",
self.name, paramname, addr
)
addr = parse_group_address(addr)
self.names[addr] = name
# parse optional addresses
        for name in (optional or []):
paramname = '{}{}'.format(name, '_address')
addr = settings.get(paramname)
_LOGGER.debug(
"%s: (optional parameter) %s=%s",
self.name, paramname, addr
)
if addr:
try:
addr = parse_group_address(addr)
except KNXException:
_LOGGER.exception(
"%s: cannot parse group address %s",
self.name, addr
)
self.names[addr] = name
@property
def name(self):
"""Return the entity's display name."""
return self._config.name
@property
def config(self):
"""Return the entity's configuration."""
return self._config
@property
def should_poll(self):
"""Return the state of the polling, if needed."""
return self._config.should_poll
@property
def cache(self):
"""Return the name given to the entity."""
return self._config.config.get('cache', True)
def has_attribute(self, name):
"""Check if the attribute with the given name is defined.
This is mostly important for optional addresses.
"""
for attributename in self.names.values():
if attributename == name:
return True
return False
def set_percentage(self, name, percentage):
"""Set a percentage in knx for a given attribute.
DPT_Scaling / DPT 5.001 is a single byte scaled percentage
"""
percentage = abs(percentage) # only accept positive values
scaled_value = percentage * 255 / 100
value = min(255, scaled_value)
return self.set_int_value(name, value)
def get_percentage(self, name):
"""Get a percentage from knx for a given attribute.
DPT_Scaling / DPT 5.001 is a single byte scaled percentage
"""
value = self.get_int_value(name)
percentage = round(value * 100 / 255)
return percentage
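# Hedged example (values assumed, not taken from the component above):
# set_percentage('brightness', 50) writes round(50 * 255 / 100) = 128 to the
# bus, and get_percentage('brightness') on a raw byte of 128 returns
# round(128 * 100 / 255) = 50, so the round trip is lossless here.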
def set_int_value(self, name, value, num_bytes=1):
"""Set an integer value for a given attribute."""
# KNX packets are big endian
value = round(value) # only accept integers
b_value = value.to_bytes(num_bytes, byteorder='big')
return self.set_value(name, list(b_value))
def get_int_value(self, name):
"""Get an integer value for a given attribute."""
# KNX packets are big endian
summed_value = 0
raw_value = self.value(name)
try:
# convert raw value in bytes
for val in raw_value:
summed_value *= 256
summed_value += val
except TypeError:
# pknx returns a non-iterable type for unsuccessful reads
pass
return summed_value
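# Hedged example (bytes assumed): a bus read returning [0x01, 0x02] decodes
# as 0x01 * 256 + 0x02 = 258, and set_int_value(name, 258, num_bytes=2)
# encodes it back to the same big-endian byte pair.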
def value(self, name):
"""Return the value to a given named attribute."""
from knxip.core import KNXException
addr = None
for attributeaddress, attributename in self.names.items():
if attributename == name:
addr = attributeaddress
if addr is None:
_LOGGER.error("%s: attribute '%s' undefined",
self.name, name)
_LOGGER.debug(
"%s: defined attributes: %s",
self.name, str(self.names)
)
return False
try:
res = KNXTUNNEL.group_read(addr, use_cache=self.cache)
except KNXException:
_LOGGER.exception(
"%s: unable to read from KNX address: %s",
self.name, addr
)
return False
return res
def set_value(self, name, value):
"""Set the value of a given named attribute."""
from knxip.core import KNXException
addr = None
for attributeaddress, attributename in self.names.items():
if attributename == name:
addr = attributeaddress
if addr is None:
_LOGGER.error("%s: attribute '%s' undefined",
self.name, name)
_LOGGER.debug(
"%s: defined attributes: %s",
self.name, str(self.names)
)
return False
try:
KNXTUNNEL.group_write(addr, value)
except KNXException:
_LOGGER.exception(
"%s: unable to write to KNX address: %s",
self.name, addr
)
return False
return True
| apache-2.0 | -1,636,903,083,226,797,800 | 29.288732 | 79 | 0.558785 | false | 4.293844 | true | false | false |
mrgambal/vulyk | vulyk/models/tasks.py | 1 | 6601 | # -*- coding: utf-8 -*-
"""Module contains all models directly related to the main entity - tasks."""
from collections import namedtuple
from typing import Any, Dict, List
from bson import ObjectId
from flask_mongoengine import Document
from mongoengine import (
BooleanField,
CASCADE,
DateTimeField,
DictField,
IntField,
ListField,
ReferenceField,
StringField
)
from vulyk.models.user import User
from vulyk.signals import on_batch_done
__all__ = [
'AbstractAnswer',
'AbstractTask',
'Batch',
'BatchUpdateResult'
]
BatchUpdateResult = namedtuple('BatchUpdateResult', ['success', 'closed'])
class Batch(Document):
"""
Helper category to group tasks.
"""
id = StringField(max_length=50, primary_key=True)
task_type = StringField(max_length=50, required=True, db_field='taskType')
tasks_count = IntField(default=0, required=True, db_field='tasksCount')
tasks_processed = IntField(default=0, db_field='tasksProcessed')
closed = BooleanField(default=False, required=False)
batch_meta = DictField(db_field='batchMeta')
meta = {
'collection': 'batches',
'allow_inheritance': True,
'indexes': [
'task_type',
'closed'
]
}
@classmethod
def task_done_in(cls, batch_id: str) -> BatchUpdateResult:
"""
Increment needed values upon a task from the batch is done. In case if
all tasks are finished – close the batch.
:param batch_id: Batch ID
:type batch_id: str
:return: Aggregate which represents complex effect of the method
:rtype: BatchUpdateResult
"""
num_changed = 0
batch = cls.objects.get(id=batch_id) # type: Batch
processed = batch.tasks_processed + 1
if processed > batch.tasks_count:
return BatchUpdateResult(success=False, closed=False)
closed = processed == batch.tasks_count
update_q = {'inc__tasks_processed': 1}
if closed:
update_q['set__closed'] = closed
num_changed = cls \
.objects(id=batch.id, closed=False) \
.update(**update_q)
if num_changed == 0:
update_q.pop('set__closed', None)
closed = False
num_changed = batch.update(**update_q)
elif closed:
on_batch_done.send(batch)
return BatchUpdateResult(success=num_changed > 0, closed=closed)
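# Hedged usage sketch (batch id assumed): for a batch with tasks_count == 3,
# three successive Batch.task_done_in('batch-1') calls yield
# BatchUpdateResult(success=True, closed=False) twice, then
# (success=True, closed=True), and on_batch_done fires exactly once.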
def __str__(self) -> str:
return str(self.id)
def __repr__(self) -> str:
return 'Batch [{id}] ({processed}/{count})'.format(
id=self.id,
processed=self.tasks_processed,
count=self.tasks_count)
class AbstractTask(Document):
"""
This is the AbstractTask model.
Inherit it in your own task model.
"""
id = StringField(max_length=200, default='', primary_key=True)
task_type = StringField(max_length=50, required=True, db_field='taskType')
batch = ReferenceField(Batch, reverse_delete_rule=CASCADE)
users_count = IntField(default=0, db_field='usersCount')
users_processed = ListField(ReferenceField(User),
db_field='usersProcessed')
users_skipped = ListField(ReferenceField(User), db_field='usersSkipped')
closed = BooleanField(default=False)
task_data = DictField(required=True)
meta = {
'collection': 'tasks',
'allow_inheritance': True,
'indexes': [
'task_type',
'batch'
]
}
def as_dict(self) -> Dict[str, Any]:
"""
Converts the model-instance into a safe and lightweight dictionary.
:rtype: Dict[str, Any]
"""
return {
'id': self.id,
'closed': self.closed,
'data': self.task_data
}
@classmethod
def ids_in_batch(cls, batch: Batch) -> List[str]:
"""
Collects IDs of all tasks that belong to a certain batch.
:param batch: Batch instance
:type batch: Batch
:return: List of IDs
:rtype: List[str]
"""
return cls.objects(batch=batch).distinct('id')
def __str__(self) -> str:
return str(self.id)
def __repr__(self) -> str:
return str(self)
class AbstractAnswer(Document):
"""
This is the AbstractAnswer model.
Inherit it in your own answer model.
"""
task = ReferenceField(AbstractTask, reverse_delete_rule=CASCADE)
created_by = ReferenceField(User, reverse_delete_rule=CASCADE,
db_field='createdBy')
created_at = DateTimeField(db_field='createdAt')
task_type = StringField(max_length=50, required=True, db_field='taskType')
# not sure - could be extended
result = DictField()
meta = {
'collection': 'reports',
'allow_inheritance': True,
'indexes': [
'task',
'created_by',
'created_at',
{
'fields': ['created_by', 'task'],
'unique': True
}
]
}
# TODO: decide, if we need it at all
@property
def corrections(self) -> int:
"""
Returns the total number of actions/corrections made by the user in this
particular answer.
:return: Count of corrections in this answer
:rtype: int
"""
return 1
@corrections.setter
def corrections(self, value: int) -> None:
pass
@corrections.deleter
def corrections(self) -> None:
pass
@classmethod
def answers_numbers_by_tasks(cls, task_ids: List[str]) -> Dict[ObjectId, int]:
"""
Groups answers, filtered by the tasks they belong to, by user and counts
the number of answers for every user.
:param task_ids: List of tasks IDs
:type task_ids: List[str]
:return: Map having user IDs as keys and answers numbers as values
:rtype: Dict[ObjectId, int]
"""
return cls.objects(task__in=task_ids).item_frequencies('created_by')
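# Hedged example of the result shape (ids are placeholders):
# {ObjectId('...'): 12, ObjectId('...'): 3} means the first user gave 12
# answers and the second gave 3 across the passed tasks.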
def as_dict(self) -> Dict[str, Dict]:
"""
Converts the model-instance into a safe dictionary that also includes
the related task and user.
:rtype: Dict[str, Dict]
"""
return {
'task': self.task.as_dict(),
'answer': self.result,
'user': self.created_by.as_dict()
}
def __str__(self) -> str:
return str(self.pk)
def __repr__(self) -> str:
return 'Report [{} by {}]'.format(self.created_by, self.task)
| bsd-3-clause | -6,040,339,416,882,159,000 | 26.961864 | 82 | 0.579936 | false | 4.009113 | false | false | false |
Yellowen/Owrang | stock/doctype/stock_entry/stock_entry.py | 1 | 35585 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
import webnotes.defaults
from webnotes.utils import cstr, cint, flt, comma_or, nowdate
from webnotes.model.doc import Document, addchild
from webnotes.model.bean import getlist
from webnotes.model.code import get_obj
from webnotes import msgprint, _
from stock.utils import get_incoming_rate
from stock.stock_ledger import get_previous_sle
from controllers.queries import get_match_cond
import json
sql = webnotes.conn.sql
class NotUpdateStockError(webnotes.ValidationError): pass
class StockOverReturnError(webnotes.ValidationError): pass
class IncorrectValuationRateError(webnotes.ValidationError): pass
class DuplicateEntryForProductionOrderError(webnotes.ValidationError): pass
from controllers.stock_controller import StockController
class DocType(StockController):
def __init__(self, doc, doclist=None):
self.doc = doc
self.doclist = doclist
self.fname = 'mtn_details'
def validate(self):
self.validate_posting_time()
self.validate_purpose()
pro_obj = self.doc.production_order and \
get_obj('Production Order', self.doc.production_order) or None
self.validate_item()
self.validate_uom_is_integer("uom", "qty")
self.validate_uom_is_integer("stock_uom", "transfer_qty")
self.validate_warehouse(pro_obj)
self.validate_production_order(pro_obj)
self.get_stock_and_rate()
self.validate_incoming_rate()
self.validate_bom()
self.validate_finished_goods()
self.validate_return_reference_doc()
self.validate_with_material_request()
self.validate_fiscal_year()
self.set_total_amount()
def on_submit(self):
self.update_stock_ledger()
self.update_serial_no(1)
self.update_production_order(1)
self.make_gl_entries()
def on_cancel(self):
self.update_stock_ledger()
self.update_serial_no(0)
self.update_production_order(0)
self.make_cancel_gl_entries()
def validate_fiscal_year(self):
import accounts.utils
accounts.utils.validate_fiscal_year(self.doc.posting_date, self.doc.fiscal_year,
self.meta.get_label("posting_date"))
def validate_purpose(self):
valid_purposes = ["Material Issue", "Material Receipt", "Material Transfer",
"Manufacture/Repack", "Subcontract", "Sales Return", "Purchase Return"]
if self.doc.purpose not in valid_purposes:
msgprint(_("Purpose must be one of ") + comma_or(valid_purposes),
raise_exception=True)
def validate_item(self):
stock_items = self.get_stock_items()
for item in self.doclist.get({"parentfield": "mtn_details"}):
if item.item_code not in stock_items:
msgprint(_("""Only Stock Items are allowed for Stock Entry"""),
raise_exception=True)
def validate_warehouse(self, pro_obj):
"""perform various (sometimes conditional) validations on warehouse"""
source_mandatory = ["Material Issue", "Material Transfer", "Purchase Return"]
target_mandatory = ["Material Receipt", "Material Transfer", "Sales Return"]
validate_for_manufacture_repack = any([d.bom_no for d in self.doclist.get(
{"parentfield": "mtn_details"})])
if self.doc.purpose in source_mandatory and self.doc.purpose not in target_mandatory:
self.doc.to_warehouse = None
for d in getlist(self.doclist, 'mtn_details'):
d.t_warehouse = None
elif self.doc.purpose in target_mandatory and self.doc.purpose not in source_mandatory:
self.doc.from_warehouse = None
for d in getlist(self.doclist, 'mtn_details'):
d.s_warehouse = None
for d in getlist(self.doclist, 'mtn_details'):
if not d.s_warehouse and not d.t_warehouse:
d.s_warehouse = self.doc.from_warehouse
d.t_warehouse = self.doc.to_warehouse
if not (d.s_warehouse or d.t_warehouse):
msgprint(_("Atleast one warehouse is mandatory"), raise_exception=1)
if self.doc.purpose in source_mandatory and not d.s_warehouse:
msgprint(_("Row # ") + "%s: " % cint(d.idx)
+ _("Source Warehouse") + _(" is mandatory"), raise_exception=1)
if self.doc.purpose in target_mandatory and not d.t_warehouse:
msgprint(_("Row # ") + "%s: " % cint(d.idx)
+ _("Target Warehouse") + _(" is mandatory"), raise_exception=1)
if self.doc.purpose == "Manufacture/Repack":
if validate_for_manufacture_repack:
if d.bom_no:
d.s_warehouse = None
if not d.t_warehouse:
msgprint(_("Row # ") + "%s: " % cint(d.idx)
+ _("Target Warehouse") + _(" is mandatory"), raise_exception=1)
elif pro_obj and cstr(d.t_warehouse) != pro_obj.doc.fg_warehouse:
msgprint(_("Row # ") + "%s: " % cint(d.idx)
+ _("Target Warehouse") + _(" should be same as that in ")
+ _("Production Order"), raise_exception=1)
else:
d.t_warehouse = None
if not d.s_warehouse:
msgprint(_("Row # ") + "%s: " % cint(d.idx)
+ _("Source Warehouse") + _(" is mandatory"), raise_exception=1)
if cstr(d.s_warehouse) == cstr(d.t_warehouse):
msgprint(_("Source and Target Warehouse cannot be same"),
raise_exception=1)
def validate_production_order(self, pro_obj=None):
if not pro_obj:
if self.doc.production_order:
pro_obj = get_obj('Production Order', self.doc.production_order)
else:
return
if self.doc.purpose == "Manufacture/Repack":
# check for double entry
self.check_duplicate_entry_for_production_order()
elif self.doc.purpose != "Material Transfer":
self.doc.production_order = None
def check_duplicate_entry_for_production_order(self):
other_ste = [t[0] for t in webnotes.conn.get_values("Stock Entry", {
"production_order": self.doc.production_order,
"purpose": self.doc.purpose,
"docstatus": ["!=", 2],
"name": ["!=", self.doc.name]
}, "name")]
if other_ste:
production_item, qty = webnotes.conn.get_value("Production Order",
self.doc.production_order, ["production_item", "qty"])
args = other_ste + [production_item]
fg_qty_already_entered = webnotes.conn.sql("""select sum(actual_qty)
from `tabStock Entry Detail`
where parent in (%s)
and item_code = %s
and ifnull(s_warehouse,'')='' """ % (", ".join(["%s"] * len(other_ste)), "%s"), args)[0][0]
if fg_qty_already_entered >= qty:
webnotes.throw(_("Stock Entries already created for Production Order ")
+ self.doc.production_order + ":" + ", ".join(other_ste), DuplicateEntryForProductionOrderError)
def set_total_amount(self):
self.doc.total_amount = sum([flt(item.amount) for item in self.doclist.get({"parentfield": "mtn_details"})])
def get_stock_and_rate(self):
"""get stock and incoming rate on posting date"""
for d in getlist(self.doclist, 'mtn_details'):
args = webnotes._dict({
"item_code": d.item_code,
"warehouse": d.s_warehouse or d.t_warehouse,
"posting_date": self.doc.posting_date,
"posting_time": self.doc.posting_time,
"qty": d.s_warehouse and -1*d.transfer_qty or d.transfer_qty,
"serial_no": d.serial_no,
"bom_no": d.bom_no,
})
# get actual stock at source warehouse
d.actual_qty = get_previous_sle(args).get("qty_after_transaction") or 0
# get incoming rate
if not flt(d.incoming_rate):
d.incoming_rate = self.get_incoming_rate(args)
d.amount = flt(d.transfer_qty) * flt(d.incoming_rate)
def get_incoming_rate(self, args):
incoming_rate = 0
if self.doc.purpose == "Sales Return" and \
(self.doc.delivery_note_no or self.doc.sales_invoice_no):
sle = webnotes.conn.sql("""select name, posting_date, posting_time,
actual_qty, stock_value, warehouse from `tabStock Ledger Entry`
where voucher_type = %s and voucher_no = %s and
item_code = %s limit 1""",
((self.doc.delivery_note_no and "Delivery Note" or "Sales Invoice"),
self.doc.delivery_note_no or self.doc.sales_invoice_no, args.item_code), as_dict=1)
if sle:
args.update({
"posting_date": sle[0].posting_date,
"posting_time": sle[0].posting_time,
"sle": sle[0].name,
"warehouse": sle[0].warehouse,
})
previous_sle = get_previous_sle(args)
incoming_rate = (flt(sle[0].stock_value) - flt(previous_sle.get("stock_value"))) / \
flt(sle[0].actual_qty)
else:
incoming_rate = get_incoming_rate(args)
return incoming_rate
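# Hedged worked example (figures assumed): if the original outgoing SLE shows
# actual_qty -2 with stock_value falling from 900 to 700, the returned goods
# are valued at (700 - 900) / -2 = 100 per unit, matching the rate at which
# they originally left the warehouse.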
def validate_incoming_rate(self):
for d in getlist(self.doclist, 'mtn_details'):
if d.t_warehouse:
self.validate_value("incoming_rate", ">", 0, d, raise_exception=IncorrectValuationRateError)
def validate_bom(self):
for d in getlist(self.doclist, 'mtn_details'):
if d.bom_no and not webnotes.conn.sql("""select name from `tabBOM`
where item = %s and name = %s and docstatus = 1 and is_active = 1""",
(d.item_code, d.bom_no)):
msgprint(_("Item") + " %s: " % cstr(d.item_code)
+ _("does not belong to BOM: ") + cstr(d.bom_no)
+ _(" or the BOM is cancelled or inactive"), raise_exception=1)
def validate_finished_goods(self):
"""validation: finished good quantity should be same as manufacturing quantity"""
for d in getlist(self.doclist, 'mtn_details'):
if d.bom_no and flt(d.transfer_qty) != flt(self.doc.fg_completed_qty):
msgprint(_("Row #") + " %s: " % d.idx
+ _("Quantity should be equal to Manufacturing Quantity. ")
+ _("To fetch items again, click on 'Get Items' button \
or update the Quantity manually."), raise_exception=1)
def validate_return_reference_doc(self):
"""validate item with reference doc"""
ref = get_return_doclist_and_details(self.doc.fields)
if ref.doclist:
# validate docstatus
if ref.doclist[0].docstatus != 1:
webnotes.msgprint(_(ref.doclist[0].doctype) + ' "' + ref.doclist[0].name + '": '
+ _("Status should be Submitted"), raise_exception=webnotes.InvalidStatusError)
# update stock check
if ref.doclist[0].doctype == "Sales Invoice" and cint(ref.doclist[0].update_stock) != 1:
webnotes.msgprint(_(ref.doclist[0].doctype) + ' "' + ref.doclist[0].name + '": '
+ _("Update Stock should be checked."),
raise_exception=NotUpdateStockError)
# posting date check
ref_posting_datetime = "%s %s" % (cstr(ref.doclist[0].posting_date),
cstr(ref.doclist[0].posting_time) or "00:00:00")
this_posting_datetime = "%s %s" % (cstr(self.doc.posting_date),
cstr(self.doc.posting_time))
if this_posting_datetime < ref_posting_datetime:
from webnotes.utils.dateutils import datetime_in_user_format
webnotes.msgprint(_("Posting Date Time cannot be before")
+ ": " + datetime_in_user_format(ref_posting_datetime),
raise_exception=True)
stock_items = get_stock_items_for_return(ref.doclist, ref.parentfields)
already_returned_item_qty = self.get_already_returned_item_qty(ref.fieldname)
for item in self.doclist.get({"parentfield": "mtn_details"}):
# validate if item exists in the ref doclist and that it is a stock item
if item.item_code not in stock_items:
msgprint(_("Item") + ': "' + item.item_code + _("\" does not exist in ") +
ref.doclist[0].doctype + ": " + ref.doclist[0].name,
raise_exception=webnotes.DoesNotExistError)
# validate quantity <= ref item's qty - qty already returned
ref_item = ref.doclist.getone({"item_code": item.item_code})
returnable_qty = ref_item.qty - flt(already_returned_item_qty.get(item.item_code))
self.validate_value("transfer_qty", "<=", returnable_qty, item,
raise_exception=StockOverReturnError)
def get_already_returned_item_qty(self, ref_fieldname):
return dict(webnotes.conn.sql("""select item_code, sum(transfer_qty) as qty
from `tabStock Entry Detail` where parent in (
select name from `tabStock Entry` where `%s`=%s and docstatus=1)
group by item_code""" % (ref_fieldname, "%s"), (self.doc.fields.get(ref_fieldname),)))
def update_serial_no(self, is_submit):
"""Create / Update Serial No"""
from stock.doctype.stock_ledger_entry.stock_ledger_entry import update_serial_nos_after_submit, get_serial_nos
update_serial_nos_after_submit(self, "Stock Entry", "mtn_details")
for d in getlist(self.doclist, 'mtn_details'):
for serial_no in get_serial_nos(d.serial_no):
if self.doc.purpose == 'Purchase Return':
sr = webnotes.bean("Serial No", serial_no)
sr.doc.status = "Purchase Returned" if is_submit else "Available"
sr.save()
if self.doc.purpose == "Sales Return":
sr = webnotes.bean("Serial No", serial_no)
sr.doc.status = "Sales Returned" if is_submit else "Delivered"
sr.save()
def update_stock_ledger(self):
sl_entries = []
for d in getlist(self.doclist, 'mtn_details'):
if cstr(d.s_warehouse) and self.doc.docstatus == 1:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.s_warehouse),
"actual_qty": -flt(d.transfer_qty),
"incoming_rate": 0
}))
if cstr(d.t_warehouse):
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.t_warehouse),
"actual_qty": flt(d.transfer_qty),
"incoming_rate": flt(d.incoming_rate)
}))
# On cancellation, make stock ledger entry for
# target warehouse first, to update serial no values properly
if cstr(d.s_warehouse) and self.doc.docstatus == 2:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.s_warehouse),
"actual_qty": -flt(d.transfer_qty),
"incoming_rate": 0
}))
self.make_sl_entries(sl_entries, self.doc.amended_from and 'Yes' or 'No')
def update_production_order(self, is_submit):
if self.doc.production_order:
# first perform some validations
# (they are here because this function is also called during on_cancel)
pro_obj = get_obj("Production Order", self.doc.production_order)
if flt(pro_obj.doc.docstatus) != 1:
msgprint("""You cannot do any transaction against
Production Order : %s, as it's not submitted"""
% (pro_obj.doc.name), raise_exception=1)
if pro_obj.doc.status == 'Stopped':
msgprint("""You cannot do any transaction against Production Order : %s,
as it's status is 'Stopped'"""% (pro_obj.doc.name), raise_exception=1)
# update bin
if self.doc.purpose == "Manufacture/Repack":
from stock.utils import update_bin
pro_obj.doc.produced_qty = flt(pro_obj.doc.produced_qty) + \
(is_submit and 1 or -1 ) * flt(self.doc.fg_completed_qty)
args = {
"item_code": pro_obj.doc.production_item,
"warehouse": pro_obj.doc.fg_warehouse,
"posting_date": self.doc.posting_date,
"planned_qty": (is_submit and -1 or 1 ) * flt(self.doc.fg_completed_qty)
}
update_bin(args)
# update production order status
pro_obj.doc.status = (flt(pro_obj.doc.qty)==flt(pro_obj.doc.produced_qty)) \
and 'Completed' or 'In Process'
pro_obj.doc.save()
def get_item_details(self, arg):
arg = json.loads(arg)
item = sql("""select stock_uom, description, item_name from `tabItem`
where name = %s and (ifnull(end_of_life,'')='' or end_of_life ='0000-00-00'
or end_of_life > now())""", (arg.get('item_code')), as_dict = 1)
if not item:
msgprint("Item is not active", raise_exception=1)
ret = {
'uom' : item and item[0]['stock_uom'] or '',
'stock_uom' : item and item[0]['stock_uom'] or '',
'description' : item and item[0]['description'] or '',
'item_name' : item and item[0]['item_name'] or '',
'qty' : 0,
'transfer_qty' : 0,
'conversion_factor' : 1,
'batch_no' : '',
'actual_qty' : 0,
'incoming_rate' : 0
}
stock_and_rate = arg.get('warehouse') and self.get_warehouse_details(json.dumps(arg)) or {}
ret.update(stock_and_rate)
return ret
def get_uom_details(self, arg = ''):
arg, ret = eval(arg), {}
uom = sql("""select conversion_factor from `tabUOM Conversion Detail`
where parent = %s and uom = %s""", (arg['item_code'], arg['uom']), as_dict = 1)
if not uom or not flt(uom[0].conversion_factor):
msgprint("There is no Conversion Factor for UOM '%s' in Item '%s'" % (arg['uom'],
arg['item_code']))
ret = {'uom' : ''}
else:
ret = {
'conversion_factor' : flt(uom[0]['conversion_factor']),
'transfer_qty' : flt(arg['qty']) * flt(uom[0]['conversion_factor']),
}
return ret
def get_warehouse_details(self, args):
args = json.loads(args)
ret = {}
if args.get('warehouse') and args.get('item_code'):
args.update({
"posting_date": self.doc.posting_date,
"posting_time": self.doc.posting_time,
})
args = webnotes._dict(args)
ret = {
"actual_qty" : get_previous_sle(args).get("qty_after_transaction") or 0,
"incoming_rate" : self.get_incoming_rate(args)
}
return ret
def get_items(self):
self.doclist = self.doc.clear_table(self.doclist, 'mtn_details', 1)
pro_obj = None
if self.doc.production_order:
# common validations
pro_obj = get_obj('Production Order', self.doc.production_order)
if pro_obj:
self.validate_production_order(pro_obj)
self.doc.bom_no = pro_obj.doc.bom_no
else:
# invalid production order
self.doc.production_order = None
if self.doc.bom_no:
if self.doc.purpose in ["Material Issue", "Material Transfer", "Manufacture/Repack",
"Subcontract"]:
if self.doc.production_order and self.doc.purpose == "Material Transfer":
item_dict = self.get_pending_raw_materials(pro_obj)
else:
item_dict = self.get_bom_raw_materials(self.doc.fg_completed_qty)
for item in item_dict.values():
if pro_obj:
item["from_warehouse"] = pro_obj.doc.wip_warehouse
item["to_warehouse"] = ""
# add raw materials to Stock Entry Detail table
self.add_to_stock_entry_detail(item_dict)
# add finished good item to Stock Entry Detail table -- along with bom_no
if self.doc.production_order and self.doc.purpose == "Manufacture/Repack":
self.add_to_stock_entry_detail({
cstr(pro_obj.doc.production_item): {
"to_warehouse": pro_obj.doc.fg_warehouse,
"from_warehouse": "",
"qty": self.doc.fg_completed_qty,
"description": pro_obj.doc.description,
"stock_uom": pro_obj.doc.stock_uom
}
}, bom_no=pro_obj.doc.bom_no)
elif self.doc.purpose in ["Material Receipt", "Manufacture/Repack"]:
if self.doc.purpose=="Material Receipt":
self.doc.from_warehouse = ""
item = webnotes.conn.sql("""select item, description, uom from `tabBOM`
where name=%s""", (self.doc.bom_no,), as_dict=1)
self.add_to_stock_entry_detail({
item[0]["item"] : {
"qty": self.doc.fg_completed_qty,
"description": item[0]["description"],
"stock_uom": item[0]["uom"],
"from_warehouse": ""
}
}, bom_no=self.doc.bom_no)
self.get_stock_and_rate()
def get_bom_raw_materials(self, qty):
"""
get all items from flat bom except
child items of sub-contracted and sub assembly items
and the sub-assembly items themselves.
"""
# item dict = { item_code: {qty, description, stock_uom} }
item_dict = {}
def _make_items_dict(items_list):
"""makes dict of unique items with it's qty"""
for item in items_list:
if item_dict.has_key(item.item_code):
item_dict[item.item_code]["qty"] += flt(item.qty)
else:
item_dict[item.item_code] = {
"qty": flt(item.qty),
"description": item.description,
"stock_uom": item.stock_uom,
"from_warehouse": item.default_warehouse
}
if self.doc.use_multi_level_bom:
# get all raw materials with sub assembly childs
fl_bom_sa_child_item = sql("""select
fb.item_code,
ifnull(sum(fb.qty_consumed_per_unit),0)*%s as qty,
fb.description,
fb.stock_uom,
it.default_warehouse
from
`tabBOM Explosion Item` fb,`tabItem` it
where
it.name = fb.item_code
and ifnull(it.is_pro_applicable, 'No') = 'No'
and ifnull(it.is_sub_contracted_item, 'No') = 'No'
and fb.docstatus < 2
and fb.parent=%s group by item_code, stock_uom""",
(qty, self.doc.bom_no), as_dict=1)
if fl_bom_sa_child_item:
_make_items_dict(fl_bom_sa_child_item)
else:
# get only BOM items
fl_bom_sa_items = sql("""select
`tabItem`.item_code,
ifnull(sum(`tabBOM Item`.qty_consumed_per_unit), 0) *%s as qty,
`tabItem`.description,
`tabItem`.stock_uom,
`tabItem`.default_warehouse
from
`tabBOM Item`, `tabItem`
where
`tabBOM Item`.parent = %s and
`tabBOM Item`.item_code = tabItem.name and
`tabBOM Item`.docstatus < 2
group by item_code""", (qty, self.doc.bom_no), as_dict=1)
if fl_bom_sa_items:
_make_items_dict(fl_bom_sa_items)
return item_dict
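# Hedged example of the returned shape (item codes and figures assumed):
# {"RM-001": {"qty": 2.0, "description": "Raw material", "stock_uom": "Nos",
#             "from_warehouse": "Stores"}}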
def get_pending_raw_materials(self, pro_obj):
"""
Issue the item quantity that is still pending for the production order
or the quantity the user wants to transfer, whichever is less.
"""
item_dict = self.get_bom_raw_materials(1)
issued_item_qty = self.get_issued_qty()
max_qty = flt(pro_obj.doc.qty)
only_pending_fetched = []
for item in item_dict:
pending_to_issue = (max_qty * item_dict[item]["qty"]) - issued_item_qty.get(item, 0)
desire_to_transfer = flt(self.doc.fg_completed_qty) * item_dict[item]["qty"]
if desire_to_transfer <= pending_to_issue:
item_dict[item]["qty"] = desire_to_transfer
else:
item_dict[item]["qty"] = pending_to_issue
if pending_to_issue:
only_pending_fetched.append(item)
# delete items with 0 qty
for item in item_dict.keys():
if not item_dict[item]["qty"]:
del item_dict[item]
# show some message
if not len(item_dict):
webnotes.msgprint(_("""All items have already been transferred \
for this Production Order."""))
elif only_pending_fetched:
webnotes.msgprint(_("""Only quantities pending to be transferred \
were fetched for the following items:\n""" + "\n".join(only_pending_fetched)))
return item_dict
def get_issued_qty(self):
issued_item_qty = {}
result = sql("""select t1.item_code, sum(t1.qty)
from `tabStock Entry Detail` t1, `tabStock Entry` t2
where t1.parent = t2.name and t2.production_order = %s and t2.docstatus = 1
and t2.purpose = 'Material Transfer'
group by t1.item_code""", self.doc.production_order)
for t in result:
issued_item_qty[t[0]] = flt(t[1])
return issued_item_qty
def add_to_stock_entry_detail(self, item_dict, bom_no=None):
for d in item_dict:
se_child = addchild(self.doc, 'mtn_details', 'Stock Entry Detail',
self.doclist)
se_child.s_warehouse = item_dict[d].get("from_warehouse", self.doc.from_warehouse)
se_child.t_warehouse = item_dict[d].get("to_warehouse", self.doc.to_warehouse)
se_child.item_code = cstr(d)
se_child.description = item_dict[d]["description"]
se_child.uom = item_dict[d]["stock_uom"]
se_child.stock_uom = item_dict[d]["stock_uom"]
se_child.qty = flt(item_dict[d]["qty"])
# in stock uom
se_child.transfer_qty = flt(item_dict[d]["qty"])
se_child.conversion_factor = 1.00
# to be assigned for finished item
se_child.bom_no = bom_no
def get_cust_values(self):
"""fetches customer details"""
if self.doc.delivery_note_no:
doctype = "Delivery Note"
name = self.doc.delivery_note_no
else:
doctype = "Sales Invoice"
name = self.doc.sales_invoice_no
result = webnotes.conn.sql("""select customer, customer_name,
address_display as customer_address
from `tab%s` where name=%s""" % (doctype, "%s"), (name,), as_dict=1)
return result and result[0] or {}
def get_cust_addr(self):
from utilities.transaction_base import get_default_address, get_address_display
res = sql("select customer_name from `tabCustomer` where name = '%s'"%self.doc.customer)
address_display = None
customer_address = get_default_address("customer", self.doc.customer)
if customer_address:
address_display = get_address_display(customer_address)
ret = {
'customer_name' : res and res[0][0] or '',
'customer_address' : address_display}
return ret
def get_supp_values(self):
result = webnotes.conn.sql("""select supplier, supplier_name,
address_display as supplier_address
from `tabPurchase Receipt` where name=%s""", (self.doc.purchase_receipt_no,),
as_dict=1)
return result and result[0] or {}
def get_supp_addr(self):
from utilities.transaction_base import get_default_address, get_address_display
res = sql("""select supplier_name from `tabSupplier`
where name=%s""", self.doc.supplier)
address_display = None
supplier_address = get_default_address("customer", self.doc.customer)
if supplier_address:
address_display = get_address_display(supplier_address)
ret = {
'supplier_name' : res and res[0][0] or '',
'supplier_address' : address_display }
return ret
def validate_with_material_request(self):
for item in self.doclist.get({"parentfield": "mtn_details"}):
if item.material_request:
mreq_item = webnotes.conn.get_value("Material Request Item",
{"name": item.material_request_item, "parent": item.material_request},
["item_code", "warehouse", "idx"], as_dict=True)
if mreq_item.item_code != item.item_code or mreq_item.warehouse != item.t_warehouse:
msgprint(_("Row #") + (" %d: " % item.idx) + _("does not match")
+ " " + _("Row #") + (" %d %s " % (mreq_item.idx, _("of")))
+ _("Material Request") + (" - %s" % item.material_request),
raise_exception=webnotes.MappingMismatchError)
@webnotes.whitelist()
def get_production_order_details(production_order):
result = webnotes.conn.sql("""select bom_no,
ifnull(qty, 0) - ifnull(produced_qty, 0) as fg_completed_qty, use_multi_level_bom
from `tabProduction Order` where name = %s""", production_order, as_dict=1)
return result and result[0] or {}
def query_sales_return_doc(doctype, txt, searchfield, start, page_len, filters):
conditions = ""
if doctype == "Sales Invoice":
conditions = "and update_stock=1"
return webnotes.conn.sql("""select name, customer, customer_name
from `tab%s` where docstatus = 1
and (`%s` like %%(txt)s
or `customer` like %%(txt)s) %s %s
order by name, customer, customer_name
limit %s""" % (doctype, searchfield, conditions,
get_match_cond(doctype, searchfield), "%(start)s, %(page_len)s"),
{"txt": "%%%s%%" % txt, "start": start, "page_len": page_len},
as_list=True)
def query_purchase_return_doc(doctype, txt, searchfield, start, page_len, filters):
return webnotes.conn.sql("""select name, supplier, supplier_name
from `tab%s` where docstatus = 1
and (`%s` like %%(txt)s
or `supplier` like %%(txt)s) %s
order by name, supplier, supplier_name
limit %s""" % (doctype, searchfield, get_match_cond(doctype, searchfield),
"%(start)s, %(page_len)s"), {"txt": "%%%s%%" % txt, "start":
start, "page_len": page_len}, as_list=True)
def query_return_item(doctype, txt, searchfield, start, page_len, filters):
txt = txt.replace("%", "")
ref = get_return_doclist_and_details(filters)
stock_items = get_stock_items_for_return(ref.doclist, ref.parentfields)
result = []
for item in ref.doclist.get({"parentfield": ["in", ref.parentfields]}):
if item.item_code in stock_items:
item.item_name = cstr(item.item_name)
item.description = cstr(item.description)
if (txt in item.item_code) or (txt in item.item_name) or (txt in item.description):
val = [
item.item_code,
(len(item.item_name) > 40) and (item.item_name[:40] + "...") or item.item_name,
(len(item.description) > 40) and (item.description[:40] + "...") or \
item.description
]
if val not in result:
result.append(val)
return result[start:start+page_len]
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
if not filters.get("posting_date"):
filters["posting_date"] = nowdate()
batch_nos = None
args = {
'item_code': filters['item_code'],
's_warehouse': filters['s_warehouse'],
'posting_date': filters['posting_date'],
'txt': "%%%s%%" % txt,
'mcond':get_match_cond(doctype, searchfield),
"start": start,
"page_len": page_len
}
if filters.get("s_warehouse"):
batch_nos = webnotes.conn.sql("""select batch_no
from `tabStock Ledger Entry` sle
where item_code = '%(item_code)s'
and warehouse = '%(s_warehouse)s'
and batch_no like '%(txt)s'
and exists(select * from `tabBatch`
where name = sle.batch_no
and (ifnull(expiry_date, '2099-12-31') >= %(posting_date)s
or expiry_date = '')
and docstatus != 2)
%(mcond)s
group by batch_no having sum(actual_qty) > 0
order by batch_no desc
limit %(start)s, %(page_len)s """
% args)
if batch_nos:
return batch_nos
else:
return webnotes.conn.sql("""select name from `tabBatch`
where item = '%(item_code)s'
and docstatus < 2
and (ifnull(expiry_date, '2099-12-31') >= %(posting_date)s
or expiry_date = '' or expiry_date = "0000-00-00")
%(mcond)s
order by name desc
limit %(start)s, %(page_len)s
""" % args)
def get_stock_items_for_return(ref_doclist, parentfields):
"""return item codes filtered from doclist, which are stock items"""
if isinstance(parentfields, basestring):
parentfields = [parentfields]
all_items = list(set([d.item_code for d in
ref_doclist.get({"parentfield": ["in", parentfields]})]))
stock_items = webnotes.conn.sql_list("""select name from `tabItem`
where is_stock_item='Yes' and name in (%s)""" % (", ".join(["%s"] * len(all_items))),
tuple(all_items))
return stock_items
def get_return_doclist_and_details(args):
ref = webnotes._dict()
# get ref_doclist
if args["purpose"] in return_map:
for fieldname, val in return_map[args["purpose"]].items():
if args.get(fieldname):
ref.fieldname = fieldname
ref.doclist = webnotes.get_doclist(val[0], args[fieldname])
ref.parentfields = val[1]
break
return ref
return_map = {
"Sales Return": {
# [Ref DocType, [Item tables' parentfields]]
"delivery_note_no": ["Delivery Note", ["delivery_note_details", "packing_details"]],
"sales_invoice_no": ["Sales Invoice", ["entries", "packing_details"]]
},
"Purchase Return": {
"purchase_receipt_no": ["Purchase Receipt", ["purchase_receipt_details"]]
}
}
@webnotes.whitelist()
def make_return_jv(stock_entry):
se = webnotes.bean("Stock Entry", stock_entry)
if not se.doc.purpose in ["Sales Return", "Purchase Return"]:
return
ref = get_return_doclist_and_details(se.doc.fields)
if ref.doclist[0].doctype == "Delivery Note":
result = make_return_jv_from_delivery_note(se, ref)
elif ref.doclist[0].doctype == "Sales Invoice":
result = make_return_jv_from_sales_invoice(se, ref)
elif ref.doclist[0].doctype == "Purchase Receipt":
result = make_return_jv_from_purchase_receipt(se, ref)
# create jv doclist and fetch balance for each unique row item
jv_list = [{
"__islocal": 1,
"doctype": "Journal Voucher",
"posting_date": se.doc.posting_date,
"voucher_type": se.doc.purpose == "Sales Return" and "Credit Note" or "Debit Note",
"fiscal_year": se.doc.fiscal_year,
"company": se.doc.company
}]
from accounts.utils import get_balance_on
for r in result:
jv_list.append({
"__islocal": 1,
"doctype": "Journal Voucher Detail",
"parentfield": "entries",
"account": r.get("account"),
"against_invoice": r.get("against_invoice"),
"against_voucher": r.get("against_voucher"),
"balance": get_balance_on(r.get("account"), se.doc.posting_date) \
if r.get("account") else 0
})
return jv_list
def make_return_jv_from_sales_invoice(se, ref):
# customer account entry
parent = {
"account": ref.doclist[0].debit_to,
"against_invoice": ref.doclist[0].name,
}
# income account entries
children = []
for se_item in se.doclist.get({"parentfield": "mtn_details"}):
# find item in ref.doclist
ref_item = ref.doclist.getone({"item_code": se_item.item_code})
account = get_sales_account_from_item(ref.doclist, ref_item)
if account not in children:
children.append(account)
return [parent] + [{"account": account} for account in children]
def get_sales_account_from_item(doclist, ref_item):
account = None
if not ref_item.income_account:
if ref_item.parent_item:
parent_item = doclist.getone({"item_code": ref_item.parent_item})
account = parent_item.income_account
else:
account = ref_item.income_account
return account
def make_return_jv_from_delivery_note(se, ref):
invoices_against_delivery = get_invoice_list("Sales Invoice Item", "delivery_note",
ref.doclist[0].name)
if not invoices_against_delivery:
sales_orders_against_delivery = [d.prevdoc_docname for d in
ref.doclist.get({"prevdoc_doctype": "Sales Order"}) if d.prevdoc_docname]
if sales_orders_against_delivery:
invoices_against_delivery = get_invoice_list("Sales Invoice Item", "sales_order",
sales_orders_against_delivery)
if not invoices_against_delivery:
return []
packing_item_parent_map = dict([[d.item_code, d.parent_item] for d in ref.doclist.get(
{"parentfield": ref.parentfields[1]})])
parent = {}
children = []
for se_item in se.doclist.get({"parentfield": "mtn_details"}):
for sales_invoice in invoices_against_delivery:
si = webnotes.bean("Sales Invoice", sales_invoice)
if se_item.item_code in packing_item_parent_map:
ref_item = si.doclist.get({"item_code": packing_item_parent_map[se_item.item_code]})
else:
ref_item = si.doclist.get({"item_code": se_item.item_code})
if not ref_item:
continue
ref_item = ref_item[0]
account = get_sales_account_from_item(si.doclist, ref_item)
if account not in children:
children.append(account)
if not parent:
parent = {"account": si.doc.debit_to}
break
if len(invoices_against_delivery) == 1:
parent["against_invoice"] = invoices_against_delivery[0]
result = [parent] + [{"account": account} for account in children]
return result
def get_invoice_list(doctype, link_field, value):
if isinstance(value, basestring):
value = [value]
return webnotes.conn.sql_list("""select distinct parent from `tab%s`
where docstatus = 1 and `%s` in (%s)""" % (doctype, link_field,
", ".join(["%s"]*len(value))), tuple(value))
def make_return_jv_from_purchase_receipt(se, ref):
invoice_against_receipt = get_invoice_list("Purchase Invoice Item", "purchase_receipt",
ref.doclist[0].name)
if not invoice_against_receipt:
purchase_orders_against_receipt = [d.prevdoc_docname for d in
ref.doclist.get({"prevdoc_doctype": "Purchase Order"}) if d.prevdoc_docname]
if purchase_orders_against_receipt:
invoice_against_receipt = get_invoice_list("Purchase Invoice Item", "purchase_order",
purchase_orders_against_receipt)
if not invoice_against_receipt:
return []
parent = {}
children = []
for se_item in se.doclist.get({"parentfield": "mtn_details"}):
for purchase_invoice in invoice_against_receipt:
pi = webnotes.bean("Purchase Invoice", purchase_invoice)
ref_item = pi.doclist.get({"item_code": se_item.item_code})
if not ref_item:
continue
ref_item = ref_item[0]
account = ref_item.expense_head
if account not in children:
children.append(account)
if not parent:
parent = {"account": pi.doc.credit_to}
break
if len(invoice_against_receipt) == 1:
parent["against_voucher"] = invoice_against_receipt[0]
result = [parent] + [{"account": account} for account in children]
return result
| agpl-3.0 | -4,433,923,836,272,466,400 | 34.656313 | 112 | 0.658423 | false | 3.018492 | false | false | false |
koalalorenzo/greatdiary | main.py | 1 | 15891 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import gtk
import webkit
import gobject
import xml.dom.minidom
from xml.dom.minidom import Node
import time
import os
import sys
import crypt, random, string
import libcrypt
from sqlite3 import dbapi2 as sqlite
gobject.threads_init()
def dialog_info(info):
"""
This function shows an info dialog.
"""
dialog = gtk.MessageDialog(
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_INFO,
gtk.BUTTONS_OK,
None)
dialog.set_markup(info)
dialog.show_all()
dialog.run()
dialog.destroy()
def dialog_get_password(motivo="This will be used for <i>identification</i> purposes"):
"""
This function asks for a password.
"""
dialog = gtk.MessageDialog(
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK,
None)
dialog.set_markup("Please enter the <b>password</b>")
def responseToDialog(entry, dialog, response):
dialog.response(response)
entry = gtk.Entry()
entry.set_visibility(False)
entry.connect("activate", responseToDialog, dialog, gtk.RESPONSE_OK)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label("Password:"), False, 5, 5)
hbox.pack_end(entry)
dialog.format_secondary_markup(motivo)
dialog.vbox.pack_end(hbox, True, True, 0)
dialog.show_all()
dialog.run()
text = entry.get_text()
dialog.destroy()
return text
def accent2html(astring,reverse=False):
"""
This helper function replaces accented characters with their HTML entities.
"""
values = {
"à": "&agrave;",
"è": "&egrave;",
"ì": "&igrave;",
"ò": "&ograve;",
"ù": "&ugrave;",
"À": "&Agrave;",
"È": "&Egrave;",
"Ì": "&Igrave;",
"Ò": "&Ograve;",
"Ù": "&Ugrave;",
" ": "&nbsp;",
"!": "&#33;"
}
if not reverse:
astring = astring.replace("&","&")
for lettera in values.keys():
if reverse:
astring = astring.replace(values[lettera],lettera)
else:
astring = astring.replace(lettera,values[lettera])
if reverse:
astring = astring.replace("&","&")
return astring
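# Hedged example (illustrative only): accent2html("più") returns
# "pi&ugrave;", and accent2html("pi&ugrave;", reverse=True) restores "più".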
def get_salt(chars = string.letters + string.digits): return random.choice(chars) + random.choice(chars)
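# Hedged sketch (values assumed): the stored password is
# crypt.crypt(password, get_salt()); a later login re-crypts the input with
# the saved two-character salt and compares, e.g.
# crypt.crypt("secret", "ab") == saved_hash.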
def db2html(database="database.sql", password=None):
"""
Use this class to export the database values in html format.
"""
if not os.path.exists("%s.dir/" % database):
os.mkdir("%s.dir/" % database)
adatabase = sqlite.connect(database)
os.chdir("%s.dir/" % database)
cursor = adatabase.cursor()
if not password:
password = dialog_get_password()
eget = cursor.execute("SELECT * FROM settings")
for (key, value) in eget:
if key == "salt":
salt = value
elif key == "password":
check = value
if not crypt.crypt(password,salt) == check:
dialog_info("Your password is not correct!")
sys.exit(1)
eget = cursor.execute("SELECT * FROM pages")
for (number, date, text ) in eget:
xs = open("%s-%s.html" % (number, date), "w")
xs.write("<html>\n%s</html>" % libcrypt.decrypt(text,password).decode("base64"))
xs.close()
dialog_info("diary converted in html pages")
class Page(object):
"""
This class is used to easily manage a diary page.
"""
def __init__(self):
self.meta = dict()
self.text = str()
self.time = str()
self.number = int()
def set_page(self, text, date, number):
self.text = text
self.time = date
self.number = number
class PagesManager(object):
"""
This class manage the pages and the database.
"""
def __init__(self, database_path):
self.pages = dict()
self.settings = dict()
self.__load_database(database_path)
if self.settings["is_crypted"]:
self.tmp_password = ""
def __load_database(self, database_path):
if not database_path:
database_path = "./database.sql"
self.database = sqlite.connect(database_path)
self.cursor = self.database.cursor()
eget = self.cursor.execute("SELECT * FROM settings")
for ( key, value ) in eget:
if value == "True":
self.settings[key] = True
elif value == "False":
self.settings[key] = False
else:
self.settings[key] = value
def get_pages(self):
eget = self.cursor.execute("SELECT * FROM pages")
for (number, date, text ) in eget:
self.pages[number] = Page()
if self.settings["is_crypted"] and self.tmp_password:
text = libcrypt.decrypt(text,self.tmp_password)
try:
self.pages[number].set_page(text.decode("base64"), date, number)
except:
self.pages[number].set_page(text, date, number)
def make_page(self, text, date=None):
if not date:
date = time.strftime("%A %d %B %Y - %H:%M:%S")
self.get_pages()
num = len(self.pages.keys()) + 1
if self.settings["is_crypted"] and self.tmp_password:
text = libcrypt.crypt(text.encode("base64") ,self.tmp_password)
self.cursor.execute( "INSERT INTO pages (number, date, text) VALUES ('%s', '%s', '%s')" % (num, date, text) )
self.database.commit()
self.get_pages()
def check_passwd(self):
if not crypt.crypt(self.tmp_password,self.settings["salt"]) == self.settings["password"]:
return False
return True
def commit(self): self.database.commit()
def close(self):
self.database.commit()
self.database.close()
class Gui(object):
"""
This class manages, builds and destroys the windows.
"""
def __init__(self, database_path="database.sql"):
self.manager = PagesManager(database_path)
if self.manager.settings["is_crypted"]:
self.manager.tmp_password = dialog_get_password()
if not self.manager.check_passwd():
dialog_info("Your password is not correct!")
sys.exit(1)
self.manager.get_pages()
self.__number = len(self.manager.pages.keys()) + 1
self.window = gtk.Window()
self.window.set_title("Gread Diary")
self.__icon = self.window.render_icon(gtk.STOCK_ORIENTATION_PORTRAIT, gtk.ICON_SIZE_MENU)
self.window.set_icon(self.__icon)
self.window.set_size_request(660,500)
self.window.set_resizable(True)
self.window.connect("destroy", self.destroy)
self.new_button = gtk.ToolButton(gtk.STOCK_NEW)
self.new_button.connect("clicked", self.new)
self.save_button = gtk.ToolButton(gtk.STOCK_SAVE)
self.save_button.connect("clicked", self.save)
self.convert_button = gtk.ToolButton(gtk.STOCK_CONVERT)
self.convert_button.connect("clicked", self.__convert)
self.about_button = gtk.ToolButton(gtk.STOCK_ABOUT)
self.about_button.connect("clicked", self.__about)
self.back_button = gtk.ToolButton(gtk.STOCK_GO_BACK)
self.back_button.connect("clicked", self.__go_back)
self.forward_button = gtk.ToolButton(gtk.STOCK_GO_FORWARD)
self.forward_button.connect("clicked", self.__go_forward)
self.space_button_one = gtk.ToolItem()
self.space_button_two = gtk.ToolItem()
self.number_button = gtk.ToolItem()
self.number_entry = gtk.Entry()
self.number_entry.connect("activate", self.__change_page)
self.number_button.add(self.number_entry)
self.number_button.set_expand(False)
self.space_button_one.set_expand(True)
self.space_button_two.set_expand(True)
self.panel_bar = gtk.Toolbar()
self.panel_bar.add(self.back_button)
self.panel_bar.add(self.space_button_one)
self.panel_bar.add(self.new_button)
self.panel_bar.add(self.save_button)
self.panel_bar.add(self.convert_button)
self.panel_bar.add(self.about_button)
self.panel_bar.add(self.space_button_two)
self.panel_bar.add(self.number_button)
self.panel_bar.add(self.forward_button)
self.webkit = webkit.WebView()
#self.webkit.connect("populate-popup", self.__hide_menu)
self.scroll_box = gtk.ScrolledWindow()
self.scroll_box.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.scroll_box.add(self.webkit)
self.__vbox = gtk.VBox(False, 0)
self.__vbox.pack_start(self.scroll_box, True)
self.__vbox.pack_start(self.panel_bar, False)
self.window.add(self.__vbox)
self.__disable_input()
self.forward_button.set_sensitive(False)
self.window.show_all()
self.convert_button.hide()
if not len(self.manager.pages.keys()) >= 1:
self.back_button.set_sensitive(False)
self.forward_button.set_sensitive(False)
else:
# This allows going back to read the last saved page
self.show_page(len(self.manager.pages.keys()))
self.show_intro_page()
def __hide_menu(self, view, menu):
if not self.webkit.get_editable():
menu.destroy()
def __change_page(self, view=None, menu=None): self.show_page(int(self.number_entry.get_text()))
def __go_back(self, view=None, menu=None):
self.show_page(self.__number - 1)
def __about(self, view=None, menu=None):
about = gtk.AboutDialog()
about.set_program_name("Great Diary")
about.set_version("2.0")
about.set_copyright("(c) Lorenzo Setale")
about.set_comments("A symple diary written with python, gtk, webkit and using sqlite as storage!")
about.set_website("http://code.google.com/p/greatdiary/")
about.set_logo(self.window.render_icon(gtk.STOCK_ORIENTATION_PORTRAIT, gtk.ICON_SIZE_DIALOG))
about.run()
about.destroy()
def __convert(self, view=None, menu=None): db2html(password=self.manager.tmp_password)
def __go_forward(self, view=None, menu=None):
self.show_page(self.__number + 1)
def new(self, widget=None, data=None):
self.save_button.set_sensitive(True)
self.webkit.load_string("", "text/html", "iso-8859-15", "new-page")
self.webkit.set_editable(True)
self.number_entry.set_editable(False)
self.number_entry.set_text(str(len(self.manager.pages.keys())+1))
self.__number = len(self.manager.pages.keys())+1
self.back_button.set_sensitive(True)
self.forward_button.set_sensitive(False)
def save(self, widget=None, data=None):
self.webkit.execute_script("document.title=document.documentElement.innerHTML;")
text = accent2html(self.webkit.get_main_frame().get_title())
self.manager.make_page(text)
self.__number = len(self.manager.pages.keys())
self.__disable_input()
self.number_entry.set_editable(True)
def __disable_input(self):
self.webkit.set_editable(False)
self.save_button.set_sensitive(False)
def show_page(self, anumber):
self.__disable_input()
self.manager.get_pages()
if int(anumber) >= len(self.manager.pages.keys()):
anumber = len(self.manager.pages.keys())
self.back_button.set_sensitive(True)
self.forward_button.set_sensitive(False)
elif int(anumber) <= 1:
anumber = 1
self.back_button.set_sensitive(False)
self.forward_button.set_sensitive(True)
else:
self.back_button.set_sensitive(True)
self.forward_button.set_sensitive(True)
self.webkit.load_string("<html>\n%s</html>" % self.manager.pages[anumber].text.replace("&nbsp;"," "), "text/html", "iso-8859-15", "new-page")
self.__number = anumber
self.number_entry.set_text(str(anumber))
def quit(self, widget=None, data=None):
self.destroy()
def destroy(self, widget=None, data=None):
self.manager.close()
gtk.main_quit()
def show_intro_page(self):
HTML = "<html>"
HTML += """<head><style type="text/css">
.core {
clear: none;
min-width: 512px;
margin: 0 15px 10px 15px;
background: #cccccc;
padding: 5px 3px;
-webkit-border-radius: 13px;
-webkit-transition: all 0.1s ease-out;
background-color: #babdb6;
border: 0px solid #000; box-shadow:0px 0px 15px #000;
-webkit-box-shadow: 0px 0px 15px #000;
}
.baloon {
margin: 5px;
border: 1px solid transparent;
}.title {
padding: 5px 0px 0px 5px;
text-align: left;
font: bold 1.1em "Trebuchet MS", Helvetica, Sans-Serif;
background: -webkit-gradient(linear, left top, left bottom, from(#eeeeec), to(#babdb6));
-webkit-border-radius: 7px 7px 0px 0px;
-webkit-transition: all 0.1s ease-out;
}
</style></head>"""
HTML += """<body><br><div class="core"><div class="baloon"><div class="title">"""
HTML += """Welcome to GreatDiary: Your secret diary!</div>"""
HTML += """This is your secret diary, you can write everything you want: your emotions are safe there and are crypted by your password!<br><br> """
HTML += """<b>It's easy to use</b>: like a diary you can browse the pages by pressing the """
HTML += """two button with the arrows. You can write by clicking to the add-button in the bottom-center of this window and then save"""
HTML += """ your page with the save-button.</div></div>"""
HTML += """<div style="position: fixed; margin: auto; width: 100%; top: auto; right: 0; bottom: 0; left: 0; background-color: #3b5998;"""
HTML += """ border: 0px solid #000; box-shadow:0px 0px 15px #000;"""
HTML += """ -webkit-box-shadow: 0px 0px 15px #000; padding: 5px 10px; color: white;"></div></body></html>"""
self.webkit.load_string(HTML, "text/html", "iso-8859-15", "intro")
self.number_entry.set_editable(False)
self.number_entry.set_text(str(len(self.manager.pages.keys())+1))
self.__number = len(self.manager.pages.keys())+1
if __name__ == "__main__":
DEF_DB_PATH = "./database.sql"
if len(sys.argv) > 1:
DEF_DB_PATH = " ".join(sys.argv[1:])
if not os.path.isfile(DEF_DB_PATH):
dialog_info("This is thefirst time that you run Great Diary. Now we are going to generate the database and then we will crypt them by a password.")
print "Generating the database:",
database = sqlite.connect(DEF_DB_PATH)
cursor = database.cursor()
while 1:
password = dialog_get_password(motivo="This will be used to crypt the pages and database.")
if len(password) > 3:
break
else:
dialog_info("The password must be longer than 3 lecters")
salt = get_salt()
cursor.execute("CREATE TABLE pages (number INTEGER NOT NULL PRIMARY KEY, date TEXT NOT NULL, text TEXT NOT NULL)")
cursor.execute("CREATE TABLE settings (key TEXT NOT NULL, value TEXT NOT NULL)")
cursor.execute("INSERT INTO settings (key, value) VALUES ('is_crypted', 'True')")
cursor.execute("INSERT INTO settings (key, value) VALUES ('salt', '%s')" % salt)
cursor.execute("INSERT INTO settings (key, value) VALUES ('password', '%s')" % crypt.crypt(password,salt) )
database.commit()
database.close()
print "done"
dialog_info("Done! Everything is OK! Now you can use GreatDiary")
c = Gui(database_path=DEF_DB_PATH)
gtk.main()
| gpl-3.0 | 8,236,639,972,998,706,000 | 36.992823 | 155 | 0.601914 | false | 3.537759 | false | false | false |
egh/spydaap | spydaap/parser/vorbis.py | 1 | 3664 | # Copyright (C) 2008 Erik Hetzner
# This file is part of Spydaap. Spydaap is free software: you can
# redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# Spydaap is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Spydaap. If not, see <http://www.gnu.org/licenses/>.
import mutagen
import spydaap
import re
import os
from spydaap.daap import do
# * TODO Implement song.songtrackcount, song.disccount
# daap.songbeatsperminute
# daap.songcomment
# daap.songdateadded
# daap.songdatemodified,
# daap.songdisabled,
# daap.songeqpreset
# daap.songformat
# daap.songdescription
# daap.songrelativevolume,
# daap.songsize,
# daap.songstarttime,
# daap.songstoptime,
# daap.songtime,
# daap.songuserrating,
# daap.songdatakind,
# daap.songdataurl
class VorbisParser(spydaap.parser.Parser):
vorbis_string_map = {
'grouping': 'daap.songgrouping',
'title': 'dmap.itemname',
'artist': 'daap.songartist',
'composer': 'daap.songcomposer',
'genre': 'daap.songgenre',
'album': 'daap.songalbum',
'albumartist': 'daap.songalbumartist',
}
vorbis_int_map = {
'bpm': 'daap.songbeatsperminute',
'date': 'daap.songyear',
'year': 'daap.songyear',
'compilation': 'daap.songcompilation',
}
def handle_track(self, flac, d):
tracknumber = None
trackcount = None
if 'tracknumber' in flac.tags:
t = str(flac.tags['tracknumber'][0]).split('/')  # tags are lists; take the first value
tracknumber = self.my_int(t[0])
if (len(t) == 2):
trackcount = self.my_int(t[1])
if 'tracktotal' in flac.tags:
trackcount = self.my_int(flac.tags['tracktotal'])
if tracknumber:
d.append(do('daap.songtracknumber', tracknumber))
if trackcount:
d.append(do('daap.songtrackcount', trackcount))
def handle_disc(self, flac, d):
discnumber = None
disccount = None
if 'discnumber' in flac.tags:
t = unicode(flac.tags['discnumber'][0]).split('/')
discnumber = self.my_int(t[0])
if (len(t) == 2):
disccount = self.my_int(t[1])
if 'disctotal' in flac.tags:
disccount = self.my_int(flac.tags['disctotal'])
if discnumber:
d.append(do('daap.songdiscnumber', discnumber))
if disccount:
d.append(do('daap.songdisccount', disccount))
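# Hedged example (tag values assumed): a Vorbis tag discnumber = "1/2" maps
# to daap.songdiscnumber 1 and daap.songdisccount 2.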
file_re = re.compile(".*\\.([fF][lL][aA][cC]|[oO][gG]{2})$")
def understands(self, filename):
return self.file_re.match(filename)
def parse(self, filename):
md = mutagen.File(filename)
d = []
if md.tags is not None:
self.handle_string_tags(self.vorbis_string_map, md, d)
self.handle_int_tags(self.vorbis_int_map, md, d)
self.handle_track(md, d)
self.handle_disc(md, d)
self.add_file_info(filename, d)
d.extend([do('daap.songtime', md.info.length * 1000),
do('daap.songsamplerate', md.info.sample_rate)])
name = self.set_itemname_if_unset(os.path.basename(filename), d)
if hasattr(self, 'parse_extra_vorbis'):
self.parse_extra_vorbis(filename, md, d)
return (d, name)
| gpl-3.0 | -2,897,420,776,476,224,500 | 32.925926 | 72 | 0.623908 | false | 3.283154 | false | false | false |
BillClyde/safenetfs | safenet/api/directory.py | 1 | 4211 | import safenet.api
import requests
import json
from StringIO import StringIO
import base64
__author__ = "William Clyde"
__copyright__ = "Copyright 2016, William Clyde"
__license__ = "MIT"
class Directory:
"""Directory management"""
def __init__(self):
"""__init__"""
self.headers = {'content-type': 'application/json',
'authorization':
'Bearer {0}'.format(safenet.api.getToken())}
def create(self, path, meta_data, is_private=True):
"""Create a new directory
Parameters
----------
:param path: string
path of new directory
:param meta_data: string
optional directory information
:param is_private: bool
marks the file as private
Returns
-------
bool
True if successful, False otherwise
"""
privacy = "true" if is_private else "false"
data = """{{ "isPrivate": {privacy}, "metadata": "{meta_data}" }}"""
response = requests.post(safenet.api.DRIVE_DIR_URL + path,
headers=self.headers,
                                 data=data.format(privacy=privacy, meta_data=base64.b64encode(meta_data)))
if response.status_code == 200:
return True
return False
def get(self, path):
"""Get directory at path
Parameters
----------
:param path: string
path to directory
"""
response = requests.get(safenet.api.DRIVE_DIR_URL + path, headers=self.headers)
if response.status_code == 200:
return json.load(StringIO(response.text))
else:
return response.reason
def update(self, path, new_name, meta_data):
"""Update the name of the directory
Parameters
----------
:param path: string
path to directory
:param new_name: string
updated directory name
:param meta_data: string
optional directory information
Returns
-------
bool
True if successful, otherwise False
"""
data = """{{ "name":"{new_name}", "metadata":"{meta_data}" }}"""
response = requests.put(safenet.api.DRIVE_DIR_URL + path,
data=data.format(new_name=new_name,
meta_data=base64.b64encode(meta_data)),
headers=self.headers)
if response.status_code == 200:
return True
return False
def move(self, src_path, dest_path, copy=False):
"""Move directory to new location with optional copy
Parameters
----------
:param src_path: string
current path to directory
:param dest_path: string
new path to directory
:param copy: bool
copy file instead of moving
Returns
-------
bool
True if successful, otherwise False
"""
action = "copy" if copy else "move"
data = """{{ "srcRootPath":"drive",
"srcPath":"{src_path}",
"destRootPath":"drive",
"destPath":"{dest_path}",
"action":"{action}" }} """.format(src_path=src_path,
dest_path=dest_path,
action=action)
response = requests.post(safenet.api.DIR_URL + "/movedir",
data=data,
headers=self.headers)
if response.status_code == 200:
return True
return False
def delete(self, path):
"""delete
Parameters
----------
:param path: string
path of the directory to delete
Returns
-------
bool
True if successful, otherwise False
"""
response = requests.delete(safenet.api.DRIVE_DIR_URL + path, headers=self.headers)
if response.status_code == 200:
return True
return False
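
# --- Hedged usage sketch (added example; not part of the original file) ---
# Assumes a local SAFE launcher is running and safenet.api already holds a
# valid auth token; the directory names and metadata below are illustrative.
if __name__ == '__main__':
    d = Directory()
    if d.create('/docs', 'example folder'):
        print(d.get('/docs'))
        d.update('/docs', 'documents', 'renamed folder')
        d.delete('/documents')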
| mit | 7,075,495,754,665,540,000 | 28.243056 | 98 | 0.495607 | false | 4.763575 | false | false | false |
macarthur-lab/xbrowse | xbrowse_server/base/management/commands/get_lof_variants.py | 1 | 4504 | from collections import defaultdict
import csv
from django.core.management.base import BaseCommand
import elasticsearch
import elasticsearch_dsl
import json
import settings
from seqr.models import Individual
from seqr.views.utils.orm_to_json_utils import _get_json_for_individuals
from xbrowse_server.base.models import Project as BaseProject
EXCLUDE_PROJECTS = ['ext', '1000 genomes', 'DISABLED', 'project', 'interview', 'non-cmg', 'amel']
PER_PAGE = 5000
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("--metadata-only", action="store_true", help="Only get the project/ individual metadata.")
parser.add_argument("--use-project-indices-csv", action="store_true", help="Load projects to search from project_indices.csv")
parser.add_argument("--index", nargs='+', help="Individual index to use")
def handle(self, *args, **options):
if options["index"]:
es_indices = options["index"]
elif options["use_project_indices_csv"]:
with open('project_indices.csv') as csvfile:
reader = csv.DictReader(csvfile)
es_indices = {row['index'] for row in reader}
else:
projects_q = BaseProject.objects.filter(genome_version='37')
for exclude_project in EXCLUDE_PROJECTS:
projects_q = projects_q.exclude(project_name__icontains=exclude_project)
indices_for_project = defaultdict(list)
for project in projects_q:
indices_for_project[project.get_elasticsearch_index()].append(project)
indices_for_project.pop(None, None)
seqr_projects = []
with open('project_indices.csv', 'wb') as csvfile:
fieldnames = ['projectGuid', 'index']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for index, projects in indices_for_project.items():
for project in projects:
seqr_projects.append(project.seqr_project)
writer.writerow({'projectGuid': project.seqr_project.guid, 'index': index})
individuals = _get_json_for_individuals(Individual.objects.filter(family__project__in=seqr_projects))
with open('seqr_individuals.csv', 'wb') as csvfile:
fieldnames = ['projectGuid', 'familyGuid', 'individualId', 'paternalId', 'maternalId', 'sex',
'affected']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore')
writer.writeheader()
for individual in individuals:
writer.writerow(individual)
es_indices = indices_for_project.keys()
if not options["metadata_only"]:
es_client = elasticsearch.Elasticsearch(host=settings.ELASTICSEARCH_SERVICE_HOSTNAME, timeout=10000)
search = elasticsearch_dsl.Search(using=es_client, index='*,'.join(es_indices) + "*")
search = search.query("match", mainTranscript_lof='HC')
search = search.source(['contig', 'pos', 'ref', 'alt', '*num_alt', '*gq', '*ab', '*dp', '*ad'])
print('Searching across {} indices...'.format(len(es_indices)))
result_count_search = search.params(size=0)
total = result_count_search.execute().hits.total
print('Loading {} variants...'.format(total))
with open('lof_variants.csv', 'a') as csvfile:
sample_fields = ['num_alt', 'gq', 'ab', 'dp', 'ad']
fieldnames = ['contig', 'pos', 'ref', 'alt', 'index'] + sample_fields
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore')
if not options["index"]:
writer.writeheader()
for i, hit in enumerate(search.scan()):
result = {key: hit[key] for key in hit}
result['index'] = hit.meta.index
for field in sample_fields:
                        suffix = '_{}'.format(field)
                        # Note: rstrip() strips a trailing *character set*, not a
                        # suffix, so slice the '_<field>' suffix off explicitly.
                        result[field] = json.dumps({
                            key[:-len(suffix)]: val for key, val in result.items() if key.endswith(field)
                        })
writer.writerow(result)
if i % 10000 == 0:
print('Parsed {} variants'.format(i))
print('Loaded {} variants'.format(i))
            print('Done')
| agpl-3.0 | -8,824,804,290,275,084,000 | 47.44086 | 134 | 0.583925 | false | 4.201493 | false | false | false
coala-analyzer/coala-quickstart | coala_quickstart/generation/FileGlobs.py | 1 | 2475 | import os
from coalib.parsing.Globbing import glob_escape
from coala_quickstart.generation.Utilities import get_gitignore_glob
from coala_utils.Question import ask_question
from coala_quickstart.Strings import GLOB_HELP
from coalib.collecting.Collectors import collect_files
def get_project_files(log_printer,
printer,
project_dir,
file_path_completer,
non_interactive=False):
"""
Gets the list of files matching files in the user's project directory
after prompting for glob expressions.
:param log_printer:
A ``LogPrinter`` object.
:param printer:
A ``ConsolePrinter`` object.
:param file_path_completer:
A ``file_path_completer`` object.
:param non_interactive
Whether coala-quickstart is in non-interactive mode
:return:
A list of file paths matching the files.
"""
file_globs = ['**']
ignore_globs = None
gitignore_dir_list = []
for dir_name, subdir_name, file_list in os.walk(project_dir):
if os.path.isfile(os.path.join(dir_name, '.gitignore')):
gitignore_dir_list += [dir_name]
if gitignore_dir_list:
printer.print('The contents of your .gitignore file for the project '
'will be automatically loaded as the files to ignore.',
color='green')
ignore_globs = get_gitignore_glob(project_dir, gitignore_dir_list)
if non_interactive and not ignore_globs:
ignore_globs = []
if ignore_globs is None:
printer.print(GLOB_HELP)
file_path_completer.activate(seed_dir=project_dir)
ignore_globs = ask_question(
'Which files do you want coala to ignore inside the '
'project directory?',
printer=printer,
typecast=list)
file_path_completer.deactivate()
printer.print()
ignore_globs = list(ignore_globs)
escaped_project_dir = glob_escape(project_dir)
file_path_globs = [os.path.join(
escaped_project_dir, glob_exp) for glob_exp in file_globs]
ignore_path_globs = [os.path.join(
escaped_project_dir, glob_exp) for glob_exp in ignore_globs]
ignore_path_globs.append(os.path.join(escaped_project_dir, '.git/**'))
file_paths = collect_files(
file_path_globs,
log_printer,
ignored_file_paths=ignore_path_globs)
return file_paths, ignore_globs
| agpl-3.0 | 1,518,963,703,581,851,000 | 33.375 | 77 | 0.634343 | false | 3.891509 | false | false | false |
tehasdf/AdventOfCode2016 | p4.py | 1 | 1371 | from collections import Counter
def split(name):
name, _, sector_checksum = name.strip().rpartition('-')
sector, _, checksum = sector_checksum.partition('[')
checksum = checksum[:-1]
return name, int(sector), checksum
def real(name, checksum):
letters = Counter(name.replace('-', ''))
return ''.join(sorted(letters, key=lambda x: (-letters[x], x))[:5]) \
== checksum
def decrypt(name, counter):
def letters():
for letter in name:
if letter == '-':
yield ' '
else:
x = ord(letter) - ord('a')
x = (x + counter) % 26
yield chr(x + ord('a'))
return ''.join(letters())
def p1(inp):
return sum(sector for name, sector, checksum in map(split, inp)
if real(name, checksum))
def p2(inp):
for line in inp:
name, sector, checksum = split(line)
name = decrypt(name, sector)
if 'north' in name:
print sector, name
assert real('aaaaa-bbb-z-y-x', 'abxyz')
assert real('a-b-c-d-e-f-g-h', 'abcde')
assert real('not-a-real-room', 'oarel')
assert not real('totally-real-room', 'decoy')
with open('input_4.txt') as f:
print p1(f)
assert decrypt('q', 343) == 'v'
assert decrypt('qzmt-zixmtkozy-ivhz', 343) == 'very encrypted name'
with open('input_4.txt') as f:
print p2(f)
| mit | 1,116,403,845,823,417,200 | 24.388889 | 73 | 0.565281 | false | 3.335766 | false | false | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtNetwork/QHttpMultiPart.py | 1 | 1266 | # encoding: utf-8
# module PyQt4.QtNetwork
# from /usr/lib/python3/dist-packages/PyQt4/QtNetwork.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QHttpMultiPart(__PyQt4_QtCore.QObject):
"""
QHttpMultiPart(QObject parent=None)
QHttpMultiPart(QHttpMultiPart.ContentType, QObject parent=None)
"""
def append(self, QHttpPart): # real signature unknown; restored from __doc__
""" QHttpMultiPart.append(QHttpPart) """
pass
def boundary(self): # real signature unknown; restored from __doc__
""" QHttpMultiPart.boundary() -> QByteArray """
pass
def setBoundary(self, QByteArray): # real signature unknown; restored from __doc__
""" QHttpMultiPart.setBoundary(QByteArray) """
pass
def setContentType(self, QHttpMultiPart_ContentType): # real signature unknown; restored from __doc__
""" QHttpMultiPart.setContentType(QHttpMultiPart.ContentType) """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
AlternativeType = 3
ContentType = None # (!) real value is ''
FormDataType = 2
MixedType = 0
RelatedType = 1
| gpl-2.0 | -5,000,133,939,483,875,000 | 29.878049 | 105 | 0.670616 | false | 3.767857 | false | false | false |
JeroenZegers/Nabu-MSSS | nabu/processing/feature_computers/angspec.py | 1 | 1409 | """@file angspec.py
contains the angular spectrum feature computer"""
import numpy as np
import base
import feature_computer
from sigproc import snip
class Angspec(feature_computer.FeatureComputer):
"""the feature computer class to compute angular spectrum feature"""
def comp_feat(self, sig, rate):
"""
compute the features
Args:
sig: the audio signal as a 1-D numpy array
rate: the sampling rate
Returns:
the features as a [seq_length x feature_dim] numpy array
"""
# snip the edges
sig = snip(sig, rate, float(self.conf['winlen']), float(self.conf['winstep']))
if 'scipy' in self.conf and self.conf['scipy'] == 'True':
feat = base.angspec_scipy(sig, rate, self.conf)
else:
feat = base.angspec(sig, rate, self.conf)
if self.conf['include_energy'] == 'True':
if 'scipy' in self.conf and self.conf['scipy'] == 'True':
_, energy = base.fbank_scipy(sig, rate, self.conf)
else:
_, energy = base.fbank(sig, rate, self.conf)
feat = np.append(feat, energy[:, np.newaxis], 1)
return feat
def get_dim(self):
"""the feature dimemsion"""
dim = int(self.conf['nfft'])/2+1
if self.conf['include_energy'] == 'True':
dim += 1
return dim
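
# --- Hedged usage sketch (added example; not part of the original file) ---
# get_dim() mirrors the one-sided FFT size, nfft/2 + 1 bins, plus one extra
# column when energy is appended. The nfft value below is an assumption.
if __name__ == '__main__':
    nfft = 512
    expected_dim = nfft // 2 + 1 + 1  # 258 when include_energy == 'True'
    print('feature dim for nfft=%d: %d' % (nfft, expected_dim))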
| mit | 173,415,704,382,370,140 | 27.18 | 86 | 0.568488 | false | 3.881543 | false | false | false |
pablorecio/Cobaya | src/cobaya/config.py | 1 | 2598 | ###############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# Copyright (C) 2010, Lorenzo Gil Sanchez <[email protected]> #
###############################################################################
"""Configuration management.
"""
import StringIO
import ConfigParser
import os
import sys
class ConfigError(Exception):
pass
class Config(object):
default_conf = """
[hamster]
db = ~/.local/share/hamster-applet/hamster.db
log_file = ~/.local/share/hamster-applet/synced-tasks.dat
[remote]
url =
user =
password =
[tasks]
ticket_field = activity
project_field = tags
description_field = description
security_days = 10
"""
def __init__(self):
self.parser = ConfigParser.SafeConfigParser()
self.conf_files = [
os.path.join(os.path.dirname(sys.prefix), 'etc', 'cobaya.conf'),
os.path.join(os.path.expanduser('~'), '.cobayarc'),
]
def load(self, filename=None):
self.parser.readfp(StringIO.StringIO(self.default_conf))
if filename is not None:
self.conf_files.append(filename)
return self.parser.read(self.conf_files)
def get_option(self, option):
parts = option.split('.')
if not parts or len(parts) != 2:
raise ConfigError("Options must be qualified with the section")
section, option = parts
value = self.parser.get(section, option)
if value.startswith('~'):
value = value.replace('~', os.path.expanduser('~'))
return value
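
# --- Hedged usage sketch (added example; not part of the original file) ---
# Exercises only the built-in defaults; no ~/.cobayarc has to exist because
# SafeConfigParser.read() silently skips missing files.
if __name__ == '__main__':
    config = Config()
    config.load()
    print(config.get_option('tasks.ticket_field'))   # -> activity
    print(config.get_option('tasks.security_days'))  # -> 10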
| gpl-3.0 | 8,995,140,087,937,473,000 | 33.64 | 79 | 0.516166 | false | 4.631016 | true | false | false |
drblez/dynamodb-transaction-manager | dynamodb2/constructor.py | 1 | 6099 | from datetime import datetime
import decimal
__author__ = 'drblez'
"""
Field('f1', 'value1').field('f2', 'value2').field('f3', 42).field('f4', ['a', 'b', 'c']).field('f5', [1, 2, 3]).dict
{
'f1': {'S': 'value1'),
'f2': {'S': 'value2'},
'f3': {'N': '42'},
'f4': {'SS': ['a', 'b', 'c']},
'f5': {'NS': [1, 2, 3]}
}
Update('f3').add(1).also(Update('f4').add(['d'])).also(Update('f5').delete([2, 3])).also(Update('f6').put(0)).
also(Update('f1').delete()).dict()
{
'f3': {'Value': {'N': '1'}, 'Action': 'ADD'}
'f4': {'Value': {'SS': ['d']}, 'Action': 'ADD'}
'f5': {'Value': {'NS': ['2', '3'], Action: 'DELETE'}
'f6': {'Action': 'DELETE'}
}
Expected('f1', True, 'value1').expected('f3', True, 42).expected('f6', False).dict()
{
'f1': {'Value': {'S', 'value1'}, 'Exists': true}
'f2': {'Value': {'N', '42'}, 'Exists': true}
'f6': {'Exists': false}
}
KeyConditions('f3').between(40, 44).also(KeyConditions('f1').eq('value1')).dict()
{
'f3': {'AttributeValueList': [{'N': '40'}, {'N': '44'}], 'ComparisonOperator': 'BETWEEN'},
'f1': {'AttributeValueList': [{'S', 'value1'}], 'ComparisonOperator': 'EQ'}
}
"""
class EmptyList(Exception):
pass
class BadDynamoDBType(Exception):
pass
class ActionAlreadyExists(Exception):
pass
class ExpectedError(Exception):
pass
def dynamodb_type(value):
if type(value) == str:
return 'S'
elif type(value) == int:
return 'N'
elif type(value) == float:
return 'N'
elif type(value) == decimal.Decimal:
return 'N'
elif type(value) == datetime:
return 'D'
elif type(value) == list:
if len(value) == 0:
raise EmptyList()
return dynamodb_type(value[0]) + 'S'
else:
raise BadDynamoDBType('Bad type {} of value {}'.format(type(value), value))
class Field():
def __init__(self, name, value):
self.name = name
self.type = dynamodb_type(value)
if self.type in ['SS', 'NS']:
t = []
for v in value:
t.append(str(v))
self.value = t
elif self.type == 'D':
self.type = 'S'
self.value = value.isoformat()
elif self.type == 'DS':
self.type = 'SS'
t = []
for v in value:
t.append(v.isoformat())
self.value = t
else:
self.value = str(value)
self.items = [self]
def field(self, name, value):
f = Field(name, value)
self.items.append(f)
return self
def dict(self):
d = {}
for i in self.items:
d[i.name] = {i.type: i.value}
return d
class Update():
def __init__(self, field):
self.field = field
self.action = None
self.value = None
self.items = []
def add(self, value):
if not self.action is None:
raise ActionAlreadyExists('For field {} exists action {}'.format(self.field, self.action))
self.value = Field('Value', value).dict()
self.action = 'ADD'
self.items.append(self)
return self
def put(self, value):
self.value = Field('Value', value).dict()
self.action = 'PUT'
self.items.append(self)
return self
def delete(self, value=None):
if not value is None:
self.value = Field('Value', value).dict()
self.action = 'DELETE'
self.items.append(self)
return self
def also(self, update):
self.items.append(update)
return self
def dict(self):
d = {}
for i in self.items:
if not i.value is None:
t = i.value
else:
t = {}
t['Action'] = i.action
d[i.field] = t
return d
class Expected():
def __init__(self, field, exists, value=None):
self.field = field
self.exists = str(exists).lower()
if exists and (value is None):
raise ExpectedError('Exists true and Value is None not compatible')
if value is None:
self.value = None
else:
self.value = Field('Value', value).dict()
self.items = [self]
def expected(self, field, exists, value=None):
e = Expected(field, exists, value)
self.items.append(e)
return self
def dict(self):
d = {}
for i in self.items:
if not i.value is None:
t = i.value
else:
t = {}
t['Exists'] = i.exists
d[i.field] = t
return d
class KeyConditions():
def __init__(self, field):
self.field = field
self.items = []
self.operator = None
self.values = []
def between(self, lower, upper):
v1 = Field('Value', lower).dict()['Value']
v2 = Field('Value', upper).dict()['Value']
self.values = [v1, v2]
self.operator = 'BETWEEN'
self.items.append(self)
return self
    def __operator(self, operator, value):
        # Wrap the raw value in its DynamoDB-typed form, matching between()
        # and the docstring example ('EQ' expects {'S': 'value1'}).
        self.values = [Field('Value', value).dict()['Value']]
self.operator = operator
self.items.append(self)
return self
def eq(self, value):
return self.__operator('EQ', value)
def le(self, value):
return self.__operator('LE', value)
def lt(self, value):
return self.__operator('LT', value)
def ge(self, value):
return self.__operator('GE', value)
def gt(self, value):
return self.__operator('GT', value)
def begins_with(self, value):
return self.__operator('BEGINS_WITH', value)
def also(self, key_conditions):
self.items.append(key_conditions)
return self
def dict(self):
d = {}
for i in self.items:
d[i.field] = {
'AttributeValueList': i.values,
'ComparisonOperator': i.operator
}
return d
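
# --- Hedged usage sketch (added example; not part of the original file) ---
# Reproduces the docstring examples at the top of this module; the field
# names are illustrative only.
if __name__ == '__main__':
    print(Field('f1', 'value1').field('f3', 42).field('f4', ['a', 'b']).dict())
    print(Update('f3').add(1).also(Update('f1').delete()).dict())
    print(KeyConditions('f3').between(40, 44).also(KeyConditions('f1').eq('value1')).dict())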
| gpl-3.0 | 1,864,183,897,517,733,000 | 24.62605 | 120 | 0.499918 | false | 3.639021 | false | false | false |
arcturusannamalai/Ezhil-Lang | ezhil/ezhil_transforms.py | 2 | 7334 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##
## (C) 2015 Muthiah Annamalai
## Licensed under GPL Version 3
##
## Interpreter for Ezhil language
from __future__ import print_function
import sys
PYTHON3 = (sys.version[0] == '3')
if PYTHON3:
unicode = str
## Tx
from .transform import Visitor, TransformVisitor
from .ezhil_scanner import EzhilToken
## AST elements
from .ast import Expr, ExprCall, ExprList, Stmt, ReturnStmt, \
BreakStmt, ContinueStmt, ElseStmt, IfStmt, WhileStmt, \
ForStmt, AssignStmt, PrintStmt, EvalStmt, ArgList, \
ValueList, Function, StmtList, Identifier, Number, \
String, Boolean
from .errors import RuntimeException, SemanticException
class TransformEntryExitProfile(TransformVisitor):
def __init__(self, **kwargs):
TransformVisitor.__init__(self, **kwargs)
def visit_program_or_script(self, stmt_list):
l, c = 0, 0
stmt_list.dbg_msg(" add call : profile(\"begin\")")
begin = ValueList([String("begin")], l, c, self.debug)
call_profile_begin = ExprCall(Identifier("profile", l, c), begin, l, c,
self.debug)
stmt_list.List.insert(0, call_profile_begin)
stmt_list.dbg_msg(" add call : 'profile(\"results\")'")
results = ValueList([String("results")], l, c, self.debug)
call_profile_results = ExprCall(Identifier("profile", l, c), results,
l, c, self.debug)
stmt_list.append(call_profile_results)
return
class TransformSafeModeFunctionCheck(TransformVisitor):
def __init__(self, **kwargs):
self.forbidden_fcn_names = [u'raw_input',u'input',u'fopen',u'open',u'fclose',\
u'உள்ளீடு',u'turtle',u'கோப்பை_எழுது',u'கோப்பை_திற',u'கோப்பை_மூடு']
TransformVisitor.__init__(self, **kwargs)
def visit_expr_call(self, expr_call):
callee = expr_call.func_id.id
if callee in self.forbidden_fcn_names:
raise RuntimeException(
u"ERROR %s:\n\t %s may not be used in SAFE MODE ." %
(self.interpreter.get_fname(), unicode(expr_call)))
if expr_call.arglist:
expr_call.arglist.visit(self)
return
# Type checker for ezhil - rules list #65
class TransformSemanticAnalyzer(TransformVisitor):
def __init__(self, **kwargs):
TransformVisitor.__init__(self, **kwargs)
return
# Find a list of rules for type checking Ezhil AST.
    # You may only add like types, i.e. numbers to numbers or strings to strings, never one to the other.
# You may index arrays with only integers or numbers or dictionaries with Strings
# You can type check argument types, and number of builtin functions.
# You may type check arguments for number of args in a function call.
def visit_expr_call(self, expr_call):
callee = expr_call.func_id.id
if callee == u"__getitem__":
# T.B.D
pass
if expr_call.arglist:
expr_call.arglist.visit(self)
return
# check if the constants are on lhs of assignment statements
# check if the strings are added to numbers
# check ...
def visit_assign_stmt(self, assign_stmt):
if any(
map(lambda typename: isinstance(assign_stmt.lvalue, typename),
[Number, String, Boolean, Function])):
raise SemanticException(
"Cannot use number, string, constant or functions on LHS of assignment %s"
% unicode(assign_stmt))
if assign_stmt.lvalue:
assign_stmt.lvalue.visit(self)
if assign_stmt.rvalue:
assign_stmt.rvalue.visit(self)
return
def visit_binary_expr(self, binexpr):
lhs_is_string = isinstance(binexpr.term, String)
rhs_is_string = isinstance(binexpr.next_expr, String)
lhs_id_expr_call = isinstance(binexpr.term, ExprCall) or isinstance(
binexpr.term, Identifier)
rhs_id_expr_call = isinstance(binexpr.next_expr,
ExprCall) or isinstance(
binexpr.next_expr, Identifier)
if isinstance(binexpr.next_expr, Expr):
binexpr.next_expr.visit(self)
return
binexpr.term.visit(self)
if binexpr.binop.kind != EzhilToken.PLUS:
if lhs_is_string or rhs_is_string:
if binexpr.binop.kind in EzhilToken.COMPARE or binexpr.binop.kind == EzhilToken.PROD:
pass
else:
raise SemanticException(
"Cannot use string with operators other than '+','>=','<=','!=','==','>','<' or '*' at expression %s %s"
% (unicode(binexpr), binexpr.get_pos()))
else:
if lhs_is_string or rhs_is_string:
if not ((lhs_is_string and rhs_is_string) or \
(lhs_is_string and rhs_id_expr_call) or \
(rhs_is_string and lhs_id_expr_call)):
raise SemanticException(
"Cannot join strings and expression at expression %s" %
unicode(binexpr))
return
def visit_import(self, importstmt):
if not isinstance(importstmt.filename, String):
raise SemanticException(
"Import statement should be a string at time of interpretation at %s"
% unicode(importstmt))
return
class TransformConstantFolder(TransformVisitor):
def __init__(self, **kwargs):
TransformVisitor.__init__(self, **kwargs)
self.rval = None
#print(self.top_ast)
return
def constant_fold(self, binexpr):
return binexpr.evaluate(None)
    def can_fold_expr(self, expr):
        if isinstance(expr, Number):
            return True, expr
        # Always return a pair; the caller tuple-unpacks the result.
        return False, None
def reset(self):
self.rval = None
def get_rval(self):
op = self.rval
self.reset()
return op
def visit_number(self, num):
self.rval = num
return
def visit_binary_expr(self, binexpr):
# if lhs is constant and you are able to fold rhs
# then replace binexpr with the value
#print(type(binexpr.term))
#print(type(binexpr.next_expr))
next_expr_alt = None
if isinstance(binexpr.next_expr, Expr):
binexpr.next_expr.visit(self)
next_expr_alt = self.get_rval()
else:
next_expr_alt = binexpr.next_expr
binexpr.term.visit(self)
term_expr_alt = self.get_rval()
print(type(term_expr_alt))
#print("-------")
if next_expr_alt == None or term_expr_alt == None:
return None
#print("------x------")
lhs_is_num = isinstance(term_expr_alt, Number)
[foldable, val] = self.can_fold_expr(next_expr_alt)
if foldable:
print("foldable")
# new API needed to replace the node
binexpr.term = term_expr_alt
binexpr.next_expr = next_expr_alt
newval = self.constant_fold(binexpr)
binexpr.replace(newval)
print(str(newval), newval)
return Number(newval)
return None
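
# Illustrative note (added): when the operands of a binary expression reduce
# to Number nodes, visit_binary_expr evaluates the subtree once at transform
# time and replaces it in place, e.g. folding (2 + 3) into Number(5).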
| gpl-3.0 | 3,367,327,256,590,115,300 | 34.73399 | 128 | 0.583954 | false | 3.725732 | false | false | false |
tpoy0099/option_calculator | gui_impl/position_editor.py | 1 | 4509 | #coding=utf8
from qt_ui.ui_position_editor import Ui_position_editor_dialog
from gui_impl.qt_mvc_impl import MatrixModel, AutoFormDelegate
from gui_impl.qtableview_utility import getSelectedRows
from utility.data_handler import TableHandler
from PyQt4.QtCore import *
from PyQt4.QtGui import *
##############################################################################
class PosEditor(QDialog, Ui_position_editor_dialog):
EDIT_TABLE_HEADERS = ('group', 'code', 'dir', 'lots', 'open_price', 'margin', 'open_date')
def __init__(self, parent=None):
super(PosEditor, self).__init__(parent)
self.setupUi(self)
self.setModal(True)
#signal&slot
self.connect(self.cancel_button, SIGNAL("clicked()"), self.onCancelBtClicked)
self.connect(self.save_button, SIGNAL("clicked()"), self.onSaveAllBtClicked)
self.connect(self.reload_button, SIGNAL("clicked()"), self.onReloadBtClicked)
self.connect(self.save_csv_button, SIGNAL("clicked()"), self.onSaveCsvBtClicked)
self.connect(self.addrow_button, SIGNAL("clicked()"), self.onAddrowBtClicked)
self.connect(self.delrows_button, SIGNAL("clicked()"), self.onDelRowBtClicked)
#init mvc impl
self.model = MatrixModel(self)
self.delegate = AutoFormDelegate(self)
self.position_edit_vtable.setItemDelegate(self.delegate)
self.position_edit_vtable.setModel(self.model)
#init data
self.controler = None
self.model.setSize(0, PosEditor.EDIT_TABLE_HEADERS)
def setControler(self, ctl):
self.controler = ctl
#--------------------------------------------------
def wakeupEditor(self):
self.show()
def setEditTableContent(self, table_hdl_inst):
self.model.setTableContent(table_hdl_inst)
#--------------------------------------------------
def onAddrowBtClicked(self):
self.model.appendRows()
def onDelRowBtClicked(self):
rows = getSelectedRows(self.position_edit_vtable)
if rows:
self.model.deleteRows(rows)
def onCancelBtClicked(self):
self.model.clearContent()
self.close()
@staticmethod
def findInvalidRows(t_data=TableHandler()):
invalid_rows = list()
for r in range(0, t_data.rows):
for h in ['group', 'code', 'dir', 'lots', 'open_price', 'margin']:
val = t_data.getByHeader(r, h)
if val is None or val == '':
invalid_rows.append(r)
return invalid_rows
def onSaveAllBtClicked(self):
rtn = QMessageBox.question(self, 'Confirm', 'Save position changes?',
QMessageBox.Yes, QMessageBox.No)
if rtn == QMessageBox.Yes:
data = TableHandler()
data.copy(self.model.data)
invalid_rows = PosEditor.findInvalidRows(data)
if invalid_rows:
data.delRows(invalid_rows)
if data.rows > 0:
self.controler.onEditorClickBtSaveAll(data)
else:
QMessageBox.warning(self, 'Error',
'None valid records!', QMessageBox.Yes)
#notify
if invalid_rows:
info_str = 'Invalid rows deleted:\n%s' % str([i+1 for i in invalid_rows])
QMessageBox.warning(self, 'Warning', info_str, QMessageBox.Yes)
else:
self.close()
return
def onReloadBtClicked(self):
rtn = QMessageBox.question(self, 'Confirm', 'Reloading from position.csv?',
QMessageBox.Yes, QMessageBox.No)
if rtn == QMessageBox.Yes:
self.controler.onEditorClickBtReloadPosition()
return
def onSaveCsvBtClicked(self):
rtn = QMessageBox.question(self, 'Confirm', 'Writing positions to position.csv?',
QMessageBox.Yes, QMessageBox.No)
if rtn == QMessageBox.Yes:
self.controler.onSavePosition2Csv()
return
#######################################################################
if __name__ == '__main__':
import sys, random
app = QApplication(sys.argv)
pedit = PosEditor()
th = TableHandler()
th.reset(10, PosEditor.EDIT_TABLE_HEADERS)
for r in range(0, 10):
for h in PosEditor.EDIT_TABLE_HEADERS:
th.setByHeader(r, h, random.randint(0,10))
pedit.wakeupEditor()
sys.exit(app.exec_())
| gpl-2.0 | -6,616,695,230,852,411,000 | 37.211864 | 94 | 0.573963 | false | 4.080543 | false | false | false |
nasa-gibs/onearth | src/empty_tile/oe_generate_empty_tile.py | 1 | 11279 | #!/usr/bin/env python3
# Copyright (c) 2002-2017, California Institute of Technology.
# All rights reserved. Based on Government Sponsored Research under contracts NAS7-1407 and/or NAS7-03001.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the California Institute of Technology (Caltech), its operating division the Jet Propulsion Laboratory (JPL),
# the National Aeronautics and Space Administration (NASA), nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE CALIFORNIA INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# oe_generate_empty_tile.py
# The OnEarth Empty Tile Generator.
#
#
# Global Imagery Browse Services
# NASA Jet Propulsion Laboratory
# 2015
import sys
import urllib.request, urllib.parse, urllib.error
import xml.dom.minidom
from optparse import OptionParser
import png
toolName = "oe_generate_empty_tile.py"
versionNumber = "v1.4.0"
class ColorMap:
"""ColorMap metadata"""
def __init__(self, units, colormap_entries, style):
self.units = units
self.colormap_entries = colormap_entries
self.style = str(style).lower()
def __repr__(self):
if self.units != None:
xml = '<ColorMap units="%s">' % (self.units)
else:
xml = '<ColorMap>'
for colormap_entry in self.colormap_entries:
xml = xml + '\n ' + colormap_entry.__repr__()
xml = xml + '\n</ColorMap>'
return xml
def __str__(self):
        return self.__repr__()
class ColorMapEntry:
"""ColorMapEntry values within a ColorMap"""
def __init__(self, red, green, blue, transparent, source_value, value, label, nodata):
self.red = int(red)
self.green = int(green)
self.blue = int(blue)
self.transparent = transparent
self.source_value = source_value
self.value = value
self.label = label
self.nodata = nodata
self.color = [float(red)/255.0,float(green)/255.0,float(blue)/255.0]
def __repr__(self):
if self.value != None:
xml = '<ColorMapEntry rgb="%d,%d,%d" transparent="%s" nodata="%s" sourceValue="%s" value="%s" label="%s"/>' % (self.red, self.green, self.blue, self.transparent, self.nodata, self.source_value, self.value, self.label)
else:
xml = '<ColorMapEntry rgb="%d,%d,%d" transparent="%s" nodata="%s" sourceValue="%s" label="%s"/>' % (self.red, self.green, self.blue, self.transparent, self.nodata, self.source_value, self.label)
return xml
def __str__(self):
        return self.__repr__()
def parse_colormap(colormap_location, verbose):
try:
if verbose:
print("Reading color map:", colormap_location)
colormap_file = open(colormap_location,'r')
dom = xml.dom.minidom.parse(colormap_file)
colormap_file.close()
except IOError:
print("Accessing URL", colormap_location)
try:
dom = xml.dom.minidom.parse(urllib.request.urlopen(colormap_location))
except:
msg = "URL " + colormap_location + " is not accessible"
print(msg, file=sys.stderr)
raise Exception(msg)
style = "discrete"
colormap_entries = []
colormapentry_elements = dom.getElementsByTagName("ColorMapEntry")
for colormapentry in colormapentry_elements:
rgb = colormapentry.attributes['rgb'].value
red, green, blue = rgb.split(',')
try:
value = colormapentry.attributes['value'].value
if "(" in value or "[" in value:
style = "range"
except KeyError:
value = None
style = "classification"
try:
transparent = True if colormapentry.attributes['transparent'].value.lower() == 'true' else False
except KeyError:
transparent = False
try:
source_value = colormapentry.attributes['sourceValue'].value
except KeyError:
source_value = value
try:
label = colormapentry.attributes['label'].value
except KeyError:
label = value
try:
nodata = True if colormapentry.attributes['nodata'].value.lower() == 'true' else False
except KeyError:
nodata = False
colormap_entries.append(ColorMapEntry(red, green , blue, transparent, source_value, value, label, nodata))
colormap = ColorMap(None, colormap_entries, style)
if verbose:
print("ColorMap style:", style)
print(colormap)
return colormap
#-------------------------------------------------------------------------------
print(toolName + ' ' + versionNumber + '\n')
usageText = toolName + " --colormap [file] --output [file] --height [int] --width [int] --type [palette]"
# Define command line options and args.
parser=OptionParser(usage=usageText, version=versionNumber)
parser.add_option('-c', '--colormap',
action='store', type='string', dest='colormap',
help='Full path or URL of colormap filename.')
parser.add_option('-f', '--format',
action='store', type='string', dest='format', default = 'png',
help='Format of output file. Supported formats: png')
parser.add_option('-i', '--index',
action='store', type='string', dest='index',
help='The index of the color map to be used as the empty tile palette entry, overrides nodata value')
parser.add_option('-o', '--output',
action='store', type='string', dest='output',
help='The full path of the output file')
parser.add_option('-t', '--type',
action='store', type='string', dest='type', default = 'palette',
help='The image type: rgba or palette. Default: palette')
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="Print out detailed log messages")
parser.add_option('-x', '--width',
action='store', type='string', dest='width', default = '512',
help='Width of the empty tile (default: 512)')
parser.add_option('-y', '--height',
action='store', type='string', dest='height', default = '512',
help='Height of the empty tile (default: 512)' )
# read command line args
(options, args) = parser.parse_args()
if options.colormap:
colormap_location = options.colormap
else:
print("colormap file must be specified...exiting")
exit()
if options.output:
output_location = options.output
else:
print("output file must be specified...exiting")
exit()
color_index = 0
# parse colormap and get color entry
try:
colormap = parse_colormap(colormap_location, options.verbose)
colormap_entry = colormap.colormap_entries[color_index] # default to first entry if none specified
if options.index != None:
colormap_entry = colormap.colormap_entries[int(options.index)]
color_index = int(options.index)
else:
for index,entry in enumerate(colormap.colormap_entries):
if entry.nodata == True:
colormap_entry = entry
color_index = index
break # use first nodata entry found
except Exception as e:
print(toolName + ": ERROR: " + str(e) + "\n", file=sys.stderr)
sys.exit(1)
# generate empty_tile
try:
if options.verbose:
print("Using index " + str(color_index) + " with entry:\n" + str(colormap_entry))
f = open(output_location, 'wb')
if options.type == "palette":
palette = []
for j in range (0, 256):
try:
entry = colormap.colormap_entries[j]
if entry.transparent == True:
alpha = 0
else:
alpha = 255
palette.append((entry.red,entry.green,entry.blue,alpha))
except IndexError: # pad with zeroes
palette.append((0,0,0,0))
rows = []
img = []
for i in range (1, (int(options.width))+1):
rows.append(color_index)
for i in range (0, int(options.height)):
img.append(rows)
w = png.Writer(int(options.width), int(options.height), palette=palette, bitdepth=8)
w.write(f, img)
else: # use RGBA
rows = []
img = []
for i in range (1, (int(options.width)*4)+1):
if i%4 == 1:
rows.append(colormap_entry.red)
elif i%4 == 2:
rows.append(colormap_entry.green)
elif i%4 == 3:
rows.append(colormap_entry.blue)
elif i%4 == 0:
if colormap_entry.transparent == True:
rows.append(0)
else:
rows.append(255)
for i in range (0, int(options.height)):
img.append(rows)
w = png.Writer(int(options.width), int(options.height), alpha=True)
w.write(f, img)
f.close()
print("\nSuccessfully generated empty tile " + output_location + " of size: " + str(options.width) + " by " + str(options.height))
except IOError as e:
print(toolName + ": " + str(e), file=sys.stderr)
sys.exit(1)
exit()
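
# Example invocation (added; the file names are illustrative only):
#   oe_generate_empty_tile.py --colormap colormap.xml --output empty.png \
#       --height 512 --width 512 --type palette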
| apache-2.0 | 599,956,554,437,236,400 | 38.855124 | 229 | 0.61282 | false | 4.051365 | false | false | false |
LearnEra/LearnEraPlaftform | common/lib/xmodule/xmodule/modulestore/tests/test_publish.py | 1 | 7315 | """
Test the publish code (mostly testing that publishing doesn't result in orphans)
"""
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.test_split_w_old_mongo import SplitWMongoCourseBoostrapper
from xmodule.modulestore.tests.factories import check_mongo_calls
from xmodule.modulestore import ModuleStoreEnum
class TestPublish(SplitWMongoCourseBoostrapper):
"""
Test the publish code (primary causing orphans)
"""
def _create_course(self):
"""
Create the course, publish all verticals
* some detached items
"""
# There are 12 created items and 7 parent updates
# create course: finds: 1 to verify uniqueness, 1 to find parents
# sends: 1 to create course, 1 to create overview
with check_mongo_calls(5, 2):
super(TestPublish, self)._create_course(split=False) # 2 inserts (course and overview)
# with bulk will delay all inheritance computations which won't be added into the mongo_calls
with self.draft_mongo.bulk_operations(self.old_course_key):
# finds: 1 for parent to add child
# sends: 1 for insert, 1 for parent (add child)
with check_mongo_calls(1, 2):
self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid', split=False)
with check_mongo_calls(2, 2):
self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid', split=False)
# For each vertical (2) created:
# - load draft
# - load non-draft
# - get last error
# - load parent
# - load inheritable data
with check_mongo_calls(7, 4):
self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', split=False)
self._create_item('vertical', 'Vert2', {}, {'display_name': 'Vertical 2'}, 'chapter', 'Chapter1', split=False)
# For each (4) item created
# - try to find draft
# - try to find non-draft
# - retrieve draft of new parent
# - get last error
# - load parent
# - load inheritable data
# - load parent
# count for updates increased to 16 b/c of edit_info updating
with check_mongo_calls(16, 8):
self._create_item('html', 'Html1', "<p>Goodbye</p>", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', split=False)
self._create_item(
'discussion', 'Discussion1',
"discussion discussion_category=\"Lecture 1\" discussion_id=\"a08bfd89b2aa40fa81f2c650a9332846\" discussion_target=\"Lecture 1\"/>\n",
{
"discussion_category": "Lecture 1",
"discussion_target": "Lecture 1",
"display_name": "Lecture 1 Discussion",
"discussion_id": "a08bfd89b2aa40fa81f2c650a9332846"
},
'vertical', 'Vert1',
split=False
)
self._create_item('html', 'Html2', "<p>Hello</p>", {'display_name': 'Hollow Html'}, 'vertical', 'Vert1', split=False)
self._create_item(
'discussion', 'Discussion2',
"discussion discussion_category=\"Lecture 2\" discussion_id=\"b08bfd89b2aa40fa81f2c650a9332846\" discussion_target=\"Lecture 2\"/>\n",
{
"discussion_category": "Lecture 2",
"discussion_target": "Lecture 2",
"display_name": "Lecture 2 Discussion",
"discussion_id": "b08bfd89b2aa40fa81f2c650a9332846"
},
'vertical', 'Vert2',
split=False
)
with check_mongo_calls(0, 2):
# 2 finds b/c looking for non-existent parents
self._create_item('static_tab', 'staticuno', "<p>tab</p>", {'display_name': 'Tab uno'}, None, None, split=False)
self._create_item('course_info', 'updates', "<ol><li><h2>Sep 22</h2><p>test</p></li></ol>", {}, None, None, split=False)
def test_publish_draft_delete(self):
"""
To reproduce a bug (STUD-811) publish a vertical, convert to draft, delete a child, move a child, publish.
        See if the deleted and moved children are still connected to or exist in the db (the bug left them disconnected but existing)
"""
vert_location = self.old_course_key.make_usage_key('vertical', block_id='Vert1')
item = self.draft_mongo.get_item(vert_location, 2)
# Finds:
# 1 get draft vert,
# 2-10 for each child: (3 children x 3 queries each)
# get draft and then published child
# compute inheritance
# 11 get published vert
# 12-15 get each ancestor (count then get): (2 x 2),
# 16 then fail count of course parent (1)
# 17 compute inheritance
# 18 get last error
# 19-20 get draft and published vert
# Sends:
# delete the subtree of drafts (1 call),
# update the published version of each node in subtree (4 calls),
# update the ancestors up to course (2 calls)
with check_mongo_calls(20, 7):
self.draft_mongo.publish(item.location, self.user_id)
# verify status
item = self.draft_mongo.get_item(vert_location, 0)
self.assertFalse(getattr(item, 'is_draft', False), "Item was published. Draft should not exist")
# however, children are still draft, but I'm not sure that's by design
# delete the draft version of the discussion
location = self.old_course_key.make_usage_key('discussion', block_id='Discussion1')
self.draft_mongo.delete_item(location, self.user_id)
draft_vert = self.draft_mongo.get_item(vert_location, 0)
self.assertTrue(getattr(draft_vert, 'is_draft', False), "Deletion didn't convert parent to draft")
self.assertNotIn(location, draft_vert.children)
# move the other child
other_child_loc = self.old_course_key.make_usage_key('html', block_id='Html2')
draft_vert.children.remove(other_child_loc)
other_vert = self.draft_mongo.get_item(self.old_course_key.make_usage_key('vertical', block_id='Vert2'), 0)
other_vert.children.append(other_child_loc)
self.draft_mongo.update_item(draft_vert, self.user_id)
self.draft_mongo.update_item(other_vert, self.user_id)
# publish
self.draft_mongo.publish(vert_location, self.user_id)
item = self.draft_mongo.get_item(draft_vert.location, revision=ModuleStoreEnum.RevisionOption.published_only)
self.assertNotIn(location, item.children)
self.assertIsNone(self.draft_mongo.get_parent_location(location))
with self.assertRaises(ItemNotFoundError):
self.draft_mongo.get_item(location)
self.assertNotIn(other_child_loc, item.children)
self.assertTrue(self.draft_mongo.has_item(other_child_loc), "Oops, lost moved item")
| agpl-3.0 | 2,435,826,882,299,244,500 | 52.394161 | 154 | 0.5892 | false | 3.91805 | true | false | false |
eek6/squeakspace | lib/squeakspace/common/util_http.py | 1 | 14648 |
import urlparse
import json
import Cookie
import squeakspace.common.squeak_ex as ex
def json_fun(object):
#return json.dumps(object)
return json.dumps(object, indent=4) + '\n'
def respond(environ, start_response, status, content, response_headers=None):
if response_headers == None:
response_headers = [('Content-type', 'text/plain'),
('Content-length', str(len(content)))]
start_response(status, response_headers)
return [content]
# delete this.
def respond_json(environ, start_response, status, object):
    content = json_fun(object)
return respond(environ, start_response, status, content)
def json_response_headers(body):
return [('Content-Type', 'application/json'),
('Content-Length', str(len(body)))]
class Response(Exception):
def __init__(self, body, response_headers=None):
self.body = body
self.response_headers = response_headers
def attach_cookies(self, simplecookie):
# cookies_str = simplecookie.output(header='', sep=';')
# if len(cookies_str) > 1 and cookies_str[0] == ' ':
# # get rid of a weird leading space.
# cookies_str = cookies_str[1:]
#
# self.response_headers.append(('Set-Cookie', cookies_str))
#
# print ('cookies_str', cookies_str)
#
for cookie_name in simplecookie:
cookie_str = simplecookie[cookie_name].output(header='')
if len(cookie_str) > 1 and cookie_str[0] == ' ':
# get rid of leading space
cookie_str = cookie_str[1:]
self.response_headers.append(('Set-Cookie', cookie_str))
#print ('cookie_str', cookie_str)
return self
def load_cookies(self, data):
return self.attach_cookies(Cookie.SimpleCookie(data))
def clear_cookies(self, cookies):
simplecookie = Cookie.SimpleCookie()
for cookie_name in cookies:
simplecookie[cookie_name] = ''
simplecookie[cookie_name]['path'] = '/'
simplecookie[cookie_name]['expires'] = 'Thu, 01 Jan 1970 00:00:00 UTC'
return self.attach_cookies(simplecookie)
def respond(self, environ, start_response):
return respond(environ, start_response, self.status, self.body, self.response_headers)
class OkResponse(Response):
status = '200 OK'
class BadRequestResponse(Response):
status = '400 Bad Request'
class ForbiddenResponse(Response):
status = '403 Forbidden'
class NotFoundResponse(Response):
status = '404 Not Found'
class MethodNotAllowedResponse(Response):
status = '405 Method Not Allowed'
class ConflictResponse(Response):
status = '409 Conflict'
class LengthRequiredResponse(Response):
status = '411 Length Required'
class RequestEntityTooLargeResponse(Response):
status = '413 Request Entity Too Large'
class RequestUriTooLongResponse(Response):
status = '414 Request-URI Too Long'
class ServerErrorResponse(Response):
status = '500 Internal Server Error'
class ServerErrorJsonResponse(ServerErrorResponse):
def __init__(self):
self.body = json_fun(
{'status' : 'error',
'reason' : 'server error'})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class QueryTooLongResponse(RequestEntityTooLargeResponse):
def __init__(self, query_length, max_length):
self.query_length = query_length
self.max_length = max_length
self.body = json_fun(
{'status' : 'error',
'reason' : 'query too long',
'query_length' : query_length,
'max_length' : max_length})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class MalformedQueryStringResponse(BadRequestResponse):
def __init__(self, query_string):
self.query_string = query_string
self.body = json_fun(
{'status' : 'error',
'reason' : 'malformed query string',
'query_string' : query_string})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class ContentLengthRequiredResponse(LengthRequiredResponse):
def __init__(self):
self.body = json_fun(
{'status' : 'error',
'reason' : 'Content-Length required'})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class MalformedContentLengthResponse(BadRequestResponse):
def __init__(self, content_length):
self.content_length = content_length
self.body = json_fun(
{'status' : 'error',
'reason' : 'malformed content length',
'content_length' : content_length})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class ContentLengthTooLargeResponse(RequestEntityTooLargeResponse):
def __init__(self, content_length, max_length):
self.content_length = content_length
self.max_length = max_length
self.body = json_fun(
{'status' : 'error',
'reason' : 'Content-Length too large',
'content_length' : content_length,
'max_length' : max_length})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class IncorrectContentLengthResponse(BadRequestResponse):
    def __init__(self, content_length, actual_content_length):
self.content_length = content_length
self.actual_content_length = actual_content_length
self.body = json_fun(
{'status' : 'error',
'reason' : 'incorrect Content-Length',
'content_length' : content_length,
'actual_content_length' : actual_content_length})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class InvalidContentTypeResponse(BadRequestResponse):
def __init__(self, content_type, supported_content_type):
self.content_type = content_type
self.supported_content_type = supported_content_type
self.body = json_fun(
{'status' : 'error',
'reason' : 'Content-Type invalid',
'content_type' : content_type,
'supported_content_type' : supported_content_type})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class MalformedContentResponse(BadRequestResponse):
# There should be a cut off here. Don't send the content
# back if it's too large.
def __init__(self, content):
self.content = content
self.body = json_fun(
{'status' : 'error',
'reason' : 'malformed content',
'content' : content})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class FieldRequiredResponse(BadRequestResponse):
def __init__(self, field):
self.field = field
self.body = json_fun(
{'status' : 'error',
'reason' : 'field required',
'field' : field})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class CookieRequiredResponse(BadRequestResponse):
def __init__(self, cookie):
self.cookie = cookie
self.body = json_fun(
{'status' : 'error',
'reason' : 'cookie required',
'cookie' : cookie})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class BadFieldResponse(BadRequestResponse):
def __init__(self, field, value):
self.field = field
self.value = value
self.body = json_fun(
{'status' : 'error',
'reason' : 'bad field',
'field' : field,
'value' : value})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class MethodNotAllowedJsonResponse(MethodNotAllowedResponse):
def __init__(self, method, allow):
allow_str = ', '.join(allow)
self.method = method
self.allow = allow
self.body = json_fun(
{'status' : 'error',
'reason' : 'method not allowed',
'method' : method,
'allow' : allow})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body))),
('Allow', allow_str)]
def parse_get_request(environ, max_length = 2048):
query_string = environ['QUERY_STRING']
query_length = len(query_string)
if len(query_string) > max_length:
raise QueryTooLongResponse(query_length, max_length)
try:
# keep_blank_values = False, strict_parsing = True
return urlparse.parse_qs(query_string, False, True)
except ValueError:
raise MalformedQueryStringResponse(query_string)
def check_content_type(expected_content_type, content_type):
return expected_content_type == content_type or \
content_type[:len(expected_content_type) + 1] == expected_content_type + ';'
def parse_post_request(environ, max_length = 200*1024*1024): # 200 MB ok?
content_length_str = environ.get('CONTENT_LENGTH')
if content_length_str == None:
raise ContentLengthRequiredResponse()
content_length = None
try:
content_length = int(content_length_str)
except ValueError:
raise MalformedContentLengthResponse(content_length_str)
if content_length > max_length:
raise ContentLengthTooLargeResponse(content_length, max_length)
content_type = environ.get('CONTENT_TYPE')
supported_content_type = 'application/x-www-form-urlencoded'
if not check_content_type(supported_content_type, content_type):
raise InvalidContentTypeResponse(content_type, supported_content_type)
content_input = environ['wsgi.input']
content = content_input.read(content_length)
if content_length != len(content):
raise IncorrectContentLengthResponse(content_length, len(content))
try:
return urlparse.parse_qs(content, False, True)
except ValueError:
raise MalformedContentResponse(content)
def parse_cookies(environ):
cookies_str = environ.get('HTTP_COOKIE')
if cookies_str == None:
return None
else:
return Cookie.SimpleCookie(cookies_str)
def get_required(query_table, field):
try:
return query_table[field][0]
except KeyError:
raise FieldRequiredResponse(field)
def get_optional(query_table, field):
try:
return query_table[field][0]
except KeyError:
return None
def get_required_cookie(simplecookie, cookie):
if simplecookie == None:
raise CookieRequiredResponse(cookie)
try:
return simplecookie[cookie].value
except KeyError:
raise CookieRequiredResponse(cookie)
def get_optional_cookie(simplecookie, cookie):
try:
return simplecookie[cookie].value
except KeyError:
return None
def convert_int(string, field):
try:
if string != None:
return int(string)
else:
return None
except ValueError:
raise BadFieldResponse(field, string)
def convert_bool(string, field):
if string == None:
return None
lower = string.lower()
if lower == 'true':
return True
elif lower == 'false':
return False
else:
raise BadFieldResponse(field, string)
def convert_nat(string, field):
value = convert_int(string, field)
if value < 0:
raise BadFieldResponse(field, string)
return value
def dispatch_on_method(environ, handlers):
method = environ['REQUEST_METHOD']
handler = handlers.get(method)
if handler == None:
allow_array = handlers.keys()
allow_array.sort()
raise MethodNotAllowedJsonResponse(method, allow_array)
handler(environ)
def respond_with_handler(environ, start_response, handler):
response = None
try:
response = handler(environ)
except Response as r:
response = r
return response.respond(environ, start_response)
status_conversion_map = {ex.SqueakStatusCodes.bad_request : BadRequestResponse,
ex.SqueakStatusCodes.too_large : RequestEntityTooLargeResponse,
ex.SqueakStatusCodes.conflict : ConflictResponse,
ex.SqueakStatusCodes.not_found : NotFoundResponse,
ex.SqueakStatusCodes.forbidden : ForbiddenResponse,
ex.SqueakStatusCodes.server_error : ServerErrorResponse}
def convert_squeak_exception(e):
constructor = status_conversion_map[e.type]
content = json_fun(e.dict())
headers = json_response_headers(content)
return constructor(content, headers)
def ok_json(object):
content = json_fun(object)
headers = json_response_headers(content)
return OkResponse(content, headers)
#def bad_request(environ, start_response, reason):
# status = '400 Bad Request'
# content = 'Bad Request: ' + reason
# return respond(environ, start_response, status, content)
#
#def conflict(environ, start_response, reason):
# status = '409 Conflict'
# content = 'Conflict: ' + reason
# return respond(environ, start_response, status, content)
#
#def need_content_length(environ, start_response):
# status = '411 Length Required'
# content = 'Length Required'
# return respond(environ, start_response, status, content)
#
#def request_entity_too_large(environ, start_response):
# status = '413 Request Entity Too Large'
# content = 'Request Entity Too Large'
# return respond(environ, start_response, status, content)
| gpl-3.0 | -6,834,943,958,178,356,000 | 32.290909 | 94 | 0.608684 | false | 4.125035 | false | false | false |
T3kton/subcontractor | subcontractor/credentials.py | 1 | 1841 | import json
import ssl
from urllib import request
VAULT_TIMEOUT = 20
_handler = None
def getCredentials( value ):
if value is None:
return None
return _handler.get( value )
def setup( config ):
global _handler
vault_type = config.get( 'credentials', 'type', fallback=None )
if not vault_type: # could be None or ''
_handler = NullVault()
elif vault_type == 'hashicorp':
_handler = HashiCorptVault( config.get( 'credentials', 'host' ),
config.get( 'credentials', 'token' ),
config.get( 'credentials', 'proxy', fallback=None ),
config.getboolean( 'credentials', 'verify_ssl', fallback=True ) )
else:
raise ValueError( 'Unknown Credentials type "{0}"'.format( vault_type ) )
class NullVault():
def __init__( self ):
pass
def get( self, name ):
return None
class HashiCorptVault():
def __init__( self, host, token, proxy=None, verify_ssl=True ):
super().__init__()
if host[-1] == '/':
raise ValueError( 'VAULT_HOST must not end with "/"' )
self.host = host
handler_list = []
if proxy is not None:
handler_list.append( request.ProxyHandler( { 'http': proxy, 'https': proxy } ) )
else:
handler_list.append( request.ProxyHandler( {} ) )
if not verify_ssl:
handler_list.append( request.HTTPSHandler( context=ssl._create_unverified_context() ) )
self.opener = request.build_opener( *handler_list )
self.opener.addheaders = [
( 'X-Vault-Token', token ),
]
def get( self, url ):
req = request.Request( '{0}{1}'.format( self.host, url ), method='GET' )
resp = self.opener.open( req, timeout=VAULT_TIMEOUT )
# TODO: catch 404, 403, etc
return json.loads( resp.read().decode() )[ 'data' ][ 'data' ]
| apache-2.0 | 6,615,458,542,644,797,000 | 24.219178 | 97 | 0.595872 | false | 3.674651 | true | false | false |
ronas/PythonGNF | Igor/Tabuada.py | 1 | 1262 | '''
numero = int(input("Número para a tabuada: "))
for multiplicador in range(1,11):
print (numero,"x",multiplicador,"=",(numero*multiplicador))
'''
#
# Calculo de Horas trabalhadas.
# Autor: Igor Nunes
# Materia: Programa Python
# Orientador: Ronaldo
# Aula de total de horas trabalhadas
#
#Leitura dos dados do teclado...
horasTrabalhadas = input("Horas trabalhadas: ")
valorHoras = input("Valor da Horas: ")
imposto = input("imposto: ")
#
#Tratamento de entrada do usuário...
horasTrabalhadas = horasTrabalhadas.replace( "," , "." )
valorHoras = valorHoras.replace(",",".")
imposto = imposto.replace(",",".")
#
#Conversão dos valores de texto para numérico (ponto flutuante)...
horasTrabalhadas = float(horasTrabalhadas)
valorHoras = float(valorHoras)
imposto = float(imposto)
totalBruto = ( horasTrabalhadas * valorHoras )
# Valor do imposto.
#imposto = 24
# Calculo de porcentagem de imposto.
impostoDevido = totalBruto * (imposto /100 )
#Calculo do valor liquido.
totalLiquido = (totalBruto - impostoDevido)
#
'''
Limpa tela.
import os
os.system ("clear")
'''
#
print ("totalBruto R$ ",totalBruto )
#
print ("impostoDevido",impostoDevido)
#
print("totalLiquido R$ ",totalLiquido)
| gpl-3.0 | 7,292,178,075,437,664,000 | 23.16 | 66 | 0.68124 | false | 2.270758 | false | false | false |
Tuxemon/Tuxemon | tuxemon/event/actions/add_item.py | 1 | 1612 | #
# Tuxemon
# Copyright (c) 2014-2017 William Edwards <[email protected]>,
# Benjamin Bean <[email protected]>
#
# This file is part of Tuxemon
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from tuxemon.event.eventaction import EventAction
from typing import Union, NamedTuple, final
class AddItemActionParameters(NamedTuple):
item_slug: str
quantity: Union[int, None]
@final
class AddItemAction(EventAction[AddItemActionParameters]):
"""
Adds an item to the current player's inventory.
The action parameter must contain an item name to look up in the item
database.
"""
name = "add_item"
param_class = AddItemActionParameters
def start(self) -> None:
player = self.session.player
if self.parameters.quantity is None:
quantity = 1
else:
quantity = self.parameters.quantity
player.alter_item_quantity(self.session, self.parameters.item_slug, quantity)
| gpl-3.0 | -5,283,441,857,624,997,000 | 31.24 | 85 | 0.71464 | false | 3.980247 | false | false | false |
rjonnal/zernike | __init__.py | 1 | 20006 | """This module contains functions for Zernike calculations. Mainly the private
function _zgen, a generator function for Zernike polynomials. The public
functions make use of _zgen to create height or slope maps in a unit
pupil, corresponding to individual Zernike terms.
Author: Ravi S. Jonnal / Werner Lab, UC Davis
Revision: 2.0 / 28 June 2014
"""
import numpy as np
from matplotlib import pyplot as plt
import sys
from time import sleep
import os
USE_CACHE_FILE = False
def fact(num):
"""Implementation of factorial function.
"""
# Check that the number is an integer.
assert(num%1==0)
# Check that $num\geq 0$.
assert(num>=0)
# Compute $num!$ recursively.
if num==0 or num==1:
return 1
else:
return num * fact(num-1)
def choose(a,b):
"""Binomial coefficient, implemented using
this module's factorial function.
See [here](http://www.encyclopediaofmath.org/index.php/Newton_binomial) for detail.
"""
assert(a>=b)
return fact(a)/(fact(b)*fact(a-b))
def splitEquation(eqStr,width,bookend):
if len(eqStr)<=width or len(eqStr)==0:
return eqStr
else:
spaceIndices = []
idx = 0
while idx>-1:
idx = eqStr.find(' ',idx+1)
spaceIndices.append(idx)
spaceIndices = spaceIndices[:-1]
idxList = [x for x in spaceIndices if x<width]
if len(idxList)==0:
return eqStr
else:
idx = idxList[-1]
head = eqStr[:idx]
innards = ' ' + bookend + '\n' + bookend
tail = splitEquation(eqStr[idx:],width,bookend)
test =head + innards + tail
return test
class Zernike:
def __init__(self):
if USE_CACHE_FILE:
cachedir = './cache/'
self._cachefn = os.path.join(cachedir,'zernike_cache.txt')
if not os.path.exists(cachedir):
os.makedirs(cachedir)
try:
self._termMatrix = np.loadtxt(self._cachefn).astype(np.int32)
except Exception as e:
print 'No term cache file. Creating.'
self._termMatrix = np.array([])
np.savetxt(self._cachefn,self._termMatrix)
# Make a dictionary of precomputed coefficients, using the cache file.
# This dictionary will be used to look up values when they exist in
# the dictionary, and will recompute them otherwise.
self._termDict = {}
if USE_CACHE_FILE:
for row in self._termMatrix:
n,m,kindIndex,s,j,k = row[:6]
t1,t2,t3,c,tXexp,tYexp = row[6:]
self._termDict[(n,m,kindIndex,s,j,k)] = (t1,t2,t3,c,tXexp,tYexp)
# The functions in this class can be asked for phase height,
# or partial x or partial y derivatives. 'Kind' refers to
# which of these is requested. Numerical encodings for 'kind'
# permit some arithmetical simplicity and generality
# (utilizing a number associated with the kind in a single
# equation, rather than having different sets of equations
# for each kind case).
self._kindDictionary = {}
self._kindDictionary['h'] = 0
self._kindDictionary['dx'] = 1
self._kindDictionary['dy'] = 2
def j2nm(self,j):
n = np.ceil((-3+np.sqrt(9+8*j))/2)
m = 2*j-n*(n+2)
return np.int(n),np.int(m)
def nm2j(self,n,m):
return np.int(n*(n+1)/2.0+(n+m)/2.0)
def _zeqn(self,n,m,kind='h',forceRecompute=False):
"""Return parameters sufficient for specifying a Zernike term
of desired order and azimuthal frequency.
Given an order (or degree) n and azimuthal frequency f, and x-
and y- rectangular (Cartesian) coordinates, produce parameters
necessary for constructing the appropriate Zernike
representation.
An individual polynomial has the format:
$$ Z_n^m = \sqrt{c} \Sigma^j\Sigma^k [a_{jk}X^jY^k] $$
This function returns a tuple ($c$,cdict). $c$ is the square
of the normalizing coefficient $\sqrt{c}$, and cdict contains
key-value pairs (($j$,$k$),$a$), mapping the $X$ and $Y$
exponents ($j$ and $k$, respectively) onto polynomial term
coefficients ($a$). The resulting structure can be used to
compute the wavefront height or slope for arbitrary pupil
coordinates, or to generate string representations of the
polynomials.
Zernike terms are only defined when n and m have the same
parity (both odd or both even).
Please see Schwiegerling lecture notes in
/doc/supporting_docs/ for eqn. references.
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
kind (str): 'h', 'dx', or 'dy', for height, partial x
derivative (slope) or partial y derivative,
respectively.
Returns:
params (tuple): (c,cdict), with c being the normalizing
coefficient c and cdict being the map of exponent pairs
onto inner coefficients.
"""
absm = np.abs(m)
kindIndex = self._kindDictionary[kind.lower()]
if USE_CACHE_FILE:
# open cache file in append mode:
self._cacheHandle = file(self._cachefn,'a')
# check that n and m are both even or both odd
if (float(n-absm))%2.0:
errString = 'zernike._zgen error: ' + \
'parity of n and m are different; n = %d, m = %d'%(n,m)
sys.exit(errString)
# check that n is non-negative:
if n<0:
errString = 'zernike._zgen error: ' + \
'n must be non-negative; n = %d'%n
sys.exit(errString)
# $|m|$ must be less than or equal to $n$.
if abs(m)>n:
errString = 'zernike._zgen error: ' + \
'|m| must be less than or equal to n, but n=%d and m=%d.'%(n,m)
sys.exit(errString)
# These are the squares of the outer coefficients. It's useful
# to keep them this way for _convertToString, since we'd
# prefer to print the $\sqrt{}$ rather than a truncated irrational
# number.
if m==0:
outerCoef = n+1
else:
outerCoef = 2*(n+1)
srange = range((n-absm)/2+1)
cdict = {}
for s in srange:
jrange = range(((n-absm)/2)-s+1)
for j in jrange:
# Subtract 1 from absm to determine range,
# only when m<0.
if m<0:
krange = range((absm-1)/2+1)
else:
krange = range(absm/2+1)
for k in krange:
# If m==0, k must also be 0;
# see eqn. 13c, 19c, and 20c, each of which
# only sum over s and j, not k.
if m==0:
assert(k==0)
# For m==0 cases, n/2 is used in coef denominator. Make
# sure that n is even, or else n/2 is not well-defined
# because n is an integer.
if m==0:
assert n%2==0
# Check to see if calculations are cached.
# If so, use cached values; if not, recalculate.
cached = self._termDict.has_key((n,m,kindIndex,s,j,k))
if cached and not forceRecompute:
t1,t2,t3,c,tXexp,tYexp = self._termDict[(n,m,kindIndex,s,j,k)]
else:
# The coefficient for each term in this
# polynomial has the format: $$\frac{t1n}{t1d1
# t1d2 t1d3} t2 t3$$. These six terms are
# computed here.
t1n = ((-1)**(s+k))*fact(n-s)
t1d1 = fact(s)
t1d2 = fact((n + absm)/2-s)
t1d3 = fact((n - absm)/2-s)
t1 = t1n/(t1d1*t1d2*t1d3)
t2 = choose((n - absm)/2 - s, j)
t3 = choose(absm, 2*k + (m<0))
if kind.lower()=='h':
# The (implied) coefficient of the $X^a Y^b$
# term at the end of eqns. 13a-c.
c = 1
tXexp = n - 2*(s+j+k) - (m<0)
tYexp = 2*(j+k) + (m<0)
elif kind.lower()=='dx':
# The coefficient of the $X^a Y^b$ term at
# the end of eqns. 19a-c.
c = (n - 2*(s+j+k) - (m<0))
# Could cacluate explicitly:
# $tXexp = X^{(n - 2*(s+j+k)- 1 - (m<0))}$
#
# However, piggy-backing on previous
# calculation of c speeds things up.
tXexp = c - 1
tYexp = 2*(j+k) + (m<0)
elif kind.lower()=='dy':
# The coefficient of the $X^a Y^b$ term at
# the end of eqns. 20a-c.
c = 2*(j+k) + (m<0)
tXexp = n - 2*(s+j+k) - (m<0)
tYexp = c - 1
else:
errString = 'zernike._zgen error: ' + \
'invalid kind \'%s\'; should be \'h\', \'dx\', or \'dy\'.'%kind
sys.exit(errString)
if not cached and USE_CACHE_FILE:
self._cacheHandle.write('%d\t'*12%(n,m,kindIndex,s,j,k,t1,t2,t3,c,tXexp,tYexp)+'\n')
ct123 = c*t1*t2*t3
# The key for the polynomial dictionary is the pair of X,Y
# coefficients.
termKey = (tXexp,tYexp)
# Leave this term out of the dictionary if its coefficient
# is 0.
if ct123:
# If we already have this term, add to its coefficient.
if cdict.has_key(termKey):
cdict[termKey] = cdict[termKey] + ct123
# If not, add it to the dictionary.
else:
cdict[termKey] = ct123
# Remove zeros to speed up computations later.
cdict = {key: value for key, value in cdict.items() if value}
return (outerCoef,cdict)
def _convertToString(self,params):
"""Return a string representation of a Zernike polynomial.
This function takes a tuple, consisting of a squared
normalizing coefficient and dictionary of inner coefficients
and exponents, provided by _zeqn, and returns a string
representation of the polynomial, with LaTeX- style markup.
Example: a params of (10, {(3,4): 7, (2,5): -1}) would produce a
two-term polynomial '\sqrt{10} [7 X^3 Y^4 - X^2 Y^5]', which could be used in LaTeX,
pandoc, markdown, MathJax, or Word with MathType, to produce:
$$ \sqrt{10} [7 X^3 Y^4 - X^2 Y^5] $$
Args:
params (tuple): A pair consisting of an outer coefficient
$c$ and a dictionary mapping tuples (xexp,yexp) of
exponents onto the corresponding term coefficients.
Returns:
string: A string representation of the polynomial.
"""
c = params[0]
cdict = params[1]
keys = sorted(cdict.keys(), key=lambda tup: (tup[0]+tup[1],tup[0]))[::-1]
outstr = ''
firstKey = True
for key in keys:
coef = cdict[key]
if coef>0:
sign = '+'
else:
sign = '-'
coef = abs(coef)
if coef<0 or not firstKey:
outstr = outstr + '%s'%sign
if coef>1 or (key[0]==0 and key[1]==0):
outstr = outstr + '%d'%coef
if key[0]:
outstr = outstr + 'X^{%d}'%key[0]
if key[1]:
outstr = outstr + 'Y^{%d}'%key[1]
firstKey = False
outstr = outstr + ' '
outstr = outstr.strip()
if np.sqrt(float(c))%1.0<.00001:
cstr = '%d'%(np.sqrt(c))
else:
cstr = '\sqrt{%d}'%(c)
if len(outstr):
outstr = '%s [%s]'%(cstr,outstr)
else:
outstr = '%s'%(cstr)
return outstr
def _convertToSurface(self,params,X,Y,mask=None):
"""Return a phase map specified by a Zernike polynomial.
This function takes a tuple, consisting of a squared
normalizing coefficient and dictionary of inner coefficients
and exponents, provided by _zeqn, and x- and y- rectangular
(Cartesian) coordinates, and produces a phase map.
This function works by evaluating the polynomial expressed by
params at each coordinate specified by X and Y.
Args:
params (tuple): A pair consisting of an outer coefficient
$c$ and a dictionary mapping tuples (xexp,yexp) of
exponents onto the corresponding term coefficients.
X (float): A scalar, vector, or matrix of X coordinates in unit pupil.
Y (float): A scalar, vector, or matrix of Y coordinates in unit pupil.
kind (str): 'h', 'dx', or 'dy', for height, partial x derivative (slope)
or partial y derivative, respectively.
Returns:
float: height, dx, or dy; returned structure same size as X and Y.
"""
# Check that shapes of X and Y are equal (not necessarily square).
if not (X.shape[0]==Y.shape[0] and \
X.shape[1]==Y.shape[1]):
errString = 'zernike.getSurface error: ' + \
'X and Y must have the same shape, but X is %d x %d'%(X.shape[0],X.shape[1]) + \
'and Y is %d x %d'%(Y.shape[0],Y.shape[1])
sys.exit(errString)
if mask is None:
mask = np.ones(X.shape)
params = self._zeqn(n,m,kind)
normalizer = np.sqrt(params[0])
matrix_out = np.zeros(X.shape)
for item in params[1].items():
matrix_out = matrix_out + item[1] * X**(item[0][0]) * Y**(item[0][1])
matrix_out = matrix_out * np.sqrt(normalizer)
matrix_out = matrix_out * mask
return matrix_out
def getSurface(self,n,m,X,Y,kind='h',mask=None):
"""Return a phase map specified by a Zernike order and azimuthal frequency.
Given an order (or degree) n and azimuthal frequency f, and x- and y-
rectangular (Cartesian) coordinates, produce a phase map of either height,
partial x derivative, or partial y derivative.
Zernike terms are only defined when n and m have the same parity (both odd
or both even).
The input X and Y values should be located inside a unit pupil, such that
$$\sqrt{X^2 + Y^2}\leq 1$$
Please see Schwiegerling lecture notes in /doc/supporting_docs/ for eqn.
references.
This function works by calling Zernike._zeqn to calculate the coefficients
and exponents of the polynomial, and then using the supplied X and Y
coordinates to produce the height map (or partial derivative).
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
X (float): A scalar, vector, or matrix of X coordinates in unit pupil.
Y (float): A scalar, vector, or matrix of Y coordinates in unit pupil.
kind (str): 'h', 'dx', or 'dy', for height, partial x derivative (slope)
or partial y derivative, respectively.
Returns:
float: height, dx, or dy; returned structure same size as X and Y.
"""
# Check that shapes of X and Y are equal (not necessarily square).
if not np.all(X.shape==Y.shape):
errString = 'zernike.getSurface error: ' + \
'X and Y must have the same shape, but X is %d x %d'%(X.shape[0],X.shape[1]) + \
'and Y is %d x %d'%(Y.shape[0],Y.shape[1])
sys.exit(errString)
if mask is None:
mask = np.ones(X.shape)
params = self._zeqn(n,m,kind)
normalizer = np.sqrt(params[0])
matrix_out = np.zeros(X.shape)
for item in params[1].items():
matrix_out = matrix_out + item[1] * X**(item[0][0]) * Y**(item[0][1])
matrix_out = matrix_out * normalizer
matrix_out = matrix_out * mask
return matrix_out
def getEquationString(self,n,m,kind='h',doubleDollar=False):
"""Return LaTeX-encoded of the Zernike polynomial specified by
order n, frequency m.
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
kind (str): 'h', 'dx', or 'dy', for height, partial x
derivative (slope) or partial y derivative,
respectively.
doubleDollar (bool): determines how to bookend the
polynomial string; True causes bookending with '$$', to
produce "display" math mode, whereas False would produce
a string suitable for inline use.
Returns:
str: a LaTeX representation of the Zernike polynomial
specified by n, m, and Kind.
"""
params = self._zeqn(n,m,kind)
rightString = self._convertToString(params)
if kind.lower()=='h':
leftString = 'Z^{%d}_{%d}'%(m,n)
elif kind.lower()=='dx':
leftString = '\\frac{\delta Z^{%d}_{%d}}{\delta x}'%(m,n)
elif kind.lower()=='dy':
leftString = '\\frac{\delta Z^{%d}_{%d}}{\delta y}'%(m,n)
else:
sys.exit('zernike.getEquationString: invalid kind %s'%kind)
if doubleDollar:
bookend = '$$'
else:
bookend = '$'
return '%s %s = %s %s'%(bookend,leftString,rightString,bookend)
def plotPolynomial(self,n,m,kind='h'):
"""Plot a polynomial surface specified by order n, frequency m, and kind.
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
kind (str): 'h', 'dx', or 'dy', for height, partial x
derivative (slope) or partial y derivative,
respectively.
Calling function/script required to provide a plotting context (e.g. pyplot.figure).
"""
from mpl_toolkits.mplot3d import Axes3D
N = 64
mask = np.zeros((N,N))
xx,yy = np.meshgrid(np.linspace(-1,1,N),np.linspace(-1,1,N))
d = np.sqrt(xx**2 + yy**2)
mask[np.where(d<1)] = 1
surface = self.getSurface(n,m,xx,yy,kind,mask)
surface = surface * mask
#plt.figure()
ax = plt.axes([0,.2,1,.8],projection='3d')
surf = ax.plot_wireframe(xx,yy,surface,rstride=1,cstride=1,color='k')
ax.view_init(elev=70., azim=40)
eqstr = self.getEquationString(n,m,kind)
eqstr = splitEquation(eqstr,160,'$')
print 'plotting %s'%eqstr
plt.axes([0,0,1,.2])
plt.xticks([])
plt.yticks([])
plt.box('off')
fontsize = 12
plt.text(0.5,0.5,eqstr,ha='center',va='center',fontsize=fontsize)
| gpl-2.0 | 5,358,316,222,424,130,000 | 33.732639 | 112 | 0.517845 | false | 3.833301 | false | false | false |
dongguangming/django-books | models.py | 1 | 1560 | #import all of the things we will be using
from django.db import models
from tagging.fields import TagField
# to help with translation of field names
from django.utils.translation import ugettext_lazy as _
# to have a generic foreign key for any model
from django.contrib.contenttypes import generic
# stores model info so this can be applied to any model
from django.contrib.contenttypes.models import ContentType
class Book(models.Model):
"""
The details of a Book
"""
# fields that describe this book
name = models.CharField(_('name'), max_length=48)
isbn = models.CharField(_('isbn'), max_length=16)
url = models.URLField(_('url'), verify_exists=False, blank=True)
description = models.TextField(_('description'))
# to add to any model
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type',
'object_id')
# for the list of tags for this book
tags = TagField()
# misc fields
deleted = models.BooleanField(default=0)
created = models.DateTimeField(auto_now_add=True)
# so that {{book.get_absolute_url}} outputs the whole url
@models.permalink
def get_absolute_url(self):
return ("book_details", [self.pk])
# outputs name when printing this object as a string
def __unicode__(self):
return self.name
| bsd-3-clause | 7,885,029,974,770,089,000 | 36.04878 | 76 | 0.633974 | false | 4.309392 | false | false | false |
taarifa/taarifa_backend | taarifa_backend/models.py | 1 | 4325 | import datetime
from flask_security import RoleMixin, UserMixin
from flask_mongoengine.wtf import model_form
from taarifa_backend import db
fieldmap = {
'BinaryField': db.BinaryField,
'BooleanField': db.BooleanField,
'ComplexDateTimeField': db.ComplexDateTimeField,
'DateTimeField': db.DateTimeField,
'DecimalField': db.DecimalField,
'DictField': db.DictField,
'DynamicField': db.DynamicField,
'EmailField': db.EmailField,
'EmbeddedDocumentField': db.EmbeddedDocumentField,
'FileField': db.FileField,
'FloatField': db.FloatField,
'GenericEmbeddedDocumentField': db.GenericEmbeddedDocumentField,
'GenericReferenceField': db.GenericReferenceField,
'GeoPointField': db.GeoPointField,
'ImageField': db.ImageField,
'IntField': db.IntField,
'ListField': db.ListField,
'MapField': db.MapField,
'ObjectIdField': db.ObjectIdField,
'ReferenceField': db.ReferenceField,
'SequenceField': db.SequenceField,
'SortedListField': db.SortedListField,
'StringField': db.StringField,
'URLField': db.URLField,
'UUIDField': db.UUIDField,
}
class Field(db.EmbeddedDocument):
"""Field in a :class:`Service`."""
db_field = db.StringField(default=None)
required = db.BooleanField(default=False)
default = db.DynamicField(default=None)
unique = db.BooleanField(default=False)
unique_with = db.DynamicField(default=None)
primary_key = db.BooleanField(default=False)
choices = db.DynamicField(default=None)
help_text = db.StringField(default=None)
verbose_name = db.StringField(default=None)
class Service(db.Document):
"""A service schema served by the API."""
meta = {'strict': False}
name = db.StringField(required=True)
fields = db.DictField(required=True)
description = db.StringField()
group = db.StringField()
keywords = db.ListField(db.StringField())
protocol_type = db.StringField()
service_name = db.StringField(required=True)
service_code = db.StringField(required=True, unique=True)
def build_schema(service):
build_field = lambda d: fieldmap[d.pop('type')](**d)
return type(str(service.name), (Report,),
dict(description=service.description,
group=service.group,
keywords=service.keywords,
protocol_type=service.protocol_type,
service_name=service.service_name,
service_code=service.service_code,
meta={'allow_inheritance': True},
**dict((k, build_field(v)) for k, v in service.fields.items()))
)
class Metadata(object):
"""
Description of a service
"""
def __init__(self, service_code, service_name, description, group=None):
self.service_code = service_code
self.service_name = service_name
self.description = description
self.group = group
def __repr__(self):
args = [self.service_code, self.service_name, self.description, self.group]
return 'Metadata(%s)' % ', '.join(map(str, args))
class Report(db.Document):
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
latitude = db.FloatField(required=True)
longitude = db.FloatField(required=True)
meta = {'allow_inheritance': True, 'strict': False}
ReportForm = model_form(Report, exclude=['created_at'])
class Role(db.Document, RoleMixin):
name = db.StringField(max_length=80, unique=True)
description = db.StringField(max_length=255)
class User(db.Document, UserMixin):
email = db.StringField(max_length=255, unique=True)
password = db.StringField(max_length=255)
active = db.BooleanField(default=True)
confirmed_at = db.DateTimeField()
roles = db.ListField(db.ReferenceField(Role), default=[])
def get_available_services():
return [build_schema(o) for o in Service.objects]
def get_service_class(service_code):
try:
return build_schema(Service.objects.get(service_code=service_code))
except Service.DoesNotExist:
return Report
def get_form(service_code):
return model_form(get_service_class(service_code), exclude=['created_at'])
def clear_database():
for cls in [Report, Role, User]:
cls.drop_collection()
| bsd-3-clause | 7,360,685,039,675,874,000 | 30.801471 | 84 | 0.672139 | false | 3.854724 | false | false | false |
gochaorg/mailscripts | emailer/mailer.py | 1 | 12253 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import smtplib
import os
import re
import sys
import imaplib
import email
import tempfile
import shutil
import datetime
import quopri
import base64
import hashlib
import quopri
import base64
import hashlib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email import Encoders
class Mailer:
"""Класс по работе с электронной почтой"""
addrTo = []
'''Список адресатов'''
addrFrom = False
login = False
subject = 'Без темы'
subjectTmpl = '{subject} {part}/{total}'
text = ''
attach = []
smtpHost = False
smtpPort = 25
useSSL = False
useTLS = False
smtpPassword = False
verbose = False
timeout = 30
split = False
imapHost = False
imapPort = 143
#md5 = False
imapHost = False
imapPort = 143
imapPassword = False
imapSSL = False
def timeString( self, d ):
"""Возвращает текстовое представление времени
d - Дата"""
timeStr = "{year}-{month:#02}-{day:#02}_{hour:#02}-{minute:#02}-{second:#02}".format(
year=d.year,
month=d.month,
day=d.day,
hour=d.hour,
minute=d.minute,
second=d.second )
return timeStr
def log(self,text):
'''Лог - Выводил текст (text)'''
if self.verbose:
print text
def serr(self,text):
"""Лог - ошибка состояния объекта, text - описание"""
print 'Ошибка состояния объекта = '+text
def exception(self,ex):
"""Лог - исключительная ситуация, ex - описание"""
print 'Ошибка почты {err}'.format( err=ex )
def attachFile(self,msg,fileName):
"""Присоединяет файл (fileName) к сообщению (msg)"""
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(fileName, 'rb').read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition','attachment; filename="%s"' % os.path.basename(fileName))
msg.attach(part)
return True
def splitFile(self,fileName):
"""Разделяет файл (fileName) на куски (формат 7z) во временную директорию.
Возвращает путь временной директории, после использования директории следует самостоятельно ее удалить.
Если не получилось разделить (ошибка), то вернет False"""
tmpDir = tempfile.mkdtemp('mailer')
verb = ''
if not self.verbose:
verb = '1>/dev/null'
cmd = "7z a -v{volsize} '{arcfile}' '{sendfile}' {verbose}".format(
volsize=self.split,
arcfile='{tmpdir}/{basename}.7z'.format(
tmpdir=tmpDir,
basename=os.path.basename(fileName),
),
sendfile=fileName,
verbose=verb
)
cmd = cmd.replace( "(", "\(" ).replace( ")","\)" )
result = os.system( cmd )
if result==0:
return tmpDir
else:
return False
def sendParts(self,tmpDir,srcFilePath):
"""Отсылает файлы указанной директории (tmpDir) отдельными письмами.
Возвращает кол-вот отправленых писем."""
def mf( msg ):
return lambda: msg.makeMessage()
succCount = 0
messages = []
for dirpath, dirnames, filenames in os.walk(tmpDir):
count = len(filenames)
idx = 0
for filename in filenames:
idx = idx + 1
filepath = os.path.join( dirpath,filename )
date_ = datetime.datetime.now()
tmpl = self.subjectTmpl
subject = tmpl.format(
subject=self.subject,
part=idx,
total=count,
date=self.timeString(date_),
filepath=srcFilePath,
filename=os.path.basename(srcFilePath),
attachpath=filepath,
attachname=os.path.basename(filepath))
m = Mailer()
m.addrTo = self.addrTo
m.addrFrom = self.addrFrom
m.subject = subject
m.text = self.text
m.attach = filepath
m.smtpHost = self.smtpHost
m.smtpPort = self.smtpPort
m.useSSL = self.useSSL
m.useTLS = self.useTLS
m.smtpPassword = self.smtpPassword
m.verbose = self.verbose
m.timeout = self.timeout
m.split = False
#msg = m.makeMessage()
msg = mf( m )
if not isinstance(msg,bool):
messages.append( msg )
succ = self.sendMailMessage( messages )
if succ:
return len(messages)
return 0
def send(self):
"""Отправляет письмо на почту.
Если указано разделять вложения на части - то отправит несколько писем.
Возвращает кол-во отправленых писем."""
if isinstance(self.split,str):
count = 0
if isinstance(self.attach,(list,tuple)):
if len(self.attach)>0:
for attc in self.attach:
if os.path.isfile( attc ):
tmpDir = self.splitFile( attc )
if os.path.isdir(tmpDir):
count = count + self.sendParts(tmpDir,attc)
shutil.rmtree(tmpDir)
pass
elif os.path.isfile( self.attach ):
tmpDir = self.splitFile( self.attach )
if os.path.isdir(tmpDir):
count = count + self.sendParts(tmpDir,self.attach)
shutil.rmtree(tmpDir)
self.log( 'Отправлено {counter} писем'.format(counter=count) )
return count>0
return self.sendMail()
def sendMailMessage(self,msg):
"""Отправляет сообщения (msg) на почту.
msg - либо список сообщений (объекты MIMEMultipart / набор функций(без аргументов) - возвращающие MIMEMultipart)
/ либо отдельный объект MIMEMultipart.
Возвращает True - успешно / False - Ошибка отправки.
"""
try:
if self.useSSL:
self.log( 'Соединение по SSL, хост={host} порт={port}'.format( host=self.smtpHost,port=self.smtpPort ) )
mailServer = smtplib.SMTP_SSL( self.smtpHost, self.smtpPort, timeout=float(self.timeout) )
else:
self.log( 'Соединение, хост={host} порт={port}'.format( host=self.smtpHost,port=self.smtpPort ) )
mailServer = smtplib.SMTP( self.smtpHost, self.smtpPort, timeout=float(self.timeout) )
self.log( 'Команда EHLO' )
mailServer.ehlo()
if self.useTLS:
self.log( 'Команда STARTTLS' )
mailServer.starttls()
_login_ = self.login
if _login_ != False:
_login_ = self.addrFrom
self.log( 'Команда LOGIN, логин={login}'.format(login=_login_) )
mailServer.login( _login_, self.smtpPassword )
if isinstance(msg,(tuple,list)):
for message in msg:
m = message
if hasattr(message, '__call__'):
m = message()
if not isinstance(m,bool):
self.log( 'Отправка письма, адресат:{to} тема:{subj}'.format(
to=m['To'],
subj=m['Subject']
) )
mailServer.sendmail(self.addrFrom, m['To'], m.as_string())
else:
self.log( 'Отправка письма, адресат:{to} тема:{subj}'.format(
to=msg['To'],
subj=msg['Subject']
) )
mailServer.sendmail(self.addrFrom, msg['To'], msg.as_string())
self.log( 'Закрытие соединения' )
mailServer.close()
self.log( 'Письмо отправлено' )
return True
except smtplib.SMTPException as e:
print 'Ошибка почты {err}'.format( err=e )
return False
def makeMessage(self):
"""Создает сообщение - объект MIMEMultipart и возвращает его."""
msg = MIMEMultipart()
if not isinstance(self.addrFrom,str):
self.serr( 'Не указан отправитель - addrFrom не строка' )
return False
msg['From'] = self.addrFrom
if isinstance(self.addrTo,(str,unicode)):
msg['To'] = self.addrTo
elif isinstance(self.addrTo,(list,tuple)):
if len(self.addrTo)==0:
self.serr( 'Не указан адресат - len(addrTo) = 0' )
return False
msg['To'] = ', '.join( self.addrTo )
else:
self.serr( 'addrTo не строка / список' )
return False
if isinstance(self.subject,(str,unicode)):
msg['Subject'] = self.subject
else:
self.serr( 'Не указана тема - subject не строка' )
return False
if isinstance(self.text,(str,unicode)):
msg.attach( MIMEText(self.text) )
else:
self.serr( 'text не строка' )
return False
if isinstance(self.attach,(list,tuple)):
for attc in self.attach:
self.attachFile( msg, attc )
elif os.path.exists( self.attach ):
self.attachFile( msg, self.attach )
return msg
def sendMail(self):
"""Отправляет отдельное письмо.
Если сообщение создано удачно и письмо отправлено вернет - True.
Если возникли проблемы - то вернет False."""
msg = self.makeMessage()
if isinstance(msg,bool):
return msg
return self.sendMailMessage( msg )
def imapWork(self,workFun):
"""Соединяется с сервером imap, производит login и передает управление функции workFun( m )
m - Объект imaplib.IMAP4. После завершению работы workFun завершает работу с imap."""
if not self.imapHost:
self.serr( 'Не указан параметр imap (imapHost)' )
return False
if not self.imapPort:
self.serr( 'Не указан параметр imap (imapPort)' )
return False
if not self.imapPassword:
self.serr( 'Не указан параметр password (imapPassword)' )
return False
if not self.addrFrom:
self.serr( 'Не указан параметр from (addrFrom)' )
return False
mail = None
if self.imapSSL:
self.log( 'Соединение с imap по ssl {host}:{port}'.format(host=self.imapHost,port=self.imapPort) )
mail = imaplib.IMAP4_SSL(self.imapHost,self.imapPort)
else:
self.log( 'Соединение с imap {host}:{port}'.format(host=self.imapHost,port=self.imapPort) )
mail = imaplib.IMAP4(self.imapHost,self.imapPort)
self.log( 'Команда LOGIN, логин={login}'.format(login=self.addrFrom) )
mail.login(self.addrFrom,self.imapPassword)
workFun( mail )
self.log( 'Завершение работы с imap' )
mail.logout()
return True
def decode_m_utf7(self,s):
r = []
decode = []
for c in s:
if c == '&' and not decode:
decode.append('&')
elif c == '-' and decode:
if len(decode) == 1:
r.append('&')
else:
r.append(self.modified_unbase64(''.join(decode[1:])))
decode = []
elif decode:
decode.append(c)
else:
r.append(c)
if decode:
r.append(self.modified_unbase64(''.join(decode[1:])))
out = ''.join(r)
if not isinstance(out, unicode):
out = unicode(out, 'latin-1')
return out
def modified_base64(self,s):
s_utf7 = s.encode('utf-7')
return s_utf7[1:-1].replace('/', ',')
def modified_unbase64(self,s):
s_utf7 = '+' + s.replace(',', '/') + '-'
return s_utf7.decode('utf-7')
def encode_m_utf7(s):
if isinstance(s, str) and sum(n for n in (ord(c) for c in s) if n > 127):
raise FolderNameError("%r contains characters not valid in a str folder name. "
"Convert to unicode first?" % s)
r = []
_in = []
for c in s:
if ord(c) in (range(0x20, 0x26) + range(0x27, 0x7f)):
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
del _in[:]
r.append(str(c))
elif c == '&':
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
del _in[:]
r.append('&-')
else:
_in.append(c)
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
return ''.join(r)
def list(self):
"""Просматривает список ящиков на сервере imap"""
def listwf(mail):
self.log( 'Команда LIST' )
res = mail.list()
if isinstance(res,(list,tuple)):
if len(res)>1 and res[0]=='OK' and isinstance(res[1],(list,tuple)):
for item in res[1]:
print self.decode_m_utf7( item )
succ = self.imapWork( listwf )
return succ | mit | -74,163,532,198,789,380 | 26.899743 | 114 | 0.656008 | false | 2.316823 | false | false | false |
nihlaeth/HorseLife | interface/cli/display.py | 1 | 7394 | """Interface to inherit from for *Display classes."""
from textwrap import fill
from core.core import Core
from core.messagecore import MessageCore
from core.stablecore import StableCore
from core.pasturecore import PastureCore
from core.towncore import TownCore
from core.contractercore import ContracterCore
from support.messages.quit import Quit
from support.messages.back import Back
from support.messages.meter import Meter
from support.messages.action import Action
from errors.invalidchoice import InvalidChoice
# pylint: disable=too-many-instance-attributes
class Display(object):
"""Common methods to inherit from by *Display classes."""
def __init__(self):
"""Initiate with only static data."""
self._screen_width = 70
self._separator = self._repeat('-', self._screen_width)
self._title = "You should not be seeing this"
self._description = "This class is not to be called directly."
self._actions = None
self._menu = None
self._info = None
self._story = None
self._level = None
self._core = None
self._choice = None
def init(self, session):
"""Get data from core."""
self._info = self._core.get_info(session)
self._level = self._core.get_level(session)
self._story = self._core.get_story(session)
self._actions = self._core.get_actions(session)
self._menu = self._core.get_menu()
self._choice = Display.display(self, self._level)
def display(self, level=0):
"""Display screen and return user choice (class)."""
print self._format_title()
print ''.join([self._wrap_text(self._description), "\n\n"])
call_list = []
count = 0
for string in self._info:
if isinstance(string, Meter):
print self._meter(string)
elif isinstance(string, Action):
print "".join([str(count), ") ", str(string)])
call_list.append(string)
count += 1
else:
print self._wrap_text(str(string))
print "\n\n"
if self._story is not None:
print self._separator
print self._wrap_text(self._story.text)
print "".join([str(count), ") ", str(self._story.action)])
call_list.append(self._story.action)
count += 1
print self._separator
for action in self._actions:
if isinstance(action, Action):
if action.min_level <= level:
print self._wrap_text(''.join([
str(count),
") ",
str(action)]))
call_list.append(action)
count += 1
else:
print self._wrap_text(''.join([
str(count),
") ",
str(action)]))
call_list.append(action)
count += 1
print ''.join(["\n\n", self._separator, "\n\n"])
for item in self._menu:
print self._wrap_text(''.join([str(count), ") ", str(item)]))
call_list.append(item)
count += 1
choice = self._get_int(count)
return call_list[choice]
def hide(self):
"""Just a placeholder."""
pass
def _repeat(self, string, num):
"""Repeat string num times and return it."""
return ''.join([str(string) for _ in range(num)])
def _format_title(self):
"""Format the page title and return it."""
frame = self._repeat("=", self._screen_width)
whitespace = len(frame) - 6 - len(self._title)
leading_whitespace = whitespace / 2
trailing_whitespace = (whitespace / 2 if whitespace % 2 == 0
else whitespace / 2 + 1)
header = ''.join([
"===",
self._repeat(" ", leading_whitespace),
self._title,
self._repeat(" ", trailing_whitespace),
"==="])
return ''.join([frame, "\n", header, "\n", frame])
def _get_int(self, limit, prompt="Choice: "):
"""Get an integer between 0 and limit from the user and return it.
Arguments:
limit -- the upper limit (exclusive)
promt -- text to be displayed to the user
"""
try:
response = int(raw_input(prompt))
except ValueError:
response = -1
while response < 0 or response >= limit:
print "Invalid choice, try again."
try:
response = int(raw_input(prompt))
except ValueError:
pass
return response
def get_string(self, min_length, prompt):
"""Get a str of min min_length characters from user and return it.
Arguments:
min_length -- the minimum string length
promt -- text to be displayed to the user
"""
response = raw_input(prompt)
while len(response) < min_length:
print ''.join([
"I need at least ",
str(min_length),
" characters."])
response = raw_input(prompt)
return response
def _wrap_text(self, text):
"""Wrap text to screen width while preserving paragraphs."""
paragraphs = text.split("\n")
return '\n'.join([fill(p, self._screen_width) for p in paragraphs])
def _meter(self, meter):
"""Return a graphical meter."""
percent_filled = float(meter.percent) / 100.
if meter.percent < 0:
percent_filled = 0
columnsfilled = int((self._screen_width - 2) * percent_filled)
return ''.join([
"[",
self._repeat("=", columnsfilled),
self._repeat(" ", self._screen_width - columnsfilled - 2),
"]"])
def choice(self, result):
"""Handle user choice on this end."""
if result is None:
return self.display()
elif isinstance(result, Core):
if isinstance(result, StableCore):
from stabledisplay import StableDisplay
next_display = StableDisplay(result)
elif isinstance(result, TownCore):
from towndisplay import TownDisplay
next_display = TownDisplay(result)
elif isinstance(result, PastureCore):
from pasturedisplay import PastureDisplay
next_display = PastureDisplay(result)
elif isinstance(result, MessageCore):
from messagedisplay import MessageDisplay
next_display = MessageDisplay(result)
elif isinstance(result, ContracterCore):
from contracterdisplay import ContracterDisplay
next_display = ContracterDisplay(result)
else:
raise InvalidChoice(result)
next_action = next_display.display()
if isinstance(next_action, Back):
return self.display()
elif isinstance(next_action, Quit):
return next_action
else:
raise InvalidChoice(result)
elif isinstance(result, Back) or isinstance(result, Quit):
return result
else:
raise InvalidChoice(result)
| gpl-2.0 | -9,055,528,757,811,274,000 | 34.37799 | 75 | 0.545983 | false | 4.564198 | false | false | false |
zojoncj/cleanthehead | nsnitro/nsresources/nssslvserver.py | 1 | 23417 | from nsbaseresource import NSBaseResource
__author__ = 'Aleksandar Topuzovic'
class NSSSLVServer(NSBaseResource):
def __init__(self, json_data=None):
"""
Supplied with json_data the object can be pre-filled
"""
super(NSSSLVServer, self).__init__()
self.options = {'vservername': '',
'cipherdetails': '',
'cleartextport': '',
'dh': '',
'dhfile': '',
'dhcount': '',
'ersa': '',
'ersacount': '',
'sessreuse': '',
'sesstimeout': '',
'cipherredirect': '',
'crlcheck': '',
'cipherurl': '',
'sslv2redirect': '',
'sslv2url': '',
'clientauth': '',
'clientcert': '',
'sslredirect': '',
'redirectportrewrite': '',
'nonfipsciphers': '',
'ssl2': '',
'ssl3': '',
'tls1': '',
'snienable': '',
'service': '',
'certkeyname': '',
'servicename': '',
'ocspcheck': '',
'pushenctrigger': '' }
self.resourcetype = NSSSLVServer.get_resourcetype()
if not (json_data is None):
for key in json_data.keys():
if key in self.options.keys():
self.options[key] = json_data[key]
@staticmethod
def get_resourcetype():
"""
Binding object showing the lbmonitor that can be bound to service.
"""
return "sslvserver"
# Read/write properties
def set_vservername(self, vservername):
"""
The name of the SSL virtual server.
Minimum length = 1
"""
self.options['vservername'] = vservername
def get_vservername(self):
"""
The name of the SSL virtual server.
Minimum length = 1
"""
return self.options['vservername']
def set_cleartextport(self, cleartextport):
"""
The port on the back-end web-servers where the clear-text data
is sent by system. Use this setting for the wildcard IP based
SSL Acceleration configuration (*:443).
Minimum value = 1
"""
self.options['cleartextport'] = cleartextport
def get_cleartextport(self):
"""
The port on the back-end web-servers where the clear-text data
is sent by system. Use this setting for the wildcard IP based
SSL Acceleration configuration (*:443).
Minimum value = 1
"""
return self.options['cleartextport']
def set_dh(self, dh):
"""
The state of DH key exchange support for the specified SSL virtual server.
Default value: DISABLED
"""
self.options['dh'] = dh
def get_dh(self):
"""
The state of DH key exchange support for the specified SSL virtual server.
Default value: DISABLED
"""
return self.options['dh']
def set_dhfile(self, dhfile):
"""
The file name and path for the DH parameter. The file format is
PEM. Note: The '-dh' argument must be enabled if this argument
is specified.
Minimum length = 1
"""
self.options['dhfile'] = dhfile
def get_dhfile(self):
"""
The file name and path for the DH parameter. The file format is
PEM. Note: The '-dh' argument must be enabled if this argument
is specified.
Minimum length = 1
"""
return self.options['dhfile']
def set_dhcount(self, ersa):
"""
The refresh count for the re-generation of DH public-key and
private-key from the DH parameter. Zero means infinite usage
(no refresh). Note: The '-dh' argument must be enabled if this
argument is specified.
Default value: 0
Minimum value = 0
Maximum value = 65534
"""
self.options['dhcount'] = ersa
def get_dhcount(self):
"""
The refresh count for the re-generation of DH public-key and
private-key from the DH parameter. Zero means infinite usage
(no refresh). Note: The '-dh' argument must be enabled if this
argument is specified.
Default value: 0
Minimum value = 0
Maximum value = 65534
"""
return self.options['dhcount']
def set_ersa(self, ersa):
"""
The state of Ephemeral RSA key exchange support for the SSL
virtual server.
Default value: ENABLED
"""
self.options['ersa'] = ersa
def get_ersa(self):
"""
The state of Ephemeral RSA key exchange support for the SSL
virtual server.
Default value: ENABLED
"""
return self.options['ersa']
def set_ersacount(self, ersacount):
"""
The refresh count for the re-generation of RSA public-key and
private-key pair. Zero means infinite usage (no refresh) Note:
The '-eRSA' argument must be enabled if this argument is
specified.
Default value: 0
Minimum value = 0
Maximum value = 65534
"""
self.options['ersacount'] = ersacount
def get_ersacount(self):
"""
The refresh count for the re-generation of RSA public-key and
private-key pair. Zero means infinite usage (no refresh) Note:
The '-eRSA' argument must be enabled if this argument is
specified.
Default value: 0
Minimum value = 0
Maximum value = 65534
"""
return self.options['ersacount']
def set_sessreuse(self, sessreuse):
"""
The state of session re-use support for the SSL virtual server.
Default value: ENABLED
"""
self.options['sessreuse'] = sessreuse
def get_sessreuse(self):
"""
The state of session re-use support for the SSL virtual server.
Default value: ENABLED
"""
return self.options['sessreuse']
def set_sesstimeout(self, sesstimeout):
"""
The Session timeout value in seconds. The value has to be a
positive integer. The '-sessReuse' argument must be enabled if
this argument is specified.
Default value: 120
Minimum value = 0
Maximum value = 0xFFFFFFFE
"""
self.options['sesstimeout'] = sesstimeout
def get_sesstimeout(self):
"""
The Session timeout value in seconds. The value has to be a
positive integer. The '-sessReuse' argument must be enabled if
this argument is specified.
Default value: 120
Minimum value = 0
Maximum value = 0xFFFFFFFE
"""
return self.options['sesstimeout']
def set_cipherredirect(self, cipherredirect):
"""
The state of Cipher Redirect feature.
Default value: DISABLED
"""
self.options['cipherredirect'] = cipherredirect
def get_cipherredirect(self):
"""
The state of Cipher Redirect feature.
Default value: DISABLED
"""
return self.options['cipherredirect']
def set_cipherurl(self, cipherurl):
"""
The redirect URL to be used with the Cipher Redirect feature.
"""
self.options['cipherurl'] = cipherurl
def get_cipherurl(self, cipherurl):
"""
The redirect URL to be used with the Cipher Redirect feature.
"""
return self.options['cipherurl']
def set_sslv2redirect(self, sslv2redirect):
"""
The state of SSLv2 Redirect feature.
Default value: DISABLED
"""
self.options['sslv2redirect'] = sslv2redirect
def get_sslv2redirect(self):
"""
The state of SSLv2 Redirect feature.
Default value: DISABLED
"""
return self.options['sslv2redirect']
def set_sslv2url(self, sslv2url):
"""
The redirect URL to be used with SSLv2 Redirect feature.
"""
self.options['sslv2url'] = sslv2url
def get_sslv2url(self):
"""
The redirect URL to be used with SSLv2 Redirect feature.
"""
return self.options['sslv2url']
def set_clientauth(self, clientauth):
"""
The state of Client-Authentication support for the SSL virtual server.
Default value: DISABLED
"""
self.options['clientauth'] = clientauth
def get_clientauth(self):
"""
The state of Client-Authentication support for the SSL virtual server.
Default value: DISABLED
"""
return self.options['clientauth']
def set_clientcert(self, clientcert):
"""
The rule for client authentication. If the clientCert if set to
Mandatory, the system will terminate the SSL handshake if the
SSL client does not provide a valid certificate. If the setting
is Optional, then System will allow SSL clients with no
certificate or invalid certificates to access the secure
resource. Note: Make sure proper access control policies are
defined before changing the above setting to Optional.
"""
self.options['clientcert'] = clientcert
def get_clientcert(self):
"""
The rule for client authentication. If the clientCert if set to
Mandatory, the system will terminate the SSL handshake if the
SSL client does not provide a valid certificate. If the setting
is Optional, then System will allow SSL clients with no
certificate or invalid certificates to access the secure
resource. Note: Make sure proper access control policies are
defined before changing the above setting to Optional.
"""
return self.options['clientcert']
def set_sslredirect(self, sslredirect):
"""
The state of HTTPS redirects for the SSL virtual server. This
is required for proper working of the redirect messages from
the web server. The redirect message from the server gives the
new location for the moved object. This is contained in the
HTTP header field: Location (for example, Location:
http://www.moved.org/here.html). For an SSL session, if the
client browser receives this message, the browser will try to
connect to the new location. This will break the secure SSL
session, as the object has moved from a secure site (https://)
to an unsecured one (http://). Browsers usually flash a warning
message on the screen and prompt the user to either continue or
disconnect. When the above feature is enabled, all such http://
redirect messages are automatically converted to https://. This
does not break the client SSL session.
Default value: DISABLED
"""
self.options['sslredirect'] = sslredirect
def get_sslredirect(self):
"""
The state of HTTPS redirects for the SSL virtual server. This
is required for proper working of the redirect messages from
the web server. The redirect message from the server gives the
new location for the moved object. This is contained in the
HTTP header field: Location (for example, Location:
http://www.moved.org/here.html). For an SSL session, if the
client browser receives this message, the browser will try to
connect to the new location. This will break the secure SSL
session, as the object has moved from a secure site (https://)
to an unsecured one (http://). Browsers usually flash a warning
message on the screen and prompt the user to either continue or
disconnect. When the above feature is enabled, all such http://
redirect messages are automatically converted to https://. This
does not break the client SSL session.
Default value: DISABLED
"""
return self.options['sslredirect']
def set_redirectportrewrite(self, redirectportrewrite):
"""
The state of port in rewrite while performing HTTPS redirect.
Default value: DISABLED
"""
self.options['redirectportrewrite'] = redirectportrewrite
def get_redirectportrewrite(self):
"""
The state of port in rewrite while performing HTTPS redirect.
Default value: DISABLED
"""
return self.options['redirectportrewrite']
def set_nonfipsciphers(self, nonfipsciphers):
"""
The state of usage of non FIPS approved ciphers. Valid only for
an SSL vserver bound with a FIPS key and certificate.
Default value: DISABLED
"""
self.options['nonfipsciphers'] = nonfipsciphers
def get_nonfipsciphers(self):
"""
The state of usage of non FIPS approved ciphers. Valid only for
an SSL vserver bound with a FIPS key and certificate.
Default value: DISABLED
"""
return self.options['nonfipsciphers']
def set_ssl2(self, ssl2):
"""
The state of SSLv2 protocol support for the SSL virtual server.
Default value: DISABLED
"""
self.options['ssl2'] = ssl2
def get_ssl2(self):
"""
The state of SSLv2 protocol support for the SSL virtual server.
Default value: DISABLED
"""
return self.options['ssl2']
def set_ssl3(self, ssl3):
"""
The state of SSLv3 protocol support for the SSL virtual server.
Default value: ENABLED
"""
self.options['ssl3'] = ssl3
def get_ssl3(self):
"""
The state of SSLv3 protocol support for the SSL virtual server.
Default value: ENABLED
"""
return self.options['ssl3']
def set_tls1(self, tls1):
"""
The state of TLSv1 protocol support for the SSL virtual server.
Default value: ENABLED
"""
self.options['tls1'] = tls1
def get_tls1(self):
"""
The state of TLSv1 protocol support for the SSL virtual server.
Default value: ENABLED
"""
return self.options['tls1']
def set_snienable(self, snienable):
"""
state of SNI feature on virtual server.
Default value: DISABLED
"""
self.options['snienable'] = snienable
def get_snienable(self):
"""
state of SNI feature on virtual server.
Default value: DISABLED
"""
return self.options['snienable']
def set_pushenctrigger(self, pushenctrigger):
"""
PUSH packet triggering encryption Always - Any PUSH packet
triggers encryption Ignore - Ignore PUSH packet for triggering
encryption Merge - For consecutive sequence of PUSH packets,
last PUSH packet triggers encryption Timer - PUSH packet
triggering encryption delayed by timer period defined in 'set
ssl parameter' .
"""
self.options['pushenctrigger'] = pushenctrigger
def get_pushenctrigger(self):
"""
PUSH packet triggering encryption Always - Any PUSH packet
triggers encryption Ignore - Ignore PUSH packet for triggering
encryption Merge - For consecutive sequence of PUSH packets,
last PUSH packet triggers encryption Timer - PUSH packet
triggering encryption delayed by timer period defined in 'set
ssl parameter' .
"""
return self.options['pushenctrigger']
def set_cipherdetails(self, cipherdetails):
"""
Details of the individual ciphers bound to the SSL vserver.
Select this flag value to display the details of the individual
ciphers bound to the SSL vserver.
"""
self.options['cipherdetails'] = cipherdetails
def get_cipherdetails(self):
"""
Details of the individual ciphers bound to the SSL vserver.
Select this flag value to display the details of the individual
ciphers bound to the SSL vserver.
"""
return self.options['cipherdetails']
# Read only properties
def get_crlcheck(self):
"""
The state of the CRL check parameter. (Mandatory/Optional)
"""
return self.options['crlcheck']
def get_service(self):
"""
Service
"""
return self.options['service']
def get_certkeyname(self):
"""
The name of the certificate key pair binding.
"""
return self.options['certkeyname']
def get_servicename(self):
"""
Service name.
"""
return self.options['servicename']
def get_ocspcheck(self):
"""
The state of the OCSP check parameter. (Mandatory/Optional)
"""
return self.options['ocspcheck']
@staticmethod
def get(nitro, sslvserver):
"""
Use this API to fetch sslvserver resource of given name.
"""
__sslvserver = NSSSLVServer()
__sslvserver.get_resource(nitro, sslvserver.get_vservername())
return __sslvserver
@staticmethod
def get_all(nitro):
"""
Use this API to fetch all configured sslvserver resources.
"""
__url = nitro.get_url() + NSSSLVServer.get_resourcetype()
__json_sslvservers = nitro.get(__url).get_response_field(NSSSLVServer.get_resourcetype())
__sslvservers = []
for json_sslvserver in __json_sslvservers:
__sslvservers.append(NSSSLVServer(json_sslvserver))
return __sslvservers
@staticmethod
def update(nitro, sslvserver):
"""
Use this API to update sslvserver of a given name.
"""
__sslvserver = NSSSLVServer()
__sslvserver.set_vservername(sslvserver.get_vservername())
__sslvserver.set_cleartextport(sslvserver.get_cleartextport())
__sslvserver.set_dh(sslvserver.get_dh())
__sslvserver.set_dhfile(sslvserver.get_dhfile())
__sslvserver.set_dhcount(sslvserver.get_dhcount())
__sslvserver.set_ersa(sslvserver.get_ersa())
__sslvserver.set_ersacount(sslvserver.get_ersacount())
__sslvserver.set_sessreuse(sslvserver.get_sessreuse())
__sslvserver.set_sesstimeout(sslvserver.get_sesstimeout())
__sslvserver.set_cipherredirect(sslvserver.get_cipherredirect())
__sslvserver.set_cipherurl(sslvserver.get_cipherurl())
__sslvserver.set_sslv2redirect(sslvserver.get_sslv2redirect())
__sslvserver.set_sslv2url(sslvserver.get_sslv2redirect())
__sslvserver.set_clientauth(sslvserver.get_clientauth())
__sslvserver.set_clientcert(sslvserver.get_clientcert())
__sslvserver.set_sslredirect(sslvserver.get_sslredirect())
__sslvserver.set_redirectportrewrite(sslvserver.get_redirectportrewrite())
__sslvserver.set_nonfipsciphers(sslvserver.get_nonfipsciphers())
__sslvserver.set_ssl2(sslvserver.get_ssl2())
__sslvserver.set_ssl3(sslvserver.get_ssl3())
__sslvserver.set_tls1(sslvserver.get_tls1())
__sslvserver.set_snienable(sslvserver.get_snienable())
__sslvserver.set_pushenctrigger(sslvserver.get_pushenctrigger())
return __sslvserver.update_resource(nitro)
# No unset functionality for now.
| apache-2.0 | -8,071,013,944,966,436,000 | 41.041293 | 105 | 0.503737 | false | 5.378273 | false | false | false |
benvcarr/algorithms_course | hw2/carr-hw2.py | 1 | 5854 | #!/usr/bin/python
"""
Benjamin Carr
Homework #2 - MPCS 55001
Answers:
(1) Program below.
(2) My program is correct for all cases where both the numbers and their distances from the median
are unique. I spent a lot of time (30+ hrs) looking for approaches other than using a dictionary
to store key/value pairs, but didn't come up with anything that would work in every situation. So it
works for inputs that meet those criteria. It also assumes the median is always equal to floor(n/2), which is
a bit of a mathematical compromise.
(3) It should run in O(n) time - the worst running time is a function of the O(n) lookup to select()
that is initially used to find the median. Both of the core FOR loops (lines 66 & 71) take O(n) as well.
"""
import sys
import math
from random import randint
def main():
startFindClosest()
def startFindClosest():
"""Begins the closest search process by reading in the stdin file.
Args:
None. Reads from stdin for file.
Returns:
No value. Prints closest k values to median to stdout."""
f = sys.stdin
line1 = f.readline()
while line1 != '':
k = int(f.readline())
array = form_array_from_string_line(line1)
print findClosestKValues(array, 0, len(array)-1, k)
line1 = f.readline()
if not line1:
break
return
def findClosestKValues(array, l_index, r_index, k):
"""Finds the closest K values to the median.
Args:
array: List object containing unsorted list of values.
k: The number of numbers closest to the median we wish to find.
Returns:
nums: a list object containing the closest k numbers to median."""
nums = []
temp_array = []
pairing = {}
"""
    Note: This is code I tried to use to get it to work for varying lengths and accurately output
    the median value. It turned out to be more complex than I imagined, so I left it out.
if (len(array) % 2) == 0:
median_A = randomizedSelect(array, l_index, r_index, (len(array)/2))
median_B = randomizedSelect(array, l_index, r_index, ((len(array)-1)/2))
median = (median_A + median_B) / 2.0
else:
median = randomizedSelect(array, l_index, r_index, (len(array)/2))"""
    median = randomizedSelect(array, l_index, r_index, len(array) // 2)
array.remove(median)
array.append(median)
for i in range(0,r_index+1):
pairing[abs(array[i]-median)] = array[i]
temp_array.append(abs(array[i] - median))
kth_element = randomizedSelect(temp_array, l_index, len(temp_array)-1, k)
for j in range(0,len(array)):
if temp_array[j] <= kth_element:
nums.append(pairing[temp_array[j]])
return nums
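# A quick sanity check of the above (input chosen so that all distances from
# the median are unique, per the limitation noted in the header): for
# array = [1, 2, 4, 8, 16] and k = 2, the median is the floor(5/2) = 2nd
# smallest value (2), and the call
#
#   findClosestKValues([1, 2, 4, 8, 16], 0, 4, 2)
#
# returns [1, 2] in some order -- the median itself plus its nearest
# neighbour, since the median's own distance of 0 is always among the k
# smallest.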
def form_array_from_string_line(line):
"""Begins the inversion count process by reading in the stdin file.
Args:
line: A string of input line (usually from a text file) with integers
contained within, separated by spaces.
Returns:
array: List object (Python's standard 'array' type) featuring each of
the integers as a separate item in the list."""
array = [int(n) for n in line.split()]
return array
def randomizedSelect(array, l_index, r_index, i):
"""Uses the randomizedPartion method to find the specified i-th value.
Args:
array: List object containing unsorted list of values.
l_index: Left index of the subarray we want to search in.
r_index: Right index of the subarray we want to search in.
i: The i-th sorted value we want to find.
    Returns:
        The value that is the i-th smallest in the subarray."""
if l_index == r_index:
return array[l_index]
q = randomizedPartition(array, l_index, r_index)
k = q - l_index + 1
if i == k:
return array[q]
elif i < k:
return randomizedSelect(array, l_index, q-1, i)
else:
return randomizedSelect(array, q+1, r_index, i-k)
def randomizedPartition(array, l_index, r_index):
"""Randomizes the partion method.
Args:
array: List object containing unsorted list of values.
l_index: Left index of the subarray we want to search in.
r_index: Right index of the subarray we want to search in.
Returns:
i+1: Integer value of the index of the partition."""
i = randint(l_index, r_index)
array = valueSwap(array, i, r_index)
return partition(array, l_index, r_index)
def partition(array, l_index, r_index):
"""Identifies the partion index.
Args:
array: List object containing unsorted list of values.
l_index: Left index of the subarray we want to search in.
r_index: Right index of the subarray we want to search in.
Returns:
i+1: Integer value of the index of the partition."""
pivot = array[r_index]
i = l_index - 1
j = l_index
for j in range(l_index, r_index):
if array[j] <= pivot:
i += 1
array = valueSwap(array, i, j)
array = valueSwap(array, i+1, r_index)
return i+1
def valueSwap(array, index_one, index_two):
"""Swaps two values in a given array.
Args:
array: List object containing unsorted list of values.
index_one: Index of first item we want to swap.
index_two: Index of second item we want to swap.
Returns:
array: List with the desired values swapped."""
if len(array) <= 1:
return array
else:
try:
temp = array[index_one]
array[index_one] = array[index_two]
array[index_two] = temp
except IndexError, e:
print e
print "Tried to swap index: " + str(index_one) + ' with index: ' + str(index_two)
return array
if __name__ == '__main__':
main()
| apache-2.0 | -1,535,948,675,559,064,000 | 32.233918 | 116 | 0.640588 | false | 3.57169 | false | false | false |
USGSDenverPychron/pychron | pychron/spectrometer/base_magnet.py | 1 | 12604 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
import time
import yaml
from math import pi
from numpy import arange, sin
from traits.api import Property, Float, Event, Instance
from traitsui.api import View, Item, VGroup, HGroup, Spring, RangeEditor
from pychron.loggable import Loggable
from pychron.paths import paths
def get_float(func):
def dec(*args, **kw):
try:
return float(func(*args, **kw))
except (TypeError, ValueError):
return 0.0
return dec
import threading
class BaseMagnet(Loggable):
dac = Property(Float, depends_on='_dac')
mass = Float
_dac = Float
dacmin = Float(0.0)
dacmax = Float(10.0)
massmin = Property(Float, depends_on='_massmin')
massmax = Property(Float, depends_on='_massmax')
_massmin = Float(0.0)
_massmax = Float(200.0)
settling_time = 0.5
detector = Instance('pychron.spectrometer.base_detector.BaseDetector')
dac_changed = Event
mftable = Instance('pychron.spectrometer.mftable.MagnetFieldTable', ())
confirmation_threshold_mass = Float
use_deflection_correction = True
use_af_demagnetization = False
_suppress_mass_update = False
def __init__(self, *args, **kw):
super(BaseMagnet, self).__init__(*args, **kw)
self._lock = threading.Lock()
self._cond = threading.Condition((threading.Lock()))
def reload_mftable(self):
self.mftable.load_mftable()
def set_dac(self, *args, **kw):
raise NotImplementedError
def set_mftable(self, name):
self.mftable.set_path_name(name)
def update_field_table(self, *args, **kw):
self.mftable.update_field_table(*args, **kw)
# ===============================================================================
# persistence
# ===============================================================================
def load(self):
pass
def finish_loading(self):
"""
initialize the mftable
read DAC from device
:return:
"""
if self.spectrometer:
molweights = self.spectrometer.molecular_weights
name = self.spectrometer.name
else:
from pychron.spectrometer.molecular_weights import MOLECULAR_WEIGHTS as molweights
name = ''
self.mftable.initialize(molweights)
self.mftable.spectrometer_name = name.lower()
d = self.read_dac()
if d is not None:
self._dac = d
# load af demag
self._load_af_demag_configuration()
# ===============================================================================
# mapping
# ===============================================================================
def map_dac_to_mass(self, dac, detname):
"""
convert a DAC value (voltage) to mass for a given detector
use the mftable
:param dac: float, voltage (0-10V)
:param detname: str, name of a detector, e.g H1
:return: float, mass
"""
return self.mftable.map_dac_to_mass(dac, detname)
def map_mass_to_dac(self, mass, detname):
"""
convert a mass value from amu to dac for a given detector
:param mass: float, amu
        :param detname: str, name of a detector, e.g. H1
:return: float, dac voltage
"""
dac = self.mftable.map_mass_to_dac(mass, detname)
self.debug('{} map mass to dac {} >> {}'.format(detname, mass, dac))
if dac is None:
self.warning('Could not map mass to dac. Returning current DAC {}'.format(self._dac))
dac = self._dac
return dac
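    # Illustrative round trip (values are hypothetical; the real numbers
    # come from the magnet field table loaded in finish_loading()):
    #
    #   dac = magnet.map_mass_to_dac(39.962, 'H1')   # e.g. ~5.2 V
    #   mass = magnet.map_dac_to_mass(dac, 'H1')     # ~39.962 again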
def map_dac_to_isotope(self, dac=None, det=None, current=True):
"""
convert a dac voltage to isotope name for a given detector
:param dac: float, voltage
:param det: str, detector name
:param current: bool, get current hv
:return: str, e.g Ar40
"""
if dac is None:
dac = self._dac
if det is None:
det = self.detector
if det:
dac = self.spectrometer.uncorrect_dac(det, dac, current=current)
m = self.map_dac_to_mass(dac, det.name)
if m is not None:
return self.spectrometer.map_isotope(m)
def mass_change(self, m):
"""
        set the self.mass attribute, suppressing the mass-changed handler
:param m: float
:return:
"""
self._suppress_mass_update = True
self.trait_set(mass=m)
self._suppress_mass_update = False
# ===============================================================================
# private
# ===============================================================================
def _wait_release(self):
self._lock.release()
# self._cond.notify()
def _wait_lock(self, timeout):
"""
http://stackoverflow.com/questions/8392640/how-to-implement-a-lock-with-a-timeout-in-python-2-7
@param timeout:
@return:
"""
with self._cond:
current_time = start_time = time.time()
while current_time < start_time + timeout:
if self._lock.acquire(False):
return True
else:
self._cond.wait(timeout - current_time + start_time)
current_time = time.time()
return False
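    # Sketch of the intended pairing of the two helpers above (caller and
    # timeout value are hypothetical):
    #
    #   if magnet._wait_lock(2.0):
    #       try:
    #           ...  # critical section
    #       finally:
    #           magnet._wait_release()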
def _load_af_demag_configuration(self):
self.use_af_demagnetization = False
p = paths.af_demagnetization
if os.path.isfile(p):
with open(p, 'r') as rfile:
try:
yd = yaml.load(rfile)
except BaseException, e:
self.warning_dialog('AF Demagnetization unavailable. Syntax error in file. Error: {}'.format(e))
return
if not isinstance(yd, dict):
self.warning_dialog('AF Demagnetization unavailable. Syntax error in file')
return
self.use_af_demagnetization = yd.get('enabled', True)
self.af_demag_threshold = yd.get('threshold', 1)
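    # Expected YAML layout, inferred from the yd.get(...) calls in this
    # class; the keys are real, the values below are purely illustrative:
    #
    #   enabled: true
    #   threshold: 1
    #   frequency: 10        # Hz (alternatively give "period")
    #   duration: 5          # seconds
    #   start_amplitude: 2.0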
def _do_af_demagnetization(self, target, setfunc):
p = paths.af_demagnetization
if os.path.isfile(p):
with open(p, 'r') as rfile:
try:
yd = yaml.load(rfile)
except BaseException, e:
self.warning('AF Demagnetization unavailable. Syntax error in file. Error: {}'.format(e))
return
period = yd.get('period', None)
if period is None:
frequency = yd.get('frequency')
if frequency is None:
self.warning('AF Demagnetization unavailable. '
'Need to specify "period" or "frequency" in "{}"'.format(p))
return
else:
period = 1 / float(frequency)
else:
frequency = 1 / float(period)
duration = yd.get('duration')
if duration is None:
duration = 5
self.debug('defaulting to duration={}'.format(duration))
start_amplitude = yd.get('start_amplitude')
if start_amplitude is None:
self.warning('AF Demagnetization unavailable. '
'Need to specify "start_amplitude" in "{}"'.format(p))
return
sx = arange(0.5 * period, duration, period)
slope = start_amplitude / float(duration)
dacs = slope * sx * sin(frequency * pi * sx)
self.info('Doing AF Demagnetization around target={}. '
'duration={}, start_amplitude={}, period={}'.format(target, duration, start_amplitude, period))
for dac in reversed(dacs):
self.debug('set af dac raw:{} dac:{}'.format(dac, target + dac))
setfunc(target + dac)
time.sleep(period)
else:
self.warning('AF Demagnetization unavailable. {} not a valid file'.format(p))
def _validate_mass_change(self, cm, m):
ct = self.confirmation_threshold_mass
move_ok = True
if abs(cm - m) > ct:
move_ok = False
self.info('Requested move greater than threshold. Current={}, Request={}, Threshold={}'.format(cm, m, ct))
if self.confirmation_dialog('Requested magnet move is greater than threshold.\n'
'Current Mass={}\n'
'Requested Mass={}\n'
'Threshold={}\n'
'Are you sure you want to continue?'.format(cm, m, ct)):
move_ok = True
return move_ok
def _mass_changed(self, old, new):
if self._suppress_mass_update:
return
if self._validate_mass_change(old, new):
self._set_mass(new)
else:
self.mass_change(old)
def _set_mass(self, m):
if self.detector:
self.debug('setting mass {}'.format(m))
dac = self.map_mass_to_dac(m, self.detector.name)
dac = self.spectrometer.correct_dac(self.detector, dac)
self.dac = dac
# ===============================================================================
# property get/set
# ===============================================================================
def _validate_dac(self, d):
return self._validate_float(d)
def _get_dac(self):
return self._dac
def _set_dac(self, v):
if v is not None:
self.set_dac(v)
def _validate_float(self, d):
try:
return float(d)
except (ValueError, TypeError):
return d
def _validate_massmin(self, d):
d = self._validate_float(d)
if isinstance(d, float):
if d > self.massmax:
d = str(d)
return d
def _get_massmin(self):
return self._massmin
def _set_massmin(self, v):
self._massmin = v
def _validate_massmax(self, d):
d = self._validate_float(d)
if isinstance(d, float):
if d < self.massmin:
d = str(d)
return d
def _get_massmax(self):
return self._massmax
def _set_massmax(self, v):
self._massmax = v
# ===============================================================================
# views
# ===============================================================================
def traits_view(self):
v = View(VGroup(VGroup(Item('dac', editor=RangeEditor(low_name='dacmin',
high_name='dacmax',
format='%0.5f')),
Item('mass', editor=RangeEditor(mode='slider', low_name='massmin',
high_name='massmax',
format='%0.3f')),
HGroup(Spring(springy=False,
width=48),
Item('massmin', width=-40), Spring(springy=False,
width=138),
Item('massmax', width=-55),
show_labels=False),
show_border=True,
label='Control')))
return v
# ============= EOF =============================================
| apache-2.0 | 4,398,844,861,938,323,000 | 33.157182 | 118 | 0.485401 | false | 4.355218 | false | false | false |
jptomo/rpython-lang-scheme | rpython/translator/test/snippet.py | 1 | 19453 | """Snippets for translation
This module holds various snippets, to be used by translator
unittests.
We define argument types as default arguments to the snippet
functions.
"""
numtype = (int, float)
anytype = (int, float, str)
seqtype = (list, tuple)
def if_then_else(cond=anytype, x=anytype, y=anytype):
if cond:
return x
else:
return y
def my_gcd(a=numtype, b=numtype):
r = a % b
while r:
a = b
b = r
r = a % b
return b
def is_perfect_number(n=int):
div = 1
sum = 0
while div < n:
if n % div == 0:
sum += div
div += 1
return n == sum
def my_bool(x=int):
return not not x
def my_contains(seq=seqtype, elem=anytype):
return elem in seq
def is_one_or_two(n=int):
return n in [1, 2]
def two_plus_two():
"""Array test"""
array = [0] * 3
array[0] = 2
array[1] = 2
array[2] = array[0] + array[1]
return array[2]
def get_set_del_slice(l=list):
del l[:1]
del l[-1:]
del l[2:4]
l[:1] = [3]
l[-1:] = [9]
l[2:4] = [8,11]
return l[:2], l[5:], l[3:5]
def sieve_of_eratosthenes():
"""Sieve of Eratosthenes
This one is from an infamous benchmark, "The Great Computer
Language Shootout".
URL is: http://www.bagley.org/~doug/shootout/
"""
flags = [True] * (8192+1)
count = 0
i = 2
while i <= 8192:
if flags[i]:
k = i + i
while k <= 8192:
flags[k] = False
k = k + i
count = count + 1
i = i + 1
return count
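# With the hard-coded bound of 8192, the count above should come out to
# 1028, the number of primes not exceeding 8192.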
def simple_func(i=numtype):
return i + 1
def while_func(i=numtype):
total = 0
while i > 0:
total = total + i
i = i - 1
return total
def nested_whiles(i=int, j=int):
s = ''
z = 5
while z > 0:
z = z - 1
u = i
while u < j:
u = u + 1
s = s + '.'
s = s + '!'
return s
def poor_man_range(i=int):
lst = []
while i > 0:
i = i - 1
lst.append(i)
lst.reverse()
return lst
def poor_man_rev_range(i=int):
lst = []
while i > 0:
i = i - 1
lst += [i]
return lst
def simple_id(x=anytype):
return x
def branch_id(cond=anytype, a=anytype, b=anytype):
while 1:
if cond:
return a
else:
return b
def builtinusage():
return pow(2, 2)
def yast(lst=seqtype):
total = 0
for z in lst:
total = total + z
return total
def time_waster(n=int):
"""Arbitrary test function"""
i = 0
x = 1
while i < n:
j = 0
while j <= i:
j = j + 1
x = x + (i & j)
i = i + 1
return x
def half_of_n(n=int):
"""Slice test"""
i = 0
lst = range(n)
while lst:
lst = lst[1:-1]
i = i + 1
return i
def int_id(x=int):
i = 0
while i < x:
i = i + 1
return i
def greet(target=str):
"""String test"""
hello = "hello"
return hello + target
def choose_last():
"""For loop test"""
set = ["foo", "bar", "spam", "egg", "python"]
choice = ""
for choice in set:
pass
return choice
def poly_branch(x=int):
if x:
y = [1,2,3]
else:
y = ['a','b','c']
z = y
return z*2
def s_and(x=anytype, y=anytype):
if x and y:
return 'yes'
else:
return 'no'
def break_continue(x=numtype):
result = []
i = 0
while 1:
i = i + 1
try:
if i&1:
continue
if i >= x:
break
finally:
result.append(i)
i = i + 1
return result
def reverse_3(lst=seqtype):
try:
a, b, c = lst
except:
return 0, 0, 0
return c, b, a
def finallys(lst=seqtype):
x = 1
try:
x = 2
try:
x = 3
a, = lst
x = 4
except KeyError:
return 5
except ValueError:
return 6
b, = lst
x = 7
finally:
x = 8
return x
def finally2(o, k):
try:
o[k] += 1
finally:
o[-1] = 'done'
def bare_raise(o, ignore):
try:
return o[5]
except:
if not ignore:
raise
def factorial(n=int):
if n <= 1:
return 1
else:
return n * factorial(n-1)
def factorial2(n=int): # analysed in a different order
if n > 1:
return n * factorial2(n-1)
else:
return 1
def _append_five(lst):
lst += [5]
def call_five():
a = []
_append_five(a)
return a
def _append_six(lst):
lst += [6]
def call_five_six():
a = []
_append_five(a)
_append_six(a)
return a
def call_unpack_56():
a = call_five_six()
return len(a), a[0], a[1]
def forty_two():
return 42
def never_called():
return "booo"
def constant_result():
if forty_two():
return "yadda"
else:
return never_called()
class CallablePrebuiltConstant(object):
def __call__(self):
return 42
callable_prebuilt_constant = CallablePrebuiltConstant()
def call_cpbc():
return callable_prebuilt_constant()
class E1(Exception):
pass
class E2(Exception):
pass
def raise_choose(n):
if n == 1:
raise E1
elif n == 2:
raise E2
elif n == -1:
raise Exception
return 0
def try_raise_choose(n=int):
try:
raise_choose(n)
except E1:
return 1
except E2:
return 2
except Exception:
return -1
return 0
def do_try_raise_choose():
r = []
for n in [-1,0,1,2]:
r.append(try_raise_choose(n))
return r
# INHERITANCE / CLASS TESTS
class C(object): pass
def build_instance():
c = C()
return c
def set_attr():
c = C()
c.a = 1
c.a = 2
return c.a
def merge_setattr(x):
if x:
c = C()
c.a = 1
else:
c = C()
return c.a
class D(C): pass
class E(C): pass
def inheritance1():
d = D()
d.stuff = ()
e = E()
e.stuff = -12
e.stuff = 3
lst = [d, e]
return d.stuff, e.stuff
def inheritance2():
d = D()
d.stuff = (-12, -12)
e = E()
e.stuff = (3, 12.3)
return _getstuff(d), _getstuff(e)
class F:
pass
class G(F):
def m(self, x):
return self.m2(x)
def m2(self, x):
return D(), x
class H(F):
def m(self, y):
self.attr = 1
return E(), y
def knownkeysdict(b=anytype):
if b:
d = {'a': 0}
d['b'] = b
d['c'] = 'world'
else:
d = {'b': -123}
return d['b']
def generaldict(key=str, value=int, key2=str, value2=int):
d = {key: value}
d[key2] = value2
return d[key or key2]
def prime(n=int):
return len([i for i in range(1,n+1) if n%i==0]) == 2
class A0:
pass
class A1(A0):
clsattr = 123
class A2(A1):
clsattr = 456
class A3(A2):
clsattr = 789
class A4(A3):
pass
class A5(A0):
clsattr = 101112
def classattribute(flag=int):
if flag == 1:
x = A1()
elif flag == 2:
x = A2()
elif flag == 3:
x = A3()
elif flag == 4:
x = A4()
else:
x = A5()
return x.clsattr
class Z:
def my_method(self):
return self.my_attribute
class WithInit:
def __init__(self, n):
self.a = n
class WithMoreInit(WithInit):
def __init__(self, n, m):
WithInit.__init__(self, n)
self.b = m
def simple_method(v=anytype):
z = Z()
z.my_attribute = v
return z.my_method()
def with_init(v=int):
z = WithInit(v)
return z.a
def with_more_init(v=int, w=bool):
z = WithMoreInit(v, w)
if z.b:
return z.a
else:
return -z.a
global_z = Z()
global_z.my_attribute = 42
def global_instance():
return global_z.my_method()
def call_Z_my_method(z):
return z.my_method
def somepbc_simplify():
z = Z()
call_Z_my_method(global_z)
call_Z_my_method(z)
class ClassWithMethods:
def cm(cls, x):
return x
cm = classmethod(cm)
def sm(x):
return x
sm = staticmethod(sm)
global_c = C()
global_c.a = 1
def global_newstyle_instance():
return global_c
global_rl = []
global_rl.append(global_rl)
def global_recursive_list():
return global_rl
class MI_A(object):
a = 1
class MI_B(MI_A):
b = 2
class MI_C(MI_A):
c = 3
class MI_D(MI_B, MI_C):
d = 4
def multiple_inheritance():
i = MI_D()
return i.a + i.b + i.c + i.d
class CBase(object):
pass
class CSub1(CBase):
def m(self):
self.x = 42
return self.x
class CSub2(CBase):
def m(self):
self.x = 'world'
return self.x
def methodcall_is_precise(cond):
if cond:
x = CSub1()
x.m()
else:
x = CSub2()
x.m()
return CSub1().m()
def flow_type_info(i):
if isinstance(i, int):
a = i + 1
else:
a = len(str(i))
return a
def flow_usertype_info(ob):
if isinstance(ob, WithInit):
return ob
else:
return WithMoreInit(1, 2)
def star_args0(*args):
return args[0] / 2
def call_star_args0(z):
return star_args0(z)
def star_args1(a, *args):
return a + args[0] / 2
def call_star_args1(z):
return star_args1(z, 20)
def star_args1def(a=4, *args):
if args:
return a + args[0] / 2
else:
return a*3
def call_star_args1def(z):
a = star_args1def(z, 22)
b = star_args1def(5)
c = star_args1def()
return a+b+c
def star_args(x, y, *args):
return x + args[0]
def call_star_args(z):
return star_args(z, 5, 10, 15, 20)
def call_star_args_multiple(z):
a = star_args(z, 5, 10)
b = star_args(z, 5, 10, 15)
c = star_args(z, 5, 10, 15, 20)
return a+b+c
def default_args(x, y=2, z=3L):
return x+y+z
def call_default_args(u):
return default_args(111, u)
def default_and_star_args(x, y=2, z=3, *more):
return x+y+z+len(more)
def call_default_and_star_args(u):
return (default_and_star_args(111, u),
default_and_star_args(-1000, -2000, -3000, -4000, -5000))
def call_with_star(z):
return default_args(-20, *z)
def call_with_keyword(z):
return default_args(-20, z=z)
def call_very_complex(z, args, kwds):
return default_args(-20, z=z, *args, **kwds)
def powerset(setsize=int):
"""Powerset
    This one is from a Philippine Pythonista Hangout, a modified
version of Andy Sy's code.
list.append is modified to list concatenation, and powerset
is pre-allocated and stored, instead of printed.
URL is: http://lists.free.net.ph/pipermail/python/2002-November/
"""
set = range(setsize)
maxcardinality = pow(2, setsize)
bitmask = 0L
powerset = [None] * maxcardinality
ptr = 0
while bitmask < maxcardinality:
bitpos = 1L
index = 0
subset = []
while bitpos < maxcardinality:
if bitpos & bitmask:
subset = subset + [set[index]]
index += 1
bitpos <<= 1
powerset[ptr] = subset
ptr += 1
bitmask += 1
return powerset
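# For instance, powerset(2) builds the subsets in bitmask order:
# [[], [0], [1], [0, 1]].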
def harmonic(n):
result = 0.0
for i in range(n, 0, -1):
        result += 1.0 / i
return result
# --------------------(Currently) Non runnable Functions ---------------------
def _somebug1(n=int):
l = []
v = l.append
while n:
l[7] = 5 # raises an exception
break
return v
def _getstuff(x):
return x.stuff
# --------------------(Currently) Non compilable Functions ---------------------
class BadInit(object):
def update(self, k):
self.k = 1
def __init__(self, v):
return
self.update(**{'k':v})
def read(self):
return self.k
global_bi = BadInit(1)
def global_badinit():
return global_bi.read()
def _attrs():
def b(): pass
b.f = 4
b.g = 5
return b.f + b.g
def _methodcall1(cond):
if cond:
x = G()
else:
x = H()
return x.m(42)
def func1():
pass
def func2():
pass
def mergefunctions(cond):
if cond:
x = func1
else:
x = func2
return x
def func_producing_exception():
raise ValueError, "this might e.g. block the caller"
def funccallsex():
return func_producing_exception()
def func_arg_unpack():
a,b = 3, "hello"
return a
class APBC:
def __init__(self):
self.answer = 42
apbc = APBC()
apbc.answer = 7
def preserve_pbc_attr_on_instance(cond):
if cond:
x = APBC()
else:
x = apbc
return x.answer
class APBCS(object):
__slots__ = ['answer']
def __init__(self):
self.answer = 42
apbcs = APBCS()
apbcs.answer = 7
def preserve_pbc_attr_on_instance_with_slots(cond):
if cond:
x = APBCS()
else:
x = apbcs
return x.answer
def is_and_knowntype(x):
if x is None:
return x
else:
return None
def isinstance_and_knowntype(x):
if isinstance(x, APBC):
return x
else:
return apbc
def simple_slice(x):
return x[:10]
def simple_iter(x):
return iter(x)
def simple_zip(x,y):
return zip(x,y)
def dict_copy(d):
return d.copy()
def dict_update(x):
d = {x:x}
d.update({1:2})
return d
def dict_keys():
d = {"a" : 1}
return d.keys()
def dict_keys2():
d = {"a" : 1}
keys = d.keys()
d["123"] = 12
return keys
def dict_values():
d = {"a" : "a"}
return d.values()
def dict_values2():
d = {54312 : "a"}
values = d.values()
d[1] = "12"
return values
def dict_items():
d = {'a' : 1}
return d.items()
class Exc(Exception):
pass
def exception_deduction0(x):
pass
def exception_deduction():
try:
exception_deduction0(2)
except Exc, e:
return e
return Exc()
def always_raising(x):
raise ValueError
def witness(x):
pass
def exception_deduction_with_raise1(x):
try:
exception_deduction0(2)
if x:
raise Exc()
except Exc, e:
witness(e)
return e
return Exc()
def exception_deduction_with_raise2(x):
try:
exception_deduction0(2)
if x:
raise Exc
except Exc, e:
witness(e)
return e
return Exc()
def exception_deduction_with_raise3(x):
try:
exception_deduction0(2)
if x:
raise Exc, Exc()
except Exc, e:
witness(e)
return e
return Exc()
def slice_union(x):
if x:
return slice(1)
else:
return slice(0, 10, 2)
def exception_deduction_we_are_dumb():
a = 1
try:
exception_deduction0(2)
except Exc, e:
a += 1
return e
return Exc()
class Exc2(Exception):
pass
def nested_exception_deduction():
try:
exception_deduction0(1)
except Exc, e:
try:
exception_deduction0(2)
except Exc2, f:
return (e, f)
return (e, Exc2())
return (Exc(), Exc2())
class Exc3(Exception):
def m(self):
return 1
class Exc4(Exc3):
def m(self):
return 1
class Sp:
def o(self):
raise Exc3
class Mod:
def __init__(self, s):
self.s = s
def p(self):
s = self.s
try:
s.o()
except Exc3, e:
return e.m()
return 0
class Mod3:
def __init__(self, s):
self.s = s
def p(self):
s = self.s
try:
s.o()
except Exc4, e1:
return e1.m()
except Exc3, e2:
try:
return e2.m()
except Exc4, e3:
return e3.m()
return 0
mod = Mod(Sp())
mod3 = Mod3(Sp())
def exc_deduction_our_exc_plus_others():
return mod.p()
def exc_deduction_our_excs_plus_others():
return mod3.p()
def call_two_funcs_but_one_can_only_raise(n):
fn = [witness, always_raising][n]
return fn(n)
# constant instances with __init__ vs. __new__
class Thing1:
def __init__(self):
self.thingness = 1
thing1 = Thing1()
def one_thing1():
return thing1
class Thing2(long):
def __new__(t, v):
return long.__new__(t, v * 2)
thing2 = Thing2(2)
def one_thing2():
return thing2
# propagation of fresh instances through attributes
class Stk:
def __init__(self):
self.itms = []
def push(self, v):
self.itms.append(v)
class EC:
def __init__(self):
self.stk = Stk()
def enter(self, f):
self.stk.push(f)
def propagation_of_fresh_instances_through_attrs(x):
e = EC()
e.enter(x)
# same involving recursion
class R:
def __init__(self, n):
if n > 0:
self.r = R(n-1)
else:
self.r = None
self.n = n
if self.r:
self.m = self.r.n
else:
self.m = -1
def make_r(n):
return R(n)
class B:
pass
class Even(B):
def __init__(self, n):
if n > 0:
self.x = [Odd(n-1)]
self.y = self.x[0].x
else:
self.x = []
self.y = []
class Odd(B):
def __init__(self, n):
self.x = [Even(n-1)]
self.y = self.x[0].x
def make_eo(n):
if n % 2 == 0:
return Even(n)
else:
return Odd(n)
# shows that we care about the expanded structure in the face of changes to
# attributes involving only instance rev numbers
class Box:
pass
class Box2:
pass
class Box3(Box2):
pass
def flow_rev_numbers(n):
bx3 = Box3()
bx3.x = 1
bx = Box()
bx.bx3 = bx3
if n > 0:
z = bx.bx3.x
if n > 0:
bx2 = Box2()
bx2.x = 3
return z
raise Exception
# class specialization
class PolyStk:
_annspecialcase_ = "specialize:ctr_location"
def __init__(self):
self.itms = []
def push(self, v):
self.itms.append(v)
def top(self):
return self.itms[-1]
def class_spec():
istk = PolyStk()
istk.push(1)
sstk = PolyStk()
sstk.push("a")
istk.push(2)
sstk.push("b")
#if not isinstance(istk, PolyStk):
# return "confused"
return istk.top(), sstk.top()
from rpython.rlib.rarithmetic import ovfcheck
def add_func(i=numtype):
try:
return ovfcheck(i + 1)
except OverflowError:
raise
from sys import maxint
def div_func(i=numtype):
try:
return ovfcheck((-maxint-1) // i)
except (OverflowError, ZeroDivisionError):
raise
def mul_func(x=numtype, y=numtype):
try:
return ovfcheck(x * y)
except OverflowError:
raise
def mod_func(i=numtype):
try:
return ovfcheck((-maxint-1) % i)
except OverflowError:
raise
except ZeroDivisionError:
raise
def rshift_func(i=numtype):
try:
return (-maxint-1) >> i
except ValueError:
raise
class hugelmugel(OverflowError):
pass
def hugo(a, b, c): pass
def lshift_func(i=numtype):
try:
hugo(2, 3, 5)
return ovfcheck((-maxint-1) << i)
except (hugelmugel, OverflowError, StandardError, ValueError):
raise
def unary_func(i=numtype):
try:
return ovfcheck(-i), ovfcheck(abs(i-1))
except:
raise
# XXX it would be nice to get it right without an exception
# handler at all, but then we need to do much harder parsing
| mit | 713,307,751,522,421,600 | 16.384272 | 98 | 0.522027 | false | 2.990469 | false | false | false |
cu-csc/automaton | tests/deployment_tests.py | 1 | 2068 | """
Module that tests various deployment functionality
To run me from command line:
cd automaton/tests
export PYTHONPATH=$PYTHONPATH:../
python -m unittest -v deployment_tests
unset PYTHONPATH
I should have used nose but...
"""
import unittest
from lib import util
from deployment import common
class test_deployment_functions(unittest.TestCase):
def setUp(self):
self.testing_machine = "vm-148-120.uc.futuregrid.org"
self.bad_machine_name = "Idonotexistwallah.wrong"
self.key_filename = "/Users/ali/.ssh/ali_alzabarah_fg.priv"
def test_port_status_check(self):
        # ssh port: expected closed on google.com
        self.assertFalse(util.check_port_status("google.com"))
        # ssh port: expected open on research.cs.colorado.edu
        self.assertTrue(util.check_port_status("research.cs.colorado.edu"))
# http port
self.assertTrue(util.check_port_status("google.com", 80, 2))
# wrong domain
self.assertFalse(util.check_port_status("Idonotexistwallah.wrong"))
# wrong ip
self.assertFalse(util.check_port_status("256.256.256.256"))
def test_run_remote_command(self):
result = util.RemoteCommand(self.testing_machine,
self.key_filename, "grep "
"ewrqwerasdfqewr /etc/passwd").execute()
self.assertNotEqual(result, 0)
result = util.RemoteCommand(self.testing_machine, self.key_filename,
"ls -al /etc/passwd").execute()
self.assertEqual(result, 0)
def test_clone_git_repo(self):
self.assertIsNotNone(util.clone_git_repo("https://github.com/"
"alal3177/automaton.git"))
def test_is_executable(self):
self.assertFalse(util.is_executable_file("wrong/path"))
self.assertTrue(util.is_executable_file("/bin/echo"))
self.assertFalse(util.is_executable_file("/tmp"))
def test_get_executable_files(self):
self.assertIsNotNone(common.get_executable_files("/bin"))
if __name__ == '__main__':
unittest.main()
| mit | 3,975,989,049,178,258,000 | 32.901639 | 76 | 0.62911 | false | 3.843866 | true | false | false |
agirardeaudale/nbawebstats | docs/generaterequestrst.py | 1 | 2103 | #!/usr/bin/env python
from jinja2 import Environment, FileSystemLoader
import json
import os
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.abspath(os.path.join(THIS_DIR, '../nbawebstats/requests.json'))
RST_PATH = os.path.abspath(os.path.join(THIS_DIR, 'requests.rst'))
def format_internal_link(name, domain):
return ":ref:`{0} <{1}-{2}>`".format(name, domain, name.lower())
def with_default(request_params, global_param_map):
return [x for x in request_params if 'default' in global_param_map[x]]
def without_default(request_params, global_param_map):
return [x for x in request_params if 'default' not in global_param_map[x]]
def format_string_literals(strings):
return ["``'{0}'``".format(x) for x in strings]
def format_param_links(param_names):
return [format_internal_link(x, 'param') for x in param_names]
def format_param_type_link(param_type):
param_type_name = {'int': 'Integer',
'boolean-yn': 'Boolean',
'boolean-01': 'Boolean',
'enum': 'Enumerated',
'enum-mapped': 'Enumerated',
'date': 'Date',
'season': 'Season',
'season-id': 'Season'}[param_type]
return format_internal_link(param_type_name, 'type')
def update_request_rst():
with open(DATA_PATH, 'r') as f:
data = json.load(f)
jinja_env = Environment(loader=FileSystemLoader(THIS_DIR),
trim_blocks=True,
lstrip_blocks=True)
jinja_env.filters['with_default'] = with_default
jinja_env.filters['without_default'] = without_default
jinja_env.filters['format_string_literals'] = format_string_literals
jinja_env.filters['format_param_links'] = format_param_links
jinja_env.filters['format_param_type_link'] = format_param_type_link
rst_contents = jinja_env.get_template('requests.template').render(data)
with open(RST_PATH, 'w') as f:
f.write(rst_contents)
if __name__ == '__main__':
update_request_rst()
| mit | -3,689,226,585,242,646,500 | 35.258621 | 83 | 0.61864 | false | 3.458882 | false | false | false |
OpenAcademy-OpenStack/nova-scheduler | nova/api/openstack/compute/plugins/v3/admin_password.py | 1 | 2734 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import admin_password
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
ALIAS = "os-admin-password"
authorize = extensions.extension_authorizer('compute', 'v3:%s' % ALIAS)
class AdminPasswordController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(AdminPasswordController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('change_password')
@wsgi.response(204)
@extensions.expected_errors((400, 404, 409, 501))
@validation.schema(admin_password.change_password)
def change_password(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
password = body['change_password']['admin_password']
try:
instance = self.compute_api.get(context, id, want_objects=True)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
try:
self.compute_api.set_admin_password(context, instance, password)
except exception.InstancePasswordSetFailed as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as e:
raise common.raise_http_conflict_for_instance_invalid_state(
e, 'change_password')
except NotImplementedError:
msg = _("Unable to set password on instance")
raise exc.HTTPNotImplemented(explanation=msg)
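    # The request body unpacked above looks roughly like this (the password
    # value is hypothetical; the authoritative schema lives in
    # nova/api/openstack/compute/schemas/v3/admin_password.py):
    #
    #   {"change_password": {"admin_password": "new-secret"}}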
class AdminPassword(extensions.V3APIExtensionBase):
"""Admin password management support."""
name = "AdminPassword"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
controller = AdminPasswordController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| apache-2.0 | 1,025,928,680,341,808,000 | 35.453333 | 79 | 0.696781 | false | 4.232198 | false | false | false |
wpjesus/codematch | ietf/group/mails.py | 1 | 2488 | # generation of mails
import re
from django.utils.html import strip_tags
from django.utils.text import wrap
from django.conf import settings
from django.core.urlresolvers import reverse as urlreverse
from ietf.utils.mail import send_mail, send_mail_text
from ietf.mailtrigger.utils import gather_address_lists
def email_admin_re_charter(request, group, subject, text, mailtrigger):
(to,cc) = gather_address_lists(mailtrigger,group=group)
full_subject = u"Regarding %s %s: %s" % (group.type.name, group.acronym, subject)
text = strip_tags(text)
send_mail(request, to, None, full_subject,
"group/email_iesg_secretary_re_charter.txt",
dict(text=text,
group=group,
group_url=settings.IDTRACKER_BASE_URL + group.about_url(),
charter_url=settings.IDTRACKER_BASE_URL + urlreverse('doc_view', kwargs=dict(name=group.charter.name)) if group.charter else "[no charter]",
),
cc=cc,
)
def email_personnel_change(request, group, text, changed_personnel):
(to, cc) = gather_address_lists('group_personnel_change',group=group,changed_personnel=changed_personnel)
full_subject = u"Personnel change for %s %s" % (group.acronym,group.type.name)
send_mail_text(request, to, None, full_subject, text, cc=cc)
def email_milestones_changed(request, group, changes, states):
def wrap_up_email(addrs, text):
subject = u"Milestones changed for %s %s" % (group.acronym, group.type.name)
if re.search("Added .* for review, due",text):
subject = u"Review Required - " + subject
text = wrap(strip_tags(text), 70)
text += "\n\n"
text += u"URL: %s" % (settings.IDTRACKER_BASE_URL + group.about_url())
send_mail_text(request, addrs.to, None, subject, text, cc=addrs.cc)
# first send to those who should see any edits (such as management and chairs)
addrs = gather_address_lists('group_milestones_edited',group=group)
if addrs.to or addrs.cc:
wrap_up_email(addrs, u"\n\n".join(c + "." for c in changes))
# then send only the approved milestones to those who shouldn't be
# bothered with milestones pending approval
addrs = gather_address_lists('group_approved_milestones_edited',group=group)
msg = u"\n\n".join(c + "." for c,s in zip(changes,states) if not s == "review")
if (addrs.to or addrs.cc) and msg:
wrap_up_email(addrs, msg)
| bsd-3-clause | -7,655,907,386,327,128,000 | 41.169492 | 159 | 0.659164 | false | 3.357625 | false | false | false |
amaozhao/basecms | mptt/managers.py | 1 | 46897 | """
A custom manager for working with trees of objects.
"""
from __future__ import unicode_literals
import contextlib
from django.db import models, connections, router
from django.db.models import F, ManyToManyField, Max, Q
from django.utils.translation import ugettext as _
from mptt.exceptions import CantDisableUpdates, InvalidMove
__all__ = ('TreeManager',)
COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s = %(mptt_table)s.%(mptt_pk)s
)"""
CUMULATIVE_COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s IN
(
SELECT m2.%(mptt_pk)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
COUNT_SUBQUERY_M2M = """(
SELECT COUNT(*)
FROM %(rel_table)s j
INNER JOIN %(rel_m2m_table)s k ON j.%(rel_pk)s = k.%(rel_m2m_column)s
WHERE k.%(mptt_fk)s = %(mptt_table)s.%(mptt_pk)s
)"""
CUMULATIVE_COUNT_SUBQUERY_M2M = """(
SELECT COUNT(*)
FROM %(rel_table)s j
INNER JOIN %(rel_m2m_table)s k ON j.%(rel_pk)s = k.%(rel_m2m_column)s
WHERE k.%(mptt_fk)s IN
(
SELECT m2.%(mptt_pk)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
class TreeManager(models.Manager):
"""
A manager for working with trees of objects.
"""
def init_from_model(self, model):
"""
Sets things up. This would normally be done in contribute_to_class(),
but Django calls that before we've created our extra tree fields on the
model (which we need). So it's done here instead, after field setup.
"""
# Avoid calling "get_field_by_name()", which populates the related
# models cache and can cause circular imports in complex projects.
# Instead, find the tree_id field using "get_fields_with_model()".
[tree_field] = [
fld
for fld in model._meta.get_fields_with_model()
if fld[0].name == self.tree_id_attr]
if tree_field[1]:
# tree_model is the model that contains the tree fields.
# This is usually just the same as model, but not for derived
# models.
self.tree_model = tree_field[1]
else:
self.tree_model = model
self._base_manager = None
if self.tree_model is not model:
# _base_manager is the treemanager on tree_model
self._base_manager = self.tree_model._tree_manager
def get_query_set(self, *args, **kwargs):
"""
Ensures that this manager always returns nodes in tree order.
This method can be removed when support for Django < 1.6 is dropped.
"""
return super(TreeManager, self).get_query_set(*args, **kwargs).order_by(
self.tree_id_attr, self.left_attr)
def get_queryset(self, *args, **kwargs):
"""
Ensures that this manager always returns nodes in tree order.
"""
return super(TreeManager, self).get_queryset(*args, **kwargs).order_by(
self.tree_id_attr, self.left_attr)
def _get_queryset_relatives(self, queryset, direction, include_self):
"""
Returns a queryset containing either the descendants
``direction == desc`` or the ancestors ``direction == asc`` of a given
queryset.
This function is not meant to be called directly, although there is no
harm in doing so.
Instead, it should be used via ``get_queryset_descendants()`` and/or
``get_queryset_ancestors()``.
This function exists mainly to consolidate the nearly duplicate code
that exists between the two aforementioned functions.
"""
assert self.model is queryset.model
opts = queryset.model._mptt_meta
if not queryset:
return self.none()
filters = None
for node in queryset:
            lft = getattr(node, opts.left_attr)
            rght = getattr(node, opts.right_attr)
if direction == 'asc':
if include_self:
lft += 1
rght -= 1
lft_op = 'lt'
rght_op = 'gt'
elif direction == 'desc':
if include_self:
lft -= 1
rght += 1
lft_op = 'gt'
rght_op = 'lt'
q = Q(**{
opts.tree_id_attr: getattr(node, opts.tree_id_attr),
'%s__%s' % (opts.left_attr, lft_op): lft,
'%s__%s' % (opts.right_attr, rght_op): rght,
})
if filters is None:
filters = q
else:
filters |= q
return self.filter(filters)
def get_queryset_descendants(self, queryset, include_self=False):
"""
Returns a queryset containing the descendants of all nodes in the
given queryset.
If ``include_self=True``, nodes in ``queryset`` will also
be included in the result.
"""
return self._get_queryset_relatives(queryset, 'desc', include_self)
def get_queryset_ancestors(self, queryset, include_self=False):
"""
Returns a queryset containing the ancestors
of all nodes in the given queryset.
If ``include_self=True``, nodes in ``queryset`` will also
be included in the result.
"""
return self._get_queryset_relatives(queryset, 'asc', include_self)
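    # Hedged sketch (``Category`` stands in for any model managed by this
    # manager):
    #
    #   roots_and_up = Category.objects.get_queryset_ancestors(
    #       Category.objects.filter(name__startswith='C'),
    #       include_self=True)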
@contextlib.contextmanager
def disable_mptt_updates(self):
"""
Context manager. Disables mptt updates.
NOTE that this context manager causes inconsistencies! MPTT model
methods are not guaranteed to return the correct results.
When to use this method:
If used correctly, this method can be used to speed up bulk
updates.
This doesn't do anything clever. It *will* mess up your tree. You
should follow this method with a call to ``TreeManager.rebuild()``
to ensure your tree stays sane, and you should wrap both calls in a
transaction.
This is best for updates that span a large part of the table. If
you are doing localised changes (one tree, or a few trees) consider
using ``delay_mptt_updates``.
If you are making only minor changes to your tree, just let the
updates happen.
Transactions:
This doesn't enforce any transactional behavior. You should wrap
this in a transaction to ensure database consistency.
If updates are already disabled on the model, this is a noop.
Usage::
with transaction.atomic():
with MyNode.objects.disable_mptt_updates():
## bulk updates.
MyNode.objects.rebuild()
"""
# Error cases:
if self.model._meta.abstract:
# an abstract model. Design decision needed - do we disable
# updates for all concrete models that derive from this model? I
# vote no - that's a bit implicit and it's a weird use-case
# anyway. Open to further discussion :)
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s,"
" it's an abstract model" % self.model.__name__
)
elif self.model._meta.proxy:
# a proxy model. disabling updates would implicitly affect other
# models using the db table. Caller should call this on the
# manager for the concrete model instead, to make the behavior
# explicit.
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s, it's a proxy"
" model. Call the concrete model instead."
% self.model.__name__
)
elif self.tree_model is not self.model:
# a multiple-inheritance child of an MPTTModel. Disabling
# updates may affect instances of other models in the tree.
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s, it doesn't"
" contain the mptt fields."
% self.model.__name__
)
if not self.model._mptt_updates_enabled:
# already disabled, noop.
yield
else:
self.model._set_mptt_updates_enabled(False)
try:
yield
finally:
self.model._set_mptt_updates_enabled(True)
@contextlib.contextmanager
def delay_mptt_updates(self):
"""
Context manager. Delays mptt updates until the end of a block of bulk
processing.
NOTE that this context manager causes inconsistencies! MPTT model
methods are not guaranteed to return the correct results until the end
of the context block.
When to use this method:
If used correctly, this method can be used to speed up bulk
updates. This is best for updates in a localised area of the db
table, especially if all the updates happen in a single tree and
the rest of the forest is left untouched. No subsequent rebuild is
necessary.
``delay_mptt_updates`` does a partial rebuild of the modified trees
(not the whole table). If used indiscriminately, this can actually
be much slower than just letting the updates occur when they're
required.
The worst case occurs when every tree in the table is modified just
once. That results in a full rebuild of the table, which can be
*very* slow.
If your updates will modify most of the trees in the table (not a
small number of trees), you should consider using
``TreeManager.disable_mptt_updates``, as it does much fewer
queries.
Transactions:
This doesn't enforce any transactional behavior. You should wrap
this in a transaction to ensure database consistency.
Exceptions:
If an exception occurs before the processing of the block, delayed
updates will not be applied.
Usage::
with transaction.atomic():
with MyNode.objects.delay_mptt_updates():
## bulk updates.
"""
with self.disable_mptt_updates():
if self.model._mptt_is_tracking:
# already tracking, noop.
yield
else:
self.model._mptt_start_tracking()
try:
yield
except Exception:
# stop tracking, but discard results
self.model._mptt_stop_tracking()
raise
results = self.model._mptt_stop_tracking()
partial_rebuild = self.partial_rebuild
for tree_id in results:
partial_rebuild(tree_id)
@property
def parent_attr(self):
return self.model._mptt_meta.parent_attr
@property
def left_attr(self):
return self.model._mptt_meta.left_attr
@property
def right_attr(self):
return self.model._mptt_meta.right_attr
@property
def tree_id_attr(self):
return self.model._mptt_meta.tree_id_attr
@property
def level_attr(self):
return self.model._mptt_meta.level_attr
def _translate_lookups(self, **lookups):
new_lookups = {}
join_parts = '__'.join
for k, v in lookups.items():
parts = k.split('__')
new_parts = []
new_parts__append = new_parts.append
for part in parts:
new_parts__append(getattr(self, part + '_attr', part))
new_lookups[join_parts(new_parts)] = v
return new_lookups
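    # With the default field names this turns name-agnostic lookups into
    # concrete ones, e.g. _translate_lookups(left__gte=10) returns
    # {'lft__gte': 10} ('left' maps to self.left_attr, 'gte' is kept as-is).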
def _mptt_filter(self, qs=None, **filters):
"""
Like ``self.filter()``, but translates name-agnostic filters for MPTT
fields.
"""
if self._base_manager:
return self._base_manager._mptt_filter(qs=qs, **filters)
if qs is None:
qs = self
return qs.filter(**self._translate_lookups(**filters))
def _mptt_update(self, qs=None, **items):
"""
Like ``self.update()``, but translates name-agnostic MPTT fields.
"""
if self._base_manager:
return self._base_manager._mptt_update(qs=qs, **items)
if qs is None:
qs = self
return qs.update(**self._translate_lookups(**items))
def _get_connection(self, **hints):
return connections[router.db_for_write(self.model, **hints)]
def add_related_count(self, queryset, rel_model, rel_field, count_attr,
cumulative=False):
"""
Adds a related item count to a given ``QuerySet`` using its
``extra`` method, for a ``Model`` class which has a relation to
this ``Manager``'s ``Model`` class.
Arguments:
``rel_model``
A ``Model`` class which has a relation to this `Manager``'s
``Model`` class.
``rel_field``
The name of the field in ``rel_model`` which holds the
relation.
``count_attr``
The name of an attribute which should be added to each item in
this ``QuerySet``, containing a count of how many instances
of ``rel_model`` are related to it through ``rel_field``.
``cumulative``
If ``True``, the count will be for each item and all of its
descendants, otherwise it will be for each item itself.
"""
connection = self._get_connection()
qn = connection.ops.quote_name
meta = self.model._meta
mptt_field = rel_model._meta.get_field(rel_field)
if isinstance(mptt_field, ManyToManyField):
if cumulative:
subquery = CUMULATIVE_COUNT_SUBQUERY_M2M % {
'rel_table': qn(rel_model._meta.db_table),
'rel_pk': qn(rel_model._meta.pk.column),
'rel_m2m_table': qn(mptt_field.m2m_db_table()),
'rel_m2m_column': qn(mptt_field.m2m_column_name()),
'mptt_fk': qn(mptt_field.m2m_reverse_name()),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
'tree_id': qn(meta.get_field(self.tree_id_attr).column),
'left': qn(meta.get_field(self.left_attr).column),
'right': qn(meta.get_field(self.right_attr).column),
}
else:
subquery = COUNT_SUBQUERY_M2M % {
'rel_table': qn(rel_model._meta.db_table),
'rel_pk': qn(rel_model._meta.pk.column),
'rel_m2m_table': qn(mptt_field.m2m_db_table()),
'rel_m2m_column': qn(mptt_field.m2m_column_name()),
'mptt_fk': qn(mptt_field.m2m_reverse_name()),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
}
else:
if cumulative:
subquery = CUMULATIVE_COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
'tree_id': qn(meta.get_field(self.tree_id_attr).column),
'left': qn(meta.get_field(self.left_attr).column),
'right': qn(meta.get_field(self.right_attr).column),
}
else:
subquery = COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
}
return queryset.extra(select={count_attr: subquery})
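    # Hedged usage sketch (``Category``/``Note`` are hypothetical models,
    # with ``Note.category`` a ForeignKey to ``Category``): annotate each
    # category with a count covering the node and all of its descendants:
    #
    #   Category.objects.add_related_count(
    #       Category.objects.all(), Note, 'category', 'note_count',
    #       cumulative=True)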
def insert_node(self, node, target, position='last-child', save=False,
allow_existing_pk=False):
"""
        Sets up the tree state for ``node`` (which has not yet been
        inserted into the database) so it will be positioned relative
        to a given ``target`` node as specified by ``position`` (when
        appropriate) when it is inserted, with any necessary space
        already having been made for it.
A ``target`` of ``None`` indicates that ``node`` should be
the last root node.
If ``save`` is ``True``, ``node``'s ``save()`` method will be
called before it is returned.
NOTE: This is a low-level method; it does NOT respect
``MPTTMeta.order_insertion_by``. In most cases you should just
set the node's parent and let mptt call this during save.
"""
if self._base_manager:
return self._base_manager.insert_node(
node, target, position=position, save=save)
if node.pk and not allow_existing_pk and self.filter(pk=node.pk).exists():
raise ValueError(_('Cannot insert a node which has already been saved.'))
if target is None:
tree_id = self._get_next_tree_id()
setattr(node, self.left_attr, 1)
setattr(node, self.right_attr, 2)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, None)
elif target.is_root_node() and position in ['left', 'right']:
target_tree_id = getattr(target, self.tree_id_attr)
if position == 'left':
tree_id = target_tree_id
space_target = target_tree_id - 1
else:
tree_id = target_tree_id + 1
space_target = target_tree_id
self._create_tree_space(space_target)
setattr(node, self.left_attr, 1)
setattr(node, self.right_attr, 2)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, None)
else:
setattr(node, self.left_attr, 0)
setattr(node, self.level_attr, 0)
space_target, level, left, parent, right_shift = \
self._calculate_inter_tree_move_values(node, target, position)
tree_id = getattr(parent, self.tree_id_attr)
self._create_space(2, space_target, tree_id)
setattr(node, self.left_attr, -left)
setattr(node, self.right_attr, -left + 1)
setattr(node, self.level_attr, -level)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, parent)
if parent:
self._post_insert_update_cached_parent_right(parent, right_shift)
if save:
node.save()
return node
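    # Minimal sketch (names hypothetical): insert a new node as the last
    # child of an existing ``parent`` in one step:
    #
    #   node = Category(name='child')
    #   Category.objects.insert_node(node, parent, save=True)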
def _move_node(self, node, target, position='last-child', save=True):
if self._base_manager:
return self._base_manager.move_node(node, target, position=position)
if self.tree_model._mptt_is_tracking:
# delegate to insert_node and clean up the gaps later.
return self.insert_node(node, target, position=position, save=save,
allow_existing_pk=True)
else:
if target is None:
if node.is_child_node():
self._make_child_root_node(node)
elif target.is_root_node() and position in ('left', 'right'):
self._make_sibling_of_root_node(node, target, position)
else:
if node.is_root_node():
self._move_root_node(node, target, position)
else:
self._move_child_node(node, target, position)
def move_node(self, node, target, position='last-child'):
"""
Moves ``node`` relative to a given ``target`` node as specified
by ``position`` (when appropriate), by examining both nodes and
calling the appropriate method to perform the move.
A ``target`` of ``None`` indicates that ``node`` should be
turned into a root node.
Valid values for ``position`` are ``'first-child'``,
``'last-child'``, ``'left'`` or ``'right'``.
``node`` will be modified to reflect its new tree state in the
database.
This method explicitly checks for ``node`` being made a sibling
of a root node, as this is a special case due to our use of tree
ids to order root nodes.
NOTE: This is a low-level method; it does NOT respect
``MPTTMeta.order_insertion_by``. In most cases you should just
move the node yourself by setting node.parent.
"""
self._move_node(node, target, position=position)
def root_node(self, tree_id):
"""
Returns the root node of the tree with the given id.
"""
if self._base_manager:
return self._base_manager.root_node(tree_id)
return self._mptt_filter(tree_id=tree_id, parent=None).get()
def root_nodes(self):
"""
Creates a ``QuerySet`` containing root nodes.
"""
if self._base_manager:
return self._base_manager.root_nodes()
return self._mptt_filter(parent=None)
def rebuild(self):
"""
Rebuilds all trees in the database table using `parent` link.
"""
if self._base_manager:
return self._base_manager.rebuild()
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
idx = 0
for pk in pks:
idx += 1
rebuild_helper(pk, 1, idx)
rebuild.alters_data = True
def partial_rebuild(self, tree_id):
"""
Partially rebuilds a tree i.e. It rebuilds only the tree with given
``tree_id`` in database table using ``parent`` link.
"""
if self._base_manager:
return self._base_manager.partial_rebuild(tree_id)
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None, tree_id=tree_id)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
if not pks:
return
if len(pks) > 1:
raise RuntimeError(
"More than one root node with tree_id %d. That's invalid,"
" do a full rebuild." % tree_id)
self._rebuild_helper(pks[0], 1, tree_id)
def _rebuild_helper(self, pk, left, tree_id, level=0):
opts = self.model._mptt_meta
right = left + 1
qs = self._mptt_filter(parent__pk=pk)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
child_ids = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
for child_id in child_ids:
right = rebuild_helper(child_id, right, tree_id, level + 1)
qs = self.model._default_manager.filter(pk=pk)
self._mptt_update(
qs,
left=left,
right=right,
level=level,
tree_id=tree_id
)
return right + 1
def _post_insert_update_cached_parent_right(self, instance, right_shift, seen=None):
setattr(instance, self.right_attr, getattr(instance, self.right_attr) + right_shift)
attr = '_%s_cache' % self.parent_attr
if hasattr(instance, attr):
parent = getattr(instance, attr)
if parent:
if not seen:
seen = set()
seen.add(instance)
if parent in seen:
# detect infinite recursion and throw an error
raise InvalidMove
self._post_insert_update_cached_parent_right(parent, right_shift, seen=seen)
def _calculate_inter_tree_move_values(self, node, target, position):
"""
Calculates values required when moving ``node`` relative to
``target`` as specified by ``position``.
"""
left = getattr(node, self.left_attr)
level = getattr(node, self.level_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if position == 'last-child':
space_target = target_right - 1
else:
space_target = target_left
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if position == 'left':
space_target = target_left - 1
else:
space_target = target_right
level_change = level - target_level
parent = getattr(target, self.parent_attr)
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_right_change = left - space_target - 1
right_shift = 0
if parent:
right_shift = 2 * (node.get_descendant_count() + 1)
return space_target, level_change, left_right_change, parent, right_shift
def _close_gap(self, size, target, tree_id):
"""
Closes a gap of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(-size, target, tree_id)
def _create_space(self, size, target, tree_id):
"""
Creates a space of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(size, target, tree_id)
def _create_tree_space(self, target_tree_id, num_trees=1):
"""
Creates space for a new tree by incrementing all tree ids
greater than ``target_tree_id``.
"""
qs = self._mptt_filter(tree_id__gt=target_tree_id)
self._mptt_update(qs, tree_id=F(self.tree_id_attr) + num_trees)
self.tree_model._mptt_track_tree_insertions(target_tree_id + 1, num_trees)
def _get_next_tree_id(self):
"""
Determines the next largest unused tree id for the tree managed
by this manager.
"""
max_tree_id = list(self.aggregate(Max(self.tree_id_attr)).values())[0]
max_tree_id = max_tree_id or 0
return max_tree_id + 1
def _inter_tree_move_and_close_gap(
self, node, level_change,
left_right_change, new_tree_id, parent_pk=None):
"""
Removes ``node`` from its current tree, with the given set of
changes being applied to ``node`` and its descendants, closing
the gap left by moving ``node`` as it does so.
If ``parent_pk`` is ``None``, this indicates that ``node`` is
being moved to a brand new tree as its root node, and will thus
have its parent field set to ``NULL``. Otherwise, ``node`` will
have ``parent_pk`` set for its parent field.
"""
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
inter_tree_move_query = """
UPDATE %(table)s
SET %(level)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(level)s - %%s
ELSE %(level)s END,
%(tree_id)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %%s
ELSE %(tree_id)s END,
%(left)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s - %%s
WHEN %(left)s > %%s
THEN %(left)s - %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s - %%s
WHEN %(right)s > %%s
THEN %(right)s - %%s
ELSE %(right)s END,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %(new_parent)s
ELSE %(parent)s END
WHERE %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
'new_parent': parent_pk is None and 'NULL' or '%s',
}
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
gap_size = right - left + 1
gap_target_left = left - 1
params = [
left, right, level_change,
left, right, new_tree_id,
left, right, left_right_change,
gap_target_left, gap_size,
left, right, left_right_change,
gap_target_left, gap_size,
node.pk,
getattr(node, self.tree_id_attr)
]
if parent_pk is not None:
params.insert(-1, parent_pk)
cursor = connection.cursor()
cursor.execute(inter_tree_move_query, params)
def _make_child_root_node(self, node, new_tree_id=None):
"""
Removes ``node`` from its tree, making it the root node of a new
tree.
If ``new_tree_id`` is not specified a new tree id will be
generated.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
if not new_tree_id:
new_tree_id = self._get_next_tree_id()
left_right_change = left - 1
self._inter_tree_move_and_close_gap(node, level, left_right_change, new_tree_id)
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, None)
node._mptt_cached_fields[self.parent_attr] = None
def _make_sibling_of_root_node(self, node, target, position):
"""
Moves ``node``, making it a sibling of the given ``target`` root
node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
Since we use tree ids to reduce the number of rows affected by
        tree management during insertion and deletion, root nodes are not
true siblings; thus, making an item a sibling of a root node is
a special case which involves shuffling tree ids around.
"""
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
opts = self.model._meta
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if node.is_child_node():
if position == 'left':
space_target = target_tree_id - 1
new_tree_id = target_tree_id
elif position == 'right':
space_target = target_tree_id
new_tree_id = target_tree_id + 1
else:
raise ValueError(_('An invalid position was given: %s.') % position)
self._create_tree_space(space_target)
if tree_id > space_target:
# The node's tree id has been incremented in the
# database - this change must be reflected in the node
# object for the method call below to operate on the
# correct tree.
setattr(node, self.tree_id_attr, tree_id + 1)
self._make_child_root_node(node, new_tree_id)
else:
if position == 'left':
if target_tree_id > tree_id:
left_sibling = target.get_previous_sibling()
if node == left_sibling:
return
new_tree_id = getattr(left_sibling, self.tree_id_attr)
lower_bound, upper_bound = tree_id, new_tree_id
shift = -1
else:
new_tree_id = target_tree_id
lower_bound, upper_bound = new_tree_id, tree_id
shift = 1
elif position == 'right':
if target_tree_id > tree_id:
new_tree_id = target_tree_id
lower_bound, upper_bound = tree_id, target_tree_id
shift = -1
else:
right_sibling = target.get_next_sibling()
if node == right_sibling:
return
new_tree_id = getattr(right_sibling, self.tree_id_attr)
lower_bound, upper_bound = new_tree_id, tree_id
shift = 1
else:
raise ValueError(_('An invalid position was given: %s.') % position)
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
root_sibling_query = """
UPDATE %(table)s
SET %(tree_id)s = CASE
WHEN %(tree_id)s = %%s
THEN %%s
ELSE %(tree_id)s + %%s END
WHERE %(tree_id)s >= %%s AND %(tree_id)s <= %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(root_sibling_query, [tree_id, new_tree_id, shift,
lower_bound, upper_bound])
setattr(node, self.tree_id_attr, new_tree_id)
def _manage_space(self, size, target, tree_id):
"""
Manages spaces in the tree identified by ``tree_id`` by changing
the values of the left and right columns by ``size`` after the
given ``target`` point.
"""
if self.tree_model._mptt_is_tracking:
self.tree_model._mptt_track_tree_modified(tree_id)
else:
connection = self._get_connection()
qn = connection.ops.quote_name
opts = self.model._meta
space_query = """
UPDATE %(table)s
SET %(left)s = CASE
WHEN %(left)s > %%s
THEN %(left)s + %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s > %%s
THEN %(right)s + %%s
ELSE %(right)s END
WHERE %(tree_id)s = %%s
AND (%(left)s > %%s OR %(right)s > %%s)""" % {
'table': qn(self.tree_model._meta.db_table),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(space_query, [target, size, target, size, tree_id,
target, target])
def _move_child_node(self, node, target, position):
"""
Calls the appropriate method to move child node ``node``
relative to the given ``target`` node as specified by
``position``.
"""
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if tree_id == target_tree_id:
self._move_child_within_tree(node, target, position)
else:
self._move_child_to_new_tree(node, target, position)
def _move_child_to_new_tree(self, node, target, position):
"""
Moves child node ``node`` to a different tree, inserting it
relative to the given ``target`` node in the new tree as
specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
new_tree_id = getattr(target, self.tree_id_attr)
space_target, level_change, left_right_change, parent, new_parent_right = \
self._calculate_inter_tree_move_values(node, target, position)
tree_width = right - left + 1
# Make space for the subtree which will be moved
self._create_space(tree_width, space_target, new_tree_id)
# Move the subtree
self._inter_tree_move_and_close_gap(
node, level_change, left_right_change, new_tree_id, parent.pk)
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
def _move_child_within_tree(self, node, target, position):
"""
Moves child node ``node`` within its current tree relative to
the given ``target`` node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
width = right - left + 1
tree_id = getattr(node, self.tree_id_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
if position == 'last-child':
if target_right > right:
new_left = target_right - width
new_right = target_right - 1
else:
new_left = target_right
new_right = target_right + width - 1
else:
if target_left > left:
new_left = target_left - width + 1
new_right = target_left
else:
new_left = target_left + 1
new_right = target_left + width
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a sibling of any of its descendants.'))
if position == 'left':
if target_left > left:
new_left = target_left - width
new_right = target_left - 1
else:
new_left = target_left
new_right = target_left + width - 1
else:
if target_right > right:
new_left = target_right - width + 1
new_right = target_right
else:
new_left = target_right + 1
new_right = target_right + width
level_change = level - target_level
parent = getattr(target, self.parent_attr)
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_boundary = min(left, new_left)
right_boundary = max(right, new_right)
left_right_change = new_left - left
gap_size = width
if left_right_change > 0:
gap_size = -gap_size
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
# The level update must come before the left update to keep
# MySQL happy - left seems to refer to the updated value
# immediately after its update has been specified in the query
# with MySQL, but not with SQLite or Postgres.
move_subtree_query = """
UPDATE %(table)s
SET %(level)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(level)s - %%s
ELSE %(level)s END,
%(left)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s + %%s
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s + %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s + %%s
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s + %%s
ELSE %(right)s END,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %%s
ELSE %(parent)s END
WHERE %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(move_subtree_query, [
left, right, level_change,
left, right, left_right_change,
left_boundary, right_boundary, gap_size,
left, right, left_right_change,
left_boundary, right_boundary, gap_size,
node.pk, parent.pk,
tree_id])
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, new_left)
setattr(node, self.right_attr, new_right)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
def _move_root_node(self, node, target, position):
"""
        Moves root node ``node`` to a different tree, inserting it
relative to the given ``target`` node as specified by
``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
tree_id = getattr(node, self.tree_id_attr)
new_tree_id = getattr(target, self.tree_id_attr)
width = right - left + 1
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif tree_id == new_tree_id:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
space_target, level_change, left_right_change, parent, right_shift = \
self._calculate_inter_tree_move_values(node, target, position)
# Create space for the tree which will be inserted
self._create_space(width, space_target, new_tree_id)
# Move the root node, making it a child node
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
move_tree_query = """
UPDATE %(table)s
SET %(level)s = %(level)s - %%s,
%(left)s = %(left)s - %%s,
%(right)s = %(right)s - %%s,
%(tree_id)s = %%s,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %%s
ELSE %(parent)s END
WHERE %(left)s >= %%s AND %(left)s <= %%s
AND %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
}
cursor = connection.cursor()
cursor.execute(move_tree_query, [
level_change, left_right_change, left_right_change,
new_tree_id, node.pk, parent.pk, left, right, tree_id])
# Update the former root node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
| mit | -7,849,403,533,287,064,000 | 38.376154 | 99 | 0.543766 | false | 4.029991 | false | false | false |
pickettd/SerialPort-RealTime-Data-Plotter | com_monitor.py | 1 | 5133 |
import Queue, threading, timeit, serial
from globals import *
class ComMonitorThread(threading.Thread):
""" A thread for monitoring a COM port. The COM port is
opened when the thread is started.
data_q:
Queue for received data. Items in the queue are
(data, timestamp) pairs, where data is a binary
string representing the received data, and timestamp
is the time elapsed from the thread's start (in
seconds).
error_q:
Queue for error messages. In particular, if the
serial port fails to open for some reason, an error
is placed into this queue.
port:
The COM port to open. Must be recognized by the
system.
port_baud/stopbits/parity:
Serial communication parameters
port_timeout:
The timeout used for reading the COM port. If this
value is low, the thread will return data in finer
grained chunks, with more accurate timestamps, but
it will also consume more CPU.
"""
def __init__( self,
data_q, error_q,
port_num,
port_baud,
port_stopbits = serial.STOPBITS_ONE,
port_parity = serial.PARITY_NONE,
port_timeout = 0.01):
threading.Thread.__init__(self)
self.serial_port = None
self.serial_arg = dict( port = port_num,
baudrate = port_baud,
stopbits = port_stopbits,
parity = port_parity,
timeout = port_timeout)
self.data_q = data_q
self.error_q = error_q
self.alive = threading.Event()
self.alive.set()
#------------------------------------------------------
def getAxes(self, bytes, gforce = True):
        x = bytes[0] | (bytes[1] << 8)
        if x & (1 << 15):  # sign bit set: extend to a negative 16-bit value
            x = x - (1 << 16)
        y = bytes[2] | (bytes[3] << 8)
        if y & (1 << 15):
            y = y - (1 << 16)
        z = bytes[4] | (bytes[5] << 8)
        if z & (1 << 15):
            z = z - (1 << 16)
x = x * SCALE_MULTIPLIER
y = y * SCALE_MULTIPLIER
z = z * SCALE_MULTIPLIER
if gforce == False:
x = x * EARTH_GRAVITY_MS2
y = y * EARTH_GRAVITY_MS2
z = z * EARTH_GRAVITY_MS2
x = round(x, 3)
y = round(y, 3)
z = round(z, 3)
return {"x": x, "y": y, "z": z}
#------------------------------------------------------
def run(self):
try:
if self.serial_port:
self.serial_port.close()
setTimeout = self.serial_arg['timeout']
self.serial_arg['timeout'] = 100
self.serial_port = serial.Serial(**self.serial_arg)
print(self.serial_port.readline())
self.serial_port.write("A")
print(self.serial_port.readline())
print(self.serial_port.readline())
self.serial_port.timeout = setTimeout
except serial.SerialException, e:
self.error_q.put(e.message)
return
# Restart the clock
startTime = timeit.default_timer()
while self.alive.isSet():
Line = self.serial_port.readline()
bytes = Line.split()
#print bytes
#use map(int) for simulation
#data = map(ord, bytes)
data = bytes
qdata = [0,0,0]
if len(data) == 0:
#print "zero data"
timestamp = timeit.default_timer() - startTime
self.data_q.put((qdata, timestamp))
if len(data) > 0:
print "got data"
timestamp = timeit.default_timer() - startTime
qdata = [4,4,4]
self.data_q.put((qdata, timestamp))
'''
if len(data) == 6:
timestamp = timeit.default_timer() - startTime
#data = list(map(ord, list(Line)))
print "Line", Line
print "bytes", bytes
print "data", data
axes = self.getAxes(data)
print " x = %.3fG" % ( axes['x'] )
print " y = %.3fG" % ( axes['y'] )
print " z = %.3fG" % ( axes['z'] )
qdata[0] = axes['x']
qdata[1] = axes['y']
qdata[2] = axes['z']
print "qdata :", qdata
#timestamp = timeit.default_timer()
self.data_q.put((qdata, timestamp))
'''
# clean up
if self.serial_port:
self.serial_port.close()
def join(self, timeout=None):
self.alive.clear()
threading.Thread.join(self, timeout)
| mit | 5,029,071,917,656,844,000 | 31.283019 | 64 | 0.447691 | false | 4.203931 | false | false | false |
fintech-circle/edx-platform | lms/djangoapps/gating/api.py | 1 | 4680 | """
API for the gating djangoapp
"""
from collections import defaultdict
import json
import logging
from lms.djangoapps.courseware.entrance_exams import get_entrance_exam_content
from openedx.core.lib.gating import api as gating_api
from opaque_keys.edx.keys import UsageKey
from util import milestones_helpers
log = logging.getLogger(__name__)
@gating_api.gating_enabled(default=False)
def evaluate_prerequisite(course, subsection_grade, user):
"""
Evaluates any gating milestone relationships attached to the given
subsection. If the subsection_grade meets the minimum score required
by dependent subsections, the related milestone will be marked
fulfilled for the user.
"""
prereq_milestone = gating_api.get_gating_milestone(course.id, subsection_grade.location, 'fulfills')
if prereq_milestone:
gated_content_milestones = defaultdict(list)
for milestone in gating_api.find_gating_milestones(course.id, content_key=None, relationship='requires'):
gated_content_milestones[milestone['id']].append(milestone)
gated_content = gated_content_milestones.get(prereq_milestone['id'])
if gated_content:
for milestone in gated_content:
min_percentage = _get_minimum_required_percentage(milestone)
subsection_percentage = _get_subsection_percentage(subsection_grade)
if subsection_percentage >= min_percentage:
milestones_helpers.add_user_milestone({'id': user.id}, prereq_milestone)
else:
milestones_helpers.remove_user_milestone({'id': user.id}, prereq_milestone)
def _get_minimum_required_percentage(milestone):
"""
Returns the minimum percentage requirement for the given milestone.
"""
# Default minimum score to 100
min_score = 100
requirements = milestone.get('requirements')
if requirements:
try:
min_score = int(requirements.get('min_score'))
except (ValueError, TypeError):
log.warning(
u'Gating: Failed to find minimum score for gating milestone %s, defaulting to 100',
json.dumps(milestone)
)
return min_score
def _get_subsection_percentage(subsection_grade):
"""
Returns the percentage value of the given subsection_grade.
"""
return _calculate_ratio(subsection_grade.graded_total.earned, subsection_grade.graded_total.possible) * 100.0
def _calculate_ratio(earned, possible):
"""
    Returns the ratio (0.0-1.0) of the given earned and possible values.
"""
return float(earned) / float(possible) if possible else 0.0
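# For example, _calculate_ratio(3.0, 4.0) == 0.75, while a zero or missing
# ``possible`` safely yields 0.0 instead of raising ZeroDivisionError.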
def evaluate_entrance_exam(course_grade, user):
"""
Evaluates any entrance exam milestone relationships attached
to the given course. If the course_grade meets the
minimum score required, the dependent milestones will be marked
fulfilled for the user.
"""
course = course_grade.course_data.course
if milestones_helpers.is_entrance_exams_enabled() and getattr(course, 'entrance_exam_enabled', False):
if get_entrance_exam_content(user, course):
exam_chapter_key = get_entrance_exam_usage_key(course)
exam_score_ratio = get_entrance_exam_score_ratio(course_grade, exam_chapter_key)
if exam_score_ratio >= course.entrance_exam_minimum_score_pct:
relationship_types = milestones_helpers.get_milestone_relationship_types()
content_milestones = milestones_helpers.get_course_content_milestones(
course.id,
exam_chapter_key,
relationship=relationship_types['FULFILLS']
)
# Mark each entrance exam dependent milestone as fulfilled by the user.
for milestone in content_milestones:
milestones_helpers.add_user_milestone({'id': user.id}, milestone)
def get_entrance_exam_usage_key(course):
"""
Returns the UsageKey of the entrance exam for the course.
"""
return UsageKey.from_string(course.entrance_exam_id).replace(course_key=course.id)
def get_entrance_exam_score_ratio(course_grade, exam_chapter_key):
"""
Returns the score for the given chapter as a ratio of the
aggregated earned over the possible points, resulting in a
decimal value less than 1.
"""
try:
earned, possible = course_grade.score_for_chapter(exam_chapter_key)
except KeyError:
earned, possible = 0.0, 0.0
log.warning(u'Gating: Unexpectedly failed to find chapter_grade for %s.', exam_chapter_key)
return _calculate_ratio(earned, possible)
| agpl-3.0 | -109,539,272,815,895,200 | 39.344828 | 113 | 0.678846 | false | 4.08377 | false | false | false |
starnose/liar | liarutils.py | 1 | 3600 | ###################################################################################
# liarutils.py, utility calls for liar
###################################################################################
#
# Copyright 2013 David Hicks (Starnose Ltd)
#
# This file is part of Liar
#
# Liar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation,
#
# Liar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Liar. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
#
# A couple of utility functions, both of which could be done a lot better.
# hexdump is used for output formatting and is a hacky translation of a C function
# createServerCert is a mess of os.system stuff for generating certificates from
# templates
#
###################################################################################
import string
import os
import random
def hexdump(data="", length=0):
output=""
p = 0
major = 0
minor = 0
printbuf = ""
plaintext=""
output+= " | 0 1 2 3 4 5 6 7 8 9 A B C D E F\n----+--------------------------------------------------"
while p < length:
if (minor % 16) == 0:
output+= " %s\n %3x|" % (printbuf,major)
plaintext = plaintext + printbuf
printbuf = ""
major += 1;
if (minor % 8) == 0:
if (minor % 16) != 0:
output+= " "
output+= " %2.2x" % ( ord(data[p]) )
if data[p] in string.letters or data[p] in string.digits or data[p] in string.punctuation:
if data[p] == '\x0a':
printbuf += '.'
else:
printbuf += data[p]
else:
printbuf +='.'
minor += 1
p += 1
plaintext = plaintext + printbuf
major = minor % 16
if major != 0:
major = (16 - major) * 3
if major > 24:
major += 1
while major != 0:
printbuf = " " + printbuf
major -= 1
output+= " %s\n\n" % (printbuf)
return output, plaintext
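# Illustrative call (format abridged): hexdump("AB\n", 3) returns a dump roughly
# like
#      | 0  1  2 ...
#  ----+---------...
#    1| 41 42 0a              AB.
# as the first value, plus the recovered plaintext ("AB.") as the second.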
def createServerCert(servername, rootcert, rootkey, templatefile, outputdir):
random.seed()
if not (os.path.isfile(os.path.join(outputdir,"%s.key" % servername)) and os.path.isfile(os.path.join(outputdir,"%s.cert" % servername))):
  #DIRTY, VERY VERY DIRTY INDEED - this should probably be done with real, actual python.
os.system("sed s/SERVERNAME/%s/ %s > %s/%s.tmpl" % (servername, templatefile, outputdir, servername) )
os.system("sed s/SERVERSERIAL/%d/ %s/%s.tmpl > %s/%s.tmp" % (random.randint(0,32767), outputdir, servername, outputdir, servername) )
os.system("certtool --generate-privkey --bits 512 --outfile %s/%s.key" % (outputdir, servername) )
os.system("certtool --generate-request --load-privkey %s/%s.key --outfile %s/%s.req --template %s/%s.tmp" %
(outputdir, servername, outputdir, servername, outputdir, servername) )
os.system("certtool --generate-certificate --load-request %s/%s.req --outfile %s/%s.cert --load-ca-certificate %s --load-ca-privkey %s --template %s/%s.tmp" %
(outputdir, servername, outputdir, servername, rootcert, rootkey, outputdir, servername) )
| gpl-3.0 | 8,115,426,600,985,354,000 | 40.37931 | 164 | 0.553611 | false | 3.71517 | false | false | false |
Pinyto/cloud | keyserver/models.py | 1 | 4666 | # coding=utf-8
"""
Pinyto cloud - A secure cloud database for your personal data
Copyright (C) 2019 Pina Merkert <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.db import models
from hashlib import sha256
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from keyserver.helper import create_salt
class Account(models.Model):
"""
A Pinyto account consists of a username, a password and
a pair of asymmetric keys. The keys are used for the
authentication with a pinyto server which stores the
data. Username and password are the credentials memorized
by the user which he can use to access his keys.
The password is not stored but a hash. If a password is
supplied the salt is added and the concatenation is hashed.
The hash of the hash gets hashed until the password was
hashed for hash_iteration times. The algorithm which is used
is SHA256. After the last iteration the hash can be compared
to the stored hash. If they match the password is correct.
"""
name = models.CharField(max_length=30, primary_key=True)
salt = models.CharField(max_length=10)
hash_iterations = models.IntegerField(default=10000)
hash = models.CharField(max_length=32)
N = models.CharField(max_length=1400)
e = models.BigIntegerField()
d = models.CharField(max_length=1400)
p = models.CharField(max_length=800)
q = models.CharField(max_length=800)
@staticmethod
def hash_password(password, salt, hash_iterations):
hash_string = password + salt
for i in range(hash_iterations):
hasher = sha256()
hasher.update(hash_string.encode('utf-8'))
hash_string = hasher.hexdigest()
        return hash_string[:32]  # truncated to fit the 32-character hash column
@classmethod
def create(cls, name, password='', hash_iterations=420):
"""
Creates an account with hashed password, new random salt and 4096 bit RSA key pair.
:param name:
:type name: str
:param password: (technically this is an optional parameter but in reality you should not
use empty passwords)
:type password: str
:param hash_iterations: (optional)
:type hash_iterations: int
:return: An Account instance already saved to the database
:rtype: keyserver.models.Account
"""
salt = create_salt(10)
hash_string = cls.hash_password(password, salt, hash_iterations)
key = rsa.generate_private_key(public_exponent=65537, key_size=4096, backend=default_backend())
account = cls(name=name,
salt=salt,
hash_iterations=hash_iterations,
hash=hash_string,
N=str(key.public_key().public_numbers().n),
e=str(key.public_key().public_numbers().e),
d=str(key.private_numbers().d),
p=str(key.private_numbers().p),
q=str(key.private_numbers().q))
account.save()
return account
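    # Usage sketch (illustrative credentials; key generation is slow because a
    # 4096-bit RSA key pair is created):
    #
    #   account = Account.create('alice', 'correct horse battery staple')
    #   assert account.check_password('correct horse battery staple')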
def check_password(self, password):
"""
This method checks if the given password is valid by comparing it to the stored hash.
:param password:
:type password: str
:rtype: boolean
"""
hash_string = self.hash_password(password, self.salt, self.hash_iterations)
return hash_string == self.hash
def change_password(self, password, hash_iterations=420):
"""
Changes the password to the supplied one.
hash_iterations are optional but can be used to upgrade the passwords to faster servers.
:param password:
:type password: str
:param hash_iterations: (optional)
:type hash_iterations: int
"""
self.salt = create_salt(10)
hash_string = self.hash_password(password, self.salt, hash_iterations)
self.hash_iterations = hash_iterations
        self.hash = hash_string  # hash_password already returns a str
self.save()
| gpl-3.0 | -5,410,453,920,801,267,000 | 39.224138 | 103 | 0.661166 | false | 4.257299 | false | false | false |
digitie/magneto | core/util.py | 1 | 3603 | # -*- coding: UTF-8 -*-
from time import sleep
import logging
import serial
import sys
import binascii
from log import MainLogger
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
"""This file provides useful utilities for the wanglib package."""
class InstrumentError(Exception):
"""Raise this when talking to instruments fails."""
pass
def is_ascii(s):
return all(ord(c) < 128 for c in s)
def show_newlines(string):
"""
replace CR+LF with the words "CR" and "LF".
useful for debugging.
"""
if is_ascii(string):
try:
return string.replace('\r', '<CR>').replace('\n', '<LF>')
except AttributeError:
return binascii.hexlify(bytearray(string))
else:
try:
return binascii.hexlify(string)
except TypeError:
return binascii.hexlify(bytearray(string))
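# For example, show_newlines('OK\r\n') returns 'OK<CR><LF>', while input
# containing bytes >= 0x80, such as '\xff\xfe', is rendered in hex as 'fffe'.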
class Serial(serial.Serial):
"""
Extension of the standard serial class.
to log whatever's written or read, pass a filename into
the 'log' kwarg.
"""
def __init__(self, *args, **kwargs):
# make an event logger
self.logger = MainLogger
# take 'log' kwarg.
self.logfile = kwargs.pop('log', False)
if self.logfile:
self.start_logging(self.logfile)
# take default termination character
# by default, append empty string
self.term_chars = kwargs.pop('term_chars', '')
# hand off to standard serial init function
super(Serial, self).__init__(*args, **kwargs)
def start_logging(self):
""" start logging read/write data to file. """
# make log file handler
lfh = logging.StreamHandler(sys.stdout)
self.logger.addHandler(lfh)
# make log file formatter
lff = logging.Formatter('%(asctime)s %(message)s')
lfh.setFormatter(lff)
# set level low to log everything
self.logger.setLevel(1)
self.logger.debug('opened serial port')
def write(self, data):
data += self.term_chars
super(Serial, self).write(data)
self.logger.debug('write: ' + show_newlines(data))
def read(self, size=1):
resp = super(Serial, self).read(size)
#self.logger.debug(' read: ' + show_newlines(resp))
return resp
def readall(self):
"""Automatically read all the bytes from the serial port."""
return self.read(self.inWaiting())
def ask(self, query, lag=0.05):
"""
Write to the bus, then read response.
This doesn't seem to work very well.
"""
self.write(query)
sleep(lag)
return self.readall()
# ------------------------------------------------------
# up to this point, this file has dealt with customizing
# communication interfaces (GPIB / RS232). What follows
# are more random (though useful) utilities.
#
# The two halves of this file serve rather disparate
# needs, and should probably be broken in two pieces.
# Before I actually do that I'd like to trim down
# dependencies in the rest of the library - I think that
# will go a long way in reducing complexity.
# ------------------------------------------------------
def num(string):
"""
convert string to number. decide whether to convert to int or float.
"""
if '.' not in string:
return int(string)
else:
return float(string) | unlicense | -5,885,136,105,583,912,000 | 28.540984 | 72 | 0.600888 | false | 4.146145 | false | false | false |
robmcmullen/pyatasm | pyatasm/assemble.py | 1 | 3007 | from .pyatasm_mac65 import mac65_assemble
class Assemble(object):
def __init__(self, source, verbose=False):
self.verbose = verbose
if isinstance(source, str):
source = source.encode("utf-8")
self.errors, text = mac65_assemble(source)
self.segments = []
self.transitory_equates = {}
self.equates = {}
self.labels = {}
self.current_parser = self.null_parser
self.first_addr = None
self.last_addr = None
self.current_bytes = []
if text:
self.parse(text)
def __len__(self):
return len(self.segments)
def null_parser(self, line, cleanup=False):
pass
def source_parser(self, line, cleanup=False):
if cleanup:
if self.verbose: print("Code block: %x-%x" % (self.first_addr, self.last_addr))
self.segments.append((self.first_addr, self.last_addr, self.current_bytes))
self.first_addr = None
self.last_addr = None
self.current_bytes = []
return
lineno, addr, data, text = line[0:5], line[6:10], line[12:30], line[31:]
addr = int(addr, 16)
b = [int(a,16) for a in data.split()]
#print hex(index), b
if b:
count = len(b)
if self.first_addr is None:
self.first_addr = self.last_addr = addr
elif addr != self.last_addr:
if self.verbose: print("Code block: %x-%x" % (self.first_addr, self.last_addr))
self.segments.append((self.first_addr, self.last_addr, self.current_bytes))
self.first_addr = self.last_addr = addr
self.current_bytes = []
self.current_bytes.extend(b)
self.last_addr += count
def equates_parser(self, line, cleanup=False):
if cleanup:
return
symbol, addr = line.split(": ")
if symbol[0] == "*":
self.transitory_equates[symbol[1:].lower()] = int(addr, 16)
else:
self.equates[symbol.lower()] = int(addr, 16)
def symbol_parser(self, line, cleanup=False):
if cleanup:
return
symbol, addr = line.split(": ")
self.labels[symbol.lower()] = int(addr, 16)
def parse(self, text):
for line in text.splitlines():
line = line.strip()
if not line:
continue
if self.verbose: print("parsing:", line)
if line.startswith("Source:"):
self.current_parser(None, cleanup=True)
self.current_parser = self.source_parser
elif line == "Equates:":
self.current_parser(None, cleanup=True)
self.current_parser = self.equates_parser
elif line == "Symbol table:":
self.current_parser(None, cleanup=True)
self.current_parser = self.symbol_parser
else:
self.current_parser(line)
| gpl-2.0 | -70,870,082,602,270,100 | 35.228916 | 95 | 0.536748 | false | 3.910273 | false | false | false |
stcakova/Battle-city | world.py | 1 | 1375 | import pygame
from pygame import rect
from constants import *
from part import Wall, Brick
class World:
world = [[None for x in range(26)] for y in range(19)]
def __init__(self):
self.brick = pygame.image.load("pictures/brick.jpeg")
self.wall = pygame.image.load("pictures/wall.png")
self.screen = pygame.display.set_mode(SCREEN_SIZE)
self.phoenix = pygame.image.load("pictures/phoenix.jpg")
self.phoenix = pygame.transform.scale(
self.phoenix, (PART_SIZE * 2, PART_SIZE * 2))
def extract_world(self, multiplayer):
if not multiplayer:
maps = open(MAP)
else:
maps = open(MULTIPLAYER_MAP)
for height, line in enumerate(maps):
for width, element in enumerate(line):
if element == 'B' or element == 'F':
self.world[height][width] = Wall(
(width * PART_SIZE, height * PART_SIZE))
elif element == '#':
self.world[height][width] = Brick(
(width * PART_SIZE, height * PART_SIZE))
def draw_world(self, multiplayer):
for wall in walls:
if wall.existing:
wall.draw()
self.screen.blit(self.phoenix, (440, 640))
if multiplayer:
self.screen.blit(self.phoenix, (440, 0))
| gpl-2.0 | 8,877,047,811,533,497,000 | 33.375 | 64 | 0.555636 | false | 3.798343 | false | false | false |
DTOcean/dtocean-core | test_data/inputs_wp2_wave.py | 1 | 3952 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 09 10:39:38 2015
@author: 108630
"""
import os
from datetime import datetime, timedelta
import numpy as np
dir_path = os.path.dirname(__file__)
# Setup
x = np.linspace(0.,1000.,20.)
y = np.linspace(0.,300.,20.)
nx = len(x)
ny = len(y)
# Bathymetry?
X, Y = np.meshgrid(x,y)
Z = -X * 0.1 - 1
depths = Z.T[:, :, np.newaxis]
sediments = np.chararray((nx,ny,1), itemsize=20)
sediments[:] = "rock"
strata = {"values": {'depth': depths,
'sediment': sediments},
"coords": [x, y, ["layer 1"]]}
# Mannings
#geoxyz = np.vstack((X.ravel(),Y.ravel(),G.ravel())).T
G = np.zeros((nx, ny)) + 0.3
geo_raw = {"values": G,
"coords": [x, y]}
sample_size = 1000
dates = []
dt = datetime(2010, 12, 01)
step = timedelta(seconds=3600)
for _ in xrange(sample_size):
dates.append(dt)
dt += step
Hm0 = 9. * np.random.random_sample(sample_size)
direction = 360. * np.random.random_sample(sample_size)
Te = 15. * np.random.random_sample(sample_size)
wave_series = {"DateTime": dates,
"Te": Te,
"Hm0": Hm0,
"Dir": direction}
# Fixed array layout
pos = [(450., 100.),
(550., 100.),
(450., 150.),
(550., 150.)]
FixedArrayLayout = np.array(pos)
#wave_xgrid = None
#B= np.array([0.,270.])/180*np.pi
#H= np.array([1.])
#T= np.array([6.])
#p= 1.0/len(B)/len(H)/len(T)* np.ones((len(T),len(H),len(B)))
#
#occurrence_matrix_coords = [T,H,B]
#wave_xgrid = {"values": p,
# "coords": occurrence_matrix_coords}
lease_area = np.array([[50., 50.],[950., 50.],[950., 250.],[50., 250.]],dtype=float)
power_law_exponent = np.array([7.])
nogo_areas = {"a": np.array([[0, 0],[.1, 0],[.1, .1],[0, .1]])}
rated_array_power = 5
main_direction = None
blockage_ratio = 1.
spectrum_type_farm = 'JONSWAP'
spectrum_gamma_farm = 3.3
spectrum_dir_spreading_farm = 0.
point_SSH = 0.
#user_array_option = 'rectangular'
#user_array_layout = None
user_array_option = 'User Defined Fixed'
user_array_layout = FixedArrayLayout
wave_data_directory = os.path.abspath(os.path.join(dir_path, "nemoh"))
float_flag = False
min_install = -np.inf
max_install = 0.
min_dist_x = 40.
min_dist_y = 40.
yaw_angle = 0.
rated_power_device = 1
op_threshold = 0
landing_point = (0.,0.)
test_data = {'bathymetry.layers': strata,
'corridor.landing_point': landing_point,
'device.installation_depth_max': max_install,
'device.installation_depth_min': min_install,
'device.minimum_distance_x': min_dist_x,
'device.minimum_distance_y': min_dist_y,
'options.optimisation_threshold': op_threshold,
'device.power_rating': rated_power_device,
'device.wave_data_directory': wave_data_directory,
'device.yaw': yaw_angle,
'farm.blockage_ratio': blockage_ratio,
'bathymetry.mannings': geo_raw,
'site.lease_boundary': lease_area,
'project.main_direction': main_direction,
'farm.nogo_areas': nogo_areas,
# 'farm.point_sea_surface_height': point_SSH,
# 'farm.power_law_exponent': power_law_exponent,
'project.rated_power': rated_array_power,
'farm.spec_gamma': spectrum_gamma_farm,
'farm.spec_spread': spectrum_dir_spreading_farm,
'farm.spectrum_name': spectrum_type_farm,
# 'farm.wave_occurrence': wave_xgrid,
'farm.wave_series': wave_series,
'options.user_array_layout': user_array_layout,
'options.user_array_option': user_array_option}
if __name__ == "__main__":
from dtocean_core.utils.files import pickle_test_data
file_path = os.path.abspath(__file__)
pkl_path = pickle_test_data(file_path, test_data)
print "generate test data: {}".format(pkl_path)
| gpl-3.0 | 6,940,826,449,139,902,000 | 27.846715 | 84 | 0.584008 | false | 2.903747 | false | false | false |
qsnake/gpaw | gpaw/test/xcatom.py | 1 | 1515 | import numpy as np
import numpy.random as ra
from gpaw.setup import create_setup
from gpaw.xc import XC
from gpaw.test import equal
x = 0.000001
ra.seed(8)
for xc in ['LDA', 'PBE']:
print xc
xc = XC(xc)
s = create_setup('N', xc)
ni = s.ni
nii = ni * (ni + 1) // 2
D_p = 0.1 * ra.random(nii) + 0.2
H_p = np.zeros(nii)
E = s.xc_correction.calculate(xc,D_p.reshape(1, -1),
H_p.reshape(1, -1))
dD_p = x * ra.random(nii)
dE = np.dot(H_p, dD_p) / x
D_p += dD_p
Ep = s.xc_correction.calculate(xc,D_p.reshape(1, -1),
H_p.reshape(1, -1))
D_p -= 2 * dD_p
Em = s.xc_correction.calculate(xc,D_p.reshape(1, -1),
H_p.reshape(1, -1))
print dE, dE - 0.5 * (Ep - Em) / x
equal(dE, 0.5 * (Ep - Em) / x, 1e-6)
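    # The check above compares the analytic derivative dE = H . dD / x against
    # the central difference (E(D + dD) - E(D - dD)) / (2 x), which is why D_p
    # was shifted by +dD_p and then by -2 dD_p between the two evaluations.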
Ems = s.xc_correction.calculate(xc,np.array(
[0.5 * D_p, 0.5 * D_p]), np.array([H_p, H_p]))
print Em - Ems
equal(Em, Ems, 1.0e-12)
D_sp = 0.1 * ra.random((2, nii)) + 0.2
H_sp = np.zeros((2, nii))
E = s.xc_correction.calculate(xc, D_sp, H_sp)
dD_sp = x * ra.random((2, nii))
dE = np.dot(H_sp.ravel(), dD_sp.ravel()) / x
D_sp += dD_sp
Ep = s.xc_correction.calculate(xc, D_sp, H_sp)
D_sp -= 2 * dD_sp
Em = s.xc_correction.calculate(xc, D_sp, H_sp)
print dE, dE - 0.5 * (Ep - Em) / x
equal(dE, 0.5 * (Ep - Em) / x, 1e-6)
| gpl-3.0 | -138,882,968,460,386,080 | 30.5625 | 77 | 0.473267 | false | 2.431782 | false | false | false |
cabanm/project-euler | problem46.py | 1 | 1213 | # Project Euler - Problem 46
# --------------------------
# What is the smallest odd composite that cannot be written as the sum of a prime and twice a square?
from myMath import isPrime
from time import time
# Find primes up to a certain number and output a list of them
def primes(top):
seive = range(2, top+1)
for m in range(2, top+1):
for n in range(m, top//m+1):
p = m*n
if p<=top: seive[p-2] = 0
primes = []
for i in range(top-1):
if seive[i] != 0: primes.append(seive[i])
return primes
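# For example (illustrative), primes(12) -> [2, 3, 5, 7, 11]: index i of the
# sieve list holds the value i + 2, and composites are zeroed in place before
# the final pass collects the survivors.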
p_max = 10000 # Search bounds; both must be large enough to cover the answer
s_max = 1000  # (p_max = 10000 with s_max as low as 100 already suffices)
start_tot = time()
prime_list = primes(p_max)[1:] # Remove the number 2
can_be_written = []
# Get a large list of composites that we can write in such a way
for p in prime_list:
for s in range(1,s_max+1):
n = p + 2*s**2
if not isPrime(n): can_be_written.append(n)
# Get large list of odd composites and check whether each element is in the "can_be_written" list
max_comp = p_max+2*s_max**2
for n in [n for n in range(1,max_comp,2) if not isPrime(n)]: # The [...] generates odd composites
if n not in can_be_written:
print n
break
print "Time taken:", time()-start_tot
| gpl-2.0 | -4,701,724,964,660,752,000 | 29.325 | 101 | 0.663644 | false | 2.834112 | false | false | false |
tdjordan/tortoisegit | gitgtk/addremove.py | 1 | 1219 | #
# Add/Remove dialog for TortoiseHg
#
# Copyright (C) 2007 TK Soh <[email protected]>
#
try:
import pygtk
pygtk.require("2.0")
except:
pass
import gtk
import gobject
from mercurial import ui, util, hg
from mercurial.i18n import _
from status import GStatus
def run(hgcmd='add', root='', cwd='', files=[], **opts):
u = ui.ui()
u.updateopts(debug=False, traceback=False)
repo = hg.repository(u, path=root)
cmdoptions = {
'all':False, 'clean':False, 'ignored':False, 'modified':False,
'added':True, 'removed':True, 'deleted':True, 'unknown':False, 'rev':[],
'exclude':[], 'include':[], 'debug':True,'verbose':True
}
if hgcmd == 'add':
cmdoptions['unknown'] = True
elif hgcmd == 'remove':
cmdoptions['clean'] = True
else:
raise "Invalid command '%s'" % hgcmd
dialog = GStatus(u, repo, cwd, files, cmdoptions, True)
gtk.gdk.threads_init()
gtk.gdk.threads_enter()
dialog.display()
gtk.main()
gtk.gdk.threads_leave()
if __name__ == "__main__":
import sys
opts = {}
    opts['hgcmd'] = 'add'
opts['root'] = len(sys.argv) > 1 and sys.argv[1] or ''
run(**opts)
| gpl-2.0 | 2,293,648,415,399,881,700 | 23.38 | 80 | 0.579984 | false | 3.224868 | false | false | false |
vaimi/pydisambiguation | ui/qtgui.py | 1 | 6824 | from PyQt5.QtWidgets import (QWidget, QFrame, QLabel, QPushButton, QComboBox, QLineEdit,QTextEdit, QGridLayout, QApplication, QHBoxLayout, QRadioButton)
import logging
class AlgorithmRadioButton(QRadioButton):
def __init__(self, text, id=None, group=None):
super().__init__(text)
self.algorithmId = id
self.group = group
class DisambiquateApp(QApplication):
def __init__(self, sysargs, core):
super(DisambiquateApp, self).__init__(sysargs)
self.dw = DisambiquateWindow(core)
class DisambiquateWindow(QFrame):
def __init__(self, core):
super().__init__()
self.core = core
self.initUI()
def __makeLabel(self, text, tooltip):
label = QLabel(text)
label.setToolTip(tooltip)
return QLabel(text)
def __makeEditBox(self):
return QLineEdit()
def __makeRadioButton(self, text, key=None, group=None):
radiobutton = AlgorithmRadioButton(text, key, group)
radiobutton.clicked.connect(self.selectionChanged)
return radiobutton
def __makeComboBox(self, items):
comboBox = QComboBox()
[comboBox.addItem(algorithm['name'], algorithm['key']) for algorithm in items]
return comboBox
def __makeHorizontalLine(self):
hLine = QFrame()
hLine.setFrameShape(QFrame.HLine)
hLine.setFrameShadow(QFrame.Sunken)
return hLine
def __initElements(self):
self.gridLayout = QGridLayout()
self.radioLayout = QHBoxLayout()
self.variantLayout = QHBoxLayout()
self.buttonLayout = QHBoxLayout()
# First row
self.wordsLabel = self.__makeLabel('Word', '')
self.wordsEdit = self.__makeEditBox()
# Second row
self.sentencesLabel = self.__makeLabel('Sentence(s)', '')
self.sentencesEdit = QTextEdit()
# Third row
self.methodLabel = self.__makeLabel('Method', '')
groupsList = [algorithm['parent'] for algorithm in self.core.getAlgorithmsInfo() if algorithm['parent'] is not None]
groupsDict = dict((group,groupsList.count(group)) for group in set(groupsList))
groups = [group for group in groupsDict if groupsDict[group] > 1]
self.algorithmsRadioButtons = []
for group in groups:
self.algorithmsRadioButtons += [self.__makeRadioButton(group.name + ' (+)', None, group)]
self.algorithmsRadioButtons += [self.__makeRadioButton(algorithm['name'], algorithm['key']) for algorithm in self.core.getAlgorithmsInfo() if algorithm['parent'] is None or algorithm['parent'] not in groups]
#
self.variantLabel = self.__makeLabel('Variant', '')
self.variantComboBox = QComboBox()
# Fourth row
self.disambiquateButton = QPushButton("Disambiquate")
# Fifth row
self.hLine = self.__makeHorizontalLine()
# Sixth row
self.outputLabel = self.__makeLabel('Sense', '')
self.outputEdit = QTextEdit()
def __setElementSettings(self):
self.outputEdit.setReadOnly(True)
self.algorithmsRadioButtons[0].setChecked(True)
self.selectionChanged()
self.disambiquateButton.clicked.connect(self.disambiquateButtonClicked)
self.gridLayout.setSpacing(10)
def __setLayout(self):
row = 1
labelColumn = 0
contentColumn = 1
self.setLayout(self.gridLayout)
self.gridLayout.addWidget(self.wordsLabel, row, labelColumn)
self.gridLayout.addWidget(self.wordsEdit, row, contentColumn)
row += 1
self.gridLayout.addWidget(self.sentencesLabel, row, labelColumn)
self.gridLayout.addWidget(self.sentencesEdit, row, contentColumn, 2, 1)
row += 2
self.gridLayout.addWidget(self.methodLabel, row, labelColumn)
self.gridLayout.addLayout(self.radioLayout, row, contentColumn)
[self.radioLayout.addWidget(button) for button in self.algorithmsRadioButtons]
self.radioLayout.addStretch(1)
row += 1
self.gridLayout.addWidget(self.variantLabel, row, labelColumn)
self.gridLayout.addLayout(self.variantLayout, row, contentColumn)
self.variantLayout.addWidget(self.variantComboBox)
#self.variantLayout.addStretch(1)
row += 1
self.gridLayout.addLayout(self.buttonLayout, row, contentColumn)
self.buttonLayout.addWidget(self.disambiquateButton)
self.buttonLayout.addStretch(1)
row += 1
self.gridLayout.addWidget(self.hLine, row, contentColumn)
row += 1
self.gridLayout.addWidget(self.outputLabel, row, labelColumn)
self.gridLayout.addWidget(self.outputEdit, row, contentColumn, 2, 1)
def initUI(self):
self.__initElements()
self.__setElementSettings()
self.__setLayout()
self.setGeometry(300, 300, 350, 300)
self.setWindowTitle('PyDisambiquate')
self.show()
def selectionChanged(self):
self.variantComboBox.clear()
for button in self.algorithmsRadioButtons:
if button.isChecked():
if button.algorithmId == None:
self.variantComboBox.setEnabled(True)
[self.variantComboBox.addItem(algorithm['name'], algorithm['key']) for algorithm in self.core.getAlgorithmsInfo() if algorithm['parent'] is button.group]
else:
self.variantComboBox.setDisabled(True)
def disambiquateButtonClicked(self):
logging.debug("Disambiquate button pressed")
self.disambiquateButton.setDisabled(True)
words = self.__getWord().strip()
sentences = self.__getSentence().strip()
        if not words or not sentences:
            self.disambiquateButton.setEnabled(True)
            return
logging.debug("Words content: " + str(words))
logging.debug("Sentences content: " + str(sentences))
sense = False
for button in self.algorithmsRadioButtons:
if button.isChecked():
if button.group is None:
sense = self.core.runAlgorithm(button.algorithmId, words, sentences)
break
else:
sense = self.core.runAlgorithm(self.variantComboBox.itemData(self.variantComboBox.currentIndex()), words, sentences)
break
if sense['sense']:
outText = "%s: %s" % (sense['sense'], sense['sense'].definition())
else:
outText = "Unable to make sense"
logging.debug("Made sense: " + outText)
self.outputEdit.setText(outText)
self.disambiquateButton.setEnabled(True)
def __getWord(self):
return self.wordsEdit.text()
def __getSentence(self):
return self.sentencesEdit.toPlainText()
| gpl-3.0 | -1,755,694,650,714,857,700 | 36.494505 | 215 | 0.637749 | false | 4.115802 | false | false | false |
aaronmckinstry706/twitter-crime-prediction | src/jobs/crime_prediction/run.py | 1 | 13899 | import datetime
import logging
import operator
import os
import sys
import pyspark as pyspark
import pyspark.ml.feature as feature
import pyspark.ml.classification as classification
import pyspark.ml as ml
import pyspark.ml.clustering as clustering
import pyspark.sql as sql
import pyspark.sql.functions as functions
import pyspark.sql.types as types
import twokenize
import grid
LOGGER = logging.getLogger(__name__)
FORMATTER = logging.Formatter(
"[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
FILE_HANDLER = logging.FileHandler('script_log.txt')
FILE_HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(FILE_HANDLER)
LOGGER.setLevel(logging.DEBUG)
LOGGER.info("Starting run.")
sc = pyspark.SparkContext()
# From https://stackoverflow.com/a/36218558 .
def sparkImport(module_name, module_directory):
"""
Convenience function.
Tells the SparkContext sc (must already exist) to load
module module_name on every computational node before
executing an RDD.
Args:
module_name: the name of the module, without ".py".
module_directory: the path, absolute or relative, to
the directory containing module
                          module_name.
Returns: none.
"""
module_path = os.path.abspath(
module_directory + "/" + module_name + ".py")
sc.addPyFile(module_path)
# --------------------------------------------------------------------------------------------------
# PART 0: Define some useful parameters that define our task.
# --------------------------------------------------------------------------------------------------
# We are only considering data between 1 and 31 (inclusive) days prior to the prediction date.
NUM_DAYS = 31
PREDICTION_DATE = datetime.datetime(2015, 3, 3)
HISTORICAL_CUTOFF_DATE = PREDICTION_DATE - datetime.timedelta(days=NUM_DAYS)
# We're only considering tweets and complaints within the given grid square.
# Southwest corner of New York:
# lat = 40.488320, lon = -74.290739
# Northeast corner of New York:
# lat = 40.957189, lon = -73.635679
latlongrid = grid.LatLonGrid(
lat_min=40.488320,
lat_max=40.957189,
lon_min=-74.290739,
lon_max=-73.635679,
lat_step=grid.get_lon_delta(1000, (40.957189 - 40.488320)/2.0),
lon_step=grid.get_lat_delta(1000))
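# Illustrative use of the grid (grid.LatLonGrid API as used below): calling
# latlongrid.grid_square_index(lat=40.7, lon=-74.0) returns the integer cell id
# under which tweets and complaints are aggregated per roughly-1-km square.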
# PART 1: Get topic distributions.
sparkImport("twokenize", ".")
sparkImport('grid', '.')
ss = (sql.SparkSession.builder.appName("TwitterTokenizing")
.getOrCreate())
tweets_schema = types.StructType([
types.StructField('id', types.LongType()),
types.StructField('timestamp', types.LongType()),
types.StructField('postalCode', types.StringType()),
types.StructField('lon', types.DoubleType()),
types.StructField('lat', types.DoubleType()),
types.StructField('tweet', types.StringType()),
types.StructField('user_id', types.LongType()),
types.StructField('application', types.StringType()),
types.StructField('source', types.StringType())
])
tweets_df = ss.read.csv('tweets2.csv',
escape='"',
header='true',
schema=tweets_schema,
mode='DROPMALFORMED')
tweets_df = tweets_df.select(['timestamp', 'lon', 'lat', 'tweet'])
date_column = (tweets_df['timestamp'].cast(types.TimestampType())
.cast(types.DateType()))
tweets_df = (tweets_df.withColumn('date', date_column)
.drop('timestamp'))
date_to_column = functions.lit(PREDICTION_DATE)
date_from_column = functions.lit(HISTORICAL_CUTOFF_DATE)
tweets_df = tweets_df.filter(
~(tweets_df['date'] < date_from_column)
& (tweets_df['date'] < date_to_column))
sql_tokenize = functions.udf(
lambda tweet: twokenize.tokenize(tweet),
returnType=types.ArrayType(types.StringType()))
tweets_df = (tweets_df
.withColumn('tweet_tokens', sql_tokenize(tweets_df['tweet']))
.drop('tweet'))
# tweets_df now has Row(tweet_tokens, date, lon, lat)
# The only way to group elements and get a set of data (as far as I know) is by converting the
# DataFrame into an RDD. This is because I can't find the right operation on GroupedData in Pyspark.
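# A DataFrame-only alternative (a sketch, assuming Spark >= 1.6 where
# functions.collect_list is available; a UDF such as the
# grid_square_from_lat_lon_udf defined further below would supply the key):
#   tokens_df = (tweets_df
#       .withColumn('grid_square', grid_square_from_lat_lon_udf(
#           tweets_df['lat'], tweets_df['lon']))
#       .groupBy('grid_square')
#       .agg(functions.collect_list('tweet_tokens')))
# Each group would then hold a list of token lists rather than one flat list.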
row_to_gridsquare_tokens = lambda row: (
latlongrid.grid_square_index(lat=row['lat'], lon=row['lon']),
row['tweet_tokens'])
tokens_rdd = (tweets_df.rdd.map(row_to_gridsquare_tokens)
.reduceByKey(operator.concat))
tokens_df_schema = types.StructType([
types.StructField('grid_square', types.IntegerType()),
types.StructField('tokens', types.ArrayType(types.StringType()))
])
tokens_df = ss.createDataFrame(tokens_rdd, schema=tokens_df_schema)
hashing_tf = feature.HashingTF(
    numFeatures=(2**18)-1, inputCol='tokens', outputCol='token_frequencies')
lda = (clustering.LDA()
.setFeaturesCol('token_frequencies')
.setK(10)
.setTopicDistributionCol('topic_distribution'))
topic_distribution_pipeline = ml.Pipeline(stages=[hashing_tf, lda])
lda_model = topic_distribution_pipeline.fit(tokens_df)
topic_distributions = lda_model.transform(tokens_df).select(['grid_square', 'topic_distribution'])
# --------------------------------------------------------------------------------------------------
# PART 2: Get complaint counts per (grid square, date).
# --------------------------------------------------------------------------------------------------
complaints_df_schema = types.StructType([
types.StructField('CMPLNT_NUM', types.IntegerType(),
nullable=False),
types.StructField('CMPLNT_FR_DT', types.StringType()),
types.StructField('CMPLNT_FR_TM', types.StringType()),
types.StructField('CMPLNT_TO_DT', types.StringType()),
types.StructField('CMPLNT_TO_TM', types.StringType()),
types.StructField('RPT_DT', types.StringType(), nullable=False),
types.StructField('KY_CD', types.StringType()),
types.StructField('OFNS_DESC', types.StringType()),
types.StructField('PD_CD', types.IntegerType()),
types.StructField('PD_DESC', types.StringType()),
types.StructField('CRM_ATPT_CPTD_CD', types.StringType()),
types.StructField('LAW_CAT_CD', types.StringType()),
types.StructField('JURIS_DESC', types.StringType()),
types.StructField('BORO_NM', types.StringType()),
types.StructField('ADDR_PCT_CD', types.StringType()),
types.StructField('LOC_OF_OCCUR_DESC', types.StringType()),
types.StructField('PREM_TYP_DESC', types.StringType()),
types.StructField('PARKS_NM', types.StringType()),
types.StructField('HADEVELOPT', types.StringType()),
types.StructField('X_COORD_CD', types.FloatType()),
types.StructField('Y_COORD_CD', types.FloatType()),
types.StructField('Latitude', types.FloatType()),
types.StructField('Longitude', types.FloatType()),
types.StructField('Lat_Lon', types.StringType())])
complaints_df = ss.read.csv(
"crime_complaints_with_header.csv",
header=True,
schema=complaints_df_schema)
complaints_df = (complaints_df
.select(['CMPLNT_FR_DT', 'CMPLNT_TO_DT', 'Latitude', 'Longitude'])
.withColumnRenamed('CMPLNT_FR_DT', 'from_date_string')
.withColumnRenamed('CMPLNT_TO_DT', 'to_date_string')
.withColumnRenamed('Latitude', 'lat')
.withColumnRenamed('Longitude', 'lon'))
# Filter to find the complaints which have an exact date of occurrence
# or which have a start and end date.
complaints_df = complaints_df.filter(~complaints_df['from_date_string'].isNull())
# Now get the actual column dates.
def string_to_date(s):
    if s is None:
        return None
    else:
        return datetime.datetime.strptime(s, '%m/%d/%Y')
string_to_date_udf = functions.udf(string_to_date, types.DateType())
complaints_df = (complaints_df
.withColumn('from_date', string_to_date_udf(complaints_df['from_date_string']))
.withColumn('to_date', string_to_date_udf(complaints_df['to_date_string']))
.select(['from_date', 'to_date', 'lat', 'lon']))
# Now filter for complaints which occur on one day only.
complaints_df = (complaints_df
.filter(complaints_df['to_date'].isNull()
| (complaints_df['to_date'] == complaints_df['from_date']))
.withColumnRenamed('from_date', 'date'))
# Columns are now 'date', 'lat', and 'lon'.
# Compute grid square for each crime.
def grid_square_from_lat_lon(lat, lon):
return latlongrid.grid_square_index(lat=lat, lon=lon)
grid_square_from_lat_lon_udf = functions.udf(
grid_square_from_lat_lon, returnType=types.IntegerType())
complaints_df = (complaints_df
.withColumn('grid_square',
grid_square_from_lat_lon_udf(complaints_df['lat'], complaints_df['lon']))
.select('date', 'grid_square'))
# Now count by (GridSquare, Date).
complaint_counts_df = (complaints_df
.groupBy(complaints_df['grid_square'], complaints_df['date'])
.count()
.withColumnRenamed('count', 'complaint_count'))
complaint_counts_df = (complaint_counts_df
.withColumn(
'complaint_count',
complaint_counts_df['complaint_count'].cast(types.DoubleType())))
count_binarizer = feature.Binarizer(
threshold=0, inputCol='complaint_count', outputCol='binary_complaint_count')
complaint_counts_df = count_binarizer.transform(complaint_counts_df)
# Keep only the binarized (0/1) counts under the 'complaint_count' name.
complaint_counts_df = (complaint_counts_df
    .drop('complaint_count')
    .withColumnRenamed('binary_complaint_count', 'complaint_count'))
# Columns are now 'date', 'grid_square' and the binarized 'complaint_count'.
# Filter for complaints occurring within the date range.
past_complaint_counts_df = complaint_counts_df.filter(
(complaint_counts_df['date'] < date_to_column)
& (complaint_counts_df['date'] >= date_from_column))
current_complaint_counts_df = complaint_counts_df.filter(
complaint_counts_df['date'] == date_to_column)
# --------------------------------------------------------------------------------------------------
# PART 3: Defining the data matrix.
# --------------------------------------------------------------------------------------------------
# Complaint count dataframes only have entries for nonzero counts. Fill in the missing zero-count entries.
all_dates_squares_df = ss.createDataFrame(
[(gridSquare, PREDICTION_DATE - datetime.timedelta(days=i))
for gridSquare in range(-1, latlongrid.grid_size)
for i in range(1, 1 + NUM_DAYS)],
schema=types.StructType([
types.StructField('grid_square', types.IntegerType()),
types.StructField('date', types.DateType())]))
all_squares_df = ss.createDataFrame(
[(gridSquare,) for gridSquare in range(-1, latlongrid.grid_size)],
schema=types.StructType([
types.StructField('grid_square', types.IntegerType())]))
past_complaint_counts_df = past_complaint_counts_df.join(
all_dates_squares_df,
on=['grid_square', 'date'],
how='right_outer')
past_complaint_counts_df = past_complaint_counts_df.fillna({'complaint_count': 0})
current_complaint_counts_df = current_complaint_counts_df.join(
all_squares_df,
on='grid_square',
how='right_outer')
current_complaint_counts_df = (current_complaint_counts_df
.fillna({'complaint_count': 0})
.withColumn('date',
functions.when(current_complaint_counts_df['date'].isNull(), PREDICTION_DATE)
.otherwise(current_complaint_counts_df['date'])))
# Do a left outer join on topic_distributions and past_complaint_counts_df to get our data matrix.
data_matrix = topic_distributions.join(
past_complaint_counts_df,
on='grid_square',
how='inner')
# So far, data_matrix contains Row(date, grid_square, topic_distributions, complaint_count).
# Get weekday from date.
get_weekday_udf = functions.udf(lambda d: d.weekday(), returnType=types.IntegerType())
data_matrix = data_matrix.withColumn('weekday', get_weekday_udf(data_matrix['date']))
# Assemble the feature vectors.
weekday_one_hot_encoder = feature.OneHotEncoder(inputCol='weekday', outputCol='weekday_vector')
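# Note: pyspark.ml's OneHotEncoder drops the last category by default
# (dropLast=True), so the 7 weekdays become a 6-dimensional vector.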
feature_vector_assembler = feature.VectorAssembler(
inputCols=['weekday_vector', 'topic_distribution'], outputCol='final_feature_vector')
feature_assembly_pipeline = (
ml.Pipeline(stages=[weekday_one_hot_encoder, feature_vector_assembler]).fit(data_matrix))
data_matrix = (feature_assembly_pipeline.transform(data_matrix)
.select('date', 'grid_square', 'final_feature_vector', 'complaint_count'))
LOGGER.debug(str(data_matrix.count()) + " rows like " + str(data_matrix.take(1)))
#logistic_regression = classification.LogisticRegression(
# maxIter=10, regParam=0.3, elasticNetParam=0.8,
# featuresCol='final_feature_vector', labelCol='complaint_count',
# probabilityCol='predicted_probability')
#logistic_model = logistic_regression.fit(data_matrix)
#LOGGER.info(
# "coefficients: " + str(logistic_model.coefficientMatrix) + ", intercept: " + str(logistic_model.interceptVector))
prediction_data_matrix = topic_distributions.join(
current_complaint_counts_df,
on='grid_square',
how='inner')
prediction_data_matrix = (prediction_data_matrix
.withColumn('weekday', get_weekday_udf(prediction_data_matrix['date']))
.select('weekday', 'grid_square', 'date', 'topic_distribution', 'complaint_count'))
prediction_data_matrix = (feature_assembly_pipeline.transform(prediction_data_matrix)
.select('grid_square', 'date', 'final_feature_vector', 'complaint_count'))
LOGGER.debug(str(prediction_data_matrix.count()) + " rows like "
+ str(prediction_data_matrix.take(1)))
exit(0)
#predicted_complaint_counts = (logistic_model.transform(prediction_data_matrix)
# .select('grid_square', 'complaint_count', 'predicted_probability')
# .collect())
#
#LOGGER.debug(str(predicted_complaint_counts.count()) + " rows like "
# + str(predicted_complaint_counts.take(1)) + ".")
#exit(0)
| gpl-3.0 | 3,743,084,505,325,110,000 | 38.598291 | 118 | 0.66163 | false | 3.428466 | false | false | false |
RuthAngus/LSST-max | code/GP_periodogram.py | 1 | 1066 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from GProtation import make_plot, lnprob, neglnlike
import emcee
import time
import george
from george.kernels import ExpSquaredKernel, ExpSine2Kernel
import scipy.optimize as spo
def GP_periodogram(x, y, yerr, p_init, plims, N):
"""
    This function takes a light curve and attempts to produce a GP periodogram.
It returns the value of the highest peak.
The kernel hyperparameters are optimised over a grid of periods.
This is also a "profile likelihood".
x, y, yerr: the light curve.
p_init: the initial guess for the period.
plims: the (log) boundaries for the grid.
N: the number of grid points.
"""
    # create the grid of trial periods
    periods = np.linspace(np.exp(plims[0]), np.exp(plims[1]), N)
# initial hyperparameters
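    # Minimal profile-likelihood sketch (an assumption, not the original
    # implementation): score each trial period with a george GP whose
    # periodic kernel is fixed at that period, using the george 0.2.x
    # signature ExpSine2Kernel(gamma, period) and order-unity amplitude /
    # length-scale hyperparameters; a real run would optimise these.
    log_likelihoods = []
    for trial_period in periods:
        kernel = ExpSquaredKernel(1.0) * ExpSine2Kernel(2.0, trial_period)
        gp = george.GP(kernel)
        gp.compute(x, yerr)
        log_likelihoods.append(gp.lnlikelihood(y))
    # return the period of the highest peak
    return periods[np.argmax(log_likelihoods)]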
if __name__ == "__main__":
# fake data
    x = np.linspace(0, 10, 100)
p = 2
err = .1
y = np.sin(2*np.pi*(1./p)*x) + np.random.randn(100)*err
yerr = np.ones_like(y) * err
    p_init, plims = 2, np.log([.1, 5])
GP_periodogram(x, y, yerr, p_init, plims, 10)
| mit | -6,016,110,204,443,567,000 | 27.052632 | 77 | 0.707317 | false | 2.820106 | false | false | false |
cffex/cmdb | test/project/webci_attr_type.py | 1 | 10754 | #!/usr/bin/python
#coding: utf-8
#print "Content-type: text/html\n"
#Author: LIPH
import web
import demjson
import json
import string
import time
import urlparse
import webci_attr
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
render = web.template.render('templates/')
urls = ( '/ciattrtype/(.*)', 'ATTRTYPE',
'/ciattrtype', 'attrtype', )
db = web.database(dbn='oracle', user='cmdb', pw='cmdb123', db='cffexcmdb')
#Define time stamp 9999/12/31 23:59:59
ENDTIME = str(int('99991231235959'))
DELETETIME = str('00000000000000')
def fn_create_ci_attrtype(i_name,i_description,i_type_fid,i_mandatory,i_owner,i_family_id,i_change_log,i_displayname, i_value_type, i_endtime = ENDTIME):
#Function:create ci attribute type
v_cat = 'PCAT00000001'
v_fat = 'FCAT00000001'
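    # IDs are a 'PCAT'/'FCAT' prefix plus an 8-digit zero-padded counter;
    # the literals above seed the sequence when the table is still empty.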
ct_id = db.query('select max(id) cid, max(family_id) fid from t_ci_attribute_type ')
#Although there will be only one record, it also needs to iteratively generate the dict. It will raise an error if directly transforming ci_id[0] to json format
ci_as_dict = []
for ci in ct_id:
ci_as_dict.append(ci)
v_json = json.dumps(ci_as_dict).decode("GB2312")
v_djson = json.loads(v_json,encoding="gb2312")
v_num = len(v_djson)
#Take the default value when inserting the first record
    if v_num != 0:
v_cat = v_djson[0]['CID']
v_fat = v_djson[0]['FID']
v_cat = 'PCAT' + str(string.atoi(v_cat[4:])+1).rjust(8,'0')
print v_cat
if i_family_id == None :
v_fat = 'FCAT' + str(string.atoi(v_fat[4:])+1).rjust(8,'0')
else:
v_fat = i_family_id
print v_fat
v_curtime = time.strftime("%Y%m%d%H%M%S", time.localtime())
n = db.insert('t_ci_attribute_type',id = v_cat,name = i_name, description = i_description, ci_type_fid = i_type_fid, mandatory= i_mandatory,
owner = i_owner, starttime = v_curtime, endtime = i_endtime, family_id = v_fat, change_log = i_change_log,displayname = i_displayname, value_type = i_value_type)
return v_fat
def fn_delete_ci_attrtype(i_family_id,i_curtime,i_change_log):
#Function: Delete ci attribute type
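    # Rows are versioned by [starttime, endtime): deletion closes the live
    # row at i_curtime and re-inserts it with endtime = DELETETIME as a
    # tombstone, after first deleting every attribute of this type.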
v_ca_fids = db.query('select distinct a.family_id from t_ci_attribute a where a.type_fid = $fid and a.endtime = $endtime',vars={'fid':i_family_id,'endtime':ENDTIME})
json_en = demjson.encode(v_ca_fids)
json_de = demjson.decode(json_en)
v_ca_fid_num = len(json_de)
    if v_ca_fid_num != 0:
for v_ca_fid in json_de:
n = webci_attr.fn_delete_ciattr(v_ca_fid['FAMILY_ID'], i_curtime, i_change_log)
v_ct_fids = db.query("select a.name ,convert(a.description,'utf8') description,a.ci_type_fid,a.mandatory, a.owner,a.family_id,convert(a.displayname,'utf8') displayname, a.value_type from t_ci_attribute_type a where a.endtime = $aendtime and a.family_id = $fid ",vars={'aendtime':ENDTIME,'fid':i_family_id})
ci_as_dict = []
for ci in v_ct_fids:
ci_as_dict.append(ci)
ci_type_json = json.dumps(ci_as_dict, indent = 4,ensure_ascii=False, separators = (',',':')).decode("GB2312")
ci_type_djson = json.loads(ci_type_json,encoding="gb2312")
#delete the record
n = db.update('t_ci_attribute_type', where='family_id = $fid and endtime = $endtime', vars={'fid':i_family_id,'endtime':ENDTIME}, endtime=i_curtime)
#insert a new record and set the endtime=deletetime
v_fid = fn_create_ci_attrtype(ci_type_djson[0]['NAME'], ci_type_djson[0]['DESCRIPTION'], ci_type_djson[0]['CI_TYPE_FID'],ci_type_djson[0]['MANDATORY'],ci_type_djson[0]['OWNER'],
ci_type_djson[0]['FAMILY_ID'], i_change_log,ci_type_djson[0]['DISPLAYNAME'], ci_type_djson[0]['VALUE_TYPE'], DELETETIME)
return n
class ATTRTYPE:
def GET(self,fid):
ci_attrtype = db.query("select b.name citype_name, a.name ,convert(a.description,'utf8') description,a.ci_type_fid,a.mandatory, a.owner,a.family_id,convert(a.displayname,'utf8') displayname,a.value_type,a.change_log from t_ci_attribute_type a ,t_ci_type b where a.ci_type_fid = b.family_id and a.endtime = $endtime and b.endtime = $endtime and a.family_id = $fid ",vars={'endtime':ENDTIME,'fid':fid})
ci_as_dict = []
for ci in ci_attrtype:
ci_as_dict.append(ci)
ci_type_json = json.dumps(ci_as_dict, indent = 4,ensure_ascii=False, separators = (',',':')).decode("GB2312")
print ci_type_json
return ci_type_json
class attrtype:
def GET(self):
all_col = ('name','description','ci_type_fid','mandatory','owner','family_id','time','change_log','citype_name','value_type')
citype_input = web.input()
condition = " "
for col in range(len(all_col)):
col_name = all_col[col]
value = citype_input.get(col_name,None)
            if value is not None:
if col_name == 'time' :
condition = condition + "cat.starttime <= '" + value + "' and cat.endtime > '" + value + "' and b.starttime <= '" + value + "' and b.endtime > '" + value + "' and "
elif col_name == 'citype_name':
condition = condition + "b.name = '" + value + "' and "
else :
condition = condition + "cat." + col_name + " = '" + value + "' and "
            if value is None and col_name == 'time':
condition = condition + "cat.endtime = '" + ENDTIME + "' and b.endtime = '" + ENDTIME + "' and "
print condition
v_sql = "select b.name citype_name, cat.name ,convert(cat.description,'utf8') description,cat.ci_type_fid, cat.mandatory, cat.owner,cat.family_id,convert(cat.displayname,'utf8') displayname, cat.value_type, cat.change_log from t_ci_attribute_type cat, t_ci_type b where " + condition + " cat.ci_type_fid = b.family_id "
ci_type = db.query(v_sql)
ci_as_dict = []
for ci in ci_type:
ci_as_dict.append(ci)
ci_type_json = json.dumps(ci_as_dict, indent = 4,ensure_ascii=False, separators = (',',':')).decode("GB2312")
print ci_type_json
# import sys,httplib, urllib
# params = urllib.urlencode({'fid':'FCAT00000027','change_log':'test'})
# headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}
# con2 = httplib.HTTPConnection("localhost:8080")
# con2.request("DELETE","/ciattrtype",params,headers)
# con2.close()
return ci_type_json
def POST(self):
citype_input = web.input()
#Besides some fields in t_ci_attribute_type, input parameters also include the "name" field in t_ci_type
v_ct_fids = db.query('SELECT distinct ct.family_id FROM t_ci_type ct WHERE ct.endtime = $endtime and ct.family_id = $ctfid',vars={'endtime':ENDTIME,'ctfid':citype_input.get('ci_type_fid',None)})
json_en = demjson.encode(v_ct_fids)
json_de = demjson.decode(json_en)
v_ct_fid_num = len(json_de)
if v_ct_fid_num == 0:
return 2 #there is no relative family_id in table T_CI_TYPE
elif v_ct_fid_num > 1:
return 3 #there are more than one relative family_ids in table T_CI_TYPE
v_ct_fid = json_de[0]['FAMILY_ID']
print v_ct_fid
        #Users don't need to supply the family_id; the corresponding argument passed to the function is None
v_fid = fn_create_ci_attrtype(citype_input.get('name',None), citype_input.get('description',None), v_ct_fid,citype_input.get('mandatory',None),
citype_input.get('owner',None), None, 'initialization', citype_input.get('displayname',None),citype_input.get('value_type',None))
return v_fid
def DELETE(self):
input_data = web.data()
data = urlparse.parse_qs(input_data)
v_ct_fids = db.query("SELECT distinct c.name FROM t_ci_attribute_type c WHERE c.family_id = $fid and c.endtime = $endtime",vars={'fid':data['fid'][0],'endtime':ENDTIME})
json_en = demjson.encode(v_ct_fids)
json_de = demjson.decode(json_en)
v_ct_fid_num = len(json_de)
if v_ct_fid_num == 0:
return 2 #There are no records to delete in table t_ci_attribute_type
elif v_ct_fid_num > 1:
return 3 #There are more than one records to delete in table t_ci_attribute_type
v_curtime = time.strftime("%Y%m%d%H%M%S", time.localtime())
#When deleting t_ci_attribute_type, we should delete all the relative attribute
n = fn_delete_ci_attrtype(data['fid'][0],v_curtime,data['change_log'][0])
return n
def PUT(self):
citype_input = web.input()
v_ct_fids = db.query("select a.name ,convert(a.description,'utf8') description,a.ci_type_fid,a.mandatory, a.owner,a.family_id,convert(a.displayname,'utf8') displayname,a.value_type, a.change_log from t_ci_attribute_type a where a.endtime = $aendtime and a.family_id = $fid ",vars={'aendtime':ENDTIME,'fid':citype_input.get('fid',None)})
ci_as_dict = []
for ci in v_ct_fids:
ci_as_dict.append(ci)
ci_type_json = json.dumps(ci_as_dict, indent = 4,ensure_ascii=False, separators = (',',':')).decode("GB2312")
ci_type_djson = json.loads(ci_type_json,encoding="gb2312")
v_ct_fid_num = len(ci_type_djson)
if v_ct_fid_num == 0:
return 2 #There are no records to modify in table t_ci_attribute_type
elif v_ct_fid_num > 1:
return 3 #There are more than one records to modify in table t_ci_attribute_type
v_curtime = time.strftime("%Y%m%d%H%M%S", time.localtime())
n = db.update('t_ci_attribute_type', where='family_id = $fid and endtime = $endtime', vars={'fid':citype_input.get('fid'),'endtime':ENDTIME}, endtime=v_curtime)
v_fid = fn_create_ci_attrtype(citype_input.get('name',ci_type_djson[0]['NAME']), citype_input.get('description',ci_type_djson[0]['DESCRIPTION']),
ci_type_djson[0]['CI_TYPE_FID'],citype_input.get('mandatory',ci_type_djson[0]['MANDATORY']),citype_input.get('owner',ci_type_djson[0]['OWNER']),
ci_type_djson[0]['FAMILY_ID'], citype_input.get('change_log',ci_type_djson[0]['CHANGE_LOG']),citype_input.get('displayname',ci_type_djson[0]['DISPLAYNAME']),citype_input.get('value_type',ci_type_djson[0]['VALUE_TYPE']))
return v_fid
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
| apache-2.0 | 268,012,882,790,708,770 | 56.118919 | 407 | 0.603981 | false | 3.079038 | false | false | false |
egemsoft/esef-yawd-translation | translations/utils.py | 1 | 5003 | import os
import sys
from django.conf import settings
from django.utils.encoding import smart_str
from django.utils.translation import check_for_language
from django.utils.translation.trans_real import get_language_from_path
_default = None
_supported = []
def get_default_language():
"""
Detects the default language from the database.
If no default language is present, the default
settings.LANGUAGE_CODE is used.
This will reload its values in the context of a new thread.
"""
global _default
if _default is None:
try:
from models import Language
_default = smart_str(Language.objects.get(default=True).name)
except:
_default = settings.LANGUAGE_CODE
return _default
def get_supported_languages():
"""
Retrieve the supported languages.
"""
global _supported
if not _supported:
from models import Language
_supported = [smart_str(l) for l in Language.objects.values_list('name', flat=True)]
# if no languages are set use the default language
if not _supported:
_supported = [settings.LANGUAGE_CODE]
return _supported
def get_language_from_request(request, check_path=False):
"""
This method is used as a replacement to the original django language
detection algorithm. It takes the db default language into
consideration and does not deal with the Accept-Language header.
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
# retrieve list of supported languages
supported = get_supported_languages()
if check_path:
        lang_code = get_language_from_path(request.path_info, [settings.LANGUAGE_CODE] + supported)
if lang_code is not None:
return lang_code
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if lang_code and lang_code not in supported:
lang_code = lang_code.split('-')[0] # e.g. if fr-ca is not supported fallback to fr
if lang_code and lang_code in supported and check_for_language(lang_code):
return lang_code
# original Django middleware used to look for the Accept-Language
# HTTP header and extract the language. This is replaced in our
# mechanism
return get_default_language()
def compile_message_file(fn):
"""
Accepts a .po file path as argument and generates an appropriate .mo file.
This copies the needed functionality from the original compilemessages command
"""
pf = os.path.splitext(fn)[0]
# Store the names of the .mo and .po files in an environment
# variable, rather than doing a string replacement into the
# command, so that we can take advantage of shell quoting, to
# quote any malicious characters/escaping.
# See http://cyberelk.net/tim/articles/cmdline/ar01s02.html
os.environ['djangocompilemo'] = pf + '.mo'
os.environ['djangocompilepo'] = pf + '.po'
if sys.platform == 'win32': # Different shell-variable syntax
cmd = 'msgfmt --check-format -o "%djangocompilemo%" "%djangocompilepo%"'
else:
cmd = 'msgfmt --check-format -o "$djangocompilemo" "$djangocompilepo"'
os.system(cmd)
os.chmod(pf + '.mo', 0664)
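# e.g. compile_message_file('locale/de/LC_MESSAGES/django.po') compiles the
# catalogue to 'django.mo' alongside the .po file.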
def concat_message_files(files, fn):
"""
Accepts a list of po files and a target file and uses the
msgcat command to concat the files.
"""
files_str = ' '.join(files)
os.environ['djangosourcepo'] = files_str
os.environ['djangotargetpo'] = fn
if sys.platform == 'win32': # Different shell-variable syntax
cmd = 'msgcat --use-first -o "%djangotargetpo%" %djangosourcepo%'
else:
cmd = 'msgcat --use-first -o "$djangotargetpo" $djangosourcepo'
os.system(cmd)
os.chmod(fn, 0664)
def reset_translations(lang):
"""
Empty django's internal translations dictionary when a message translation
changes or the translations list is regenerated.
"""
from django.utils import translation
from django.utils.translation import trans_real
import gettext
if lang in trans_real._translations:
del trans_real._translations[lang]
gettext._translations = {}
if settings.LANGUAGE_CODE == lang:
trans_real._default = None
# force current thread translations reload
current_lang = translation.get_language()
if current_lang == lang:
translation.activate(current_lang)
| bsd-3-clause | -1,003,490,242,642,508,300 | 31.914474 | 105 | 0.681991 | false | 4.104184 | false | false | false |
DBeath/flask-feedrsub | feedrsub/feeds/feedfinder/feedfinder4.py | 1 | 7270 | import logging
import time
from typing import Tuple
from urllib.parse import urlsplit, urljoin
from bs4 import BeautifulSoup
from feedrsub.feeds.feedfinder.feedinfo import FeedInfo
from feedrsub.utils.requests_session import RequestsSession, requests_session
logger = logging.getLogger("feedfinder4")
def coerce_url(url: str) -> str:
url = url.strip()
if url.startswith("feed://"):
return "http://{0}".format(url[7:])
for proto in ["http://", "https://"]:
if url.startswith(proto):
return url
return "https://{0}".format(url)
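# e.g. coerce_url('example.com/feed') -> 'https://example.com/feed'
# and coerce_url('feed://example.com/rss') -> 'http://example.com/rss'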
def get_site_root(url: str) -> str:
"""
Find the root domain of a url
"""
url = coerce_url(url)
parsed = urlsplit(url)
    logger.debug(u"Parsed URL: {0}".format(parsed))
return parsed.netloc
class FeedFinder:
def __init__(self, session, get_feed_info=False, timeout=(3.05, 10)):
self.session = session
self.get_feed_info = get_feed_info
self.timeout = timeout
def get_url(self, url: str):
try:
r = self.session.get(url, timeout=self.timeout)
except Exception as e:
logger.warning(u"Error while getting URL: {0}, {1}".format(url, str(e)))
return None
return r
@staticmethod
def is_feed_data(text: str) -> bool:
data = text.lower()
if data.count("<html"):
return False
return bool(data.count("<rss") + data.count("<rdf") + data.count("<feed"))
def is_feed(self, url: str) -> str:
response = self.get_url(url)
if not response or not response.text or not self.is_feed_data(response.text):
return ""
return response.text
@staticmethod
def is_feed_url(url: str) -> bool:
return any(map(url.lower().endswith, [".rss", ".rdf", ".xml", ".atom"]))
@staticmethod
def is_feedlike_url(url: str) -> bool:
return any(map(url.lower().count, ["rss", "rdf", "xml", "atom", "feed"]))
def check_urls(self, urls: list) -> list:
feeds = []
for url in urls:
url_text = self.is_feed(url)
if url_text:
feed = self.create_feed_info(url, url_text)
feeds.append(feed)
return feeds
def create_feed_info(self, url: str, text: str) -> FeedInfo:
info = FeedInfo(url)
if self.get_feed_info:
logger.info(u"Getting FeedInfo for {0}".format(url))
info.get_info(text=text, soup=self.soup, finder=self)
return info
@property
def soup(self) -> BeautifulSoup:
return self.parsed_soup
def create_soup(self, text: str) -> None:
self.parsed_soup = BeautifulSoup(text, "html.parser")
def search_links(self, url: str) -> list:
links = []
for link in self.soup.find_all("link"):
if link.get("type") in [
"application/rss+xml",
"text/xml",
"application/atom+xml",
"application/x.atom+xml",
"application/x-atom+xml",
]:
links.append(urljoin(url, link.get("href", "")))
return self.check_urls(links)
def search_a_tags(self, url: str) -> Tuple[list, list]:
logger.info("Looking for <a> tags.")
local, remote = [], []
for a in self.soup.find_all("a"):
href = a.get("href", None)
if href is None:
continue
if "://" not in href and self.is_feed_url(href):
local.append(href)
if self.is_feedlike_url(href):
remote.append(href)
return local, remote
@requests_session()
def find_feeds(
url: str,
check_all: bool = False,
get_feed_info: bool = False,
timeout: tuple = (3.05, 10),
**kwargs
) -> list:
finder = FeedFinder(
kwargs.get("session"), get_feed_info=get_feed_info, timeout=timeout
)
# Format the URL properly.
url = coerce_url(url)
feeds = []
start_time = time.perf_counter()
# Download the requested URL
logger.info("Finding feeds at URL: {0}".format(url))
response = finder.get_url(url)
search_time = int((time.perf_counter() - start_time) * 1000)
logger.debug("Searched url in {0}ms".format(search_time))
if not response or not response.text:
return []
text = response.text
# Parse text with BeautifulSoup
finder.create_soup(text)
# Check if it is already a feed.
if finder.is_feed_data(text):
found = finder.create_feed_info(url, text)
feeds.append(found)
return feeds
# Search for <link> tags
logger.info("Looking for <link> tags.")
found_links = finder.search_links(url)
feeds.extend(found_links)
logger.info("Found {0} feed <link> tags.".format(len(found_links)))
search_time = int((time.perf_counter() - start_time) * 1000)
logger.debug("Searched <link> tags in {0}ms".format(search_time))
if len(feeds) and not check_all:
return sort_urls(feeds, url)
# Look for <a> tags.
logger.info("Looking for <a> tags.")
local, remote = finder.search_a_tags(url)
# Check the local URLs.
local = [urljoin(url, l) for l in local]
found_local = finder.check_urls(local)
feeds.extend(found_local)
logger.info("Found {0} local <a> links to feeds.".format(len(found_local)))
# Check the remote URLs.
remote = [urljoin(url, l) for l in remote]
found_remote = finder.check_urls(remote)
feeds.extend(found_remote)
logger.info("Found {0} remote <a> links to feeds.".format(len(found_remote)))
search_time = int((time.perf_counter() - start_time) * 1000)
logger.debug("Searched <a> links in {0}ms".format(search_time))
if len(feeds) and not check_all:
return sort_urls(feeds, url)
# Guessing potential URLs.
fns = ["atom.xml", "index.atom", "index.rdf", "rss.xml", "index.xml", "index.rss"]
urls = list(urljoin(url, f) for f in fns)
found_guessed = finder.check_urls(urls)
feeds.extend(found_guessed)
logger.info("Found {0} guessed links to feeds.".format(len(found_guessed)))
search_time = int((time.perf_counter() - start_time) * 1000)
logger.debug("Searched guessed urls in {0}ms".format(search_time))
return sort_urls(feeds, url)
def url_feed_prob(url: str, original_url: str = None) -> int:
score = 0
if original_url:
url_domain = get_site_root(url)
original_domain = get_site_root(original_url)
if url_domain not in original_domain:
score -= 17
if "comments" in url:
score -= 15
if "georss" in url:
score -= 9
if "alt" in url:
score -= 7
kw = ["rss", "atom", ".xml", "feed", "rdf"]
for p, t in zip(range(len(kw) * 2, 0, -2), kw):
if t in url:
score += p
if url.startswith("https"):
score += 9
print("Url: {0}, Score: {1}".format(url, score))
return score
def sort_urls(feeds, original_url=None):
print("Sorting feeds: {0}".format(feeds))
sorted_urls = sorted(
list(set(feeds)), key=lambda x: url_feed_prob(x.url, original_url), reverse=True
)
logger.info(u"Returning sorted URLs: {0}".format(sorted_urls))
return sorted_urls
| mit | -221,718,999,646,376,000 | 28.552846 | 88 | 0.587895 | false | 3.495192 | false | false | false |
Badg/hwiopy | hwiopy/platforms/beagle.py | 1 | 6484 | ''' Beaglebone/Beagleboard/Etc hardware-specific operations.
LICENSING
-------------------------------------------------
hwiopy: A common API for hardware input/output access.
Copyright (C) 2014-2015 Nicholas Badger
[email protected]
nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
------------------------------------------------------
Something something sooooomething goes here.
'''
# Global dependencies
import io
import struct
import mmap
import json
from warnings import warn
from pkg_resources import resource_string
# from os import listdir
# from os.path import isfile, join, split
# Intrapackage dependencies
from . import __path__
from .. import core
from .. import systems
# from . import generic
# from .generic import device
class _header_map():
''' Callable class that resolves the header pins into their connections,
as well as providing several utility functions to describe the device.
_header_map():
======================================================
Returns the header connection, be it a hardwired one (ex 5VDC) or a SoC
terminal.
*args
------------------------------------------------------
pin_num: str 'pin number'
return
-------------------------------------------------------
str 'SoC terminal or other'
_header_map.list_system_headers():
========================================================
Returns all of the header pins that connect to the sitara SoC.
return
--------------------------------------------------------
        dict {'pin num': ['SoC terminal(s)']}
    _header_map.list_all_headers():
    =========================================================
    Returns all of the header pins, hardwired and SoC-connected alike.
        return
        -------------------------------------------------------
        dict {'pin num': ['connection(s)']}
'''
def __init__(self):
# Load the corresponding json file and create a map dict
self._sys_map = json.loads(
resource_string('hwiopy', 'maps/bbb_sysmap.json').\
decode('utf-8'))
self._header_pins = tuple(self._sys_map.keys())
# Predeclare
self._hardwired = {}
self._connected = {}
self._all_headers = {}
# Separate any hardwired (5VDC, GND, etc) pins from SoC connections
# Need way to collapse dict list into single item for _all_headers
for pin_num, pin_dict in self._sys_map.items():
if pin_dict['connections']:
self._hardwired[pin_num] = pin_dict['connections']
self._all_headers[pin_num] = pin_dict['connections']
elif pin_dict['terminals']:
self._connected[pin_num] = pin_dict['terminals']
self._all_headers[pin_num] = pin_dict['terminals']
def __call__(self, pin_num, pin_941=None, pin_942=None, *args, **kwargs):
# Grab the start and convert it to int (aka long)
# NOTE THAT HERE IS THE PLACE TO DEAL WITH THE TWO HEADER PINS THAT
# ARE CONNECTED TO TWO SOC PINS!! (pin 9_41 and pin 9_42)
# Don't necessarily want to error trap out declaring pin_941 and/or
# pin_942 with each other, or with a different pin number
which_connection = 0
if pin_num == '9_41':
if pin_941:
which_connection = pin_941
else:
warn(RuntimeWarning('Lookup on pin 9_41 without specifying '
'which mode to connect to. Defaulting to Sitara pin D14. '
'Consult the BBB system reference manual for details.'))
if pin_num == '9_42':
if pin_942:
which_connection = pin_942
else:
warn(RuntimeWarning('Lookup on pin 9_42 without specifying '
'which mode to connect to. Defaulting to Sitara pin C18. '
'Consult the BBB system reference manual for details.'))
# Now use whatever information we have to output the connection
return self._all_headers[pin_num][which_connection]
# Returns all header pins that are configurable
def list_system_headers(self):
return self._connected
# Very simply return a description of the queried register
def list_all_headers(self):
return self._all_headers
class BBB(core.Device):
''' A beaglebone black. Must have kernel version >=3.8, use overlays, etc.
'''
# Where is the memory mapping stored to?
# mem_reg_loc = '/dev/mem'
# What pins correspond to what possible mappings?
def __init__(self, mem_filename='/dev/mem'):
''' Creates the device and begins setting it up.
'''
# Call super, initializing all of the abstract base class attributes
super().__init__(systems.Sitara335(mem_filename), _header_map())
def create_pin(self, pin_num, mode, name=None):
''' Gets a pin object from the self.chipset object and connects it to
a pin on the self.pinout dict.
which_terminal is redundant with mode?
'''
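        # Usage sketch (hypothetical pin/mode names; the valid values come
        # from maps/bbb_sysmap.json and the Sitara chipset definition):
        #   bbb = BBB()
        #   led = bbb.create_pin('8_3', 'gpio', name='led')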
# NOTE THAT DUE TO THE ODDITY OF THE BBB, pins 9_41 and 9_42 need to
# be specially configured, as they each connect to two SoC terminals.
super().create_pin(pin_num, mode, name)
# pin = self.pinout[pin_num]
# return pin
return self.pinout[pin_num]
def validate(self):
''' Checks the device setup for conflicting pins, etc.
Actually this is probably unnecessary (?), as individual pin
assignments should error out with conflicting setups.
'''
pass | lgpl-2.1 | -8,934,214,126,180,577,000 | 34.244565 | 78 | 0.571869 | false | 4.477901 | false | false | false |
TalwalkarLab/paleo | paleo/layers/conv.py | 1 | 11804 | """The module estimates 2D convolution layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from paleo.layers import base
class Deconv2D(base.BaseLayer):
"""Deconv2D"""
def __init__(self,
name,
inputs,
filters,
strides,
padding,
output_shape,
use_cudnn=False,
backprop=True,
activation_fn='relu',
percent_holes=0.0):
super(Deconv2D, self).__init__(name, 'deconv2d')
self._inputs = inputs
self._filters = filters
self._strides = strides
self._padding = padding
self._backprop = backprop
# The deconv2d is implemented with conv2d.
self._transposed = Conv2d(
name + '_reverse',
output_shape,
filters,
strides,
padding,
use_cudnn=use_cudnn,
backprop=backprop,
activation_fn=activation_fn)
self._pad_h = self._transposed._pad_h
self._pad_w = self._transposed._pad_w
self._equivalent_conv = self._transposed.gradients(wrt='inputs')
self._outputs = output_shape
assert self._equivalent_conv.outputs == output_shape, (
'Output {} does not match the desired shape {}'.format(
str(self._equivalent_conv.outputs), str(output_shape)))
# Verify the backprop will get the correct gradient shapes.
self._back_filters = self._equivalent_conv.gradients(wrt='filters')
self._back_filters._percent_holes = (
self._equivalent_conv._percent_holes)
self._back_filters._hole_position = 'filters'
assert self._back_filters.outputs[1:3] == filters[:2], (
'Back filters {} does not match the desired shape {}'.format(
str(self._back_filters.outputs[1:3]), str(filters[:2])))
# Back wrt to input is a regular conv2d op.
self._back_inputs = self._transposed
assert self._back_inputs.outputs == inputs, (
'Back inputs {} does not match the desired shape {}'.format(
str(self._back_inputs.outputs), str(inputs)))
def gradients(self, wrt='inputs'):
"""Returns a conv layer that is equivalent to calculating the gradient
on this layer.
Args:
wrt: inputs or filters
"""
if wrt == 'inputs':
return self._back_inputs
elif wrt == 'filters':
return self._back_filters
def additional_summary(self):
return "Filters: {} Params: {:,}".format(self._filters,
self.num_params)
@property
def filters(self):
return self._filters
@property
def strides(self):
return self._strides
@property
def padding(self):
return self._padding
@property
def backprop(self):
return self._backprop
@property
def weights_in_bytes(self):
"""Returns weights."""
_BYTES_FLOAT = 4
kernel_h, kernel_w, in_channel, out_channel = self._filters
filters_in_bytes = (kernel_h * kernel_w * in_channel * out_channel *
_BYTES_FLOAT)
bias_in_bytes = out_channel * _BYTES_FLOAT
return filters_in_bytes + bias_in_bytes
@property
def num_params(self):
weights = six.moves.reduce(lambda x, y: x * y, self._filters, 1)
bias = self._filters[-1]
return weights + bias
class Conv2d(base.BaseLayer):
"""Estimator for 2D Convolutional layers. """
def __init__(self,
name,
inputs,
filters,
strides,
padding,
use_cudnn=False,
backprop=True,
activation_fn='relu',
percent_holes=0.0,
hole_position='filters',
splits=None):
"""Initialize estimator. """
super(Conv2d, self).__init__(name, 'conv2d')
self._inputs = list(inputs)
self._filters = list(filters)
if self._filters[2] == -1:
self._filters[2] = self._inputs[3]
self._strides = list(strides)
self._padding = padding
if splits is not None:
self.split_model(splits)
self._outputs = self._calculate_output_shape()
self._use_cudnn = use_cudnn
self._backprop = backprop
self._activation_fn = activation_fn
# Percent of holes in astrous convolution.
self._percent_holes = percent_holes
self._hole_position = hole_position
@property
def percent_holes(self):
return self._percent_holes
@property
def percent_holes_in_inputs(self):
if self._hole_position == 'inputs':
return self.percent_holes
else:
return 0.0
@property
def percent_holes_in_filters(self):
if self._hole_position == 'filters':
return self.percent_holes
else:
return 0.0
@property
def activation_fn(self):
return self._activation_fn
@property
def bias(self):
return self._filters[-1]
@property
def filters(self):
return self._filters
@property
def backprop(self):
return self._backprop
@property
def strides(self):
return self._strides
@property
def padding(self):
return self._padding
def split_model(self, num_splits):
"""Split in model parallel fashion."""
self._filters[3] = self._filters[3] // num_splits
def additional_summary(self):
return ("""Filters: {} Pad: {} ({}, {}) """
"""Stride: {}, {} Params: {:,}""".format(
self._filters, self._padding, self._pad_h, self._pad_w,
self.strides[1], self.strides[2], self.num_params))
def _calculate_output_shape(self):
"""Returns the output tensor shape."""
n, h, w, c = self._inputs
kernel_h, kernel_w, in_channel, out_channel = self._filters
_, stride_h, stride_w, _ = self._strides
if self._padding == 'VALID':
out_height = int(
math.ceil(float(h - kernel_h + 1) / float(stride_h)))
out_width = int(
math.ceil(float(w - kernel_w + 1) / float(stride_w)))
self._pad_h = 0
self._pad_w = 0
elif self._padding == 'SAME':
out_height = int(math.ceil(float(h) / float(stride_h)))
out_width = int(math.ceil(float(w) / float(stride_w)))
pad_along_height = (out_height - 1) * stride_h + kernel_h - h
pad_along_width = (out_width - 1) * stride_w + kernel_w - w
self._pad_h = pad_along_height // 2
self._pad_w = pad_along_width // 2
elif isinstance(self._padding, list):
self._pad_h, self._pad_w = self._padding
out_height = (h + 2 * self._pad_h - kernel_h) // stride_h + 1
out_width = (w + 2 * self._pad_w - kernel_w) // stride_w + 1
assert in_channel == c, (
"Input channel shall match. Layer %s: %d != %d" %
(self.name, in_channel, c))
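        # Example: with SAME padding, a 224x224 input and a 7x7 kernel at
        # stride 2 gives ceil(224/2) = 112, i.e. a 112x112 output map.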
# out_h = (h + 2 * self._pad_h - kernel_h) // stride_h + 1
# out_w = (w + 2 * self._pad_w - kernel_w) // stride_w + 1
return [n, out_height, out_width, out_channel]
@property
def weights_in_bytes(self):
"""Returns weights."""
_BYTES_FLOAT = 4
kernel_h, kernel_w, in_channel, out_channel = self._filters
filters_in_bytes = (kernel_h * kernel_w * in_channel * out_channel *
_BYTES_FLOAT)
bias_in_bytes = out_channel * _BYTES_FLOAT
return filters_in_bytes + bias_in_bytes
@property
def num_params(self):
weights = six.moves.reduce(lambda x, y: x * y, self._filters, 1)
bias = self._filters[-1]
return weights + bias
def gradients(self, wrt='inputs'):
"""Returns a conv layer that is equivalent to calculating the gradient
on this layer.
Args:
wrt: inputs or filters
"""
layer = self
def _compute_padding(layer):
# Reference: TensorFlow ConvBackpropExtractAndVerifyDimension()
# Convolution of inputs with padded output grads and filters.
expanded_output_h = (layer.outputs[1] - 1) * layer.strides[1] + 1
expanded_output_w = (layer.outputs[2] - 1) * layer.strides[2] + 1
padded_out_h = layer.inputs[1] + layer.filters[0] - 1
padded_out_w = layer.inputs[2] + layer.filters[1] - 1
# Number of padding elements to be added before/after this
# dimension of input when computing Conv2DBackpropInput.
pad_before_h = layer.filters[0] - 1 - layer._pad_h
pad_before_w = layer.filters[1] - 1 - layer._pad_w
pad_after_h = padded_out_h - expanded_output_h - pad_before_h
pad_after_w = padded_out_w - expanded_output_w - pad_before_w
# Add one when padding is odd.
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/conv_grad_filter_ops.cc#L471
if abs(pad_before_h - pad_after_h) == 1:
expanded_output_h += 1
if abs(pad_before_w - pad_after_w) == 1:
expanded_output_w += 1
p_h = min(pad_before_h, pad_after_h)
p_w = min(pad_before_w, pad_after_w)
return (expanded_output_h, expanded_output_w, p_h, p_w)
expanded_output_h, expanded_output_w, pad_h, pad_w = _compute_padding(
layer)
holes = (expanded_output_h * expanded_output_w - self.outputs[1] *
self.outputs[2])
percent_holes = (holes / expanded_output_h / expanded_output_w)
# print('gradient wrt: {}'.format(wrt))
# print('expanded outputs: {} {}'.format(expanded_output_h,
# expanded_output_w))
# print('padding: {} {}'.format(pad_h, pad_h))
# print('holes: {} ({})'.format(holes, percent_holes))
if wrt == 'inputs':
dummy_layer = Conv2d(
name="dummy_layer",
inputs=[
layer.outputs[0], expanded_output_h, expanded_output_w,
layer.outputs[3]
],
filters=[
layer.filters[0], layer.filters[1], layer.filters[3],
layer.filters[2]
],
strides=[1, 1, 1, 1],
padding=[pad_h, pad_w],
percent_holes=percent_holes,
hole_position='inputs')
# FIXME: distinguish holes in input and filter
elif wrt == 'filters':
if layer.padding == 'VALID':
_p = "VALID"
else:
_p = [pad_h, pad_w]
# Convolution of inputs with inputs and output grads.
dummy_layer = Conv2d(
name="dummy_layer",
inputs=[
layer.inputs[3], layer.inputs[1], layer.inputs[2],
layer.inputs[0]
],
filters=[
expanded_output_h, expanded_output_w, layer.outputs[0],
layer.outputs[3]
],
strides=[1, 1, 1, 1],
padding=_p,
percent_holes=percent_holes,
hole_position='filters')
return dummy_layer
| apache-2.0 | 4,500,665,056,704,952,000 | 33.923077 | 119 | 0.527194 | false | 4.004071 | false | false | false |
nathaliaspatricio/febracev | friends/views.py | 1 | 1329 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from friends.utils import get_following_set, get_follower_set, get_mutual_set
from django.contrib.auth.decorators import login_required
from friends.models import FriendLink
FRIEND_FUNCTION_MAP = {
'followers': get_follower_set,
'following': get_following_set,
'mutual': get_mutual_set,
}
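# function_alias is supplied by the urlconf (not shown here) and must be one
# of the keys above.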
def friend_list(request, username, function_alias):
user = get_object_or_404(User, username=username)
context = {'friend_list': FRIEND_FUNCTION_MAP[function_alias](user)}
return render_to_response('friends/friend_list.html',
context,
context_instance = RequestContext(request))
@login_required
def add_friend(request, username):
user = request.user
friend = get_object_or_404(User, username=username)
FriendLink.objects.get_or_create(from_user=user, to_user=friend)
return redirect(friend.get_profile())
@login_required
def remove_friend(request, username):
user = request.user
friend = get_object_or_404(User, username=username)
FriendLink.objects.get(from_user=user, to_user=friend).delete()
return redirect(friend.get_profile())
| gpl-2.0 | -6,370,096,574,285,482,000 | 38.088235 | 77 | 0.712566 | false | 3.621253 | false | false | false |
cstipkovic/spidermonkey-research | testing/marionette/harness/marionette/tests/unit/test_switch_remote_frame.py | 1 | 5471 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette import MarionetteTestCase
from marionette_driver.by import By
OOP_BY_DEFAULT = "dom.ipc.browser_frames.oop_by_default"
BROWSER_FRAMES_ENABLED = "dom.mozBrowserFramesEnabled"
class TestSwitchRemoteFrame(MarionetteTestCase):
def setUp(self):
super(TestSwitchRemoteFrame, self).setUp()
with self.marionette.using_context('chrome'):
self.oop_by_default = self.marionette.get_pref(OOP_BY_DEFAULT)
self.mozBrowserFramesEnabled = self.marionette.get_pref(BROWSER_FRAMES_ENABLED)
self.marionette.set_pref(OOP_BY_DEFAULT, True)
self.marionette.set_pref(BROWSER_FRAMES_ENABLED, True)
self.multi_process_browser = self.marionette.execute_script("""
try {
return Services.appinfo.browserTabsRemoteAutostart;
} catch (e) {
return false;
}""")
def tearDown(self):
with self.marionette.using_context("chrome"):
if self.oop_by_default is None:
self.marionette.clear_pref(OOP_BY_DEFAULT)
else:
self.marionette.set_pref(OOP_BY_DEFAULT, self.oop_by_default)
if self.mozBrowserFramesEnabled is None:
self.marionette.clear_pref(BROWSER_FRAMES_ENABLED)
else:
self.marionette.set_pref(BROWSER_FRAMES_ENABLED, self.mozBrowserFramesEnabled)
@property
def is_main_process(self):
return self.marionette.execute_script("""
return Components.classes["@mozilla.org/xre/app-info;1"].
getService(Components.interfaces.nsIXULRuntime).
processType == Components.interfaces.nsIXULRuntime.PROCESS_TYPE_DEFAULT;
""", sandbox="system")
def test_remote_frame(self):
self.marionette.navigate(self.marionette.absolute_url("test.html"))
self.marionette.push_permission('browser', True)
self.marionette.execute_script("""
let iframe = document.createElement("iframe");
iframe.setAttribute('mozbrowser', true);
iframe.setAttribute('remote', true);
iframe.id = "remote_iframe";
iframe.style.height = "100px";
iframe.style.width = "100%%";
iframe.src = "%s";
document.body.appendChild(iframe);
""" % self.marionette.absolute_url("test.html"))
remote_iframe = self.marionette.find_element(By.ID, "remote_iframe")
self.marionette.switch_to_frame(remote_iframe)
main_process = self.is_main_process
self.assertFalse(main_process)
def test_remote_frame_revisit(self):
# test if we can revisit a remote frame (this takes a different codepath)
self.marionette.navigate(self.marionette.absolute_url("test.html"))
self.marionette.push_permission('browser', True)
self.marionette.execute_script("""
let iframe = document.createElement("iframe");
iframe.setAttribute('mozbrowser', true);
iframe.setAttribute('remote', true);
iframe.id = "remote_iframe";
iframe.style.height = "100px";
iframe.style.width = "100%%";
iframe.src = "%s";
document.body.appendChild(iframe);
""" % self.marionette.absolute_url("test.html"))
self.marionette.switch_to_frame(self.marionette.find_element(By.ID,
"remote_iframe"))
main_process = self.is_main_process
self.assertFalse(main_process)
self.marionette.switch_to_frame()
main_process = self.is_main_process
should_be_main_process = not self.multi_process_browser
self.assertEqual(main_process, should_be_main_process)
self.marionette.switch_to_frame(self.marionette.find_element(By.ID,
"remote_iframe"))
main_process = self.is_main_process
self.assertFalse(main_process)
def test_we_can_switch_to_a_remote_frame_by_index(self):
        # test that we can switch to a remote frame by its index
self.marionette.navigate(self.marionette.absolute_url("test.html"))
self.marionette.push_permission('browser', True)
self.marionette.execute_script("""
let iframe = document.createElement("iframe");
iframe.setAttribute('mozbrowser', true);
iframe.setAttribute('remote', true);
iframe.id = "remote_iframe";
iframe.style.height = "100px";
iframe.style.width = "100%%";
iframe.src = "%s";
document.body.appendChild(iframe);
""" % self.marionette.absolute_url("test.html"))
self.marionette.switch_to_frame(0)
main_process = self.is_main_process
self.assertFalse(main_process)
self.marionette.switch_to_frame()
main_process = self.is_main_process
should_be_main_process = not self.multi_process_browser
self.assertEqual(main_process, should_be_main_process)
self.marionette.switch_to_frame(0)
main_process = self.is_main_process
self.assertFalse(main_process)
| mpl-2.0 | 5,705,097,036,018,414,000 | 45.760684 | 94 | 0.618351 | false | 3.711669 | true | false | false |
stuaxo/mnd | mnd/handler.py | 1 | 6612 | """
To make callbacks work with instance methods some things need to happen.
The handler decorator attaches instances of MNDInfo to functions to enable
the dispatcher to work with classes and instances via the Handler metaclass.
At instance creation time the metaclass converts finds any handlers with
MNDFunction, replaces them with MNDMethods + informs the dispatcher.
"""
import pickle
import weakref
from collections import defaultdict
class ArgSpec(object):
"""
The arguments a function accepts.
Keeps a pickled copy of the arguments for hashing purposes.
"""
def __init__(self, key=None, *accept_args, **accept_kwargs):
"""
        :param key: optional - accept_args/accept_kwargs already pickled
                    as a dict (same format as built below)
:param accept_args: positional args
:param accept_kwargs: keyword args
"""
if key is None:
key = pickle.dumps(dict(args=accept_args, kwargs=accept_kwargs))
self.key = key
self.accept_args = accept_args
self.accept_kwargs = accept_kwargs
def __repr__(self):
return "ArgSpec([A(%s), KW(%s)])" % (self.accept_args, self.accept_kwargs)
@property
def accepts(self):
return self.accept_args, self.accept_kwargs
class MNDInfo(object):
# base class
def __init__(self, type):
self.type = type
@property
def is_class(self):
return self.type == "class"
@property
def is_function(self):
return self.type == "function"
class MNDFunction(MNDInfo):
"""
stores weakref to a function and list of weakrefs to
dispatchers that point to it
"""
def __init__(self, f, dispatcher, argspec):
"""
:param f: callback function to call
"""
self._wf = weakref.ref(f)
self.bound_to = defaultdict(set)
self.bind_to(argspec, dispatcher)
MNDInfo.__init__(self, "function")
def bind_to(self, argspec, dispatcher):
"""
Add our function to dispatcher
"""
self.bound_to[argspec.key].add((argspec, dispatcher))
dispatcher.bind(self.f, argspec)
@property
def f(self):
return self._wf()
def unbind(self):
"""
Unbind from dispatchers and target function.
:return: set of tuples containing [argspec, dispatcher]
"""
args_dispatchers = set()
f = self._wf()
if f is not None:
for ad_list in self.bound_to.values():
args_dispatchers.update(ad_list)
for argspec, dispatcher in ad_list:
dispatcher.unbind(self.f, argspec)
del f.__dict__['__mnd__']
self.bound_to = {}
return args_dispatchers
class MNDMethod(MNDInfo):
def __init__(self, m, dispatcher, argspec):
"""
:param m: callback method to call
:param dispatcher: initial dispatcher
"""
self.bound_to = defaultdict(set)
self.bind_to(m, argspec, dispatcher)
MNDInfo.__init__(self, "method")
def bind_to(self, instancemethod, argspec, dispatcher):
"""
Add dispatcher for argspec
"""
self.bound_to[argspec.key].add((argspec, dispatcher))
dispatcher.bind(instancemethod, argspec)
class MNDClass(MNDInfo):
def __init__(self, bind_to):
MNDInfo.__init__(self, "class")
self.bind_to = bind_to
def bind_handler_methods(self):
for name, ad_list in self.__mnd__.bind_to.items():
m = getattr(self, name)
for argspec, dispatcher in ad_list:
mnd = m.__dict__.get('__mnd__')
if mnd is None:
mnd = MNDMethod(m, dispatcher, argspec)
m.__dict__['__mnd__'] = mnd
def base_mnds(bases):
"""
:param bases: sequence of base classes
:yield: mnd of any base classes
"""
for base in bases:
mnd = getattr(base, "__mnd__", None)
if mnd is not None:
yield mnd
class Handler(type):
"""
Metaclass enables instance methods to be used as handlers.
"""
def __new__(meta, name, bases, dct):
bind_to = defaultdict(set) # { method_name: ((argspec, dispatcher)...)}
for mnd in base_mnds(bases):
bind_to.update(mnd.bind_to)
for mname, member in dct.items():
mnd = getattr(member, "__mnd__", None)
if mnd is not None and mnd.is_function:
args_dispatchers = mnd.unbind() # set
bind_to[mname].update(args_dispatchers) # ((argspec, dispatcher)...)
dct['__mnd__'] = MNDClass(bind_to)
# wrap __init__
wrapped_init = dct.get('__init__')
if wrapped_init is None:
def wrapped_init(self, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
bind_handler_methods(self)
wrapped_init(self, *args, **kwargs)
dct['__init__'] = __init__
return super(Handler, meta).__new__(meta, name, bases, dct)
def __init__(cls, name, bases, dct):
super(Handler, cls).__init__(name, bases, dct)
def bind_function(f, dispatcher, *accept_args, **accept_kwargs):
"""
Bind a function to a dispatcher.
Takes accept_args, and accept_kwargs and creates and ArgSpec instance,
adding that to the MNDFunction which annotates the function
:param f: function to wrap
:param accept_args:
:param accept_kwargs:
:return:
"""
argspec = ArgSpec(None, *accept_args, **accept_kwargs)
mnd = MNDFunction(f, dispatcher, argspec)
f.__mnd__ = mnd
return f
def bind_instancemethod(m, dispatcher, *accept_args, **accept_kwargs):
"""
Bind a function to a dispatcher.
Takes accept_args, and accept_kwargs and creates and ArgSpec instance,
adding that to the MNDFunction which annotates the function
:param f: function to wrap
:param accept_args:
:param accept_kwargs:
:return:
"""
argspec = ArgSpec(None, *accept_args, **accept_kwargs)
mnd = MNDMethod(m, dispatcher, argspec)
m.__dict__['__mnd__'] = mnd
return m
def handle(dispatcher, *accept_args, **accept_kwargs):
"""
:param dispatcher: dispatcher to recieve events from
:param accept_args: args to match on
:param accept_kwargs: kwargs to match on
Creates an MNDFunction instance which containing the
argspec and adds the function to the dispatcher.
"""
def bind_function_later(f):
bind_function(f, dispatcher, *accept_args, **accept_kwargs)
return f
return bind_function_later
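# Usage sketch (assumes a dispatcher object exposing bind(f, argspec) and
# unbind(f, argspec), which is all this module requires of it):
#
#   @handle(dispatcher, 'button_press', button=1)
#   def on_press(*args, **kwargs):
#       ...
#
#   class Widget(object):
#       __metaclass__ = Handler  # Python 2 syntax; Handler wraps __init__
#
#       @handle(dispatcher, 'key_press')
#       def on_key(self, *args, **kwargs):
#           ...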
| mit | -7,329,817,114,201,264,000 | 27.747826 | 85 | 0.598306 | false | 3.898585 | false | false | false |
ofirpicazo/solitario | bin/generate_cards.py | 1 | 8706 | #!/usr/bin/python
# coding: utf-8
import argparse
import sys
SUIT_MAP = {
'club': '♣',
'diamond': '♦',
'heart': '♥',
'spade': '♠',
}
templateA = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit middle center">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template2 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top center">%(symbol)s</span>
<span class="suit bottom center">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template3 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top center">%(symbol)s</span>
<span class="suit middle center">%(symbol)s</span>
<span class="suit bottom center">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template4 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template5 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit middle center">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template6 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit middle left">%(symbol)s</span>
<span class="suit middle right">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template7 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit under-top center">%(symbol)s</span>
<span class="suit middle left">%(symbol)s</span>
<span class="suit middle right">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template8 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit under-top center">%(symbol)s</span>
<span class="suit middle left">%(symbol)s</span>
<span class="suit middle right">%(symbol)s</span>
<span class="suit over-bottom center">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template9 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit under-top center">%(symbol)s</span>
<span class="suit over-middle left">%(symbol)s</span>
<span class="suit over-middle right">%(symbol)s</span>
<span class="suit under-middle left">%(symbol)s</span>
<span class="suit under-middle right">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template10 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit under-top center">%(symbol)s</span>
<span class="suit over-middle left">%(symbol)s</span>
<span class="suit over-middle right">%(symbol)s</span>
<span class="suit under-middle left">%(symbol)s</span>
<span class="suit under-middle right">%(symbol)s</span>
<span class="suit over-bottom center">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
CARD_TEMPLATES = (
('A', templateA),
('2', template2),
('3', template3),
('4', template4),
('5', template5),
('6', template6),
('7', template7),
('8', template8),
('9', template9),
('10', template10),
('J', templateA),
('Q', templateA),
('K', templateA),
)
def main(args):
parser = argparse.ArgumentParser(description='Create card templates')
parser.add_argument("suit", type=str, choices=SUIT_MAP.keys(),
help="Suit to create templates for")
args = parser.parse_args(args)
for number, template in CARD_TEMPLATES:
symbol = SUIT_MAP[args.suit]
id = args.suit[0] + number.lower() # e.g. d9 for diamond 9
print template % {'suit': args.suit,
'number': number,
'symbol': symbol,
'id': id}
if __name__ == '__main__':
main(sys.argv[1:])
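# Example invocation (illustrative; the script targets Python 2 -- note the
# print statement above):
#   python generate_cards.py spade > spade_cards.html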
| mit | 7,593,103,439,347,813,000 | 30.977941 | 85 | 0.557714 | false | 3.106429 | false | false | false |
brython-dev/brython | www/src/Lib/threading.py | 1 | 51971 | """Thread module emulating a subset of Java's threading model."""
import os as _os
import sys as _sys
import _thread
import functools
from time import monotonic as _time
from _weakrefset import WeakSet
from itertools import islice as _islice, count as _count
try:
from _collections import deque as _deque
except ImportError:
from collections import deque as _deque
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k), so this module provides them as an
# alias for the PEP 8 compliant names
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['get_ident', 'active_count', 'Condition', 'current_thread',
'enumerate', 'main_thread', 'TIMEOUT_MAX',
'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Barrier', 'BrokenBarrierError', 'Timer', 'ThreadError',
'setprofile', 'settrace', 'local', 'stack_size',
'excepthook', 'ExceptHookArgs']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
_set_sentinel = _thread._set_sentinel
get_ident = _thread.get_ident
try:
get_native_id = _thread.get_native_id
_HAVE_THREAD_NATIVE_ID = True
__all__.append('get_native_id')
except AttributeError:
_HAVE_THREAD_NATIVE_ID = False
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
"""Set a profile function for all threads started from the threading module.
The func will be passed to sys.setprofile() for each thread, before its
run() method is called.
"""
global _profile_hook
_profile_hook = func
def settrace(func):
"""Set a trace function for all threads started from the threading module.
The func will be passed to sys.settrace() for each thread, before its run()
method is called.
"""
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
"""Factory function that returns a new reentrant lock.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it again
without blocking; the thread must release it once for each time it has
acquired it.
"""
if _CRLock is None:
return _PyRLock(*args, **kwargs)
return _CRLock(*args, **kwargs)
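# A minimal sketch of reentrant acquisition (illustrative only):
#
#   lock = RLock()
#   with lock:        # recursion level 1
#       with lock:    # the same thread may re-acquire; recursion level 2
#           pass      # each with-block exit releases one level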
class _RLock:
"""This class implements reentrant lock objects.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it
again without blocking; the thread must release it once for each time it
has acquired it.
"""
def __init__(self):
self._block = _allocate_lock()
self._owner = None
self._count = 0
def __repr__(self):
owner = self._owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s %s.%s object owner=%r count=%d at %s>" % (
"locked" if self._block.locked() else "unlocked",
self.__class__.__module__,
self.__class__.__qualname__,
owner,
self._count,
hex(id(self))
)
def _at_fork_reinit(self):
self._block._at_fork_reinit()
self._owner = None
self._count = 0
def acquire(self, blocking=True, timeout=-1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
When invoked with the floating-point timeout argument set to a positive
value, block for at most the number of seconds specified by timeout
and as long as the lock cannot be acquired. Return true if the lock has
been acquired, false if the timeout has elapsed.
"""
me = get_ident()
if self._owner == me:
self._count += 1
return 1
rc = self._block.acquire(blocking, timeout)
if rc:
self._owner = me
self._count = 1
return rc
__enter__ = acquire
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self._owner != get_ident():
raise RuntimeError("cannot release un-acquired lock")
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
def _release_save(self):
if self._count == 0:
raise RuntimeError("cannot release un-acquired lock")
count = self._count
self._count = 0
owner = self._owner
self._owner = None
self._block.release()
return (count, owner)
def _is_owned(self):
return self._owner == get_ident()
_PyRLock = _RLock
class Condition:
"""Class that implements a condition variable.
A condition variable allows one or more threads to wait until they are
notified by another thread.
If the lock argument is given and not None, it must be a Lock or RLock
object, and it is used as the underlying lock. Otherwise, a new RLock object
is created and used as the underlying lock.
"""
def __init__(self, lock=None):
if lock is None:
lock = RLock()
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self._waiters = _deque()
def _at_fork_reinit(self):
self._lock._at_fork_reinit()
self._waiters.clear()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def _release_save(self):
self._lock.release() # No state to save
def _acquire_restore(self, x):
self._lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if _lock doesn't have _is_owned().
if self._lock.acquire(False):
self._lock.release()
return False
else:
return True
def wait(self, timeout=None):
"""Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notify_all() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
gotit = False
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
gotit = True
else:
if timeout > 0:
gotit = waiter.acquire(True, timeout)
else:
gotit = waiter.acquire(False)
return gotit
finally:
self._acquire_restore(saved_state)
if not gotit:
try:
self._waiters.remove(waiter)
except ValueError:
pass
def wait_for(self, predicate, timeout=None):
"""Wait until a condition evaluates to True.
predicate should be a callable which result will be interpreted as a
boolean value. A timeout may be provided giving the maximum time to
wait.
"""
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
def notify(self, n=1):
"""Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
all_waiters = self._waiters
waiters_to_notify = _deque(_islice(all_waiters, n))
if not waiters_to_notify:
return
for waiter in waiters_to_notify:
waiter.release()
try:
all_waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
"""Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.
"""
self.notify(len(self._waiters))
notifyAll = notify_all
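# A minimal Condition sketch (illustrative): one thread waits for an item,
# another produces it.
#
#   items, cond = [], Condition()
#
#   def consume():
#       with cond:
#           while not items:       # re-check the predicate on each wakeup
#               cond.wait()
#           return items.pop()
#
#   def produce(x):
#       with cond:
#           items.append(x)
#           cond.notify()          # wake one waiting consumer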
class Semaphore:
"""This class implements semaphore objects.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = Condition(Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
When invoked with a timeout other than None, it will block for at
most timeout seconds. If acquire does not complete successfully in
that interval, return false. Return true otherwise.
"""
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
with self._cond:
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value -= 1
rc = True
return rc
__enter__ = acquire
def release(self, n=1):
"""Release a semaphore, incrementing the internal counter by one or more.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
if n < 1:
raise ValueError('n must be one or more')
with self._cond:
self._value += n
for i in range(n):
self._cond.notify()
def __exit__(self, t, v, tb):
self.release()
class BoundedSemaphore(Semaphore):
"""Implements a bounded semaphore.
A bounded semaphore checks to make sure its current value doesn't exceed its
initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
If the semaphore is released too many times it's a sign of a bug. If not
given, value defaults to 1.
Like regular semaphores, bounded semaphores manage a counter representing
the number of release() calls minus the number of acquire() calls, plus an
initial value. The acquire() method blocks if necessary until it can return
without making the counter negative. If not given, value defaults to 1.
"""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self, n=1):
"""Release a semaphore, incrementing the internal counter by one or more.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.
"""
if n < 1:
raise ValueError('n must be one or more')
with self._cond:
if self._value + n > self._initial_value:
raise ValueError("Semaphore released too many times")
self._value += n
for i in range(n):
self._cond.notify()
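# A minimal BoundedSemaphore sketch (illustrative): allow at most three
# concurrent holders; releasing beyond the initial value raises ValueError.
#
#   pool = BoundedSemaphore(3)
#   with pool:
#       ...  # at most three threads execute this block at any one time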
class Event:
"""Class implementing event objects.
Events manage a flag that can be set to true with the set() method and reset
to false with the clear() method. The wait() method blocks until the flag is
true. The flag is initially false.
"""
# After Tim Peters' event class (without is_posted())
def __init__(self):
self._cond = Condition(Lock())
self._flag = False
def _at_fork_reinit(self):
# Private method called by Thread._reset_internal_locks()
self._cond._at_fork_reinit()
def is_set(self):
"""Return true if and only if the internal flag is true."""
return self._flag
isSet = is_set
def set(self):
"""Set the internal flag to true.
All threads waiting for it to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
with self._cond:
self._flag = True
self._cond.notify_all()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
with self._cond:
self._flag = False
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
with self._cond:
signaled = self._flag
if not signaled:
signaled = self._cond.wait(timeout)
return signaled
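# A minimal Event sketch (illustrative):
#
#   ready = Event()
#   # worker thread:        ready.wait()   # blocks until the flag is set
#   # coordinating thread:  ready.set()    # releases all waiters at once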
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
"""Implements a Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and are simultaneously awoken once they
have all made that call.
"""
def __init__(self, parties, action=None, timeout=None):
"""Create a barrier, initialised to 'parties' threads.
'action' is a callable which, when supplied, will be called by one of
the threads after they have all entered the barrier and just prior to
releasing them all. If a 'timeout' is provided, it is used as the
default for all subsequent 'wait()' calls.
"""
self._cond = Condition(Lock())
self._action = action
self._timeout = timeout
self._parties = parties
        self._state = 0 #0 filling, 1 draining, -1 resetting, -2 broken
self._count = 0
def wait(self, timeout=None):
"""Wait for the barrier.
When the specified number of threads have started waiting, they are all
simultaneously awoken. If an 'action' was provided for the barrier, one
of the threads will have executed that callback prior to returning.
Returns an individual index number from 0 to 'parties-1'.
"""
if timeout is None:
timeout = self._timeout
with self._cond:
self._enter() # Block while the barrier drains.
index = self._count
self._count += 1
try:
if index + 1 == self._parties:
# We release the barrier
self._release()
else:
# We wait until someone releases us
self._wait(timeout)
return index
finally:
self._count -= 1
# Wake up any threads waiting for barrier to drain.
self._exit()
# Block until the barrier is ready for us, or raise an exception
# if it is broken.
def _enter(self):
while self._state in (-1, 1):
# It is draining or resetting, wait until done
self._cond.wait()
#see if the barrier is in a broken state
if self._state < 0:
raise BrokenBarrierError
assert self._state == 0
# Optionally run the 'action' and release the threads waiting
# in the barrier.
def _release(self):
try:
if self._action:
self._action()
# enter draining state
self._state = 1
self._cond.notify_all()
except:
#an exception during the _action handler. Break and reraise
self._break()
raise
# Wait in the barrier until we are released. Raise an exception
# if the barrier is reset or broken.
def _wait(self, timeout):
if not self._cond.wait_for(lambda : self._state != 0, timeout):
#timed out. Break the barrier
self._break()
raise BrokenBarrierError
if self._state < 0:
raise BrokenBarrierError
assert self._state == 1
# If we are the last thread to exit the barrier, signal any threads
# waiting for the barrier to drain.
def _exit(self):
if self._count == 0:
if self._state in (-1, 1):
#resetting or draining
self._state = 0
self._cond.notify_all()
def reset(self):
"""Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all()
def abort(self):
"""Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and threads
attempting to 'wait()' will have BrokenBarrierError raised.
"""
with self._cond:
self._break()
def _break(self):
# An internal error was detected. The barrier is set to
# a broken state all parties awakened.
self._state = -2
self._cond.notify_all()
@property
def parties(self):
"""Return the number of threads required to trip the barrier."""
return self._parties
@property
def n_waiting(self):
"""Return the number of threads currently waiting at the barrier."""
# We don't need synchronization here since this is an ephemeral result
# anyway. It returns the correct value in the steady state.
if self._state == 0:
return self._count
return 0
@property
def broken(self):
"""Return True if the barrier is in a broken state."""
return self._state == -2
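# A minimal Barrier sketch (illustrative): three threads rendezvous before
# any of them proceeds.
#
#   barrier = Barrier(3)
#
#   def worker():
#       ...             # per-thread setup
#       barrier.wait()  # blocks until all three parties arrive
#       ...             # all three continue together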
# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
pass
# Helper to generate new thread names
_counter = _count().__next__
_counter() # Consume 0 so first non-main thread has id 1.
def _newname(template="Thread-%d"):
return template % _counter()
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
_dangling = WeakSet()
# Set of Thread._tstate_lock locks of non-daemon threads used by _shutdown()
# to wait until all Python thread states get deleted:
# see Thread._set_tstate_lock().
_shutdown_locks_lock = _allocate_lock()
_shutdown_locks = set()
# Main class for threads
class Thread:
"""A class that represents a thread of control.
This class can be safely subclassed in a limited fashion. There are two ways
to specify the activity: by passing a callable object to the constructor, or
by overriding the run() method in a subclass.
"""
_initialized = False
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, *, daemon=None):
"""This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.
"""
assert group is None, "group argument must be None for now"
if kwargs is None:
kwargs = {}
self._target = target
self._name = str(name or _newname())
self._args = args
self._kwargs = kwargs
if daemon is not None:
self._daemonic = daemon
else:
self._daemonic = current_thread().daemon
self._ident = None
if _HAVE_THREAD_NATIVE_ID:
self._native_id = None
self._tstate_lock = None
self._started = Event()
self._is_stopped = False
self._initialized = True
# Copy of sys.stderr used by self._invoke_excepthook()
self._stderr = _sys.stderr
self._invoke_excepthook = _make_invoke_excepthook()
# For debugging and _after_fork()
_dangling.add(self)
def _reset_internal_locks(self, is_alive):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
self._started._at_fork_reinit()
if is_alive:
self._tstate_lock._at_fork_reinit()
self._tstate_lock.acquire()
else:
# The thread isn't alive after fork: it doesn't have a tstate
# anymore.
self._is_stopped = True
self._tstate_lock = None
def __repr__(self):
assert self._initialized, "Thread.__init__() was not called"
status = "initial"
if self._started.is_set():
status = "started"
self.is_alive() # easy way to get ._is_stopped set when appropriate
if self._is_stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
if not self._initialized:
raise RuntimeError("thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("threads can only be started once")
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self._bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self._started.wait()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self._target:
self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
def _bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# _bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# _bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self._bootstrap_inner()
except:
if self._daemonic and _sys is None:
return
raise
def _set_ident(self):
self._ident = get_ident()
if _HAVE_THREAD_NATIVE_ID:
def _set_native_id(self):
self._native_id = get_native_id()
def _set_tstate_lock(self):
"""
Set a lock object which will be released by the interpreter when
the underlying thread state (see pystate.h) gets deleted.
"""
self._tstate_lock = _set_sentinel()
self._tstate_lock.acquire()
if not self.daemon:
with _shutdown_locks_lock:
_shutdown_locks.add(self._tstate_lock)
def _bootstrap_inner(self):
try:
self._set_ident()
self._set_tstate_lock()
if _HAVE_THREAD_NATIVE_ID:
self._set_native_id()
self._started.set()
with _active_limbo_lock:
_active[self._ident] = self
del _limbo[self]
if _trace_hook:
_sys.settrace(_trace_hook)
if _profile_hook:
_sys.setprofile(_profile_hook)
try:
self.run()
except:
self._invoke_excepthook(self)
finally:
with _active_limbo_lock:
try:
# We don't call self._delete() because it also
# grabs _active_limbo_lock.
del _active[get_ident()]
except:
pass
def _stop(self):
# After calling ._stop(), .is_alive() returns False and .join() returns
# immediately. ._tstate_lock must be released before calling ._stop().
#
# Normal case: C code at the end of the thread's life
# (release_sentinel in _threadmodule.c) releases ._tstate_lock, and
# that's detected by our ._wait_for_tstate_lock(), called by .join()
# and .is_alive(). Any number of threads _may_ call ._stop()
# simultaneously (for example, if multiple threads are blocked in
# .join() calls), and they're not serialized. That's harmless -
# they'll just make redundant rebindings of ._is_stopped and
# ._tstate_lock. Obscure: we rebind ._tstate_lock last so that the
# "assert self._is_stopped" in ._wait_for_tstate_lock() always works
# (the assert is executed only if ._tstate_lock is None).
#
# Special case: _main_thread releases ._tstate_lock via this
# module's _shutdown() function.
lock = self._tstate_lock
if lock is not None:
assert not lock.locked()
self._is_stopped = True
self._tstate_lock = None
if not self.daemon:
with _shutdown_locks_lock:
_shutdown_locks.discard(lock)
def _delete(self):
"Remove current thread from the dict of currently running threads."
with _active_limbo_lock:
del _active[get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
def join(self, timeout=None):
"""Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
is_alive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started and attempts to do so raises the same
exception.
"""
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if timeout is None:
self._wait_for_tstate_lock()
else:
# the behavior of a negative timeout isn't documented, but
# historically .join(timeout=x) for x<0 has acted as if timeout=0
self._wait_for_tstate_lock(timeout=max(timeout, 0))
def _wait_for_tstate_lock(self, block=True, timeout=-1):
# Issue #18808: wait for the thread state to be gone.
# At the end of the thread's life, after all knowledge of the thread
# is removed from C data structures, C code releases our _tstate_lock.
# This method passes its arguments to _tstate_lock.acquire().
# If the lock is acquired, the C code is done, and self._stop() is
# called. That sets ._is_stopped to True, and ._tstate_lock to None.
lock = self._tstate_lock
if lock is None: # already determined that the C code is done
assert self._is_stopped
elif lock.acquire(block, timeout):
lock.release()
self._stop()
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
assert self._initialized, "Thread.__init__() not called"
return self._name
@name.setter
def name(self, name):
assert self._initialized, "Thread.__init__() not called"
self._name = str(name)
@property
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
"""
assert self._initialized, "Thread.__init__() not called"
return self._ident
if _HAVE_THREAD_NATIVE_ID:
@property
def native_id(self):
"""Native integral thread ID of this thread, or None if it has not been started.
This is a non-negative integer. See the get_native_id() function.
This represents the Thread ID as reported by the kernel.
"""
assert self._initialized, "Thread.__init__() not called"
return self._native_id
def is_alive(self):
"""Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. The module function enumerate()
returns a list of all alive threads.
"""
assert self._initialized, "Thread.__init__() not called"
if self._is_stopped or not self._started.is_set():
return False
self._wait_for_tstate_lock(False)
return not self._is_stopped
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread.
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when only daemon threads are left.
"""
assert self._initialized, "Thread.__init__() not called"
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("cannot set daemon status of active thread")
self._daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
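# Two equivalent ways to use Thread (illustrative):
#
#   t = Thread(target=print, args=("hello",))   # pass a callable as target
#   t.start(); t.join()
#
#   class Greeter(Thread):                      # or subclass and override run()
#       def run(self):
#           print("hello")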
try:
from _thread import (_excepthook as excepthook,
_ExceptHookArgs as ExceptHookArgs)
except ImportError:
# Simple Python implementation if _thread._excepthook() is not available
from traceback import print_exception as _print_exception
from collections import namedtuple
_ExceptHookArgs = namedtuple(
'ExceptHookArgs',
'exc_type exc_value exc_traceback thread')
def ExceptHookArgs(args):
return _ExceptHookArgs(*args)
def excepthook(args, /):
"""
Handle uncaught Thread.run() exception.
"""
if args.exc_type == SystemExit:
# silently ignore SystemExit
return
if _sys is not None and _sys.stderr is not None:
stderr = _sys.stderr
elif args.thread is not None:
stderr = args.thread._stderr
if stderr is None:
# do nothing if sys.stderr is None and sys.stderr was None
# when the thread was created
return
else:
# do nothing if sys.stderr is None and args.thread is None
return
if args.thread is not None:
name = args.thread.name
else:
name = get_ident()
print(f"Exception in thread {name}:",
file=stderr, flush=True)
_print_exception(args.exc_type, args.exc_value, args.exc_traceback,
file=stderr)
stderr.flush()
def _make_invoke_excepthook():
# Create a local namespace to ensure that variables remain alive
# when _invoke_excepthook() is called, even if it is called late during
# Python shutdown. It is mostly needed for daemon threads.
old_excepthook = excepthook
old_sys_excepthook = _sys.excepthook
if old_excepthook is None:
raise RuntimeError("threading.excepthook is None")
if old_sys_excepthook is None:
raise RuntimeError("sys.excepthook is None")
sys_exc_info = _sys.exc_info
local_print = print
local_sys = _sys
def invoke_excepthook(thread):
global excepthook
try:
hook = excepthook
if hook is None:
hook = old_excepthook
args = ExceptHookArgs([*sys_exc_info(), thread])
hook(args)
except Exception as exc:
exc.__suppress_context__ = True
del exc
if local_sys is not None and local_sys.stderr is not None:
stderr = local_sys.stderr
else:
stderr = thread._stderr
local_print("Exception in threading.excepthook:",
file=stderr, flush=True)
if local_sys is not None and local_sys.excepthook is not None:
sys_excepthook = local_sys.excepthook
else:
sys_excepthook = old_sys_excepthook
sys_excepthook(*sys_exc_info())
finally:
# Break reference cycle (exception stored in a variable)
args = None
return invoke_excepthook
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=None, kwargs=None)
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=None, kwargs=None):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args if args is not None else []
self.kwargs = kwargs if kwargs is not None else {}
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet."""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread", daemon=False)
self._set_tstate_lock()
self._started.set()
self._set_ident()
if _HAVE_THREAD_NATIVE_ID:
self._set_native_id()
with _active_limbo_lock:
_active[self._ident] = self
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
self._started.set()
self._set_ident()
if _HAVE_THREAD_NATIVE_ID:
self._set_native_id()
with _active_limbo_lock:
_active[self._ident] = self
def _stop(self):
pass
def is_alive(self):
assert not self._is_stopped and self._started.is_set()
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
"""Return the current Thread object, corresponding to the caller's thread of control.
If the caller's thread of control was not created through the threading
module, a dummy thread object with limited functionality is returned.
"""
try:
return _active[get_ident()]
except KeyError:
return _DummyThread()
currentThread = current_thread
def active_count():
"""Return the number of Thread objects currently alive.
The returned count is equal to the length of the list returned by
enumerate().
"""
with _active_limbo_lock:
return len(_active) + len(_limbo)
activeCount = active_count
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return list(_active.values()) + list(_limbo.values())
def enumerate():
"""Return a list of all Thread objects currently alive.
The list includes daemonic threads, dummy thread objects created by
current_thread(), and the main thread. It excludes terminated threads and
threads that have not yet been started.
"""
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
_threading_atexits = []
_SHUTTING_DOWN = False
def _register_atexit(func, *arg, **kwargs):
"""CPython internal: register *func* to be called before joining threads.
The registered *func* is called with its arguments just before all
non-daemon threads are joined in `_shutdown()`. It provides a similar
purpose to `atexit.register()`, but its functions are called prior to
threading shutdown instead of interpreter shutdown.
For similarity to atexit, the registered functions are called in reverse.
"""
if _SHUTTING_DOWN:
raise RuntimeError("can't register atexit after shutdown")
call = functools.partial(func, *arg, **kwargs)
_threading_atexits.append(call)
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_main_thread = _MainThread()
def _shutdown():
"""
Wait until the Python thread state of all non-daemon threads get deleted.
"""
# Obscure: other threads may be waiting to join _main_thread. That's
# dubious, but some code does it. We can't wait for C code to release
# the main thread's tstate_lock - that won't happen until the interpreter
# is nearly dead. So we release it here. Note that just calling _stop()
# isn't enough: other threads may already be waiting on _tstate_lock.
if _main_thread._is_stopped:
# _shutdown() was already called
return
global _SHUTTING_DOWN
_SHUTTING_DOWN = True
# Main thread
tlock = _main_thread._tstate_lock
# The main thread isn't finished yet, so its thread state lock can't have
# been released.
assert tlock is not None
assert tlock.locked()
tlock.release()
_main_thread._stop()
# Call registered threading atexit functions before threads are joined.
# Order is reversed, similar to atexit.
for atexit_call in reversed(_threading_atexits):
atexit_call()
    # Join all non-daemon threads
while True:
with _shutdown_locks_lock:
locks = list(_shutdown_locks)
_shutdown_locks.clear()
if not locks:
break
for lock in locks:
        # mimic Thread.join()
lock.acquire()
lock.release()
# new threads can be spawned while we were waiting for the other
# threads to complete
def main_thread():
"""Return the main thread object.
In normal conditions, the main thread is the thread from which the
Python interpreter was started.
"""
return _main_thread
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
"""
Cleanup threading module state that should not exist after a fork.
"""
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock, _main_thread
global _shutdown_locks_lock, _shutdown_locks
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
try:
current = _active[get_ident()]
except KeyError:
# fork() was called in a thread which was not spawned
# by threading.Thread. For example, a thread spawned
# by thread.start_new_thread().
current = _MainThread()
_main_thread = current
# reset _shutdown() locks: threads re-register their _tstate_lock below
_shutdown_locks_lock = _allocate_lock()
_shutdown_locks = set()
with _active_limbo_lock:
# Dangling thread instances must still have their locks reset,
# because someone may join() them.
threads = set(_enumerate())
threads.update(_dangling)
for thread in threads:
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
thread._reset_internal_locks(True)
ident = get_ident()
thread._ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._reset_internal_locks(False)
thread._stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
if hasattr(_os, "register_at_fork"):
_os.register_at_fork(after_in_child=_after_fork)
| bsd-3-clause | -4,337,143,701,189,812,000 | 33.601198 | 92 | 0.61086 | false | 4.394639 | false | false | false |
darbula/django-form-designer | form_designer/migrations/0002_auto_20160216_1527.py | 1 | 1389 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('form_designer', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='formdefinition',
name='form_template_name',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='form template', choices=[(b'', 'Default'), (b'html/formdefinition/forms/as_p.html', 'as paragraphs'), (b'html/formdefinition/forms/cisco.html', 'Cisco')]),
preserve_default=True,
),
migrations.AlterField(
model_name='formdefinitionfield',
name='field_class',
field=models.CharField(max_length=100, verbose_name='field class', choices=[(b'riteh.core.form_designer.fields.TitleField', 'Title Field'), (b'django.forms.CharField', 'Text'), (b'django.forms.EmailField', 'E-mail address'), (b'django.forms.URLField', 'Web address'), (b'django.forms.IntegerField', 'Number'), (b'django.forms.DecimalField', 'Decimal number'), (b'django.forms.BooleanField', 'Yes/No'), (b'django.forms.DateField', 'Date'), (b'django.forms.ChoiceField', 'Choice'), (b'django.forms.MultipleChoiceField', 'Multiple Choice'), (b'django.forms.FileField', 'File')]),
preserve_default=True,
),
]
| bsd-3-clause | 1,623,216,130,215,411,700 | 52.423077 | 588 | 0.648668 | false | 3.795082 | false | false | false |
schlegelp/pymaid | setup.py | 1 | 1845 | from setuptools import setup, find_packages
import re
VERSIONFILE = "pymaid/__init__.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
with open('requirements.txt') as f:
requirements = f.read().splitlines()
requirements = [l for l in requirements if not l.startswith('#')]
setup(
name='python-catmaid',
version=verstr,
packages=find_packages(),
license='GNU GPL V3',
description='Python interface to CATMAID servers',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/schlegelp/pymaid',
project_urls={
"Documentation": "http://pymaid.readthedocs.io",
"Source": "https://github.com/schlegelp/pymaid",
"Changelog": "https://pymaid.readthedocs.io/en/latest/source/whats_new.html",
},
author='Philipp Schlegel',
author_email='[email protected]',
keywords='CATMAID interface neuron navis',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
install_requires=requirements,
extras_require={'extras': ['fuzzywuzzy[speedup]~=0.17.0',
'ujson~=1.35']},
python_requires='>=3.6',
zip_safe=False
)
| gpl-3.0 | 5,979,263,114,792,834,000 | 33.166667 | 85 | 0.6271 | false | 3.520992 | false | false | false |
braddockcg/internet-in-a-box | iiab/whoosh_search.py | 1 | 3339 | import os
from whoosh.qparser import MultifieldParser
from whoosh import scoring
from .whoosh_multi_field_spelling_correction import MultiFieldQueryCorrector
import pagination_helper
def index_directory_path(base_path, zim_name):
"""Returns the directory where a ZIM file's index should be located, given
a base path where all the index files are located as well as a filename
or partial filename of the zim file.
"""
index_dir = os.path.join(base_path, os.path.splitext(os.path.basename(zim_name))[0])
return index_dir
def get_query_corrections(searcher, query, qstring):
"""
Suggest alternate spelling for search terms by searching each column with
spelling correction support in turn.
:param searcher: whoosh searcher object
:param query: whoosh query object
:param qstring: search string that was passed to the query object
:returns: MultiFieldQueryCorrector with one corrector for each corrected column
"""
fieldnames = [name for name, field in searcher.schema.items() if field.spelling]
correctors = {}
for fieldname in fieldnames:
if fieldname not in correctors:
correctors[fieldname] = searcher.corrector(fieldname)
terms = []
for token in query.all_tokens():
if token.fieldname in correctors:
terms.append((token.fieldname, token.text))
return MultiFieldQueryCorrector(correctors, terms, prefix=2, maxdist=1).correct_query(query, qstring)
def deduplicate_corrections(corrections):
"""
Return list of correction that omits entries where the query is unmodified
:param corrections: list of Corrector objects
:returns: list of Corrector objects
"""
# Using values from a dictionary comprehension rather than a list comprehension in order to deduplicate
#return {c.string : c for c in corrections if c.original_query != c.query}.values()
# We can't use dictionary comprehension because we are stuck on python 2.6 for Debian stable
return dict((c.string, c) for c in corrections if c.original_query != c.query).values()
def paginated_search(ix, search_columns, query_text, page=1, pagelen=20, sort_column=None, weighting=scoring.BM25F):
"""
    Return a tuple of (pagination object, spelling suggestions): the first
    element emulates an SQLAlchemy pagination object, the second is a list of
    corrected query strings.
    pagelen specifies the number of hits per page.
    page specifies the page of results (the first page is 1).
"""
query_text = unicode(query_text) # Must be unicode
with ix.searcher(weighting=weighting) as searcher:
query = MultifieldParser(search_columns, ix.schema).parse(query_text)
try:
# search_page returns whoosh.searching.ResultsPage
results = searcher.search_page(query, page, pagelen=pagelen, sortedby=sort_column)
total = results.total
except ValueError: # Invalid page number
results = []
total = 0
paginate = pagination_helper.Pagination(page, pagelen, total, [dict(r.items()) for r in results])
corrections = deduplicate_corrections(get_query_corrections(searcher, query, query_text)) # list of Corrector objects
#hf = whoosh.highlight.HtmlFormatter(classname="change")
#html = corrections.format_string(hf)
return (paginate, [c.string for c in corrections])
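# Illustrative call (the index location and column names are assumptions, not
# part of this module; the query is deliberately misspelled to exercise the
# spelling suggestions):
#
#   import whoosh.index
#   ix = whoosh.index.open_dir(index_directory_path('/srv/indexes', 'wikipedia.zim'))
#   results, suggestions = paginated_search(ix, ['title', 'text'], u'qeury', page=1)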
| bsd-2-clause | -3,125,193,210,656,872,000 | 43.52 | 126 | 0.712788 | false | 4.008403 | false | false | false |
copotron/car-control | datacollection/prius/log.py | 1 | 1840 | # Copyright (C) 2017 Swift Navigation Inc.
# Contact: Swift Navigation <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
the :mod:`log` script contains a basic example of reading SBP messages from a
TCP connection, decoding POS_LLH messages and printing their fields out as
CSV.
"""
import argparse
from sbp.client.drivers.network_drivers import TCPDriver
from sbp.client import Handler, Framer
from sbp.navigation import SBP_MSG_BASELINE_NED, SBP_MSG_POS_LLH
def main():
parser = argparse.ArgumentParser(
description="Swift Navigation SBP Example.")
parser.add_argument(
"-a",
"--host",
default='localhost',
help="specify the host address.")
parser.add_argument(
"-p",
"--port",
default=55555,
help="specify the port to use.")
args = parser.parse_args()
# Open a connection to Piksi using TCP
with TCPDriver(args.host, args.port) as driver:
with Handler(Framer(driver.read, None, verbose=True)) as source:
try:
for msg, metadata in source.filter(SBP_MSG_POS_LLH):
                # Print out the tow, lat/lon/height position and accuracy/status fields as CSV
print("%d,%.16f,%.16f,%.16f,%d,%d,%d,%d" % (msg.tow, msg.lat, msg.lon,
msg.height, msg.h_accuracy, msg.v_accuracy, msg.n_sats, msg.flags))
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
| gpl-3.0 | -1,142,508,516,065,610,600 | 35.8 | 113 | 0.643478 | false | 3.817427 | false | false | false |
PDXCodeCoop/account | models.py | 1 | 1688 | from django.db import models
from django import forms
from django.contrib.auth.models import User
from django.utils import timezone
from easy_thumbnails.fields import ThumbnailerImageField
#A User's Personal Info
class Profile(models.Model):
user = models.OneToOneField(User)
photo = ThumbnailerImageField(upload_to='profiles', blank=True)
    #Location
city = models.CharField(max_length=50, null=True, blank=True)
state = models.CharField(max_length=50, null=True, blank=True)
#Personal Info
about = models.TextField(max_length=1000, null=True, blank=True)
def __unicode__(self):
return '%s: %s %s' % (self.user.username,self.user.first_name, self.user.last_name)
class Setting(models.Model):
PALETTE_THEMES = (
('DARK', 'Dark Theme'),
('LITE', 'Light Theme'),
)
user = models.OneToOneField(User)
color_palette = models.CharField(max_length=4, choices=PALETTE_THEMES, default='DARK')
def __unicode__(self):
return self.user.username
#Forms
class UserForm(forms.ModelForm):
confirm_password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
widgets = {
'password': forms.PasswordInput(),
}
fields = ('username', 'email', 'password')
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('about', 'city', 'state',)
widgets = {
'about': forms.Textarea(attrs={'class':'form-control', 'rows':'5'}),
'city': forms.TextInput(attrs={'class':'form-control'}),
'state': forms.TextInput(attrs={'class':'form-control'}),
}
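# Minimal usage sketch for the forms above (illustrative, e.g. inside a view;
# `request.user.profile` relies on the default reverse accessor of the
# OneToOneField):
#   form = ProfileForm(request.POST, instance=request.user.profile)
#   if form.is_valid():
#       form.save()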
| apache-2.0 | 601,757,445,029,764,600 | 33.44898 | 91 | 0.637441 | false | 3.793258 | false | false | false |
Arkshine/AdminFreeLook | support/generate_headers.py | 1 | 2611 | # vim: set ts=8 sts=2 sw=2 tw=99 et:
import re
import os, sys
import subprocess
argv = sys.argv[1:]
if len(argv) < 2:
sys.stderr.write('Usage: generate_headers.py <source_path> <output_folder>\n')
sys.exit(1)
SourceFolder = os.path.abspath(os.path.normpath(argv[0]))
OutputFolder = os.path.normpath(argv[1])
class FolderChanger:
def __init__(self, folder):
self.old = os.getcwd()
self.new = folder
def __enter__(self):
if self.new:
os.chdir(self.new)
def __exit__(self, type, value, traceback):
os.chdir(self.old)
def run_and_return(argv):
# Python 2.6 doesn't have check_output.
if hasattr(subprocess, 'check_output'):
text = subprocess.check_output(argv)
if str != bytes:
text = str(text, 'utf-8')
else:
p = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, ignored = p.communicate()
rval = p.poll()
if rval:
raise subprocess.CalledProcessError(rval, argv)
text = output.decode('utf8')
return text.strip()
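# e.g. run_and_return(['git', 'rev-parse', '--short', 'HEAD']) -> "1a2b3c4"
# (illustrative output; works for any command that writes to stdout)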
def get_git_version():
revision_count = run_and_return(['git', 'rev-list', '--count', 'HEAD'])
revision_hash = run_and_return(['git', 'log', '--pretty=format:%h:%H', '-n', '1'])
shorthash, longhash = revision_hash.split(':')
return revision_count, shorthash, longhash
def output_version_headers():
with FolderChanger(SourceFolder):
count, shorthash, longhash = get_git_version()
with open(os.path.join(SourceFolder, 'product.version')) as fp:
contents = fp.read()
    m = re.match(r'(\d+)\.(\d+)\.(\d+)-?(.*)', contents)
if m == None:
        raise Exception('Could not determine product version')
major, minor, release, tag = m.groups()
product = "{0}.{1}.{2}-rev.{3}".format(major, minor, release, count)
fullstring = product
if tag != "":
fullstring += "-{0}".format(tag)
with open(os.path.join(OutputFolder, 'module_version_auto.h'), 'w') as fp:
fp.write("""
#ifndef _EXTENSION_AUTO_VERSION_INFORMATION_H_
#define _EXTENSION_AUTO_VERSION_INFORMATION_H_
#define EXTENSION_BUILD_TAG \"{0}\"
#define EXTENSION_BUILD_CSET \"{1}\"
#define EXTENSION_BUILD_MAJOR \"{2}\"
#define EXTENSION_BUILD_MINOR \"{3}\"
#define EXTENSION_BUILD_RELEASE \"{4}\"
#define EXTENSION_BUILD_LOCAL_REV \"{6}\"
#define EXTENSION_BUILD_UNIQUEID EXTENSION_BUILD_LOCAL_REV \":\" EXTENSION_BUILD_CSET
#define EXTENSION_VERSION_STRING \"{5}\"
#define EXTENSION_VERSION_FILE {2},{3},{4},{6}
#endif // _EXTENSION_AUTO_VERSION_INFORMATION_H_
""".format(tag, shorthash, major, minor, release, fullstring, count))
output_version_headers()
| gpl-2.0 | -469,546,346,124,041,700 | 30.457831 | 87 | 0.654538 | false | 3.050234 | false | false | false |
genonfire/bbgo | recipes/migrations/0001_initial.py | 1 | 1108 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=16)),
('order', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
('order', models.IntegerField(default=1)),
('recipe', models.TextField()),
('image', models.ImageField(upload_to=b'recipes/', blank=True)),
('category', models.ForeignKey(to='recipes.Category', on_delete=models.CASCADE)),
],
),
]
| mit | 5,277,985,640,252,880,000 | 33.625 | 114 | 0.544224 | false | 4.504065 | false | false | false |
anarcoder/google_explorer | plugins/apache_rce_struts2_cve_2017_5638.py | 1 | 3545 | import os
from lxml import html as lh
from queue import Queue
from urllib.parse import urlparse
from threading import Thread
import requests
import threading
from requests import get
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
lock = threading.Lock()
class ApacheStruts2_CVE_2017_5638():
def __init__(self, filename):
self.filename = filename
self.urls = self.ap_cve()
@staticmethod
def banner():
os.system('clear')
print("\n")
print(" █████╗ ███╗ ██╗ █████╗ ██████╗ ██████╗ ██████╗ ██████╗ ███████╗██████╗ ")
print("██╔══██╗████╗ ██║██╔══██╗██╔══██╗██╔════╝██╔═══██╗██╔══██╗██╔════╝██╔══██╗")
print("███████║██╔██╗ ██║███████║██████╔╝██║ ██║ ██║██║ ██║█████╗ ██████╔╝")
print("██╔══██║██║╚██╗██║██╔══██║██╔══██╗██║ ██║ ██║██║ ██║██╔══╝ ██╔══██╗")
print("██║ ██║██║ ╚████║██║ ██║██║ ██║╚██████╗╚██████╔╝██████╔╝███████╗██║ ██║")
print("╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝")
print(" Apache Struts2 CVE 2017 5638 Checker - anarcoder at protonmail.com\n")
def remove_duplicate_targets(self):
results = [line.rstrip('\n') for line in open(self.filename)]
url_lists = []
for url in results:
try:
vuln = ['.action', '.do']
for v in vuln:
if v in url:
urlp = url.split(v)[0]
url_lists.append('python2 exploits/struntsrce.py --target='+urlp+v+' --test')
            except Exception:
                pass
url_lists = set(url_lists)
url_lists = list(url_lists)
return url_lists
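    # Example transformation (illustrative URL): a result line such as
    #   http://host/app/login.action?param=1
    # yields the work item
    #   python2 exploits/struntsrce.py --target=http://host/app/login.action --test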
def check_vuln(self, q):
while True:
#with lock:
url = q.get()
os.system(url)
q.task_done()
def ap_cve(self):
self.banner()
# Removing duplicate targets
url_lists = self.remove_duplicate_targets()
print(len(url_lists))
#for url in url_lists:
# print(url.rstrip())
# My Queue
q = Queue(maxsize=0)
# Number of threads
num_threads = 10
for url in url_lists:
q.put(url)
# My threads
print('[*] Starting evil threads =)...\n')
for i in range(num_threads):
worker = Thread(target=self.check_vuln, args=(q,))
worker.setDaemon(True)
worker.start()
q.join()
def main():
filename = 'results_google_search.txt'
#print(os.getcwd())
ApacheStruts2_CVE_2017_5638(filename)
if __name__ == '__main__':
main()
| mit | -3,332,205,755,041,157,000 | 29.450549 | 101 | 0.452183 | false | 2.730049 | false | false | false |
ChristosChristofidis/h2o-3 | scripts/run.py | 1 | 66029 | #!/usr/bin/python
import sys
import os
import shutil
import signal
import time
import random
import getpass
import re
import subprocess
import ConfigParser
def is_python_test_file(file_name):
"""
Return True if file_name matches a regexp for a python test. False otherwise.
"""
if (file_name == "test_config.py"):
return False
if re.match("^pyunit.*\.py$", file_name):
return True
if (re.match("^test.*\.py$", file_name)):
return True
return False
def is_python_file(file_name):
"""
Return True if file_name matches a regexp for a python program in general. False otherwise.
This is a separate function because it's useful to have the scan-for-test operation in
build_test_list() be separated from running the test.
That allows us to run things explicitly named using the --test option. Such as:
run.py --wipeall --numclouds 1 --test generate_rest_api_docs.py
"""
if (file_name == "test_config.py"):
return False
if (re.match("^.*\.py$", file_name)):
return True
return False
def is_javascript_test_file(file_name):
"""
Return True if file_name matches a regexp for a javascript test. False otherwise.
"""
if (re.match("^.*test.*\.js$", file_name)):
return True
return False
def is_runit_test_file(file_name):
"""
Return True if file_name matches a regexp for a R test. False otherwise.
"""
if (file_name == "h2o-runit.R"):
return False
if (re.match("^runit.*\.[rR]$", file_name)):
return True
return False
class H2OUseCloudNode:
"""
A class representing one node in an H2O cloud which was specified by the user.
Don't try to build or tear down this kind of node.
use_ip: The given ip of the cloud.
use_port: The given port of the cloud.
"""
def __init__(self, use_ip, use_port):
self.use_ip = use_ip
self.use_port = use_port
def start(self):
pass
def stop(self):
pass
def terminate(self):
pass
def get_ip(self):
return self.use_ip
def get_port(self):
return self.use_port
class H2OUseCloud:
"""
A class representing an H2O clouds which was specified by the user.
Don't try to build or tear down this kind of cloud.
"""
def __init__(self, cloud_num, use_ip, use_port):
self.cloud_num = cloud_num
self.use_ip = use_ip
self.use_port = use_port
self.nodes = []
node = H2OUseCloudNode(self.use_ip, self.use_port)
self.nodes.append(node)
def start(self):
pass
def wait_for_cloud_to_be_up(self):
pass
def stop(self):
pass
def terminate(self):
pass
def get_ip(self):
node = self.nodes[0]
return node.get_ip()
def get_port(self):
node = self.nodes[0]
return node.get_port()
class H2OCloudNode:
"""
A class representing one node in an H2O cloud.
Note that the base_port is only a request for H2O.
H2O may choose to ignore our request and pick any port it likes.
So we have to scrape the real port number from stdout as part of cloud startup.
port: The actual port chosen at run time.
pid: The process id of the node.
output_file_name: Where stdout and stderr go. They are merged.
child: subprocess.Popen object.
terminated: Only from a signal. Not normal shutdown.
"""
def __init__(self, is_client, cloud_num, nodes_per_cloud, node_num, cloud_name, h2o_jar, ip, base_port,
xmx, output_dir):
"""
Create a node in a cloud.
@param is_client: Whether this node is an H2O client node (vs a worker node) or not.
@param cloud_num: Dense 0-based cloud index number.
@param nodes_per_cloud: How many H2O java instances are in a cloud. Clouds are symmetric.
@param node_num: This node's dense 0-based node index number.
@param cloud_name: The H2O -name command-line argument.
@param h2o_jar: Path to H2O jar file.
@param base_port: The starting port number we are trying to get our nodes to listen on.
@param xmx: Java memory parameter.
@param output_dir: The directory where we can create an output file for this process.
@return: The node object.
"""
self.is_client = is_client
self.cloud_num = cloud_num
self.nodes_per_cloud = nodes_per_cloud
self.node_num = node_num
self.cloud_name = cloud_name
self.h2o_jar = h2o_jar
self.ip = ip
self.base_port = base_port
self.xmx = xmx
self.output_dir = output_dir
self.port = -1
self.pid = -1
self.output_file_name = ""
self.child = None
self.terminated = False
# Choose my base port number here. All math is done here. Every node has the same
        # base_port and calculates its own my_base_port.
ports_per_node = 2
self.my_base_port = \
self.base_port + \
(self.cloud_num * self.nodes_per_cloud * ports_per_node) + \
(self.node_num * ports_per_node)
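        # Worked example (each node reserves ports_per_node=2 ports): with
        # base_port=40000 and nodes_per_cloud=1, cloud 0 node 0 gets 40000,
        # cloud 1 node 0 gets 40002, cloud 2 node 0 gets 40004.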
def start(self):
"""
Start one node of H2O.
(Stash away the self.child and self.pid internally here.)
@return: none
"""
# there is no hdfs currently in ec2, except s3n/hdfs
# the core-site.xml provides s3n info
        # it's possible that we can just always hard-wire the hdfs version
# to match the cdh3 cluster we're hard-wiring tests to
# i.e. it won't make s3n/s3 break on ec2
if (self.is_client):
main_class = "water.H2OClientApp"
else:
main_class = "water.H2OApp"
if "JAVA_HOME" in os.environ:
java = os.environ["JAVA_HOME"] + "/bin/java"
else:
java = "java"
cmd = [java,
# "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005",
"-Xmx" + self.xmx,
"-ea",
"-cp", self.h2o_jar,
main_class,
"-name", self.cloud_name,
"-baseport", str(self.my_base_port),
"-ga_opt_out"]
# Add S3N credentials to cmd if they exist.
# ec2_hdfs_config_file_name = os.path.expanduser("~/.ec2/core-site.xml")
# if (os.path.exists(ec2_hdfs_config_file_name)):
# cmd.append("-hdfs_config")
# cmd.append(ec2_hdfs_config_file_name)
self.output_file_name = \
os.path.join(self.output_dir, "java_" + str(self.cloud_num) + "_" + str(self.node_num) + ".out.txt")
f = open(self.output_file_name, "w")
if g_convenient:
cwd = os.getcwd()
here = os.path.abspath(os.path.dirname(__file__))
there = os.path.abspath(os.path.join(here, ".."))
os.chdir(there)
self.child = subprocess.Popen(args=cmd,
stdout=f,
stderr=subprocess.STDOUT,
cwd=there)
os.chdir(cwd)
else:
self.child = subprocess.Popen(args=cmd,
stdout=f,
stderr=subprocess.STDOUT,
cwd=self.output_dir)
self.pid = self.child.pid
print("+ CMD: " + ' '.join(cmd))
def scrape_port_from_stdout(self):
"""
Look at the stdout log and figure out which port the JVM chose.
Write this to self.port.
This call is blocking.
Exit if this fails.
@return: none
"""
retries = 30
while (retries > 0):
if (self.terminated):
return
f = open(self.output_file_name, "r")
s = f.readline()
while (len(s) > 0):
if (self.terminated):
return
match_groups = re.search(r"Listening for HTTP and REST traffic on http.*://(\S+):(\d+)", s)
if (match_groups is not None):
port = match_groups.group(2)
if (port is not None):
self.port = port
f.close()
print("H2O Cloud {} Node {} started with output file {}".format(self.cloud_num,
self.node_num,
self.output_file_name))
return
s = f.readline()
f.close()
retries -= 1
if (self.terminated):
return
time.sleep(1)
print("")
print("ERROR: Too many retries starting cloud.")
print("")
sys.exit(1)
def scrape_cloudsize_from_stdout(self, nodes_per_cloud):
"""
Look at the stdout log and wait until the cloud of proper size is formed.
This call is blocking.
Exit if this fails.
@return: none
"""
retries = 60
while (retries > 0):
if (self.terminated):
return
f = open(self.output_file_name, "r")
s = f.readline()
while (len(s) > 0):
if (self.terminated):
return
match_groups = re.search(r"Cloud of size (\d+) formed", s)
if (match_groups is not None):
size = match_groups.group(1)
if (size is not None):
size = int(size)
if (size == nodes_per_cloud):
f.close()
return
s = f.readline()
f.close()
retries -= 1
if (self.terminated):
return
time.sleep(1)
print("")
print("ERROR: Too many retries starting cloud.")
print("")
sys.exit(1)
def stop(self):
"""
Normal node shutdown.
Ignore failures for now.
@return: none
"""
if (self.pid > 0):
print("Killing JVM with PID {}".format(self.pid))
try:
self.child.terminate()
self.child.wait()
except OSError:
pass
self.pid = -1
def terminate(self):
"""
Terminate a running node. (Due to a signal.)
@return: none
"""
self.terminated = True
self.stop()
def get_ip(self):
""" Return the ip address this node is really listening on. """
return self.ip
def get_port(self):
""" Return the port this node is really listening on. """
return self.port
def __str__(self):
s = ""
s += " node {}\n".format(self.node_num)
s += " xmx: {}\n".format(self.xmx)
s += " my_base_port: {}\n".format(self.my_base_port)
s += " port: {}\n".format(self.port)
s += " pid: {}\n".format(self.pid)
return s
class H2OCloud:
"""
A class representing one of the H2O clouds.
"""
def __init__(self, cloud_num, use_client, nodes_per_cloud, h2o_jar, base_port, xmx, output_dir):
"""
Create a cloud.
See node definition above for argument descriptions.
@return: The cloud object.
"""
self.use_client = use_client
self.cloud_num = cloud_num
self.nodes_per_cloud = nodes_per_cloud
self.h2o_jar = h2o_jar
self.base_port = base_port
self.xmx = xmx
self.output_dir = output_dir
# Randomly choose a seven digit cloud number.
n = random.randint(1000000, 9999999)
user = getpass.getuser()
user = ''.join(user.split())
self.cloud_name = "H2O_runit_{}_{}".format(user, n)
self.nodes = []
self.client_nodes = []
self.jobs_run = 0
if (use_client):
actual_nodes_per_cloud = self.nodes_per_cloud + 1
else:
actual_nodes_per_cloud = self.nodes_per_cloud
for node_num in range(actual_nodes_per_cloud):
is_client = False
if (use_client):
if (node_num == (actual_nodes_per_cloud - 1)):
is_client = True
node = H2OCloudNode(is_client,
self.cloud_num, actual_nodes_per_cloud, node_num,
self.cloud_name,
self.h2o_jar,
"127.0.0.1", self.base_port,
self.xmx, self.output_dir)
if (is_client):
self.client_nodes.append(node)
else:
self.nodes.append(node)
def start(self):
"""
Start H2O cloud.
The cloud is not up until wait_for_cloud_to_be_up() is called and returns.
@return: none
"""
for node in self.nodes:
node.start()
for node in self.client_nodes:
node.start()
def wait_for_cloud_to_be_up(self):
"""
Blocking call ensuring the cloud is available.
@return: none
"""
self._scrape_port_from_stdout()
self._scrape_cloudsize_from_stdout()
def stop(self):
"""
Normal cloud shutdown.
@return: none
"""
for node in self.nodes:
node.stop()
for node in self.client_nodes:
node.stop()
def terminate(self):
"""
Terminate a running cloud. (Due to a signal.)
@return: none
"""
for node in self.client_nodes:
node.terminate()
for node in self.nodes:
node.terminate()
def get_ip(self):
""" Return an ip to use to talk to this cloud. """
if (len(self.client_nodes) > 0):
node = self.client_nodes[0]
else:
node = self.nodes[0]
return node.get_ip()
def get_port(self):
""" Return a port to use to talk to this cloud. """
if (len(self.client_nodes) > 0):
node = self.client_nodes[0]
else:
node = self.nodes[0]
return node.get_port()
def _scrape_port_from_stdout(self):
for node in self.nodes:
node.scrape_port_from_stdout()
for node in self.client_nodes:
node.scrape_port_from_stdout()
def _scrape_cloudsize_from_stdout(self):
for node in self.nodes:
node.scrape_cloudsize_from_stdout(self.nodes_per_cloud)
for node in self.client_nodes:
node.scrape_cloudsize_from_stdout(self.nodes_per_cloud)
def __str__(self):
s = ""
s += "cloud {}\n".format(self.cloud_num)
s += " name: {}\n".format(self.cloud_name)
s += " jobs_run: {}\n".format(self.jobs_run)
for node in self.nodes:
s += str(node)
for node in self.client_nodes:
s += str(node)
return s
class Test:
"""
A class representing one Test.
cancelled: Don't start this test.
terminated: Test killed due to signal.
returncode: Exit code of child.
pid: Process id of the test.
ip: IP of cloud to run test.
port: Port of cloud to run test.
child: subprocess.Popen object.
"""
@staticmethod
def test_did_not_complete():
"""
returncode marker to know if the test ran or not.
"""
return -9999999
def __init__(self, test_dir, test_short_dir, test_name, output_dir):
"""
Create a Test.
@param test_dir: Full absolute path to the test directory.
@param test_short_dir: Path from h2o/R/tests to the test directory.
@param test_name: Test filename with the directory removed.
@param output_dir: The directory where we can create an output file for this process.
@return: The test object.
"""
self.test_dir = test_dir
self.test_short_dir = test_short_dir
self.test_name = test_name
self.output_dir = output_dir
self.output_file_name = ""
self.cancelled = False
self.terminated = False
self.returncode = Test.test_did_not_complete()
self.start_seconds = -1
self.pid = -1
self.ip = None
self.port = -1
self.child = None
def start(self, ip, port):
"""
Start the test in a non-blocking fashion.
@param ip: IP address of cloud to run on.
@param port: Port of cloud to run on.
@return: none
"""
if (self.cancelled or self.terminated):
return
self.start_seconds = time.time()
self.ip = ip
self.port = port
if (is_python_test_file(self.test_name)):
cmd = ["python",
self.test_name,
"--usecloud",
self.ip + ":" + str(self.port)]
elif (is_python_file(self.test_name)):
cmd = ["python",
self.test_name,
"--usecloud",
self.ip + ":" + str(self.port)]
elif (is_runit_test_file(self.test_name)):
cmd = ["R",
"-f",
self.test_name,
"--args",
self.ip + ":" + str(self.port)]
elif (is_javascript_test_file(self.test_name)):
cmd = ["phantomjs",
self.test_name,
"--host",
self.ip + ":" + str(self.port),
"--timeout",
str(g_phantomjs_to),
"--packs",
g_phantomjs_packs]
else:
print("")
print("ERROR: Test runner failure with test: " + self.test_name)
print("")
sys.exit(1)
test_short_dir_with_no_slashes = re.sub(r'[\\/]', "_", self.test_short_dir)
if (len(test_short_dir_with_no_slashes) > 0):
test_short_dir_with_no_slashes += "_"
self.output_file_name = \
os.path.join(self.output_dir, test_short_dir_with_no_slashes + self.test_name + ".out.txt")
f = open(self.output_file_name, "w")
self.child = subprocess.Popen(args=cmd,
stdout=f,
stderr=subprocess.STDOUT,
cwd=self.test_dir)
self.pid = self.child.pid
# print("+ CMD: " + ' '.join(cmd))
def is_completed(self):
"""
Check if test has completed.
This has side effects and MUST be called for the normal test queueing to work.
Specifically, child.poll().
@return: True if the test completed, False otherwise.
"""
child = self.child
if (child is None):
return False
child.poll()
if (child.returncode is None):
return False
self.pid = -1
self.returncode = child.returncode
return True
def cancel(self):
"""
Mark this test as cancelled so it never tries to start.
@return: none
"""
if (self.pid <= 0):
self.cancelled = True
def terminate_if_started(self):
"""
Terminate a running test. (Due to a signal.)
@return: none
"""
if (self.pid > 0):
self.terminate()
def terminate(self):
"""
Terminate a running test. (Due to a signal.)
@return: none
"""
self.terminated = True
if (self.pid > 0):
print("Killing Test {} with PID {}".format(os.path.join(self.test_short_dir, self.test_name), self.pid))
try:
self.child.terminate()
except OSError:
pass
self.pid = -1
def get_test_dir_file_name(self):
"""
@return: The full absolute path of this test.
"""
return os.path.join(self.test_dir, self.test_name)
def get_test_name(self):
"""
@return: The file name (no directory) of this test.
"""
return self.test_name
def get_seed_used(self):
"""
@return: The seed used by this test.
"""
return self._scrape_output_for_seed()
def get_ip(self):
"""
@return: IP of the cloud where this test ran.
"""
return self.ip
def get_port(self):
"""
@return: Integer port number of the cloud where this test ran.
"""
return int(self.port)
def get_passed(self):
"""
@return: True if the test passed, False otherwise.
"""
return (self.returncode == 0)
def get_nopass(self, nopass):
"""
        Some tests are known not to pass yet, and even if they don't pass we don't want
to fail the overall regression PASS/FAIL status.
@return: True if the test has been marked as NOPASS, False otherwise.
"""
a = re.compile("NOPASS")
return a.search(self.test_name) and not nopass
def get_nofeature(self, nopass):
"""
        Some tests are known not to pass yet, and even if they don't pass we don't want
to fail the overall regression PASS/FAIL status.
@return: True if the test has been marked as NOFEATURE, False otherwise.
"""
a = re.compile("NOFEATURE")
return a.search(self.test_name) and not nopass
def get_completed(self):
"""
@return: True if the test completed (pass or fail), False otherwise.
"""
return (self.returncode > Test.test_did_not_complete())
def get_terminated(self):
"""
For a test to be terminated it must have started and had a PID.
@return: True if the test was terminated, False otherwise.
"""
return self.terminated
def get_output_dir_file_name(self):
"""
@return: Full path to the output file which you can paste to a terminal window.
"""
return (os.path.join(self.output_dir, self.output_file_name))
def _scrape_output_for_seed(self):
"""
@return: The seed scraped from the output file.
"""
res = ""
with open(self.get_output_dir_file_name(), "r") as f:
for line in f:
if "SEED used" in line:
line = line.strip().split(' ')
res = line[-1]
break
return res
def __str__(self):
s = ""
s += "Test: {}/{}\n".format(self.test_dir, self.test_name)
return s
class TestRunner:
"""
A class for running tests.
The tests list contains an object for every test.
The tests_not_started list acts as a job queue.
The tests_running list is polled for jobs that have finished.
"""
def __init__(self,
test_root_dir,
use_cloud, use_cloud2, use_client, cloud_config, use_ip, use_port,
num_clouds, nodes_per_cloud, h2o_jar, base_port, xmx, output_dir,
failed_output_dir, path_to_tar, path_to_whl, produce_unit_reports, testreport_dir):
"""
Create a runner.
@param test_root_dir: h2o/R/tests directory.
@param use_cloud: Use this one user-specified cloud. Overrides num_clouds.
@param use_cloud2: Use the cloud_config to define the list of H2O clouds.
@param cloud_config: (if use_cloud2) the config file listing the H2O clouds.
@param use_ip: (if use_cloud) IP of one cloud to use.
@param use_port: (if use_cloud) Port of one cloud to use.
@param num_clouds: Number of H2O clouds to start.
@param nodes_per_cloud: Number of H2O nodes to start per cloud.
@param h2o_jar: Path to H2O jar file to run.
@param base_port: Base H2O port (e.g. 54321) to start choosing from.
@param xmx: Java -Xmx parameter.
@param output_dir: Directory for output files.
@param failed_output_dir: Directory to copy failed test output.
        @param path_to_tar: Path to the R package tar file to install (or None).
        @param path_to_whl: Path to the Python wheel file to install (or None).
@param produce_unit_reports: if true then runner produce xUnit test reports for Jenkins
@param testreport_dir: directory to put xUnit test reports for Jenkins (should follow build system conventions)
@return: The runner object.
"""
self.test_root_dir = test_root_dir
self.use_cloud = use_cloud
self.use_cloud2 = use_cloud2
self.use_client = use_client
# Valid if use_cloud is True
self.use_ip = use_ip
self.use_port = use_port
# Valid if use_cloud is False
self.num_clouds = num_clouds
self.nodes_per_cloud = nodes_per_cloud
self.h2o_jar = h2o_jar
self.base_port = base_port
self.output_dir = output_dir
self.failed_output_dir = failed_output_dir
self.produce_unit_reports = produce_unit_reports
self.testreport_dir = testreport_dir
self.start_seconds = time.time()
self.terminated = False
self.clouds = []
self.tests = []
self.tests_not_started = []
self.tests_running = []
self.regression_passed = False
self._create_output_dir()
self._create_failed_output_dir()
if produce_unit_reports:
self._create_testreport_dir()
self.nopass_counter = 0
self.nofeature_counter = 0
self.path_to_tar = path_to_tar
self.path_to_whl = path_to_whl
if (use_cloud):
node_num = 0
cloud = H2OUseCloud(node_num, use_ip, use_port)
self.clouds.append(cloud)
elif (use_cloud2):
clouds = TestRunner.read_config(cloud_config)
node_num = 0
for c in clouds:
cloud = H2OUseCloud(node_num, c[0], c[1])
self.clouds.append(cloud)
node_num += 1
else:
for i in range(self.num_clouds):
cloud = H2OCloud(i, self.use_client, self.nodes_per_cloud, h2o_jar, self.base_port, xmx,
self.output_dir)
self.clouds.append(cloud)
@staticmethod
def find_test(test_to_run):
"""
Be nice and try to help find the test if possible.
If the test is actually found without looking, then just use it.
        Otherwise, search from the current working directory down.
"""
if (os.path.exists(test_to_run)):
abspath_test = os.path.abspath(test_to_run)
return abspath_test
for d, subdirs, files in os.walk(os.getcwd()):
for f in files:
if (f == test_to_run):
return os.path.join(d, f)
# Not found, return the file, which will result in an error downstream when it can't be found.
print("")
print("ERROR: Test does not exist: " + test_to_run)
print("")
sys.exit(1)
@staticmethod
def read_config(config_file):
        clouds = []  # a list of lists. Inner lists have [ip, port]
cfg = ConfigParser.RawConfigParser()
cfg.read(config_file)
for s in cfg.sections():
items = cfg.items(s)
cloud = [items[0][1], int(items[1][1])]
clouds.append(cloud)
return clouds
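    # Illustrative cloud.cfg: read_config reads each section positionally
    # (first value as ip, second as port), so entry order matters.
    #
    #   [cloud0]
    #   ip = 192.168.1.10
    #   port = 54321
    #
    #   [cloud1]
    #   ip = 192.168.1.11
    #   port = 54321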
def read_test_list_file(self, test_list_file):
"""
Read in a test list file line by line. Each line in the file is a test
to add to the test run.
@param test_list_file: Filesystem path to a file with a list of tests to run.
@return: none
"""
try:
f = open(test_list_file, "r")
s = f.readline()
while (len(s) != 0):
stripped = s.strip()
if (len(stripped) == 0):
s = f.readline()
continue
if (stripped.startswith("#")):
s = f.readline()
continue
found_stripped = TestRunner.find_test(stripped)
self.add_test(found_stripped)
s = f.readline()
f.close()
except IOError as e:
print("")
print("ERROR: Failure reading test list: " + test_list_file)
print(" (errno {0}): {1}".format(e.errno, e.strerror))
print("")
sys.exit(1)
def build_test_list(self, test_group, run_small, run_medium, run_large, run_xlarge, nopass):
"""
Recursively find the list of tests to run and store them in the object.
Fills in self.tests and self.tests_not_started.
@param test_group: Name of the test group of tests to run.
@return: none
"""
if (self.terminated):
return
for root, dirs, files in os.walk(self.test_root_dir):
if (root.endswith("Util")):
continue
for f in files:
# Figure out if the current file under consideration is a test.
is_test = False
if (is_python_test_file(f)):
is_test = True
if (is_runit_test_file(f)):
is_test = True
if (not is_test):
continue
is_small = False
is_medium = False
is_large = False
is_xlarge = False
is_nopass = False
is_nofeature = False
if "xlarge" in f:
is_xlarge = True
elif "medium" in f:
is_medium = True
elif "large" in f:
is_large = True
else:
is_small = True
if "NOPASS" in f:
is_nopass = True
if "NOFEATURE" in f:
is_nofeature = True
if is_small and not run_small:
continue
if is_medium and not run_medium:
continue
if is_large and not run_large:
continue
if is_xlarge and not run_xlarge:
continue
if is_nopass and not nopass:
# skip all NOPASS tests for regular runs but still count the number of NOPASS tests
self.nopass_counter += 1
continue
if is_nofeature and not nopass:
# skip all NOFEATURE tests for regular runs but still count the number of NOFEATURE tests
self.nofeature_counter += 1
continue
if nopass and not is_nopass and not is_nofeature:
# if g_nopass flag is set, then ONLY run the NOPASS and NOFEATURE tests (skip all other tests)
continue
if test_group is not None:
test_short_dir = self._calc_test_short_dir(os.path.join(root, f))
if (test_group.lower() not in test_short_dir) and test_group.lower() not in f:
continue
self.add_test(os.path.join(root, f))
def add_test(self, test_path):
"""
Add one test to the list of tests to run.
@param test_path: File system path to the test.
@return: none
"""
abs_test_path = os.path.abspath(test_path)
abs_test_dir = os.path.dirname(abs_test_path)
test_file = os.path.basename(abs_test_path)
if (not os.path.exists(abs_test_path)):
print("")
print("ERROR: Test does not exist: " + abs_test_path)
print("")
sys.exit(1)
test_short_dir = self._calc_test_short_dir(test_path)
test = Test(abs_test_dir, test_short_dir, test_file, self.output_dir)
self.tests.append(test)
self.tests_not_started.append(test)
def start_clouds(self):
"""
Start all H2O clouds.
@return: none
"""
if (self.terminated):
return
if (self.use_cloud):
return
print("")
print("Starting clouds...")
print("")
for cloud in self.clouds:
if (self.terminated):
return
cloud.start()
print("")
print("Waiting for H2O nodes to come up...")
print("")
for cloud in self.clouds:
if (self.terminated):
return
cloud.wait_for_cloud_to_be_up()
def run_tests(self, nopass):
"""
Run all tests.
@return: none
"""
if (self.terminated):
return
if (self._have_some_r_tests()):
self._log("")
self._log("Setting up R H2O package...")
out_file_name = os.path.join(self.output_dir, "runnerSetupPackage.out.txt")
out = open(out_file_name, "w")
runner_setup_package_r = None
if (True):
possible_utils_parent_dir = self.test_root_dir
while (True):
possible_utils_dir = os.path.join(possible_utils_parent_dir,
os.path.join("h2o-r",
os.path.join("tests", "Utils")))
possible_runner_setup_package_r = os.path.join(possible_utils_dir, "runnerSetupPackage.R")
if (os.path.exists(possible_runner_setup_package_r)):
runner_setup_package_r = possible_runner_setup_package_r
break
next_possible_utils_parent_dir = os.path.dirname(possible_utils_parent_dir)
if (next_possible_utils_parent_dir == possible_utils_parent_dir):
break
possible_utils_parent_dir = next_possible_utils_parent_dir
if (runner_setup_package_r is None):
print("")
print("ERROR: runnerSetupPackage.R not found.")
print("")
sys.exit(1)
cmd = ["R",
"--quiet",
"-f",
runner_setup_package_r]
if self.path_to_tar is not None:
print "Using R TAR located at: " + self.path_to_tar
cmd += ["--args", self.path_to_tar]
child = subprocess.Popen(args=cmd,
stdout=out,
stderr=subprocess.STDOUT)
rv = child.wait()
if (self.terminated):
return
if (rv != 0):
print("")
print("ERROR: " + runner_setup_package_r + " failed.")
print(" (See " + out_file_name + ")")
print("")
sys.exit(1)
out.close()
elif self._have_some_py_tests() and self.path_to_whl is not None:
# basically only do this if we have a whl to install
self._log("")
self._log("Setting up Python H2O package...")
out_file_name = os.path.join(self.output_dir, "pythonSetup.out.txt")
out = open(out_file_name, "w")
cmd = ["pip", "install", self.path_to_whl, "--force-reinstall"]
child = subprocess.Popen(args=cmd,
stdout=out,
stderr=subprocess.STDOUT)
rv = child.wait()
if (self.terminated):
return
if (rv != 0):
print("")
print("ERROR: Python setup failed.")
print(" (See " + out_file_name + ")")
print("")
sys.exit(1)
out.close()
num_tests = len(self.tests)
num_nodes = self.num_clouds * self.nodes_per_cloud
self._log("")
if (self.use_client):
client_message = " (+ client mode)"
else:
client_message = ""
if (self.use_cloud):
self._log("Starting {} tests...".format(num_tests))
elif (self.use_cloud2):
self._log("Starting {} tests on {} clouds...".format(num_tests, len(self.clouds)))
else:
self._log("Starting {} tests on {} clouds with {} total H2O worker nodes{}...".format(num_tests,
self.num_clouds,
num_nodes,
client_message))
self._log("")
        # Start the first n tests, where n is the lesser of the total number of tests,
        # the total number of clouds, and a cap of 30 parallel test processes.
        start_count = min(len(self.tests_not_started), len(self.clouds), 30)
        if (g_use_cloud2):
            start_count = min(start_count, 75)  # redundant with the cap of 30 above
for i in range(start_count):
cloud = self.clouds[i]
ip = cloud.get_ip()
port = cloud.get_port()
self._start_next_test_on_ip_port(ip, port)
# As each test finishes, send a new one to the cloud that just freed up.
while (len(self.tests_not_started) > 0):
if (self.terminated):
return
completed_test = self._wait_for_one_test_to_complete()
if (self.terminated):
return
self._report_test_result(completed_test, nopass)
ip_of_completed_test = completed_test.get_ip()
port_of_completed_test = completed_test.get_port()
self._start_next_test_on_ip_port(ip_of_completed_test, port_of_completed_test)
# Wait for remaining running tests to complete.
while (len(self.tests_running) > 0):
if (self.terminated):
return
completed_test = self._wait_for_one_test_to_complete()
if (self.terminated):
return
self._report_test_result(completed_test, nopass)
def stop_clouds(self):
"""
Stop all H2O clouds.
@return: none
"""
if (self.terminated):
return
if (self.use_cloud or self.use_cloud2):
print("")
print("All tests completed...")
print("")
return
print("")
print("All tests completed; tearing down clouds...")
print("")
for cloud in self.clouds:
cloud.stop()
def report_summary(self, nopass):
"""
Report some summary information when the tests have finished running.
@return: none
"""
passed = 0
nopass_but_tolerate = 0
nofeature_but_tolerate = 0
failed = 0
notrun = 0
total = 0
true_fail_list = []
terminated_list = []
for test in self.tests:
if (test.get_passed()):
passed += 1
else:
if (test.get_nopass(nopass)):
nopass_but_tolerate += 1
if (test.get_nofeature(nopass)):
nofeature_but_tolerate += 1
if (test.get_completed()):
failed += 1
if (not test.get_nopass(nopass) and not test.get_nofeature(nopass)):
true_fail_list.append(test.get_test_name())
else:
notrun += 1
if (test.get_terminated()):
terminated_list.append(test.get_test_name())
total += 1
if ((passed + nopass_but_tolerate + nofeature_but_tolerate) == total):
self.regression_passed = True
else:
self.regression_passed = False
end_seconds = time.time()
delta_seconds = end_seconds - self.start_seconds
run = total - notrun
self._log("")
self._log("----------------------------------------------------------------------")
self._log("")
self._log("SUMMARY OF RESULTS")
self._log("")
self._log("----------------------------------------------------------------------")
self._log("")
self._log("Total tests: " + str(total))
self._log("Passed: " + str(passed))
self._log("Did not pass: " + str(failed))
self._log("Did not complete: " + str(notrun))
self._log("Tolerated NOPASS: " + str(nopass_but_tolerate))
self._log("Tolerated NOFEATURE: " + str(nofeature_but_tolerate))
self._log("NOPASS tests skipped: " + str(self.nopass_counter))
self._log("NOFEATURE tests skipped: " + str(self.nofeature_counter))
self._log("")
self._log("Total time: %.2f sec" % delta_seconds)
if (run > 0):
self._log("Time/completed test: %.2f sec" % (delta_seconds / run))
else:
self._log("Time/completed test: N/A")
self._log("")
if (len(true_fail_list) > 0):
self._log("True fail list: " + ", ".join(true_fail_list))
if (len(terminated_list) > 0):
self._log("Terminated list: " + ", ".join(terminated_list))
self._log("")
def terminate(self):
"""
Terminate all running clouds. (Due to a signal.)
@return: none
"""
self.terminated = True
for test in self.tests:
test.cancel()
for test in self.tests:
test.terminate_if_started()
for cloud in self.clouds:
cloud.terminate()
def get_regression_passed(self):
"""
Return whether the overall regression passed or not.
@return: true if the exit value should be 0, false otherwise.
"""
return self.regression_passed
# --------------------------------------------------------------------
# Private methods below this line.
# --------------------------------------------------------------------
def _calc_test_short_dir(self, test_path):
"""
Calculate directory of test relative to test_root_dir.
@param test_path: Path to test file.
@return: test_short_dir, relative directory containing test (relative to test_root_dir).
"""
abs_test_root_dir = os.path.abspath(self.test_root_dir)
abs_test_path = os.path.abspath(test_path)
abs_test_dir = os.path.dirname(abs_test_path)
test_short_dir = abs_test_dir
# Look to elide longest prefix first.
prefix = os.path.join(abs_test_root_dir, "")
if (test_short_dir.startswith(prefix)):
test_short_dir = test_short_dir.replace(prefix, "", 1)
prefix = abs_test_root_dir
if (test_short_dir.startswith(prefix)):
test_short_dir = test_short_dir.replace(prefix, "", 1)
return test_short_dir
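    # e.g. with test_root_dir "/home/u/h2o-3/h2o-r/tests" and a test file at
    # ".../tests/testdir_algos/glm/runit_GLM_basic.R", the short dir is
    # "testdir_algos/glm" (illustrative paths).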
def _have_some_r_tests(self):
"""
Do we have any R tests to run at all?
(There might be tests of a different language to run, even if there are no R tests.)
"""
for test in self.tests:
test_name = test.get_test_name()
if (is_runit_test_file(test_name)):
return True
return False
def _have_some_py_tests(self):
"""
dumb check for pyunits
"""
for test in self.tests:
test_name = test.get_test_name()
if is_python_test_file(test_name):
return True
return False
def _create_failed_output_dir(self):
try:
os.makedirs(self.failed_output_dir)
except OSError as e:
print("")
print("mkdir failed (errno {0}): {1}".format(e.errno, e.strerror))
print(" " + self.failed_output_dir)
print("")
print("(try adding --wipe)")
print("")
sys.exit(1)
def _create_output_dir(self):
try:
os.makedirs(self.output_dir)
except OSError as e:
print("")
print("mkdir failed (errno {0}): {1}".format(e.errno, e.strerror))
print(" " + self.output_dir)
print("")
print("(try adding --wipe)")
print("")
sys.exit(1)
def _create_testreport_dir(self):
try:
if not os.path.exists(self.testreport_dir):
os.makedirs(self.testreport_dir)
except OSError as e:
print("")
print("mkdir failed (errno {0}): {1}".format(e.errno, e.strerror))
print(" " + self.testreport_dir)
print("")
sys.exit(1)
def _start_next_test_on_ip_port(self, ip, port):
test = self.tests_not_started.pop(0)
self.tests_running.append(test)
test.start(ip, port)
def _wait_for_one_test_to_complete(self):
while (True):
for test in self.tests_running:
if (self.terminated):
return None
if (test.is_completed()):
self.tests_running.remove(test)
return test
if (self.terminated):
return
time.sleep(1)
def _report_test_result(self, test, nopass):
port = test.get_port()
now = time.time()
duration = now - test.start_seconds
test_name = test.get_test_name()
if (test.get_passed()):
s = "PASS %d %4ds %-60s" % (port, duration, test_name)
self._log(s)
if self.produce_unit_reports:
self._report_xunit_result("r_suite", test_name, duration, False)
else:
s = " FAIL %d %4ds %-60s %s %s" % \
(port, duration, test.get_test_name(), test.get_output_dir_file_name(), test.get_seed_used())
self._log(s)
f = self._get_failed_filehandle_for_appending()
f.write(test.get_test_dir_file_name() + "\n")
f.close()
# Report junit
if self.produce_unit_reports:
if not test.get_nopass(nopass):
self._report_xunit_result("r_suite", test_name, duration, False, "TestFailure", "Test failed",
"See {}".format(test.get_output_dir_file_name()))
else:
self._report_xunit_result("r_suite", test_name, duration, True)
# Copy failed test output into directory failed
if not test.get_nopass(nopass) and not test.get_nofeature(nopass):
shutil.copy(test.get_output_dir_file_name(), self.failed_output_dir)
# XSD schema for xunit reports is here; http://windyroad.com.au/dl/Open%20Source/JUnit.xsd
def _report_xunit_result(self, testsuite_name, testcase_name, testcase_runtime,
skipped=False, failure_type=None, failure_message=None, failure_description=None):
errors = 0
failures = 1 if failure_type else 0
skip = 1 if skipped else 0
failure = "" if not failure_type else """"<failure type="{}" message="{}">{}</failure>""" \
.format(failure_type, failure_message, failure_description)
xml_report = """<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="{testsuiteName}" tests="1" errors="{errors}" failures="{failures}" skip="{skip}">
<testcase classname="{testcaseClassName}" name="{testcaseName}" time="{testcaseRuntime}">
{failure}
</testcase>
</testsuite>
""".format(testsuiteName=testsuite_name, testcaseClassName=testcase_name, testcaseName=testcase_name,
testcaseRuntime=testcase_runtime, failure=failure,
errors=errors, failures=failures, skip=skip)
self._save_xunit_report(testsuite_name, testcase_name, xml_report)
def _save_xunit_report(self, testsuite, testcase, report):
f = self._get_testreport_filehandle(testsuite, testcase)
f.write(report)
f.close()
def _log(self, s):
f = self._get_summary_filehandle_for_appending()
print(s)
sys.stdout.flush()
f.write(s + "\n")
f.close()
def _get_summary_filehandle_for_appending(self):
summary_file_name = os.path.join(self.output_dir, "summary.txt")
f = open(summary_file_name, "a+")
return f
def _get_failed_filehandle_for_appending(self):
summary_file_name = os.path.join(self.output_dir, "failed.txt")
f = open(summary_file_name, "a+")
return f
def _get_testreport_filehandle(self, testsuite, testcase):
testreport_file_name = os.path.join(self.testreport_dir, "TEST_{0}_{1}.xml".format(testsuite, testcase))
f = open(testreport_file_name, "w+")
return f
def __str__(self):
s = "\n"
s += "test_root_dir: {}\n".format(self.test_root_dir)
s += "output_dir: {}\n".format(self.output_dir)
s += "h2o_jar: {}\n".format(self.h2o_jar)
s += "num_clouds: {}\n".format(self.num_clouds)
s += "nodes_per_cloud: {}\n".format(self.nodes_per_cloud)
s += "base_port: {}\n".format(self.base_port)
s += "\n"
for c in self.clouds:
s += str(c)
s += "\n"
# for t in self.tests:
# s += str(t)
return s
# --------------------------------------------------------------------
# Main program
# --------------------------------------------------------------------
# Global variables that can be set by the user.
g_script_name = ""
g_base_port = 40000
g_num_clouds = 5
g_nodes_per_cloud = 1
g_wipe_test_state = False
g_wipe_output_dir = False
g_test_to_run = None
g_test_list_file = None
g_test_group = None
g_run_small = True
g_run_medium = True
g_run_large = True
g_run_xlarge = True
g_use_cloud = False
g_use_cloud2 = False
g_use_client = False
g_config = None
g_use_ip = None
g_use_port = None
g_no_run = False
g_jvm_xmx = "1g"
g_nopass = False
g_convenient = False
g_path_to_h2o_jar = None
g_path_to_tar = None
g_path_to_whl = None
g_produce_unit_reports = True
g_phantomjs_to = 3600
g_phantomjs_packs = "examples"
# Global variables that are set internally.
g_output_dir = None
g_runner = None
g_handling_signal = False
def use(x):
""" Hack to remove compiler warning. """
if False:
print(x)
def signal_handler(signum, stackframe):
global g_runner
global g_handling_signal
use(stackframe)
if (g_handling_signal):
# Don't do this recursively.
return
g_handling_signal = True
print("")
print("----------------------------------------------------------------------")
print("")
print("SIGNAL CAUGHT (" + str(signum) + "). TEARING DOWN CLOUDS.")
print("")
print("----------------------------------------------------------------------")
g_runner.terminate()
def usage():
print("")
print("Usage: " + g_script_name + " [...options...]")
print("")
print(" (Output dir is: " + g_output_dir + ")")
print(" (Default number of clouds is: " + str(g_num_clouds) + ")")
print("")
print(" --wipeall Remove all prior test state before starting, particularly")
print(" random seeds.")
print(" (Removes master_seed file and all Rsandbox directories.")
print(" Also wipes the output dir before starting.)")
print("")
print(" --wipe Wipes the output dir before starting. Keeps old random seeds.")
print("")
print(" --baseport The first port at which H2O starts searching for free ports.")
print("")
print(" --numclouds The number of clouds to start.")
print(" Each test is randomly assigned to a cloud.")
print("")
print(" --numnodes The number of nodes in the cloud.")
print(" When this is specified, numclouds must be 1.")
print("")
print(" --test If you only want to run one test, specify it like this.")
print("")
print(" --testlist A file containing a list of tests to run (for example the")
print(" 'failed.txt' file from the output directory).")
print("")
print(" --testgroup Test a group of tests by function:")
print(" pca, glm, kmeans, gbm, rf, deeplearning, algos, golden, munging")
print("")
print(" --testsize Sizes (and by extension length) of tests to run:")
print(" s=small (seconds), m=medium (a minute or two), l=large (longer), x=xlarge (very big)")
print(" (Default is to run all tests.)")
print("")
print(" --usecloud ip:port of cloud to send tests to instead of starting clouds.")
print(" (When this is specified, numclouds is ignored.)")
print("")
print(" --usecloud2 cloud.cfg: Use a set clouds defined in cloud.config to run tests on.")
print(" (When this is specified, numclouds, numnodes, and usecloud are ignored.)")
print("")
print(" --client Send REST API commands through client mode.")
print("")
print(" --norun Perform side effects like wipe, but don't actually run tests.")
print("")
print(" --jvm.xmx Configure size of launched JVM running H2O. E.g. '--jvm.xmx 3g'")
print("")
print(" --nopass Run the NOPASS and NOFEATURE tests only and do not ignore any failures.")
print("")
print(" --c Start the JVMs in a convenient location.")
print("")
print(" --h2ojar Supply a path to the H2O jar file.")
print("")
print(" --tar Supply a path to the R TAR.")
print("")
print(" --pto The phantomjs timeout in seconds. Default is 3600 (1hr).")
print("")
print(" --noxunit Do not produce xUnit reports.")
print("")
print(" If neither --test nor --testlist is specified, then the list of tests is")
print(" discovered automatically as files matching '*runit*.R'.")
print("")
print("")
print("Examples:")
print("")
print(" Just accept the defaults and go (note: output dir must not exist):")
print(" "+g_script_name)
print("")
print(" Remove all random seeds (i.e. make new ones) but don't run any tests:")
print(" "+g_script_name+" --wipeall --norun")
print("")
print(" For a powerful laptop with 8 cores (keep default numclouds):")
print(" "+g_script_name+" --wipeall")
print("")
print(" For a big server with 32 cores:")
print(" "+g_script_name+" --wipeall --numclouds 16")
print("")
print(" Just run the tests that finish quickly")
print(" "+g_script_name+" --wipeall --testsize s")
print("")
print(" Run one specific test, keeping old random seeds:")
print(" "+g_script_name+" --wipe --test path/to/test.R")
print("")
print(" Rerunning failures from a previous run, keeping old random seeds:")
print(" # Copy failures.txt, otherwise --wipe removes the directory with the list!")
print(" cp " + os.path.join(g_output_dir, "failures.txt") + " .")
print(" "+g_script_name+" --wipe --numclouds 16 --testlist failed.txt")
print("")
print(" Run tests on a pre-existing cloud (e.g. in a debugger), keeping old random seeds:")
print(" "+g_script_name+" --wipe --usecloud ip:port")
sys.exit(1)
def unknown_arg(s):
print("")
print("ERROR: Unknown argument: " + s)
print("")
usage()
def bad_arg(s):
print("")
print("ERROR: Illegal use of (otherwise valid) argument: " + s)
print("")
usage()
def error(s):
print("")
print("ERROR: " + s)
print("")
usage()
def parse_args(argv):
global g_base_port
global g_num_clouds
global g_nodes_per_cloud
global g_wipe_test_state
global g_wipe_output_dir
global g_test_to_run
global g_test_list_file
global g_test_group
global g_run_small
global g_run_medium
global g_run_large
global g_run_xlarge
global g_use_cloud
global g_use_cloud2
global g_use_client
global g_config
global g_use_ip
global g_use_port
global g_no_run
global g_jvm_xmx
global g_nopass
global g_convenient
global g_path_to_h2o_jar
global g_path_to_tar
global g_path_to_whl
global g_produce_unit_reports
global g_phantomjs_to
global g_phantomjs_packs
i = 1
while (i < len(argv)):
s = argv[i]
if (s == "--baseport"):
i += 1
            if (i >= len(argv)):
usage()
g_base_port = int(argv[i])
elif (s == "--numclouds"):
i += 1
            if (i >= len(argv)):
usage()
g_num_clouds = int(argv[i])
elif (s == "--numnodes"):
i += 1
            if (i >= len(argv)):
usage()
g_nodes_per_cloud = int(argv[i])
elif (s == "--wipeall"):
g_wipe_test_state = True
g_wipe_output_dir = True
elif (s == "--wipe"):
g_wipe_output_dir = True
elif (s == "--test"):
i += 1
            if (i >= len(argv)):
usage()
g_test_to_run = TestRunner.find_test(argv[i])
elif (s == "--testlist"):
i += 1
            if (i >= len(argv)):
usage()
g_test_list_file = argv[i]
elif (s == "--testgroup"):
i += 1
            if (i >= len(argv)):
usage()
g_test_group = argv[i]
elif (s == "--testsize"):
i += 1
            if (i >= len(argv)):
usage()
v = argv[i]
            if (re.match(r'^[smlx]+$', v)):
if 's' not in v:
g_run_small = False
if 'm' not in v:
g_run_medium = False
if 'l' not in v:
g_run_large = False
if 'x' not in v:
g_run_xlarge = False
else:
bad_arg(s)
elif (s == "--usecloud"):
i += 1
            if (i >= len(argv)):
usage()
s = argv[i]
m = re.match(r'(\S+):([1-9][0-9]*)', s)
if (m is None):
unknown_arg(s)
g_use_cloud = True
g_use_ip = m.group(1)
port_string = m.group(2)
g_use_port = int(port_string)
elif (s == "--usecloud2"):
i += 1
            if (i >= len(argv)):
usage()
s = argv[i]
if (s is None):
unknown_arg(s)
g_use_cloud2 = True
g_config = s
elif (s == "--client"):
g_use_client = True
elif (s == "--nopass"):
g_nopass = True
elif s == "--c":
g_convenient = True
elif s == "--h2ojar":
i += 1
g_path_to_h2o_jar = os.path.abspath(argv[i])
elif s == "--pto":
i += 1
g_phantomjs_to = int(argv[i])
elif s == "--ptt":
i += 1
g_phantomjs_packs = argv[i]
elif s == "--tar":
i += 1
g_path_to_tar = os.path.abspath(argv[i])
elif s == "--whl":
i += 1
g_path_to_whl = os.path.abspath(argv[i])
elif (s == "--jvm.xmx"):
i += 1
            if (i >= len(argv)):
usage()
g_jvm_xmx = argv[i]
elif (s == "--norun"):
g_no_run = True
elif (s == "--noxunit"):
g_produce_unit_reports = False
elif (s == "-h" or s == "--h" or s == "-help" or s == "--help"):
usage()
else:
unknown_arg(s)
i += 1
if ((int(g_use_client) + int(g_use_cloud) + int(g_use_cloud2)) > 1):
print("")
print("ERROR: --client, --usecloud and --usecloud2 are mutually exclusive.")
print("")
sys.exit(1)
def wipe_output_dir():
print("")
print("Wiping output directory...")
try:
if (os.path.exists(g_output_dir)):
shutil.rmtree(g_output_dir)
except OSError as e:
print("")
print("ERROR: Removing output directory failed: " + g_output_dir)
print(" (errno {0}): {1}".format(e.errno, e.strerror))
print("")
sys.exit(1)
def wipe_test_state(test_root_dir):
print("")
print("Wiping test state (including random seeds)...")
if (True):
possible_seed_file = os.path.join(test_root_dir, str("master_seed"))
if (os.path.exists(possible_seed_file)):
try:
os.remove(possible_seed_file)
except OSError as e:
print("")
print("ERROR: Removing seed file failed: " + possible_seed_file)
print(" (errno {0}): {1}".format(e.errno, e.strerror))
print("")
sys.exit(1)
for d, subdirs, files in os.walk(test_root_dir):
for s in subdirs:
if ("Rsandbox" in s):
rsandbox_dir = os.path.join(d, s)
try:
shutil.rmtree(rsandbox_dir)
except OSError as e:
print("")
print("ERROR: Removing RSandbox directory failed: " + rsandbox_dir)
print(" (errno {0}): {1}".format(e.errno, e.strerror))
print("")
sys.exit(1)
def main(argv):
"""
Main program.
@return: none
"""
global g_script_name
global g_num_clouds
global g_nodes_per_cloud
global g_output_dir
global g_failed_output_dir
global g_test_to_run
global g_test_list_file
global g_test_group
global g_runner
global g_nopass
global g_path_to_tar
global g_path_to_whl
g_script_name = os.path.basename(argv[0])
# Calculate test_root_dir.
test_root_dir = os.path.realpath(os.getcwd())
# Calculate global variables.
g_output_dir = os.path.join(test_root_dir, str("results"))
g_failed_output_dir = os.path.join(g_output_dir, str("failed"))
testreport_dir = os.path.join(test_root_dir, str("../build/test-results"))
# Override any defaults with the user's choices.
parse_args(argv)
# Look for h2o jar file.
h2o_jar = g_path_to_h2o_jar
if (h2o_jar is None):
possible_h2o_jar_parent_dir = test_root_dir
while (True):
possible_h2o_jar_dir = os.path.join(possible_h2o_jar_parent_dir, "build")
possible_h2o_jar = os.path.join(possible_h2o_jar_dir, "h2o.jar")
if (os.path.exists(possible_h2o_jar)):
h2o_jar = possible_h2o_jar
break
next_possible_h2o_jar_parent_dir = os.path.dirname(possible_h2o_jar_parent_dir)
if (next_possible_h2o_jar_parent_dir == possible_h2o_jar_parent_dir):
break
possible_h2o_jar_parent_dir = next_possible_h2o_jar_parent_dir
# Wipe output directory if requested.
if (g_wipe_output_dir):
wipe_output_dir()
# Wipe persistent test state if requested.
if (g_wipe_test_state):
wipe_test_state(test_root_dir)
# Create runner object.
# Just create one cloud if we're only running one test, even if the user specified more.
if (g_test_to_run is not None):
g_num_clouds = 1
g_runner = TestRunner(test_root_dir,
g_use_cloud, g_use_cloud2, g_use_client, g_config, g_use_ip, g_use_port,
g_num_clouds, g_nodes_per_cloud, h2o_jar, g_base_port, g_jvm_xmx,
g_output_dir, g_failed_output_dir, g_path_to_tar, g_path_to_whl, g_produce_unit_reports,
testreport_dir)
# Build test list.
if (g_test_to_run is not None):
g_runner.add_test(g_test_to_run)
elif (g_test_list_file is not None):
g_runner.read_test_list_file(g_test_list_file)
else:
# Test group can be None or not.
g_runner.build_test_list(g_test_group, g_run_small, g_run_medium, g_run_large, g_run_xlarge, g_nopass)
# If no run is specified, then do an early exit here.
if (g_no_run):
sys.exit(0)
# Handle killing the runner.
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Sanity check existence of H2O jar file before starting the cloud.
if ((h2o_jar is None) or (not os.path.exists(h2o_jar))):
print("")
print("ERROR: H2O jar not found")
print("")
sys.exit(1)
# Run.
try:
g_runner.start_clouds()
g_runner.run_tests(g_nopass)
finally:
g_runner.stop_clouds()
g_runner.report_summary(g_nopass)
# If the overall regression did not pass then exit with a failure status code.
if (not g_runner.get_regression_passed()):
sys.exit(1)
if __name__ == "__main__":
main(sys.argv)
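# --- Hypothetical invocation sketch (illustrative; the script name is an
# assumption, and only flags visible in the argument-parsing fragment above
# are shown) ---
#   python run.py                 # build clouds and run the selected tests
#   python run.py --norun         # only build the test list, skip execution
#   python run.py --noxunit       # run tests without producing xunit reports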
| apache-2.0 | -4,628,322,847,902,796,000 | 32.517259 | 119 | 0.513729 | false | 3.879267 | true | false | false |
shollingsworth/HackerRank | python/re-sub-regex-substitution/main.py | 1 | 1291 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import __future__
import sys
import json
def banner():
ban = '====' * 30
print("{}\nSAMPLE INP:\n{}\n{}".format(ban,ban,open(ip, 'r').read()))
print("{}\nSAMPLE OUT:\n{}\n{}".format(ban,ban,open(op, 'r').read()))
print("{}\nSTART:\n{}".format(ban,ban))
sys.stdin = open(ip, 'r')
cnt = -1
def comp(inp,ln):
outl = output_arr[ln]
if str(inp) != outl:
raise Exception("Error input output: line {}, file: {}\ngot: {} expected: {}".format(ln,op,inp,outl))
ip = "./input06.txt"
op = "./output06.txt"
ip = "./challenge_sample_input"
op = "./challenge_sample_output"
output_arr = map(str,open(op,'r').read().split('\n'))
banner()
# https://www.hackerrank.com/challenges/re-sub-regex-substitution/problem
import re
s = "\n".join([raw_input() for _ in range(int(raw_input()))])
regexes = map(re.escape,[' && ',' || '])
replace = [' and ',' or ']
while True in [ bool(re.search(regex,s)) for regex in regexes ]:
a1,b1 = regexes
a2,b2 = replace
s = re.sub(a1,a2,s)
s = re.sub(b1,b2,s)
print(s)
"""
solution #1
mmap = {
}
for repl, arr in mmap.items():
regex, orig = arr
for m in re.finditer(regex,s):
if re.match(' ',m.group(1)):
s = re.sub(orig,repl,s)
print(s)
"""
| apache-2.0 | 8,672,584,622,276,070,000 | 22.907407 | 109 | 0.573974 | false | 2.712185 | false | false | false |
wfwei/ReadWeibo | classifier/DataProcess.py | 1 | 3547 | # !/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 2013-9-2
@author: plex
'''
from ReadWeibo.mainapp.models import Weibo
from ReadWeibo.account.models import Account
from djangodb.WeiboDao import WeiboDao
from datetime import datetime
from sets import Set
import numpy as np
import re
import jieba
jieba.load_userdict("/etc/jieba/jieba.dic")
DIC_FILE = "../data/dic/weibo.dic"
TRAIN_FILE = "../data/train/weibo_vec.tr"
#TODO: remove stop words, normalize words, and add a dictionary, e.g. http://www.sogou.com/labs/dl/w.html
def generate_user_dict(dic_file=DIC_FILE):
wbs = Weibo.objects.all()[:5000]
wordset = Set()
print 'Generating dict with %d weibo' % len(wbs)
for wb in wbs:
for word in jieba.cut(wb.text.encode('utf-8','ignore')):
            if len(word)>6: #TODO filter by part of speech (cixing)
wordset.add(word.lower().strip())
with open(dic_file, "w") as f:
for word in wordset:
f.write("%s\n" % word)
def load_dict(dic_file=DIC_FILE):
print 'loading dict from ', DIC_FILE
dict = {}
with open(dic_file, "r") as f:
id = 0
for word in f:
dict[word.strip().encode("utf-8", "ignore")] = id
id += 1
return dict
def generate_feature(wb, dict):
fea = [0]*len(dict)
    # weibo text features
for wd in jieba.cut(wb.text.encode('utf-8','ignore')):
word_count = 0
wd = wd.lower().strip()
if len(wd)>3 and wd in dict:
fea[dict[wd]] += 1
word_count += 1
# print 'found %d word in a weibo' % word_count
# add user features
owner = wb.owner
fea.append(int(owner.w_province))
fea.append(int(owner.w_city))
if owner.w_url:
fea.append(1)
else:
fea.append(0)
fea.append(len(owner.w_description))
if 'm' in owner.w_gender:
fea.append(1)
else:
fea.append(0)
fea.append(int(owner.w_followers_count))
fea.append(int(owner.w_friends_count))
fea.append(int(owner.w_statuses_count))
fea.append(int(owner.w_favourites_count))
fea.append(int(owner.w_bi_followers_count))
fea.append((datetime.now()-owner.w_created_at).days/100)
if owner.w_verified:
fea.append(1)
else:
fea.append(0)
# add weibo features
fea.append(int(wb.reposts_count))
fea.append(int(wb.comments_count))
fea.append(int(wb.attitudes_count))
if re.search("#.*?#", wb.text):
fea.append(1)
else:
fea.append(0)
fea.append(len(wb.text))
own_text = re.search("(.*?)//@", wb.text)
if own_text:
fea.append(len(own_text.group(1)))
else:
fea.append(len(wb.text))
    #TODO: categorize the source field
fea.append(len(wb.source))
if wb.retweeted_status:
fea.append(0)
else:
fea.append(1)
if wb.thumbnail_pic:
fea.append(1)
else:
fea.append(0)
fea.append(wb.created_at.hour)
fea.append(wb.created_at.weekday())
    # TODO: compute a decay formula for repost/comment counts
return fea
def generate_train_file():
print 'Generating train file...'
wbs = Weibo.objects.filter(real_category__gt=0)
word_dic = load_dict()
print 'Train set size: %d, dic size:%d' % (len(wbs), len(word_dic))
with open(TRAIN_FILE, "w") as train_file:
for wb in wbs:
for fea in generate_feature(wb, word_dic):
train_file.write("%s\t" % fea)
train_file.write("%s\n" % wb.real_category)
def get_weibo_to_predict(count=1000):
wbs = Weibo.objects.filter(real_category__exact = 0)[:count]
word_dic = load_dict()
wb_feas_list = list()
for wb in wbs:
try:
wb_feas_list.append((wb, [1.0] + generate_feature(wb, word_dic)));
except:
print 'generate feature fail for weibo:', wb.w_id
return wb_feas_list
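# --- Hypothetical helper (added for illustration; not part of the original) ---
def load_train_file(path=TRAIN_FILE):
    """Load the tab-separated file written by generate_train_file():
    the last column is the label, the remaining columns are features."""
    data = np.loadtxt(path)
    return data[:, :-1], data[:, -1].astype(int)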
if __name__ == '__main__':
generate_user_dict()
generate_train_file()
# print generate_feature(Weibo.objects.get(w_id=3617663458268921),{})
pass
| apache-2.0 | 5,765,088,084,828,768,000 | 22.206667 | 70 | 0.670497 | false | 2.344108 | false | false | false |
ewiger/decade | lib/antlr3c/genclib.py | 1 | 1592 | #!/usr/bin/python
'''
This code will automatically generate most of the ctypes from ANTLR3 C runtime
headers.
'''
import os
import sys
import ext_path
from pyclibrary import *
from glob import glob
ANTLR3C_INCLUDE = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'libantlr3c-3.4', 'include',
)
def get_antlr3c_headers():
return glob(ANTLR3C_INCLUDE + '/*')
def parse_headers(hfiles):
p = CParser(hfiles)
p.processAll(verbose=True)
return p
def save(p, output_dir):
from pprint import pprint
for k in p.dataList:
with file(os.path.join(output_dir, '%s.py' % k), 'w+') as output:
print 'Saving %s' % k
comment = """'''
%s
This is an auto-generated ctypes file from ANTLR3 C runtime headers. Note that
editing this file is not smart! For more details check genclib.py
It should be possible to redefine things in __init__.py if necessary (right
after imports section).
wbr, yy
'''\n""" % k.upper()
print >>output, comment
print >>output, '%s = \\' % k.upper()
pprint(p.defs[k], output)
if __name__ == '__main__':
hfiles = get_antlr3c_headers()
print('Found (%d) ANTLR3C headers, preparing to generate ctypes..' \
% len(hfiles))
p = parse_headers(hfiles)
output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'generated')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(os.path.join(output_dir, '__init__.py'), 'w+') as touched:
pass
save(p, output_dir)
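# Illustrative result (an assumption based on save() above): for each key k in
# p.dataList this writes generated/<k>.py containing a module-level dict, e.g.:
#   FUNCTIONS = \
#   {'someFunction': (...), ...}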
| bsd-3-clause | -3,333,805,171,277,549,600 | 25.533333 | 87 | 0.627513 | false | 3.216162 | false | false | false |
tseaver/google-cloud-python | asset/synth.py | 1 | 3228 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
versions = ["v1beta1", "v1p2beta1", "v1"]
excludes = ["setup.py", "nox*.py", "README.rst", "docs/conf.py", "docs/index.rst"]
# ----------------------------------------------------------------------------
# Generate asset GAPIC layer
# ----------------------------------------------------------------------------
for version in versions:
library = gapic.py_library(
"asset",
version,
config_path=f"/google/cloud/asset/artman_cloudasset_{version}.yaml",
artman_output_name=f"asset-{version}",
include_protos=True,
)
s.move(library, excludes=excludes)
s.replace(
"google/cloud/asset_v*/proto/assets_pb2.py",
"from google.iam.v1 import policy_pb2 as",
"from google.iam.v1 import iam_policy_pb2_grpc as",
)
s.replace(
"google/cloud/asset_v*/proto/assets_pb2.py",
"from google.iam.v1 import iam_policy_pb2_grpc "
"as google_dot_iam_dot_v1_dot_policy__pb2",
"from google.iam.v1 import iam_policy_pb2 "
"as google_dot_iam_dot_v1_dot_policy__pb2",
)
s.replace(
"google/cloud/asset_v*/proto/assets_pb2.py",
"_ASSET.fields_by_name\['iam_policy'\].message_type "
"= google_dot_iam_dot_v1_dot_policy__pb2._POLICY",
"_ASSET.fields_by_name['iam_policy'].message_type = google_dot_iam_dot"
"_v1_dot_policy__pb2.google_dot_iam_dot_v1_dot_policy__pb2._POLICY",
)
_BORKED_ASSET_DOCSTRING = """\
The full name of the asset. For example: ``//compute.googleapi
s.com/projects/my_project_123/zones/zone1/instances/instance1`
`. See `Resource Names <https://cloud.google.com/apis/design/r
esource_names#full_resource_name>`__ for more information.
"""
_FIXED_ASSET_DOCSTRING = """
The full name of the asset. For example:
``//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1``.
See https://cloud.google.com/apis/design/resource_names#full_resource_name
for more information.
"""
s.replace(
"google/cloud/asset_v*/proto/assets_pb2.py",
_BORKED_ASSET_DOCSTRING,
_FIXED_ASSET_DOCSTRING,
)
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = gcp.CommonTemplates().py_library(unit_cov_level=79, cov_level=80)
s.move(templated_files, excludes=["noxfile.py"])
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
| apache-2.0 | 1,773,273,584,265,377,000 | 35.269663 | 95 | 0.619579 | false | 3.419492 | false | false | false |
karel-brinda/rnftools | rnftools/rnfformat/Validator.py | 1 | 4484 | import rnftools.rnfformat
import re
reg_lrn = re.compile(r"^([!-?A-^`-~]*)__([0-9a-f]+)__([!-?A-^`-~]+)__([!-?A-^`-~]*)$")
reg_prefix_part = re.compile(r"^[!-?A-^`-~]*$")
reg_id_part = re.compile(r"^[0-9a-f]+$")
reg_segmental_part = re.compile(r"^(?:(\([0-9FRN,]*\))(?:,(?!$)|$))+$")
reg_suffix_part = re.compile(r"^(?:((?:[a-zA-Z0-9]+:){0,1})\[([!-?A-Z\\^`-~]*)\](?:,(?!$)|$))+$")
reg_segment = re.compile(r"^\(([0-9]+),([0-9]+),([FRN]),([0-9]+),([0-9]+)\)$")
reg_comment = re.compile(r"^\[([!-?A-Z\\^`-~]*)\]$")
reg_extension = re.compile(r"^\[([!-?A-Z\\^`-~]*)\]$")
class Validator:
"""Class for validation of RNF.
Args:
initial_read_tuple_name (str): Initial read tuple name to detect profile (widths).
report_only_first (bool): Report only first occurrence of every error.
warnings_as_errors (bool): Treat warnings as errors (error code).
"""
def __init__(
self,
initial_read_tuple_name,
report_only_first=True,
warnings_as_errors=False,
):
self.report_only_first = report_only_first
self.reported_errors = set()
self.error_has_been_reported = False
self.warning_has_been_reported = False
self.warnings_as_errors = warnings_as_errors
self.rnf_profile = rnftools.rnfformat.RnfProfile(read_tuple_name=initial_read_tuple_name)
def validate(self, read_tuple_name):
"""Check RNF validity of a read tuple.
Args:
read_tuple_name (str): Read tuple name to be checked.s
"""
if reg_lrn.match(read_tuple_name) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_read_tuple_name_structure",
message="'{}' is not matched".format(reg_lrn),
)
else:
parts = read_tuple_name.split("__")
if reg_prefix_part.match(parts[0]) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_prefix_part",
message="'{}' is not matched".format(reg_prefix_part),
)
if reg_id_part.match(parts[1]) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_id_part",
message="'{}' is not matched".format(reg_id_part),
)
if reg_segmental_part.match(parts[2]) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_segmental_part",
message="'{}' is not matched".format(reg_segmental_part),
)
if reg_suffix_part.match(parts[3]) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_suffix_part",
message="'{}' is not matched".format(reg_suffix_part),
)
if not self.rnf_profile.check(read_tuple_name):
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_profile",
message="Read has a wrong profile (wrong widths). It should be: {} but it is: {}.".format(
self.rnf_profile,
rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name),
),
warning=True,
)
def get_return_code(self):
"""Get final return code (0 = ok, 1=error appeared).
"""
if self.error_has_been_reported:
return 1
if self.warning_has_been_reported and self.warnings_as_errors:
            return 1
        return 0
def report_error(self, read_tuple_name, error_name, wrong="", message="", warning=False):
"""Report an error.
Args:
            read_tuple_name (str): Name of the read tuple.
            error_name (str): Name of the error.
            wrong (str): What is wrong.
            message (str): Additional message to be printed.
            warning (bool): Warning (not an error).
"""
if (not self.report_only_first) or (error_name not in self.reported_errors):
print("\t".join(["error" if warning == False else "warning", read_tuple_name, error_name, wrong, message]))
self.reported_errors.add(error_name)
if warning:
self.warning_has_been_reported = True
else:
self.error_has_been_reported = True
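# --- Hypothetical usage sketch (illustrative; the read tuple name below is an
# assumption, constructed only to satisfy the regexes above) ---
# validator = Validator(initial_read_tuple_name="sim__0001__(1,1,F,100,200)__[illustrative]")
# validator.validate("sim__0001__(1,1,F,100,200)__[illustrative]")
# print(validator.get_return_code())  # 0 = ok, 1 = errors (or warnings-as-errors)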
| mit | -313,604,499,112,898,240 | 37.991304 | 119 | 0.533452 | false | 3.394398 | false | false | false |
mlperf/inference_results_v0.5 | closed/NVIDIA/code/common/harness.py | 1 | 13993 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os, sys
sys.path.insert(0, os.getcwd())
from code.common import logging, dict_get, run_command, args_to_string
import code.common.arguments as common_args
plugin_map = {
"ssd-large": ["build/plugins/NMSOptPlugin/libnmsoptplugin.so"],
"ssd-small": ["build/plugins/NMSOptPlugin/libnmsoptplugin.so"],
}
scenario_result_regex = {
"SingleStream": r"([0-9]+th percentile latency \(ns\) +: [0-9\.]+)",
"MultiStream": r"(Samples per query : [0-9\.]+)",
"Offline": r"(Samples per second: [0-9\.]+)",
"Server": r"(Scheduled samples per second +: [0-9\.]+)",
}
benchmark_qsl_size_map = {
# See: https://github.com/mlperf/inference_policies/blob/master/inference_rules.adoc#benchmarks-1
"resnet": 1024,
"mobilenet": 1024,
"ssd-large": 64,
"ssd-small": 256,
}
class BenchmarkHarness():
def __init__(self, args, name=""):
print (args)
self.args = args
self.name = name
self.verbose = dict_get(args, "verbose", default=None)
if self.verbose:
logging.info("===== Harness arguments for {:} =====".format(name))
for key in args:
logging.info("{:}={:}".format(key, args[key]))
self.system_id = args["system_id"]
self.scenario = args["scenario"]
self.engine_dir = "./build/engines/{:}/{:}/{:}".format(self.system_id, self.name, self.scenario)
self.precision = args["precision"]
self.has_gpu = dict_get(args, "gpu_batch_size", default=None) is not None
self.has_dla = dict_get(args, "dla_batch_size", default=None) is not None
self.enumerate_engines()
def enumerate_engines(self):
if self.has_gpu:
self.gpu_engine = self._get_engine_name("gpu", self.args["gpu_batch_size"])
self.check_file_exists(self.gpu_engine)
if self.has_dla:
self.dla_engine = self._get_engine_name("dla", self.args["dla_batch_size"])
self.check_file_exists(self.dla_engine)
def _get_engine_name(self, device_type, batch_size):
return "{:}/{:}-{:}-{:}-b{:}-{:}.plan".format(self.engine_dir, self.name, self.scenario,
device_type, batch_size, self.precision)
def build_default_flags(self, custom_args):
flag_dict = {}
flag_dict["verbose"] = self.verbose
# Handle plugins
if self.name in plugin_map:
plugins = plugin_map[self.name]
for plugin in plugins:
self.check_file_exists(plugin)
flag_dict["plugins"] = ",".join(plugins)
# Generate flags for logfile names.
log_dir = os.path.join(self.args["log_dir"], self.system_id, self.name, self.scenario)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
flag_dict["logfile_outdir"] = log_dir
flag_dict["logfile_prefix"] = "mlperf_log_"
# Handle custom arguments
for arg in custom_args:
val = dict_get(self.args, arg, None)
if val is not None:
flag_dict[arg] = val
return flag_dict
def build_configs(self, flag_dict):
# ideally, these values would be in mlperf.conf. since they aren't, write them into user.conf using these values.
# QSL Seed: 0x2b7e 1516 28ae d2a6
# Schedule Seed: 0x3243 f6a8 885a 308d
# Sample Seed: 0x093c 467e 37db 0c7a
seeds_map = {
"qsl_rng_seed": "3133965575612453542",
"sample_index_rng_seed": "665484352860916858",
"schedule_rng_seed": "3622009729038561421",
}
# required settings for each scenario
required_settings_map = {
"SingleStream": ["qsl_rng_seed", "sample_index_rng_seed", "schedule_rng_seed"], # "single_stream_expected_latency_ns", See: https://github.com/mlperf/inference/issues/471
"Offline": ["offline_expected_qps", "qsl_rng_seed", "sample_index_rng_seed", "schedule_rng_seed"],
"MultiStream": ["multi_stream_samples_per_query", "qsl_rng_seed", "sample_index_rng_seed", "schedule_rng_seed"],
"Server": ["server_target_qps", "qsl_rng_seed", "sample_index_rng_seed", "schedule_rng_seed"],
}
# optional settings that we support overriding
optional_settings_map = {
"SingleStream": [ "single_stream_target_latency_percentile", "min_query_count" ],
"Offline": [ "min_query_count" ],
"MultiStream": [ "multi_stream_target_qps", "multi_stream_target_latency_ns", "multi_stream_max_async_queries", "multi_stream_target_latency_percentile", "min_query_count" ],
"Server": [ "server_target_latency_percentile", "server_target_latency_ns", "min_query_count" ],
}
# option name to config file map
options_map = {
"single_stream_expected_latency_ns": "target_latency",
"offline_expected_qps": "target_qps",
"multi_stream_samples_per_query": "samples_per_query",
"server_target_qps": "target_qps",
}
parameter_scaling_map = {
"target_latency": 1 / 1000000.0,
"target_latency_percentile": 100.0,
}
system = self.system_id
benchmark = self.name
scenario = self.scenario
mlperf_conf_path = "measurements/{:}/{:}/{:}/mlperf.conf".format(system, benchmark, scenario)
user_conf_path = "measurements/{:}/{:}/{:}/user.conf".format(system, benchmark, scenario)
# setup paths
if "mlperf_conf_path" not in flag_dict:
flag_dict["mlperf_conf_path"] = mlperf_conf_path
if "user_conf_path" not in flag_dict:
flag_dict["user_conf_path"] = user_conf_path
# assign seed values
for name, value in seeds_map.items():
if name not in flag_dict:
flag_dict[name] = value
# auto-generate user.conf
with open(user_conf_path, "w") as f:
for param in required_settings_map[scenario]:
param_name = param
if param in options_map:
param_name = options_map[param]
value = flag_dict[param]
if param_name in parameter_scaling_map:
value = value * parameter_scaling_map[param_name]
f.write("*.{:}.{:} = {:}\n".format(scenario, param_name, value))
flag_dict[param] = None
for param in optional_settings_map[scenario]:
if param not in flag_dict: continue
param_name = param
if param in options_map:
param_name = options_map[param]
value = flag_dict[param]
if param_name in parameter_scaling_map:
value = value * parameter_scaling_map[param_name]
f.write("*.{:}.{:} = {:}\n".format(scenario, param_name, value))
flag_dict[param] = None
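    # Illustrative example (an assumption, not from a real run): for an Offline
    # scenario with offline_expected_qps=1000, build_configs() writes user.conf
    # lines such as:
    #   *.Offline.target_qps = 1000
    #   *.Offline.qsl_rng_seed = 3133965575612453542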
def run_harness(self):
executable = "./build/bin/harness_default"
self.check_file_exists(executable)
# These arguments are in self.args, passed in via code.main, which handles override arguments.
harness_args = common_args.LOADGEN_ARGS + common_args.LWIS_ARGS + common_args.SHARED_ARGS
flag_dict = self.build_default_flags(harness_args)
# Handle engines
if self.has_gpu:
flag_dict["gpu_engines"] = self.gpu_engine
if self.has_dla:
flag_dict["dla_engines"] = self.dla_engine
# Handle performance sample count
flag_dict["performance_sample_count"] = benchmark_qsl_size_map[self.name]
# Handle the expected qps values
if self.has_gpu and self.has_dla:
prefix = "concurrent_"
elif self.has_gpu:
prefix = "gpu_"
flag_dict["max_dlas"] = 0
elif self.has_dla:
prefix = "dla_"
flag_dict["max_dlas"] = 1
else:
raise ValueError("Cannot specify --no_gpu and --gpu_only at the same time")
if self.scenario == "SingleStream":
harness_flags = common_args.SINGLE_STREAM_PARAMS
elif self.scenario == "Offline":
harness_flags = common_args.OFFLINE_PARAMS
elif self.scenario == "MultiStream":
harness_flags = common_args.MULTI_STREAM_PARAMS
elif self.scenario == "Server":
harness_flags = common_args.SERVER_PARAMS
# use jemalloc2 for server scenario.
executable = "LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 " + executable
else:
raise ValueError("Invalid scenario: {:}".format(self.scenario))
for arg in harness_flags:
val = dict_get(self.args, prefix+arg, None)
if val is None:
raise ValueError("Missing required key {:}".format(prefix+arg))
flag_dict[arg] = val
# Handle configurations
self.build_configs(flag_dict)
argstr = args_to_string(flag_dict) + " --scenario " + self.scenario + " --model " + self.name
if self.name in ["ssd-small", "ssd-large"]:
argstr += " --response_postprocess coco"
cmd = "{:} {:}".format(executable, argstr)
output = run_command(cmd, get_output=True)
# Return harness result.
return self.harness_get_result(output, scenario_result_regex[self.scenario])
def harness_get_result(self, output, regex):
# All harness outputs should have a result string
result_regex = re.compile(regex)
result_string = ""
# All but the server harness should have an output with a validity message
valid_regex = re.compile(r"(Result is : (VALID|INVALID))")
valid_string = ""
# Iterate through the harness output
for line in output:
# Check for the result string
result_match = result_regex.match(line)
if not result_match is None:
result_string = result_match.group(1)
break
for line in output:
# Check for the validity string
valid_match = valid_regex.match(line)
if not valid_match is None:
valid_string = valid_match.group(1)
break
if result_string == "":
return "Cannot find performance result. Maybe you are running in AccuracyOnly mode."
elif valid_string == "":
return result_string + " but cannot find validity result."
else:
return result_string + " and " + valid_string
def check_file_exists(self, f):
if not os.path.isfile(f):
raise RuntimeError("File {:} does not exist.".format(f))
class GNMTHarness(BenchmarkHarness):
def __init__(self, args, name=""):
super().__init__(args, name=name)
def check_dir_exists(self, d):
if not os.path.isdir(d):
raise RuntimeError("Directory {:} does not exist.".format(d))
def enumerate_engines(self):
self.engines = []
if self.scenario == "Server":
batch_sizes = self.args["batch_sizes"]
else:
batch_sizes = [ self.args["gpu_batch_size"] ]
for batch_size in batch_sizes:
engine_name = self._get_engine_name("gpu", batch_size)
self.check_dir_exists(engine_name)
self.engines.append(engine_name)
def run_harness(self):
if self.scenario == "Server":
executable = "./build/bin/harness_gnmt_server"
harness_args = common_args.GNMT_SERVER_ARGS
else:
executable = "./build/bin/harness_gnmt_default"
harness_args = common_args.GNMT_HARNESS_ARGS
self.check_file_exists(executable)
flag_dict = self.build_default_flags(harness_args)
# Scenario based arguments
if self.scenario == "Offline":
scenario_args = common_args.OFFLINE_PARAMS
elif self.scenario == "SingleStream":
scenario_args = common_args.SINGLE_STREAM_PARAMS
else:
scenario_args = []
for key in scenario_args:
flag_dict[key] = dict_get(self.args, "gpu_"+key, None)
engine_flag = "engine" if len(self.engines) == 1 else "engines"
flag_dict[engine_flag] = ",".join(self.engines)
# Remove the batch size flags
flag_dict["batch_sizes"] = None
flag_dict["gpu_batch_size"] = None
# Choose input file based on test mode
if ((flag_dict.get("test_mode", None) == "PerformanceOnly" or flag_dict.get("test_mode", None) is None)
and flag_dict.get("input_file_performance", None) is not None):
flag_dict["input_file"] = flag_dict["input_file_performance"]
elif flag_dict.get("input_file_accuracy", None) is not None:
flag_dict["input_file"] = flag_dict["input_file_accuracy"]
flag_dict["input_file_performance"] = None
flag_dict["input_file_accuracy"] = None
# Handle configurations
self.build_configs(flag_dict)
argstr = args_to_string(flag_dict)
cmd = "{:} {:}".format(executable, argstr)
output = run_command(cmd, get_output=True)
# Return harness result.
return self.harness_get_result(output, scenario_result_regex[self.scenario])
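# --- Hypothetical usage sketch (illustrative; the real entry point is code.main,
# the arg keys below are assumptions inferred from how these classes read them,
# and the engine plan files must already exist on disk) ---
#   args = {"system_id": "T4x1", "scenario": "Offline", "precision": "int8",
#           "log_dir": "build/logs", "gpu_batch_size": 256,
#           "gpu_offline_expected_qps": 5000, "verbose": True}
#   harness = BenchmarkHarness(args, name="resnet")
#   print(harness.run_harness())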
| apache-2.0 | -5,496,312,983,046,946,000 | 39.20977 | 186 | 0.590652 | false | 3.759538 | true | false | false |
alnaav/shredNN | nn/train/gradient_descent_trainer.py | 1 | 3264 | import numpy as np
from nn.train.trainer import Trainer
class LayerData:
def __init__(self, layer):
self.z = np.zeros(layer.size)
self.a = np.zeros(layer.size)
self.neuron_error = np.zeros(layer.size)
self.grad_w = np.zeros(layer.w.shape)
self.grad_b = np.zeros(layer.b.shape)
self.d_w = np.zeros(layer.w.shape)
self.d_b = np.zeros(layer.b.shape)
class GradientDescentTrainer(Trainer):
def __init__(self, regularization_param=0.01, learning_rate=0.2):
self.iteration_number = 10000
self.l = regularization_param
self.a = learning_rate
self.__set_coeff__(1)
def __set_coeff__(self, samples_len):
self.rev_m = 1.0 / samples_len
self.coeff = self.l * self.rev_m * 0.5
def calc_gradient(self, curr, a_prev, w):
curr.grad_w += curr.neuron_error.transpose().dot(a_prev)
        curr.grad_b += curr.neuron_error.sum(axis=0)  # bias gradient sums over the batch; the original's [1, :] picked one sample
curr.d_w = self.rev_m * curr.grad_w + self.l * w
curr.d_b = self.rev_m * curr.grad_b
def step(self, layers, x, y):
layers_data = [LayerData(layer) for layer in layers[1:]]
a = x
for i, layer in enumerate(layers[1:]):
layers_data[i].z = a.dot(layer.w.transpose())
layers_data[i].z += layer.b
layers_data[i].a = layer.activation.apply(layers_data[i].z)
a = layers_data[i].a
cost = self.cost(layers, layers_data[-1].a, y)
curr = layers_data[-1]
curr.neuron_error = curr.a - y
for i in range(len(layers) - 1, 1, -1):
prev = layers_data[i - 2]
curr = layers_data[i - 1]
prev.neuron_error = curr.neuron_error.dot(layers[i].w) * layers[i - 1].activation.apply_derivative(prev.z)
self.calc_gradient(curr, prev.a, layers[i].w)
self.calc_gradient(layers_data[0], x, layers[1].w)
for layer, data in zip(layers[1:], layers_data):
layer.w -= self.a * data.d_w
layer.b -= self.a * data.d_b
return cost
def cost(self, layers, predicted, expected):
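        # Regularized cross-entropy (base-2), matching the code below:
        #   J = -(1/m) * sum(y*log2(h) + (1-y)*log2(1-h)) + (lambda/(2m)) * sum(W^2)
        # where self.rev_m == 1/m and self.coeff == lambda/(2m).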
hv = predicted.ravel()
yv = expected.ravel()
reg = 0
for layer in layers[1:]:
reg += np.sum(layer.w * layer.w)
reg *= self.coeff
err = -(np.log2(hv).transpose().dot(yv) + np.log2(1 - hv).transpose().dot((1 - yv))) * self.rev_m
return err + reg
def train(self, nn, features, target, k):
samples_number = features.shape[0]
self.__set_coeff__(samples_number)
y = np.zeros((samples_number, k))
for i in range(0, samples_number):
y[i, target[i]] = 1
batch_size = 1000
batches_number = samples_number / batch_size + 1
print "{} batches".format(batches_number)
for i in range(0, self.iteration_number):
sb = 0
            for b in range(0, batches_number):
                size = min(batch_size, samples_number - sb)
                if size <= 0:  # guard the extra empty batch when batch_size divides samples_number
                    break
                self.__set_coeff__(size)
                curr_x = features[sb:sb + size, :]
                curr_y = y[sb:sb + size, :]
                self.step(nn.layers, curr_x, curr_y)
                sb += size  # advance the offset; the original never moved past the first batch
if i % 1000 == 0:
print "{} iterations done".format(i)
| apache-2.0 | -6,355,674,511,678,844,000 | 31.64 | 118 | 0.543811 | false | 3.287009 | false | false | false |
andyvand/cygsystem-config-llvm | src/Properties_Renderer.py | 1 | 7792 | """This renderer class renders volume properties into a separate
drawing area next to the main volume rendering drawing area.
"""
import sys
import math
import operator
import types
import select
import signal
import gobject
import pango
import string
import os
from lvmui_constants import *
import stat
import gettext
_ = gettext.gettext
### gettext first, then import gtk (exception prints gettext "_") ###
try:
import gtk
import gtk.glade
except RuntimeError, e:
print _("""
Unable to initialize graphical environment. Most likely cause of failure
is that the tool was not run using a graphical environment. Please either
start your graphical user interface or set your DISPLAY variable.
Caught exception: %s
""") % e
sys.exit(-1)
import gnome
import gnome.ui
LABEL_X = 325
LABEL_Y = 600
X_OFF = 20
Y_OFF = 10
BIG_HEADER_SIZE = 12000
PROPERTY_SIZE = 8000
PROPERTIES_STR=_("Properties for")
PHYSICAL_VOLUME_STR=_("Physical Volume")
LOGICAL_VOLUME_STR=_("Logical Volume")
UNALLOCATED_VOLUME_STR=_("Unallocated Volume")
UNINITIALIZED_VOLUME_STR=_("Disk Entity")
PHYSICAL_VOLUMEGROUP_STR=_("Volume Group")
LOGICAL_VOLUMEGROUP_STR=_("Volume Group")
VOLUMEGROUP_STR=_("Volume Group")
##############################################################
class Properties_Renderer:
def __init__(self, area, widget):
self.main_window = widget
self.area = area #actual widget, used for getting style, hence bgcolor
self.area.set_size_request(700, 500)
self.current_selection_layout = None
self.layout_list = list()
self.layout_pixmap = gtk.gdk.Pixmap(self.main_window, LABEL_X, LABEL_Y)
self.gc = self.main_window.new_gc()
self.pango_context = self.area.get_pango_context()
color = gtk.gdk.colormap_get_system().alloc_color("white", 1,1)
self.area.modify_bg(gtk.STATE_NORMAL, color)
self.area.connect('expose-event', self.on_expose_event)
self.clear_layout_pixmap()
def render_to_layout_area(self, prop_list, name, type):
self.clear_layout_pixmap()
self.layout_list = list()
self.prepare_header_layout(name, type)
self.prepare_prop_layout(prop_list, type)
self.prepare_selection_props()
self.do_render()
def prepare_header_layout(self, name, type):
pc = self.pango_context
desc = pc.get_font_description()
desc.set_size(BIG_HEADER_SIZE)
pc.set_font_description(desc)
layout_string1 = "<span size=\"12000\">" +PROPERTIES_STR + "</span>\n"
if type == PHYS_TYPE:
layout_string2 = "<span size=\"12000\">" + PHYSICAL_VOLUME_STR + "</span>\n"
layout_string3 = "<span foreground=\"#ED1C2A\" size=\"12000\"><b>" + name + "</b></span>"
elif type == LOG_TYPE:
layout_string2 = "<span size=\"12000\">" + LOGICAL_VOLUME_STR + "</span>\n"
layout_string3 = "<span foreground=\"#43ACE2\" size=\"12000\"><b>" + name + "</b></span>"
elif type == UNALLOCATED_TYPE:
layout_string2 = "<span size=\"12000\">" + UNALLOCATED_VOLUME_STR + "</span>\n"
layout_string3 = "<span foreground=\"#ED1C2A\" size=\"12000\"><b>" + name + "</b></span>"
elif type == UNINITIALIZED_TYPE:
layout_string2 = "<span size=\"12000\">" + UNINITIALIZED_VOLUME_STR + "</span>\n"
layout_string3 = "<span foreground=\"#404040\" size=\"12000\"><b>" + name + "</b></span>"
elif type == VG_PHYS_TYPE:
layout_string2 = "<span size=\"12000\">" + PHYSICAL_VOLUMEGROUP_STR + "</span>\n"
layout_string3 = "<span foreground=\"#ED1C2A\" size=\"12000\"><b>" + name + "</b></span>"
elif type == VG_LOG_TYPE:
layout_string2 = "<span size=\"12000\">" + LOGICAL_VOLUMEGROUP_STR + "</span>\n"
layout_string3 = "<span foreground=\"#43ACE2\" size=\"12000\"><b>" + name + "</b></span>"
else:
layout_string2 = "<span size=\"12000\">" + VOLUMEGROUP_STR + "</span>\n"
layout_string3 = "<span foreground=\"#43A2FF\" size=\"12000\"><b>" + name + "</b></span>"
layout_string = layout_string1 + layout_string2 + layout_string3
header_layout = self.area.create_pango_layout('')
header_layout.set_markup(layout_string)
self.layout_list.append(header_layout)
def prepare_prop_layout(self, prop_list,type):
pc = self.pango_context
desc = pc.get_font_description()
desc.set_size(PROPERTY_SIZE)
pc.set_font_description(desc)
text_str = self.prepare_props_list(prop_list, type)
props_layout = self.area.create_pango_layout('')
props_layout.set_markup(text_str)
self.layout_list.append(props_layout)
def clear_layout_pixmap(self):
self.set_color("white")
self.layout_pixmap.draw_rectangle(self.gc, True, 0, 0, -1, -1)
def clear_layout_area(self):
self.clear_layout_pixmap()
self.layout_list = list()
self.main_window.draw_drawable(self.gc, self.layout_pixmap, 0, 0, X_OFF, Y_OFF, -1, -1)
def set_color(self, color):
self.gc.set_foreground(gtk.gdk.colormap_get_system().alloc_color(color, 1,1))
def prepare_selection_props(self):
pass
def prepare_props_list(self, props_list, type):
        stringbuf = list()
        for i in range(0, len(props_list), 2):
            if i != 0:
                stringbuf.append("\n")
            stringbuf.append("<b>" + props_list[i] + "</b>")
            if (type == PHYS_TYPE) or (type == VG_PHYS_TYPE) or (type == UNALLOCATED_TYPE):
                stringbuf.append("<span foreground=\"#ED1C2A\">")
            elif (type == LOG_TYPE) or (type == VG_LOG_TYPE):
                stringbuf.append("<span foreground=\"#43ACE2\">")
            elif type == VG_TYPE:
                stringbuf.append("<span foreground=\"#43A2FF\">")
            else:
                stringbuf.append("<span foreground=\"#404040\">")
            stringbuf.append(props_list[i+1])
            stringbuf.append("</span>")
        return "".join(stringbuf)
def do_render(self):
self.clear_layout_pixmap()
self.set_color("black")
y_offset = 0
for layout in self.layout_list:
x,y = layout.get_pixel_size()
if y_offset == 0:
self.layout_pixmap.draw_layout(self.gc, 0, 0, layout)
y_offset = y_offset + y
else:
self.layout_pixmap.draw_layout(self.gc, 0, y_offset + 5, layout)
y_offset = y_offset + y
if self.current_selection_layout != None:
self.layout_pixmap.draw_layout(self.gc, 0, y_offset + 5, self.current_selection_layout)
self.main_window.draw_drawable(self.gc, self.layout_pixmap, 0, 0, X_OFF, Y_OFF, -1, -1)
def render_selection(self, layout):
###FIXME - This has the potential of eliminating all entries on the list.
if layout == None:
self.current_selection_layout = None
self.do_render()
elif layout is self.current_selection_layout:
return
else:
self.current_selection_layout = layout
self.do_render()
def on_expose_event(self, widget, event):
self.do_render()
| gpl-2.0 | -4,781,185,715,055,585,000 | 36.104762 | 95 | 0.602156 | false | 3.380477 | false | false | false |
theeluwin/textrankr | textrankr/utils.py | 1 | 2716 | from typing import (
List,
Tuple,
Callable,
)
from re import split
from itertools import combinations
from collections import Counter
from networkx import Graph
from .sentence import Sentence
__all__: Tuple[str, ...] = (
'parse_text_into_sentences',
'multiset_jaccard_index',
'build_sentence_graph',
)
def parse_text_into_sentences(text: str, tokenizer: Callable[[str], List[str]]) -> List[Sentence]:
"""
This function splits the given text into sentence candidates using a pre-defined splitter,
then creates a list of `sentence.Sentence` instances which have bag-of-words inside, tokenized by the given tokenizer.
"""
# init
index: int = 0
duplication_checker: set = set()
sentences: List[Sentence] = []
# parse text
candidates: List[str] = split(r'(?:(?<=[^0-9])\.|\n|!|\?)', text)
for candidate in candidates:
# cleanse the candidate
candidate_stripped: str = candidate.strip('. ')
if not len(candidate_stripped):
continue
if candidate_stripped in duplication_checker:
continue
# tokenize the candidate
tokens: List[str] = tokenizer(candidate_stripped)
if len(tokens) < 2:
continue
duplication_checker.add(candidate_stripped)
# create a sentence
bow: Counter = Counter(tokens)
sentence = Sentence(index, candidate_stripped, bow)
sentences.append(sentence)
index += 1
# return
return sentences
def multiset_jaccard_index(counter1: Counter, counter2: Counter) -> float:
"""
Calculates the jaccard index between two given multisets.
Note that a `Counter` instance can be used for representing multisets.
"""
intersection_count: int = sum((counter1 & counter2).values())
union_count: int = sum((counter1 | counter2).values())
try:
return intersection_count / union_count
except ZeroDivisionError:
return 0.0
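# Worked example (illustrative): for Counter('aab') = {a:2, b:1} and
# Counter('abc') = {a:1, b:1, c:1}, the intersection (element-wise min) has
# size 1+1 = 2 and the union (element-wise max) has size 2+1+1 = 4, so the
# index is 2/4 = 0.5.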
def build_sentence_graph(sentences: List[Sentence], tolerance: float = 0.05) -> Graph:
"""
Builds a `networkx.Graph` instance, using sentences as nodes.
An edge weight is determined by the jaccard index between two sentences,
but the edge will be ignored if the weight is lower then the given tolerance.
"""
# init
graph: Graph = Graph()
# add nodes
graph.add_nodes_from(sentences)
# add edges
for sentence1, sentence2 in combinations(sentences, 2):
weight: float = multiset_jaccard_index(sentence1.bow, sentence2.bow)
if weight > tolerance:
graph.add_edge(sentence1, sentence2, weight=weight)
# return
return graph
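# --- Hypothetical usage sketch (illustrative; the tokenizer and text are
# assumptions, standing in for a real tokenizer such as a morphological
# analyzer) ---
def _demo() -> None:
    tokenizer: Callable[[str], List[str]] = lambda text: text.split()
    sentences: List[Sentence] = parse_text_into_sentences('hello world. world peace now. hello peace', tokenizer)
    graph: Graph = build_sentence_graph(sentences)
    print(graph.number_of_nodes(), graph.number_of_edges())  # expected: 3 3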
| mit | -7,227,328,344,896,412,000 | 27.893617 | 126 | 0.647275 | false | 4.090361 | false | false | false |
dc3-plaso/plaso | plaso/parsers/winreg_plugins/usbstor.py | 1 | 4441 | # -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the USBStor key."""
import logging
from plaso.containers import windows_events
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
__author__ = 'David Nides ([email protected])'
class USBStorPlugin(interface.WindowsRegistryPlugin):
"""USBStor key plugin."""
NAME = u'windows_usbstor_devices'
DESCRIPTION = u'Parser for USB Plug And Play Manager USBStor Registry Key.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
u'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Enum\\USBSTOR')])
URLS = [u'http://www.forensicswiki.org/wiki/USB_History_Viewing']
_SOURCE_APPEND = u': USBStor Entries'
def GetEntries(self, parser_mediator, registry_key, **kwargs):
"""Collect Values under USBStor and return an event object for each one.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
registry_key: A Windows Registry key (instance of
dfwinreg.WinRegistryKey).
"""
for subkey in registry_key.GetSubkeys():
values_dict = {}
values_dict[u'subkey_name'] = subkey.name
# Time last USB device of this class was first inserted.
event_object = windows_events.WindowsRegistryEvent(
subkey.last_written_time, registry_key.path, values_dict,
offset=registry_key.offset, source_append=self._SOURCE_APPEND)
parser_mediator.ProduceEvent(event_object)
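      # USBStor subkey names are '&'-separated, typically of the form
      # Disk&Ven_<vendor>&Prod_<product>&Rev_<revision>, for example (an
      # illustrative name): Disk&Ven_Kingston&Prod_DataTraveler&Rev_PMAP.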
name_values = subkey.name.split(u'&')
number_of_name_values = len(name_values)
# Normally we expect 4 fields here however that is not always the case.
if number_of_name_values != 4:
logging.warning(
u'Expected 4 &-separated values in: {0:s}'.format(subkey.name))
if number_of_name_values >= 1:
values_dict[u'device_type'] = name_values[0]
if number_of_name_values >= 2:
values_dict[u'vendor'] = name_values[1]
if number_of_name_values >= 3:
values_dict[u'product'] = name_values[2]
if number_of_name_values >= 4:
values_dict[u'revision'] = name_values[3]
for device_key in subkey.GetSubkeys():
values_dict[u'serial'] = device_key.name
friendly_name_value = device_key.GetValueByName(u'FriendlyName')
if friendly_name_value:
values_dict[u'friendly_name'] = friendly_name_value.GetDataAsObject()
else:
values_dict.pop(u'friendly_name', None)
# ParentIdPrefix applies to Windows XP Only.
parent_id_prefix_value = device_key.GetValueByName(u'ParentIdPrefix')
if parent_id_prefix_value:
values_dict[u'parent_id_prefix'] = (
parent_id_prefix_value.GetDataAsObject())
else:
values_dict.pop(u'parent_id_prefix', None)
# Win7 - Last Connection.
# Vista/XP - Time of an insert.
event_object = windows_events.WindowsRegistryEvent(
device_key.last_written_time, registry_key.path, values_dict,
offset=registry_key.offset, source_append=self._SOURCE_APPEND)
parser_mediator.ProduceEvent(event_object)
device_parameter_key = device_key.GetSubkeyByName(u'Device Parameters')
if device_parameter_key:
event_object = windows_events.WindowsRegistryEvent(
device_parameter_key.last_written_time, registry_key.path,
values_dict, offset=registry_key.offset,
source_append=self._SOURCE_APPEND)
parser_mediator.ProduceEvent(event_object)
log_configuration_key = device_key.GetSubkeyByName(u'LogConf')
if log_configuration_key:
event_object = windows_events.WindowsRegistryEvent(
log_configuration_key.last_written_time, registry_key.path,
values_dict, offset=registry_key.offset,
source_append=self._SOURCE_APPEND)
parser_mediator.ProduceEvent(event_object)
properties_key = device_key.GetSubkeyByName(u'Properties')
if properties_key:
event_object = windows_events.WindowsRegistryEvent(
properties_key.last_written_time, registry_key.path,
values_dict, offset=registry_key.offset,
source_append=self._SOURCE_APPEND)
parser_mediator.ProduceEvent(event_object)
winreg.WinRegistryParser.RegisterPlugin(USBStorPlugin)
| apache-2.0 | 7,311,776,805,502,006,000 | 38.651786 | 79 | 0.665166 | false | 3.728799 | false | false | false |
unreal666/outwiker | src/outwiker/gui/basepagepanel.py | 2 | 4433 | # -*- coding: utf-8 -*-
import logging
import os.path
import wx
from wx.lib.scrolledpanel import ScrolledPanel
from outwiker.core.event import Event
logger = logging.getLogger('outwiker.gui.pasepagepanel')
class BasePagePanel(ScrolledPanel):
"""
    Base class for page view panels
"""
def __init__(self, parent, application):
style = wx.TAB_TRAVERSAL | wx.HSCROLL | wx.VSCROLL
super().__init__(parent, style=style)
self._currentpage = None
self._application = application
self.mainWindow = self._application.mainWindow
        # Event fired when a new page is set
        # Parameter: the new page
self._onSetPage = Event()
        # Dictionary holding information about the created tools
        # Key: a string describing the tool
        # Value: a ToolsInfo instance
self._tools = {}
@property
def allTools(self):
"""
        Return the list of ToolsInfo instances.
"""
return list(self._tools.values())
def _removeAllTools(self):
self.mainWindow.Freeze()
for toolKey in self._tools:
self.removeTool(toolKey, fullUpdate=False)
self.mainWindow.UpdateAuiManager()
self.mainWindow.Thaw()
def removeTool(self, idstring, fullUpdate=True):
if idstring not in self._tools:
logger.error('BasePagePanel.removeTool. Invalid idstring: {}'.format(idstring))
return
tool = self._tools[idstring]
if (tool.panelname in self.mainWindow.toolbars and
self.mainWindow.toolbars[tool.panelname].FindById(tool.id) is not None):
self.mainWindow.toolbars[tool.panelname].DeleteTool(tool.id, fullUpdate=fullUpdate)
tool.menu.Remove(tool.id)
self.mainWindow.Unbind(wx.EVT_MENU, id=tool.id)
del self._tools[idstring]
def enableTool(self, tool, enabled):
"""
        Enable or disable a single tool (menu item and toolbar button).
        tool is a ToolsInfo instance.
"""
tool.menu.Enable(tool.id, enabled)
if self.mainWindow.toolbars[tool.panelname].FindById(tool.id) is not None:
toolbar = self.mainWindow.toolbars[tool.panelname]
toolbar.Freeze()
toolbar.EnableTool(tool.id, enabled)
toolbar.Realize()
toolbar.Thaw()
###############################################
    # Methods that must be overridden in subclasses
###############################################
def Print(self):
"""
        Print the page.
"""
pass
def UpdateView(self, page):
"""
        Update the page view.
"""
pass
def Save(self):
"""
        Save the page.
"""
pass
def Clear(self):
"""
        Clean up after ourselves.
        Remove added UI elements and unsubscribe from events.
"""
pass
def checkForExternalEditAndSave(self):
"""
        Check whether the page was changed by external means and, if so,
        react to those changes.
"""
@property
def page(self):
return self._currentpage
@page.setter
def page(self, page):
self.Save()
self._currentpage = page
if page is not None and not os.path.exists(page.path):
return
self._onSetPage(page)
self.UpdateView(page)
def Close(self):
"""
        Close the panel.
        Must be called manually!!!
"""
self.Save()
self.CloseWithoutSave()
def CloseWithoutSave(self):
"""
        Close the panel without saving.
"""
self.Clear()
super().Close()
self.Destroy()
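# --- Hypothetical subclass sketch (illustrative; not part of OutWiker) ---
class ExamplePagePanel(BasePagePanel):
    """Minimal subclass showing the hooks a concrete panel overrides."""
    def UpdateView(self, page):
        # Rebuild the widgets for the newly selected page here.
        pass
    def Save(self):
        # Persist the editor state back to self._currentpage here.
        pass
    def Clear(self):
        # Remove added UI elements and unsubscribe from events here.
        pass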
| gpl-3.0 | -5,303,935,627,709,025,000 | 25.157534 | 95 | 0.584446 | false | 2.835189 | false | false | false |
mozilla-it/mozlibldap | examples/make-pubkeys-investigation.py | 1 | 3570 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation
# Author: [email protected]
# Requires:
# mozlibldap
from __future__ import print_function
import mozlibldap
import string
import json
import sys
LDAP_URL = 'ldap://ldap.db.scl3.mozilla.com'
LDAP_BIND_DN = '[email protected],o=com,dc=mozilla'
LDAP_BIND_PASSWD = "mysecretpassphrase"
def main():
lcli = mozlibldap.MozLDAP(LDAP_URL, LDAP_BIND_DN, LDAP_BIND_PASSWD)
searches = {}
# get a list of users that have a pubkey in ldap
users = lcli.get_all_enabled_users_attr('sshPublicKey')
for user_attr in users:
search = {}
user = user_attr[0].split(',', 1)[0].split('=', 1)[1]
print("current user: "+user, file=sys.stderr)
keys = user_attr[1]
if len(keys) == 0:
continue
contentre = '^((#.+)|(\s+)'
for pubkey in keys['sshPublicKey']:
if len(pubkey) < 5 or not (pubkey.startswith("ssh")):
continue
pubkey = string.join(pubkey.split(' ', 2)[:2], '\s')
pubkey = pubkey.replace('/', '\/')
pubkey = pubkey.replace('+', '\+')
pubkey = pubkey.replace('\r\n', '')
contentre += '|({pubkey}\s.+)'.format(pubkey=pubkey)
contentre += ')$'
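        # The assembled regex matches comment lines, whitespace-only lines, or
        # any of the user's escaped public keys, e.g. (illustrative shape):
        #   ^((#.+)|(\s+)|(ssh-rsa\sAAAA...\s.+))$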
search["names"] = []
search["names"].append("^authorized_keys$")
search["contents"] = []
search["contents"].append(contentre)
paths = []
try:
paths = get_search_paths(lcli, user)
except:
continue
if not paths or len(paths) < 1:
continue
search["paths"] = paths
search["options"] = {}
search["options"]["matchall"] = True
search["options"]["macroal"] = True
search["options"]["maxdepth"] = 1
search["options"]["mismatch"] = []
search["options"]["mismatch"].append("content")
print(json.dumps(search), file=sys.stderr)
searches[user+"_ssh_pubkeys"] = search
action = {}
action["name"] = "Investigate the content of authorized_keys for LDAP users"
action["target"] = "(name LIKE 'admin%' OR name LIKE 'ssh%' " + \
"OR name LIKE 'people%' OR name LIKE 'zlb%' OR name IN " + \
"('reviewboard-hg1.dmz.scl3.mozilla.com', 'hgssh.stage.dmz.scl3.mozilla.com', " + \
"'hgssh1.dmz.scl3.mozilla.com', 'hgssh2.dmz.scl3.mozilla.com', " + \
"'git1.dmz.scl3.mozilla.com', 'git1.private.scl3.mozilla.com', " + \
"'svn1.dmz.phx1.mozilla.com', 'svn2.dmz.phx1.mozilla.com', " + \
"'svn3.dmz.phx1.mozilla.com')) AND tags->>'operator'='IT' AND " + \
"mode='daemon' AND status='online'"
action["version"] = 2
action["operations"] = []
operation = {}
operation["module"] = "file"
operation["parameters"] = {}
operation["parameters"]["searches"] = searches
action["operations"].append(operation)
print(json.dumps(action, indent=4, sort_keys=True))
def get_search_paths(lcli, user):
paths = []
res = lcli.query("mail="+user, ['homeDirectory', 'hgHome',
'stageHome', 'svnHome'])
for attr in res[0][1]:
try:
paths.append(res[0][1][attr][0]+"/.ssh")
except:
continue
return paths
if __name__ == "__main__":
main()
| mpl-2.0 | -8,203,583,225,681,633,000 | 35.428571 | 95 | 0.564426 | false | 3.479532 | false | false | false |
BlackHole/enigma2-obh10 | lib/python/Components/TimerSanityCheck.py | 2 | 13683 | import NavigationInstance
from time import localtime, mktime, gmtime, time
from enigma import iServiceInformation, eServiceCenter, eServiceReference, getBestPlayableServiceReference
from timer import TimerEntry
import RecordTimer
from Tools.CIHelper import cihelper
from Components.config import config
class TimerSanityCheck:
def __init__(self, timerlist, newtimer=None):
self.localtimediff = 25 * 3600 - mktime(gmtime(25 * 3600))
self.timerlist = timerlist
self.newtimer = newtimer
self.simultimer = []
self.rep_eventlist = []
self.nrep_eventlist = []
self.bflag = -1
self.eflag = 1
def check(self, ext_timer=None):
if ext_timer and isinstance(ext_timer, RecordTimer.RecordTimerEntry):
self.newtimer = ext_timer
self.simultimer = []
if self.newtimer:
if not self.newtimer.conflict_detection or (self.newtimer.service_ref and '%3a//' in self.newtimer.service_ref.ref.toString()):
print "[TimerSanityCheck] Exception - timer does not have to be checked!"
return True
self.simultimer = [self.newtimer]
return self.checkTimerlist()
def getSimulTimerList(self):
return self.simultimer
def doubleCheck(self):
if self.newtimer and self.newtimer.service_ref and self.newtimer.service_ref.ref.valid():
self.simultimer = [self.newtimer]
for timer in self.timerlist:
if timer == self.newtimer:
return True
if self.newtimer.begin >= timer.begin and self.newtimer.end <= timer.end:
if timer.justplay and not self.newtimer.justplay:
continue
if timer.service_ref.ref.flags & eServiceReference.isGroup:
if self.newtimer.service_ref.ref.flags & eServiceReference.isGroup and timer.service_ref.ref.getPath() == self.newtimer.service_ref.ref.getPath():
return True
continue
getUnsignedDataRef1 = timer.service_ref.ref.getUnsignedData
getUnsignedDataRef2 = self.newtimer.service_ref.ref.getUnsignedData
for x in (1, 2, 3, 4):
if getUnsignedDataRef1(x) != getUnsignedDataRef2(x):
break
else:
return True
return False
def checkTimerlist(self, ext_timer=None):
#with special service for external plugins
# Entries in eventlist
# timeindex
# BeginEndFlag 1 for begin, -1 for end
# index -1 for the new Timer, 0..n index of the existing timers
# count of running timers
serviceHandler = eServiceCenter.getInstance()
# create a list with all start and end times
# split it into recurring and singleshot timers
##################################################################################
# process the new timer
self.rep_eventlist = []
self.nrep_eventlist = []
if ext_timer and isinstance(ext_timer, RecordTimer.RecordTimerEntry):
self.newtimer = ext_timer
#GML:1 - A timer which has already ended (happens during start-up check) can't clash!!
#
# NOTE: that when adding a timer it also cannot clash with:
# o any timers which run before the latest period of no timers running
# before the timer to be added starts
# o any timers which run after the first period of no timers running
# after the timer to be added ends
# Code to handle this needs to be added (it is *NOT* here yet!)
#
if (self.newtimer is not None) and (self.newtimer.end < time()): # does not conflict
return True
if not self.newtimer or not self.newtimer.service_ref or not self.newtimer.service_ref.ref.valid():
print "[TimerSanityCheck] Error - timer not valid!"
return False
if self.newtimer.disabled or not self.newtimer.conflict_detection or '%3a//' in self.newtimer.service_ref.ref.toString():
print "[TimerSanityCheck] Exception - timer does not have to be checked!"
return True
curtime = localtime(time())
if curtime.tm_year > 1970 and self.newtimer.end < time():
print "[TimerSanityCheck] timer is finished!"
return True
rflags = self.newtimer.repeated
rflags = ((rflags & 0x7F) >> 3) | ((rflags & 0x07) << 4)
if rflags:
begin = self.newtimer.begin % 86400 # map to first day
if (self.localtimediff > 0) and ((begin + self.localtimediff) > 86400):
rflags = ((rflags >> 1) & 0x3F) | ((rflags << 6) & 0x40)
elif (self.localtimediff < 0) and (begin < self.localtimediff):
rflags = ((rflags << 1) & 0x7E) | ((rflags >> 6) & 0x01)
while rflags: # then arrange on the week
if rflags & 1:
self.rep_eventlist.append((begin, -1))
begin += 86400
rflags >>= 1
else:
self.nrep_eventlist.extend([(self.newtimer.begin, self.bflag, -1), (self.newtimer.end, self.eflag, -1)])
##################################################################################
# now process existing timers
self.check_timerlist = []
idx = 0
for timer in self.timerlist:
if timer != self.newtimer:
if timer.disabled or not timer.conflict_detection or not timer.service_ref or '%3a//' in timer.service_ref.ref.toString() or timer.state == TimerEntry.StateEnded:
continue
if timer.repeated:
rflags = timer.repeated
rflags = ((rflags & 0x7F) >> 3) | ((rflags & 0x07) << 4)
begin = timer.begin % 86400 # map all to first day
if (self.localtimediff > 0) and ((begin + self.localtimediff) > 86400):
rflags = ((rflags >> 1) & 0x3F) | ((rflags << 6) & 0x40)
elif (self.localtimediff < 0) and (begin < self.localtimediff):
rflags = ((rflags << 1) & 0x7E) | ((rflags >> 6) & 0x01)
while rflags:
if rflags & 1:
self.rep_eventlist.append((begin, idx))
begin += 86400
rflags >>= 1
else:
self.nrep_eventlist.extend([(timer.begin, self.bflag, idx), (timer.end, self.eflag, idx)])
self.check_timerlist.append(timer)
idx += 1
################################################################################
		# journalize timer repetitions
if self.nrep_eventlist:
interval_begin = min(self.nrep_eventlist)[0]
interval_end = max(self.nrep_eventlist)[0]
offset_0 = interval_begin - (interval_begin % 604800)
weeks = (interval_end - offset_0) / 604800
if (interval_end - offset_0) % 604800:
weeks += 1
for cnt in range(int(weeks)):
for event in self.rep_eventlist:
if event[1] == -1: # -1 is the identifier of the changed timer
event_begin = self.newtimer.begin
event_end = self.newtimer.end
else:
event_begin = self.check_timerlist[event[1]].begin
event_end = self.check_timerlist[event[1]].end
new_event_begin = event[0] + offset_0 + (cnt * 604800)
# summertime correction
new_lth = localtime(new_event_begin).tm_hour
new_event_begin += 3600 * (localtime(event_begin).tm_hour - new_lth)
new_event_end = new_event_begin + (event_end - event_begin)
if event[1] == -1:
if new_event_begin >= self.newtimer.begin: # is the soap already running?
self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]), (new_event_end, self.eflag, event[1])])
else:
if new_event_begin >= self.check_timerlist[event[1]].begin: # is the soap already running?
self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]), (new_event_end, self.eflag, event[1])])
else:
offset_0 = 345600 # the Epoch begins on Thursday
for cnt in (0, 1): # test two weeks to take care of Sunday-Monday transitions
for event in self.rep_eventlist:
if event[1] == -1: # -1 is the identifier of the changed timer
event_begin = self.newtimer.begin
event_end = self.newtimer.end
else:
event_begin = self.check_timerlist[event[1]].begin
event_end = self.check_timerlist[event[1]].end
new_event_begin = event[0] + offset_0 + (cnt * 604800)
new_event_end = new_event_begin + (event_end - event_begin)
self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]), (new_event_end, self.eflag, event[1])])
################################################################################
# order list chronological
self.nrep_eventlist.sort()
##################################################################################
# detect overlapping timers and overlapping times
fakeRecList = []
ConflictTimer = None
ConflictTunerType = None
newTimerTunerType = None
cnt = 0
idx = 0
overlaplist = []
is_ci_use = 0
is_ci_timer_conflict = 0
ci_timer = False
if config.misc.use_ci_assignment.value and cihelper.ServiceIsAssigned(self.newtimer.service_ref.ref) and not (self.newtimer.record_ecm and not self.newtimer.descramble):
ci_timer = self.newtimer
ci_timer_begin = ci_timer.begin
ci_timer_end = ci_timer.end
ci_timer_dur = ci_timer_end - ci_timer_begin
ci_timer_events = []
for ev in self.nrep_eventlist:
if ev[2] == -1:
ci_timer_events.append((ev[0], ev[0] + ci_timer_dur))
for event in self.nrep_eventlist:
cnt += event[1]
if event[2] == -1: # new timer
timer = self.newtimer
else:
timer = self.check_timerlist[event[2]]
if event[1] == self.bflag:
tunerType = []
ref = timer.service_ref and timer.service_ref.ref
timer_ref = timer.service_ref
if ref and ref.flags & eServiceReference.isGroup and timer.isRunning():
timer_ref = getBestPlayableServiceReference(timer.service_ref.ref, eServiceReference())
fakeRecService = NavigationInstance.instance.recordService(timer_ref, True)
if fakeRecService:
fakeRecResult = fakeRecService.start(True)
else:
fakeRecResult = -1
# TODO
#if fakeRecResult == -6 and len(NavigationInstance.instance.getRecordings(True)) < 2:
# print "[TimerSanityCheck] less than two timers in the simulated recording list - timer conflict is not plausible - ignored !"
# fakeRecResult = 0
if not fakeRecResult: # tune okay
if hasattr(fakeRecService, 'frontendInfo'):
feinfo = fakeRecService.frontendInfo()
if feinfo and hasattr(feinfo, 'getFrontendData'):
tunerType.append(feinfo.getFrontendData().get("tuner_type", -1))
feinfo = None
else: # tune failed.. so we must go another way to get service type (DVB-S, DVB-T, DVB-C)
def getServiceType(ref): # helper function to get a service type of a service reference
serviceInfo = serviceHandler.info(ref)
serviceInfo = serviceInfo and serviceInfo.getInfoObject(ref, iServiceInformation.sTransponderData)
return -1 if serviceInfo is None else serviceInfo.get("tuner_type", -1)
if ref and ref.flags & eServiceReference.isGroup: # service group ?
serviceList = serviceHandler.list(ref) # get all alternative services
if serviceList:
for ref in serviceList.getContent("R"): # iterate over all group service references
type = getServiceType(ref)
         if type not in tunerType: # just add single time
tunerType.append(type)
elif ref:
tunerType.append(getServiceType(ref))
if event[2] == -1: # new timer
newTimerTunerType = tunerType
overlaplist.append((fakeRecResult, timer, tunerType))
fakeRecList.append((timer, fakeRecService))
if fakeRecResult:
if ConflictTimer is None: # just take care of the first conflict
ConflictTimer = timer
ConflictTunerType = tunerType
elif event[1] == self.eflag:
for fakeRec in fakeRecList:
if timer == fakeRec[0] and fakeRec[1]:
NavigationInstance.instance.stopRecordService(fakeRec[1])
fakeRecList.remove(fakeRec)
fakeRec = None
for entry in overlaplist:
if entry[1] == timer:
overlaplist.remove(entry)
else:
print "[TimerSanityCheck] bug: unknown flag!"
if ci_timer and timer != ci_timer and cihelper.ServiceIsAssigned(timer.service_ref.ref) and not (timer.record_ecm and not timer.descramble):
if event[1] == self.bflag:
timer_begin = event[0]
timer_end = event[0] + (timer.end - timer.begin)
else:
timer_end = event[0]
timer_begin = event[0] - (timer.end - timer.begin)
for ci_ev in ci_timer_events:
if (ci_ev[0] >= timer_begin and ci_ev[0] <= timer_end) or (ci_ev[1] >= timer_begin and ci_ev[1] <= timer_end):
if ci_timer.service_ref.ref != timer.service_ref.ref:
is_ci_timer_conflict = 1
break
if is_ci_timer_conflict == 1:
if ConflictTimer is None:
ConflictTimer = timer
ConflictTunerType = tunerType
self.nrep_eventlist[idx] = (event[0], event[1], event[2], cnt, overlaplist[:]) # insert a duplicate into current overlaplist
fakeRecService = None
fakeRecResult = None
idx += 1
if ConflictTimer is None:
print "[TimerSanityCheck] conflict not found!"
return True
##################################################################################
# we have detected a conflict, now we must figure out the involved timers
if self.newtimer is not ConflictTimer: # the new timer is not the conflicting timer?
for event in self.nrep_eventlist:
if len(event[4]) > 1: # entry in overlaplist of this event??
kt = False
nt = False
for entry in event[4]:
if entry[1] is ConflictTimer:
kt = True
if entry[1] is self.newtimer:
nt = True
if nt and kt:
ConflictTimer = self.newtimer
ConflictTunerType = newTimerTunerType
break
self.simultimer = [ConflictTimer]
for event in self.nrep_eventlist:
if len(event[4]) > 1: # entry in overlaplist of this event??
for entry in event[4]:
if entry[1] is ConflictTimer:
break
else:
continue
for entry in event[4]:
if not entry[1] in self.simultimer:
for x in entry[2]:
if x in ConflictTunerType:
self.simultimer.append(entry[1])
break
if len(self.simultimer) < 2:
print "[TimerSanityCheck] possible bug: unknown conflict!"
return True
print "[TimerSanityCheck] conflict detected!"
return False
| gpl-2.0 | -3,732,489,084,296,369,700 | 39.602374 | 171 | 0.655631 | false | 3.240881 | false | false | false |
scorpilix/Golemtest | golem/interface/client/environments.py | 1 | 1674 | from golem.core.deferred import sync_wait
from golem.interface.command import group, Argument, command, CommandResult
@group(name="envs", help="Manage environments")
class Environments(object):
name = Argument('name', help="Environment name")
table_headers = ['name', 'supported', 'active', 'performance',
'description']
sort = Argument(
'--sort',
choices=table_headers,
optional=True,
default=None,
help="Sort environments"
)
@command(argument=sort, help="Show environments")
def show(self, sort):
deferred = Environments.client.get_environments()
result = sync_wait(deferred) or []
values = []
for env in result:
values.append([
env['id'],
str(env['supported']),
str(env['accepted']),
str(env['performance']),
env['description']
])
return CommandResult.to_tabular(Environments.table_headers, values,
sort=sort)
@command(argument=name, help="Enable environment")
def enable(self, name):
deferred = Environments.client.enable_environment(name)
return sync_wait(deferred)
@command(argument=name, help="Disable environment")
def disable(self, name):
deferred = Environments.client.disable_environment(name)
return sync_wait(deferred)
@command(argument=name, help="Recount performance for an environment")
def recount(self, name):
deferred = Environments.client.run_benchmark(name)
return sync_wait(deferred, timeout=1800)
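# Illustrative CLI usage (assumes the standard `golemcli` entry point; the
# environment name is hypothetical):
#   golemcli envs show --sort name
#   golemcli envs enable BLENDER
#   golemcli envs recount BLENDER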
| gpl-3.0 | 2,024,806,146,087,001,300 | 30 | 75 | 0.603345 | false | 4.440318 | false | false | false |
pyrrho314/recipesystem | trunk/astrodata/DataSpider.py | 1 | 42351 | try:
import pyfits
except ImportError:
    pass  # pyfits is optional; only needed when FITS headers are inspected
import os
import re
import sys
from AstroData import *
ldebug = False
verbose = False
from astrodata.adutils import terminal
from ReductionContextRecords import AstroDataRecord
import subprocess
from copy import copy,deepcopy
from AstroData import Errors
from astrodata import new_pyfits_version
uselocalcalserv = False
batchno = 100
if uselocalcalserv: # takes WAY TOO LONG~!!!!!!
from astrodata.LocalCalibrationService import CalibrationService
from CalibrationDefinitionLibrary import CalibrationDefinitionLibrary # For xml calibration requests
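# shallow_walk mimics os.walk for a single directory level: it yields
# (root, [], files) tuples, batching the file lists (batch size taken from
# the global `batchno`) so very large directories can be processed
# incrementally instead of all at once.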
def shallow_walk(directory):
global batchno
opti = False
if opti:
print "sw: going to call os.listdir"
ld = os.listdir(directory)
if opti:
print "sw: called os.listdir"
root = directory
dirn = []
files = []
if opti:
print "sw: sorting directories from files in directory"
if batchno != None:
batchsize = batchno
else:
batchsize = 100
for li in ld:
if os.path.isdir(li):
dirn.append(li)
else:
files.append(li)
if len(files)> batchsize:
if opti:
print "yielding batch of " + str(batchsize)
print repr(files)
yield (root, [], files)
files = []
if opti:
print "sw: yielding"
yield (root, [], files)
class DataSpider(object):
"""
DataSpider() is a work class to encapsulate
reusable code to work the AstroData related classes.
e.g. it will walk a directory using AstroData
to check type sizes.
"""
hdulist = None
contextType = None
classification_library = None
cal_search = None
def __init__(self, context = None):
# ==== member vars ====
self.contextType = context
self.classification_library = self.get_classification_library()
if uselocalcalserv:
self.calService = CalibrationService()
self.calDefLib = CalibrationDefinitionLibrary()
def get_classification_library(self):
# @@todo: handle context here
if (self.classification_library == None):
try:
self.classification_library = ClassificationLibrary()
except CLAlreadyExists, s:
self.classification_library = s.clInstance
return self.classification_library
def dumpinfo(self):
#print self.hdulist.info()
if new_pyfits_version:
cards = self.hdulist[0].header.cards
else:
cards = self.hdulist[0].header.ascard
for hd in self.hdulist:
if (hd.data != None):
try:
print hd.data.type()
except:
print "Table"
def typewalk(self, directory = ".", only = "all", pheads = None,
showinfo = False,
onlyStatus = False,
onlyTypology = False,
# generic descriptors interface
showDescriptors = None, # string of comma separated descriptor names (function names!)
filemask = None,
showCals = False,
incolog = True,
stayTop = False,
recipe = None,
raiseExcept = False,
where = None,
batchnum = None,
opti = None):
"""
Recursively walk a given directory and put type information to stdout
"""
global verbose
global debug
global batchno
if batchnum != None:
batchno = batchnum
if raiseExcept:
from astrodata.debugmodes import set_descriptor_throw
set_descriptor_throw(True)
onlylist = only.split(",")
if (verbose):
print "onlylist:",repr(onlylist)
verbose = False
ldebug = False
dirnum = 0
if stayTop == True:
walkfunc = shallow_walk
if opti:
print "Doing a shallow walk"
else:
walkfunc = os.walk
if opti:
print "Doing an os.walk"
for root,dirn,files in walkfunc(directory):
verbose = False
if opti:
print "Analyzing:", root
dirnum += 1
if (verbose) :
print "DS90:",root,dirn,files
#print "root:", root
#print "dirn:", dirn
#if verbose:
# print "DS92:",root, repr(dirn), repr(file)
if (".svn" not in root):
width = 10
## !!!!!
## !!!!! CREATE THE LINE WRITTEN FOR EACH DIRECTORY RECURSED !!!!!
## !!!!!
fullroot = os.path.abspath(root)
if verbose:
print 'DS91:',fullroot
if root == ".":
rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}. ("+fullroot + ")${NORMAL}"
else:
rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}"+root + "${NORMAL}"
firstfile = True
for tfile in files:
# we have considered removing this check in place of a
# pyfits open but that was not needed, the pyfits open
# is down lower, this is just to avoid checking files
# that are not named correctly to be FITS, so why check them?
# especially on a command recursing directories and potentially
# looking at a lot of files.
if filemask == None:
# @@NAMING: fits file mask for typewalk
mask = r".*?\.(fits|FITS)$"
else:
mask = filemask
try:
matched = re.match(mask, tfile)
except:
print "BAD FILEMASK (must be a valid regular expression):", mask
return str(sys.exc_info()[1])
      if (matched) :
if (ldebug) : print "FITS:", tfile
fname = os.path.join(root, tfile)
try:
# NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
# fl is the astrodata instance of tfile/fname
fl = AstroData(fname)
#
# NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
except KeyboardInterrupt:
raise
except:
mes = "Could not open file: %s as AstroData" % fname
print mes
# raise Errors.AstroDataError(mes)
continue
gain = 0
stringway = False
if (stringway):
if (onlyTypology == onlyStatus):
dtypes = self.classification_library.discover_types(fname)
elif (onlyTypology):
dtypes = self.classification_library.discover_typology(fname)
elif (onlyStatus):
dtypes = self.classification_library.discover_status(fname)
else:
# this is the AstroData Class way
# to ask the file itself
if (onlyTypology == onlyStatus):
dtypes = fl.discover_types()
elif (onlyTypology):
dtypes = fl.discover_typology()
elif (onlyStatus):
dtypes = fl.discover_status()
if verbose:
print "DS130:", repr(dtypes)
# print "after classification"
if (dtypes != None) and (len(dtypes)>0):
#check to see if only is set
#only check for given type
found = False
if (only == "all"):
found=True
else:
# note: only can be split this way with no worry about
# whitespace because it's from the commandline, no whitespace
# allowed in that argument, just "," as a separator
ol = only.split(",")
# print ol
found = False
for tpname in dtypes:
if (verbose):
print "DS148", " in ", repr(ol),
if (tpname in ol):
found = True
break
if (verbose):
print "yes, found = ", str(found)
if (found == True):
if where != None:
# let them use underscore as spaces, bash + getopts doesn't like space in params even in quotes
cleanwhere = re.sub("_"," ", where)
ad = fl
try:
found = eval(cleanwhere)
except:
print "can't execute where:\n\t" + where + "\n\t" +cleanwhere
print "reason:\n\t"+str(sys.exc_info()[1])+"\n"+repr(sys.exc_info())
sys.exit(1)
if (found != True):
continue
if (firstfile == True):
print rootln
firstfile = False
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!!!PRINTING OUT THE FILE AND TYPE INFO!!!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
indent = 5
pwid = 40
fwid = pwid - indent
# print start of string
#print "DS270:", len(tfile)
       while len(tfile) >= fwid-1:
        print "     ${BG_WHITE}%s${NORMAL}" % tfile
        tfile = ""
if len(tfile)>0:
prlin = " %s " % tfile
prlincolor = " ${BG_WHITE}%s${NORMAL} " % tfile
else:
prlin = " "
prlincolor = " "
empty = " "*indent + "."*fwid
fwid = pwid+indent
lp = len(prlin)
nsp = pwid - ( lp % pwid )
# print out indent, filename, and "..." to justify types area"
# there is a way to do with with a comprehension?
print prlincolor+("."*nsp)+"${NORMAL}",
# print dtypes
tstr = ""
termsize = terminal.getTerminalSize()
maxlen = termsize[0] - pwid -1
printed = False
dtypes.sort()
for dtype in dtypes:
if (dtype != None):
newtype = "(%s) " % dtype
else:
newtype = "(Unknown) "
# print "(%s)N20091027S0133.fits" % dtype ,
astr = tstr + newtype
if len(astr) >= maxlen:
print "${BLUE}"+ tstr + "${NORMAL}"
tstr = newtype
print empty,
else:
tstr = astr
if tstr != "":
print "${BLUE}"+ tstr + "${NORMAL}"
tstr = ""
astr = ""
printed = True
# new line at the end of the output
# print ""
if (showinfo == True):
print "-"*40
print "AstroData.info():"
fl.info()
print "-"*40
print "pyfits.info():"
fl.hdulist.info()
print "-"*40
#hlist = pyfits.open(fname)
#hlist.info()
#hlist.close()
# print descriptors
# show descriptors
if (showDescriptors != None):
sdl = showDescriptors.split(",")
if verbose:
print "DS320:", repr(sdl)
# print ol
# get maxlen
if "err" in sdl:
errOnly = True
sdl.remove("err")
else:
errOnly = False
maxlen = 0
for sd in sdl:
maxlen = max(len(sd),maxlen)
for sd in sdl:
#print "DS242:", sd
try:
if "(" not in sd:
dval = eval("fl."+sd+"(asList=True)")
else:
#print "DS333:", repr(sd)
dval = eval("fl."+sd)
pad = " " * (maxlen - len(sd))
sd = str(sd) + pad
if dval:
if (not errOnly):
print (" ${BOLD}%s${NORMAL} = %s") % (sd, str(dval))
else:
print ' ${BOLD}(DERR)%s${NORMAL}: ${RED}returned None${NORMAL}' % (sd)
except AttributeError:
exinfo = sys.exc_info()
print ' ${BOLD}(DERR)%s${NORMAL}: ${RED}NO SUCH DESCRIPTOR${NORMAL}' % (sd)
#if raiseExcept:
# raise
except KeyboardInterrupt:
raise
except:
# pad = " " * (maxlen - len(sd))
# sd = str(sd) + pad
exinfo = sys.exc_info()
print ' ${BOLD}(DERR)%s${NORMAL}: ${RED}%s${NORMAL}' % (sd, repr(exinfo[1]).strip())
#if raiseExcept:
# raise
# if phead then there are headers to print per file
if (pheads != None):
#print " -----------"sys.exec
print " ${UNDERLINE}PHU Headers${NORMAL}"
#print " -----------"
#print "pheads", pheads
hlist = pyfits.open(fname)
pheaders = pheads.split(",")
for headkey in pheaders:
#if in phu, this is the code
try:
print " %s = (%s)" % (headkey, hlist[0].header[headkey])
except KeyError:
print " %s not present in PHU of %s" % (headkey, tfile)
hlist.close()
if (showCals == True):
from astrodata.adutils.adccutils.calutil import localCalibrationSearch
from astrodata.adutils.adccutils.calutil import geminiCalibrationSearch
calurls = localCalibrationSearch(fl)
print " ${BOLD}Local Calibration Search${NORMAL}"
if calurls != None:
for caltyp in calurls.keys():
print " ${BOLD}%s${NORMAL}: %s" % (caltyp, calurls[caltyp])
else:
print " ${RED}No Calibrations Found${NORMAL}"
calurls = geminiCalibrationSearch(fl)
print " ${BOLD}Gemini Calibration Search${NORMAL}"
if calurls != None:
for caltyp in calurls.keys():
print " ${BOLD}%s${NORMAL}: %s" % (caltyp, calurls[caltyp])
else:
print " ${RED}No Calibrations Found${NORMAL}"
if (recipe):
banner = ' Running Recipe "%s" on %s ' % (recipe, fname)
print "${REVERSE}${RED}" + " "*len(banner)
print banner
print " "*len(banner)+"${NORMAL}"
if recipe == "default":
rs = ""
else:
rs = "-r %s" % recipe
subprocess.call("reduce %s %s" % (rs, fname), shell=True)
else:
if (verbose) : print "%s is not a FITS file" % tfile
def datasetwalk(self, directory = ".", only = "all", pheads = None,
showinfo = False,
onlyStatus = False,
onlyTypology = False,
# generic descriptors interface
showDescriptors = None, # string of comma separated descriptor names (function names!)
filemask = None,
showCals = False,
incolog = True,
stayTop = False,
recipe = None,
raiseExcept = False,
where = None,
batchnum = None,
opti = None):
"""
Recursively walk a given directory and put type information to stdout
"""
# About the DirDict class
"""
The DirDict class represents a single directory, and all it's contents
that are relevant. It is filled by the client code (datasetwalk)
so that only "relevant" files are added, and only directories containing
relevant files are shown. Allows iteration to, for example, populate
a tree control.
Note, the path given is the root path, the user has no access to any
parent or sibling directories. However... also note, it is a locally
running action, it just happens to use a web interface rather than
tk, qt, etc. Actions may be final.
"""
dirdict = DirDict(os.path.abspath(directory))
global verbose
global debug
global batchno
if batchnum != None:
batchno = batchnum
onlylist = only.split(",")
if (verbose):
print "onlylist:",repr(onlylist)
print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> DATA SPI'
verbose = True
ldebug = True
dirnum = 0
if stayTop == True:
walkfunc = shallow_walk
if opti:
print "Doing a shallow walk"
else:
walkfunc = os.walk
if opti:
print "Doing an os.walk"
for root,dirn,files in walkfunc(directory):
#dirdict.adddir(root)
if opti:
print "Analyzing:", root
dirnum += 1
if (verbose) :
print "root:", root
print "dirn:", dirn
if verbose:
print "DS92:",root, repr(dirn), repr(file)
if (".svn" not in root):
width = 10
## !!!!!
## !!!!! CREATE THE LINE WRITTEN FOR EACH DIRECTORY RECURSED !!!!!
## !!!!!
fullroot = os.path.abspath(root)
if root == ".":
rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}. ("+fullroot + ")${NORMAL}"
else:
rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}"+root + "${NORMAL}"
firstfile = True
# print "DS472:", repr(files)
for tfile in files:
      if tfile is None:
       raise Exception("unexpected None entry in file list: %s" % repr(files))
# we have considered removing this check in place of a
# pyfits open but that was not needed, the pyfits open
# is down lower, this is just to avoid checking files
# that are not named correctly to be FITS, so why check them?
# especially on a command recursing directories and potentially
# looking at a lot of files.
if filemask == None:
# @@NAMING: fits file mask for typewalk
mask = r".*?\.(fits|FITS)$"
else:
mask = filemask
try:
matched = re.match(mask, tfile)
except:
print "BAD FILEMASK (must be a valid regular expression):", mask
return str(sys.exc_info()[1])
sys.stdout.write(".")
      if (matched) :
if (ldebug) : print "FITS:", tfile
fname = os.path.join(root, tfile)
try:
fl = AstroData(fname)
except KeyboardInterrupt:
raise
except:
       mes = "Could not open %s as AstroData" % fname
       print mes
       continue
gain = 0
stringway = False
if (stringway):
if (onlyTypology == onlyStatus):
dtypes = self.classification_library.discover_types(fname)
elif (onlyTypology):
dtypes = self.classification_library.discover_typology(fname)
elif (onlyStatus):
dtypes = self.classification_library.discover_status(fname)
else:
# this is the AstroData Class way
# to ask the file itself
if (onlyTypology == onlyStatus):
dtypes = fl.discover_types()
elif (onlyTypology):
dtypes = fl.discover_typology()
elif (onlyStatus):
dtypes = fl.discover_status()
if verbose:
print "DS130:", repr(dtypes)
# print "after classification"
if (dtypes != None) and (len(dtypes)>0):
#check to see if only is set
#only check for given type
found = False
if (only == "all"):
found=True
else:
# note: only can be split this way with no worry about
# whitespace because it's from the commandline, no whitespace
# allowed in that argument, just "," as a separator
ol = only.split(",")
# print ol
found = False
for tpname in dtypes:
if (verbose):
print "DS148", " in ", repr(ol),
if (tpname in ol):
found = True
break
if (verbose):
print "yes, found = ", str(found)
if (found == True):
if where != None:
# let them use underscore as spaces, bash + getopts doesn't like space in params even in quotes
cleanwhere = re.sub("_"," ", where)
ad = fl
try:
found = eval(cleanwhere)
except:
print "can't execute where:\n\t" + where + "\n\t" +cleanwhere
print "reason:\n\t"+str(sys.exc_info()[1])+"\n"+repr(sys.exc_info())
sys.exit(1)
if (found != True):
continue
if (firstfile == True):
pass # print rootln
firstfile = False
#dirdict tending
dirdict.add_dir(fullroot)
dirdict.add_file(tfile, root=fullroot)
sys.stdout.write("+")
sys.stdout.flush()
if tfile != "":
dirdict.add_file_prop(tfile, root= fullroot, propname="types", propval=dtypes)
# new line at the end of the output
# print ""
# show descriptors
if (showDescriptors != None):
sdl = showDescriptors.split(",")
# print ol
# get maxlen
maxlen = 0
for sd in sdl:
maxlen = max(len(sd),maxlen)
# print "DS595:", repr(fl.gain(as_dict=True))
# print "DS596:", repr(fl.amp_read_area(asList = True))
for sd in sdl:
#print "DS242:", sd
try:
if "(" not in sd:
dval = eval("fl."+sd+"(asList=True)")
else:
dval = eval("fl."+sd)
pad = " " * (maxlen - len(sd))
sd = str(sd) + pad
print (" ${BOLD}%s${NORMAL} = %s") % (sd, str(dval))
except AttributeError:
pad = " " * (maxlen - len(sd))
sd = str(sd) + pad
exinfo = sys.exc_info()
print " ${BOLD}%s${NORMAL} = ${RED}NO SUCH DESCRIPTOR${NORMAL}" % (sd)
if raiseExcept:
raise
except:
pad = " " * (maxlen - len(sd))
sd = str(sd) + pad
print (" ${BOLD}%s${NORMAL} = ${RED}FAILED${NORMAL}: %s") % (sd, str(sys.exc_info()[1]))
if raiseExcept:
raise
# if phead then there are headers to print per file
if (pheads != None):
#print " -----------"sys.exec
print " ${UNDERLINE}PHU Headers${NORMAL}"
#print " -----------"
#print "pheads", pheads
hlist = pyfits.open(fname)
pheaders = pheads.split(",")
for headkey in pheaders:
#if in phu, this is the code
try:
print " %s = (%s)" % (headkey, hlist[0].header[headkey])
except KeyError:
print " %s not present in PHU of %s" % (headkey, tfile)
hlist.close()
if (showCals == True):
adr = AstroDataRecord(fl)
for caltyp in ["bias", "twilight"]:
rq = self.calDefLib.get_cal_req([adr],caltyp)[0]
try:
cs = "%s" % (str(self.calService.search(rq)[0]))
except:
cs = "No %s found, %s " % ( caltyp, str(sys.exc_info()[1]))
print " %10s: %s" % (caltyp, cs)
if (recipe):
banner = ' Running Recipe "%s" on %s ' % (recipe, fname)
print "${REVERSE}${RED}" + " "*len(banner)
print banner
print " "*len(banner)+"${NORMAL}"
if recipe == "default":
rs = ""
else:
rs = "-r %s" % recipe
subprocess.call("reduce %s %s" % (rs, fname), shell=True)
else:
if (verbose) : print "%s is not a FITS file" % tfile
print ""
return dirdict
def path2list(path):
# this is because path.split doesn't split dirs with trailing /'s
if path[-1]==os.sep:
path = path[:-1]
upath = path
palist = []
while True:
upath, tail = os.path.split(upath)
if tail == "":
    break
else:
palist.insert(0, tail)
return palist
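# Example: path2list("/data/night1/raw") returns ["data", "night1", "raw"]
# (a trailing os.sep is stripped before the repeated os.path.split calls).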
class DirDict(object):
rootdir = None
rootdirlist = None
direntry = None
givenRootdir = None
entryDict = None
def __init__(self, rootdir = "."):
self.givenRootdir = rootdir
self.rootdir = os.path.abspath(rootdir)
self.direntry = DirEntry("",parent=self)
self.entryDict = {}
def report_entry( self, name, path):
self.entryDict.update({name:path})
def reldir(self, dirname):
if dirname[:len(self.rootdir)] != self.rootdir:
raise "this shouldn't happen, maybe a security breach"
else:
return dirname[len(self.rootdir):]
def add_dir(self, path):
# print "DS746: adding path", path
if path[:len(self.rootdir)] != self.rootdir:
raise "can't add that bad directory! "+path
relpath = path[len(self.rootdir):]
if self.direntry.path == relpath:
# print "DS750: path is already added at top:", path
return
else:
# print "DS753: having subdir add path if need be"
pathlist = path2list(relpath)
rpathlist = copy(pathlist)
self.direntry.add_dir(rpathlist)
def add_file(self, filename, root = None):
if root == None:
base = os.path.basename(filename)
dirn = os.path.dirname(filename)
else:
dirn = os.path.join(root,os.path.dirname(filename))
base = os.path.basename(filename)
# print "DS765:", repr(dirn)
dirlist = path2list(self.reldir(dirn))
# print "DS767:", repr(dirlist)
self.direntry.add_file(FileEntry(base,dirn), dirlist)
def add_file_prop(self, filename, root=None, propname = None, propval = None):
#print "\nDS775:", repr(filename), repr(root)
targfileent=self.direntry.find_file_entry(filename, root)
#print "DS777:",repr(targfileent), repr(filename), repr(root)
#print "DS778:",targfileent.fullpath()
targfileent.add_prop(propname, propval)
def fullpath(self):
return self.rootdir
def dirwalk(self):
for direntry in self.direntry.dirwalk():
#print "DS760:", direntry.path, direntry.fullpath(),direntry
yield direntry
def get_full_path(self,filename):
if filename in self.entryDict:
return os.path.join(self.entryDict[filename], filename)
else:
return None
def as_xml(self):
return self.direntry.as_xml()
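# Illustrative usage (hypothetical directory path; a sketch, not part of the
# original API documentation):
#   spider = DataSpider()
#   dirdict = spider.datasetwalk(directory="/data/raw")
#   for dirent in dirdict.dirwalk():
#       print dirent.fullpath()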
class DirEntry(object):
path = None
files = None
dirs = None
parent = None
dataSpider = None
def __init__(self, dirpath, parent = None):
self.path = dirpath
self.files = {}
self.dirs = {}
self.parent = parent
def reldir(self, dirname):
root = self.parent.fullpath()
if not dirname.startswith(root):
print "DS752: (%s) %s", dirname, root
raise "this shouldn't happen, maybe a security breach"
else:
return dirname[len(root):]
def report_entry(self, name, path):
self.parent.report_entry(name, path)
def add_dir(self, pathlist):
subdir = pathlist.pop(0)
if subdir not in self.dirs.keys():
#print "DS774: adding subdir:", subdir
self.dirs.update({subdir:DirEntry(subdir, parent = self)})
#print "DS776:", id(self), repr(self.dirs)
#print "consumable pathlist:", pathlist
if len(pathlist)>0:
self.dirs[subdir].add_dir(pathlist)
def add_file(self, base, dirlist):
#$ print "DS795:", repr(dirlist)
if len(dirlist)==0:
# it's my file!
base.parent=self
self.files.update({base.basename:base})
else:
tdir = dirlist.pop(0)
if tdir not in self.dirs:
raise "broken tree search, no place for file"
else:
self.dirs[tdir].add_file(base, dirlist)
def fullpath(self):
rets = os.path.join(self.parent.fullpath(),self.path)
return rets
def dirwalk(self):
yield self
if len(self.dirs)>0:
for dekey in self.dirs:
for dent in self.dirs[dekey].dirwalk():
yield dent
def find_file_entry(self,filename,root=None, dirlist = None):
if root == None:
base = os.path.basename(filename)
dirn = os.path.dirname(filename)
else:
dirn = os.path.join(root,os.path.dirname(filename))
base = os.path.basename(filename)
self.report_entry(base, dirn)
if dirlist == None:
# print "DS852:", repr(dirn), repr(self.reldir(dirn))
dirlist = path2list(self.reldir(dirn))
if len(dirlist)==0:
#then find the file
# print "self.files, filn", repr(self.files)
for filn in self.files.keys():
if filn == filename:
fil = self.files[filn]
# print "DS858: found FileEntry:", repr(fil)
return fil
#raise "fileEntry does not exist"
return None
else:
tdir = dirlist.pop(0)
if tdir not in self.dirs:
raise "broken tree search, file address invalid"
else:
return self.dirs[tdir].find_file_entry(base, dirn, dirlist)
def as_xml(self, top = True):
rtemp = """
<dirEntry %(id)s name="%(dirname)s">
%(files)s\n
%(childdirs)s\n
</dirEntry>
"""
if top == True:
idstr = 'id="topDirectory"'
else:
idstr = ""
rfiles = ""
fils = self.files.keys()
if len(fils)>0:
rfiles += '<filesList name="files">\n'
for fil in fils:
rfiles += '\t<fileEntry name="%(file)s" fullpath="%(full)s">\n' % {
"file":self.files[fil].basename,
"full":self.files[fil].fullpath()}
props = self.files[fil].props
if False: # DON'T SEND TYPES, no need... ?? --> if "types" in props:
tlist = props["types"]
for typ in tlist:
rfiles += '\t\t<astrodatatype name="%(typ)s"/>\n' % {
"typ":typ}
rfiles += "\t</fileEntry>\n"
rfiles += "</filesList>\n"
dirs = self.dirs.keys()
rdirs = ""
if len(dirs)>0:
for dirn in dirs:
rdirs += self.dirs[dirn].as_xml(top=False)
return rtemp % { "dirname" : self.fullpath(),
"files" : rfiles,
"childdirs": rdirs,
"id":idstr }
def __str__(self):
return repr(self.dirs)
class FileEntry(object):
basename = None
directory = None
parent = None
props = None
def __init__(self, basename, directory, parent = None):
self.basename = basename
self.directory = directory
self.parent = parent
self.props = {}
def fullpath(self):
#print "DS865: FileEntry #", id(self)
return os.path.join(self.parent.fullpath(), self.basename)
def add_prop(self, name, val):
self.props.update({name:val})
| mpl-2.0 | 8,175,685,783,328,367,000 | 42.48152 | 138 | 0.378928 | false | 5.384058 | false | false | false |
teamclairvoyant/airflow-scheduler-failover-controller | scheduler_failover_controller/metadata/base_metadata_service.py | 1 | 1260 | import datetime
from scheduler_failover_controller.utils import date_utils
class BaseMetadataService:
def initialize_metadata_source(self):
raise NotImplementedError
def get_failover_heartbeat(self):
raise NotImplementedError
def set_failover_heartbeat(self):
raise NotImplementedError
def get_active_failover_node(self):
raise NotImplementedError
def set_active_failover_node(self, node):
raise NotImplementedError
def get_active_scheduler_node(self):
raise NotImplementedError
def set_active_scheduler_node(self, node):
raise NotImplementedError
def clear(self):
raise NotImplementedError
def print_metadata(self):
print("Printing Metadata: ")
print("==============================")
print("active_failover_node: " + str(self.get_active_failover_node()))
print("active_scheduler_node: " + str(self.get_active_scheduler_node()))
print( "last_failover_heartbeat: " + str(self.get_failover_heartbeat()))
print("")
print("Printing Other Info: ")
print("==============================")
print( "current_timestamp: " + str(date_utils.get_datetime_as_str(datetime.datetime.now())))
| apache-2.0 | 6,998,434,482,056,831,000 | 30.5 | 100 | 0.637302 | false | 4.468085 | false | false | false |
freiheit/Bay-Oh-Woolph | cogs/basicpromotions.py | 1 | 14770 | from discord.ext import commands
from utils import *
import discord
import asyncio
from cogs.updateroster import UpdateRoster
from config import Config
import logging
logger = logging.getLogger('bayohwoolph.cogs.basicpromotions')
BASICPROMOTIONS = Config.config['BASICPROMOTIONS']
ROLE_CADET = BASICPROMOTIONS['ROLE_CADET']
ROLE_OFFICER = BASICPROMOTIONS['ROLE_OFFICER']
ROLE_PS4 = BASICPROMOTIONS['ROLE_PS4']
ROLE_PS4CADET = BASICPROMOTIONS['ROLE_PS4CADET']
ROLE_XBOX = BASICPROMOTIONS['ROLE_XBOX']
ROLE_XBOXCADET = BASICPROMOTIONS['ROLE_XBOXCADET']
ROLE_PC = BASICPROMOTIONS['ROLE_PC']
CADETS_MESS = BASICPROMOTIONS['CADETS_MESS']
PS4_ROOM = BASICPROMOTIONS['PS4_ROOM']
XBOX_ROOM = BASICPROMOTIONS['XBOX_ROOM']
OFFICERS_CLUB = BASICPROMOTIONS['OFFICERS_CLUB']
BOT_NOISE = BASICPROMOTIONS['bot_noise']
ROLE_MEMBER = BASICPROMOTIONS['ROLE_MEMBER']
NEWPCCADETMSG = """**Welcome to Dark Echo, {0}!**
**<:echoBlue:230423421983522816> Here are the basic steps to get started with Dark Echo: <:echoBlue:230423421983522816>**
-->Please read and make sure you understand the channel structure in <#146723400671428608>.
1. If you use Inara, join us at <http://inara.cz/wing/300>
2. In the game, apply to the "Dark Echo" squadron.
3. Send in-game friend requests to the Echoes you see currently active on Discord and/or via the in-game squadron.
4. Check <#161529165223428096> for current priorities.
5. Move your primary base of operations (any additional ships, etc) to Snodgrass Orbital in Disci.
6. Join the "Dark Echo" private group.
Note: You cannot get to Disci in a starter sidewinder. You need 9.5LY jump range. Upgrade from "E" to "D". We can help if you need it.
Check your welcome email; there is an optional yet fun way to make your trip to Disci worthwhile.
Please set an avatar image in Discord, as it greatly helps with telling people apart when using the in-game overlay.
If you stay active with us for a couple of weeks and haven't heard about a promotion to Officer, please remind the Leadership.
"""
NEWPS4CADETMSG = """**Welcome to Dark Echo, {0}!**
**<:echoBlue:230423421983522816> Here are the basic steps to get started with Dark Echo: <:echoBlue:230423421983522816>**
-->Please read and make sure you understand the channel structure in <#146723400671428608>.
1. If you use Inara, join us at http://inara.cz/wing/300
2. Send a PSN friend request to "Elite-DarkEcho".
3. Once the PSN friend request is accepted: in the game, under "Friends and Private Groups", send a friend request and request membership in the "Elite-DarkEcho" private group.
4. Check <#161529165223428096> for current priorities.
5. Move your primary base of operations (any additional ships, etc) to Snodgrass Orbital in Disci.
6. Set your ship id to [ECHO] or put [ECHO] in your ship name, whichever you prefer.
Note: You cannot get to Disci in a starter sidewinder. You need 9.5LY jump range. Upgrade Sidewinder or Eagle from "E" to "D"; or use a Hauler. If you're still having trouble, talk to us and somebody can help.
Check your welcome email, there is an optional and yet fun way to make your trip to Disci worthwhile.
If you stay active with us for a couple of weeks and haven't heard about a promotion to Officer, please remind the Leadership.
"""
NEWXBOXCADETMSG = """**Welcome to Dark Echo, {0}!**
**<:echoBlue:230423421983522816> Here are the basic steps to get started with Dark Echo: <:echoBlue:230423421983522816>**
-->Please read and make sure you understand the channel structure in <#146723400671428608>.
1. If you use Inara, join us at http://inara.cz/wing/300
2. Send an XBOX Live friend request to "ED Dark Echo" and join the "Dark Echo" club.
3. Once the XBOX Live friend request is accepted: in the game, under "Friends and Private Groups", request membership in the "ED Dark Echo" private group.
4. Check <#161529165223428096> for current priorities.
5. Move your primary base of operations (any additional ships, etc) to Snodgrass Orbital in Disci.
6. Set your ship id to [ECHO] or put [ECHO] in your ship name, whichever you prefer.
Note: You cannot get to Disci in a starter sidewinder. You need 9.5LY jump range. Upgrade Sidewinder or Eagle from "E" to "D"; or use a Hauler. If you're still having trouble, talk to us and somebody can help.
Also check your welcome email; there is an optional yet fun way to make your trip to Disci worthwhile.
If you stay active with us for a couple of weeks and haven't heard about a promotion to Officer, please remind the Leadership.
"""
NEWOFFICERMSG = """**<:echoBlue:230423421983522816> Welcome to Dark Echo's Officer's Club, {0}!**
Dark Echo's Dark Council believes that you are an asset to this organization and has promoted you to a full member (Officer).
Optional but traditional and highly recommended: Please bring some sort of rare beverage to Snodgrass Orbital in Disci and share a screenshot of that run on the forums and/or in <#173953415280328704>.
A Dark Council Member will update your forum permissions.
Once your forum permissions are set up, make sure to join us on Inara at <http://inara.cz/wing/300> (if you use Inara).
"""
class Basicpromotions:
"""Leadership/Recruiter commands for promoting to basic membership roles."""
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.has_any_role('Leadership','Recruiter')
async def newpccadet(self,ctx,
member1 : discord.Member = None,
member2 : discord.Member = None,
member3 : discord.Member = None,
member4 : discord.Member = None,
member5 : discord.Member = None,
member6 : discord.Member = None,
member7 : discord.Member = None,
member8 : discord.Member = None,
member9 : discord.Member = None,
member10 : discord.Member = None,
member11 : discord.Member = None,
member12 : discord.Member = None,
member13 : discord.Member = None,
member14 : discord.Member = None,
member15 : discord.Member = None,
member16 : discord.Member = None,
member17 : discord.Member = None,
member18 : discord.Member = None,
member19 : discord.Member = None,
member20 : discord.Member = None ):
"""Get new PC platform Cadet started."""
await ctx.trigger_typing()
# pull all the arguments into an array
argmembers = [member1, member2, member3, member4, member5, member6, member7, member8, member9, member10, member11, member12, member13, member14, member15, member16, member17, member18, member19, member20 ]
# and then filter out the None/empty items, so that we have only an array of things actually mentioned
members = [i for i in argmembers if i is not None]
memrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_MEMBER))
cadetrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_CADET))
pcrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_PC))
for member in members:
try:
await member.add_roles(cadetrole,memrole,pcrole)
except Exception as e:
await ctx.send('Unable to set PC Cadet role.')
mentiontext = memberlist_to_mentionlist(members)
cadetsmess = self.bot.get_channel(int(CADETS_MESS))
await cadetsmess.send(NEWPCCADETMSG.format(mentiontext))
await ctx.send('Go check out <#{}>, '.format(CADETS_MESS) + mentiontext + '.')
@commands.command()
@commands.has_any_role('Leadership','Recruiter')
async def newps4cadet(self, ctx,
member1 : discord.Member = None,
member2 : discord.Member = None,
member3 : discord.Member = None,
member4 : discord.Member = None,
member5 : discord.Member = None,
member6 : discord.Member = None,
member7 : discord.Member = None,
member8 : discord.Member = None,
member9 : discord.Member = None,
member10 : discord.Member = None,
member11 : discord.Member = None,
member12 : discord.Member = None,
member13 : discord.Member = None,
member14 : discord.Member = None,
member15 : discord.Member = None,
member16 : discord.Member = None,
member17 : discord.Member = None,
member18 : discord.Member = None,
member19 : discord.Member = None,
member20 : discord.Member = None):
"""Get new Playstation4 platform Cadet started."""
await ctx.trigger_typing()
# pull all the arguments into an array
argmembers = [member1, member2, member3, member4, member5, member6, member7, member8, member9, member10, member11, member12, member13, member14, member15, member16, member17, member18, member19, member20 ]
# and then filter out the None/empty items, so that we have only an array of things actually mentioned
members = [i for i in argmembers if i is not None]
memrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_MEMBER))
cadetrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_CADET))
ps4role = discord.utils.get(ctx.guild.roles, id=int(ROLE_PS4))
ps4cadet = discord.utils.get(ctx.guild.roles, id=int(ROLE_PS4CADET))
for member in members:
try:
await member.add_roles(cadetrole,memrole,ps4role,ps4cadet)
except Exception as e:
await ctx.send('Unable to set PS4 Cadet role.')
mentiontext = memberlist_to_mentionlist(members)
cadetsmess = self.bot.get_channel(int(CADETS_MESS))
await cadetsmess.send(NEWPS4CADETMSG.format(mentiontext))
await ctx.send('Go check out <#{}>, '.format(CADETS_MESS) + mentiontext + '.')
@commands.command()
@commands.has_any_role('Leadership','Recruiter')
async def newxboxcadet(self, ctx,
member1 : discord.Member = None,
member2 : discord.Member = None,
member3 : discord.Member = None,
member4 : discord.Member = None,
member5 : discord.Member = None,
member6 : discord.Member = None,
member7 : discord.Member = None,
member8 : discord.Member = None,
member9 : discord.Member = None,
member10 : discord.Member = None,
member11 : discord.Member = None,
member12 : discord.Member = None,
member13 : discord.Member = None,
member14 : discord.Member = None,
member15 : discord.Member = None,
member16 : discord.Member = None,
member17 : discord.Member = None,
member18 : discord.Member = None,
member19 : discord.Member = None,
member20 : discord.Member = None ):
"""Get new xbox platform Cadet started."""
await ctx.trigger_typing()
# pull all the arguments into an array
argmembers = [member1, member2, member3, member4, member5, member6, member7, member8, member9, member10, member11, member12, member13, member14, member15, member16, member17, member18, member19, member20 ]
# and then filter out the None/empty items, so that we have only an array of things actually mentioned
members = [i for i in argmembers if i is not None]
memrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_MEMBER))
cadetrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_CADET))
xboxrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_XBOX))
xboxcadet = discord.utils.get(ctx.guild.roles, id=int(ROLE_XBOXCADET))
for member in members:
try:
await member.add_roles(cadetrole,memrole,xboxrole,xboxcadet)
except Exception as e:
await ctx.send('Unable to set Xbox Cadet role.')
mentiontext = memberlist_to_mentionlist(members)
cadetsmess = self.bot.get_channel(int(CADETS_MESS))
await cadetsmess.send(NEWXBOXCADETMSG.format(mentiontext))
await ctx.send('Go check out <#{}>, '.format(CADETS_MESS) + mentiontext + '.')
@commands.command()
@commands.has_role('Leadership')
async def newofficer(self, ctx,
member1 : discord.Member = None,
member2 : discord.Member = None,
member3 : discord.Member = None,
member4 : discord.Member = None,
member5 : discord.Member = None,
member6 : discord.Member = None,
member7 : discord.Member = None,
member8 : discord.Member = None,
member9 : discord.Member = None,
member10 : discord.Member = None,
member11 : discord.Member = None,
member12 : discord.Member = None,
member13 : discord.Member = None,
member14 : discord.Member = None,
member15 : discord.Member = None,
member16 : discord.Member = None,
member17 : discord.Member = None,
member18 : discord.Member = None,
member19 : discord.Member = None,
member20 : discord.Member = None ):
"""Give intro message to new officer and assign them Officer role."""
await ctx.trigger_typing()
# pull all the arguments into an array
argmembers = [member1, member2, member3, member4, member5, member6, member7, member8, member9, member10, member11, member12, member13, member14, member15, member16, member17, member18, member19, member20 ]
# and then filter out the None/empty items, so that we have only an array of things actually mentioned
members = [i for i in argmembers if i is not None]
officerrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_OFFICER))
cadetrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_CADET))
botnoise = self.bot.get_channel(int(BOT_NOISE))
officersclub = self.bot.get_channel(int(OFFICERS_CLUB))
for member in members:
try:
await member.add_roles(officerrole)
except Exception as e:
await ctx.send('Unable to set Officer role.')
cleannick = member_to_clean_nick(member)
await botnoise.send('!addroster ' + cleannick)
mentiontext = memberlist_to_mentionlist(members)
# sleep for a second to make sure the role has gone through before sending messages that need it
await asyncio.sleep(1)
await officersclub.send(NEWOFFICERMSG.format(mentiontext))
await botnoise.send("!whois -r -d -role 'Officer' -nick")
for member in members:
await member.remove_roles(cadetrole)
def setup(bot):
bot.add_cog(Basicpromotions(bot))
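# Illustrative wiring (assumed project layout): the bot entry point would load
# this cog with something like:
#   bot.load_extension('cogs.basicpromotions')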
| agpl-3.0 | -6,239,468,246,116,860,000 | 43.221557 | 213 | 0.673527 | false | 3.42453 | false | false | false |