Dataset columns: repo_name (string, 5..92 chars) | path (string, 4..221 chars) | copies (19 classes) | size (string, 4..6 chars) | content (string, 766..896k chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000..9,223,102,107B) | line_mean (float64, 6.51..99.9) | line_max (int64, 32..997) | alpha_frac (float64, 0.25..0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5..13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
Each record below gives repo/path/copies/size, then the file content, then the remaining columns.
repo: Erotemic/hotspotter | path: hsgui/_frontend/EditPrefSkel.py | copies: 1 | size: 2626
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/joncrall/code/hotspotter/hsgui/_frontend/EditPrefSkel.ui'
#
# Created: Mon Feb 10 13:40:41 2014
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_editPrefSkel(object):
def setupUi(self, editPrefSkel):
editPrefSkel.setObjectName(_fromUtf8("editPrefSkel"))
editPrefSkel.resize(668, 530)
self.verticalLayout = QtGui.QVBoxLayout(editPrefSkel)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.prefTreeView = QtGui.QTreeView(editPrefSkel)
self.prefTreeView.setObjectName(_fromUtf8("prefTreeView"))
self.verticalLayout.addWidget(self.prefTreeView)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.redrawBUT = QtGui.QPushButton(editPrefSkel)
self.redrawBUT.setObjectName(_fromUtf8("redrawBUT"))
self.horizontalLayout.addWidget(self.redrawBUT)
self.unloadFeaturesAndModelsBUT = QtGui.QPushButton(editPrefSkel)
self.unloadFeaturesAndModelsBUT.setObjectName(_fromUtf8("unloadFeaturesAndModelsBUT"))
self.horizontalLayout.addWidget(self.unloadFeaturesAndModelsBUT)
self.defaultPrefsBUT = QtGui.QPushButton(editPrefSkel)
self.defaultPrefsBUT.setObjectName(_fromUtf8("defaultPrefsBUT"))
self.horizontalLayout.addWidget(self.defaultPrefsBUT)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(editPrefSkel)
QtCore.QMetaObject.connectSlotsByName(editPrefSkel)
def retranslateUi(self, editPrefSkel):
editPrefSkel.setWindowTitle(QtGui.QApplication.translate("editPrefSkel", "Edit Preferences", None, QtGui.QApplication.UnicodeUTF8))
self.redrawBUT.setText(QtGui.QApplication.translate("editPrefSkel", "Redraw", None, QtGui.QApplication.UnicodeUTF8))
self.unloadFeaturesAndModelsBUT.setText(QtGui.QApplication.translate("editPrefSkel", "Unload Features and Models", None, QtGui.QApplication.UnicodeUTF8))
self.defaultPrefsBUT.setText(QtGui.QApplication.translate("editPrefSkel", "Defaults", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
editPrefSkel = QtGui.QWidget()
ui = Ui_editPrefSkel()
ui.setupUi(editPrefSkel)
editPrefSkel.show()
sys.exit(app.exec_())
license: apache-2.0 | hash: -1,449,345,158,768,102,100 | line_mean: 45.070175 | line_max: 161 | alpha_frac: 0.739147 | autogenerated: false | ratio: 3.844802 | config_test: false | has_no_keywords: false | few_assignments: false
repo: thomas-maurice/docker-minecraft-webapp | path: webapp/lib/mcrcon.py | copies: 1 | size: 1703
import socket
import select
import struct
class MCRconException(Exception):
pass
class MCRcon:
socket = None
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
def disconnect(self):
self.socket.close()
self.socket = None
def send(self, out_type, out_data):
if self.socket is None:
raise MCRconException("Must connect before sending data")
# Send a request packet
out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
out_length = struct.pack('<i', len(out_payload))
self.socket.send(out_length + out_payload)
# Read response packets
in_data = ""
while True:
# Read a packet
in_length, = struct.unpack('<i', self.socket.recv(4))
in_payload = self.socket.recv(in_length)
in_id, in_type = struct.unpack('<ii', in_payload[:8])
in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
# Sanity checks
if in_padding != b'\x00\x00':
raise MCRconException("Incorrect padding")
if in_id == -1:
raise MCRconException("Login failed")
# Record the response
in_data += in_data_partial.decode('utf8')
# If there's nothing more to receive, return the response
if len(select.select([self.socket], [], [], 0)[0]) == 0:
return in_data
def command(self, command):
return self.send(2, command)
def login(self, password):
return self.send(3, password)
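# Hedged usage sketch (added, not part of the original module); the host, port and
# password below are placeholders for a real server's RCON settings.
if __name__ == '__main__':
    rcon = MCRcon()
    rcon.connect('localhost', 25575)      # 25575 is the conventional RCON port
    rcon.login('my-rcon-password')        # packet type 3 = login
    print(rcon.command('list'))           # packet type 2 = command
    rcon.disconnect()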
license: gpl-3.0 | hash: 6,767,356,456,409,357,000 | line_mean: 31.132075 | line_max: 93 | alpha_frac: 0.571932 | autogenerated: false | ratio: 3.818386 | config_test: false | has_no_keywords: false | few_assignments: false
repo: danidee10/Votr | path: api/api.py | copies: 1 | size: 3600
from os import getenv
from models import db, Users, Polls, Topics, Options, UserPolls
from flask import Blueprint, request, jsonify, session
from datetime import datetime
from config import SQLALCHEMY_DATABASE_URI
if getenv('APP_MODE') == 'PRODUCTION':
from production_settings import SQLALCHEMY_DATABASE_URI
api = Blueprint('api', 'api', url_prefix='/api')
@api.route('/polls', methods=['GET', 'POST'])
# retrieves/adds polls from/to the database
def api_polls():
if request.method == 'POST':
# get the poll and save it in the database
poll = request.get_json()
# simple validation to check if all values are properly set
for key, value in poll.items():
if not value:
return jsonify({'message': 'value for {} is empty'.format(key)})
title = poll['title']
options_query = lambda option: Options.query.filter(Options.name.like(option))
options = [Polls(option=Options(name=option))
if options_query(option).count() == 0
else Polls(option=options_query(option).first()) for option in poll['options']
]
eta = datetime.utcfromtimestamp(poll['close_date'])
new_topic = Topics(title=title, options=options, close_date=eta)
db.session.add(new_topic)
db.session.commit()
# run the task
from tasks import close_poll
close_poll.apply_async((new_topic.id, SQLALCHEMY_DATABASE_URI), eta=eta)
return jsonify({'message': 'Poll was created successfully'})
else:
# it's a GET request, return dict representations of the API
polls = Topics.query.filter_by(status=True).join(Polls).order_by(Topics.id.desc()).all()
all_polls = {'Polls': [poll.to_json() for poll in polls]}
return jsonify(all_polls)
@api.route('/polls/options')
def api_polls_options():
all_options = [option.to_json() for option in Options.query.all()]
return jsonify(all_options)
@api.route('/poll/vote', methods=['PATCH'])
def api_poll_vote():
poll = request.get_json()
poll_title, option = (poll['poll_title'], poll['option'])
join_tables = Polls.query.join(Topics).join(Options)
# Get topic and username from the database
topic = Topics.query.filter_by(title=poll_title, status=True).first()
user = Users.query.filter_by(username=session['user']).first()
# if poll was closed in the background before user voted
if not topic:
return jsonify({'message': 'Sorry! this poll has been closed'})
# filter options
option = join_tables.filter(Topics.title.like(poll_title), Topics.status == True).filter(Options.name.like(option)).first()
# check if the user has voted on this poll
poll_count = UserPolls.query.filter_by(topic_id=topic.id).filter_by(user_id=user.id).count()
if poll_count > 0:
return jsonify({'message': 'Sorry! multiple votes are not allowed'})
if option:
# record user and poll
user_poll = UserPolls(topic_id=topic.id, user_id=user.id)
db.session.add(user_poll)
# increment vote_count by 1 if the option was found
option.vote_count += 1
db.session.commit()
return jsonify({'message': 'Thank you for voting'})
return jsonify({'message': 'option or poll was not found please try again'})
@api.route('/poll/<poll_name>')
def api_poll(poll_name):
poll = Topics.query.filter(Topics.title.like(poll_name)).first()
return jsonify({'Polls': [poll.to_json()]}) if poll else jsonify({'message': 'poll not found'})
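# Hedged usage sketch (added, not part of the original file): registering this
# blueprint on a Flask app. The import path below is an assumption, and the views
# above still need the database and session configuration of the full application.
#   from flask import Flask
#   from api.api import api
#   app = Flask(__name__)
#   app.register_blueprint(api)   # routes: /api/polls, /api/poll/vote, /api/poll/<name>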
license: gpl-3.0 | hash: -3,323,841,830,138,290,700 | line_mean: 32.962264 | line_max: 127 | alpha_frac: 0.648889 | autogenerated: false | ratio: 3.654822 | config_test: false | has_no_keywords: false | few_assignments: false
repo: jamesmunns/wate_backend | path: prototyping/create_schema.py | copies: 1 | size: 1046
import psycopg2
import getpass
username = getpass.getuser()
password = getpass.getpass("Database password for {}: ".format(username))
database = "wate"
def create_user_table(cursor):
user_schema = """
CREATE TABLE users (
id serial PRIMARY KEY,
name text NOT NULL,
username text NOT NULL,
email citext UNIQUE NOT NULL,
joindate date NOT NULL,
passhash character (60),
use_metric_units boolean,
emails_disabled boolean
);
"""
cursor.execute(user_schema)
def create_weight_table(cursor):
weight_schema = """
CREATE TABLE weights (
user_id integer REFERENCES users(id) NOT NULL,
weight_lbs numeric CHECK (weight_lbs > 0) NOT NULL,
measure_date date NOT NULL,
measure_time time);
"""
cursor.execute(weight_schema)
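def enable_citext(cursor):
    # Added sketch, not part of the original script: the users table declares
    # 'email citext', which relies on PostgreSQL's citext extension; it may need
    # to be enabled once per database (by a sufficiently privileged role) before
    # create_user_table() runs.
    cursor.execute("CREATE EXTENSION IF NOT EXISTS citext;")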
with psycopg2.connect(dbname=database, user=username, password=password) as conn:
with conn.cursor() as cur:
create_user_table(cur)
create_weight_table(cur)
license: mit | hash: -8,867,129,375,044,253,000 | line_mean: 27.27027 | line_max: 81 | alpha_frac: 0.632887 | autogenerated: false | ratio: 3.932331 | config_test: false | has_no_keywords: false | few_assignments: false
repo: Wilo/barcampMilagro2015 | path: pushfeed/pushfeed/pipelines.py | copies: 1 | size: 1083
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import logging
from scrapy.conf import settings
from scrapy.exceptions import DropItem
import rethinkdb as r
#class PushfeedPipeline(object):
# def process_item(self, item, spider):
# return item
class RethinkdbPipeline(object):
"""docstring for RethinkdbPipeline"""
def __init__(self):
r.connect(settings['RETHINKDB_SERVER'], settings['RETHINKDB_PORT']).repl()
self.db = r.db(settings['RETHINKDB_DB']).table(settings['RETHINKDB_TABLE'])
def process_item(self, item, spider):
for data in item:
if not data:
raise DropItem
data = dict(title=item['title'][0], description=item['description'][0],
date=item['date'][0], link=item['link'][0], img=item['img'][0])
self.db.insert(data).run()
logging.log(logging.INFO,"Feed added to rethinkdb database!")
return item
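# Hedged configuration sketch (added, not part of the original file): the pipeline
# above reads these keys from the project's settings.py; the host, port, database
# and table values are placeholders.
#   ITEM_PIPELINES = {'pushfeed.pipelines.RethinkdbPipeline': 300}
#   RETHINKDB_SERVER = 'localhost'
#   RETHINKDB_PORT = 28015
#   RETHINKDB_DB = 'pushfeed'
#   RETHINKDB_TABLE = 'feeds'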
license: mit | hash: 595,242,613,530,834,000 | line_mean: 31.818182 | line_max: 83 | alpha_frac: 0.649123 | autogenerated: false | ratio: 3.61 | config_test: false | has_no_keywords: false | few_assignments: false
repo: hmpf/nav | path: python/nav/web/portadmin/urls.py | copies: 1 | size: 1470
#
# Copyright (C) 2011, 2013-2015 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""PortAdmin Django URL config"""
from django.conf.urls import url
from nav.web.portadmin import views
urlpatterns = [
url(r'^$',
views.index,
name='portadmin-index'),
url(r'^ip=(?P<ip>[\d\.]+)',
views.search_by_ip,
name='portadmin-ip'),
url(r'^sysname=(?P<sysname>\S+)',
views.search_by_sysname,
name='portadmin-sysname'),
url(r'^interfaceid=(?P<interfaceid>\d+)',
views.search_by_interfaceid,
name='portadmin-interface'),
url(r'^save_interfaceinfo',
views.save_interfaceinfo),
url(r'^restart_interface',
views.restart_interface),
url(r'^write_mem',
views.write_mem),
url(r'^trunk/(?P<interfaceid>\d+)',
views.render_trunk_edit,
name="portadmin-render-trunk-edit"),
]
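# Hedged usage sketch (added): these patterns are normally mounted from a
# project-level URLconf; the prefix below is an assumption.
#   from django.conf.urls import include, url
#   urlpatterns = [
#       url(r'^portadmin/', include('nav.web.portadmin.urls')),
#   ]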
license: gpl-3.0 | hash: 2,910,476,749,803,089,400 | line_mean: 32.409091 | line_max: 79 | alpha_frac: 0.667347 | autogenerated: false | ratio: 3.62069 | config_test: false | has_no_keywords: false | few_assignments: false
repo: bmcage/centrifuge-1d | path: centrifuge1d/modules/direct_consolidation_saturated/options.py | copies: 1 | size: 7263
from __future__ import division, print_function
import sys
from ..shared.functions import lagrangian_derivative_coefs
from numpy import linspace, power, empty, array, log
from ..shared.consolidation import (create_CON, CON_SLURRY, CON_GOMPERTZ,
CON_FREEFORM, CON_SLURRY_CC, CON_SLURRY_KWA,
CON_WEIBULL)
def dtype_deps(cfg):
dtype = cfg.get_value('dtype')
result = []
if dtype == 1: pass
elif dtype in [2,3]: result = ['k_dx']
return result
PARENTAL_MODULES = ['base']
CONFIG_OPTIONS = ['inner_points', 'dtype',
('con_type', CON_SLURRY),
('con_max_refine', 0),
(lambda cfg: cfg.get_value('con_type') == CON_SLURRY,
['a', 'b', 'c', 'd']),
(lambda cfg: cfg.get_value('con_type') == CON_SLURRY_CC,
['a', 'cc', 'c', 'd']),
(lambda cfg: cfg.get_value('con_type') in [CON_GOMPERTZ,],
['a', 'b', 'c', 'd', 'cc']),
(lambda cfg: cfg.get_value('con_type') in [CON_WEIBULL,CON_SLURRY_KWA],
['b', 'e', 'f', 'c', 'd']),
(lambda cfg: cfg.get_value('con_type') in [CON_FREEFORM],
[('ei', None), ('si', None), ('ki', None), ('eiadd', None)]),
'porosity',
'estimate_zp0',
('L_atol', 1e-8),
dtype_deps,
# dependent
(lambda cfg: cfg.get_value('fl1') > 0.0,
['fp1'], [('fp1', -1.0)]),
(lambda cfg: cfg.get_value('fl2') > 0.0,
['fp2'], [('fp2', -1.0)]),
#
'rb_type',
# dependent
(lambda cfg: cfg.get_value('rb_type') == 2,
['h_last']),
(lambda cfg: cfg.get_value('rb_type') == 3,
['dip_height']),
'h_last',
'l0',
'wl0',
'density_s', #density sample in g/(cm^3)
('excess_load', [0]),
('excess_load_t',[0]),
('numerfact_e0', 0.999),
('e0_overshoot_factor', 0.),
]
INTERNAL_OPTIONS = ['m', 'y', 'y12', 'dy', 'alpha', 'ldc1', 'ldc2', 'ldc3',
'k_dx', 'wm0', 'CON',
'first_idx', 'last_idx', 'wl_idx', 'L_idx',
'mass_in_idx', 'mass_out_idx',
'z_size', 'gamma_w', 'gamma_s', 'e0']
EXCLUDE_FROM_MODEL = ['dtype']
PROVIDE_OPTIONS = []
OPTIONS_ITERABLE_LISTS = ['porosity']
def load_func(x, atimes, aloads, duration_change=10):
#print (x, atimes, aloads,aloads[x>=atimes])
x_load = aloads[x>=atimes][-1]
#10 sec later
x_offset_load = aloads[x+duration_change>=atimes][-1]
if (x_load == x_offset_load):
return x_load
else:
#load will change, change smootly to the change
t_new_load = atimes[x+duration_change>=atimes][-1]
val= (x - (t_new_load-duration_change))/duration_change * (x_offset_load-x_load) + x_load
return val
def create_excess_load(times, loads, duration_change=10):
if (len(times) != len(loads)):
print ("ERROR: excess loads and excess load times don't have same array sizes!")
sys.exit(0)
if (len(times) == 0 or (len(times) == 1 and times[0] == 0 and loads[0] == 0)):
#no loads
return lambda x: 0.
else:
atimes = array(times)
aloads = array(loads)
return lambda x: load_func(x, atimes, aloads, duration_change)
#return lambda x: aloads[x>=atimes][-1]
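# Brief illustration (added, not in the original module): with times [0, 100] and
# loads [0, 5], the returned function holds 0 until t = 90, ramps linearly over the
# 10 s duration_change window, and returns 5 from t = 100 onward.
#   load = create_excess_load([0, 100], [0, 5])
#   load(50)    # -> 0.0
#   load(95)    # -> 2.5 (halfway through the ramp)
#   load(150)   # -> 5.0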
def adjust_cfg(cfg):
#specific weight water in g/(s cm^2)
cfg.set_value('gamma_w', cfg.get_value('density')*cfg.get_value('g'))
#specific weight sample in g/(s cm^2)
cfg.set_value('gamma_s', cfg.get_value('density_s')*cfg.get_value('g'))
# Discretization
inner_points = cfg.get_value('inner_points')
discretization_type = cfg.get_value('dtype')
if discretization_type == 1: # linear discretization
y = linspace(0, 1, inner_points + 2)
elif discretization_type in [2,3]: # L= a+ka+(k^2)a+...+(k^inner_points)a
# L=1 (as we use transformed interval <0,1>)
# L = a*[(1-k^(inner_points +1))/(1-k)]
k = cfg.get_value('k_dx')
a=(1-k)/(1-power(k, inner_points+1))
y= empty([inner_points+2, ])
y[0] = 0.0; y[-1] = 1.0
for i in range(1, inner_points+1):
y[i] = y[i-1] + a
a = a*k
if discretization_type == 3:
# invert it
tmp = y[::-1]
y[:] = 1. - tmp[:]
else:
print('Unsupported discretization type:', discretization_type)
exit(1)
#porosity and void ratio
por = cfg.get_value('porosity')
if not (0<por<1):
print ('Porosity must be a value between 0 and 1. Given:', por)
exit(1)
e0 = por/(1-por)
cfg.set_value('e0', e0)
print ('Consolidation: Calculated initial void ratio is', cfg.get_value('e0'))
ksguess = cfg.get_value('ks')
ks = ksguess
if cfg.get_value('con_type') in [CON_SLURRY, CON_GOMPERTZ]:
ks = (1+e0)*(cfg.get_value('c')+cfg.get_value('d')*e0)
cfg.set_value('ks', ks)
elif cfg.get_value('con_type') in [CON_SLURRY_CC, CON_SLURRY_KWA, CON_WEIBULL]:
ks = log(e0/cfg.get_value('c')) / cfg.get_value('d')
else:
print ("ERROR: cannot calculate the start ks as consolidation type is not known!")
sys.exit(0)
print ('Consolidation: Your guessed ks', ksguess, 'has been changed into calculated', ks, 'cm/s')
back = raw_input("Continue? [Y/n] ")
if back.strip().lower() in ['n', "no"]:
sys.exit(0)
# Determine consolidation curve model used, all data is now available
cfg.set_value('CON', create_CON(cfg))
cfg.set_value('excess_load_f', create_excess_load(
cfg.get_value('excess_load_t'),
cfg.get_value('excess_load'),
duration_change=10))
cfg.set_value('y', y)
cfg.set_value('y12', (y[1:]+y[:-1])/2.)
dy = y[1:]-y[:-1]
alpha = empty([len(dy)+1, ])
alpha[0] = 0.
alpha[1:] = dy
cfg.set_value('dy', dy)
cfg.set_value('alpha', alpha)
ldc1, ldc2, ldc3 = lagrangian_derivative_coefs(dy)
cfg.set_value('ldc1', ldc1)
cfg.set_value('ldc2', ldc2)
cfg.set_value('ldc3', ldc3)
inner_points = cfg.get_value('inner_points')
cfg.set_value('sc_max_refine', 0)
cfg.set_value('first_idx', 0)
cfg.set_value('last_idx', inner_points+1)
cfg.set_value('mass_in_idx', inner_points+2)
cfg.set_value('wl_idx', inner_points+3)
cfg.set_value('L_idx', inner_points+4)
cfg.set_value('mass_out_idx', inner_points+5)
# total length of 'z' array (discretization points + s1,s2,mass_in,...)
cfg.set_value('z_size', inner_points+6)
def check_cfg(cfg):
if not (not cfg.get_value('wl0') is None or not cfg.get_value('ww0') is None):
print("One of 'wl0' or 'ww0' parameters must be specified.")
return False
return True
license: gpl-2.0 | hash: 4,322,076,158,088,722,400 | line_mean: 37.226316 | line_max: 101 | alpha_frac: 0.514801 | autogenerated: false | ratio: 3.082767 | config_test: false | has_no_keywords: false | few_assignments: false
repo: Notxor/Neuronal | path: neuronal/neuromotor.py | copies: 1 | size: 3081
# -*- coding: utf-8 -*-
# Neuronal - Framework for Neural Networks and Artificial Intelligence
#
# Copyright (C) 2012 dddddd <[email protected]>
# Copyright (C) 2012 Notxor <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from glioblasto import Glioblasto
class _Motor(Glioblasto):
"""Elemento de salida neuromotora."""
def __init__(self, neuromotor):
Glioblasto.__init__(self)
self.vias_aferentes = set()
self.neuromotor = neuromotor
class NeuroMotor(object):
"""Conjunto de sensores. Proporciona datos de entrada a una red."""
def __init__(self, cantidad_de_motores, red = None):
"""
The motor neurons are kept in an immutable list (tuple), so they do not
change over the lifetime of the neuromotor.
"""
self.motoras = tuple(
_Motor(self) for i in xrange(cantidad_de_motores)
)
self._red = None
if red is not None:
self._conectar_a_red_aferente(red)
def _conectar_a_red_aferente(self, red):
"""
Creates as many output neurons in 'red' (the network) as there are motor
units in this neuromotor, and connects them (via output->motor synapses).
Those neurons should preferably be the ones that close the nucleus'
neuron list: the goal is for them to fire at the end of the 'cycle',
which reduces the number of passes the network needs. Ideally, call
this function as the last step of building the network.
"""
n_conexiones = len(self.motoras)
# Create neurons in the network that will act as emitters.
nuevas_salidas = red.crear_neuronas_de_salida(n_conexiones)
# Connect the new outputs (via synapses) to
# ... the motor units of this neuromotor.
for i in xrange(n_conexiones):
nuevas_salidas[i].crear_sinapsis_saliente(self.motoras[i])
# Keep a reference to the network.
self._red = red
def _conectar_motorizacion(self, funciones):
"""
This method receives a list of functions and assigns each of them to a
motor neuron of the network, so that if an output is activated by the
network, the code contained in the associated function is executed.
"""
if (len(funciones) != len(self.motoras)):
raise "No coincide el número de neuronas con las acciones."
license: agpl-3.0 | hash: -3,130,294,366,610,631,700 | line_mean: 40.486486 | line_max: 80 | alpha_frac: 0.669381 | autogenerated: false | ratio: 3.178054 | config_test: false | has_no_keywords: false | few_assignments: false
repo: dlenski/stravacli | path: stravacli/QueryGrabber.py | copies: 1 | size: 1046
from http.server import HTTPServer, BaseHTTPRequestHandler
import socket
from urllib.parse import parse_qs
class handler(BaseHTTPRequestHandler):
def do_GET(self):
self.server.received = parse_qs(self.path.split('?',1)[1])
self.send_response(200)
self.end_headers()
self.wfile.write(self.server.response.encode())
class QueryGrabber(HTTPServer):
def __init__(self, response='', address=None):
self.received = None
self.response = response
if address is not None:
HTTPServer.__init__(self, address, handler)
else:
for port in range(1024,65536):
try:
HTTPServer.__init__(self, ('localhost', port), handler)
except socket.error as e:
if e.errno!=98: # Address already in use
raise
else:
break
else:
raise OSError('no free local port available')
def root_uri(self):
return 'http://{}:{:d}'.format(*self.server_address)
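# Hedged usage sketch (added, not part of the original module): typically used to
# catch a single OAuth redirect; the response text is what the browser will see.
if __name__ == '__main__':
    server = QueryGrabber(response='You may close this window.')
    print('listening on', server.root_uri())
    server.handle_request()               # blocks until one request arrives
    print('query parameters:', server.received)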
license: gpl-3.0 | hash: -4,080,119,067,574,555,600 | line_mean: 33.866667 | line_max: 75 | alpha_frac: 0.554493 | autogenerated: false | ratio: 4.376569 | config_test: false | has_no_keywords: false | few_assignments: false
repo: failys/CAIRIS | path: cairis/tools/PseudoClasses.py | copies: 1 | size: 16629
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
from flask_restful import fields
from cairis.core.ReferenceSynopsis import ReferenceSynopsis
from cairis.core.ReferenceContribution import ReferenceContribution
__author__ = 'Robin Quetin, Shamal Faily'
obj_id_field = '__python_obj__'
def gen_class_metadata(class_ref):
return {"enum": [class_ref.__module__+'.'+class_ref.__name__]}
class CharacteristicReferenceSynopsis(object):
resource_fields = {
obj_id_field: fields.String,
"theSynopsis": fields.String,
"theDimension": fields.String,
"theActorType": fields.String,
"theActor": fields.String,
"theInitialSatisfaction" : fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, rsName='', rsDim='', rsActorType='', rsActor='', gSat=''):
self.theSynopsis = rsName
self.theDimension = rsDim
self.theActorType = rsActorType
self.theActor= rsActor
self.theInitialSatisfaction = gSat
def __getitem__(self,varName):
if (varName == 'theSynopsis'): return self.theSynopsis
elif (varName == 'theDimension'): return self.theDimension
elif (varName == 'theActorType'): return self.theActorType
elif (varName == 'theActor'): return self.theActor
elif (varName == 'theInitialSatisfaction'): return self.theInitialSatisfaction
else: return None
class CharacteristicReferenceContribution(object):
resource_fields = {
obj_id_field: fields.String,
"theMeansEnd": fields.String,
"theContribution": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, rcMe='', rcCont=''):
self.theMeansEnd = rcMe
self.theContribution = rcCont
def __getitem__(self,varName):
if (varName == 'theMeansEnd'): return self.theMeansEnd
elif (varName == 'theContribution'): return self.theContribution
else: return None
class CharacteristicReference(object):
resource_fields = {
obj_id_field: fields.String,
'theReferenceName' : fields.String,
'theCharacteristicType' : fields.String,
'theReferenceDescription' : fields.String,
'theDimensionName' : fields.String,
'theReferenceSynopsis' : fields.Nested(CharacteristicReferenceSynopsis.resource_fields),
'theReferenceContribution' : fields.Nested(CharacteristicReferenceContribution.resource_fields)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, refName=None, crTypeName='grounds', refDesc=None, dimName='document',rSyn=None,rCont=None):
"""
:type refName: str
:type crTypeName: str
:type refDesc: str
:type dimName: str
"""
self.theReferenceName = refName
self.theCharacteristicType = crTypeName
self.theReferenceDescription = refDesc
self.theDimensionName = dimName
self.theReferenceSynopsis = rSyn
self.theReferenceContribution = rCont
class Definition(object):
resource_fields = {
obj_id_field: fields.String,
'name': fields.String,
'value': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class Contributor(object):
resource_fields = {
obj_id_field: fields.String,
'firstName': fields.String,
'surname': fields.String,
'affiliation': fields.String,
'role': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, first_name=None, surname=None, affiliation=None, role=None, tuple_form=None):
"""
:type first_name: str
:type surname: str
:type affiliation: str
:type role: str
:type tuple_form: tuple
"""
if tuple_form is None:
self.firstName = first_name or ''
self.surname = surname or ''
self.affiliation = affiliation or ''
self.role = role or ''
else:
attrs = ['firstName', 'surname', 'affiliation', 'role']
for idx in range(0, len(tuple_form)):
self.__setattr__(attrs[idx], tuple_form[idx] or '')
class EnvironmentTensionModel(object):
resource_fields = {
obj_id_field: fields.String,
"base_attr_id": fields.Integer,
"attr_id": fields.Integer,
"value": fields.Integer,
"rationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
attr_dictionary = {
'Confidentiality': 0,
'Integrity': 1,
'Availability': 2,
'Accountability': 3,
'Anonymity': 4,
'Pseudonymity': 5,
'Unlinkability': 6,
'Unobservability': 7
}
attr_dictionary = OrderedDict(sorted(list(attr_dictionary.items()), key=lambda t: t[1]))
# endregion
base_attr_values = list(range(-1,4))
attr_values = list(range(4,8))
attr_values.append(-1)
def __init__(self, base_attr_id=-1, attr_id=-1, value=0, rationale='None', key=None):
"""
:type base_attr_id: int
:type attr_id: int
:type value: int|tuple
:type rationale: str
:type key: tuple
"""
if key is not None:
base_attr_id = key[0]
attr_id = key[1]
rationale = value[1]
value = value[0]
if base_attr_id not in self.base_attr_values or attr_id not in self.attr_values:
raise ValueError('Base attribute or subattribute value is incorrect.')
self.base_attr_id = base_attr_id
self.attr_id = attr_id
self.value = value
self.rationale = rationale
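# Illustrative note (added): the key/value form mirrors how tensions are stored as
# {(base_attr_id, attr_id): (value, rationale)}; for example, the call below is
# equivalent to base_attr_id=0 (Confidentiality), attr_id=4 (Anonymity), value=2.
#   EnvironmentTensionModel(key=(0, 4), value=(2, 'tension between privacy and audit'))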
class Revision(object):
resource_fields = {
obj_id_field: fields.String,
'id': fields.Integer,
'date': fields.String,
'description': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, id=None, date=None, description=None, tuple_form=None):
"""
:type id: int
:type date: str
:type description: str
:type tuple_form: tuple
"""
if tuple_form is None:
self.id = id
self.date = date
self.description = description
else:
attrs = ['id', 'date', 'description']
for idx in range(0, len(tuple_form)):
self.__setattr__(attrs[idx], tuple_form[idx] or '')
class ProjectSettings(object):
resource_fields = {
obj_id_field: fields.String,
'projectName': fields.String,
'richPicture': fields.String,
'projectScope': fields.String,
'definitions': fields.List(fields.Nested(Definition.resource_fields)),
'projectGoals': fields.String,
'contributions': fields.List(fields.Nested(Contributor.resource_fields)),
'projectBackground': fields.String,
'revisions': fields.List(fields.Nested(Revision.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
req_p_settings_keys = ['Project Background', 'Project Goals', 'Project Name', 'Project Scope', 'Rich Picture']
def __init__(self, pSettings=None, pDict=None, contributors=None, revisions=None):
logger = logging.getLogger('cairisd')
project_settings = pSettings or {}
self.projectBackground = project_settings.get("Project Background", "")
self.projectGoals = project_settings.get("Project Goals", "")
self.projectName = project_settings.get("Project Name", "")
self.projectScope = project_settings.get("Project Scope", "")
self.richPicture = project_settings.get("Rich Picture", "")
self.definitions = pDict or []
self.contributions = []
for contributor in contributors or []:
if isinstance(contributor, tuple):
new_contr = Contributor(tuple_form=contributor)
self.contributions.append(new_contr)
else:
logger.warning('Item does not meet typical contributor structure. Passing this one.')
self.revisions = []
for revision in revisions or []:
if isinstance(revision, tuple):
new_rev = Revision(tuple_form=revision)
self.revisions.append(new_rev)
else:
logger.warning('Item does not meet typical contributor structure. Passing this one.')
class RiskScore(object):
resource_fields = {
obj_id_field: fields.String,
'responseName': fields.String,
'unmitScore': fields.Integer,
'mitScore': fields.Integer,
'details': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, response_name, unmit_score, mit_score, details):
"""
:type response_name: str
:type unmit_score: int
:type mit_score: int
:type details: str
"""
self.responseName = response_name
self.unmitScore = unmit_score or -1
self.mitScore = mit_score or -1
self.details = details
class RiskRating(object):
resource_fields = {
obj_id_field: fields.String,
'rating': fields.String,
'threat': fields.String,
'vulnerability': fields.String,
'environment': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, threat, vulnerability, environment, rating=None):
self.threat = threat
self.vulnerability = vulnerability
self.environment = environment
self.rating = rating
class CountermeasureTarget(object):
def __init__(self, tName=None, tEffectiveness=None, tRat=None):
"""
:type tName: str
:type tEffectiveness: str
:type tRat: str
"""
self.theName = tName
self.theEffectiveness = tEffectiveness
self.theRationale = tRat
resource_fields = {
"__python_obj__": fields.String,
"theName": fields.String,
"theEffectiveness": fields.String,
"theRationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def name(self): return self.theName
def effectiveness(self): return self.theEffectiveness
def rationale(self): return self.theRationale
class PersonaTaskCharacteristics(object):
def __init__(self, pName, pDur, pFreq, pDemands, pGoalConflict):
"""
:type pName: str
:type pDur: str
:type pFreq: str
:type pDemands: str
:type pGoalConflict: str
"""
self.thePersona = pName
self.theDuration = pDur
self.theFrequency = pFreq
self.theDemands = pDemands
self.theGoalConflict = pGoalConflict
resource_fields = {
"__python_obj__": fields.String,
"thePersona": fields.String,
"theDuration": fields.String,
"theFrequency": fields.String,
"theDemands": fields.String,
"theGoalConflict": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def persona(self): return self.thePersona
def duration(self): return self.theDuration
def frequency(self): return self.theFrequency
def demands(self): return self.theDemands
def goalConflict(self): return self.theGoalConflict
class CountermeasureTaskCharacteristics(object):
def __init__(self, pTask, pName, pDur, pFreq, pDemands, pGoalConflict):
"""
:type pTask: str
:type pName: str
:type pDur: str
:type pFreq: str
:type pDemands: str
:type pGoalConflict: str
"""
self.theTask = pTask
self.thePersona = pName
self.theDuration = pDur
self.theFrequency = pFreq
self.theDemands = pDemands
self.theGoalConflict = pGoalConflict
resource_fields = {
"__python_obj__": fields.String,
"theTask": fields.String,
"thePersona": fields.String,
"theDuration": fields.String,
"theFrequency": fields.String,
"theDemands": fields.String,
"theGoalConflict": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def task(self): return self.theTask
def persona(self): return self.thePersona
def duration(self): return self.theDuration
def frequency(self): return self.theFrequency
def demands(self): return self.theDemands
def goalConflict(self): return self.theGoalConflict
class SecurityAttribute(object):
def __init__(self, name=None, value=None, rationale=None):
"""
:type name: str
:type value: str
:type rationale: str
"""
self.name = name
self.value = value
self.rationale = rationale
resource_fields = {
"__python_obj__": fields.String,
"name": fields.String,
"value": fields.String,
"rationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def get_attr_value(self, enum_obj):
"""
Gets the database value for the security attribute
:type enum_obj: list|tuple
"""
value = 0
if self.value is not None:
found = False
idx = 0
while not found and idx < len(enum_obj):
if enum_obj[idx] == self.value:
value = idx
found = True
else:
idx += 1
return value
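# Illustrative example (added, not in the original file): get_attr_value returns the
# index of the attribute's value within the supplied enumeration, or 0 if absent.
#   attr = SecurityAttribute(name='confidentiality', value='Medium')
#   attr.get_attr_value(('None', 'Low', 'Medium', 'High'))   # -> 2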
class ValuedRole(object):
resource_fields = {
obj_id_field: fields.String,
'roleName': fields.String,
'cost': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self, role_name, cost):
self.roleName = role_name
self.cost = cost
class ExceptionAttributes(object):
resource_fields = {
obj_id_field: fields.String,
'theName': fields.String,
'theDimensionType': fields.String,
'theDimensionValue': fields.String,
'theCategoryName': fields.String,
'theDescription': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,excName,dimType,dimValue,catName,excDesc):
self.theName = excName
self.theDimensionType = dimType
self.theDimensionValue = dimValue
self.theCategoryName = catName
self.theDescription = excDesc
class StepAttributes(object):
resource_fields = {
obj_id_field: fields.String,
'theStepText': fields.String,
'theSynopsis': fields.String,
'theActor': fields.String,
'theActorType': fields.String,
'theExceptions': fields.List(fields.Nested(ExceptionAttributes.resource_fields)),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,stepTxt,stepSyn,stepActor,stepActorType,stepExceptions):
self.theStepText = stepTxt
self.theSynopsis = stepSyn
self.theActor = stepActor
self.theActorType = stepActorType
self.theExceptions = stepExceptions
def synopsis(self): return self.theSynopsis
def actor(self): return self.theActor
def actorType(self): return self.theActorType
def tags(self): return self.theTags
def setSynopsis(self,s): self.theSynopsis = s
def setActor(self,a): self.theActor = a
class StepsAttributes(object):
resource_fields = {
obj_id_field: fields.String,
'theSteps': fields.List(fields.Nested(StepAttributes.resource_fields)),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self):
self.theSteps = []
def __getitem__(self,stepNo):
return self.theSteps[stepNo]
def __setitem__(self,stepNo,s):
self.theSteps[stepNo] = s
def size(self):
return len(self.theSteps)
def append(self,s):
self.theSteps.append(s)
def remove(self,stepNo):
self.theSteps.pop(stepNo)
def insert(self,pos,s):
self.theSteps.insert(pos,s)
class ObjectDependency(object):
resource_fields = {
obj_id_field: fields.String,
'theDimensionName': fields.String,
'theObjectName': fields.String,
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,dimension_name,object_name):
self.theDimensionName = dimension_name
self.theObjectName = object_name
class TaskGoalContribution(object):
resource_fields = {
obj_id_field: fields.String,
'theSource': fields.String,
'theDestination': fields.String,
'theEnvironment': fields.String,
'theContribution': fields.String,
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,src,dest,env,cont):
self.theSource = src
self.theDestination = dest
self.theEnvironment = env
self.theContribution = cont
license: apache-2.0 | hash: -6,147,163,597,620,550,000 | line_mean: 29.234545 | line_max: 112 | alpha_frac: 0.682783 | autogenerated: false | ratio: 3.527577 | config_test: false | has_no_keywords: false | few_assignments: false
repo: ZoranPavlovic/kombu | path: kombu/utils/objects.py | copies: 1 | size: 1945
"""Object Utilities."""
class cached_property:
"""Cached property descriptor.
Caches the return value of the get method on first call.
Examples:
.. code-block:: python
@cached_property
def connection(self):
return Connection()
@connection.setter # Prepares stored value
def connection(self, value):
if value is None:
raise TypeError('Connection must be a connection')
return value
@connection.deleter
def connection(self, value):
# Additional action to do at del(self.attr)
if value is not None:
print('Connection {0!r} deleted'.format(value))
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.__get = fget
self.__set = fset
self.__del = fdel
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
def __get__(self, obj, type=None):
if obj is None:
return self
try:
return obj.__dict__[self.__name__]
except KeyError:
value = obj.__dict__[self.__name__] = self.__get(obj)
return value
def __set__(self, obj, value):
if obj is None:
return self
if self.__set is not None:
value = self.__set(obj, value)
obj.__dict__[self.__name__] = value
def __delete__(self, obj, _sentinel=object()):
if obj is None:
return self
value = obj.__dict__.pop(self.__name__, _sentinel)
if self.__del is not None and value is not _sentinel:
self.__del(obj, value)
def setter(self, fset):
return self.__class__(self.__get, fset, self.__del)
def deleter(self, fdel):
return self.__class__(self.__get, self.__set, fdel)
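# Minimal demonstration (added, not part of the original module): the getter runs
# once, the result is cached in the instance __dict__, and the deleter drops it.
if __name__ == '__main__':
    class Example:
        calls = 0

        @cached_property
        def value(self):
            Example.calls += 1
            return 42

    obj = Example()
    assert obj.value == 42 and obj.value == 42
    assert Example.calls == 1      # computed only once
    del obj.value                  # removes the cached entry
    assert obj.value == 42 and Example.calls == 2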
license: bsd-3-clause | hash: -138,779,550,533,933,420 | line_mean: 29.390625 | line_max: 70 | alpha_frac: 0.515681 | autogenerated: false | ratio: 4.256018 | config_test: false | has_no_keywords: false | few_assignments: false
repo: Fuzen-py/BladeAndSoulAPI | path: BladeAndSoul/bns.py | copies: 1 | size: 21211
import asyncio
import json
import aiohttp
from bs4 import BeautifulSoup
from .errors import (CharacterNotFound, FailedToParse, InvalidData,
ServiceUnavialable)
try:
import lxml
parser = 'lxml'
except ImportError:
parser = 'html.parser'
# types of weapons in game
VALID_WEAPONS = ['dagger', 'sword', 'staff', 'razor', 'axe', 'bangle', 'gauntlet', 'lynblade', 'bracer']
# types of accessories in game
VALID_ACCESSORIES = ['necklace', 'earring', 'bracelet', 'ring', 'belt', 'energy', 'soul']
PROFILE_URL = 'http://na-bns.ncsoft.com/ingame/bs/character/profile' # ?c=Char
SEARCH_URL = 'http://na-bns.ncsoft.com/ingame/bs/character/search/info' # ?c=Char
SUGGEST_URL = 'http://na-search.ncsoft.com/openapi/suggest.jsp' # ?site=bns&display=10&collection=bnsusersuggest&query=char
MARKET_API_ENDPOINT = 'http://na.bnsbazaar.com/api/market' # ITEM NAME
ITEM_NAME_SUGGEST = 'http://na-search.ncsoft.com/openapi/bnsmarketsuggest.jsp' #?site=bns&display=1&collection=bnsitemsuggest&lang=en&callback=suggestKeyword&query=items
BASE_ITEM_IMAGE_URL = 'http://static.ncsoft.com/bns_resource/ui_resource'
def _float(var):
"""
Attempts to an entry to a float (normally works for this)
"""
if var in [None, False]:
return 0
if var is True:
return 1
if isinstance(var, float):
return var
if isinstance(var, int):
return float(var)
assert isinstance(var, str)
assert any(x.isnumeric() for x in var)
var = var.split()[-1]
while len(var) > 0 and not var[-1].isnumeric():
var = var[:-1]
while len(var) > 0 and not var[0].isnumeric():
var = var[1:]
return float(var)
def _subtract(var1, var2, string=True, percent=False):
"""
Visually do math
"""
if string:
if percent:
return '{}% - {}% = {}%'.format(var1, var2, var1-var2)
return '{} - {} = {}'.format(var1, var2, var1-var2)
if percent:
return str(var1) + '%', str(var2) + '%', str(var1-var2) + '%'
return var1, var2, var1-var2
def get_name(gear_item):
"""
A helper function for extracting names
"""
try:
gear_item = gear_item.find('div', class_='name')
if not gear_item:
return None
if gear_item.find('span', class_='empty') is not None:
return None
return gear_item.span.text
except AttributeError:
return None
def set_bonus(set_) -> tuple:
"""
returns the set bonus for a user as a generator
"""
return (':\n'.join(('\n'.join((t.strip() for t in z.text.strip().split('\n') if t.strip() != '')) for z in x)) for x
in dict(zip(set_.find_all('p', class_='discription'), set_.find_all('p', class_='setEffect'))).items())
async def fetch_url(url, params={}):
"""
Fetch a url and return soup
"""
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as re:
return BeautifulSoup(await re.text(), parser)
async def search_user(user, suggest=True, max_count=3) -> list:
soup = await fetch_url(SEARCH_URL, params={'c': user})
search = soup.find('div', class_='searchList')
if suggest:
return [(x.dl.dt.a.text, [b.text for b in x.dl.find('dd', class_='other').dd.find_all('li')]) for x in
search.find_all('li') if x.dt is not None][:max_count]
return (search.li.dl.dt.a.text,
[x.text for x in search.li.dl.find('dd', class_='other').dd.find_all('li') if x is not None])
async def fetch_profile(user) -> dict:
"""
Fetches a user and returns the data as a dict
Dictionary Keys:
Account Name - The display name for their account (str).
Character Name - The Name of the given character (str).
Level - Character's level (str).
HM Level - Character's HM level (str).
Server - Server the character is on (str).
Faction - The Faction the character is in (str).
Picture - Link to the character's profile picture (str).
Stats - A dictionary object with stats (each stat is also a dict).
Gear - The gear of the Given Character (list).
SoulSheild - SoulSheild stats (str).
Set Bonus - Set bonus affects, a list of strings (list).
Outfit - The outfit of the character (dict).
Other Characters - A list of the other characters on that user's account (list).
Region - The region the user is from.
:param user: The name of the character you wish to fetch data for
"""
CharacterName, other_chars = await search_user(user, suggest=False)
soup = await fetch_url(PROFILE_URL, params={'c': CharacterName})
if len(soup.find_all('div', clas_='pCharacter error', id='container')):
raise ServiceUnavialable('Cannot Access BNS At this time')
# INFORMATION
Name = soup.find('a', href='#').text
CharacterName = soup.find('dt').span.text[1:-1]
Class, Level, Server, *Faction = [x.text.strip().replace('\xa0', ' ') for x in
soup.find('dd', class_='desc').find_all('li')]
if len(Faction) == 0:
Clan = Rank = Faction = None
elif len(Faction) == 1:
Faction = Faction[0].split()
Rank = ' '.join(Faction[2:])
Faction = ' '.join(Faction[:2])
Clan = None
else:
Clan = Faction[1]
Faction = Faction[0].split()
Rank = ' '.join(Faction[2:])
Faction = ' '.join(Faction[:2])
Level = Level.split()
if len(Level) > 2:
HM = int(Level[-1])
else:
HM = 0
Level = int(Level[1])
# ATTACK
ATK = soup.find('div', class_='attack').dl
sub = [z for z in (dict(zip((z.text for z in x.find_all('span', class_='title')),
(z.text for z in x.find_all('span', class_='stat-point')))) for x in ATK.find_all('dd')) if len(z)][:-2]
temp = ATK.find_all('dt')[:-2]
ATK = dict(
zip([t.find('span', class_='title').text for t in temp], [t.find('span', 'stat-point').text for t in temp]))
del ATK['Mastery']
[ATK.update({x: {'Total': ATK.get(x)}}) for x in ATK.keys()]
ATK['Attack Power'].update(sub[0])
ATK['Piercing'].update(sub[2])
ATK['Accuracy'].update(sub[3])
ATK['Critical Hit'].update(sub[5])
ATK['Critical Damage'].update(sub[6])
# DEFENSE
Defense = soup.find('div', class_='defense')
temp = Defense.dl.find_all('dt')
sub = [z for z in (dict(zip((z.text for z in x.find_all('span', class_='title')),
(z.text for z in x.find_all('span', class_='stat-point')))) for x in Defense.find_all('dd')) if len(z)]
Defense = dict(
zip([t.find('span', class_='title').text for t in temp], [t.find('span', 'stat-point').text for t in temp]))
[Defense.update({x: {'Total': Defense.get(x)}}) for x in Defense.keys()]
del Defense['Debuff Defense']
Defense['Defense'].update(sub[1])
Defense['Evolved Defense'].update(sub[2])
Defense['Evasion'].update(sub[3])
Defense['Block'].update(sub[4])
Defense['Critical Defense'].update(sub[5])
Defense['Health Regen'].update(sub[7])
Defense['Recovery'].update(sub[8])
# GEAR
Weapon = get_name(soup.find('div', class_='wrapWeapon'))
Necklace = get_name(soup.find('div', class_='wrapAccessory necklace'))
Earring = get_name(soup.find('div', class_='wrapAccessory earring'))
Ring = get_name(soup.find('div', class_='wrapAccessory ring'))
Bracelet = get_name(soup.find('div', class_='wrapAccessory bracelet'))
Belt = get_name(soup.find('div', class_='wrapAccessory belt'))
Soul = get_name(soup.find('div', class_='wrapAccessory soul'))
# SoulSheild
SS = soup.find('div', class_='wrapGem')
BONUS = ()
Stats = ()
if any(x.img is not None for x in SS.find_all('span')):
BONUS = set_bonus(SS.find('div', class_='lyCharmEffect'))
Stats = ([': '.join([tr.th.text, tr.td.text]) for tr in SS.table.find_all('tr')])
# OUTFIT
Clothes = get_name(soup.find('div', class_='wrapAccessory clothes'))
Head = get_name(soup.find('div', class_='wrapAccessory tire'))
Face = get_name(soup.find('div', class_='wrapAccessory faceDecoration'))
Adornment = get_name(soup.find('div', class_='wrapAccessory clothesDecoration'))
# PROFILEPICTURE
Picture = soup.find('section').div.div.img.get('src')
del soup, temp, sub
r = {'Account Name': Name,
'Character Name': CharacterName,
'Class': Class,
'Level': Level,
'HM Level': HM,
'Server': Server,
'Faction': Faction,
'Clan': Clan,
'Faction Rank': Rank,
'Picture': Picture,
'Stats': {},
'Gear': {
'Weapon': Weapon,
'Necklace': Necklace,
'Earring': Earring,
'Ring': Ring,
'Bracelet': Bracelet,
'Belt': Belt,
'Soul': Soul},
'SoulSheild': Stats,
'Set Bonus': '\n\n'.join(BONUS),
'Outfit': {'Clothes': Clothes,
'Head': Head,
'Face': Face,
'Adornment': Adornment},
'Other Characters': other_chars,
'Region': 'NA'}
r['Stats'].update(ATK)
r['Stats'].update(Defense)
return r
async def get_item_name_suggestions(item, display, session):
async with session.get(ITEM_NAME_SUGGEST, params={'site': 'bns', 'display': display, 'collection': 'bnsitemsuggest', 'callback': 'suggestKeyword', 'query': item}) as re:
data: dict = json.loads((await re.text())[17:-4])
if data['result'] != "0":
raise ServiceUnavialable
return data
async def search_item(item, display:int=1):
def price_parse(html):
soup = BeautifulSoup(html, parser)
return [int(x.text.split()[0]) if x != 0 else 0 for x in [soup.find(name='span', attrs={'class':c}) or 0 for c in ('gold', 'silver', 'bronze')]]
async def get_item_data(titem, session):
async with session.get(f'{MARKET_API_ENDPOINT}/{titem}/true') as re:
data = await re.json()
if (not isinstance(data, list)) or len(data) == 0:
raise InvalidData("Market Returned Invalid Data")
return {'icon': ''.join([BASE_ITEM_IMAGE_URL, data[0]['iconImg']]),
'prices': [(price_parse(e['price_html']), int(e['sale_data']['amount'])) for e in data],
'name': titem}
async with aiohttp.ClientSession() as session:
data = await get_item_name_suggestions(item, display, session)
suggestions = [x[0] for x in data["front"] if len(x) == 2 and x[1] == 0 and isinstance(x[0], str)]
return [await get_item_data(item, session) for item in suggestions]
class Character(object):
"""
Character Object
pretty_profile - Return a prettified profile Overview as a string.
pretty_gear - Return a prettified Gear Overview as a string.
pretty_stats - Return a prettified Stats Overview as a string.
pretty_outfit - Return a prettified Outfit Overview as a string.
Notice: The Following items can be used as self.item with space replaced with "_" and it is not case sensitive.
Notice: The Following items can also be used as self[item] it is case sensitive, no replacement.
Account Name - The display name for their account (str).
Character Name - The Name of the given character (str).
Level - Character's level (str).
HM Level - Character's HM level (str).
Server - Server the character is on (str).
Faction - The Faction the character is in (str).
Picture - Link to the character's profile picture (str).
Stats - A dictionary object with stats (each stat is also a dict).
Gear - The gear of the Given Character (list).
SoulSheild - SoulSheild stats (str).
Set Bonus - Set bonus affects, a list of strings (list).
Outfit - The outfit of the character (dict).
Other Characters - A list of the other characters on that user's account (list).
Region - The region the user is from.
"""
def __init__(self, data: dict):
data = data.copy()
self.name = data['Character Name']
self.__data = data
self.items = self.__data.items
self.keys = self.__data.keys
self.account = data['Account Name']
async def refresh(self):
self.__data = await fetch_profile(self.name)
self.items = self.__data.items
self.keys = self.__data.keys
def __call__(self):
"""returns an awaitable to refresh"""
return self.refresh()
def __getattr__(self, item):
return self[str(item)]
def __getitem__(self, item):
item = str(item).replace('_', ' ')
k = list(self.__data.keys())
k = dict(zip([z.lower() for z in k], k))
try:
return self.__data[k.get(item.lower())]
except KeyError:
return self.__data[k.get(item.lower().replace(' ', '_'))]
def pretty_profile(self):
"""Return A prettyfied profile Overview as a string"""
if self['HM Level']:
temp = 'Level {} Hongmoon Level {}'.format(self['Level'], self['HM Level'])
else:
temp = 'Level {}'.format(self['Level'])
text = ['**Display Name:** {}'.format(self['Account Name']),
'**Character**: {} {}'.format(self['Character Name'], temp),
'**Weapon**: {}'.format(self['Gear']['Weapon']),
'**Server:** {}'.format(self['Server'])]
if self['Faction']:
if self['Faction'] == 'Cerulean Order':
text.append('**Faction:** Cerulean Order :blue_heart:')
else:
text.append('**Faction"** Crimson Legion :heart:')
text.append('**Faction Rank:** {}'.format(self['Faction Rank']))
if self['Clan']:
text.append('**Clan:** {}'.format(self['Clan']))
if len(self['Other Characters']):
temp = ['[', ']']
temp.insert(1, ', '.join(self['Other Characters']))
text.append('**Other Characters:**\n {}'.format(''.join(temp)))
text.append(self['Picture'])
return '\n'.join(text).strip()
def pretty_gear(self):
"""Return a prettyfied Gear Overview as a string"""
temp = [self['Character Name'], '[' + self['Class'],'Level', str(self['Level'])]
if self['HM Level']:
temp += ['Hongmoon Level', str(self['HM Level'])]
temp = ' '.join(temp) + ']'
divider = '─'*len(temp)
stats = self['Stats']
send_this = ['```', temp, divider, 'Total HP {} Attack Power {}'.format(stats['HP']['Total'], stats['Attack Power']['Total']),
divider, 'Soul Shield Attributes (Base + Fused + Set)', '\n'.join(self['SoulSheild']),
''.join(self['Set Bonus']), '']
gear = self['Gear']
temp = list(gear.keys())
temp.sort()
for k in temp:
send_this.append('{}: {}'.format(k, gear[k]))
send_this.append(divider)
send_this.append('```')
return '\n'.join(send_this).strip()
def pretty_stats(self):
"""Return a prettyfied Outfit Overview as a string"""
temp = [self['Character Name'], '[' + self['Class'] + ',','Level', str(self['Level'])]
if self['HM Level']:
temp += ['Hongmoon Level', str(self['HM Level'])]
temp = ' '.join(temp) + ']'
divider = '─'*len(temp)
stats = self['Stats']
send_this = ['```ruby', temp, divider, 'HP: {}'.format(stats['HP']['Total']),
'Attack Power: {}'.format(stats['Attack Power']['Total']),
'Piercing: {}'.format(stats['Piercing']['Total']),
'+Defense Piercing: {}'.format(stats['Piercing']['Defense Piercing']),
'+Block Piercing: {}'.format(stats['Piercing']['Block Piercing']),
'Accuracy: {0[Total]} ({0[Hit Rate]})'.format(stats['Accuracy']),
'Critical Hit: {0[Total]} ({0[Critical Rate]})'.format(stats['Critical Hit']),
'Critical Damage: {0[Total]} ({0[Increase Damage]})'.format(stats['Critical Damage']), divider,
'Defense: {0[Total]} ({0[Damage Reduction]})'.format(stats['Defense']),
'Evasion: {}'.format(stats['Evasion']['Total']),
'+Evasion Rate: {}'.format(stats['Evasion']['Evasion Rate']),
'+Counter Bonus: {}'.format(stats['Evasion']['Counter Bonus']),
('Block: {0[Total]}\n'
'+Damage Reduction: {0[Damage Reduction]}\n'
'+Block Bonus: {0[Block Bonus]}\n'
'+Block Rate: {0[Block Rate]}').format(stats['Block']),
'Health Regen (IN/OUT): {0[In Combat]}/{0[Out of Combat]}'.format(stats['Health Regen']),
'Recovery Rate: {}'.format(stats['Recovery']['Total']),
'```']
return '\n'.join(send_this)
def pretty_outfit(self):
"""Return a prettyfied Outfit Overview as a string"""
outfit = self['Outfit']
o = list(outfit.keys())
o.sort()
return '\n'.join(['```'] + ['{}\'s Outfit:'.format(self['Character Name'])] +
['{}: {}'.format(k, outfit[k]) for k in o] + ['```'])
def avg_dmg(self):
stats = self['Stats']
return avg_dmg(stats['Attack Power']['Total'],
stats['Critical Hit']['Critical Rate'],
stats['Critical Damage']['Total'],
elemental_bonus='100%')
async def get_character(user: str) -> Character:
"""
Return a Character Object for the given user.
:param user: The user to create an object for
:return: Returns A Character Object for the given user
"""
if not isinstance(user, str):
raise InvalidData('Expected type str for user, found {} instead'.format(type(user).__name__))
try:
return Character(await fetch_profile(user))
except AttributeError:
raise CharacterNotFound('Failed to find character "{}"'.format(user))
except Exception as e:
print('[!] Error:', e)
raise Exception(e)
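# Hedged usage sketch (added, not part of the original module): fetching a character
# and printing an overview; the character name is a placeholder.
#   import asyncio
#   char = asyncio.get_event_loop().run_until_complete(get_character('SomeCharacter'))
#   print(char.pretty_profile())
#   print(char.pretty_stats())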
async def compare(user1: Character, user2: Character, update=False):
"""A WIP compare fucntion."""
assert isinstance(user1, Character) and isinstance(user2, Character)
if update:
await user1.refresh()
await user2.refresh()
temp = '{} - {}'.format(user1['Character Name'], user2['Character Name'])
divider = '─'*len(temp)
user1 = user1['Stats']
user2 = user2['Stats']
for k,v in user1.items():
for k2,v2 in v.items():
v[k2] = _float(v2)
user1[k] = v
for k,v in user2.items():
for k2,v2 in v.items():
v[k2] = _float(v2)
user2[k] = v
send_this = [temp, divider, 'HP: {}'.format(_subtract(user1['HP']['Total'], user2['HP']['Total'])),
'Attack Power: {}'.format(_subtract(user1['Attack Power']['Total'],
user2['Attack Power']['Total'])),
'Piercing: {}'.format(_subtract(user1['Piercing']['Total'], user2['Piercing']['Total'])),
'+Defense Piercing: {}'.format(_subtract(user1['Piercing']['Defense Piercing'],
user2['Piercing']['Defense Piercing'],
percent=True)),
'+Block Piercing: {}'.format(_subtract(user1['Piercing']['Block Piercing'],
user2['Piercing']['Block Piercing'],
percent=True)),
'Accuracy: {}'.format(_subtract(user1['Accuracy']['Total'],
user2['Accuracy']['Total'])),
'+']
return '\n'.join(send_this)
def avg_dmg(attack_power: str, critical_rate: str, critical_damage: str, elemental_bonus: str='100%'):
"""
AVG Damage
Calculates The Average Damage
:param attack_power: Attack Power (Total)
:param critical_hit_rate: Critical Hit -> Critical Rate
:param critical_damage: Critical Damage (Total)
:param elemental_bonus: Total elemental_bonus% - 500
"""
attack_power = float(attack_power)
crit_rate = float(critical_rate.strip(' %'))
crit_damage = float(critical_damage)
elemental_bonus = float(elemental_bonus.strip(' %'))
# Result is No Blue Buff
# Result 2 is with Blue Buff
result = attack_power * (1 - (crit_rate * 0.01) + (crit_rate * crit_damage * 0.0001))
if (crit_rate < 60):
result2 = attack_power * (1 - ((crit_rate + 50) * 0.01) + (crit_rate + 50) * (crit_damage + 40) * .0001)
else: result2 = attack_power * ((crit_damage + 40) * .01)
if elemental_bonus in [0, 100]: return round(result, 2), round(result2, 2)
result *= (elemental_bonus * 0.01)
result2 *= (elemental_bonus * 0.01)
return round(result, 2), round(result2, 2)
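# Worked example (added, not in the original module): with 500 attack power, a 50%
# critical rate and 200% critical damage, the plain average is
# 500 * (1 - 0.5 + 50 * 200 * 0.0001) = 750.0, and with the +50% rate / +40% damage
# buff it becomes 500 * 2.4 = 1200.0.
#   avg_dmg('500', '50%', '200')   # -> (750.0, 1200.0)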
license: mit | hash: 8,555,198,700,855,818,000 | line_mean: 42.542094 | line_max: 173 | alpha_frac: 0.564395 | autogenerated: false | ratio: 3.531224 | config_test: false | has_no_keywords: false | few_assignments: false
repo: Llamatech/sis-fibo | path: model/vos/cuenta.py | copies: 1 | size: 2245
#-*- coding:iso-8859-1 -*-
"""
Class that models the information of an account in the system.
"""
# NUMERO, SALDO, TIPO_CUENTA, CERRADA, CLIENTE, OFICINA
class Cuenta(object):
def __init__(self, numero, saldo, tipo_cuenta, cerrada, cliente, oficina):
self.numero = numero
self.saldo = saldo
self.tipo_cuenta = tipo_cuenta
self.cerrada = cerrada
self.cliente = cliente
self.oficina = oficina
def __repr__(self):
args = [self.numero, self.saldo, self.tipo_cuenta, self.cerrada, self.cliente, self.oficina]
args = map(str, args)
return "numero: %s; saldo: %s; tipo_cuenta:%s; cerrada:%s; cliente: %s; oficina: %s" % tuple(args)
def __str__(self):
return self.__repr__()
class CuentaR(object):
def __init__(self, numero, fecha_creacion, saldo, tipo, cerrada, id_cliente, nom_cliente, ap_cliente, id_of, of_nombre, fecha_umov):
self.numero = numero
if fecha_creacion is not None:
self.fecha_creacion = fecha_creacion.strftime('%d/%m/%Y')
else:
self.fecha_creacion = None
self.saldo = saldo
self.tipo = tipo
self.cerrada = cerrada
self.id_cliente = id_cliente
self.nom_cliente = nom_cliente
self.ap_cliente = ap_cliente
self.id_of = id_of
self.of_nombre = of_nombre
if fecha_umov is not None:
self.fecha_umov = fecha_umov.strftime('%d/%m/%Y')
else:
self.fecha_umov = fecha_umov
def dict_repr(self):
if self.cerrada == 'N':
url = '/cuentas?numero='+str(self.numero)
else:
url = None
d = {
'numero':self.numero,
'fecha_creacion':self.fecha_creacion,
'saldo':self.saldo,
'tipo':self.tipo,
'cerrada':self.cerrada,
'id_cliente':self.id_cliente,
'nom_cliente':self.nom_cliente,
'ap_cliente':self.ap_cliente,
'id_of':self.id_of,
'of_nombre':self.of_nombre,
'fecha_umov':self.fecha_umov,
'delete':url
}
return d
| gpl-2.0 | 7,610,641,571,238,857,000 | 33.619048 | 136 | 0.536988 | false | 3.004016 | false | false | false |
99cloud/keystone_register | openstack_dashboard/dashboards/project/access_and_security/security_groups/forms.py | 1 | 12028 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import validators
from django.core.urlresolvers import reverse
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils.validators import validate_port_range
from horizon.utils import fields
from openstack_dashboard import api
class CreateGroup(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"),
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" ASCII characters and numbers.")},
validators=[validators.validate_slug])
description = forms.CharField(label=_("Description"))
def handle(self, request, data):
try:
sg = api.nova.security_group_create(request,
data['name'],
data['description'])
messages.success(request,
_('Successfully created security group: %s')
% data['name'])
return sg
except:
redirect = reverse("horizon:project:access_and_security:index")
exceptions.handle(request,
_('Unable to create security group.'),
redirect=redirect)
class AddRule(forms.SelfHandlingForm):
id = forms.IntegerField(widget=forms.HiddenInput())
ip_protocol = forms.ChoiceField(label=_('IP Protocol'),
choices=[('tcp', _('TCP')),
('udp', _('UDP')),
('icmp', _('ICMP'))],
help_text=_("The protocol which this "
"rule should be applied to."),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'protocol'}))
port_or_range = forms.ChoiceField(label=_('Open'),
choices=[('port', _('Port')),
('range', _('Port Range'))],
widget=forms.Select(attrs={
'class': 'switchable switched',
'data-slug': 'range',
'data-switch-on': 'protocol',
'data-protocol-tcp': _('Open'),
'data-protocol-udp': _('Open')}))
port = forms.IntegerField(label=_("Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-port': _('Port')}),
validators=[validate_port_range])
from_port = forms.IntegerField(label=_("From Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('From Port')}),
validators=[validate_port_range])
to_port = forms.IntegerField(label=_("To Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('To Port')}),
validators=[validate_port_range])
icmp_type = forms.IntegerField(label=_("Type"),
required=False,
help_text=_("Enter a value for ICMP type "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'protocol',
'data-protocol-icmp': _('Type')}),
validators=[validate_port_range])
icmp_code = forms.IntegerField(label=_("Code"),
required=False,
help_text=_("Enter a value for ICMP code "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'protocol',
'data-protocol-icmp': _('Code')}),
validators=[validate_port_range])
source = forms.ChoiceField(label=_('Source'),
choices=[('cidr', _('CIDR')),
('sg', _('Security Group'))],
help_text=_('To specify an allowed IP '
'range, select "CIDR". To '
'allow access from all '
'members of another security '
'group select "Security '
'Group".'),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'source'}))
cidr = fields.IPField(label=_("CIDR"),
required=False,
initial="0.0.0.0/0",
help_text=_("Classless Inter-Domain Routing "
"(e.g. 192.168.0.0/24)"),
version=fields.IPv4 | fields.IPv6,
mask=True,
widget=forms.TextInput(
attrs={'class': 'switched',
'data-switch-on': 'source',
'data-source-cidr': _('CIDR')}))
security_group = forms.ChoiceField(label=_('Security Group'),
required=False,
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-sg': _('Security '
'Group')}))
def __init__(self, *args, **kwargs):
sg_list = kwargs.pop('sg_list', [])
super(AddRule, self).__init__(*args, **kwargs)
# Determine if there are security groups available for the
# source group option; add the choices and enable the option if so.
if sg_list:
security_groups_choices = sg_list
else:
security_groups_choices = [("", _("No security groups available"))]
self.fields['security_group'].choices = security_groups_choices
def clean(self):
cleaned_data = super(AddRule, self).clean()
ip_proto = cleaned_data.get('ip_protocol')
port_or_range = cleaned_data.get("port_or_range")
source = cleaned_data.get("source")
icmp_type = cleaned_data.get("icmp_type", None)
icmp_code = cleaned_data.get("icmp_code", None)
from_port = cleaned_data.get("from_port", None)
to_port = cleaned_data.get("to_port", None)
port = cleaned_data.get("port", None)
if ip_proto == 'icmp':
if icmp_type is None:
msg = _('The ICMP type is invalid.')
raise ValidationError(msg)
if icmp_code is None:
msg = _('The ICMP code is invalid.')
raise ValidationError(msg)
if icmp_type not in xrange(-1, 256):
msg = _('The ICMP type not in range (-1, 255)')
raise ValidationError(msg)
if icmp_code not in xrange(-1, 256):
msg = _('The ICMP code not in range (-1, 255)')
raise ValidationError(msg)
cleaned_data['from_port'] = icmp_type
cleaned_data['to_port'] = icmp_code
else:
if port_or_range == "port":
cleaned_data["from_port"] = port
cleaned_data["to_port"] = port
if port is None:
msg = _('The specified port is invalid.')
raise ValidationError(msg)
else:
if from_port is None:
msg = _('The "from" port number is invalid.')
raise ValidationError(msg)
if to_port is None:
msg = _('The "to" port number is invalid.')
raise ValidationError(msg)
if to_port < from_port:
msg = _('The "to" port number must be greater than '
'or equal to the "from" port number.')
raise ValidationError(msg)
if source == "cidr":
cleaned_data['security_group'] = None
else:
cleaned_data['cidr'] = None
return cleaned_data
def handle(self, request, data):
try:
rule = api.nova.security_group_rule_create(
request,
data['id'],
data['ip_protocol'],
data['from_port'],
data['to_port'],
data['cidr'],
data['security_group'])
messages.success(request,
_('Successfully added rule: %s') % unicode(rule))
return rule
except:
redirect = reverse("horizon:project:access_and_security:"
"security_groups:detail", args=[data['id']])
exceptions.handle(request,
_('Unable to add rule to security group.'),
redirect=redirect)
| apache-2.0 | -5,296,865,002,722,064,000 | 47.305221 | 79 | 0.427835 | false | 5.617936 | false | false | false |
IanDoarn/zbsmsa | tests/loans.py | 1 | 15622 | """
loans.py
Final working version of the mutation automation.
Mutates items listed in a special Excel file from inventory type
CI (Centralized Inventory) to type ZDI (Zimmer Distributor Inventory).
Written by: Ian Doarn
Maintained by: Ian Doarn
"""
from zbsmsa.site import Site
from zbsmsa.inventory.stock import Stock, ProductChooser
from zbsmsa.utils.exceptions import InvalidRange, ItemAddError
import xlrd
import ctypes
import sys
import time
import logging
import os
from datetime import datetime
# TODO: Comment this file
__author__ = "Ian Doarn"
__maintainer__ = "Ian Doarn"
__current_date__ = '{:%m-%d-%Y}'.format(datetime.now())
PATH = os.path.dirname(os.path.realpath(__file__))
LOG_FILE_NAME = 'mutation_loans_{}.log'.format(__current_date__)
LOG_FILE_PATH = os.path.join(PATH, LOG_FILE_NAME)
MB_OK = 0x0
MB_HELP = 0x4000
ICON_EXLAIM = 0x30
ICON_INFO = 0x40
ICON_STOP = 0x10
# if os.path.isfile(os.path.join(PATH, LOG_FILE_NAME)):
#     os.remove(os.path.join(PATH, LOG_FILE_NAME))
# elif os.path.isfile(LOG_FILE_PATH):
# os.remove(LOG_FILE_PATH)
# else:
if not os.path.isfile(LOG_FILE_PATH):
    with open(LOG_FILE_PATH, 'w') as l_file:
l_file.close()
logger = logging.getLogger()
handler = logging.FileHandler(LOG_FILE_PATH)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def message_box(title, text, style, log=True):
if log:
logger.info("[{}]: {}".format(title, text))
ctypes.windll.user32.MessageBoxW(0, text, title, style)
def load_workbook(file):
workbook = xlrd.open_workbook(file)
sheets = []
for sheet in workbook.sheets():
try:
headers = sheet.row_values(0)
rows = []
for y in range(1, sheet.nrows):
row_dict = dict(zip(headers, sheet.row_values(y)))
if row_dict['serial_number'] == '' and row_dict['lot_number'] == '':
logger.warning("Missing lot and serial at sheet:{} row:{} item:{} lot/serial:{}".format(
sheet.name, str(y + 1), row_dict['product_number'], 'NULL'
))
else:
row_dict['row'] = str(y + 1)
rows.append(row_dict)
sheets.append({'sheet_name': sheet.name,
'data': rows,
'headers': headers,
'total_rows': sheet.nrows})
del rows
del headers
except Exception:
sheets.append({'sheet_name': sheet.name,
'data': None,
'headers': None,
'total_rows': None})
return sheets
def mutate_loans(site, stock, product_chooser, data, note="CI-ZDI Mutation, Loaners Late Debit Policy"):
time.sleep(3)
site.ribbon("InventoryTab")
stock.stock_tab()
for row in data:
current_site = site.get_site_name()
logger.info('logged into [{}]'.format(site.get_site_name()))
current_item_info = "Item: {} Lot: {} Row: {}".format(
row['product_number'],
row['lot_number'],
row['row'])
if row['distributor'] != current_site:
site.change_site(row['distributor'])
logger.info("site changed to [{}]".format(site.get_site_name()))
site.ribbon("InventoryTab")
stock.stock_tab()
stock.stock_tab_product_chooser()
product_chooser.search(row['product_number'])
time.sleep(3)
try:
product_chooser.add(row['product_number'])
except ItemAddError as iae:
message_box('Error',
'Unable to add [{}] to the product chooser. '
'Please add this manually and press ok to continue!'.format(current_item_info),
MB_OK | ICON_INFO)
product_chooser.finish()
stock.stock_tab_search()
time.sleep(3)
stock.iterate_search_table()
try:
for table_row in stock.current_table_data:
if row['lot_number'] != '' and row['serial_number'] == '':
tr_text = table_row['text']
if tr_text.__contains__(row['lot_number']) and tr_text.__contains__('CASE-'+row['case_number']):
stock.select_row(table_row['row'])
try:
if stock.mutate_stock(note, click_ok_on_error=False):
logger.info("SUCCESSFUL MUTATION: [{}]".format(current_item_info))
break
else:
message_box("Mutation",
"Item was not mutated!"
"\nItem:{} Lot:{} Row:{}".format(row['product_number'],
row['lot_number'],
row['row']),
MB_OK | ICON_INFO)
except Exception as m_error:
message_box('Error', "{}. {}\nPlease close any open menus and errors then press ok"
" to continue.".format(m_error, current_item_info), MB_OK | ICON_INFO,
log=False)
logger.error("MutationError:{} {}".format(m_error, current_item_info))
break
break
elif str(int(row['serial_number'])) != '':
tr_text = table_row['text']
if tr_text.__contains__(row['case_number']) and tr_text.__contains__(
"Serial " + row['serial_number']):
stock.select_row(table_row['row'])
try:
if stock.mutate_stock(note, click_ok_on_error=False):
logger.info("SUCCESSFUL MUTATION: [{}]".format(current_item_info))
break
else:
message_box("Mutation", "Item was not mutated! {}".format(current_item_info),
MB_OK | ICON_INFO)
except Exception as m_error:
message_box('Error', "{}. {}\nPlease close any open menus and errors then press ok"
" to continue.".format(m_error, current_item_info), MB_OK | ICON_INFO,
log=False)
logger.error("MutationError:{} {}".format(m_error, current_item_info))
break
break
except ValueError as val_error:
message_box('Error', "No Records found. [{}] {}".format(val_error, current_item_info), MB_OK | ICON_INFO)
except InvalidRange as ivr_error:
message_box('Error', ivr_error.message + " {}".format(current_item_info), MB_OK | ICON_INFO)
except Exception as other_error:
message_box('Error', str(other_error) + " {}".format(current_item_info), MB_OK | ICON_INFO)
else:
continue
def mutate_bins(site, stock, product_chooser, data, note="CI-ZDI Mutation, Loaners Late Debit Policy"):
time.sleep(3)
site.ribbon("InventoryTab")
stock.stock_tab()
for row in data:
current_item_info = "Item: {} Lot: {} Row: {}".format(
row['product_number'],
row['lot_number'],
row['row'])
if row['bin'] == 'CI MISSING ZONE-0-0':
message_box('Error', 'Can not mutate item in bin [{}]: {}'.format(row['bin'], current_item_info),
MB_OK | ICON_EXLAIM, log=False)
logger.warning('Can not mutate item in bin [{}]: {}'.format(row['bin'], current_item_info))
else:
current_site = site.get_site_name()
logger.info('logged into [{}]'.format(site.get_site_name()))
if row['name'] != current_site:
site.change_site(row['name'])
logger.info("site changed to [{}]".format(site.get_site_name()))
site.ribbon("InventoryTab")
stock.stock_tab()
stock.stock_tab_product_chooser()
product_chooser.search(row['product_number'])
time.sleep(3)
try:
product_chooser.add(row['product_number'])
except ItemAddError as iae:
message_box('Error',
'Unable to add [{}] to the product chooser. '
'Please add this manually and press ok to continue!'.format(current_item_info),
MB_OK | ICON_INFO)
product_chooser.finish()
stock.stock_tab_search()
time.sleep(3)
stock.iterate_search_table()
try:
for table_row in stock.current_table_data:
if row['lot_number'] != '' and row['serial_number'] == '':
tr_text = table_row['text']
bin_num = ''
if row['bin'] == 'location_bin':
if tr_text.__contains__('Location Bin'):
bin_num = 'Location Bin'
if tr_text.__contains__('Consigned'):
bin_num = 'Consigned'
else:
bin_num = row['bin']
if tr_text.__contains__(row['lot_number']) and tr_text.__contains__(bin_num):
stock.select_row(table_row['row'])
try:
if stock.mutate_stock(note, click_ok_on_error=False):
logger.info("SUCCESSFUL MUTATION: [{}]".format(current_item_info))
break
else:
message_box("Mutation",
"Item was not mutated!"
"\nItem:{} Lot:{} Row:{}".format(row['product_number'],
row['lot_number'],
row['row']),
MB_OK | ICON_INFO)
except Exception as m_error:
message_box('Error', "{}. {}\nPlease close any open menus and errors then press ok"
" to continue.".format(m_error, current_item_info), MB_OK | ICON_INFO,
log=False)
logger.error("MutationError:{} {}".format(m_error, current_item_info))
break
break
elif row['serial_number'] != '':
tr_text = table_row['text']
bin_num = ''
if row['bin'] == 'location_bin':
if tr_text.__contains__('Location Bin'):
bin_num = 'Location Bin'
if tr_text.__contains__('Consigned'):
bin_num = 'Consigned'
else:
bin_num = row['bin']
if tr_text.__contains__(bin_num) and tr_text.__contains__("Serial " + row['serial_number']):
stock.select_row(table_row['row'])
try:
if stock.mutate_stock(note, click_ok_on_error=False):
logger.info("SUCCESSFUL MUTATION: [{}]".format(current_item_info))
break
else:
message_box("Mutation", "Item was not mutated! {}".format(current_item_info),
MB_OK | ICON_INFO)
except Exception as m_error:
message_box('Error', "{}. {}\nPlease close any open menus and errors then press ok"
" to continue.".format(m_error, current_item_info), MB_OK | ICON_INFO,
log=False)
logger.error("MutationError:{} {}".format(m_error, current_item_info))
break
break
except ValueError as val_error:
message_box('Error', "No Records found. [{}] {}".format(val_error, current_item_info),
MB_OK | ICON_INFO)
except InvalidRange as ivr_error:
message_box('Error', ivr_error.message + " {}".format(current_item_info),
MB_OK | ICON_INFO)
except Exception as other_error:
message_box('Error', str(other_error) + " {}".format(current_item_info),
MB_OK | ICON_INFO)
else:
continue
def main(username, password, file, driver):
site = Site(username, password, driver, launch=False)
mutation_data = load_workbook(file)
site.launch(maximize=False)
# Log launch information
logger.debug("USER[{}]".format(username))
logger.debug("DRIVER[{}]".format(driver))
logger.debug("FILE[{}]".format(file))
logger.debug("URI[{}]".format(site.driver_uri))
logger.debug("SESSIONID[{}]".format(site.driver_session_id))
stock = Stock(site)
pc = ProductChooser(stock)
message_box("Mutation", "Please press ok when the site has fully loaded",
MB_OK | ICON_INFO, log=False)
site.login()
for sheet in mutation_data:
if sheet['sheet_name'] == 'Loans Transferred':
mutate_loans(site, stock, pc, sheet['data'])
if sheet['sheet_name'] == 'Bin Transferred':
mutate_bins(site, stock, pc, sheet['data'])
site.close()
if __name__ == '__main__':
usage = "loans.py [username] [password] [driver location] [file location]"
if len(sys.argv[1:]) not in [4, 5]:
print(usage)
else:
_user = sys.argv[1]
_pass = sys.argv[2]
_drive_loc = sys.argv[3]
_file_loc = sys.argv[4]
try:
logger.info("Begin program execution at main()")
main(_user, _pass, _file_loc, _drive_loc)
except KeyboardInterrupt as ki_error:
message_box("Fatal Error", "[FATAL]::Fatal error caused program to fail.\nERROR:{}".format(ki_error),
MB_OK | ICON_STOP, log=False)
logger.critical("[FATAL]:: Fatal error caused program to fail. ERROR:{}".format(ki_error))
except Exception as fatal_error:
message_box("Fatal Error", "[FATAL]::Fatal error caused program to fail.\nERROR:{}".format(fatal_error),
MB_OK | ICON_STOP, log=False)
logger.critical("[FATAL]:: Fatal error caused program to fail. ERROR:{}".format(fatal_error))
else:
pass
| apache-2.0 | 6,084,375,937,478,261,000 | 44.150289 | 117 | 0.475355 | false | 4.379591 | false | false | false |
LAIRLAB/qr_trees | src/python/run_ilqr_diffdrive.py | 1 | 2328 | #!/usr/bin/env python
#
# Arun Venkatraman ([email protected])
# December 2016
#
# If we are not running from the build directory, then add lib to path from
# build assuming we are running from the python folder
import os
full_path = os.path.realpath(__file__)
if full_path.count("src/python") > 0:
import sys
to_add = os.path.abspath(os.path.join(os.path.split(full_path)[0], "../../build/"))
sys.path.append(to_add)
from IPython import embed
import lib.ilqr_diffdrive as ilqr
import visualize_circle_world as vis
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
obs_prior = [0.5, 0.5]
world_dims = [-30, 30, -30, 30]
w1 = ilqr.CircleWorld(world_dims)
w2 = ilqr.CircleWorld(world_dims)
obs_pos_1 = [-2, 0.0]
obs_pos_2 = [2, 0.0]
obs_radius = 10.0
obstacle_1 = ilqr.Circle(obs_radius, obs_pos_1);
obstacle_2 = ilqr.Circle(obs_radius, obs_pos_2);
# add obstacle to world 1
w1.add_obstacle(obstacle_1);
# add obstacle to world 2
w2.add_obstacle(obstacle_2);
cost, states_true_1, obs_fname_1 = ilqr.control_diffdrive(ilqr.TRUE_ILQR,
w1, w2, obs_prior, "true1", "true1")
cost, states_true_2, obs_fname_2 = ilqr.control_diffdrive(ilqr.TRUE_ILQR,
w2, w1, obs_prior, "true2", "true2")
cost, states_weighted_1, obs_fname_3 =\
ilqr.control_diffdrive(ilqr.PROB_WEIGHTED_CONTROL,
w1, w2, obs_prior, "weight3", "weight3")
cost, states_weighted_2, obs_fname_4 =\
ilqr.control_diffdrive(ilqr.PROB_WEIGHTED_CONTROL,
w2, w1, obs_prior, "weight4", "weight4")
cost, states_hind_1, obs_fname_5 =\
ilqr.control_diffdrive(ilqr.HINDSIGHT,
w1, w2, obs_prior, "hind3", "hind3")
cost, states_hind_2, obs_fname_6 =\
ilqr.control_diffdrive(ilqr.HINDSIGHT,
w2, w1, obs_prior, "hind4", "hind4")
print("Drawing world 1")
ax1 = vis.parse_draw_files([states_true_1, states_weighted_1, states_hind_1], obs_fname_1,
show=False)
plt.title('World 1')
print("Drawing world 2")
ax2 = vis.parse_draw_files([states_true_2, states_weighted_2, states_hind_2],
obs_fname_2, show=False)
plt.title('World 2')
plt.show()
embed()
| bsd-3-clause | 9,018,473,560,120,826,000 | 28.468354 | 94 | 0.614691 | false | 2.818402 | false | true | false |
mcvidomi/poim2motif | run_svm_real.py | 1 | 1483 | '''
Created on 08.06.2015
@author: marinavidovic
'''
import os
import pdb
import utils_svm
import pickle
import numpy as np
import copy
import genQ
import makePOIM
import view
import matplotlib
matplotlib.use('Agg')
if __name__ == '__main__':
read_data = 1
datapath = "/home/mvidovic/POIMall/data/real/human_acceptor_splice_data.txt"
savepath = "/home/mvidovic/POIMall/data/real/human_acceptor_splice_data0.pkl"
lines=1000
if read_data:
x,y=utils_svm.extractRealData(datapath,savepath,lines)
else:
fobj=open(savepath,'rb')
x,y=pickle.load(fobj)
fobj.close()
num_pos = 100
num_neg = 4*num_pos
print "reduce samples"
x_red,y_red = utils_svm.reduce_samples(copy.deepcopy(x),copy.deepcopy(y),num_pos,num_neg)
nploci_letters,nploci_positions = utils_svm.non_polymorphic_loci(x_red)
#read data
experiment_name = "real1"
if not os.path.exists(experiment_name):
os.makedirs(experiment_name)
poimpath=experiment_name+"/poim.pkl"
tally=30
positives=25
sequenceno=100
mutation_prob=0.0
motif="ATTTT"
mu=13
x,y = makePOIM.gensequences(tally,positives,sequenceno,mutation_prob,motif,mu)
#compute POIM
poim_degree = 6
kernel_degree = 8
print "start poim computation"
poims = makePOIM.computePOIM(x,y,poim_degree,kernel_degree,poimpath)
Q2 = poims[0][1]
#view.test()
view.figurepoimsimple(Q2, "poim_pic", 0)
| mit | -5,112,159,067,973,785,000 | 25.017544 | 93 | 0.665543 | false | 2.977912 | false | true | false |
SevereOverfl0w/MCDirectory | migrations/versions/10723b632a87_.py | 1 | 1036 | """empty message
Revision ID: 10723b632a87
Revises: 3d7ce850941c
Create Date: 2013-11-12 22:18:26.482191
"""
# revision identifiers, used by Alembic.
revision = '10723b632a87'
down_revision = '3d7ce850941c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('commenter_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(), nullable=False),
sa.Column('comment', sa.Text(), nullable=False),
sa.Column('stars', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['commenter_id'], ['user.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('comment')
### end Alembic commands ###
| mit | 5,601,540,713,512,676,000 | 27.777778 | 63 | 0.664093 | false | 3.419142 | false | false | false |
oculusstorystudio/kraken | Python/kraken/ui/GraphView/pyflowgraph/node.py | 1 | 13604 |
#
# Copyright 2015-2017 Eric Thivierge
#
import math
import json
from kraken.ui.Qt import QtWidgets, QtGui, QtCore
from port import InputPort, OutputPort, IOPort
class NodeTitle(QtWidgets.QGraphicsWidget):
__color = QtGui.QColor(25, 25, 25)
__font = QtGui.QFont('Roboto', 14)
__font.setLetterSpacing(QtGui.QFont.PercentageSpacing, 115)
__labelBottomSpacing = 12
def __init__(self, text, parent=None):
super(NodeTitle, self).__init__(parent)
self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed))
self.__textItem = QtWidgets.QGraphicsTextItem(text, self)
self.__textItem.setDefaultTextColor(self.__color)
self.__textItem.setFont(self.__font)
self.__textItem.setPos(0, -2)
option = self.__textItem.document().defaultTextOption()
option.setWrapMode(QtGui.QTextOption.NoWrap)
self.__textItem.document().setDefaultTextOption(option)
self.__textItem.adjustSize()
self.setPreferredSize(self.textSize())
def setText(self, text):
self.__textItem.setPlainText(text)
self.__textItem.adjustSize()
self.setPreferredSize(self.textSize())
def textSize(self):
return QtCore.QSizeF(
self.__textItem.textWidth(),
self.__font.pointSizeF() + self.__labelBottomSpacing
)
# def paint(self, painter, option, widget):
# super(NodeTitle, self).paint(painter, option, widget)
# painter.setPen(QtGui.QPen(QtGui.QColor(0, 255, 0)))
# painter.drawRect(self.windowFrameRect())
class NodeHeader(QtWidgets.QGraphicsWidget):
def __init__(self, text, parent=None):
super(NodeHeader, self).__init__(parent)
self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding))
layout = QtWidgets.QGraphicsLinearLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(3)
layout.setOrientation(QtCore.Qt.Horizontal)
self.setLayout(layout)
self._titleWidget = NodeTitle(text, self)
layout.addItem(self._titleWidget)
layout.setAlignment(self._titleWidget, QtCore.Qt.AlignCenter | QtCore.Qt.AlignTop)
def setText(self, text):
self._titleWidget.setText(text)
# def paint(self, painter, option, widget):
# super(NodeHeader, self).paint(painter, option, widget)
# painter.setPen(QtGui.QPen(QtGui.QColor(0, 255, 100)))
# painter.drawRect(self.windowFrameRect())
class PortList(QtWidgets.QGraphicsWidget):
def __init__(self, parent):
super(PortList, self).__init__(parent)
layout = QtWidgets.QGraphicsLinearLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(4)
layout.setOrientation(QtCore.Qt.Vertical)
self.setLayout(layout)
def addPort(self, port, alignment):
layout = self.layout()
layout.addItem(port)
layout.setAlignment(port, alignment)
self.adjustSize()
return port
# def paint(self, painter, option, widget):
# super(PortList, self).paint(painter, option, widget)
# painter.setPen(QtGui.QPen(QtGui.QColor(255, 255, 0)))
# painter.drawRect(self.windowFrameRect())
class Node(QtWidgets.QGraphicsWidget):
nameChanged = QtCore.Signal(str, str)
__defaultColor = QtGui.QColor(154, 205, 50, 255)
__defaultUnselectedColor = QtGui.QColor(25, 25, 25)
__defaultSelectedColor = QtGui.QColor(255, 255, 255, 255)
__defaultUnselectedPen = QtGui.QPen(__defaultUnselectedColor, 1.6)
__defaultSelectedPen = QtGui.QPen(__defaultSelectedColor, 1.6)
__defaultLinePen = QtGui.QPen(QtGui.QColor(25, 25, 25, 255), 1.25)
def __init__(self, graph, name):
super(Node, self).__init__()
self.__name = name
self.__graph = graph
self.__color = self.__defaultColor
self.__unselectedColor = self.__defaultUnselectedColor
self.__selectedColor = self.__defaultSelectedColor
self.__unselectedPen = QtGui.QPen(self.__defaultUnselectedPen)
self.__selectedPen = QtGui.QPen(self.__defaultSelectedPen)
self.__linePen = QtGui.QPen(self.__defaultLinePen)
self.setMinimumWidth(60)
self.setMinimumHeight(20)
self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding))
layout = QtWidgets.QGraphicsLinearLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.setOrientation(QtCore.Qt.Vertical)
self.setLayout(layout)
self.__headerItem = NodeHeader(self.__name, self)
layout.addItem(self.__headerItem)
layout.setAlignment(self.__headerItem, QtCore.Qt.AlignCenter | QtCore.Qt.AlignTop)
self.__ports = []
self.__ioPortsHolder = PortList(self)
self.__inputPortsHolder = PortList(self)
self.__outputPortsHolder = PortList(self)
self.__outputPortsHolder.layout().setContentsMargins(0, 0, 0, 10)
layout.addItem(self.__ioPortsHolder)
layout.addItem(self.__inputPortsHolder)
layout.addItem(self.__outputPortsHolder)
self.__selected = False
self.__dragging = False
# =====
# Name
# =====
def getName(self):
return self.__name
def setName(self, name):
if name != self.__name:
origName = self.__name
self.__name = name
self.__headerItem.setText(self.__name)
            # Emit an event, so that the graph can update itself.
self.nameChanged.emit(origName, name)
# Update the node so that the size is computed.
self.adjustSize()
# =======
# Colors
# =======
def getColor(self):
return self.__color
def setColor(self, color):
self.__color = color
self.update()
def getUnselectedColor(self):
return self.__unselectedColor
def setUnselectedColor(self, color):
self.__unselectedColor = color
self.__unselectedPen.setColor(self.__unselectedColor)
self.update()
def getSelectedColor(self):
return self.__selectedColor
def setSelectedColor(self, color):
self.__selectedColor = color
self.__selectedPen.setColor(self.__selectedColor)
self.update()
# =============
# Misc Methods
# =============
def getGraph(self):
return self.__graph
def getHeader(self):
return self.__headerItem
# ==========
# Selection
# ==========
def isSelected(self):
return self.__selected
def setSelected(self, selected=True):
self.__selected = selected
self.setZValue(20.0)
self.update()
#########################
## Graph Pos
def getGraphPos(self):
transform = self.transform()
size = self.size()
return QtCore.QPointF(transform.dx()+(size.width()*0.5), transform.dy()+(size.height()*0.5))
def setGraphPos(self, graphPos):
self.prepareConnectionGeometryChange()
size = self.size()
self.setTransform(QtGui.QTransform.fromTranslate(graphPos.x(), graphPos.y()), False)
def translate(self, x, y):
self.prepareConnectionGeometryChange()
super(Node, self).moveBy(x, y)
# Prior to moving the node, we need to tell the connections to prepare for a geometry change.
    # This method must be called prior to moving a node.
def prepareConnectionGeometryChange(self):
for port in self.__ports:
if port.inCircle():
for connection in port.inCircle().getConnections():
connection.prepareGeometryChange()
if port.outCircle():
for connection in port.outCircle().getConnections():
connection.prepareGeometryChange()
#########################
## Ports
def addPort(self, port):
if isinstance(port, InputPort):
self.__inputPortsHolder.addPort(port, QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
elif isinstance(port, OutputPort):
self.__outputPortsHolder.addPort(port, QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
else:
self.__ioPortsHolder.addPort(port, QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.__ports.append(port)
self.adjustSize()
return port
def getPort(self, name):
for port in self.__ports:
if port.getName() == name:
return port
return None
def getInputPort(self, name):
for port in self.__ports:
if port.getName() == name and isinstance(port, (InputPort, IOPort)):
return port
return None
def getOutputPort(self, name):
for port in self.__ports:
if port.getName() == name and isinstance(port, (OutputPort, IOPort)):
return port
return None
def paint(self, painter, option, widget):
rect = self.windowFrameRect()
painter.setBrush(self.__color)
painter.setPen(QtGui.QPen(QtGui.QColor(0, 0, 0, 0), 0))
roundingY = 8
roundingX = 8
painter.drawRoundedRect(rect, roundingX, roundingY, mode=QtCore.Qt.AbsoluteSize)
# Title BG
titleHeight = self.__headerItem.size().height() - 3
painter.setBrush(self.__color.darker(125))
roundingY = rect.width() * roundingX / titleHeight
painter.drawRoundedRect(0, 0, rect.width(), titleHeight, roundingX, roundingY, mode=QtCore.Qt.AbsoluteSize)
painter.drawRect(0, titleHeight * 0.5 + 2, rect.width(), titleHeight * 0.5)
painter.setBrush(QtGui.QColor(0, 0, 0, 0))
if self.__selected:
painter.setPen(self.__selectedPen)
else:
painter.setPen(self.__unselectedPen)
roundingY = 8
roundingX = 8
painter.drawRoundedRect(rect, roundingX, roundingY, mode=QtCore.Qt.AbsoluteSize)
#########################
## Events
def mousePressEvent(self, event):
if event.button() is QtCore.Qt.MouseButton.LeftButton:
modifiers = event.modifiers()
if modifiers == QtCore.Qt.ControlModifier:
if not self.isSelected():
self.__graph.selectNode(self, clearSelection=False)
else:
self.__graph.deselectNode(self)
elif modifiers == QtCore.Qt.ShiftModifier:
if not self.isSelected():
self.__graph.selectNode(self, clearSelection=False)
else:
if not self.isSelected():
self.__graph.selectNode(self, clearSelection=True)
# Push all nodes back 1 level in z depth to bring selected
# node to front
for node in [x for x in self.__graph.getNodes().values()]:
if node == self:
continue
if node.zValue() != 0.0:
node.setZValue(node.zValue() - 1)
self.__dragging = True
self._mouseDownPoint = self.mapToScene(event.pos())
self._mouseDelta = self._mouseDownPoint - self.getGraphPos()
self._lastDragPoint = self._mouseDownPoint
self._nodesMoved = False
else:
super(Node, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
if self.__dragging:
newPos = self.mapToScene(event.pos())
graph = self.getGraph()
if graph.getSnapToGrid() is True:
gridSize = graph.getGridSize()
newNodePos = newPos - self._mouseDelta
snapPosX = math.floor(newNodePos.x() / gridSize) * gridSize
snapPosY = math.floor(newNodePos.y() / gridSize) * gridSize
snapPos = QtCore.QPointF(snapPosX, snapPosY)
newPosOffset = snapPos - newNodePos
newPos = newPos + newPosOffset
delta = newPos - self._lastDragPoint
self.__graph.moveSelectedNodes(delta)
self._lastDragPoint = newPos
self._nodesMoved = True
else:
super(Node, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
if self.__dragging:
if self._nodesMoved:
newPos = self.mapToScene(event.pos())
delta = newPos - self._mouseDownPoint
self.__graph.endMoveSelectedNodes(delta)
self.setCursor(QtCore.Qt.ArrowCursor)
self.__dragging = False
else:
super(Node, self).mouseReleaseEvent(event)
#########################
## shut down
def disconnectAllPorts(self):
# gather all the connections into a list, and then remove them from the graph.
# This is because we can't remove connections from ports while
# iterating over the set.
connections = []
for port in self.__ports:
if port.inCircle():
for connection in port.inCircle().getConnections():
connections.append(connection)
if port.outCircle():
for connection in port.outCircle().getConnections():
connections.append(connection)
for connection in connections:
self.__graph.removeConnection(connection) | bsd-3-clause | -1,098,339,499,066,316,300 | 31.625899 | 115 | 0.600485 | false | 4.132442 | false | false | false |
sebmolinari/los-kpos | app/mod_auth/controllers.py | 1 | 4029 | #Flask imports
from flask import Blueprint, render_template, flash, redirect, url_for, abort
from flask.ext.login import LoginManager, login_required, logout_user, login_user
#App imports
from app import app
from app.mod_auth.forms import LoginForm, UserForm, EmailForm, PasswordForm
from app.mod_auth.models import User
from utils import ts, send_email
lm = LoginManager()
lm.init_app(app)
lm.login_view = "auth.user_login"
mod_auth = Blueprint('auth', __name__, url_prefix='/user',
template_folder='templates')
@lm.user_loader
def user_load(id):
return User.get_by_id(int(id))
@mod_auth.route('/login/', methods=['GET', 'POST'])
def user_login():
    # Special case: if the database is empty we should create a user
if len(User.get_all()) == 0:
return redirect(url_for('auth.user_create'))
form = LoginForm()
if form.validate_on_submit():
user = User.get_by_email(email=form.email.data)
if user and User.check_password(user.password, form.password.data):
login_user(user)
return redirect(url_for('index'))
flash('Wrong email or password')
return render_template("login.html", form=form)
@mod_auth.route('/logout/')
@login_required
def user_logout():
logout_user()
return redirect(url_for('index'))
@mod_auth.route('/create', methods=["GET", "POST"])
def user_create():
form = UserForm()
if form.validate_on_submit():
user = User()
user.name = form.name.data
user.email = form.email.data
user.password = form.password.data
user.is_admin = form.is_admin.data
#TODO Fix possible duplicated keys!
User.save(user)
# Now we'll send the email confirmation link
subject = "Confirm your email"
token = ts.dumps(user.email, salt='email-confirm-key')
confirm_url = url_for(
'auth.user_confirm_email',
token=token,
_external=True)
html = render_template(
'activate.html',
confirm_url=confirm_url)
# We'll assume that send_email has been defined in myapp/util.py
app.logger.info('Url use to confirm: {0}'.format(confirm_url))
send_email(user.email, subject, html)
return redirect(url_for("index"))
return render_template("create.html", form=form)
@mod_auth.route('/confirm/<token>')
def user_confirm_email(token):
try:
email = ts.loads(token, salt="email-confirm-key", max_age=86400)
except:
abort(404)
user = User.get_by_email(email=email)
user.email_confirmed = True
User.save(user)
return redirect(url_for('auth.user_login'))
@mod_auth.route('/reset', methods=["GET", "POST"])
def user_password_reset():
form = EmailForm()
if form.validate_on_submit():
user = User.get_by_email(email=form.email.data)
subject = "Password reset requested"
# Here we use the URLSafeTimedSerializer we created in `util` at the
# beginning of the chapter
token = ts.dumps(user.email, salt='recover-key')
recover_url = url_for(
'auth.user_reset_password_with_token',
token=token,
_external=True)
html = render_template(
'recover.html',
recover_url=recover_url)
# Let's assume that send_email was defined in myapp/util.py
send_email(user.email, subject, html)
return redirect(url_for('index'))
return render_template('reset.html', form=form)
@mod_auth.route('/reset/<token>', methods=["GET", "POST"])
def user_reset_password_with_token(token):
try:
email = ts.loads(token, salt="recover-key", max_age=86400)
except:
abort(404)
form = PasswordForm()
if form.validate_on_submit():
user = User.get_by_email(email=email)
user.password = form.password.data
User.save(user)
return redirect(url_for('auth.user_login'))
return render_template('reset_with_token.html', form=form, token=token)
| gpl-3.0 | -7,708,140,034,764,960,000 | 28.844444 | 81 | 0.63316 | false | 3.590909 | false | false | false |
SRI-CSL/ETB | demos/allsatlive/yices_parse.py | 1 | 2665 | #Defines grammar for reading yices files; used in the include <file> api for yices.
from pyparsing import *
#Grammar for s-expressions which is used to parse Yices expressions
token = Word(alphanums + "-./_:*+=!<>")
LPAR = "("
RPAR = ")"
#Yices comments are ignored; parentheses are retained since Yices expressions are printed back
#as strings for the Yices api
lispStyleComment = Group(";" + restOfLine)
sexp = Forward()
sexpList = ZeroOrMore(sexp)
sexpList.ignore(lispStyleComment)
sexpGroup = Group(LPAR + sexpList + RPAR)
sexp << (token | sexpGroup)
#Grammar for Yices commands
#_LPAR = Suppress(LPAR)
#_RPAR = Suppress(RPAR)
#The command names are enumerated
yDefine = Literal("define")
yAssert = Literal("assert")
yAssertPlus = Literal("assert+")
yRetract = Literal("retract")
yCheck = Literal("check")
yMaxSat = Literal("maxsat")
ySetEvidence = Literal("set-evidence!")
ySetVerbosity = Literal("set-verbosity")
ySetArithOnly = Literal("set-arith-only")
yPush = Literal("push")
yPop = Literal("pop")
yEcho = Literal("echo")
yReset = Literal("reset")
yCommandName = yDefine + yAssert + yAssertPlus + yRetract + yCheck + yMaxSat + ySetEvidence + ySetVerbosity + ySetArithOnly + yPush + yPop + yEcho + yReset
#name is word without colons
name = Word(alphanums + "-./_*+=!<>")
colons = Suppress("::")
#Define commands are treated differently since we have to parse out the '::'
yDefineCommand = Group(yDefine + name + colons + sexp + sexpList)
yOtherCommandName = yAssert | yAssertPlus | yRetract | yCheck | yMaxSat | ySetEvidence | ySetVerbosity | ySetArithOnly | yPush | yPop | yEcho | yReset
yOtherCommand = Group(yOtherCommandName + sexpList)
yCommandBody = yDefineCommand | yOtherCommand
yCommand = Group(LPAR + yCommandBody + RPAR)
yCommandList = ZeroOrMore(yCommand)
yCommandList.ignore(lispStyleComment)
# no longer used: defineName = Group(name + colons + sexp + sexpList)
lparPrint = " ("
rparPrint = ") "
def printSexp(parsedSexp):
if parsedSexp == LPAR:
return lparPrint
elif parsedSexp == RPAR:
return rparPrint
elif type(parsedSexp) == str:
return parsedSexp
elif parsedSexp == []:
return ''
else:
print(parsedSexp)
first = printSexp(parsedSexp[0])
rest = printSexp(parsedSexp[1:])
print('first = %s' % first)
print('rest = %s' % rest)
if (first == lparPrint) or (first == rparPrint) or (rest == rparPrint):
return '%s%s' % (first, rest)
else:
return '%s %s' % (first, rest)
test1 = """(define a::bool)"""
test2 = """(define b ::bool)"""
test3 = """(define c :: bool)"""
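# Illustrative self-test (an addition, not part of the original module): parse one of
# the sample strings above with the yCommand grammar and round-trip it through
# printSexp. The __main__ guard keeps import behaviour unchanged.
if __name__ == '__main__':
    parsed = yCommand.parseString(test1)
    print(printSexp(parsed.asList()))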
| gpl-3.0 | -756,669,965,276,283,000 | 26.193878 | 155 | 0.670544 | false | 3.153846 | false | false | false |
Gabotero/GNURadioNext | gr-blocks/python/qa_tagged_stream_mux.py | 1 | 3562 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import pmt
import blocks_swig as blocks
import numpy
def make_len_tags(tupl, key):
    tags = []
    n_read = 0
    for element in tupl:
        # Create a new tag object for every entry; reusing a single gr.tag_t
        # instance would leave every list element pointing at the same tag.
        tag = gr.tag_t()
        tag.key = pmt.string_to_symbol(key)
        tag.offset = n_read
        n_read += len(element)
        tag.value = pmt.to_pmt(len(element))
        tags.append(tag)
    return tags
def make_len_tag(offset, key, value):
tag = gr.tag_t()
tag.offset = offset
tag.key = pmt.string_to_symbol(key)
tag.value = pmt.to_pmt(value)
return tag
class qa_tagged_stream_mux (gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_1(self):
datas = (
0, 1, 2, 5, 6, 10, 14, 15, 16,
3, 4, 7, 8, 9, 11, 12, 13, 17
)
expected = tuple(range(18))
tagname = "packet_length"
len_tags_0 = (
make_len_tag(0, tagname, 3),
make_len_tag(3, tagname, 2),
make_len_tag(5, tagname, 1),
make_len_tag(6, tagname, 3)
)
len_tags_1 = (
make_len_tag(0, tagname, 2),
make_len_tag(2, tagname, 3),
make_len_tag(5, tagname, 3),
make_len_tag(8, tagname, 1)
)
test_tag_0 = gr.tag_t()
test_tag_0.key = pmt.string_to_symbol('spam')
test_tag_0.offset = 4 # On the second '1'
test_tag_0.value = pmt.to_pmt(42)
test_tag_1 = gr.tag_t()
test_tag_1.key = pmt.string_to_symbol('eggs')
test_tag_1.offset = 3 # On the first '3' of the 2nd stream
test_tag_1.value = pmt.to_pmt(23)
src0 = blocks.vector_source_b(datas[0:9], False, 1, len_tags_0 + (test_tag_0,))
src1 = blocks.vector_source_b(datas[9:], False, 1, len_tags_1 + (test_tag_1,))
tagged_stream_mux = blocks.tagged_stream_mux(gr.sizeof_char, tagname)
snk = blocks.vector_sink_b()
self.tb.connect(src0, (tagged_stream_mux, 0))
self.tb.connect(src1, (tagged_stream_mux, 1))
self.tb.connect(tagged_stream_mux, snk)
self.tb.run()
self.assertEqual(expected, snk.data())
tags = [gr.tag_to_python(x) for x in snk.tags()]
tags = sorted([(x.offset, x.key, x.value) for x in tags])
tags_expected = [
(0, 'packet_length', 5),
(5, 'packet_length', 5),
(6, 'spam', 42),
(8, 'eggs', 23),
(10, 'packet_length', 4),
(14, 'packet_length', 4)
]
self.assertEqual(tags, tags_expected)
if __name__ == '__main__':
gr_unittest.run(qa_tagged_stream_mux, "qa_tagged_stream_mux.xml")
| gpl-3.0 | 8,027,701,689,082,090,000 | 31.09009 | 87 | 0.576081 | false | 3.226449 | true | false | false |
phev8/dataset_tools | experiment_handler/time_synchronisation.py | 1 | 1444 | import os
import pandas as pd
def read_synchronisation_file(experiment_root):
filepath = os.path.join(experiment_root, "labels", "synchronisation.csv")
return pd.read_csv(filepath)
def convert_timestamps(experiment_root, timestamps, from_reference, to_reference):
"""
    Convert numeric timestamps (seconds from the start of the video or posix timestamps) of a reference time (e.g. P3_eyetracker) to a different reference time (e.g. video time)
Parameters
----------
experiment_root: str
Root of the current experiment (to find the right synchronisation matrix)
timestamps: float or array like
timestamps to be converted
from_reference: str
name of the reference of the original timestamps
to_reference: str
name of the reference time the timestamp has to be converted to
Returns
-------
converted_timestamps: float or array like
Timestamps given in to_reference time values
"""
synchronisation_file = read_synchronisation_file(experiment_root)
offset = synchronisation_file.loc[synchronisation_file["from"] == from_reference, to_reference].values[0]
converted_timestamps = timestamps + offset
return converted_timestamps
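# Example of the conversion above (hypothetical offset value, for illustration only):
# if synchronisation.csv holds an offset of -1482326000.0 s from "P3_eyetracker" to
# "video", then the posix timestamp 1482326641.0 converts to 641.0 s of video time.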
if __name__ == '__main__':
exp_root = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8"
print(convert_timestamps(exp_root, [1482326641, 1482326642], "P3_eyetracker", "video")) | mit | 1,236,938,527,149,042,200 | 33.404762 | 171 | 0.702216 | false | 4.161383 | false | false | false |
xfaxca/pygaero | example/tmax_peakfind_example.py | 1 | 4986 | # tmax_peakfind_example.py
"""
Demonstration of some of the primary functions in pygaero, including Tmax finding and elemental analysis.
"""
# Module import
from pygaero import pio
from pygaero import therm
from pygaero import gen_chem
import os
import matplotlib.pyplot as plt
def example1():
# ------------------------------- File I/O and Data Cleaning Example -------------------------------- #
indir = "" # input directory (same folder as script by default)
infiles = ['desorb1.csv', 'desorb2.csv'] # input files as a list of strings
# Read in list of csvs with figaero desorptions
df_desorbs_ls = pio.read_files(fdir=indir, flist=infiles)
print('# of files imported: ', len(df_desorbs_ls))
# Clean ion names from default A_CxHyOzI_Avg format (strip underscores '_' and remove iodide
for df in df_desorbs_ls:
print("Example of ion names before clean: ", df.columns.values[0:3])
df.columns = gen_chem.cln_molec_names(idx_names=df.columns.values, delim="_") # remove underscores
df.columns = gen_chem.replace_group(molec_ls=df.columns.values, old_groups=["I"], new_group="") # remove I
print('Example of ion names after clean: ', df.columns.values[0:3])
# Alternatively, one can just assign a single thermogram by df_example = pd.DataFrame.from_csv(indir+infile)
# Adjust thermogram signals for 4.0 LPM figaero flow rate relative to nominal 2.0 LPM sample rate
# print('Before flow rate adjust:', df_desorbs_ls[0].values[0:3, 5])
therm.flow_correction(thermograms=df_desorbs_ls, aero_samp_rates=[4.0, 4.0])
# print('After flow rate adjust:', df_desorbs_ls[0].values[0:3, 5])
# ---------------------------------- Elemental Stats Example --------------------------------------- #
# A. Calculate elemental statistics for species in each desorb CSV that was read in. Then append the DataFrames
# containing these statistics into a list. Note, Iodide has been stripped from the names at this point, so
# the parameter cluster_group=None
ele_stats_ls = []
for df in df_desorbs_ls:
df_ele_temp = gen_chem.ele_stats(molec_ls=df.columns.values, ion_state=-1, cluster_group=None,
clst_group_mw=0.0, xtra_elements=["Cl", "F"])
ele_stats_ls.append(df_ele_temp)
# -------------------------------- Peak Finding (TMax) Example --------------------------------------#
# A. Smooth time series as step prior to Tmax (helps prevent mis-identification of TMax in noisy signals)
for df in df_desorbs_ls:
for series in df.columns.values:
# print('series: ', series)
df.ix[:, series] = therm.smooth(x=df.ix[:, series].values, window='hamming', window_len=15)
plt.show()
# B. Find TMax for all loaded thermograms. Returns a pandas DataFrame with ion names as index values and columns:
# TMax1, MaxSig1, TMax2, MaxSig2, DubFlag (double peak flag - binary; -1 for no peaks found). Depending on the
# specific data set, the [pk_thresh] and [pk_win] parameters may need to be optimized. See documentation for
# function peakfind_df_ls in module therm.py for more details. Results are drastically improved by first
# smoothing the time series, so that small fluctuations in signal are not mistaken for a peak.
df_tmax_ls = therm.peakfind_df_ls(df_ls=df_desorbs_ls, pk_thresh=0.05, pk_win=20,
min_temp=40.0, max_temp=190.0)
# C. Quick plot to visualize Tmax values for 15 example ions
# therm.plot_tmax(df=df_desorbs_ls[0], ions=df_tmax_ls[0].index.values[15:29],
# tmax_temps=df_tmax_ls[0].ix[15:29, 'TMax1'], tmax_vals=df_tmax_ls[0].ix[15:29, 'MaxSig1'])
therm.plot_tmax_double(df=df_desorbs_ls[0], ions=df_tmax_ls[0].index.values[15:29],
tmax_temps=df_tmax_ls[0].ix[15:29, 'TMax1'],
tmax_temps2=df_tmax_ls[0].ix[15:29, 'TMax2'],
tmax_vals=df_tmax_ls[0].ix[15:29, 'MaxSig1'],
tmax_vals2=df_tmax_ls[0].ix[15:29, 'MaxSig2'])
# ----------------------------------- Saving Results Example -------------------------------------- #
# Uncomment the following lines to save the example output
# outdir = 'testout'
# if outdir[-1] != '/':
# outdir += '/'
# if not os.path.exists(outdir):
# os.makedirs(outdir)
# # A. Save TMax data
# for df, fname in zip(df_tmax_ls, ["desorb1_tmax", "desorb2_tmax"]):
# df.to_csv(outdir+fname+".csv")
# # B. Save smoothed desorption thermogram time series
# for df, fname in zip(df_desorbs_ls, ["desorb1_smth", "desorb2_smth"]):
# df.to_csv(outdir+fname+".csv")
# # C. Save elemental stats for each desorption
# for df, fname in zip(ele_stats_ls, ["desorb1_ele", "desorb2_ele"]):
# df.to_csv(outdir+fname+".csv")
return 0
if __name__ == '__main__':
example1()
| gpl-3.0 | 1,347,200,790,599,786,200 | 53.791209 | 117 | 0.606899 | false | 3.252446 | false | false | false |
AIFDR/inasafe-django | django_project/realtime/helpers/base_indicator.py | 2 | 2769 | # coding=utf-8
from builtins import range
from builtins import object
from datetime import datetime, timedelta
from math import isnan
from django.utils.translation import ugettext as _
import numpy
import pytz
from realtime.models.earthquake import Earthquake
__author__ = 'Rizky Maulana Nugraha "lucernae" <[email protected]>'
__date__ = '04/09/15'
STATUS_HEALTHY = 'Healthy'
STATUS_WARNING = 'Warning'
STATUS_CRITICAL = 'Critical'
class Indicator(object):
"""An abstract class of indicators.
This class should provide a way to generate indicator info to know that
realtime is running fine.
"""
def __init__(self):
self._value = None
self._label = None
self._status = None
@property
def value(self):
return self._value
@property
def label(self):
return self._label
@property
def status(self):
return self._status
def value_humanize(self):
raise NotImplementedError()
def notes(self):
raise NotImplementedError()
def is_healthy(self):
return self.status == STATUS_HEALTHY
def is_warning(self):
return self.status == STATUS_WARNING
def is_critical(self):
return self.status == STATUS_CRITICAL
def status_text(self):
if self.status == STATUS_HEALTHY:
return _('Healthy')
elif self.status == STATUS_WARNING:
return _('Warning')
elif self.status == STATUS_CRITICAL:
return _('Critical')
else:
return _('Not Available')
# this line onward will contains helpers method
def average_shake_interval(num_days=30):
"""Calculates average interval between shake events.
It is calculated in the span of previous 30 days
:param num_days: Number of previous days the function will calculate
:type num_days: int
:return: tuple of mean interval and standard deviation of shake events
:rtype: tuple
"""
last_span = datetime.utcnow() - timedelta(days=num_days)
last_span.replace(tzinfo=pytz.utc)
shakes = Earthquake.objects.filter(time__gte=last_span)
intervals = []
for i in range(1, len(shakes)):
prev_shake = shakes[i - 1]
shake = shakes[i]
intervals.append(shake.time - prev_shake.time)
# using numpy to calculate mean
intervals = numpy.array([i.total_seconds() for i in intervals])
mean_interval = numpy.mean(intervals)
if isinstance(mean_interval, float) and isnan(mean_interval):
mean_interval = 0
# using numpy to calculate std
deviation = numpy.std(intervals)
if isinstance(deviation, float) and isnan(deviation):
deviation = 0
return timedelta(seconds=mean_interval), timedelta(seconds=deviation)
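# Worked illustration of the aggregation above (synthetic numbers, no database
# access): three shakes spaced 10 and 20 minutes apart give intervals of 600 s and
# 1200 s, so the function would return approximately
# (timedelta(seconds=900), timedelta(seconds=300)).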
| bsd-2-clause | -3,732,841,021,557,332,000 | 26.415842 | 75 | 0.660166 | false | 3.955714 | false | false | false |
morinted/plover | plover/gui_qt/dictionary_editor.py | 1 | 13523 |
from operator import attrgetter, itemgetter
from collections import namedtuple
from itertools import chain
from PyQt5.QtCore import (
QAbstractTableModel,
QModelIndex,
Qt,
)
from PyQt5.QtWidgets import (
QComboBox,
QDialog,
QStyledItemDelegate,
)
from plover.translation import escape_translation, unescape_translation
from plover.misc import expand_path, shorten_path
from plover.steno import normalize_steno
from plover.gui_qt.dictionary_editor_ui import Ui_DictionaryEditor
from plover.gui_qt.utils import ToolBar, WindowState
_COL_STENO, _COL_TRANS, _COL_DICT, _COL_COUNT = range(3 + 1)
class DictionaryItem(namedtuple('DictionaryItem', 'strokes translation dictionary')):
@property
def dictionary_path(self):
return self.dictionary.path
class DictionaryItemDelegate(QStyledItemDelegate):
def __init__(self, dictionary_list):
super().__init__()
self._dictionary_list = dictionary_list
def createEditor(self, parent, option, index):
if index.column() == _COL_DICT:
dictionary_paths = [
shorten_path(dictionary.path)
for dictionary in self._dictionary_list
if not dictionary.readonly
]
combo = QComboBox(parent)
combo.addItems(dictionary_paths)
return combo
return super().createEditor(parent, option, index)
class DictionaryItemModel(QAbstractTableModel):
def __init__(self, dictionary_list, sort_column, sort_order):
super().__init__()
self._dictionary_list = dictionary_list
self._operations = []
self._entries = []
self._sort_column = sort_column
self._sort_order = sort_order
self._update_entries()
def _update_entries(self, strokes_filter=None, translation_filter=None):
self._entries = []
for dictionary in self._dictionary_list:
for strokes, translation in dictionary.items():
if strokes_filter is not None and \
not '/'.join(strokes).startswith(strokes_filter):
continue
if translation_filter is not None and \
not translation.startswith(translation_filter):
continue
item = DictionaryItem(strokes, translation, dictionary)
self._entries.append(item)
self.sort(self._sort_column, self._sort_order)
@property
def has_undo(self):
return bool(self._operations)
@property
def modified(self):
paths = set()
dictionary_list = []
for op_list in self._operations:
if not isinstance(op_list, list):
op_list = (op_list,)
for item in chain(*op_list):
if item is None:
continue
dictionary = item.dictionary
if dictionary.path in paths:
continue
paths.add(dictionary.path)
dictionary_list.append(dictionary)
return dictionary_list
# Note:
# - since switching from a dictionary to a table does not enforce the
    #   unicity of keys, a deletion can fail when one of the duplicates has
    #   already been deleted.
    # - when undoing an operation at the table level, the item may have
    #   been filtered out and so not be present
def _undo(self, old_item, new_item):
if old_item is None:
# Undo addition.
try:
del new_item.dictionary[new_item.strokes]
except KeyError:
pass
try:
row = self._entries.index(new_item)
except ValueError:
                # Happens if the item is filtered out.
pass
else:
self.remove_rows([row], record=False)
return
if new_item is None:
# Undo deletion.
self.new_row(0, item=old_item, record=False)
return
# Undo update.
try:
del new_item.dictionary[new_item.strokes]
except KeyError:
pass
try:
row = self._entries.index(new_item)
except ValueError:
            # Happens if the item is filtered out;
            # "create" a new row so the user sees
            # the result of the undo.
self.new_row(0, item=old_item, record=False)
else:
old_item.dictionary[old_item.strokes] = old_item.translation
self._entries[row] = old_item
self.dataChanged.emit(self.index(row, _COL_STENO),
self.index(row, _COL_TRANS))
def undo(self, op=None):
op = self._operations.pop()
if isinstance(op, list):
for old_item, new_item in op:
self._undo(old_item, new_item)
else:
self._undo(*op)
def rowCount(self, parent):
return 0 if parent.isValid() else len(self._entries)
def columnCount(self, parent):
return _COL_COUNT
def headerData(self, section, orientation, role):
if orientation != Qt.Horizontal or role != Qt.DisplayRole:
return None
if section == _COL_STENO:
return _('Strokes')
if section == _COL_TRANS:
return _('Translation')
if section == _COL_DICT:
return _('Dictionary')
def data(self, index, role):
if not index.isValid() or role not in (Qt.EditRole, Qt.DisplayRole):
return None
item = self._entries[index.row()]
column = index.column()
if column == _COL_STENO:
return '/'.join(item.strokes)
if column == _COL_TRANS:
return escape_translation(item.translation)
if column == _COL_DICT:
return shorten_path(item.dictionary.path)
def flags(self, index):
if not index.isValid():
return Qt.NoItemFlags
f = Qt.ItemIsEnabled | Qt.ItemIsSelectable
item = self._entries[index.row()]
if not item.dictionary.readonly:
f |= Qt.ItemIsEditable
return f
def filter(self, strokes_filter=None, translation_filter=None):
self.modelAboutToBeReset.emit()
self._update_entries(strokes_filter, translation_filter)
self.modelReset.emit()
def sort(self, column, order):
self.layoutAboutToBeChanged.emit()
if column == _COL_DICT:
key = attrgetter('dictionary_path')
else:
key = itemgetter(column)
self._entries.sort(key=key,
reverse=(order == Qt.DescendingOrder))
self._sort_column = column
self._sort_order = order
self.layoutChanged.emit()
def setData(self, index, value, role=Qt.EditRole, record=True):
assert role == Qt.EditRole
row = index.row()
column = index.column()
old_item = self._entries[row]
strokes, translation, dictionary = old_item
if column == _COL_STENO:
strokes = normalize_steno(value.strip())
if not strokes or strokes == old_item.strokes:
return False
elif column == _COL_TRANS:
translation = unescape_translation(value.strip())
if translation == old_item.translation:
return False
elif column == _COL_DICT:
path = expand_path(value)
for dictionary in self._dictionary_list:
if dictionary.path == path:
break
if dictionary == old_item.dictionary:
return False
try:
del old_item.dictionary[old_item.strokes]
except KeyError:
pass
if not old_item.strokes and not old_item.translation:
# Merge operations when editing a newly added row.
if self._operations and self._operations[-1] == [(None, old_item)]:
self._operations.pop()
old_item = None
new_item = DictionaryItem(strokes, translation, dictionary)
self._entries[row] = new_item
dictionary[strokes] = translation
if record:
self._operations.append((old_item, new_item))
self.dataChanged.emit(index, index)
return True
def new_row(self, row, item=None, record=True):
if item is None:
if row == 0 and not self._entries:
dictionary = self._dictionary_list[0]
else:
dictionary = self._entries[row].dictionary
item = DictionaryItem((), '', dictionary)
self.beginInsertRows(QModelIndex(), row, row)
self._entries.insert(row, item)
if record:
self._operations.append((None, item))
self.endInsertRows()
def remove_rows(self, row_list, record=True):
assert row_list
operations = []
for row in sorted(row_list, reverse=True):
self.beginRemoveRows(QModelIndex(), row, row)
item = self._entries.pop(row)
self.endRemoveRows()
try:
del item.dictionary[item.strokes]
except KeyError:
pass
else:
operations.append((item, None))
if record:
self._operations.append(operations)
class DictionaryEditor(QDialog, Ui_DictionaryEditor, WindowState):
ROLE = 'dictionary_editor'
def __init__(self, engine, dictionary_paths):
super().__init__()
self.setupUi(self)
self._engine = engine
with engine:
dictionary_list = [
dictionary
for dictionary in engine.dictionaries.dicts
if dictionary.path in dictionary_paths
]
sort_column, sort_order = _COL_STENO, Qt.AscendingOrder
self._model = DictionaryItemModel(dictionary_list,
sort_column,
sort_order)
self._model.dataChanged.connect(self.on_data_changed)
self.table.sortByColumn(sort_column, sort_order)
self.table.setModel(self._model)
self.table.setSortingEnabled(True)
self.table.resizeColumnsToContents()
self.table.setItemDelegate(DictionaryItemDelegate(dictionary_list))
self.table.selectionModel().selectionChanged.connect(self.on_selection_changed)
background = self.table.palette().highlightedText().color().name()
text_color = self.table.palette().highlight().color().name()
self.table.setStyleSheet('''
QTableView::item:focus {
background-color: %s;
color: %s;
}''' % (background, text_color))
self.table.setFocus()
for action in (
self.action_Undo,
self.action_Delete,
):
action.setEnabled(False)
# Toolbar.
self.layout().addWidget(ToolBar(
self.action_Undo,
self.action_Delete,
self.action_New,
))
self.restore_state()
self.finished.connect(self.save_state)
@property
def _selection(self):
return list(sorted(
index.row() for index in
self.table.selectionModel().selectedRows(0)
))
def _select(self, row, edit=False):
row = min(row, self._model.rowCount(QModelIndex()) - 1)
index = self._model.index(row, 0)
self.table.setCurrentIndex(index)
if edit:
self.table.edit(index)
def on_data_changed(self, top_left, bottom_right):
self.table.setCurrentIndex(top_left)
self.action_Undo.setEnabled(self._model.has_undo)
def on_selection_changed(self):
enabled = bool(self._selection)
for action in (
self.action_Delete,
):
action.setEnabled(enabled)
def on_undo(self):
assert self._model.has_undo
self._model.undo()
self.action_Undo.setEnabled(self._model.has_undo)
def on_delete(self):
selection = self._selection
assert selection
self._model.remove_rows(selection)
self._select(selection[0])
self.action_Undo.setEnabled(self._model.has_undo)
def on_new(self):
selection = self._selection
if selection:
row = self._selection[0]
else:
row = 0
self.table.reset()
self._model.new_row(row)
self._select(row, edit=True)
self.action_Undo.setEnabled(self._model.has_undo)
def on_apply_filter(self):
self.table.selectionModel().clear()
strokes_filter = '/'.join(normalize_steno(self.strokes_filter.text().strip()))
translation_filter = unescape_translation(self.translation_filter.text().strip())
self._model.filter(strokes_filter=strokes_filter,
translation_filter=translation_filter)
def on_clear_filter(self):
self.strokes_filter.setText('')
self.translation_filter.setText('')
self._model.filter(strokes_filter=None, translation_filter=None)
def on_finished(self, result):
with self._engine:
self._engine.dictionaries.save(dictionary.path
for dictionary
in self._model.modified)
| gpl-2.0 | -4,029,928,462,594,560,500 | 34.124675 | 89 | 0.569844 | false | 4.330131 | false | false | false |
RyanJenkins/ISS | ISS/templatetags/pagination.py | 1 | 3715 | import urlparse
import urllib
from django import template
register = template.Library()
def unfuck_percent_encoded_utf8(fucked_unicode_str):
# OK So... *dramatic pause*
# (((Some))) browsers insist on transforming unicode characters outside of
# the ASCII range to their UTF-8 encoding, and then url encoding that byte
# sequence. If you want my opinion this is harmful because it's a big pain
# in my ass necessitating this code when it would be perfectly reasonable
# to just send UTF-8 byte sequences in URLs but fuck it, until Google/Apple
# /Mozilla start considering overly long comments in obscure codebases as
# standards this code is gonna have to stick around.
#
# To compound this issue, python's urlparse.parse_qs has the highly
    # questionable behavior of treating every percent encoded octet as a
    # separate codepoint, which is the opposite of how the major browser
    # vendors have decided to do it. Theoretically this should be fine if
# browsers did The Right Thing but given the reality of the situation it's
# imprudent and requires me to fix this situation here with the jank that
# follows.
#
# So what do we do about it? Instead of trying to monkey patch urlparse or
# something we instead consult the (incorrect) values that it returns. We
# construct a byte string. For each codepoint in the input string we either
#
# A) insert a byte into our byte string iff the codepoint is less than
# 2^8 or...
    # B) insert a byte sequence into the byte string corresponding to the utf-8
# encoded value for that codepoint.
#
# This bytestring should now be correctly encoded UTF-8, caller can decode
# if they want
#
# Browsers doing The Right Thing with high codepoints are covered under B,
# normal ascii range characters are covered under A, and fucked utf-8 then
# percent encoded strings are also covered under A.
#
# This also has the benefit that if someone really decides to be an ass and
    # sends a url where there are both "raw" UTF-8 encoded codepoints and percent
    # encoded UTF-8 sequences, the url will somehow correctly get
# handled.
#
# This is probably pretty slow but I'm fairly confident it's correct.
if isinstance(fucked_unicode_str, unicode):
return ''.join([(chr(ord(c)) if ord(c) < 256 else c.encode('utf-8')) for c in fucked_unicode_str])
else:
return str(fucked_unicode_str)
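# Worked example (added illustration, not part of the original module): assume
# parse_qs has already mangled the percent-encoded e-acute "%C3%A9" into the
# two-codepoint string u'\xc3\xa9'. Both that case and a "raw" high codepoint
# come out of this function as a valid UTF-8 byte string:
#
#   >>> unfuck_percent_encoded_utf8(u'\xc3\xa9').decode('utf-8')
#   u'\xe9'
#   >>> unfuck_percent_encoded_utf8(u'\u20ac').decode('utf-8')  # euro sign
#   u'\u20ac'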
RANGE_WIDTH = 3
@register.assignment_tag
def nice_page_set(page):
pages = []
pages.extend(range(1, RANGE_WIDTH+1))
pages.extend(range(page.paginator.num_pages-RANGE_WIDTH,
page.paginator.num_pages+1))
pages.extend(range(page.number-RANGE_WIDTH, page.number+RANGE_WIDTH))
pages = [n for n in pages if n <= page.paginator.num_pages and n > 0]
pages = list(set(pages))
pages.sort()
elip_pages = []
for idx, n in enumerate(pages):
if idx != 0 and n != pages[idx-1] + 1:
elip_pages.append(-1)
elip_pages.append(n)
return elip_pages
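# Added example (not from the original tag): for a paginator with 20 pages and
# page.number == 10, RANGE_WIDTH of 3 yields
#   [1, 2, 3, -1, 7, 8, 9, 10, 11, 12, -1, 17, 18, 19, 20]
# where each -1 is a placeholder the template can render as an ellipsis.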
@register.filter
def mixin_page_param(base_url, page_number):
parsed_url = urlparse.urlparse(base_url)
query = urlparse.parse_qs(parsed_url.query)
query['p'] = [page_number]
one_pairs = []
for key, values in query.items():
for value in values:
one_pairs.append((
unfuck_percent_encoded_utf8(key),
unfuck_percent_encoded_utf8(value)))
qs = urllib.urlencode(one_pairs)
url_dict = parsed_url._asdict()
url_dict['query'] = qs
return urlparse.urlunparse(urlparse.ParseResult(**url_dict))
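# Hedged usage sketch (added, not part of the original module): as a template
# filter this would typically be applied to the current URL, e.g.
#   {{ request.get_full_path|mixin_page_param:3 }}
# which takes something like "/threads/?q=caf%C3%A9" and returns the same URL
# with "p=3" merged into the query string (parameter order follows dict order).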
| gpl-3.0 | 8,622,333,044,276,108,000 | 38.946237 | 106 | 0.679139 | false | 3.763931 | false | false | false |
Azure/azure-sdk-for-python | sdk/servermanager/azure-mgmt-servermanager/azure/mgmt/servermanager/models/session_parameters.py | 1 | 2295 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SessionParameters(Model):
"""Parameter collection for creation and other operations on sessions.
:param user_name: Encrypted User name to be used to connect to node.
:type user_name: str
:param password: Encrypted Password associated with user name.
:type password: str
:param retention_period: Session retention period. Possible values
include: 'Session', 'Persistent'
:type retention_period: str or
~azure.mgmt.servermanager.models.RetentionPeriod
:param credential_data_format: Credential data format. Possible values
include: 'RsaEncrypted'
:type credential_data_format: str or
~azure.mgmt.servermanager.models.CredentialDataFormat
:param encryption_certificate_thumbprint: Encryption certificate
thumbprint.
:type encryption_certificate_thumbprint: str
"""
_attribute_map = {
'user_name': {'key': 'properties.userName', 'type': 'str'},
'password': {'key': 'properties.password', 'type': 'str'},
'retention_period': {'key': 'properties.retentionPeriod', 'type': 'RetentionPeriod'},
'credential_data_format': {'key': 'properties.credentialDataFormat', 'type': 'CredentialDataFormat'},
'encryption_certificate_thumbprint': {'key': 'properties.EncryptionCertificateThumbprint', 'type': 'str'},
}
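    # Hedged usage sketch (added; the keyword names mirror the documented
    # parameters above, the values are placeholders):
    #   params = SessionParameters(user_name='<encrypted user>',
    #                              password='<encrypted password>',
    #                              retention_period='Session')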
def __init__(self, **kwargs):
super(SessionParameters, self).__init__(**kwargs)
self.user_name = kwargs.get('user_name', None)
self.password = kwargs.get('password', None)
self.retention_period = kwargs.get('retention_period', None)
self.credential_data_format = kwargs.get('credential_data_format', None)
self.encryption_certificate_thumbprint = kwargs.get('encryption_certificate_thumbprint', None)
| mit | 2,877,594,313,000,630,300 | 45.836735 | 114 | 0.659695 | false | 4.388145 | false | false | false |
ermo/privateer_wcu | modules/missions/cargo_mission.py | 1 | 8981 | from go_somewhere_significant import *
from go_to_adjacent_systems import *
import Briefing
import Director
import VS
import debug
import faction_ships
import launch
import quest
import unit
import universe
import vsrandom
class cargo_mission (Director.Mission):
def initbriefing(self):
VS.IOmessage (0,"cargo mission","briefing","Your mission for today will be to run some %s cargo" % self.cargoname)
self.briefgametime = 0
self.adjsys.initbriefing()
def loopbriefing(self):
brief_you=self.adjsys.loopbriefing()
if (brief_you != -1):
VS.IOmessage(0,"cargo mission","briefing","Once there, you must drop the cargo off at a specified unit")
if (self.briefgametime==0):
self.briefgametime = VS.GetGameTime()
elif ((VS.GetGameTime()-self.briefgametime)>5):
Briefing.terminate()
def endbriefing(self):
self.adjsys.endbriefing()
del self.briefgametime
def SetVar (self,val):
if (self.var_to_set!=''):
quest.removeQuest (self.you.isPlayerStarship(),self.var_to_set,val)
def __init__ (self,factionname, numsystemsaway, cargoquantity, missiondifficulty, creds, launchoncapship, time_to_complete, category,jumps=(),var_to_set=''):
Director.Mission.__init__(self);
self.you=VS.Unit()
self.base=VS.Unit()
self.role="ESCORTCAP"
self.arrived=0
self.var_to_set=var_to_set
self.mplay="all"
# self.mission_time=VS.GetGameTime()+time_to_complete*100*float(1+numsystemsaway)
self.capship= launchoncapship
self.faction=factionname
self.cred=creds
self.difficulty=missiondifficulty
self.you=VS.getPlayer()
self.adjsys=go_to_adjacent_systems(self.you,numsystemsaway,jumps)
self.quantity=cargoquantity
self.mplay=universe.getMessagePlayer(self.you)
if (self.quantity<1):
self.quantity=1
carg=VS.getRandCargo(self.quantity,category)
if (carg.GetQuantity()==0 or category==''):
carg = VS.getRandCargo(self.quantity,"") #oh no... could be starships...
i=0
while i<50 and carg.GetCategory()[:10]=="Contraband":
debug.info("contraband==bad")
carg = VS.getRandCargo(self.quantity,"")
i+=1
tempquantity=self.quantity
self.cargoname=carg.GetContent()
name = self.you.getName ()
carg.SetMissionFlag(1)
if (not self.you.isNull()):
tmpcarg=self.you.GetCargo(self.cargoname)
if tmpcarg.GetMissionFlag() and tmpcarg.GetQuantity()>2:
quantum=int(tmpcarg.GetQuantity()/3)
quantum=self.you.removeCargo(carg.GetContent(),quantum,True)#use it if player has it
carg.SetQuantity(1+quantum)
self.quantity=self.you.addCargo(carg)
else:
self.quantity = self.you.addCargo(carg) #I add some cargo
else:
VS.IOmessage (2,"cargo mission",self.mplay,"#ff0000Unable to establish communications. Mission failed.")
VS.terminateMission (0)
return
# creds_deducted = (carg.GetPrice()*float(self.quantity)*vsrandom.random()+1)
# self.cred += creds_deducted
if (tempquantity>0):
self.cred*=float(self.quantity)/float(tempquantity)
else:
VS.IOmessage (2,"cargo mission",self.mplay,"#ff0000You do not have space to add our %s cargo to your ship. Mission failed."%self.cargoname)
VS.terminateMission(0)
return
if (self.quantity==0):
VS.IOmessage (2,"cargo mission",self.mplay,"#ff0000You do not have space to add our cargo to the mission. Mission failed.")
VS.terminateMission(0)
return
VS.IOmessage (0,"cargo mission",self.mplay,"Good Day, %s. Your mission is as follows:" % (name))
self.adjsys.Print("You should start in the system named %s","Then jump to %s","Finally, jump to %s, your final destination","cargo mission",1)
VS.IOmessage (2,"cargo mission",self.mplay,"Give the cargo to a %s unit or planet." % (self.faction))
VS.IOmessage (3,"cargo mission",self.mplay,"You will receive %d of the %s cargo" % (self.quantity,self.cargoname))
# VS.IOmessage (4,"cargo mission",self.mplay,"We will deduct %.2f credits from your account for the cargo needed." % (creds_deducted))
VS.IOmessage (4,"cargo mission",self.mplay,"You will earn %.2f credits when you deliver our cargo." % (creds))
VS.IOmessage (4,"cargo mission",self.mplay,"#00ff00Good luck!")
# self.you.addCredits (-creds_deducted)
def takeCargoAndTerminate (self,you, remove):
removenum=0 #if you terminate without remove, you are SKREWED
self.base.setCombatRole(self.role)
if (remove):
removenum=you.removeCargo(self.cargoname,self.quantity,1)
debug.info("removed %d" % removenum)
mpart=VS.GetMasterPartList()
newcarg=mpart.GetCargo(self.cargoname)
newcarg.SetQuantity(removenum)
self.base.addCargo(newcarg)
has=self.you.hasCargo(self.cargoname)
if (has):
has=self.you.removeCargo(self.cargoname,has,1)
newcarg.SetMissionFlag(0)
newcarg.SetQuantity(has)
self.you.addCargo(newcarg) #It seems that removing and then adding it again is the only way...
if ((removenum>=self.quantity) or (self.quantity==0) or removenum>=1):
VS.IOmessage (0,"cargo mission",self.mplay,"#00ff00Excellent work pilot.")
VS.IOmessage (0,"cargo mission",self.mplay,"#00ff00You have been rewarded for your effort as agreed.")
VS.IOmessage (0,"cargo mission",self.mplay,"#00ff00Your excellent work will be remembered.")
you.addCredits(self.cred)
VS.AdjustRelation(you.getFactionName(),self.faction,.01*self.difficulty,1)
self.SetVar(1)
VS.terminateMission(1)
return
else:
VS.IOmessage (0,"cargo mission",self.mplay,"#ff0000You did not follow through on your end of the deal.")
if (self.difficulty<1):
VS.IOmessage (0,"cargo mission",self.mplay,"#ff0000Your pay will be reduced")
VS.IOmessage (0,"cargo mission",self.mplay,"#ff0000And we will consider if we will accept you on future missions.")
addcred=(float(removenum)/(float(self.quantity*(1+self.difficulty))))*self.cred
you.addCredits(addcred)
else:
VS.IOmessage (0,"cargo mission",self.mplay,"#ff0000You will not be paid!")
universe.punish(self.you,self.faction,self.difficulty)
self.SetVar(-1)
VS.terminateMission(0)
return
def Execute (self):
## if (VS.getGameTime()>mission_time):
## VS.IOmessage (0,"cargo mission",self.mplay,"You Have failed to deliver your cargo in a timely manner.")
## VS.IOmessage (0,"cargo mission",self.mplay,"The cargo is no longer of need to us.")
## if (you):
## takeCargoAndTerminate(you,0)
## return
if (self.you.isNull() or (self.arrived and self.base.isNull())):
VS.IOmessage (0,"cargo mission",self.mplay,"#ff0000You were unable to deliver cargo. Mission failed.")
self.SetVar(-1)
VS.terminateMission(0)
return
if (not self.adjsys.Execute() and not self.arrived):
return
if (self.arrived):
self.adjsys.Execute=self.adjsys.HaveArrived
if (self.base.isDocked(self.you)):
self.takeCargoAndTerminate(self.you,1)
return
else:
self.arrived=1
tempfac=self.faction
if vsrandom.random()<=.5:
tempfac=''
self.adjsys=go_somewhere_significant(self.you,1,100,self.capship,tempfac)
capstr="planet"
dockstr="land"
if tempfac=='':
dockstr="dock"
capstr="ship"
self.adjsys.Print("You must visit the %%s %s" % (capstr),"cargo mission",", docked around the %s",0)
VS.IOmessage(0,"cargo mission",self.mplay,"Once there, %s and we will transport the cargo off of your ship." % (dockstr))
self.base=self.adjsys.SignificantUnit()
self.role=self.base.getCombatRole()
self.base.setCombatRole("INERT")
def initrandom (factionname, missiondifficulty,creds_per_jump, launchoncapship, sysmin, sysmax, time_to_complete, category,jumps=(),var_to_set=''):
numsys=vsrandom.randrange(sysmin,sysmax)
return cargo_mission(factionname,numsys, vsrandom.randrange(4,15), missiondifficulty,creds_per_jump*float(1+numsys),launchoncapship, 10.0, category,jumps,var_to_set)
| gpl-2.0 | 812,984,243,763,449,700 | 48.894444 | 169 | 0.620755 | false | 3.417428 | false | false | false |
AASHE/hub | hub/apps/metadata/migrations/0001_initial.py | 1 | 2996 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iss', '__first__'),
]
operations = [
migrations.CreateModel(
name='AcademicDiscipline',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ('name',),
'verbose_name': 'Academic Discipline',
'verbose_name_plural': 'Academic Disciplines',
},
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ('name',),
'verbose_name': 'Country',
'verbose_name_plural': 'Countries',
},
),
migrations.CreateModel(
name='InstitutionalOffice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ('name',),
'verbose_name': 'Institutional Office',
'verbose_name_plural': 'Institutional Offices',
},
),
migrations.CreateModel(
name='ProgramType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ('name',),
'verbose_name': 'Program Type',
'verbose_name_plural': 'Program Types',
},
),
migrations.CreateModel(
name='SustainabilityTopic',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('color', models.CharField(default='#ff0000', max_length=7, verbose_name='HEX Color')),
('slug', models.SlugField()),
],
options={
'ordering': ('color', 'name'),
'verbose_name': 'Sustainability Topic',
'verbose_name_plural': 'Sustainability Topics',
},
),
migrations.CreateModel(
name='Organization',
fields=[
],
options={
'proxy': True,
},
bases=('iss.organization',),
),
]
| mit | -6,569,062,075,885,106,000 | 34.247059 | 114 | 0.480975 | false | 4.688576 | false | false | false |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/manager_link_status.py | 1 | 1226 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'ManagerLinkStatusEnum',
},
)
class ManagerLinkStatusEnum(proto.Message):
r"""Container for enum describing possible status of a manager
and client link.
"""
class ManagerLinkStatus(proto.Enum):
r"""Possible statuses of a link."""
UNSPECIFIED = 0
UNKNOWN = 1
ACTIVE = 2
INACTIVE = 3
PENDING = 4
REFUSED = 5
CANCELED = 6
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -1,836,098,244,861,664,500 | 27.511628 | 74 | 0.672104 | false | 4.006536 | false | false | false |
LividInstruments/LiveRemoteScripts | Launchpad_M4L/LaunchpadM4L.py | 1 | 4621 | # http://lividinstruments.com
from __future__ import with_statement
import Live
import math
""" _Framework files """
from _Framework.ButtonElement import ButtonElement # Class representing a button a the controller
from _Framework.ButtonMatrixElement import ButtonMatrixElement # Class representing a 2-dimensional set of buttons
from _Framework.ChannelStripComponent import ChannelStripComponent # Class attaching to the mixer of a given track
#from _Framework.ClipSlotComponent import ClipSlotComponent # Class representing a ClipSlot within Live
from _Framework.CompoundComponent import CompoundComponent # Base class for classes encompasing other components to form complex components
from _Framework.ControlElement import ControlElement # Base class for all classes representing control elements on a controller
from _Framework.ControlSurface import ControlSurface # Central base class for scripts based on the new Framework
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent # Base class for all classes encapsulating functions in Live
from _Framework.DeviceComponent import DeviceComponent # Class representing a device in Live
from _Framework.EncoderElement import EncoderElement # Class representing a continuous control on the controller
from _Framework.InputControlElement import * # Base class for all classes representing control elements on a controller
from VCM600.MixerComponent import MixerComponent # Class encompassing several channel strips to form a mixer
from _Framework.ModeSelectorComponent import ModeSelectorComponent # Class for switching between modes, handle several functions with few controls
from _Framework.NotifyingControlElement import NotifyingControlElement # Class representing control elements that can send values
from _Framework.SceneComponent import SceneComponent # Class representing a scene in Live
from _Framework.SessionComponent import SessionComponent # Class encompassing several scene to cover a defined section of Live's session
from _Framework.SessionZoomingComponent import DeprecatedSessionZoomingComponent as SessionZoomingComponent # Class using a matrix of buttons to choose blocks of clips in the session
from _Framework.SliderElement import SliderElement # Class representing a slider on the controller
from VCM600.TrackEQComponent import TrackEQComponent # Class representing a track's EQ, it attaches to the last EQ device in the track
from VCM600.TrackFilterComponent import TrackFilterComponent # Class representing a track's filter, attaches to the last filter in the track
from _Framework.TransportComponent import TransportComponent # Class encapsulating all functions in Live's transport section
""" Here we define some global variables """
CHANNEL = 0 #main channel (0 - 15)
LAUNCH_GRID = [0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23,32,33,34,35,36,37,38,39,48,49,50,51,52,53,54,55,64,65,66,67,68,69,70,71,80,81,82,83,84,85,86,87,96,97,98,99,100,101,102,103,112,113,114,115,116,117,118,119] #there are 64 of these
LAUNCH_SIDE = [8,24,40,56,72,88,104,120] #there are 8 of these
LAUNCH_TOP = [104,105,106,107,108,109,110,111] #there are 8 of these
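# Added note: the grid note numbers above follow the classic Launchpad layout
# of note = row * 16 + column for the 8x8 grid, e.g. row 2, column 5 -> 2*16+5 = 37,
# the sixth entry of the third row in LAUNCH_GRID; LAUNCH_SIDE is column 8 of each row.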
class LaunchpadM4L(ControlSurface):
__module__ = __name__
__doc__ = " LaunchpadM4L controller script "
def __init__(self, c_instance):
super(LaunchpadM4L, self).__init__(c_instance)
with self.component_guard():
self._host_name = 'LaunchpadM4L'
self._color_type = 'Launchpad'
self.log_message("--------------= LaunchpadM4L log BEGIN SCRIPT =--------------")
self._setup_controls()
"""script initialization methods"""
def _setup_controls(self):
is_momentary = True
self._grid = [None for index in range(64)]
self._side = [None for index in range(8)]
self._top = [None for index in range(8)]
for index in range(64):
self._grid[index] = ButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, LAUNCH_GRID[index])
self._grid[index].name = 'grid[' + str(index) + ']'
for index in range(8):
self._side[index] = ButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, LAUNCH_SIDE[index])
self._side[index].name = 'side[' + str(index) + ']'
for index in range(8):
self._top[index] = ButtonElement(is_momentary, MIDI_CC_TYPE, CHANNEL, LAUNCH_TOP[index])
self._top[index].name = 'top[' + str(index) + ']'
def receive_value(self, value):
self._value = value
"""LividBaseM4L script disconnection"""
def disconnect(self):
self.log_message("--------------= LaunchpadM4L log END =--------------")
ControlSurface.disconnect(self)
return None | mit | 2,758,905,294,441,095,000 | 59.815789 | 235 | 0.752651 | false | 3.863712 | false | false | false |
StellarCN/py-stellar-base | stellar_sdk/signer_key.py | 1 | 3956 | from . import xdr as stellar_xdr
from .__version__ import __issues__
from .exceptions import ValueError
from .strkey import StrKey
__all__ = ["SignerKey"]
class SignerKey:
"""The :class:`SignerKey` object, which represents an account signer key on Stellar's network.
:param signer_key: The XDR signer object
"""
def __init__(self, signer_key: stellar_xdr.SignerKey) -> "None":
self.signer_key: stellar_xdr.SignerKey = signer_key
@classmethod
def ed25519_public_key(cls, account_id: str) -> "SignerKey":
"""Create ED25519 PUBLIC KEY Signer from account id.
:param account_id: account id
:return: ED25519 PUBLIC KEY Signer
:raises:
:exc:`Ed25519PublicKeyInvalidError <stellar_sdk.exceptions.Ed25519PublicKeyInvalidError>`: if ``account_id``
is not a valid ed25519 public key.
"""
signer_key = stellar_xdr.SignerKey(
stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_ED25519,
ed25519=stellar_xdr.Uint256(StrKey.decode_ed25519_public_key(account_id)),
)
return cls(signer_key)
@classmethod
def pre_auth_tx(cls, pre_auth_tx_hash: bytes) -> "SignerKey":
"""Create Pre AUTH TX Signer from the sha256 hash of a transaction,
click `here <https://www.stellar.org/developers/guides/concepts/multi-sig.html#pre-authorized-transaction>`__ for more information.
:param pre_auth_tx_hash: The sha256 hash of a transaction.
:return: Pre AUTH TX Signer
"""
signer_key = stellar_xdr.SignerKey(
stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_PRE_AUTH_TX,
pre_auth_tx=stellar_xdr.Uint256(pre_auth_tx_hash),
)
return cls(signer_key)
@classmethod
def sha256_hash(cls, sha256_hash: bytes) -> "SignerKey":
"""Create SHA256 HASH Signer from a sha256 hash of a preimage,
click `here <https://www.stellar.org/developers/guides/concepts/multi-sig.html#hashx>`__ for more information.
:param sha256_hash: a sha256 hash of a preimage
:return: SHA256 HASH Signer
"""
signer_key = stellar_xdr.SignerKey(
stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_HASH_X,
hash_x=stellar_xdr.Uint256(sha256_hash),
)
return cls(signer_key)
def to_xdr_object(self) -> stellar_xdr.SignerKey:
"""Returns the xdr object for this SignerKey object.
:return: XDR Signer object
"""
return self.signer_key
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.SignerKey) -> "SignerKey":
"""Create a :class:`SignerKey` from an XDR SignerKey object.
:param xdr_object: The XDR SignerKey object.
:return: A new :class:`SignerKey` object from the given XDR SignerKey object.
"""
if xdr_object.type == stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_ED25519:
assert xdr_object.ed25519 is not None
account_id = StrKey.encode_ed25519_public_key(xdr_object.ed25519.uint256)
return cls.ed25519_public_key(account_id)
elif xdr_object.type == stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_PRE_AUTH_TX:
assert xdr_object.pre_auth_tx is not None
return cls.pre_auth_tx(xdr_object.pre_auth_tx.uint256)
elif xdr_object.type == stellar_xdr.SignerKeyType.SIGNER_KEY_TYPE_HASH_X:
assert xdr_object.hash_x is not None
return cls.sha256_hash(xdr_object.hash_x.uint256)
else:
raise ValueError(
f"This is an unknown signer type, please consider creating an issuer at {__issues__}."
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented # pragma: no cover
return self.signer_key == other.signer_key
def __str__(self):
return f"<SignerKey [signer_key={self.signer_key}]>"
| apache-2.0 | -7,404,016,642,933,082,000 | 38.959596 | 139 | 0.638524 | false | 3.30217 | false | false | false |
songeater/SONGSHTR | soundfunc.py | 1 | 4391 | import numpy as np
from scipy.fftpack import dct, idct
import sys
'''
----------
FUNCTIONS
----------
'''
def get_config():
config = {}
config['sound_file'] = "harvestmoon-mono-hp500.wav"
config['save_file'] = config['sound_file'] + "_modelsave_"
config['blocksize']=13000
config['compressed_blocksize'] = (config['blocksize']//2+1)
config['seqlen'] = 80 # in number of blocks...
config['win_edge'] = int(config['blocksize'] / 2)
config['out_step'] = 1 # in number of blocks...
config['batchsize'] = 5 # in number of blocks...
config['domain'] = "rfft" # either "rfft" or "dct"
if config['domain'] == "dct": config['win_edge'] = int(config['blocksize'] / 2) # if dct, have to set this
return config
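# Added note on the config above (illustrative): for the "rfft" domain a real
# FFT of `blocksize` samples yields blocksize//2 + 1 bins, which is exactly
# what 'compressed_blocksize' encodes:
#
#   >>> import numpy as np
#   >>> len(np.fft.rfft(np.zeros(13000)))
#   6501  # == 13000//2 + 1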
def concat_sound_blocks_mdct(sound_blocks, edge, clickedge=0):
print(edge)
print(np.asarray(sound_blocks).shape)
new_gen = []
for i in range(0, len(sound_blocks)-2):
if i==0:
new_gen.append(sound_blocks[i][0:-edge-clickedge])
else:
temp1 = sound_blocks[i][0:-edge-clickedge]
temp2 = sound_blocks[i-1][-edge+clickedge:]
merge = temp1 + temp2
new_gen.append(merge)
return new_gen
def conv_to_dct(signal, blocksize, edge, out_blocksize):
blocks1 = []
blocks2 = []
for i in range(0, signal.shape[0]-blocksize-edge, blocksize-edge):
dct_block = dct(signal[i:i+blocksize], norm='ortho')
blocks1.append(dct_block)
if blocksize > out_blocksize:
        for opw in range(len(blocks1)): blocks2.append(blocks1[opw][0:out_blocksize])
    else:
        # blocks already fit -- return them untruncated instead of an empty list
        blocks2 = blocks1
    return blocks2
def conv_from_dct(blocks, in_blocksize, out_blocksize):
new_blocks=[]
zeropad = [0]*(out_blocksize-in_blocksize)
dct_pred = blocks
dct_pred = np.append(dct_pred, zeropad)
dct_pred = np.asarray(idct(dct_pred, norm='ortho'), dtype=np.float32)
new_blocks.append(dct_pred)
return new_blocks
def linear(u):
return (1-u, u)
def quadratic_out(u):
u = u * u
return (1-u, u)
def quadratic_in(u):
u = 1-u
u = u * u
return (u, 1-u)
def linear_bounce(u):
u = 2 * ( 0.5-u if u > 0.5 else u)
return (1-u, u)
def merge_sounds(sound1, sound2, fade=linear):
assert len(sound1)==len(sound2)
n = len(sound1)
new_sound = sound1
for t in range(n):
u = t / float(n)
amp1, amp2 = fade(u)
new_sound[t] = sound1[t]*amp1 + sound2[t]*amp2
return new_sound
def concat_sound_blocks(sound_blocks, edge):
print("sound_blocks shape:", np.asarray(sound_blocks[1]).shape)
new_gen = []
for i in range(0, len(sound_blocks)-2):
if i==0: temp1 = sound_blocks[i][0:-edge]
else: temp1 = sound_blocks[i][edge:-edge]
new_gen.append(temp1)
if i%100==0: print("temp1", np.asarray(temp1).shape)
merge_a = sound_blocks[i] [-edge:]
merge_b = sound_blocks[i+1][0:edge]
if edge==0: temp2 = merge_a
else: temp2 = merge_sounds(merge_a, merge_b)
if i%100==0: print("temp2", np.asarray(temp2).shape)
new_gen.append(temp2)
return new_gen
def conv_to_rfft(signal, blocksize, edge):
mag_blocks = []
ang_blocks = []
for i in range(0, signal.shape[0]-blocksize-edge, blocksize-edge):
fft_block = np.fft.rfft(signal[i:i+blocksize], norm='ortho')
mag_blocks.append(np.abs(fft_block))
ang_blocks.append(np.angle(fft_block))
return mag_blocks, ang_blocks
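# Hedged round-trip sketch (added; `signal` is assumed to be a 1-D float32
# numpy array of samples loaded elsewhere):
#
#   cfg = get_config()
#   mags, angs = conv_to_rfft(signal, cfg['blocksize'], cfg['win_edge'])
#   blocks = conv_from_rfft(mags, angs)
#   pieces = concat_sound_blocks(blocks, cfg['win_edge'])
#   rebuilt = np.concatenate(pieces)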
def conv_from_rfft(mag_blocks, ang_blocks=0):
new_blocks=[]
if ang_blocks==0:
fft_pred = []
for opq in range(len(mag_blocks)):
fft_x = np.cos(0)*mag_blocks[opq]
fft_y = np.sin(0)*mag_blocks[opq]
fft_pred.append(fft_x + 1.0j*fft_y)
new_blocks = np.asarray(np.fft.irfft(mag_blocks, norm='ortho'), dtype=np.float32)
print("new_blocks shape:", new_blocks.shape)
else:
for opq in range(len(mag_blocks)):
fft_x = np.cos(ang_blocks[opq])*mag_blocks[opq]
fft_y = np.sin(ang_blocks[opq])*mag_blocks[opq]
fft_pred = fft_x + 1.0j*fft_y
fft_pred = np.asarray(np.fft.irfft(fft_pred, norm='ortho'), dtype=np.float32)
new_blocks.append(fft_pred)
return new_blocks
| agpl-3.0 | 5,898,696,401,641,609,000 | 33.128 | 110 | 0.578228 | false | 3.053547 | true | false | false |
mvcsantos/QGIS | python/plugins/processing/algs/qgis/RandomPointsAlongLines.py | 1 | 5227 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RandomPointsAlongLines.py
---------------------
Date : April 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'April 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import random
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsFields, QgsField, QgsGeometry, QgsSpatialIndex, QgsDistanceArea, QgsFeatureRequest, QgsFeature, QgsPoint
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class RandomPointsAlongLines(GeoAlgorithm):
VECTOR = 'VECTOR'
POINT_NUMBER = 'POINT_NUMBER'
MIN_DISTANCE = 'MIN_DISTANCE'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name = 'Random points along line'
self.group = 'Vector creation tools'
self.addParameter(ParameterVector(self.VECTOR,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_LINE]))
self.addParameter(ParameterNumber(self.POINT_NUMBER,
self.tr('Number of points'), 1, 9999999, 1))
self.addParameter(ParameterNumber(self.MIN_DISTANCE,
self.tr('Minimum distance'), 0.0, 9999999.0, 0.0))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Random points')))
def processAlgorithm(self):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.VECTOR))
pointCount = float(self.getParameterValue(self.POINT_NUMBER))
minDistance = float(self.getParameterValue(self.MIN_DISTANCE))
fields = QgsFields()
fields.append(QgsField('id', QVariant.Int, '', 10, 0))
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
fields, QGis.WKBPoint, layer.dataProvider().crs())
nPoints = 0
nIterations = 0
maxIterations = pointCount * 200
featureCount = layer.featureCount()
total = 100.0 / pointCount
index = QgsSpatialIndex()
points = dict()
da = QgsDistanceArea()
request = QgsFeatureRequest()
random.seed()
while nIterations < maxIterations and nPoints < pointCount:
# pick random feature
fid = random.randint(0, featureCount - 1)
f = layer.getFeatures(request.setFilterFid(fid)).next()
fGeom = QgsGeometry(f.geometry())
if fGeom.isMultipart():
lines = fGeom.asMultiPolyline()
# pick random line
lineId = random.randint(0, len(lines) - 1)
vertices = lines[lineId]
else:
vertices = fGeom.asPolyline()
# pick random segment
if len(vertices) == 2:
vid = 0
else:
vid = random.randint(0, len(vertices) - 2)
startPoint = vertices[vid]
endPoint = vertices[vid + 1]
length = da.measureLine(startPoint, endPoint)
dist = length * random.random()
if dist > minDistance:
d = dist / (length - dist)
rx = (startPoint.x() + d * endPoint.x()) / (1 + d)
ry = (startPoint.y() + d * endPoint.y()) / (1 + d)
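                # Added note: with d = dist / (length - dist), this weighted
                # average is the section formula -- the point dividing the
                # segment startPoint->endPoint in the ratio dist:(length-dist),
                # i.e. lying `dist` units from startPoint along the segment.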
# generate random point
pnt = QgsPoint(rx, ry)
geom = QgsGeometry.fromPoint(pnt)
if vector.checkMinDistance(pnt, index, minDistance, points):
f = QgsFeature(nPoints)
f.initAttributes(1)
f.setFields(fields)
f.setAttribute('id', nPoints)
f.setGeometry(geom)
writer.addFeature(f)
index.insertFeature(f)
points[nPoints] = pnt
nPoints += 1
self.progress.emit(int(nPoints * total))
nIterations += 1
if nPoints < pointCount:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Can not generate requested number of random points. '
'Maximum number of attempts exceeded.'))
del writer
| gpl-2.0 | -8,700,106,233,471,537,000 | 37.718519 | 135 | 0.545246 | false | 4.506034 | false | false | false |
saneyuki/servo | python/servo/bootstrap_commands.py | 1 | 22544 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import absolute_import, print_function, unicode_literals
import base64
import json
import os
import os.path as path
import platform
import re
import subprocess
import sys
import traceback
import six.moves.urllib as urllib
import glob
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
import servo.bootstrap as bootstrap
from servo.command_base import CommandBase, cd, check_call
from servo.util import delete, download_bytes, download_file, extract, check_hash
@CommandProvider
class MachCommands(CommandBase):
@Command('bootstrap',
description='Install required packages for building.',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
                     help='Bootstrap without confirmation')
def bootstrap(self, force=False):
# This entry point isn't actually invoked, ./mach bootstrap is directly
# called by mach (see mach_bootstrap.bootstrap_command_only) so that
# it can install dependencies without needing mach's dependencies
return bootstrap.bootstrap(self.context, force=force)
@Command('bootstrap-salt',
description='Install and set up the salt environment.',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
                     help='Bootstrap without confirmation')
def bootstrap_salt(self, force=False):
return bootstrap.bootstrap(self.context, force=force, specific="salt")
@Command('bootstrap-gstreamer',
description='Set up a local copy of the gstreamer libraries (linux only).',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
                     help='Bootstrap without confirmation')
def bootstrap_gstreamer(self, force=False):
return bootstrap.bootstrap(self.context, force=force, specific="gstreamer")
@Command('bootstrap-android',
description='Install the Android SDK and NDK.',
category='bootstrap')
@CommandArgument('--build',
action='store_true',
help='Install Android-specific dependencies for building')
@CommandArgument('--emulator-x86',
action='store_true',
help='Install Android x86 emulator and system image')
@CommandArgument('--accept-all-licences',
action='store_true',
help='For non-interactive use')
def bootstrap_android(self, build=False, emulator_x86=False, accept_all_licences=False):
if not (build or emulator_x86):
print("Must specify `--build` or `--emulator-x86` or both.")
ndk = "android-ndk-r15c-{system}-{arch}"
tools = "sdk-tools-{system}-4333796"
emulator_platform = "android-28"
emulator_image = "system-images;%s;google_apis;x86" % emulator_platform
known_sha1 = {
# https://dl.google.com/android/repository/repository2-1.xml
"sdk-tools-darwin-4333796.zip": "ed85ea7b59bc3483ce0af4c198523ba044e083ad",
"sdk-tools-linux-4333796.zip": "8c7c28554a32318461802c1291d76fccfafde054",
"sdk-tools-windows-4333796.zip": "aa298b5346ee0d63940d13609fe6bec621384510",
# https://developer.android.com/ndk/downloads/older_releases
"android-ndk-r15c-windows-x86.zip": "f2e47121feb73ec34ced5e947cbf1adc6b56246e",
"android-ndk-r15c-windows-x86_64.zip": "970bb2496de0eada74674bb1b06d79165f725696",
"android-ndk-r15c-darwin-x86_64.zip": "ea4b5d76475db84745aa8828000d009625fc1f98",
"android-ndk-r15c-linux-x86_64.zip": "0bf02d4e8b85fd770fd7b9b2cdec57f9441f27a2",
}
toolchains = path.join(self.context.topdir, "android-toolchains")
if not path.isdir(toolchains):
os.makedirs(toolchains)
def download(target_dir, name, flatten=False):
final = path.join(toolchains, target_dir)
if path.isdir(final):
return
base_url = "https://dl.google.com/android/repository/"
filename = name + ".zip"
url = base_url + filename
archive = path.join(toolchains, filename)
if not path.isfile(archive):
download_file(filename, url, archive)
check_hash(archive, known_sha1[filename], "sha1")
print("Extracting " + filename)
remove = True # Set to False to avoid repeated downloads while debugging this script
if flatten:
extracted = final + "_"
extract(archive, extracted, remove=remove)
contents = os.listdir(extracted)
assert len(contents) == 1
os.rename(path.join(extracted, contents[0]), final)
os.rmdir(extracted)
else:
extract(archive, final, remove=remove)
system = platform.system().lower()
machine = platform.machine().lower()
arch = {"i386": "x86"}.get(machine, machine)
if build:
download("ndk", ndk.format(system=system, arch=arch), flatten=True)
download("sdk", tools.format(system=system))
components = []
if emulator_x86:
components += [
"platform-tools",
"emulator",
"platforms;" + emulator_platform,
emulator_image,
]
if build:
components += [
"platform-tools",
"platforms;android-18",
]
sdkmanager = [path.join(toolchains, "sdk", "tools", "bin", "sdkmanager")] + components
if accept_all_licences:
yes = subprocess.Popen(["yes"], stdout=subprocess.PIPE)
process = subprocess.Popen(
sdkmanager, stdin=yes.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
# Reduce progress bar spam by removing duplicate lines.
# Printing the same line again with \r is a no-op in a real terminal,
# but each line is shown individually in Taskcluster's log viewer.
previous_line = None
line = b""
while 1:
# Read one byte at a time because in Python:
# * readline() blocks until "\n", which doesn't come before the prompt
# * read() blocks until EOF, which doesn't come before the prompt
# * read(n) keeps reading until it gets n bytes or EOF,
# but we don't know reliably how many bytes to read until the prompt
byte = process.stdout.read(1)
if len(byte) == 0:
print(line)
break
line += byte
if byte == b'\n' or byte == b'\r':
if line != previous_line:
print(line.decode("utf-8", "replace"), end="")
sys.stdout.flush()
previous_line = line
line = b""
exit_code = process.wait()
yes.terminate()
if exit_code:
return exit_code
else:
subprocess.check_call(sdkmanager)
if emulator_x86:
avd_path = path.join(toolchains, "avd", "servo-x86")
process = subprocess.Popen(stdin=subprocess.PIPE, stdout=subprocess.PIPE, args=[
path.join(toolchains, "sdk", "tools", "bin", "avdmanager"),
"create", "avd",
"--path", avd_path,
"--name", "servo-x86",
"--package", emulator_image,
"--force",
])
output = b""
while 1:
# Read one byte at a time, see comment above.
byte = process.stdout.read(1)
if len(byte) == 0:
break
output += byte
# There seems to be no way to disable this prompt:
if output.endswith(b"Do you wish to create a custom hardware profile? [no]"):
process.stdin.write("no\n")
assert process.wait() == 0
with open(path.join(avd_path, "config.ini"), "a") as f:
f.write("disk.dataPartition.size=2G\n")
@Command('update-hsts-preload',
description='Download the HSTS preload list',
category='bootstrap')
def bootstrap_hsts_preload(self, force=False):
preload_filename = "hsts_preload.json"
preload_path = path.join(self.context.topdir, "resources")
chromium_hsts_url = "https://chromium.googlesource.com/chromium/src" + \
"/net/+/master/http/transport_security_state_static.json?format=TEXT"
try:
content_base64 = download_bytes("Chromium HSTS preload list", chromium_hsts_url)
except urllib.error.URLError:
print("Unable to download chromium HSTS preload list; are you connected to the internet?")
sys.exit(1)
content_decoded = base64.b64decode(content_base64)
# The chromium "json" has single line comments in it which, of course,
# are non-standard/non-valid json. Simply strip them out before parsing
content_json = re.sub(r'(^|\s+)//.*$', '', content_decoded, flags=re.MULTILINE)
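        # Added illustration: the substitution above turns a line such as
        #     { "name": "example.test", "include_subdomains": true },  // added 2014-02-03
        # into
        #     { "name": "example.test", "include_subdomains": true },
        # leaving plain JSON for json.loads below.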
try:
pins_and_static_preloads = json.loads(content_json)
entries = {
"entries": [
{
"host": e["name"],
"include_subdomains": e.get("include_subdomains", False)
}
for e in pins_and_static_preloads["entries"]
]
}
with open(path.join(preload_path, preload_filename), 'w') as fd:
json.dump(entries, fd, indent=4)
except ValueError as e:
print("Unable to parse chromium HSTS preload list, has the format changed?")
sys.exit(1)
@Command('update-pub-domains',
description='Download the public domains list and update resources/public_domains.txt',
category='bootstrap')
def bootstrap_pub_suffix(self, force=False):
list_url = "https://publicsuffix.org/list/public_suffix_list.dat"
dst_filename = path.join(self.context.topdir, "resources", "public_domains.txt")
not_implemented_case = re.compile(r'^[^*]+\*')
try:
content = download_bytes("Public suffix list", list_url)
except urllib.error.URLError:
print("Unable to download the public suffix list; are you connected to the internet?")
sys.exit(1)
lines = [l.strip() for l in content.decode("utf8").split("\n")]
suffixes = [l for l in lines if not l.startswith("//") and not l == ""]
with open(dst_filename, "wb") as fo:
for suffix in suffixes:
if not_implemented_case.match(suffix):
print("Warning: the new list contains a case that servo can't handle: %s" % suffix)
fo.write(suffix.encode("idna") + "\n")
@Command('clean-nightlies',
description='Clean unused nightly builds of Rust and Cargo',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Actually remove stuff')
@CommandArgument('--keep',
default='1',
help='Keep up to this many most recent nightlies')
def clean_nightlies(self, force=False, keep=None):
default_toolchain = self.default_toolchain()
print("Current Rust version for Servo: {}".format(default_toolchain))
old_toolchains = []
keep = int(keep)
stdout = subprocess.check_output(['git', 'log', '--format=%H', 'rust-toolchain'])
for i, commit_hash in enumerate(stdout.split(), 1):
if i > keep:
toolchain = subprocess.check_output(
['git', 'show', '%s:rust-toolchain' % commit_hash])
old_toolchains.append(toolchain.strip())
removing_anything = False
stdout = subprocess.check_output(['rustup', 'toolchain', 'list'])
for toolchain_with_host in stdout.split():
for old in old_toolchains:
if toolchain_with_host.startswith(old):
removing_anything = True
if force:
print("Removing {}".format(toolchain_with_host))
check_call(["rustup", "uninstall", toolchain_with_host])
else:
print("Would remove {}".format(toolchain_with_host))
if not removing_anything:
print("Nothing to remove.")
elif not force:
print("Nothing done. "
"Run `./mach clean-nightlies -f` to actually remove.")
@Command('clean-cargo-cache',
description='Clean unused Cargo packages',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Actually remove stuff')
@CommandArgument('--show-size', '-s',
action='store_true',
help='Show packages size')
@CommandArgument('--keep',
default='1',
help='Keep up to this many most recent dependencies')
def clean_cargo_cache(self, force=False, show_size=False, keep=None):
def get_size(path):
if os.path.isfile(path):
return os.path.getsize(path) / (1024 * 1024.0)
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size / (1024 * 1024.0)
removing_anything = False
packages = {
'crates': {},
'git': {},
}
import toml
if os.environ.get("CARGO_HOME", ""):
cargo_dir = os.environ.get("CARGO_HOME")
else:
home_dir = os.path.expanduser("~")
cargo_dir = path.join(home_dir, ".cargo")
if not os.path.isdir(cargo_dir):
return
cargo_file = open(path.join(self.context.topdir, "Cargo.lock"))
content = toml.load(cargo_file)
for package in content.get("package", []):
source = package.get("source", "")
version = package["version"]
if source == u"registry+https://github.com/rust-lang/crates.io-index":
crate_name = "{}-{}".format(package["name"], version)
if not packages["crates"].get(crate_name, False):
packages["crates"][package["name"]] = {
"current": [],
"exist": [],
}
packages["crates"][package["name"]]["current"].append(crate_name)
elif source.startswith("git+"):
name = source.split("#")[0].split("/")[-1].replace(".git", "")
branch = ""
crate_name = "{}-{}".format(package["name"], source.split("#")[1])
crate_branch = name.split("?")
if len(crate_branch) > 1:
branch = crate_branch[1].replace("branch=", "")
name = crate_branch[0]
if not packages["git"].get(name, False):
packages["git"][name] = {
"current": [],
"exist": [],
}
packages["git"][name]["current"].append(source.split("#")[1][:7])
if branch:
packages["git"][name]["current"].append(branch)
crates_dir = path.join(cargo_dir, "registry")
crates_cache_dir = ""
crates_src_dir = ""
if os.path.isdir(path.join(crates_dir, "cache")):
for p in os.listdir(path.join(crates_dir, "cache")):
crates_cache_dir = path.join(crates_dir, "cache", p)
crates_src_dir = path.join(crates_dir, "src", p)
git_dir = path.join(cargo_dir, "git")
git_db_dir = path.join(git_dir, "db")
git_checkout_dir = path.join(git_dir, "checkouts")
if os.path.isdir(git_db_dir):
git_db_list = filter(lambda f: not f.startswith('.'), os.listdir(git_db_dir))
else:
git_db_list = []
if os.path.isdir(git_checkout_dir):
git_checkout_list = os.listdir(git_checkout_dir)
else:
git_checkout_list = []
for d in list(set(git_db_list + git_checkout_list)):
crate_name = d.replace("-{}".format(d.split("-")[-1]), "")
if not packages["git"].get(crate_name, False):
packages["git"][crate_name] = {
"current": [],
"exist": [],
}
if os.path.isdir(path.join(git_checkout_dir, d)):
with cd(path.join(git_checkout_dir, d)):
git_crate_hash = glob.glob('*')
if not git_crate_hash or not os.path.isdir(path.join(git_db_dir, d)):
packages["git"][crate_name]["exist"].append(("del", d, ""))
continue
for d2 in git_crate_hash:
dep_path = path.join(git_checkout_dir, d, d2)
if os.path.isdir(dep_path):
packages["git"][crate_name]["exist"].append((path.getmtime(dep_path), d, d2))
elif os.path.isdir(path.join(git_db_dir, d)):
packages["git"][crate_name]["exist"].append(("del", d, ""))
if crates_src_dir:
for d in os.listdir(crates_src_dir):
crate_name = re.sub(r"\-\d+(\.\d+){1,3}.+", "", d)
if not packages["crates"].get(crate_name, False):
packages["crates"][crate_name] = {
"current": [],
"exist": [],
}
packages["crates"][crate_name]["exist"].append(d)
total_size = 0
for packages_type in ["git", "crates"]:
sorted_packages = sorted(packages[packages_type])
for crate_name in sorted_packages:
crate_count = 0
existed_crates = packages[packages_type][crate_name]["exist"]
for exist in sorted(existed_crates, reverse=True):
current_crate = packages[packages_type][crate_name]["current"]
size = 0
exist_name = path.join(exist[1], exist[2]) if packages_type == "git" else exist
exist_item = exist[2] if packages_type == "git" else exist
if exist_item not in current_crate:
crate_count += 1
if int(crate_count) >= int(keep) or not current_crate or \
exist[0] == "del" or exist[2] == "master":
removing_anything = True
crate_paths = []
if packages_type == "git":
exist_checkout_path = path.join(git_checkout_dir, exist[1])
exist_db_path = path.join(git_db_dir, exist[1])
exist_path = path.join(git_checkout_dir, exist_name)
if exist[0] == "del":
if os.path.isdir(exist_checkout_path):
crate_paths.append(exist_checkout_path)
if os.path.isdir(exist_db_path):
crate_paths.append(exist_db_path)
crate_count += -1
else:
crate_paths.append(exist_path)
exist_checkout_list = glob.glob(path.join(exist_checkout_path, '*'))
if len(exist_checkout_list) <= 1:
crate_paths.append(exist_checkout_path)
if os.path.isdir(exist_db_path):
crate_paths.append(exist_db_path)
else:
crate_paths.append(path.join(crates_cache_dir, "{}.crate".format(exist)))
crate_paths.append(path.join(crates_src_dir, exist))
size = sum(get_size(p) for p in crate_paths) if show_size else 0
total_size += size
print_msg = (exist_name, " ({}MB)".format(round(size, 2)) if show_size else "", cargo_dir)
if force:
print("Removing `{}`{} package from {}".format(*print_msg))
for crate_path in crate_paths:
if os.path.exists(crate_path):
try:
delete(crate_path)
except:
print(traceback.format_exc())
print("Delete %s failed!" % crate_path)
else:
print("Would remove `{}`{} package from {}".format(*print_msg))
if removing_anything and show_size:
print("\nTotal size of {} MB".format(round(total_size, 2)))
if not removing_anything:
print("Nothing to remove.")
elif not force:
print("\nNothing done. "
"Run `./mach clean-cargo-cache -f` to actually remove.")
| mpl-2.0 | 885,229,770,263,548,800 | 44.821138 | 118 | 0.522489 | false | 4.233615 | false | false | false |
cabanm/project-euler | Problem 23/problem23.py | 1 | 1277 | # Find the sum of all the positive integers which
# cannot be written as the sum of two abundant numbers.
#
# Facts:
# All integers greater than 28123 can be
# written as the sum of two abundant numbers.
# Abundant number = sum of proper divisors of n exceeds n.
#
# Find all abundant numbers up to and including 28123
# Add all combinations of these and store if not greater than 28123
# Add all integers <= 28123 not in the list to get required sum
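# Worked example (added): 12 is abundant because 1+2+3+4+6 = 16 > 12, while
# 28 is perfect rather than abundant since 1+2+4+7+14 = 28; the smallest sum
# of two abundant numbers is therefore 12+12 = 24.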
from myMath import *
abundants = list()
for n in range(1, 28123 + 1):
if sum(int(n).properDivisors()) > n:
abundants.append(n)
print('stage 1 complete --', 'number of abundants = ', len(abundants))
sums = list()
for i, n in enumerate(abundants):
for m in abundants[i:]:
if n+m <= 28123:
sums.append(n+m)
sums = sorted(set(sums))
print('stage 2 complete --', 'number of sums of abundants = ', len(sums))
sumsIndeces = [0]*(28123 + 1)
for i, n in enumerate(sums):
sumsIndeces.pop(n)
sumsIndeces.insert(n,1) # places a one at every index that is sum of abundants
if i%1000 == 0:
print(i)
print('stage 3 complete')
total = 0
for n in range(len(sumsIndeces)):
if sumsIndeces[n] == 0:
total += n
print('sum = ', total)
| gpl-2.0 | 7,158,230,133,010,168,000 | 29.146341 | 82 | 0.645262 | false | 3.107056 | false | false | false |
mapbox/atom-shell | script/create-dist.py | 1 | 7365 | #!/usr/bin/env python
import argparse
import os
import re
import shutil
import subprocess
import sys
import tarfile
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, NODE_VERSION, \
TARGET_PLATFORM, DIST_ARCH
from lib.util import scoped_cwd, rm_rf, get_atom_shell_version, make_zip, \
safe_mkdir, execute
ATOM_SHELL_VERSION = get_atom_shell_version()
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'Release')
NODE_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'node')
DIST_HEADERS_NAME = 'node-{0}'.format(NODE_VERSION)
DIST_HEADERS_DIR = os.path.join(DIST_DIR, DIST_HEADERS_NAME)
SYMBOL_NAME = {
'darwin': 'libchromiumcontent.dylib.dSYM',
'linux': 'libchromiumcontent.so.dbg',
'win32': 'chromiumcontent.dll.pdb',
}[TARGET_PLATFORM]
TARGET_BINARIES = {
'darwin': [
],
'win32': [
'atom.exe',
'chromiumcontent.dll',
'content_shell.pak',
'd3dcompiler_43.dll',
'ffmpegsumo.dll',
'icudtl.dat',
'libEGL.dll',
'libGLESv2.dll',
'msvcp120.dll',
'msvcr120.dll',
'ui_resources_200_percent.pak',
'vccorlib120.dll',
'webkit_resources_200_percent.pak',
'xinput1_3.dll',
],
'linux': [
'atom',
'content_shell.pak',
'icudtl.dat',
'libchromiumcontent.so',
'libffmpegsumo.so',
],
}
TARGET_DIRECTORIES = {
'darwin': [
'Atom.app',
],
'win32': [
'resources',
'locales',
],
'linux': [
'resources',
'locales',
],
}
SYSTEM_LIBRARIES = [
'libudev.so',
'libgcrypt.so',
'libnotify.so',
]
HEADERS_SUFFIX = [
'.h',
'.gypi',
]
HEADERS_DIRS = [
'src',
'deps/http_parser',
'deps/zlib',
'deps/uv',
'deps/npm',
'deps/mdb_v8',
]
HEADERS_FILES = [
'common.gypi',
'config.gypi',
]
def main():
rm_rf(DIST_DIR)
os.makedirs(DIST_DIR)
args = parse_args()
force_build()
download_libchromiumcontent_symbols(args.url)
create_symbols()
copy_binaries()
copy_headers()
copy_license()
if TARGET_PLATFORM == 'linux':
copy_system_libraries()
create_version()
create_dist_zip()
create_symbols_zip()
create_header_tarball()
def parse_args():
parser = argparse.ArgumentParser(description='Create distributions')
parser.add_argument('-u', '--url',
help='The base URL from which to download '
'libchromiumcontent (i.e., the URL you passed to '
'libchromiumcontent\'s script/upload script',
default=BASE_URL,
required=False)
return parser.parse_args()
def force_build():
build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
execute([sys.executable, build, '-c', 'Release'])
def copy_binaries():
for binary in TARGET_BINARIES[TARGET_PLATFORM]:
shutil.copy2(os.path.join(OUT_DIR, binary), DIST_DIR)
for directory in TARGET_DIRECTORIES[TARGET_PLATFORM]:
shutil.copytree(os.path.join(OUT_DIR, directory),
os.path.join(DIST_DIR, directory),
symlinks=True)
def copy_headers():
os.mkdir(DIST_HEADERS_DIR)
  # Copy standard node headers from the node repository.
for include_path in HEADERS_DIRS:
abs_path = os.path.join(NODE_DIR, include_path)
for dirpath, _, filenames in os.walk(abs_path):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension not in HEADERS_SUFFIX:
continue
copy_source_file(os.path.join(dirpath, filename))
for other_file in HEADERS_FILES:
copy_source_file(source = os.path.join(NODE_DIR, other_file))
# Copy V8 headers from chromium's repository.
src = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor', 'download',
'libchromiumcontent', 'src')
for dirpath, _, filenames in os.walk(os.path.join(src, 'v8')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension not in HEADERS_SUFFIX:
continue
copy_source_file(source=os.path.join(dirpath, filename),
start=src,
destination=os.path.join(DIST_HEADERS_DIR, 'deps'))
def copy_license():
shutil.copy2(os.path.join(SOURCE_ROOT, 'LICENSE'), DIST_DIR)
def copy_system_libraries():
ldd = execute(['ldd', os.path.join(OUT_DIR, 'atom')])
lib_re = re.compile('\t(.*) => (.+) \(.*\)$')
for line in ldd.splitlines():
m = lib_re.match(line)
if not m:
continue
for i, library in enumerate(SYSTEM_LIBRARIES):
real_library = m.group(1)
if real_library.startswith(library):
shutil.copyfile(m.group(2), os.path.join(DIST_DIR, real_library))
SYSTEM_LIBRARIES[i] = real_library
def create_version():
version_path = os.path.join(SOURCE_ROOT, 'dist', 'version')
with open(version_path, 'w') as version_file:
version_file.write(ATOM_SHELL_VERSION)
def download_libchromiumcontent_symbols(url):
brightray_dir = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor')
target_dir = os.path.join(brightray_dir, 'download', 'libchromiumcontent')
symbols_path = os.path.join(target_dir, 'Release', SYMBOL_NAME)
if os.path.exists(symbols_path):
return
download = os.path.join(brightray_dir, 'libchromiumcontent', 'script',
'download')
subprocess.check_call([sys.executable, download, '-f', '-s', '-c',
LIBCHROMIUMCONTENT_COMMIT, url, target_dir])
def create_symbols():
build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
subprocess.check_output([sys.executable, build, '-c', 'Release',
'-t', 'atom_dump_symbols'])
directory = 'Atom-Shell.breakpad.syms'
shutil.copytree(os.path.join(OUT_DIR, directory),
os.path.join(DIST_DIR, directory),
symlinks=True)
def create_dist_zip():
dist_name = 'atom-shell-{0}-{1}-{2}.zip'.format(ATOM_SHELL_VERSION,
TARGET_PLATFORM, DIST_ARCH)
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = TARGET_BINARIES[TARGET_PLATFORM] + ['LICENSE', 'version']
if TARGET_PLATFORM == 'linux':
files += SYSTEM_LIBRARIES
dirs = TARGET_DIRECTORIES[TARGET_PLATFORM]
make_zip(zip_file, files, dirs)
def create_symbols_zip():
dist_name = 'atom-shell-{0}-{1}-{2}-symbols.zip'.format(ATOM_SHELL_VERSION,
TARGET_PLATFORM,
DIST_ARCH)
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = ['LICENSE', 'version']
dirs = ['Atom-Shell.breakpad.syms']
make_zip(zip_file, files, dirs)
def create_header_tarball():
with scoped_cwd(DIST_DIR):
tarball = tarfile.open(name=DIST_HEADERS_DIR + '.tar.gz', mode='w:gz')
tarball.add(DIST_HEADERS_NAME)
tarball.close()
def copy_source_file(source, start=NODE_DIR, destination=DIST_HEADERS_DIR):
relative = os.path.relpath(source, start=start)
final_destination = os.path.join(destination, relative)
safe_mkdir(os.path.dirname(final_destination))
shutil.copy2(source, final_destination)
if __name__ == '__main__':
sys.exit(main())
| mit | 4,588,913,929,787,219,500 | 27.326923 | 78 | 0.621453 | false | 3.329566 | false | false | false |
afh/cmakedash | cmakedash.py | 1 | 1690 | #!/usr/bin/env python
#
# cmakedash - a dash docset generator for CMake
import os
import re
import subprocess
from bs4 import BeautifulSoup, NavigableString, Tag
from docsetgenerator import DocsetGenerator
class CMakeDocsetGenerator (DocsetGenerator):
def __init__(self):
DocsetGenerator.__init__(self)
self.docsetName = 'CMake'
self.iconFilename = 'icon.tiff'
def helpFilename(self):
return os.path.join(self.documentsPath(), 'index.html')
def dashFeedVersion(self):
cmakeVersion = subprocess.check_output('cmake --version'.split()).split()
return cmakeVersion[2]
def generateHtml(self):
os.system("cmake --help-html > '%s'" % (self.helpFilename()))
def generateIndex(self):
page = open(self.helpFilename()).read()
soup = BeautifulSoup(page)
any = re.compile('.*')
for tag in soup.find_all('a', {'href':any}):
name = tag.text.strip()
if len(name) > 0:
path = tag.attrs['href'].strip()
if path.startswith('#command'):
stype = 'Command'
elif path.startswith('#opt'):
stype = 'Option'
elif path.startswith('#variable'):
stype = 'Variable'
elif path.startswith('#module'):
stype = 'Module'
elif path.startswith('#prop_') or path.startswith('#property'):
stype = 'Property'
elif path.startswith('http'):
continue
else:
if self.verbose: print 'Skipping %s' % (path)
continue
path = 'index.html%s' % (path)
self.addIndexEntry(name, stype, path)
if __name__ == '__main__':
generator = CMakeDocsetGenerator()
args = generator.getargs()
generator.run(args)
| mit | -1,009,468,933,443,884,300 | 27.644068 | 77 | 0.621302 | false | 3.814898 | false | false | false |
hjanime/VisTrails | vistrails/db/versions/v0_9_3/domain/workflow.py | 1 | 7277 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from auto_gen import DBWorkflow as _DBWorkflow
from auto_gen import DBAbstractionRef, DBModule, DBGroup
from id_scope import IdScope
import copy
class DBWorkflow(_DBWorkflow):
def __init__(self, *args, **kwargs):
_DBWorkflow.__init__(self, *args, **kwargs)
self.objects = {}
self.tmp_id = IdScope(1,
{DBAbstractionRef.vtType: DBModule.vtType,
DBGroup.vtType: DBModule.vtType})
def __copy__(self):
return DBWorkflow.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBWorkflow.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBWorkflow
# need to go through and reset the index to the copied objects
cp.build_index()
cp.tmp_id = copy.copy(self.tmp_id)
return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBWorkflow()
new_obj = _DBWorkflow.update_version(old_obj, trans_dict, new_obj)
new_obj.update_id_scope()
new_obj.build_index()
return new_obj
def update_id_scope(self):
pass
_vtTypeMap = {'abstractionRef': 'module', 'group': 'module'}
def build_index(self):
g = self._vtTypeMap.get
self.objects = dict(((g(o.vtType, o.vtType), o._db_id), o)
for (o,_,_) in self.db_children())
def add_to_index(self, object):
obj_type = self._vtTypeMap.get(object.vtType, object.vtType)
self.objects[(obj_type, object.getPrimaryKey())] = object
def delete_from_index(self, object):
obj_type = self._vtTypeMap.get(object.vtType, object.vtType)
del self.objects[(obj_type, object.getPrimaryKey())]
def capitalizeOne(self, str):
return str[0].upper() + str[1:]
def db_print_objects(self):
for k,v in self.objects.iteritems():
print '%s: %s' % (k, v)
def db_has_object(self, type, id):
return (type, id) in self.objects
def db_get_object(self, type, id):
return self.objects[(type, id)]
def db_add_object(self, object, parent_obj_type=None,
parent_obj_id=None, parent_obj=None):
if parent_obj is None:
if parent_obj_type is None or parent_obj_id is None:
parent_obj = self
else:
if parent_obj_type == 'abstractionRef' or \
parent_obj_type == 'group':
parent_obj_type = 'module'
try:
parent_obj = self.objects[(parent_obj_type, parent_obj_id)]
except KeyError:
msg = "Cannot find object of type '%s' with id '%s'" % \
(parent_obj_type, parent_obj_id)
raise Exception(msg)
if object.vtType == 'abstractionRef' or object.vtType == 'group':
obj_type = 'module'
else:
obj_type = object.vtType
funname = 'db_add_' + obj_type
obj_copy = copy.copy(object)
getattr(parent_obj, funname)(obj_copy)
self.add_to_index(obj_copy)
def db_change_object(self, old_id, object, parent_obj_type=None,
parent_obj_id=None, parent_obj=None):
if parent_obj is None:
if parent_obj_type is None or parent_obj_id is None:
parent_obj = self
else:
if parent_obj_type == 'abstractionRef' or \
parent_obj_type == 'group':
parent_obj_type = 'module'
try:
parent_obj = self.objects[(parent_obj_type, parent_obj_id)]
except KeyError:
msg = "Cannot find object of type '%s' with id '%s'" % \
(parent_obj_type, parent_obj_id)
raise Exception(msg)
self.db_delete_object(old_id, object.vtType, None, None, parent_obj)
self.db_add_object(object, None, None, parent_obj)
def db_delete_object(self, obj_id, obj_type, parent_obj_type=None,
parent_obj_id=None, parent_obj=None):
if parent_obj is None:
if parent_obj_type is None or parent_obj_id is None:
parent_obj = self
else:
if parent_obj_type == 'abstractionRef' or \
parent_obj_type == 'group':
parent_obj_type = 'module'
try:
parent_obj = self.objects[(parent_obj_type, parent_obj_id)]
except KeyError:
msg = "Cannot find object of type '%s' with id '%s'" % \
(parent_obj_type, parent_obj_id)
raise Exception(msg)
if obj_type == 'abstractionRef' or obj_type == 'group':
obj_type = 'module'
funname = 'db_get_' + obj_type
if hasattr(parent_obj, funname):
object = getattr(parent_obj, funname)(obj_id)
else:
attr_name = 'db_' + obj_type
object = getattr(parent_obj, attr_name)
funname = 'db_delete_' + obj_type
getattr(parent_obj, funname)(object)
self.delete_from_index(object)
| bsd-3-clause | -6,573,125,723,641,042,000 | 41.063584 | 79 | 0.578123 | false | 3.963508 | false | false | false |
semanticize/semanticizest | semanticizest/parse_wikidump/__init__.py | 1 | 11223 | """Parsing utilities for Wikipedia database dumps."""
from __future__ import print_function
from os.path import basename
from bz2 import BZ2File
from collections import Counter, namedtuple
import gzip
from HTMLParser import HTMLParser
from itertools import chain
import logging
import re
import xml.etree.ElementTree as etree # don't use LXML, it's slower (!)
import six
from semanticizest._util import ngrams
from semanticizest._version import __version__
_logger = logging.getLogger(__name__)
Page = namedtuple("Page", ['page_id', 'title', 'content', 'redirect'])
def _get_namespace(tag):
try:
namespace = re.match(r"^{(.*?)}", tag).group(1)
except AttributeError:
namespace = ''
if not namespace.startswith("http://www.mediawiki.org/xml/export-"):
raise ValueError("namespace %r not recognized as MediaWiki dump"
% namespace)
return namespace
if six.PY3:
def _tounicode(s):
return s
else:
def _tounicode(s):
# Convert ASCII strings coming from xml.etree.
if isinstance(s, str):
s = s.decode('ascii')
return s
def extract_pages(f):
"""Extract pages from Wikimedia database dump.
Parameters
----------
f : file-like or str
Handle on Wikimedia article dump. May be any type supported by
etree.iterparse.
Returns
-------
pages : iterable over `Page`s
        namedtuples containing the fields (page_id, title, content,
        redirect_target). In Python 2.x, may produce either
        str or unicode strings.
"""
elems = etree.iterparse(f, events=["end"])
    # We can't rely on a fixed namespace for database dumps, since it's changed
    # every time a small modification to the format is made. So, determine it
    # from the first element we find, which will be part of the metadata,
    # and construct the element paths from that.
_, elem = next(elems)
namespace = _get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
ns_path = "./{%(ns)s}ns" % ns_mapping
page_tag = "{%(ns)s}page" % ns_mapping
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
id_path = "./{%(ns)s}id" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
redir_path = "./{%(ns)s}redirect" % ns_mapping
for _, elem in elems:
if elem.tag == page_tag:
if elem.find(ns_path).text != '0':
continue
text = elem.find(text_path).text
if text is None:
# Empty article; these occur in Wikinews dumps.
continue
redir = elem.find(redir_path)
redir = (_tounicode(redir.attrib['title'])
if redir is not None else None)
text = _tounicode(text)
title = _tounicode(elem.find(title_path).text)
yield Page(int(elem.find(id_path).text), title, text, redir)
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. That shouldn't matter since the pages
# comprise the bulk of the file.
elem.clear()
def _clean_link(l):
"""Clean links (anchor and titles)."""
l = l.strip()
l = re.sub(r'\s+', ' ', l)
return l
def extract_links(article):
"""Extract all (or most) links from article text (wiki syntax).
Returns an iterable over (target, anchor) pairs.
"""
links = re.findall(r"(\w*) \[\[ ([^]]+) \]\] (\w*)", article,
re.UNICODE | re.VERBOSE)
r = []
for before, l, after in links:
if '|' in l:
target, anchor = l.split('|', 1)
else:
target, anchor = l, l
# If the anchor contains a colon, assume it's a file or category link.
if ':' in target:
continue
# Some links contain newlines...
target = _clean_link(target)
anchor = _clean_link(anchor)
# Remove section links and normalize to the format used in <redirect>
# elements: uppercase first character, spaces instead of underscores.
target = target.split('#', 1)[0].replace('_', ' ')
if not target:
continue # section link
if not target[0].isupper():
target = target[0].upper() + target[1:]
anchor = before + anchor + after
r.append((target, anchor))
return r
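# Illustrative behaviour (a sketch, not part of the original module); the
# anchors below assume the surrounding word characters get glued onto the
# link text exactly as the code above does:
#
#     >>> extract_links("[[Amsterdam|the capital]] of the [[netherlands]]")
#     [('Amsterdam', 'the capital'), ('Netherlands', 'netherlands')]
#     >>> extract_links("[[magnet]]ism")
#     [('Magnet', 'magnetism')]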
_UNWANTED = re.compile(r"""
(:?
\{\{ .*? \}\}
| \{\| .*? \|\}
| ^[|!] .* $ # table content
| <math> .*? </math>
| <ref .*? > .*? </ref>
| <br\s*/>
| </?su[bp]\s*>
| \[\[ [^][:]* : (\[\[.*?\]\]|.)*? \]\] # media, categories
| =+ .*? =+ # headers
| ''+
| ^\* # list bullets
)
""", re.DOTALL | re.MULTILINE | re.UNICODE | re.VERBOSE)
_unescape_entities = HTMLParser().unescape
def clean_text(page):
"""Return the clean-ish running text parts of a page."""
return re.sub(_UNWANTED, "", _unescape_entities(page))
_LINK_SYNTAX = re.compile(r"""
(?:
\[\[
(?: [^]|]* \|)? # "target|" in [[target|anchor]]
|
\]\]
)
""", re.DOTALL | re.MULTILINE | re.VERBOSE)
def remove_links(page):
"""Remove links from clean_text output."""
page = re.sub(r'\]\]\[\[', ' ', page) # hack hack hack, see test
return re.sub(_LINK_SYNTAX, '', page)
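# Illustrative example (not part of the original module): remove_links keeps
# only the anchor text, e.g. remove_links("[[Foo|bar]] baz [[Quux]]") gives
# "bar baz Quux"; the adjacent-links hack above turns "[[a]][[b]]" into
# "a b" rather than merging the two anchors.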
def page_statistics(page, N, sentence_splitter=None, tokenizer=None):
"""Gather statistics from a single WP page.
The sentence_splitter should be a callable that splits text into sentences.
It defaults to an unspecified heuristic.
See ``parse_dump`` for the parameters.
Returns
-------
stats : (dict, dict)
The first dict maps (target, anchor) pairs to counts.
The second maps n-grams (up to N) to counts.
"""
if N is not None and not isinstance(N, int):
raise TypeError("expected integer or None for N, got %r" % N)
clean = clean_text(page)
link_counts = Counter(extract_links(clean))
if N:
no_links = remove_links(clean)
if sentence_splitter is None:
            # re.split's third positional argument is maxsplit, so the regex
            # flags must be passed via the keyword argument.
            sentences = re.split(r'(?:\n{2,}|\.\s+)', no_links,
                                 flags=re.MULTILINE | re.UNICODE)
else:
            sentences = [sentence
                         for paragraph in re.split('\n+', no_links)
                         for sentence in sentence_splitter(paragraph)]
if tokenizer is None:
tokenizer = re.compile(r'\w+', re.UNICODE).findall
all_ngrams = chain.from_iterable(ngrams(tokenizer(sentence), N)
for sentence in sentences)
ngram_counts = Counter(all_ngrams)
else:
ngram_counts = None
return link_counts, ngram_counts
def _open(f):
if isinstance(f, six.string_types):
if f.endswith('.gz'):
return gzip.open(f)
elif f.endswith('.bz2'):
return BZ2File(f)
return open(f)
return f
def parse_dump(dump, db, N=7, sentence_splitter=None, tokenizer=None):
"""Parse Wikipedia database dump, return n-gram and link statistics.
Parameters
----------
dump : {file-like, str}
Path to or handle on a Wikipedia page dump, e.g.
'chowiki-20140919-pages-articles.xml.bz2'.
db : SQLite connection
Connection to database that will be used to store statistics.
N : integer
Maximum n-gram length. Set this to a false value to disable
n-gram counting; this disables some of the fancier statistics,
but baseline entity linking will still work.
sentence_splitter : callable, optional
Sentence splitter. Called on output of paragraph splitter
(strings).
tokenizer : callable, optional
Tokenizer. Called on output of sentence splitter (strings).
Must return iterable over strings.
"""
f = _open(dump)
redirects = {}
c = db.cursor()
# Store the semanticizer version for later reference
c.execute('''insert into parameters values ('version', ?);''',
(__version__,))
# Store the dump file name
c.execute('''insert into parameters values ('dump', ?);''',
(basename(dump),))
# Store the maximum ngram length, so we can use it later on
c.execute('''insert into parameters values ('N', ?);''', (str(N),))
# Temporary index to speed up insertion
c.execute('''create unique index target_anchor
on linkstats(ngram_id, target)''')
_logger.info("Processing articles")
for i, page in enumerate(extract_pages(f), 1):
if i % 10000 == 0:
_logger.info("%d articles done", i)
if page.redirect is not None:
redirects[page.title] = page.redirect
continue
link, ngram = page_statistics(page.content, N=N, tokenizer=tokenizer,
sentence_splitter=sentence_splitter)
# We don't count the n-grams within the links, but we need them
# in the table, so add them with zero count.
tokens = chain(six.iteritems(ngram or {}),
((anchor, 0) for _, anchor in six.iterkeys(link)))
tokens = list(tokens)
c.executemany('''insert or ignore into ngrams (ngram) values (?)''',
((g,) for g, _ in tokens))
c.executemany('''update ngrams set tf = tf + ?, df = df + 1
where ngram = ?''',
((count, token) for token, count in tokens))
c.executemany('''insert or ignore into linkstats values
((select id from ngrams where ngram = ?), ?, 0)''',
((anchor, target)
for target, anchor in six.iterkeys(link)))
c.executemany('''update linkstats set count = count + ?
where ngram_id = (select rowid from ngrams
where ngram = ?)''',
((count, anchor)
for (_, anchor), count in six.iteritems(link)))
db.commit()
_logger.info("Processing %d redirects", len(redirects))
for redir, target in redirects.items():
for anchor, count in c.execute('''select ngram_id, count from linkstats
where target = ?''', [redir]):
# TODO: combine the next two execute statements
c.execute('''insert or ignore into linkstats values (?, ?, 0)''',
[anchor, target])
c.execute('''update linkstats
set count = count + ?
where target = ? and ngram_id = ?''',
(count, target, anchor))
c.executemany('delete from linkstats where target = ?',
([redir] for redir in redirects))
_logger.info("Finalizing database")
c.executescript('''drop index target_anchor; vacuum;''')
_logger.info("Dump parsing done: processed %d articles", i)
db.commit()
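# Minimal usage sketch (illustrative; it assumes a SQLite connection whose
# schema already contains the `parameters`, `ngrams` and `linkstats` tables
# used above, and a dump file name chosen here only as an example):
#
#     import sqlite3
#     db = sqlite3.connect('nlwiki.db')
#     parse_dump('nlwiki-latest-pages-articles.xml.bz2', db, N=7)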
| apache-2.0 | 3,747,019,002,081,521,700 | 32.008824 | 79 | 0.557605 | false | 4.03125 | false | false | false |
KelSolaar/sIBL_GUI | sibl_gui/components/core/inspector/models.py | 1 | 2265 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**models.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines the :class:`sibl_gui.components.core.inspector.inspector.Inspector`
Component Interface class Models.
**Others:**
"""
from __future__ import unicode_literals
import foundations.verbose
import sibl_gui.ui.models
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = ["LOGGER", "PlatesModel"]
LOGGER = foundations.verbose.install_logger()
class PlatesModel(sibl_gui.ui.models.GraphModel):
"""
Defines the Model used the by :class:`sibl_gui.components.core.inspector.inspector.Inspector`
Component Interface class.
"""
def __init__(self, parent=None, root_node=None, horizontal_headers=None, vertical_headers=None):
"""
Initializes the class.
:param parent: Object parent.
:type parent: QObject
:param root_node: Root node.
:type root_node: AbstractCompositeNode
:param horizontal_headers: Headers.
:type horizontal_headers: OrderedDict
:param vertical_headers: Headers.
:type vertical_headers: OrderedDict
"""
LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))
sibl_gui.ui.models.GraphModel.__init__(self,
parent,
root_node,
horizontal_headers,
vertical_headers)
def initialize_model(self, root_node):
"""
Initializes the Model using given root node.
:param root_node: Graph root node.
:type root_node: DefaultNode
:return: Method success
:rtype: bool
"""
LOGGER.debug("> Initializing model with '{0}' root node.".format(root_node))
self.beginResetModel()
self.root_node = root_node
self.enable_model_triggers(True)
self.endResetModel()
return True
| gpl-3.0 | 2,661,083,915,787,938,300 | 27.670886 | 100 | 0.587638 | false | 4.001767 | false | false | false |
sekikn/ambari | ambari-common/src/main/python/ambari_ws4py/server/wsgirefserver.py | 2 | 5353 | # -*- coding: utf-8 -*-
__doc__ = """
Add WebSocket support to the built-in WSGI server
provided by the :py:mod:`wsgiref`. This is clearly not
meant to be a production server so please consider this
only for testing purposes.
Mostly, this module overrides bits and pieces of
the built-in classes so that it supports the WebSocket
workflow.
.. code-block:: python
from wsgiref.simple_server import make_server
from ambari_ws4py.websocket import EchoWebSocket
from ambari_ws4py.server.wsgirefserver import WSGIServer, WebSocketWSGIRequestHandler
from ambari_ws4py.server.wsgiutils import WebSocketWSGIApplication
server = make_server('', 9000, server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=EchoWebSocket))
server.initialize_websockets_manager()
server.serve_forever()
.. note::
For some reason this server may fail against autobahntestsuite.
"""
import logging
import sys
import itertools
import operator
from wsgiref.handlers import SimpleHandler
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer as _WSGIServer
from wsgiref import util
util._hoppish = {}.__contains__
from ambari_ws4py.manager import WebSocketManager
from ambari_ws4py import format_addresses
from ambari_ws4py.server.wsgiutils import WebSocketWSGIApplication
from ambari_ws4py.compat import get_connection
__all__ = ['WebSocketWSGIHandler', 'WebSocketWSGIRequestHandler',
'WSGIServer']
logger = logging.getLogger('ambari_ws4py')
class WebSocketWSGIHandler(SimpleHandler):
def setup_environ(self):
"""
Setup the environ dictionary and add the
`'ws4py.socket'` key. Its associated value
is the real socket underlying socket.
"""
SimpleHandler.setup_environ(self)
self.environ['ws4py.socket'] = get_connection(self.environ['wsgi.input'])
self.http_version = self.environ['SERVER_PROTOCOL'].rsplit('/')[-1]
def finish_response(self):
"""
Completes the response and performs the following tasks:
- Remove the `'ws4py.socket'` and `'ws4py.websocket'`
environ keys.
- Attach the returned websocket, if any, to the WSGI server
using its ``link_websocket_to_server`` method.
"""
# force execution of the result iterator until first actual content
rest = iter(self.result)
first = list(itertools.islice(rest, 1))
self.result = itertools.chain(first, rest)
# now it's safe to look if environ was modified
ws = None
if self.environ:
self.environ.pop('ws4py.socket', None)
ws = self.environ.pop('ws4py.websocket', None)
try:
SimpleHandler.finish_response(self)
except:
if ws:
ws.close(1011, reason='Something broke')
raise
else:
if ws:
self.request_handler.server.link_websocket_to_server(ws)
class WebSocketWSGIRequestHandler(WSGIRequestHandler):
WebSocketWSGIHandler = WebSocketWSGIHandler
def handle(self):
"""
Unfortunately the base class forces us
to override the whole method to actually provide our wsgi handler.
"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
        # next line is where we'd have expected a configuration key somehow
handler = self.WebSocketWSGIHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
class WSGIServer(_WSGIServer):
def initialize_websockets_manager(self):
"""
        Call this to start the underlying websockets
manager. Make sure to call it once your server
is created.
"""
self.manager = WebSocketManager()
self.manager.start()
def shutdown_request(self, request):
"""
The base class would close our socket
if we didn't override it.
"""
pass
def link_websocket_to_server(self, ws):
"""
Call this from your WSGI handler when a websocket
has been created.
"""
self.manager.add(ws)
def server_close(self):
"""
Properly initiate closing handshakes on
all websockets when the WSGI server terminates.
"""
if hasattr(self, 'manager'):
self.manager.close_all()
self.manager.stop()
self.manager.join()
delattr(self, 'manager')
_WSGIServer.server_close(self)
if __name__ == '__main__':
from ambari_ws4py import configure_logger
configure_logger()
from wsgiref.simple_server import make_server
from ambari_ws4py.websocket import EchoWebSocket
server = make_server('', 9000, server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=EchoWebSocket))
server.initialize_websockets_manager()
try:
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
| apache-2.0 | 4,859,754,912,478,700,000 | 33.095541 | 89 | 0.653092 | false | 4.330906 | false | false | false |
fishroot/nemoa | nemoa/dataset/commons/labels/__init__.py | 1 | 2018 | # -*- coding: utf-8 -*-
__author__ = 'Patrick Michl'
__email__ = '[email protected]'
__license__ = 'GPLv3'
import nemoa
import numpy
import importlib
def convert(list, input, output = None, filter = False):
generic_types = ['number', 'string', 'float']
if isinstance(list, (numpy.ndarray)):
list = list.tolist()
input_dtype = 'nparray'
else: input_dtype = 'list'
# 'input'
if input in generic_types:
input_class = 'generic'
input_format = input
elif ':' in input:
input_class = input.lower().split(':')[0].strip()
input_format = input.lower().split(':')[1].strip()
else: raise Warning("""could not convert list:
unknown input format '%s'.""" % input)
# 'output'
if output in generic_types:
output_class = 'generic'
output_format = output
elif not output:
output_class = input_class
output_format = None
elif ':' in input:
output_class = output.lower().split(':')[0].strip()
output_format = output.lower().split(':')[1].strip()
else: raise Warning("""could not convert list:
unknown output format '%s'.""" % output)
# 'input' vs 'output'
if input_class != output_class:
raise Warning("'%s' can not be converted to '%s'"
% (input_class, output_class))
# trivial cases
if input_class == 'generic' or input_format == output_format:
if input_dtype == 'nparray':
return numpy.asarray(list), numpy.asarray([])
else: return list, []
# import annotation module
module_name = input_class.lower()
module = importlib.import_module('nemoa.dataset.commons.labels.'
+ module_name)
converter = getattr(module, module_name)()
output_list, output_lost = converter.convert_list(
list, input_format, output_format, filter)
if input_dtype == 'nparray':
return numpy.asarray(output_list), numpy.asarray(output_lost)
return output_list, output_lost
| gpl-3.0 | -1,435,464,430,566,239,200 | 31.031746 | 69 | 0.600099 | false | 3.779026 | false | false | false |
marado/youtube-dl | youtube_dl/extractor/npo.py | 1 | 12656 | from __future__ import unicode_literals
from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
url_basename,
)
class NPOBaseIE(SubtitlesInfoExtractor):
def _get_token(self, video_id):
token_page = self._download_webpage(
'http://ida.omroep.nl/npoplayer/i.js',
video_id, note='Downloading token')
return self._search_regex(
r'npoplayer\.token = "(.+?)"', token_page, 'token')
class NPOIE(NPOBaseIE):
IE_NAME = 'npo.nl'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/(?!live|radio)[^/]+/[^/]+/(?P<id>[^/?]+)'
_TESTS = [
{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
},
{
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
},
{
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
'duration': 3000,
},
},
{
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'wmv',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
},
'params': {
# mplayer mms download
'skip_download': True,
}
},
# non asf in streams
{
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'md5': 'b3da13de374cbe2d5332a7e910bef97f',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._get_info(video_id)
def _get_info(self, video_id):
metadata = self._download_json(
'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
video_id,
# We have to remove the javascript callback
transform_source=strip_jsonp,
)
token = self._get_token(video_id)
formats = []
pubopties = metadata.get('pubopties')
if pubopties:
quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
for format_id in pubopties:
format_info = self._download_json(
'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
% (video_id, format_id, token),
video_id, 'Downloading %s JSON' % format_id)
if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
continue
streams = format_info.get('streams')
if streams:
video_info = self._download_json(
streams[0] + '&type=json',
video_id, 'Downloading %s stream JSON' % format_id)
else:
video_info = format_info
video_url = video_info.get('url')
if not video_url:
continue
if format_id == 'adaptive':
formats.extend(self._extract_m3u8_formats(video_url, video_id))
else:
formats.append({
'url': video_url,
'format_id': format_id,
'quality': quality(format_id),
})
streams = metadata.get('streams')
if streams:
for i, stream in enumerate(streams):
stream_url = stream.get('url')
if not stream_url:
continue
if '.asf' not in stream_url:
formats.append({
'url': stream_url,
'quality': stream.get('kwaliteit'),
})
continue
asx = self._download_xml(
stream_url, video_id,
'Downloading stream %d ASX playlist' % i,
transform_source=fix_xml_ampersands)
ref = asx.find('./ENTRY/Ref')
if ref is None:
continue
video_url = ref.get('href')
if not video_url:
continue
formats.append({
'url': video_url,
'ext': stream.get('formaat', 'asf'),
'quality': stream.get('kwaliteit'),
})
self._sort_formats(formats)
subtitles = {}
if metadata.get('tt888') == 'ja':
subtitles['nl'] = 'http://e.omroep.nl/tt888/%s' % video_id
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
subtitles = self.extract_subtitles(video_id, subtitles)
return {
'id': video_id,
'title': metadata['titel'],
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'upload_date': unified_strdate(metadata.get('gidsdatum')),
'duration': parse_duration(metadata.get('tijdsduur')),
'formats': formats,
'subtitles': subtitles,
}
class NPOLiveIE(NPOBaseIE):
IE_NAME = 'npo.nl:live'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'
_TEST = {
'url': 'http://www.npo.nl/live/npo-1',
'info_dict': {
'id': 'LI_NEDERLAND1_136692',
'display_id': 'npo-1',
'ext': 'mp4',
'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Livestream',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
live_id = self._search_regex(
r'data-prid="([^"]+)"', webpage, 'live id')
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % live_id,
display_id, transform_source=strip_jsonp)
token = self._get_token(display_id)
formats = []
streams = metadata.get('streams')
if streams:
for stream in streams:
stream_type = stream.get('type').lower()
if stream_type == 'ss':
continue
stream_info = self._download_json(
'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
% (stream.get('url'), token),
display_id, 'Downloading %s JSON' % stream_type)
if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
continue
stream_url = self._download_json(
stream_info['stream'], display_id,
'Downloading %s URL' % stream_type,
transform_source=strip_jsonp)
if stream_type == 'hds':
f4m_formats = self._extract_f4m_formats(stream_url, display_id)
# f4m downloader downloads only piece of live stream
for f4m_format in f4m_formats:
f4m_format['preference'] = -1
formats.extend(f4m_formats)
elif stream_type == 'hls':
formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
else:
formats.append({
'url': stream_url,
})
self._sort_formats(formats)
return {
'id': live_id,
'display_id': display_id,
'title': self._live_title(metadata['titel']),
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'formats': formats,
'is_live': True,
}
class NPORadioIE(InfoExtractor):
IE_NAME = 'npo.nl:radio'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-1',
'info_dict': {
'id': 'radio-1',
'ext': 'mp3',
'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
@staticmethod
def _html_get_attribute_regex(attribute):
return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
self._html_get_attribute_regex('data-channel'), webpage, 'title')
stream = self._parse_json(
self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'),
video_id)
codec = stream.get('codec')
return {
'id': video_id,
'url': stream['url'],
'title': self._live_title(title),
'acodec': codec,
'ext': codec,
'is_live': True,
}
class NPORadioFragmentIE(InfoExtractor):
IE_NAME = 'npo.nl:radio:fragment'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
'info_dict': {
'id': '174356',
'ext': 'mp3',
'title': 'Jubileumconcert Willeke Alberti',
},
}
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(url, audio_id)
title = self._html_search_regex(
r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id,
webpage, 'title')
audio_url = self._search_regex(
r"data-streams='([^']+)'", webpage, 'audio url')
return {
'id': audio_id,
'url': audio_url,
'title': title,
}
class TegenlichtVproIE(NPOIE):
IE_NAME = 'tegenlicht.vpro.nl'
_VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?'
_TESTS = [
{
'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
},
},
]
def _real_extract(self, url):
name = url_basename(url)
webpage = self._download_webpage(url, name)
urn = self._html_search_meta('mediaurn', webpage)
info_page = self._download_json(
'http://rs.vpro.nl/v2/api/media/%s.json' % urn, name)
return self._get_info(info_page['mid'])
| unlicense | -5,777,262,629,636,759,000 | 33.579235 | 111 | 0.477323 | false | 3.51751 | false | false | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/resources/types/mobile_device_constant.py | 1 | 2416 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import mobile_device_type
__protobuf__ = proto.module(
package='google.ads.googleads.v6.resources',
marshal='google.ads.googleads.v6',
manifest={
'MobileDeviceConstant',
},
)
class MobileDeviceConstant(proto.Message):
r"""A mobile device constant.
Attributes:
resource_name (str):
Output only. The resource name of the mobile device
constant. Mobile device constant resource names have the
form:
``mobileDeviceConstants/{criterion_id}``
id (int):
Output only. The ID of the mobile device
constant.
name (str):
Output only. The name of the mobile device.
manufacturer_name (str):
Output only. The manufacturer of the mobile
device.
operating_system_name (str):
Output only. The operating system of the
mobile device.
type_ (google.ads.googleads.v6.enums.types.MobileDeviceTypeEnum.MobileDeviceType):
Output only. The type of mobile device.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
id = proto.Field(
proto.INT64,
number=7,
optional=True,
)
name = proto.Field(
proto.STRING,
number=8,
optional=True,
)
manufacturer_name = proto.Field(
proto.STRING,
number=9,
optional=True,
)
operating_system_name = proto.Field(
proto.STRING,
number=10,
optional=True,
)
type_ = proto.Field(
proto.ENUM,
number=6,
enum=mobile_device_type.MobileDeviceTypeEnum.MobileDeviceType,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -6,480,759,131,112,648,000 | 27.423529 | 90 | 0.627897 | false | 4.151203 | false | false | false |
6/GeoDJ | geodj/settings.py | 1 | 5663 | import os
# Override these on production env
os.environ.setdefault("APP_ENV", "development")
os.environ.setdefault("SECRET_KEY", "^uhrm48x9y=1f&+$bg=oc(#23mp0*g5k%8+si9tdz7&4_xk&lf")
if os.environ["APP_ENV"] == "development":
try:
        # Add secret ENV variables for development (e.g. API keys) to secrets.py
import secrets
os.environ.setdefault("LASTFM_API_KEY", secrets.LASTFM_API_KEY)
except:
pass
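# A hypothetical secrets.py for local development could simply contain
# module-level constants, e.g.:
#
# LASTFM_API_KEY = "0123456789abcdef"  # placeholder, not a real key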
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = os.environ['APP_ENV'] != 'production'
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {}
if os.environ["APP_ENV"] == "production":
import dj_database_url
DATABASES['default'] = dj_database_url.config()
else:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'geodj_development',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = 'staticfiles'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ["SECRET_KEY"]
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'geodj.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'geodj.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'geodj',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
| mit | 8,175,222,836,632,787,000 | 31.545977 | 89 | 0.693449 | false | 3.586447 | false | false | false |
userzimmermann/python-moretools | moretools/_types.py | 1 | 2808 | # python-moretools
#
# many more basic tools for python 2/3
# extending itertools, functools and operator
#
# Copyright (C) 2011-2016 Stefan Zimmermann <[email protected]>
#
# python-moretools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-moretools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with python-moretools. If not, see <http://www.gnu.org/licenses/>.
from ._common import *
from ._simpledict import SimpleDictType
from six.moves import UserString, UserList, UserDict
number_types = integer_types + (float, complex)
string_types = (string_types) + (UserString,)
list_types = (list, UserList)
dict_types = (dict, UserDict, SimpleDictType)
def isintclass(cls):
return issubclass(cls, int)
isinttype = isintclass
def isint(value):
return isinstance(value, int)
if PY2:
def islongclass(cls):
return issubclass(cls, long)
islongtype = islongclass
def islong(value):
return isinstance(value, long)
def isintegerclass(cls):
return issubclass(cls, integer_types)
isintegertype = isintegerclass
def isinteger(value):
return isinstance(value, integer_types)
def isfloatclass(cls):
return issubclass(cls, float)
isfloattype = isfloatclass
def isfloat(value):
return isinstance(value, float)
def iscomplexclass(cls):
return issubclass(cls, complex)
iscomplextype = iscomplexclass
def iscomplex(value):
return isinstance(value, complex)
def isnumberclass(cls):
return issubclass(cls, number_types)
isnumbertype = isnumberclass
def isnumber(value):
return isinstance(value, number_types)
def isstringclass(cls):
return issubclass(cls, string_types)
isstringtype = isstringclass
def isstring(value):
return isinstance(value, string_types)
def istupleclass(cls):
return issubclass(cls, tuple)
istupletype = istupleclass
def istuple(value):
return isinstance(value, tuple)
def islistclass(cls):
return issubclass(cls, list_types)
islisttype = islistclass
def islist(value):
return isinstance(value, list_types)
def issetclass(cls):
return issubclass(cls, set)
issettype = issetclass
def isset(value):
return isinstance(value, set)
def isdictclass(cls):
return issubclass(cls, dict_types)
isdicttype = isdictclass
def isdict(value):
return isinstance(value, dict_types)
| gpl-3.0 | 2,180,999,676,987,541,200 | 20.112782 | 77 | 0.742877 | false | 3.549937 | false | false | false |
petry/kanboard | apps/core/migrations/0001_initial.py | 1 | 1775 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Board'
db.create_table(u'core_board', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'core', ['Board'])
# Adding model 'Story'
db.create_table(u'core_story', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Board'])),
))
db.send_create_signal(u'core', ['Story'])
def backwards(self, orm):
# Deleting model 'Board'
db.delete_table(u'core_board')
# Deleting model 'Story'
db.delete_table(u'core_story')
models = {
u'core.board': {
'Meta': {'object_name': 'Board'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'core.story': {
'Meta': {'object_name': 'Story'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Board']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['core'] | apache-2.0 | 3,847,779,870,579,923,000 | 35.244898 | 102 | 0.56169 | false | 3.585859 | false | false | false |
izapolsk/integration_tests | cfme/roles.py | 1 | 28996 | from cfme.utils.log import logger
def _remove_page(roles, group, pages):
if group in roles:
for page in pages:
if page in roles[group]:
roles[group].remove(page)
else:
logger.info("Page %s attempted to be removed from role %s, "
"but isn't in there anyway", page, group)
else:
logger.info("Attempted to remove a page from role %s, but role "
"doesn't exist", group)
def _remove_from_all(roles, r_page):
for group in roles:
for page in roles[group]:
if page == r_page:
roles[group].remove(page)
else:
logger.info("Page %s attempted to be removed from role %s, "
"but isn't in there anyway", page, group)
group_data = {
'evmgroup-administrator': [
'control_explorer',
'control_simulation',
'control_import_export',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_pxe',
'infrastructure_requests',
'clouds_providers',
'clouds_availability_zones',
'clouds_flavors',
'clouds_security_groups',
'clouds_instances',
'clouds_stacks',
'my_settings',
'tasks',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'automate_explorer',
'automate_simulation',
'automate_customization',
'automate_import_export',
'automate_log',
'automate_requests',
'my_services',
'services_catalogs',
'services_requests',
'services_workloads',
'utilization',
'planning',
'bottlenecks'
],
'evmgroup-approver': [
'control_explorer',
'control_simulation',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_pxe',
'infrastructure_requests',
'clouds_instances',
'my_settings',
'tasks',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
        'services_requests',
        'services_workloads'
],
'evmgroup-auditor': [
'control_explorer',
'control_simulation',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_pxe',
'clouds_instances',
'my_settings',
'tasks',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'services_workloads',
'utilization',
'planning',
'bottlenecks'
],
'evmgroup-desktop': [
'services_requests',
'services_workloads',
'dashboard',
'infrastructure_config_management',
'infrastructure_requests',
'infrastructure_virtual_machines',
'clouds_instances',
'my_settings',
'about'
],
'evmgroup-operator': [
'services_workloads',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_pxe',
'clouds_instances',
'my_settings',
'tasks',
'about'
],
'evmgroup-security': [
'control_explorer',
'control_simulation',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'clouds_instances',
'my_settings',
'tasks',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'services_workloads'
],
'evmgroup-super_administrator': [
'control_explorer',
'control_simulation',
'control_import_export',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_pxe',
'infrastructure_requests',
'infrastructure_config_management',
'clouds_providers',
'clouds_availability_zones',
'clouds_flavors',
'clouds_security_groups',
'clouds_instances',
'clouds_tenants',
'clouds_stacks',
'my_settings',
'tasks',
'configuration',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'automate_explorer',
'automate_simulation',
'automate_customization',
'automate_import_export',
'automate_log',
'automate_requests',
'my_services',
'services_catalogs',
'services_requests',
'services_workloads',
'utilization',
'planning',
'bottlenecks'
],
'evmgroup-support': [
'control_explorer',
'control_simulation',
'control_log',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'clouds_instances',
'my_settings',
'tasks',
'about',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'services_workloads'
],
'evmgroup-user': [
'services_workloads',
'services_requests',
'dashboard',
'reports',
'chargeback',
'timelines',
'rss',
'infrastructure_providers',
'infrastructure_clusters',
'infrastructure_hosts',
'infrastructure_virtual_machines',
'infrastructure_resource_pools',
'infrastructure_datastores',
'infrastructure_requests',
'clouds_instances',
'my_settings',
'tasks',
'about'
],
'evmgroup-user_limited_self_service': [
'clouds_instances',
'services_requests',
'infrastructure_virtual_machines',
'infrastructure_requests',
'my_settings',
'about'
],
'evmgroup-user_self_service': [
'clouds_instances',
'services_requests',
'infrastructure_config_management',
'infrastructure_virtual_machines',
'infrastructure_requests',
'my_settings',
'about'
],
'evmgroup-vm_user': [
'clouds_instances',
'infrastructure_config_management',
'infrastructure_virtual_machines',
'infrastructure_requests',
'services_requests',
'services_workloads',
'my_settings',
'about'
]
}
# Matches structure/string format of VerticalNavigation output for tree, not UI access control tree
# TODO include non-vertical nav RBAC to settings, help
# TODO RBAC goes deeper than veritcal nav, into accordions. example cloud intel -> Reports
role_access_ui_510z = {
'evmgroup-super_administrator': {
'Cloud Intel': ['Dashboard', 'Reports', 'Chargeback', 'Timelines', 'RSS'],
'Services': ['My Services', 'Catalogs', 'Workloads', 'Requests'],
'Compute': {
'Clouds': [
'Providers',
'Availability Zones',
'Host Aggregates',
'Tenants',
'Flavors',
'Instances',
'Stacks',
'Key Pairs',
'Topology'
],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE',
'Networking',
'Topology'
],
'Physical Infrastructure': [
'Overview',
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
],
'Containers': [
'Overview',
'Providers',
'Projects',
'Routes',
'Container Services',
'Replicators',
'Pods',
'Containers',
'Container Nodes',
'Volumes',
'Container Builds',
'Image Registries',
'Container Images',
'Container Templates',
'Topology'
],
'Migration': [
'Migration Plans',
'Infrastructure Mappings',
'Migration Settings'
]
},
'Configuration': ['Management'],
'Networks': [
'Providers',
'Networks',
'Subnets',
'Network Routers',
'Security Groups',
'Floating IPs',
'Network Ports',
'Load Balancers',
'Topology'
],
'Storage': {
'Block Storage': [
'Managers',
'Volumes',
'Volume Snapshots',
'Volume Backups',
'Volume Types'
],
'Object Storage': [
'Managers',
'Object Store Containers',
'Object Store Objects'
]
},
'Control': ['Explorer', 'Simulation', 'Import / Export', 'Log'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer', 'Jobs'],
'Automate': [
'Explorer',
'Simulation',
'Generic Objects',
'Customization',
'Import / Export',
'Log',
'Requests'
]
},
'Optimize': ['Utilization', 'Planning', 'Bottlenecks'],
'Monitor': {
'Alerts': ['Overview', 'All Alerts']
}
},
'evmgroup-administrator': {
'Automation': {
'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
'Ansible Tower': ['Jobs', 'Explorer'],
'Automate': ['Log', 'Simulation', 'Import / Export', 'Customization', 'Explorer']},
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Flavors', 'Instances', 'Providers', 'Host Aggregates', 'Availability Zones',
'Stacks', 'Topology'],
'Containers': ['Container Nodes', 'Containers', 'Providers', 'Overview',
'Image Registries', 'Container Builds', 'Container Services',
'Volumes', 'Container Images', 'Routes', 'Pods', 'Replicators',
'Projects', 'Topology'],
'Infrastructure': ['Datastores', 'Networking', 'Providers', 'Virtual Machines', 'Hosts',
'Clusters', 'Topology', 'PXE', 'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Configuration': ['Management'],
'Control': ['Import / Export', 'Log', 'Explorer', 'Simulation'],
'Networks': ['Providers', 'Security Groups', 'Floating IPs', 'Networks'],
'Optimize': ['Bottlenecks', 'Planning', 'Utilization'],
'Services': ['Requests', 'Workloads', 'Catalogs', 'My Services'],
'Storage': {
'Object Storage': ['Object Store Containers', 'Object Store Objects']}
},
'evmgroup-approver': {
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
'PXE', 'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Control': ['Explorer', 'Log', 'Simulation'],
'Services': ['Requests', 'Workloads', 'My Services'],
},
'evmgroup-auditor': {
'Automation': {
'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
'Ansible Tower': ['Explorer']},
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
'Networking', 'PXE', 'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Control': ['Explorer', 'Log', 'Simulation'],
'Optimize': ['Bottlenecks', 'Planning', 'Utilization'],
'Services': ['Workloads', 'My Services']},
'evmgroup-desktop': {
'Automation': {
'Ansible Tower': ['Explorer']},
'Cloud Intel': ['Dashboard'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Configuration': ['Management'],
'Services': ['Requests', 'Workloads']
},
'evmgroup-operator': {
'Automation': {
'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
'Ansible Tower': ['Explorer']},
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
'PXE', 'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Configuration': ['Management'],
'Services': ['Workloads', 'My Services']
},
'evmgroup-security': {
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts',
'Clusters', 'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Servers']},
'Control': ['Explorer', 'Log', 'Simulation'],
'Services': ['My Services', 'Workloads']
},
'evmgroup-support': {
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Control': ['Explorer', 'Log', 'Simulation'],
'Services': ['My Services', 'Workloads']
},
'evmgroup-user': {
'Cloud Intel': ['Timelines', 'RSS', 'Dashboard', 'Reports', 'Chargeback'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Datastores', 'Providers', 'Virtual Machines', 'Hosts', 'Clusters',
'Resource Pools'],
'Physical Infrastructure': ['Providers', 'Chassis', 'Racks', 'Switches', 'Servers',
'Storages', 'Topology']},
'Services': ['Requests', 'Workloads', 'My Services']
},
'evmgroup-vm_user': {
'Automation': {
'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
'Ansible Tower': ['Explorer']},
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines']},
'Configuration': ['Management'],
'Services': ['Requests', 'Workloads'],
}
}
role_access_ssui = {
'evmgroup-user_limited_self_service': {
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines']},
'Services': ['Requests', 'Catalogs', 'My Services']
},
'evmgroup-user_self_service': {
'Automation': {
'Ansible': ['Credentials', 'Repositories', 'Playbooks'],
'Ansible Tower': ['Explorer']},
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines'],
'Physical Infrastructure': ['Providers']},
'Configuration': ['Management'],
'Services': ['Requests', 'Catalogs', 'My Services']
},
}
role_access_ui_511z = {
'evmgroup-super_administrator': {
'Overview': [
'Dashboard',
'Reports',
'Utilization',
'Chargeback',
'Optimization'
],
'Services': ['My Services', 'Catalogs', 'Workloads', 'Requests'],
'Compute': {
'Clouds': [
'Providers',
'Availability Zones',
'Host Aggregates',
'Tenants',
'Flavors',
'Instances',
'Stacks',
'Key Pairs',
'Topology'
],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE',
'Firmware Registry',
'Networking',
'Topology'
],
'Physical Infrastructure': [
'Overview',
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
],
'Containers': [
'Overview',
'Providers',
'Projects',
'Routes',
'Container Services',
'Replicators',
'Pods',
'Containers',
'Container Nodes',
'Volumes',
'Container Builds',
'Image Registries',
'Container Images',
'Container Templates',
'Topology'
]
},
'Migration': [
'Migration Plans',
'Infrastructure Mappings',
'Migration Settings'
],
'Configuration': ['Management'],
'Networks': [
'Providers',
'Networks',
'Subnets',
'Network Routers',
'Security Groups',
'Floating IPs',
'Network Ports',
'Topology'
],
'Storage': {
'Block Storage': [
'Managers',
'Volumes',
'Volume Snapshots',
'Volume Backups',
'Volume Types'
],
'Object Storage': [
'Managers',
'Object Store Containers',
'Object Store Objects'
]
},
'Control': ['Explorer', 'Simulation', 'Import / Export', 'Log'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer', 'Jobs'],
'Automate': [
'Explorer',
'Simulation',
'Generic Objects',
'Customization',
'Import / Export',
'Log',
'Requests'
]
},
'Monitor': {
'Alerts': ['Overview', 'All Alerts']
},
},
'evmgroup-administrator': {
'Overview': ['Dashboard', 'Reports', 'Utilization', 'Chargeback'],
'Services': ['My Services', 'Catalogs', 'Workloads', 'Requests'],
'Compute': {
'Clouds': [
'Providers',
'Availability Zones',
'Host Aggregates',
'Flavors',
'Instances',
'Stacks',
'Topology'
],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE',
'Networking',
'Topology'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
],
'Containers': [
'Overview',
'Providers',
'Projects',
'Routes',
'Container Services',
'Replicators',
'Pods',
'Containers',
'Container Nodes',
'Volumes',
'Container Builds',
'Image Registries',
'Container Images',
'Topology'
]
},
'Configuration': ['Management'],
'Networks': ['Providers', 'Networks', 'Security Groups', 'Floating IPs'],
'Storage': {
'Object Storage': [
'Object Store Containers',
'Object Store Objects'
]
},
'Control': ['Explorer', 'Simulation', 'Import / Export', 'Log'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer', 'Jobs'],
'Automate': [
'Explorer',
'Simulation',
'Customization',
'Import / Export',
'Log'
]
}
},
'evmgroup-approver': {
'Overview': ['Dashboard', 'Reports', 'Chargeback'],
'Services': ['My Services', 'Workloads', 'Requests'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
},
'Control': ['Explorer', 'Simulation', 'Log']
},
'evmgroup-auditor': {
'Overview': ['Dashboard', 'Reports', 'Utilization', 'Chargeback'],
'Services': ['My Services', 'Workloads'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE',
'Networking'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
},
'Control': ['Explorer', 'Simulation', 'Log'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer']
}
},
'evmgroup-desktop': {
'Overview': ['Dashboard'],
'Services': ['Workloads', 'Requests'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines'],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
},
'Configuration': ['Management'],
'Automation': {
'Ansible Tower': ['Explorer']
}
},
'evmgroup-operator': {
'Overview': ['Dashboard', 'Reports', 'Chargeback'],
'Services': ['My Services', 'Workloads'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores',
'PXE'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
},
'Configuration': ['Management'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer']
}
},
'evmgroup-security': {
'Overview': ['Dashboard', 'Reports', 'Chargeback'],
'Services': ['My Services', 'Workloads'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores'
],
'Physical Infrastructure': ['Providers', 'Servers']
},
'Control': ['Explorer', 'Simulation', 'Log']
},
'evmgroup-support': {
'Overview': ['Dashboard', 'Reports', 'Chargeback'],
'Services': ['My Services', 'Workloads'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
},
'Control': ['Explorer', 'Simulation', 'Log']
},
'evmgroup-user': {
'Overview': ['Dashboard', 'Reports', 'Chargeback'],
'Services': ['My Services', 'Workloads', 'Requests'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': [
'Providers',
'Clusters',
'Hosts',
'Virtual Machines',
'Resource Pools',
'Datastores'
],
'Physical Infrastructure': [
'Providers',
'Chassis',
'Racks',
'Servers',
'Storages',
'Switches',
'Topology'
]
}
},
'evmgroup-vm_user': {
'Services': ['Workloads', 'Requests'],
'Compute': {
'Clouds': ['Instances'],
'Infrastructure': ['Virtual Machines']
},
'Configuration': ['Management'],
'Automation': {
'Ansible': ['Playbooks', 'Repositories', 'Credentials'],
'Ansible Tower': ['Explorer']
}
}
}
| gpl-2.0 | -6,055,738,492,595,354,000 | 31.32553 | 100 | 0.453373 | false | 4.588701 | true | false | false |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/Examples/Catalyst/PythonDolfinExample/simulation-catalyst-step6.py | 1 | 8271 | """This demo program solves the incompressible Navier-Stokes equations
on an L-shaped domain using Chorin's splitting method."""
# Copyright (C) 2010-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Mikael Mortensen 2011
#
# First added: 2010-08-30
# Last changed: 2011-06-30
#
# SC14 Paraview's Catalyst tutorial
#
# Step 6 : Add field data arrays to VTK grid
#
# [SC14-Catalyst] we need a python environment that enables import of both Dolfin and ParaView
execfile("simulation-env.py")
# [SC14-Catalyst] import paraview, vtk and paraview's simple API
import sys
import paraview
import paraview.vtk as vtk
import paraview.simple as pvsimple
# [SC14-Catalyst] check for command line arguments
if len(sys.argv) != 3:
print "command is 'python",sys.argv[0],"<script name> <number of time steps>'"
sys.exit(1)
# [SC14-Catalyst] initialize and read input parameters
paraview.options.batch = True
paraview.options.symmetric = True
# [SC14-Catalyst] import user co-processing script
import vtkPVCatalystPython
import os
scriptpath, scriptname = os.path.split(sys.argv[1])
sys.path.append(scriptpath)
if scriptname.endswith(".py"):
print 'script name is ', scriptname
scriptname = scriptname[0:len(scriptname)-3]
try:
cpscript = __import__(scriptname)
except:
print sys.exc_info()
print 'Cannot find ', scriptname, ' -- no coprocessing will be performed.'
sys.exit(1)
# [SC14-Catalyst] Co-Processing routine to be called at the end of each simulation time step
def coProcess(grid, time, step):
# initialize data description
datadescription = vtkPVCatalystPython.vtkCPDataDescription()
datadescription.SetTimeData(time, step)
datadescription.AddInput("input")
cpscript.RequestDataDescription(datadescription)
inputdescription = datadescription.GetInputDescriptionByName("input")
if inputdescription.GetIfGridIsNecessary() == False:
return
if grid != None:
# attach VTK data set to pipeline input
inputdescription.SetGrid(grid)
# execute catalyst processing
cpscript.DoCoProcessing(datadescription)
# [SC14-Catalyst] convert dolfin mesh to a VTK unstructured grid
def Mesh2VTKUGrid(mesh):
vtkcelltypes=((),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE,vtk.VTK_TRIANGLE,vtk.VTK_QUAD,vtk.VTK_POLYGON,vtk.VTK_POLYGON),(vtk.VTK_EMPTY_CELL,vtk.VTK_VERTEX,vtk.VTK_LINE,vtk.VTK_TRIANGLE,vtk.VTK_TETRA,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_CONVEX_POINT_SET,vtk.VTK_HEXAHEDRON))
npoints=mesh.num_vertices()
geom=mesh.geometry()
pts=vtk.vtkPoints()
pts.SetNumberOfPoints(npoints)
for i in xrange(npoints):
p=geom.point(i)
pts.SetPoint(i,p.x(),p.y(),p.z())
dim = mesh.topology().dim()
ncells=mesh.num_cells()
cells=vtk.vtkCellArray()
cellTypes=vtk.vtkUnsignedCharArray()
cellTypes.SetNumberOfTuples(ncells)
cellLocations=vtk.vtkIdTypeArray()
cellLocations.SetNumberOfTuples(ncells)
loc=0
for (cell,i) in zip(mesh.cells(),xrange(ncells)) :
ncellpoints=len(cell)
cells.InsertNextCell(ncellpoints)
for cpoint in cell:
cells.InsertCellPoint(cpoint)
cellTypes.SetTuple1(i,vtkcelltypes[dim][ncellpoints])
cellLocations.SetTuple1(i,loc)
loc+=1+ncellpoints
ugrid = vtk.vtkUnstructuredGrid()
ugrid.SetPoints(pts)
ugrid.SetCells(cellTypes,cellLocations,cells)
return ugrid
# [SC14-Catalyst] convert a flattened sequence of values to VTK double array
def Values2VTKArray(values,n,name):
ncomps=len(values)/n
array=vtk.vtkDoubleArray()
array.SetNumberOfComponents(ncomps)
array.SetNumberOfTuples(n)
for i in range(n):
a = []
for j in range(ncomps):
a.append(values[i+j*n])
array.SetTupleValue(i, a)
array.SetName(name)
return array
def AddFieldData(ugrid, pointArrays, cellArrays ):
# add Point data fields
npoints = ugrid.GetNumberOfPoints()
for (name,values) in pointArrays:
ugrid.GetPointData().AddArray( Values2VTKArray(values,npoints,name) )
# add Cell data fields
ncells = ugrid.GetNumberOfCells()
for (name,values) in cellArrays:
ugrid.GetCellData().AddArray( Values2VTKArray(values,ncells,name) )
# Begin demo
from dolfin import *
# Print log messages only from the root process in parallel
parameters["std_out_all_processes"] = False;
# Load mesh from file
mesh = Mesh(DOLFIN_EXAMPLE_DATA_DIR+"/lshape.xml.gz")
# Define function spaces (P2-P1)
V = VectorFunctionSpace(mesh, "Lagrange", 2)
Q = FunctionSpace(mesh, "Lagrange", 1)
# Define trial and test functions
u = TrialFunction(V)
p = TrialFunction(Q)
v = TestFunction(V)
q = TestFunction(Q)
# Set parameter values
dt = 0.01
T = 3
nu = 0.01
# Define time-dependent pressure boundary condition
p_in = Expression("sin(3.0*t)", t=0.0)
# Define boundary conditions
noslip = DirichletBC(V, (0, 0),
"on_boundary && \
(x[0] < DOLFIN_EPS | x[1] < DOLFIN_EPS | \
(x[0] > 0.5 - DOLFIN_EPS && x[1] > 0.5 - DOLFIN_EPS))")
inflow = DirichletBC(Q, p_in, "x[1] > 1.0 - DOLFIN_EPS")
outflow = DirichletBC(Q, 0, "x[0] > 1.0 - DOLFIN_EPS")
bcu = [noslip]
bcp = [inflow, outflow]
# Create functions
u0 = Function(V)
u1 = Function(V)
p1 = Function(Q)
# Define coefficients
k = Constant(dt)
f = Constant((0, 0))
# Tentative velocity step
F1 = (1/k)*inner(u - u0, v)*dx + inner(grad(u0)*u0, v)*dx + \
nu*inner(grad(u), grad(v))*dx - inner(f, v)*dx
a1 = lhs(F1)
L1 = rhs(F1)
# Pressure update
a2 = inner(grad(p), grad(q))*dx
L2 = -(1/k)*div(u1)*q*dx
# Velocity update
a3 = inner(u, v)*dx
L3 = inner(u1, v)*dx - k*inner(grad(p1), v)*dx
# Assemble matrices
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Use amg preconditioner if available
prec = "amg" if has_krylov_solver_preconditioner("amg") else "default"
# Create files for storing solution
ufile = File("results/velocity.pvd")
pfile = File("results/pressure.pvd")
# Time-stepping
maxtimestep = int(sys.argv[2])
tstep = 0
t = dt
while tstep < maxtimestep:
# Update pressure boundary condition
p_in.t = t
# Compute tentative velocity step
begin("Computing tentative velocity")
b1 = assemble(L1)
[bc.apply(A1, b1) for bc in bcu]
solve(A1, u1.vector(), b1, "gmres", "default")
end()
# Pressure correction
begin("Computing pressure correction")
b2 = assemble(L2)
[bc.apply(A2, b2) for bc in bcp]
solve(A2, p1.vector(), b2, "gmres", prec)
end()
# Velocity correction
begin("Computing velocity correction")
b3 = assemble(L3)
[bc.apply(A3, b3) for bc in bcu]
solve(A3, u1.vector(), b3, "gmres", "default")
end()
# Plot solution [SC14-Catalyst] Not anymore
# plot(p1, title="Pressure", rescale=True)
# plot(u1, title="Velocity", rescale=True)
# Save to file [SC14-Catalyst] Not anymore
# ufile << u1
# pfile << p1
# [SC14-Catalyst] convert solution to VTK grid
ugrid = Mesh2VTKUGrid( u1.function_space().mesh() )
# [SC14-Catalyst] add field data to the VTK grid
velocity = u1.compute_vertex_values()
pressure = p1.compute_vertex_values()
AddFieldData( ugrid, [ ("Velocity",velocity) , ("Pressure",pressure) ] , [] )
# [SC14-Catalyst] trigger catalyst execution
coProcess(ugrid,t,tstep)
# Move to next time step
u0.assign(u1)
t += dt
tstep += 1
print "t =", t, "step =",tstep
# Hold plot [SC14-Catalyst] Not anymore
# interactive()
| gpl-3.0 | -5,108,514,945,316,003,000 | 29.977528 | 355 | 0.685407 | false | 3.050904 | false | false | false |
progdupeupl/pdp_website | pdp/forum/migrations/0001_initial.py | 1 | 5896 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import datetime
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('title', models.CharField(max_length=80, verbose_name='Titre')),
('position', models.IntegerField(verbose_name='Position', null=True, blank=True)),
('slug', models.SlugField(max_length=80)),
],
options={
'verbose_name_plural': 'Catégories',
'verbose_name': 'Catégorie',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Forum',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('title', models.CharField(max_length=80, verbose_name='Titre')),
('subtitle', models.CharField(max_length=200, verbose_name='Sous-titre', blank=True)),
('position_in_category', models.IntegerField(verbose_name='Position dans la catégorie', null=True, blank=True)),
('slug', models.SlugField(max_length=80)),
('category', models.ForeignKey(to='forum.Category', verbose_name='Catégorie')),
],
options={
'verbose_name_plural': 'Forums',
'verbose_name': 'Forum',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('text', models.TextField(verbose_name='Texte')),
('pubdate', models.DateTimeField(auto_now_add=True, verbose_name='Date de publication')),
('update', models.DateTimeField(verbose_name="Date d'édition", null=True, blank=True)),
('position_in_topic', models.IntegerField(verbose_name='Position dans le sujet')),
('is_useful', models.BooleanField(default=False, verbose_name='Est utile')),
('is_moderated', models.BooleanField(default=False, verbose_name='Est modéré')),
('moderation_time', models.DateTimeField(default=datetime.datetime(2014, 11, 26, 20, 15, 36, 701382), verbose_name="Date d'édition")),
('moderation_text', models.TextField(default='', verbose_name='Explication de modération', blank=True)),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='Auteur', related_name='posts')),
('moderated_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='Modérateur', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('title', models.CharField(max_length=80, verbose_name='Titre')),
('subtitle', models.CharField(max_length=200, verbose_name='Sous-titre', blank=True)),
('pubdate', models.DateTimeField(auto_now_add=True, verbose_name='Date de création')),
('is_solved', models.BooleanField(default=False, verbose_name='Est résolu')),
('is_locked', models.BooleanField(default=False, verbose_name='Est verrouillé')),
('is_sticky', models.BooleanField(default=False, verbose_name='Est en post-it')),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='Auteur', related_name='topics')),
('forum', models.ForeignKey(to='forum.Forum', verbose_name='Forum')),
('last_message', models.ForeignKey(to='forum.Post', verbose_name='Dernier message', related_name='last_message', null=True)),
],
options={
'verbose_name_plural': 'Sujets',
'verbose_name': 'Sujet',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TopicFollowed',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('topic', models.ForeignKey(to='forum.Topic')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='topics_followed')),
],
options={
'verbose_name_plural': 'Sujets suivis',
'verbose_name': 'Sujet suivi',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TopicRead',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('post', models.ForeignKey(to='forum.Post')),
('topic', models.ForeignKey(to='forum.Topic')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='topics_read')),
],
options={
'verbose_name_plural': 'Sujets lus',
'verbose_name': 'Sujet lu',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='post',
name='topic',
field=models.ForeignKey(to='forum.Topic', verbose_name='Sujet'),
preserve_default=True,
),
]
| agpl-3.0 | 5,790,191,699,543,362,000 | 48.855932 | 150 | 0.556009 | false | 4.217204 | false | false | false |
racmariano/skidom | backend/resorts/models/conditions.py | 1 | 2431 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .resort import Resort
from django.db import models
from django.contrib.postgres.fields import ArrayField
from dynamic_scraper.models import Scraper, SchedulerRuntime
from scrapy_djangoitem import DjangoItem
import datetime
# Past and forecasted conditions for a resort
class Conditions(models.Model):
# Hard-coded attributes needed for scraping
resort = models.ForeignKey(Resort, null = True, default=6)
conditions_page_url = models.URLField(blank = True)
checker_runtime = models.ForeignKey(SchedulerRuntime, blank = True, null = True, on_delete = models.SET_NULL)
# Attributes collected during scraping
date = models.DateField(default = datetime.date.today)
base_temp = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
summit_temp = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
wind_speed = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
base_depth = models.DecimalField(max_digits = 6, decimal_places = 2, default = 0)
num_trails_open = models.IntegerField(default = 0)
new_snow_24_hr = models.IntegerField(default = 0)
#past_n_day_snowfall = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
#past_n_day_wind_speed = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
#future_n_day_snowfall = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
#future_n_day_wind_speed = ArrayField(models.DecimalField(max_digits = 6, decimal_places = 2, default = 0), size = 15)
# For database querying
unique_id = models.CharField(default='', max_length = 200)
def __init__(self, *args, **kwargs):
super(Conditions, self).__init__(*args, **kwargs)
if not self.id:
day = datetime.date.today
self.conditions_page_url = self.resort.conditions_page_url
self.unique_id = self.resort.name+str(datetime.date.today())
def __unicode__(self):
return self.resort.name+": "+str(self.date)
def __str__(self):
return self.resort.name+": "+str(self.date)
class Meta:
verbose_name_plural = "Conditions"
class ConditionsItem(DjangoItem):
django_model = Conditions
| mit | -8,431,988,466,548,880,000 | 42.2 | 122 | 0.673797 | false | 3.601481 | false | false | false |
justinmeister/spaceinvaders-spyral | game/level.py | 1 | 1501 | import os
import spyral
from .sprites import sprite
from . import collision
WIDTH = 1200
HEIGHT = 900
WHITE = (255, 255, 255)
SIZE = (WIDTH, HEIGHT)
GREEN = (60, 179, 113)
RED = (255, 0, 0)
BLACKBLUE = (19, 15, 48)
BG_COLOR = BLACKBLUE
ENEMYGAP = 30
XMARGIN = 175
YMARGIN = 100
MOVEX = 15
MOVEY = 20
ENEMYSIDE = 50
BACKGROUND = os.path.join("game", "graphics", "spacebackground.png")
class Level1(spyral.Scene):
def __init__(self):
spyral.Scene.__init__(self, SIZE)
self.space = spyral.Image(filename=BACKGROUND)
self.background = self.space.scale((1200, 900))
self.collision_handler = collision.CollisionHandler(self)
self.player = sprite.Player(self, 'left', self.collision_handler)
self.alien_list = self.make_aliens(6, 3)
self.collision_handler.add_player(self.player)
self.collision_handler.add_aliens(self.alien_list)
spyral.event.register("system.quit", spyral.director.pop)
spyral.event.register("director.update", self.update)
spyral.event.register("input.keyboard.down.q", spyral.director.pop)
def update(self, delta):
pass
def make_aliens(self, columns, rows):
"""
Make aliens and send them to collision handler.
"""
alien_list = []
for column in range(columns):
for row in range(rows):
alien = sprite.Alien(self, row, column)
alien_list.append(alien)
return alien_list
| mit | -697,301,725,593,934,000 | 25.803571 | 75 | 0.632245 | false | 3.284464 | false | false | false |
nakamura-akifumi/kassis_orange | app_search/helpers/paginate_helper.py | 1 | 1579 | import math
class Paginate:
def __init__(self, pagetab_count = 5, per_page = 10):
pass
self.pagetab_count = pagetab_count
self.per_page = per_page
def paginate(self, result_count, current_page):
paginate_list = []
pagetab_count = self.pagetab_count
per_page = self.per_page
max_page = math.floor((result_count) / per_page)
if max_page <= pagetab_count:
sp = current_page
ep = sp + pagetab_count
elif current_page > 3 and max_page - 2 > current_page:
sp = current_page - 2
ep = sp + pagetab_count
elif current_page <= 3 and max_page > current_page + pagetab_count:
sp = 1
ep = sp + pagetab_count
else:
sp = max_page - pagetab_count + 1
ep = max_page + 1
for p in range(sp, ep):
x = {"key": str(p), "display_name": str(p), "current": "0"}
if p == current_page:
x.update({"current": "1"})
paginate_list.append(x)
paginate = {}
paginate.update({"list": paginate_list})
if current_page != 1:
paginate.update({"first": {"key": "1"}})
if current_page != max_page:
paginate.update({"last": {"key": str(max_page)}})
if current_page - 1 > 1:
paginate.update({"previous": {"key": str(current_page - 1)}})
if current_page + 1 <= max_page:
paginate.update({"next": {"key": str(current_page + 1)}})
return {"paginate": paginate}
| mit | 1,459,151,800,332,375,000 | 32.595745 | 75 | 0.513616 | false | 3.532438 | false | false | false |
willemt/docopt2ragel | setup.py | 1 | 1202 | from setuptools import setup, find_packages
import codecs
from os import path
here = path.abspath(path.dirname(__file__))
def long_description():
with codecs.open('README.rst', encoding='utf8') as f:
return f.read()
setup(
name='docopt2ragel',
version='0.1.3',
description='Convert your docopt usage text into a Ragel FSM',
long_description=long_description(),
# The project's main homepage.
url='https://github.com/willemt/docopt2ragel',
author='willemt',
author_email='[email protected]',
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: System :: Logging',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='development',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=['docopt'],
include_package_data=True,
package_data={
'': ['template.rl']
},
entry_points={
'console_scripts': [
'docopt2ragel = docopt2ragel.__main__:main',
],
},
)
| bsd-3-clause | -9,220,080,780,478,510,000 | 26.318182 | 66 | 0.608153 | false | 3.779874 | false | false | false |
KamLii/Databaes | Databaes/urls.py | 1 | 1369 | """Databaes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
#from account.views import UserRegistrationFormView, LoginView, logout_view
from . import views
from user_profile.views import SignupView
urlpatterns = [
url(r'^$', views.homepage, name='homepage'),
url(r'^$', views.homepage, name='home'),
url(r'^admin/', admin.site.urls),
url(r'^crate/', include('Crate.urls')),
url(r"^account/signup/$", SignupView.as_view(), name="account_signup"),
url(r"^account/", include("account.urls")),
url(r'^payments/', include('pinax.stripe.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit | 7,490,611,764,296,033,000 | 40.484848 | 79 | 0.705625 | false | 3.51928 | false | false | false |
Makki1/old-svn | avr/sketchbook/GiraRM_Debug/freebus/freebus_ets/software/freebus-ets/src/GUI/FB_ProgramFrame.py | 1 | 10920 | #!/usr/bin/
#-*- coding: iso-8859-1 -*-
#===============================================================================
# __________ ________________ __ _______
# / ____/ __ \/ ____/ ____/ __ )/ / / / ___/
# / /_ / /_/ / __/ / __/ / __ / / / /\__ \
# / __/ / _, _/ /___/ /___/ /_/ / /_/ /___/ /
# /_/ /_/ |_/_____/_____/_____/\____//____/
#
#Source File: FB_ProgramFrame.py
#Version: V0.1 , 29.08.2009
#Author: Jerome Leisner
#email: [email protected]
#===============================================================================
import os
import sys
import time
#import thread
#import Queue
#import threading
#import thread
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import pickle
import jpype
import thread
from Global import Global
from GUI import FB_DlgConnectionManager
class FB_ProgramFrame(object):
__curProject = None #project object
__cbConnections = None #widget combo connections
__bConnect = None #widget connect button
__parentClass = None #object of its own class
__curConnectionInstance = None #instance of the current connection (FB_EIBConnection)
#Devices in programming mode
__ListViewProgDevices = None #widget Tree/Listview to show devices in programming mode
__CheckTimer = None #timer object for check devices in cycle
__toggleCheckProgDevices = None
def __init__(self,curProject):
self.__parentClass = self
self.__curProject = curProject
GladeObj = gtk.glade.XML(Global.GUIPath + Global.GladeFile,"winProgramming")
dic = { "on_bConnectionConfig_clicked":self.ShowConnectionManager ,
"on_bTestConnection_clicked":self.ClickTestConnection,
"on_bConnect_toggled":self.ToggleConnect,
"on_cbConnections_changed":self.ConnectionsChanged,
"on_toggleCheckProgDevices_toggled":self.ToggleCheckProgDevices,
}
GladeObj.signal_autoconnect(dic)
#read widgets
self.__cbConnections = GladeObj.get_widget("cbConnections")
self.__bConnect = GladeObj.get_widget("bConnect")
self.__ListViewProgDevices = GladeObj.get_widget("ListViewProgDevices")
self.__toggleCheckProgDevices = GladeObj.get_widget("toggleCheckProgDevices")
#init model combobox to show connections
liststore = gtk.ListStore(str,str) #just one string at first..., 2nd string for GUID
self.__cbConnections.set_model(liststore)
self.text_cell = gtk.CellRendererText()
self.__cbConnections.pack_start(self.text_cell,True)
self.__cbConnections.add_attribute(self.text_cell, "text", 0)
#init model tree/listview to show devices in progmode
liststore = gtk.ListStore(gtk.gdk.Pixbuf, str)
self.__ListViewProgDevices.set_model(liststore)
self.text_cell = gtk.CellRendererText() #Text Object
self.img_cell = gtk.CellRendererPixbuf() #Image Object
self.column = gtk.TreeViewColumn()
self.column.pack_start(self.img_cell, False)
self.column.pack_start(self.text_cell,True)
self.column.add_attribute(self.img_cell, "pixbuf",0)
self.column.add_attribute(self.text_cell, "text", 1)
self.column.set_attributes(self.text_cell, markup=1)
self.__ListViewProgDevices.append_column(self.column)
#init timer to check devices in progmode
#self.__CheckTimer = threading.Timer(5.0, self.ReadDevicesInProgMode)
self.LoadConnectionFromDB()
self.UpdateUserConnections()
winProgramming = GladeObj.get_widget("winProgramming")
winProgramming.show()
#Dialog: Connection-Manager
def ShowConnectionManager(self,widget, data=None):
FB_DlgConnectionManager.FB_DlgConnectionManager(self.__curProject, self.__parentClass)
#button: Test-Connection
#open the current connection and test it...
def ClickTestConnection(self,widget, data=None):
pass
def ToggleConnect(self,widget, data=None):
model = self.__cbConnections.get_model()
iter = self.__cbConnections.get_active_iter()
id = model.get_value(iter,1)
self.__curConnectionInstance = self.getEIBConnection(id)
if widget.get_active() == True:
#connect
self.__curConnectionInstance.doConnect()
else:
#disconnect
self.__curConnectionInstance.doDisconnect()
self.SetConnectButtonState(widget)
#callback change combo connections
def ConnectionsChanged(self,widget, data=None):
#disconnect in case of changing the connection
if self.__curConnectionInstance <> None:
self.__curConnectionInstance.doDisconnect()
self.SetConnectButtonState(self.__bConnect)
def SetConnectButtonState(self,widget):
if self.__curConnectionInstance.isConnected() == True:
widget.set_active(True)
widget.set_label("Verbunden")
else:
widget.set_active(False)
widget.set_label("Verbinden")
#gets the instance of a FB_EIBConnection with the given id
def getEIBConnection(self,id):
RValue = None
if self.__curProject <> None:
if self.__curProject.eibConnectionList <> None:
for i in range(len(self.__curProject.eibConnectionList)):
if id == self.__curProject.eibConnectionList[i].getID():
RValue = self.__curProject.eibConnectionList[i]
break
return RValue
##function to update the combobox in parentframe to show/select for user
#@param cbConnections: widget of the combobox in parentframe which should be loaded
def UpdateUserConnections(self):
try:
#copy list in combo connections in program_Frame (parent)
if(self.__curProject <> None):# and self._MyConnection <> None):
model = self.__cbConnections.get_model()
#save id of the current connection / which is currently selected
curIter = self.__cbConnections.get_active_iter()
if curIter <> None:
idsaved = model.get_value(curIter,1) #column 1 = id
else:
idsaved = 0
model.clear()
IterSaved = None #init Iterator
for i in range(len(self.__curProject.eibConnectionList)):
Name = self.__curProject.eibConnectionList[i].getName()
typeID = self.__curProject.eibConnectionList[i].getType()
Type = str(Global.ConTypesText[typeID])
id = self.__curProject.eibConnectionList[i].getID()
tmp = Name + " mit '" + Type + "'"
iter = model.append([tmp, id])
#look if saved id is still in list and set this item to the active item
if idsaved == id:
IterSaved = iter
#connection still existing...
if IterSaved <> None:
self.__cbConnections.set_active_iter(IterSaved)
else:
if len(self.__curProject.eibConnectionList) > 0:
self.__cbConnections.set_active(0)
else:
#no connections in list or no valid project is loaded
model = self.__cbConnections.get_model()
model.clear()
except:
pass
def LoadConnectionFromDB(self):
#try:
cursor = Global.DatabaseConnection.cursor()
cursor.execute("SELECT * FROM Connections")
del self.__curProject.eibConnectionList[0:len(self.__curProject.eibConnectionList)]
for row in cursor:
tmpCon = pickle.loads(row[2]) #column 2 contains class data
self.__curProject.eibConnectionList.append(tmpCon)
#except:
# pass
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
##button to start reading Devices in progmode
##
def ToggleCheckProgDevices(self,widget,Data=None):
if widget.get_active() == True:
widget.set_label("zyklischer Suchlauf...")
self.ReadDevicesInProgMode()
#self.__CheckTimer.start()
else:
widget.set_label("Suchlauf starten")
#self.__CheckTimer.cancel()
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#section physical addresses
def ReadDevicesInProgMode(self):
#read the PA of devices in programming mode
try:
mngClient = Global.ManagementClientImpl(self.__curConnectionInstance.getKNXNetworkLink())
IndivAddrList = mngClient.readAddress(False)
model = self.__ListViewProgDevices.get_model()
model.clear()
image=gtk.gdk.pixbuf_new_from_file(Global.ImagePath + "Device.png")
for Addr in IndivAddrList:
Iterator = model.append([image,Addr.toString()])
except jpype.JavaException, ex :
error = ""
if jpype.JavaException.javaClass(ex) is Global.KNXTimeoutException:
error = U"keine Geräte im Programmiermodus : " + str(jpype.JavaException.message(ex))
elif jpype.JavaException.javaClass(ex) is Global.KNXInvalidResponseException :
error = U"ungültige Antwort beim Lesen der Addressen : " + str(jpype.JavaException.message(ex))
elif jpype.JavaException.javaClass(ex) is Global.KNXLinkClosedException:
error = U"kein geöffneter Netzwerk-Link : " + str(jpype.JavaException.message(ex))
elif jpype.JavaException.javaClass(ex) is Global.KNXRemoteException:
error = U"Fehler beim Remote-Server : " + str(jpype.JavaException.message(ex))
msgbox = gtk.MessageDialog(parent = None, buttons = gtk.BUTTONS_OK,
flags = gtk.DIALOG_MODAL, type = gtk.MESSAGE_ERROR,
message_format = error )
msgbox.set_title(Global.ERRORCONNECTIONTITLE)
#result = msgbox.run()
#msgbox.destroy()
| gpl-3.0 | 865,511,498,934,486,400 | 39.83908 | 111 | 0.554487 | false | 4.284033 | false | false | false |
rapidpro/chatpro | chatpro/profiles/models.py | 1 | 4753 | from __future__ import absolute_import, unicode_literals
from chatpro.rooms.models import Room
from dash.orgs.models import Org
from dash.utils import intersection
from dash.utils.sync import ChangeType
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from temba.types import Contact as TembaContact
from uuid import uuid4
from .tasks import push_contact_change
class AbstractParticipant(models.Model):
full_name = models.CharField(verbose_name=_("Full name"), max_length=128, null=True)
chat_name = models.CharField(verbose_name=_("Chat name"), max_length=16, null=True,
help_text=_("Shorter name used for chat messages"))
class Meta:
abstract = True
class Contact(AbstractParticipant):
"""
Corresponds to a RapidPro contact who is tied to a single room
"""
uuid = models.CharField(max_length=36, unique=True)
org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name='contacts')
room = models.ForeignKey(Room, verbose_name=_("Room"), related_name='contacts',
help_text=_("Room which this contact belongs in"))
urn = models.CharField(verbose_name=_("URN"), max_length=255)
is_active = models.BooleanField(default=True, help_text=_("Whether this contact is active"))
created_by = models.ForeignKey(User, null=True, related_name="contact_creations",
help_text="The user which originally created this item")
created_on = models.DateTimeField(auto_now_add=True,
help_text="When this item was originally created")
modified_by = models.ForeignKey(User, null=True, related_name="contact_modifications",
help_text="The user which last modified this item")
modified_on = models.DateTimeField(auto_now=True,
help_text="When this item was last modified")
@classmethod
def create(cls, org, user, full_name, chat_name, urn, room, uuid=None):
if org.id != room.org_id: # pragma: no cover
raise ValueError("Room does not belong to org")
# if we don't have a UUID, then we created this contact
if not uuid:
do_push = True
uuid = unicode(uuid4())
else:
do_push = False
# create contact
contact = cls.objects.create(org=org, full_name=full_name, chat_name=chat_name, urn=urn, room=room, uuid=uuid,
created_by=user, modified_by=user)
if do_push:
contact.push(ChangeType.created)
return contact
@classmethod
def kwargs_from_temba(cls, org, temba_contact):
org_room_uuids = [r.uuid for r in Room.get_all(org)]
room_uuids = intersection(org_room_uuids, temba_contact.groups)
room = Room.objects.get(org=org, uuid=room_uuids[0]) if room_uuids else None
if not room:
raise ValueError("No room with uuid in %s" % ", ".join(temba_contact.groups))
return dict(org=org,
full_name=temba_contact.name,
chat_name=temba_contact.fields.get(org.get_chat_name_field(), None),
urn=temba_contact.urns[0],
room=room,
uuid=temba_contact.uuid)
def as_temba(self):
temba_contact = TembaContact()
temba_contact.name = self.full_name
temba_contact.urns = [self.urn]
temba_contact.fields = {self.org.get_chat_name_field(): self.chat_name}
temba_contact.groups = [self.room.uuid]
temba_contact.uuid = self.uuid
return temba_contact
def push(self, change_type):
push_contact_change.delay(self.id, change_type)
def get_urn(self):
return tuple(self.urn.split(':', 1))
def release(self):
self.is_active = False
self.save()
self.push(ChangeType.deleted)
def as_participant_json(self):
return dict(id=self.id, type='C', full_name=self.full_name, chat_name=self.chat_name)
def __unicode__(self):
if self.full_name:
return self.full_name
elif self.chat_name:
return self.chat_name
else:
return self.get_urn()[1]
class Profile(AbstractParticipant):
"""
Extension for the user class
"""
user = models.OneToOneField(User)
change_password = models.BooleanField(default=False, help_text=_("User must change password on next login"))
def as_participant_json(self):
return dict(id=self.user_id, type='U', full_name=self.full_name, chat_name=self.chat_name)
| bsd-3-clause | 4,602,666,662,489,233,000 | 36.132813 | 118 | 0.622554 | false | 3.895902 | false | false | false |
AlandSailingRobots/sailingrobot | update_config.py | 1 | 1807 | #!/usr/bin/python3
# Updates the configuration in the json to the database
# Can run without argument for using standard file
# Or specify the file by passing it as a argument
import json
import sqlite3
import sys
if len(sys.argv) > 1:
if str(sys.argv[1]) == 'ASPire':
filename = 'config_ASPire.json'
elif str(sys.argv[1]) == 'Janet':
filename = 'config_Janet.json'
else :
filename = str(sys.argv[1])
else:
filename = 'config_ASPire.json'
print(filename)
try:
cfg = json.load(open(filename))
except FileNotFoundError:
sys.exit('Error to open the file.\nPlease enter in argument either \'ASPire\', \'Janet\' or the filepath.')
conn = sqlite3.connect('asr.db')
db = conn.cursor()
for table in cfg:
data = cfg[table]
setstr = ''
keystr = ''
valstr = ''
for key, value in cfg[table].items():
if isinstance(value, str):
value = '"' + value + '"'
else:
value = str(value)
if (setstr == ''):
setstr = key + ' = ' + value
keystr = key
valstr = value
else:
setstr = setstr + ', ' + key + ' = ' + value
keystr = keystr + ', ' + key
valstr = valstr + ', ' + value
try:
db.execute('SELECT count(*) FROM ' + str(table) + ';')
except sqlite3.OperationalError:
sys.exit('Error to retrieve the tables.\nCheck if the selected file \''+filename+'\' correspond to the current Database configuration')
count = db.fetchone()[0]
if count == 0:
db.execute('INSERT INTO ' + str(table) + ' (' + keystr +
') VALUES (' + valstr + ');')
else:
db.execute('UPDATE ' + str(table) + ' SET ' +
setstr + ' WHERE ID = 1;')
conn.commit()
db.close()
| gpl-2.0 | 8,881,442,081,132,022,000 | 28.145161 | 143 | 0.556724 | false | 3.571146 | false | false | false |
xu6148152/Binea_Python_Project | PythonCookbook/text_str/strs_and_text.py | 1 | 7706 | # !python3
import re
def test_re_split():
line = 'asdf fjdk; dfjkaf, fdjksf, jdksf, foo'
print(re.split(r'[;,\s]\s*', line))
fields = re.split(r'(;|,|\s)\s*', line)
print(fields)
values = fields[::2]
print(values)
delimiter = fields[1::2] + ['']
print(delimiter)
print(re.split(r'(?:,|;|\s)\s*', line))
def test_start_with():
filenames = ['Makefile', 'foo.c', 'bar.py', 'spam.c', 'spam.h']
print([name for name in filenames if name.endswith(('.c', '.h'))])
print(any(name.endswith('.py')) for name in filenames)
def test_fnmatch():
from fnmatch import fnmatch, fnmatchcase
print(fnmatch('foo.txt', '*.txt'))
print(fnmatchcase('foo.txt', '*.TXT'))
def test_str_match():
datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
text1 = '11/27/2012'
text2 = 'Nov 27, 2012'
m = datepat.match(text1)
print(m.group(0))
print(m.group(1))
print(m.group(2))
print(m.group(3))
print(m.groups())
text = 'Today is 11/27/2012. PyCon starts 3/13/2013'
print(datepat.findall(text))
def test_str_replace():
text = 'Today is 11/27/2012. PyCon starts 3/13/2013'
datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
print(datepat.sub(r'\3-\1-\2', text))
print(datepat.sub(change_date, text))
def change_date(m):
from calendar import month_abbr
mon_name = month_abbr[int(m.group(1))]
return '{} {} {}'.format(m.group(2), mon_name, m.group(3))
def test_unicode():
s1 = 'Spicy Jalape\u00f1o'
s2 = 'Spicy Jalapen\u0303o'
s3 = 'Spicy Jalape\xf1o'
import unicodedata
# NFC表示字符整体组成
t1 = unicodedata.normalize('NFC', s1)
t2 = unicodedata.normalize('NFC', s2)
# NFD表示字符分解多个表示
t3 = unicodedata.normalize('NFD', s3)
print(t1)
print(t2)
print(t3)
def test_strip():
s = ' Hello world \n'
print(s.strip())
t = '--------------hello========'
print(t.strip('-='))
def test_translate():
import unicodedata
import sys
digitmap = {c: ord('0') + unicodedata.digit(chr(c))
for c in range(sys.maxunicode)
if unicodedata.category(chr(c)) == 'Nd'}
x = '\u0661\u0662\u0663'
print(x.translate(digitmap))
def test_just():
text = 'Hello World'
print(text.ljust(20, '='))
print(text.rjust(20))
print(text.center(20, '*'))
print(format(text, '=>20'))
print(format(text, '*^20'))
print('{:>10s} {:>10s}'.format('Hello', 'World'))
def test_join():
parts = ['Is', 'Chicago', 'Not', 'Chicago?']
print(' '.join(parts))
print(','.join(parts))
print(''.join(parts))
a = 'Is Chicago'
b = 'Not Chicago'
c = 'None'
print(a + ' ' + b)
print('Hello' 'World')
date = ['ACME', 50, 91.1]
print(','.join(str(d) for d in date))
print(a, b, c, sep=':')
def test_format():
s = '{name} has {n} message'
print(s.format(name='Guido', n=37))
name = 'Guido'
# n = 37
# print(s.format_map(vars()))
print(s.format_map(SafeSub(vars())))
print(sub('Hello {name}'))
print(sub('You have {n} messages.'))
class SafeSub(dict):
def __missing__(self, key):
return '{' + key + '}'
def sub(text):
import sys
return text.format_map(SafeSub(sys._getframe(1).f_locals))
def test_textwrap():
s = "Look into my eyes, look into my eyes, the eyes, the eyes, " \
"the eyes, not around the eyes, don't look around the eyes," \
"look into my eyes, you're under"
import textwrap
print(textwrap.fill(s, 40, initial_indent=' '))
print(textwrap.fill(s, 40, subsequent_indent=' '))
# os.get_terminal_size().columns
def generate_tokens(pat, text):
from collections import namedtuple
Token = namedtuple('Token', ['type', 'value'])
scanner = pat.scanner(text)
for m in iter(scanner.match, None):
yield Token(m.lastgroup, m.group())
def test_bin_text():
a = b'Hello World'
print(a)
print(a[0])
print(a.decode('ascii'))
def test_gz_file():
import gzip
with gzip.open('somefile.gz', 'rt') as f:
text = f.read()
print(text)
def test_gz_file():
import bz2
with bz2.open('somefile.bz2', 'rt') as f:
text = f.read()
print(text)
def test_partial_file():
from functools import partial
RECORD_SIZE = 32
with open('somefile.data', 'rb') as f:
records = iter(partial(f.read, RECORD_SIZE), b'')
def read_into_buffer(filename):
import os.path
buf = bytearray(os.path.getsize(filename))
with open(filename, 'rb') as f:
f.readinto(buf)
return buf
def test_buffer():
with open('sample.bin', 'wb') as f:
f.write(b'Hello World')
buf = read_into_buffer('sample.bin')
print(buf)
print(buf[0:5])
m1 = memoryview(buf)
m2 = m1[-5:]
print(m2)
m2[:] = b'WORLD'
print(buf)
import os
import mmap
def memory_map(filename, access=mmap.ACCESS_WRITE):
size = os.path.getsize(filename)
fd = os.open(filename, os.O_RDWR)
return mmap.mmap(fd, size, access=access)
def test_mmap():
size = 1000000
with open('data', 'wb') as f:
f.seek(size - 1)
f.write(b'\x00')
m = memory_map('data')
print(len(m))
print(m[0:10])
print(m[0])
m[0:11] = b'Hello World'
m.close()
with open('data', 'rb') as f:
print(f.read(11))
def test_filepath():
import os
path = os.path.abspath('.')
print(os.path.basename(path))
print(os.path.dirname(path))
print(os.path.join('tmp', 'data', os.path.basename(path)))
print(os.path.expanduser(path))
print(os.path.split(path))
def test_file_exist():
print(os.path.exists('.'))
print(os.path.isfile('xt.bin'))
print(os.path.isdir(os.path.dirname(os.path.abspath('.'))))
print(os.path.islink('.'))
print(os.path.getsize('.'))
def test_file_list():
print(os.listdir('.'))
from fnmatch import fnmatch
pyfiles = [name for name in os.listdir('.') if fnmatch(name, '*.py')]
print(pyfiles)
import glob
print(glob.glob('./*.py'))
import time
name_sz_date = [(name, os.path.getsize(name), os.path.getmtime(name)) for name in pyfiles]
for name, size, mtime in name_sz_date:
try:
print(name, size, time.ctime(mtime))
except UnicodeEncodeError:
print(bad_filename(name))
def test_filename_encode():
import sys
print(sys.getfilesystemencoding())
def bad_filename(filename):
return repr(filename)[1:-1]
def test_write_bin_file():
import sys
sys.stdout.buffer.write(b'Hello\n')
def test_tempfile():
from tempfile import TemporaryFile
from tempfile import NamedTemporaryFile
with TemporaryFile('w+t') as f:
f.write('Hello World')
f.write('Testing\n')
f.seek(0)
data = f.read()
with NamedTemporaryFile('w+t') as f:
print('filename is:', f.name)
def test_serial():
import pickle
data = 'Hello, World'
f = open('somefile', 'wb')
pickle.dump(data, f)
f = open('somefile', 'rb')
data = pickle.load(f)
print(data)
f = open('somedata', 'wb')
pickle.dump([1, 2, 3, 4], f)
pickle.dump('hello', f)
pickle.dump({'Apple', 'Pear', 'Banana'}, f)
f.close()
f = open('somedata', 'rb')
print(pickle.load(f))
print(pickle.load(f))
print(pickle.load(f))
def test_countdown():
from class_object import countdown
c = countdown.Countdown(30)
print(c)
f = open('cstate.p', 'wb')
import pickle
pickle.dump(c, f)
f.close()
f = open('cstate.p', 'rb')
print(pickle.load(f))
if __name__ == '__main__':
test_countdown() | mit | 9,049,429,624,892,995,000 | 22.530675 | 94 | 0.580574 | false | 3.023256 | true | false | false |
andrewderekjackson/python_lcd_menu | lcd_menu/menu.py | 1 | 4202 | import os
class MenuItem(object):
'''A single menu item which can contain child menu items'''
def __init__(self, title, items=None, refresh_callback=None, refresh_callback_args = None):
self._title = title
self._items = items
self._refresh_callback = refresh_callback
self._refresh_callback_args = refresh_callback_args
@property
def title(self):
return self._title
@property
def items(self):
return self._items
def refresh(self):
if self._refresh_callback is not None:
self._items = self._refresh_callback(self, self._refresh_callback_args)
class Command(MenuItem):
'''A single menu item which executes a callback when selected'''
def __init__(self, title, command, arg=None):
MenuItem.__init__(self, title, None)
self._command = command
self._arg = arg
def invoke_command(self):
if self._command is not None:
self._command(self, self._arg)
return True
return False
def refresh(self):
pass
class MenuView(object):
'''Represents a current menu level and tracks the selected item'''
def __init__(self, items):
self._selected_index = 0
self._items = items
@property
def selected_index(self):
return self._selected_index
@selected_index.setter
def selected_index(self, val):
if val >= len(self._items):
self._selected_index = len(self._items)-1
else:
if val > 0:
self._selected_index = val
else:
self._selected_index = 0
@property
def items(self):
return self._items
def down(self):
self.selected_index += 1
def up(self):
self.selected_index -= 1
def refresh(self):
self.selected_item.refresh()
@property
def selected_item(self):
return self._items[self._selected_index]
class Menu(object):
'''Base menu controller responsible for managing the menu'''
def __init__(self, items, update):
self._history = []
self.main_menu = MenuView(items)
self.current_menu = self.main_menu
self.update = update
self.showing_menu = False
# start with the menu closed
self.close()
def menu(self):
"""
Shows the main menu
"""
self.current_menu = self.main_menu
self.showing_menu = True
self.update(self.current_menu)
def up(self):
"""
Navigates up in the menu
"""
self.current_menu.up()
self.update(self.current_menu)
def down(self):
"""
Navigates down in the menu
"""
self.current_menu.down()
self.update(self.current_menu)
def select(self):
"""
Selects the current menu. Either enters a submenu or invokes the command
"""
if isinstance(self.current_menu.selected_item, Command):
self.current_menu.selected_item.invoke_command()
return
if isinstance(self.current_menu.selected_item, MenuItem):
self.current_menu.selected_item.refresh()
if self.current_menu.selected_item.items is not None:
# add current menu to history
self._history.append(self.current_menu)
self.current_menu = MenuView(self.current_menu.selected_item.items)
self.update(self.current_menu)
def back(self):
"""
Returns back to a previous menu
"""
if len(self._history) > 0:
self.current_menu = self._history.pop()
self.update(self.current_menu)
else:
self.close()
def show(self):
"""
Shows the main menu
"""
self.current_menu = self.main_menu
self.showing_menu = True
self.update(self.current_menu)
def close(self):
"""
Closes the menu.
"""
self.current_menu = None
self.showing_menu = False
self.update(self.current_menu)
pass
def update(self):
pass
| mit | 5,973,343,013,663,982,000 | 24.011905 | 95 | 0.565445 | false | 4.210421 | false | false | false |
daniel-kurushin/iisu | biu/khc.py | 1 | 6304 | import sys
from struct import pack, unpack
from time import sleep
class KHC(object):
NAME = 'KHC'
cmd_inc_engine = b'\xae\xae\x01\x00\x01\x08\x00' # увеличить обороты и подтвердить результат
cmd_dec_engine = b'\xae\xae\x01\x00\x02\x08\x00' # уменьшить обороты и подтвердить результат
cmd_stop_engine = b'\xae\xae\x01\x00\x07\x07\x00' # остановка
cmd_get_distances = b'\xae\xae\x01\x00\x08\x07\x00' # вернуть расстояния от дальномеров
cmd_get_encoders = b'\xae\xae\x01\x00\x09\x07\x00' # энкодеры колес
cmd_reverse = b'\xae\xae\x01\x00\x0a\x08\x00' # вкл-выкл реверса
cmd_brakes = b'\xae\xae\x01\x00\x11\x0a\x00' # тормоза
# | | +--- правый 0 - выкл, 1 - вкл
# | +----- левый
# +------- передний
cmd_get_state = b'\xae\xae\x01\x00\xff\x07\x00' # вернуть состояние КХЧ
# currentAccelPos - обороты
# is_frw_brake ff - вкл передний тормоз 00 - выкл
# is_lgt_brake ff - вкл левый тормоз 00 - выкл
# is_rgt_brake ff - вкл правый тормоз 00 - выкл
# is_reverse ff - вкл реверс 00 - выкл
# enc_sec - срабатываний энкодера в сек
# enc_min - срабатываний энкодера в мин
currentAccelPos = 0
def parse_distances(self, x):
return dict(
ok = True,
rear = int(unpack('<B', x[3:4])[0]) * 128.0 / 58.0 / 100.0,
left = int(unpack('<B', x[4:5])[0]) * 128.0 / 58.0 / 100.0,
front = int(unpack('<B', x[5:6])[0]) * 128.0 / 58.0 / 100.0,
right = int(unpack('<B', x[6:7])[0]) * 128.0 / 58.0 / 100.0,
)
def parse_engine(self, x):
return dict(
ok = True,
currentAccelPos = int(unpack('<b', x[3:4])[0]),
)
def parse_reverse(self, x):
return dict(
ok = True,
is_reverse = bool(unpack('<b', x[3:4])[0]),
)
def parse_brakes(self, x):
return dict(
ok = True,
is_frw_brake = bool(unpack('<b', x[3:4])[0]),
is_lgt_brake = bool(unpack('<b', x[4:5])[0]),
is_rgt_brake = bool(unpack('<b', x[5:6])[0]),
)
def parse_encoders(self, x):
return dict(a=0)
def parse_state(self, x):
return dict(
ok = True,
currentAccelPos = int(unpack('<b', x[3: 4])[0]),
is_frw_brake = bool(unpack('<b', x[4: 5])[0]),
is_lgt_brake = bool(unpack('<b', x[5: 6])[0]),
is_rgt_brake = bool(unpack('<b', x[6: 7])[0]),
is_reverse = bool(unpack('<b', x[7: 8])[0]),
enc_sec = int(unpack('<b', x[8: 9])[0]),
enc_min = int(unpack('<b', x[9:10])[0]),
)
def inc_engine(self):
cmd = self.cmd_inc_engine
v = pack('>b', 1)
print('>>>', cmd, v, file = sys.stderr)
self.port.write(cmd)
self.port.write(v)
ret = self.port.read(4)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 4
self.currentAccelPos += 1
return self.parse_engine(ret)
def dec_engine(self):
cmd = self.cmd_dec_engine
v = pack('>b', 1)
print('>>>', cmd, v, file = sys.stderr)
self.port.write(cmd)
self.port.write(v)
ret = self.port.read(4)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 4
self.currentAccelPos -= 1
return self.parse_engine(ret)
def gooo(self, req_acc_pos = 31, rgt_brk = 0, lgt_brk = 0):
backward_needed = req_acc_pos < 0
acc_pos = abs(req_acc_pos)
stop_needed = acc_pos == 0
self.state = self.get_state()
self.brakes(rgt = rgt_brk, lgt = lgt_brk, frw = 0)
if self.state['is_reverse'] != backward_needed and backward_needed:
print(backward_needed, self.state['is_reverse'])
self.reverse(1)
if self.state['is_reverse'] != backward_needed and not backward_needed: self.reverse(0)
self.state = self.get_state()
D = int(acc_pos - self.state['currentAccelPos'])
if D > 0: f = self.inc_engine
else: f = self.dec_engine
for i in range(abs(D)): f()
_ = self.get_state()
pos = _['currentAccelPos']
if _['is_reverse']: pos = -1 * pos
return dict(
ok = pos == req_acc_pos,
requiredAccelPos = req_acc_pos,
currentAccelPos = pos,
)
def stop_engine(self):
cmd = self.cmd_stop_engine
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
ret = self.port.read(4)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 4
self.currentAccelPos = 0
return self.parse_engine(ret)
def reverse(self, v = 1):
cmd = self.cmd_reverse
v = pack('>b', v)
print('>>>', cmd, v, file = sys.stderr)
self.port.write(cmd)
self.port.write(v)
ret = self.port.read(4)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 4
return self.parse_reverse(ret)
def brakes(self, rgt = 0, lgt = 0, frw = 1):
cmd = self.cmd_brakes
rgt = pack('>b', rgt)
lgt = pack('>b', lgt)
frw = pack('>b', frw)
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
self.port.write(frw)
self.port.write(rgt)
self.port.write(lgt)
ret = self.port.read(6)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 6
return self.parse_brakes(ret)
def get_encoders(self):
cmd = self.cmd_get_distances
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
ret = self.port.read(7)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 7
return self.parse_encoders(ret)
def get_state(self):
cmd = self.cmd_get_state
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
ret = self.port.read(10)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 10
return self.parse_state(ret)
def get_distances(self):
cmd = self.cmd_get_distances
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
ret = self.port.read(7)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 7
return self.parse_distances(ret)
def __init__(self, port = None):
if port != None:
self.port = port
else:
raise Exception('port is None')
self.state = self.get_state()
if __name__ == "__main__":
from biu import BIU
khc = KHC(BIU())
print(khc.get_distances())
# print(khc.gooo(31))
# sleep(6)
# print(khc.gooo(-31))
# sleep(6)
print(khc.stop_engine())
| gpl-3.0 | 1,060,215,723,341,621,400 | 28.781095 | 96 | 0.589208 | false | 2.267424 | false | false | false |
markgw/jazzparser | src/jazzparser/utils/latex.py | 1 | 1979 | """Latex output utility functions to help with producing valid Latex files.
Utility functions for handling processing and output of Latex.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <[email protected]>"
def filter_latex(text):
"""
Applies necessary filters to Latex text before outputting. Mainly
involves escaping strings.
"""
text = text.replace("#","\\#")
text = text.replace("%","\\%")
text = text.replace("_", "\\_")
return text
def start_document(title=None, author=None, packages=[], options=[], toc=False):
output = ""
output += "\\documentclass[%s]{article}\n" % ",".join(options+['a4paper'])
for package in packages:
output += "\\usepackage{%s}\n" % package
output += "\\begin{document}\n"
if title is not None:
output += "\\title{%s}\n" % title
if author is not None:
output += "\\author{%s}\n" % author
else:
output += "\\author{}\n"
output += "\\maketitle\n"
if toc:
output += "\\tableofcontents\n"
return output
| gpl-3.0 | -1,280,402,752,457,710,600 | 34.339286 | 80 | 0.622537 | false | 4.014199 | false | false | false |
landier/imdb-crawler | crawler/libs/sqlalchemy/orm/deprecated_interfaces.py | 1 | 21785 | # orm/deprecated_interfaces.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy import event, util
from .interfaces import EXT_CONTINUE
class MapperExtension(object):
"""Base implementation for :class:`.Mapper` event hooks.
.. note::
:class:`.MapperExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.MapperEvents`.
New extension classes subclass :class:`.MapperExtension` and are specified
using the ``extension`` mapper() argument, which is a single
:class:`.MapperExtension` or a list of such::
from sqlalchemy.orm.interfaces import MapperExtension
class MyExtension(MapperExtension):
def before_insert(self, mapper, connection, instance):
print "instance %s before insert !" % instance
m = mapper(User, users_table, extension=MyExtension())
A single mapper can maintain a chain of ``MapperExtension``
objects. When a particular mapping event occurs, the
corresponding method on each ``MapperExtension`` is invoked
serially, and each method has the ability to halt the chain
from proceeding further::
m = mapper(User, users_table, extension=[ext1, ext2, ext3])
Each ``MapperExtension`` method returns the symbol
EXT_CONTINUE by default. This symbol generally means "move
to the next ``MapperExtension`` for processing". For methods
that return objects like translated rows or new object
instances, EXT_CONTINUE means the result of the method
should be ignored. In some cases it's required for a
default mapper activity to be performed, such as adding a
new instance to a result list.
The symbol EXT_STOP has significance within a chain
of ``MapperExtension`` objects that the chain will be stopped
when this symbol is returned. Like EXT_CONTINUE, it also
has additional significance in some cases that a default
mapper activity will not be performed.
"""
@classmethod
def _adapt_instrument_class(cls, self, listener):
cls._adapt_listener_methods(self, listener, ('instrument_class',))
@classmethod
def _adapt_listener(cls, self, listener):
cls._adapt_listener_methods(
self, listener,
(
'init_instance',
'init_failed',
'translate_row',
'create_instance',
'append_result',
'populate_instance',
'reconstruct_instance',
'before_insert',
'after_insert',
'before_update',
'after_update',
'before_delete',
'after_delete'
))
@classmethod
def _adapt_listener_methods(cls, self, listener, methods):
for meth in methods:
me_meth = getattr(MapperExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
if meth == 'reconstruct_instance':
def go(ls_meth):
def reconstruct(instance, ctx):
ls_meth(self, instance)
return reconstruct
event.listen(self.class_manager, 'load',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_instance':
def go(ls_meth):
def init_instance(instance, args, kwargs):
ls_meth(self, self.class_,
self.class_manager.original_init,
instance, args, kwargs)
return init_instance
event.listen(self.class_manager, 'init',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_failed':
def go(ls_meth):
def init_failed(instance, args, kwargs):
util.warn_exception(ls_meth, self, self.class_,
self.class_manager.original_init,
instance, args, kwargs)
return init_failed
event.listen(self.class_manager, 'init_failure',
go(ls_meth), raw=False, propagate=True)
else:
event.listen(self, "%s" % meth, ls_meth,
raw=False, retval=True, propagate=True)
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed, and has
applied instrumentation to the mapped class.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when it's constructor is called.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when it's constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def translate_row(self, mapper, context, row):
"""Perform pre-processing on the given result row and return a
new row instance.
This is called when the mapper first receives a row, before
the object identity or the instance itself has been derived
from that row. The given row may or may not be a
``RowProxy`` object - it will always be a dictionary-like
object which contains mapped columns as keys. The
returned object should also be a dictionary-like object
which recognizes mapped columns as keys.
If the ultimate return value is EXT_CONTINUE, the row
is not translated.
"""
return EXT_CONTINUE
def create_instance(self, mapper, selectcontext, row, class_):
"""Receive a row when a new object instance is about to be
created from that row.
The method can choose to create the instance itself, or it can return
EXT_CONTINUE to indicate normal object creation should take place.
mapper
The mapper doing the operation
selectcontext
The QueryContext generated from the Query.
row
The result row from the database
class\_
The class we are mapping.
return value
A new object instance, or EXT_CONTINUE
"""
return EXT_CONTINUE
def append_result(self, mapper, selectcontext, row, instance,
result, **flags):
"""Receive an object instance before that instance is appended
to a result list.
If this method returns EXT_CONTINUE, result appending will proceed
normally. if this method returns any other value or None,
result appending will not proceed for this instance, giving
this extension an opportunity to do the appending itself, if
desired.
mapper
The mapper doing the operation.
selectcontext
The QueryContext generated from the Query.
row
The result row from the database.
instance
The object instance to be appended to the result.
result
List to which results are being appended.
\**flags
extra information about the row, same as criterion in
``create_row_processor()`` method of
:class:`~sqlalchemy.orm.interfaces.MapperProperty`
"""
return EXT_CONTINUE
def populate_instance(self, mapper, selectcontext, row,
instance, **flags):
"""Receive an instance before that instance has
its attributes populated.
This usually corresponds to a newly loaded instance but may
also correspond to an already-loaded instance which has
unloaded attributes to be populated. The method may be called
many times for a single instance, as multiple result rows are
used to populate eagerly loaded collections.
If this method returns EXT_CONTINUE, instance population will
proceed normally. If any other value or None is returned,
instance population will not proceed, giving this extension an
opportunity to populate the instance itself, if desired.
.. deprecated:: 0.5
Most usages of this hook are obsolete. For a
generic "object has been newly created from a row" hook, use
``reconstruct_instance()``, or the ``@orm.reconstructor``
decorator.
"""
return EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_insert(self, mapper, connection, instance):
"""Receive an object instance before that instance is inserted
into its table.
This is a good place to set up primary key values and such
that aren't handled otherwise.
Column-based attributes can be modified within this method
which will result in the new value being inserted. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_insert(self, mapper, connection, instance):
"""Receive an object instance after that instance is inserted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_update(self, mapper, connection, instance):
"""Receive an object instance before that instance is updated.
Note that this method is called for all instances that are marked as
"dirty", even those which have no net changes to their column-based
attributes. An object is marked as dirty when any of its column-based
attributes have a "set attribute" operation called or when any of its
collections are modified. If, at update time, no column-based
attributes have any net changes, no UPDATE statement will be issued.
This means that an instance being sent to before_update is *not* a
guarantee that an UPDATE statement will be issued (although you can
affect the outcome here).
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
Column-based attributes can be modified within this method
which will result in the new value being updated. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_update(self, mapper, connection, instance):
"""Receive an object instance after that instance is updated.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_delete(self, mapper, connection, instance):
"""Receive an object instance before that instance is deleted.
Note that *no* changes to the overall flush plan can be made
here; and manipulation of the ``Session`` will not have the
desired effect. To manipulate the ``Session`` within an
extension, use ``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
"""Receive an object instance after that instance is deleted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
class SessionExtension(object):
"""Base implementation for :class:`.Session` event hooks.
.. note::
:class:`.SessionExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.SessionEvents`.
Subclasses may be installed into a :class:`.Session` (or
:func:`.sessionmaker`) using the ``extension`` keyword
argument::
from sqlalchemy.orm.interfaces import SessionExtension
class MySessionExtension(SessionExtension):
def before_commit(self, session):
print "before commit!"
Session = sessionmaker(extension=MySessionExtension())
The same :class:`.SessionExtension` instance can be used
with any number of sessions.
"""
@classmethod
def _adapt_listener(cls, self, listener):
for meth in [
'before_commit',
'after_commit',
'after_rollback',
'before_flush',
'after_flush',
'after_flush_postexec',
'after_begin',
'after_attach',
'after_bulk_update',
'after_bulk_delete',
]:
me_meth = getattr(SessionExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
event.listen(self, meth, getattr(listener, meth))
def before_commit(self, session):
"""Execute right before commit is called.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_commit(self, session):
"""Execute after a commit has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_rollback(self, session):
"""Execute after a rollback has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def before_flush( self, session, flush_context, instances):
"""Execute before flush process has started.
`instances` is an optional list of objects which were passed to
the ``flush()`` method. """
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes."""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction. """
def after_begin( self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
`transaction` is the SessionTransaction. This method is called
after an engine level transaction is begun on a connection. """
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge. """
def after_bulk_update( self, session, query, query_context, result):
"""Execute after a bulk update operation to the session.
This is called after a session.query(...).update()
`query` is the query object that this update operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
def after_bulk_delete( self, session, query, query_context, result):
"""Execute after a bulk delete operation to the session.
This is called after a session.query(...).delete()
`query` is the query object that this delete operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
class AttributeExtension(object):
"""Base implementation for :class:`.AttributeImpl` event hooks, events
that fire upon attribute mutations in user code.
.. note::
:class:`.AttributeExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.AttributeEvents`.
:class:`.AttributeExtension` is used to listen for set,
remove, and append events on individual mapped attributes.
It is established on an individual mapped attribute using
the `extension` argument, available on
:func:`.column_property`, :func:`.relationship`, and
others::
from sqlalchemy.orm.interfaces import AttributeExtension
from sqlalchemy.orm import mapper, relationship, column_property
class MyAttrExt(AttributeExtension):
def append(self, state, value, initiator):
print "append event !"
return value
def set(self, state, value, oldvalue, initiator):
print "set event !"
return value
mapper(SomeClass, sometable, properties={
'foo':column_property(sometable.c.foo, extension=MyAttrExt()),
'bar':relationship(Bar, extension=MyAttrExt())
})
Note that the :class:`.AttributeExtension` methods
:meth:`~.AttributeExtension.append` and
:meth:`~.AttributeExtension.set` need to return the
``value`` parameter. The returned value is used as the
effective value, and allows the extension to change what is
ultimately persisted.
AttributeExtension is assembled within the descriptors associated
with a mapped class.
"""
active_history = True
"""indicates that the set() method would like to receive the 'old' value,
even if it means firing lazy callables.
Note that ``active_history`` can also be set directly via
:func:`.column_property` and :func:`.relationship`.
"""
@classmethod
def _adapt_listener(cls, self, listener):
event.listen(self, 'append', listener.append,
active_history=listener.active_history,
raw=True, retval=True)
event.listen(self, 'remove', listener.remove,
active_history=listener.active_history,
raw=True, retval=True)
event.listen(self, 'set', listener.set,
active_history=listener.active_history,
raw=True, retval=True)
def append(self, state, value, initiator):
"""Receive a collection append event.
The returned value will be used as the actual value to be
appended.
"""
return value
def remove(self, state, value, initiator):
"""Receive a remove event.
No return value is defined.
"""
pass
def set(self, state, value, oldvalue, initiator):
"""Receive a set event.
The returned value will be used as the actual value to be
set.
"""
return value
| gpl-3.0 | 4,175,316,921,779,378,000 | 35.923729 | 84 | 0.629653 | false | 4.954514 | false | false | false |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/netdisco/discoverables/__init__.py | 1 | 5004 | """Provides helpful stuff for discoverables."""
# pylint: disable=abstract-method
import ipaddress
from urllib.parse import urlparse
from ..const import (
ATTR_NAME, ATTR_MODEL_NAME, ATTR_HOST, ATTR_PORT, ATTR_SSDP_DESCRIPTION,
ATTR_SERIAL, ATTR_MODEL_NUMBER, ATTR_HOSTNAME, ATTR_MAC_ADDRESS,
ATTR_PROPERTIES)
class BaseDiscoverable(object):
"""Base class for discoverable services or device types."""
def is_discovered(self):
"""Return True if it is discovered."""
return len(self.get_entries()) > 0
def get_info(self):
"""Return a list with the important info for each item.
Uses self.info_from_entry internally.
"""
return [self.info_from_entry(entry) for entry in self.get_entries()]
# pylint: disable=no-self-use
def info_from_entry(self, entry):
"""Return an object with important info from the entry."""
return entry
# pylint: disable=no-self-use
def get_entries(self):
"""Return all the discovered entries."""
raise NotImplementedError()
class SSDPDiscoverable(BaseDiscoverable):
"""uPnP discoverable base class."""
def __init__(self, netdis):
"""Initialize SSDPDiscoverable."""
self.netdis = netdis
def info_from_entry(self, entry):
"""Get most important info, by default the description location."""
url = urlparse(entry.location)
info = {
ATTR_HOST: url.hostname,
ATTR_PORT: url.port,
ATTR_SSDP_DESCRIPTION: entry.location
}
device = entry.description.get('device')
if device:
info[ATTR_NAME] = device.get('friendlyName')
info[ATTR_MODEL_NAME] = device.get('modelName')
info[ATTR_MODEL_NUMBER] = device.get('modelNumber')
info[ATTR_SERIAL] = device.get('serialNumber')
return info
# Helper functions
# pylint: disable=invalid-name
def find_by_st(self, st):
"""Find entries by ST (the device identifier)."""
return self.netdis.ssdp.find_by_st(st)
def find_by_device_description(self, values):
"""Find entries based on values from their description."""
return self.netdis.ssdp.find_by_device_description(values)
class MDNSDiscoverable(BaseDiscoverable):
"""mDNS Discoverable base class."""
def __init__(self, netdis, typ):
"""Initialize MDNSDiscoverable."""
self.netdis = netdis
self.typ = typ
self.services = {}
netdis.mdns.register_service(self)
def reset(self):
"""Reset found services."""
self.services.clear()
def is_discovered(self):
"""Return True if any device has been discovered."""
return len(self.get_entries()) > 0
# pylint: disable=unused-argument
def remove_service(self, zconf, typ, name):
"""Callback when a service is removed."""
self.services.pop(name, None)
def add_service(self, zconf, typ, name):
"""Callback when a service is found."""
service = None
tries = 0
while service is None and tries < 3:
service = zconf.get_service_info(typ, name)
tries += 1
if service is not None:
self.services[name] = service
def get_entries(self):
"""Return all found services."""
return self.services.values()
def info_from_entry(self, entry):
"""Return most important info from mDNS entries."""
properties = {}
for key, value in entry.properties.items():
if isinstance(value, bytes):
value = value.decode('utf-8')
properties[key.decode('utf-8')] = value
info = {
ATTR_HOST: str(ipaddress.ip_address(entry.address)),
ATTR_PORT: entry.port,
ATTR_HOSTNAME: entry.server,
ATTR_PROPERTIES: properties,
}
if "mac" in properties:
info[ATTR_MAC_ADDRESS] = properties["mac"]
return info
def find_by_device_name(self, name):
"""Find entries based on the beginning of their entry names."""
return [entry for entry in self.services.values()
if entry.name.startswith(name)]
class GDMDiscoverable(BaseDiscoverable):
"""GDM discoverable base class."""
def __init__(self, netdis):
"""Initialize GDMDiscoverable."""
self.netdis = netdis
def info_from_entry(self, entry):
"""Get most important info, by default the description location."""
return {
ATTR_HOST: entry.values['location'],
ATTR_PORT: entry.values['port'],
}
def find_by_content_type(self, value):
"""Find entries based on values from their content_type."""
return self.netdis.gdm.find_by_content_type(value)
def find_by_data(self, values):
"""Find entries based on values from any returned field."""
return self.netdis.gdm.find_by_data(values)
| gpl-2.0 | -675,812,168,902,687,900 | 30.275 | 76 | 0.609712 | false | 4.074919 | false | false | false |
jsidabras/GA-PMR | HFSS-loadbest.py | 1 | 1771 | # ----------------------------------------------
# Script Written by Jason W. Sidabras ([email protected])
# requires jsidabras/hycohanz as of 20-04-2017
# Loads a file with a list of 1s and 0s and implements it to HFSS as Silv/Vac
# used to load the best results per generation or final
# ----------------------------------------------
from random import *
import argparse
import hycohanz as hfss
[oAnsoftApp, oDesktop] = hfss.setup_interface()
oProject = oDesktop.SetActiveProject("GA_PlanarResonator")
oDesign = hfss.set_active_design(oProject, 'HFSSDesign1')
oEditor = hfss.set_active_editor(oDesign)
oFieldsReporter = hfss.get_module(oDesign, 'FieldsReporter')
parser = argparse.ArgumentParser(description='Load GA best file and run solution in HFSS.')
parser.add_argument('file', type=str, help='the filename to load')
args = parser.parse_args()
f = open(args.file, 'r')
loadthing = f.readline()
f.close()
dump = loadthing.strip("[")
dump = dump.rstrip()
dump = dump.strip(r"']").split(", ")
thing = []
for i in dump:
thing.append(int(i))
print(len(dump))
index = 0
Vac = []
Silv = []
for i in thing:
if i == 1:
Silv.append("Elm_"+str(index))
index += 1
else:
Vac.append("Elm_"+str(index))
index += 1
oDesktop.ClearMessages("", "", 3)
# Check if list is empty
if Vac:
hfss.assign_White(oEditor, Vac)
hfss.assign_material(oEditor, Vac, MaterialName="vacuum", SolveInside=True)
if Silv:
hfss.assign_Orange(oEditor, Silv)
hfss.assign_material(oEditor, Silv, MaterialName="pec", SolveInside=False)
oDesktop.ClearMessages("", "", 3)
# try:
#oDesign.Analyze("Setup1")
# except:
# print("Simulation Error")
#oProject.Save()
| mit | 856,297,286,986,434,200 | 27.032787 | 91 | 0.629588 | false | 2.917628 | false | false | false |
eeriks/velo.lv | velo/payment/forms.py | 1 | 19328 | from django import forms
from django.contrib import messages
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from crispy_forms.layout import Layout, Div, HTML, Field
from crispy_forms.helper import FormHelper
from velo.payment.models import ActivePaymentChannel, Payment, DiscountCode
from velo.payment.utils import create_application_invoice, create_bank_transaction, create_team_invoice, \
approve_payment
from velo.payment.widgets import PaymentTypeWidget, DoNotRenderWidget
from velo.registration.models import Application
from velo.velo.mixins.forms import RequestKwargModelFormMixin, GetClassNameMixin
from velo.velo.utils import load_class
class ApplicationPayUpdateForm(GetClassNameMixin, RequestKwargModelFormMixin, forms.ModelForm):
accept_terms = forms.BooleanField(label=_("I confirm, that: the competition organizers are not responsible for possible injuries of participants, during the competition; my health condition corresponds to the selected distance; I will definitely use a fastened helmet and will observe road traffic regulations and competition regulations; I agree with the conditions for participation in the competition, mentioned in the regulations; I am informed, that the paid participation fee will not be returned and the participant’s starting number shall not be transferred to any other person."),
required=True)
accept_inform_participants = forms.BooleanField(label=_("I will inform all registered participants about rules."),
required=True)
accept_insurance = forms.BooleanField(label="", required=False)
discount_code = forms.CharField(label=_("Discount code"), required=False)
payment_type = forms.ChoiceField(choices=(), label="", widget=PaymentTypeWidget)
prepend = 'payment_'
participants = None
success_url = None
class Meta:
model = Application
fields = ('discount_code', 'company_name', 'company_vat', 'company_regnr', 'company_address', 'company_juridical_address',
'invoice_show_names', 'donation')
widgets = {
'donation': DoNotRenderWidget, # We will add field manually
}
def _post_clean(self):
super()._post_clean()
if not bool(self.errors):
try:
instance = self.instance
instance.set_final_price() # if donation have changed, then we need to recalculate,
# because instance is not yet saved and it means,
# that this function on model is not yet run.
if instance.final_price == 0:
payment = Payment.objects.create(content_object=instance,
total=instance.final_price,
status=Payment.STATUSES.ok,
competition=instance.competition)
approve_payment(payment, self.request.user, self.request)
self.success_url = reverse('application_ok', kwargs={'slug': instance.code})
else:
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
if active_payment_type.payment_channel.is_bill:
create_application_invoice(instance, active_payment_type)
self.success_url = reverse('application_ok', kwargs={'slug': instance.code})
messages.success(self.request,
_('Invoice successfully created and sent to %(email)s') % {'email': instance.email})
else:
self.success_url = create_bank_transaction(instance, active_payment_type, self.request)
except:
# TODO We need to catch exception and log it to sentry
self._errors['payment_type'] = self.error_class([_("Error in connection with bank. Try again later.")])
def save(self, commit=True):
instance = super(ApplicationPayUpdateForm, self).save(commit=False)
if self.request:
instance.updated_by = self.request.user
if instance.payment_status < Application.PAY_STATUS.waiting:
instance.payment_status = Application.PAY_STATUS.waiting
instance.params = dict(self.cleaned_data)
instance.params.pop("donation", None)
discount_code = instance.params.pop("discount_code", None)
if discount_code:
instance.params.update({'discount_code': discount_code.code})
if commit:
instance.save()
return instance
def clean_donation(self):
donation = self.cleaned_data.get('donation', 0.00)
# If person have already taken invoice, then we do not allow changing donation amount
if self.instance.invoice:
return float(self.instance.donation)
else:
return donation
def clean_discount_code(self):
code = self.cleaned_data.get('discount_code', "")
if not code:
return None
else:
if isinstance(code, DiscountCode):
return code
try:
return DiscountCode.objects.get(code=code)
except:
return None
def clean(self):
if not self.cleaned_data.get('donation', ''):
self.cleaned_data.update({'donation': 0.00})
super(ApplicationPayUpdateForm, self).clean()
try:
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
if self.data.get("discount_code", None) and active_payment_type.payment_channel.is_bill:
active_payment_type = None
self._errors.update({'payment_type': [_("Invoice is not available with discount code."), ]})
except:
active_payment_type = None
if active_payment_type and active_payment_type.payment_channel.is_bill: # Hard coded bill ids.
if self.cleaned_data.get('company_name', '') == '':
self._errors.update({'company_name': [_("Company Name required."), ]})
if self.cleaned_data.get('company_regnr', '') == '':
self._errors.update({'company_regnr': [_("Company registration number required."), ]})
if self.cleaned_data.get('company_address', '') == '':
self._errors.update({'company_address': [_("Company Address required."), ]})
return self.cleaned_data
def __init__(self, *args, **kwargs):
self.participants = kwargs.pop('participants', None)
super(ApplicationPayUpdateForm, self).__init__(*args, **kwargs)
insured_participants = self.participants.exclude(insurance=None)
if insured_participants:
self.fields['accept_insurance'].required = True
insurance_company = insured_participants[0].insurance.insurance_company
terms_doc = "<a href='%s' target='_blank'>%s</a>" % (insurance_company.terms_doc.url, _("Regulation")) if insurance_company.terms_doc else ""
self.fields['accept_insurance'].label = mark_safe("%s %s" % (insurance_company.term, terms_doc))
else:
self.fields['accept_insurance'].widget = forms.HiddenInput()
now = timezone.now()
competition = self.instance.competition
checkboxes = (
'accept_terms',
'accept_inform_participants',
'accept_insurance',
)
if competition.processing_class:
_class = load_class(competition.processing_class)
processing = _class(competition=competition)
if hasattr(processing, 'payment_additional_checkboxes'):
for key, field in processing.payment_additional_checkboxes(application=self.instance):
self.fields[key] = field
checkboxes += (key,)
payments = competition.activepaymentchannel_set.filter(from_date__lte=now, till_date__gte=now).select_related(
'payment_channel')
# If user have already requested bill, then we are not showing possibility to request one more.
if self.instance.invoice:
payments = payments.filter(payment_channel__is_bill=False)
if self.instance.final_price == 0:
self.fields['payment_type'].required = False
self.fields['payment_type'].widget = forms.HiddenInput()
else:
self.fields['payment_type'].choices = [(obj.id, obj) for obj in payments]
if self.instance.discount_code:
self.initial['discount_code'] = self.instance.discount_code.code
self.fields['donation'].required = False
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
*checkboxes,
Div(
Div(
Div(
Field(
"discount_code",
css_class="input-field if--50 if--dark js-placeholder-up"
),
),
css_class="input-wrap w100 bottom-margin--15 col-s-24 col-m-12 col-l-12 col-xl-12"
),
css_class="input-wrap w100 bottom-margin--15",
),
Div(
Div(
css_class="w100 bottom-margin--30",
),
Div(
Div(
HTML(_("Payment method")) if self.instance.final_price > 0 else HTML(""),
css_class="fs14 fw700 uppercase w100 bottom-margin--30"
),
Div(
Div(
Field('payment_type', wrapper_class="row row--gutters-20"),
css_class="w100"
),
css_class="input-wrap w100"
),
css_class="inner no-padding--560"
),
css_class="w100 border-top"
),
Div(
Div(
# company_name
Div(
Div(
Field(
"company_name",
css_class="input-field if--50 if--dark js-placeholder-up",
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_vat
Div(
Div(
Field(
"company_vat",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_regnr
Div(
Div(
Field(
"company_regnr",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_address
Div(
Div(
Field(
"company_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_juridical_address
Div(
Div(
Field(
"company_juridical_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
'invoice_show_names',
css_class=""
),
css_class="invoice_fields"
)
)
class TeamPayForm(GetClassNameMixin, RequestKwargModelFormMixin, forms.ModelForm):
payment_type = forms.ChoiceField(choices=(), label="", widget=PaymentTypeWidget)
prepend = 'payment_'
success_url = None
class Meta:
model = Application
fields = ('company_name', 'company_vat', 'company_regnr', 'company_address', 'company_juridical_address',)
def _post_clean(self):
super(TeamPayForm, self)._post_clean()
if not bool(self.errors):
try:
instance = self.instance
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
if active_payment_type.payment_channel.is_bill:
create_team_invoice(instance, active_payment_type)
self.success_url = reverse('account:team', kwargs={'pk2': instance.id})
messages.info(self.request,
_('Invoice successfully created and sent to %(email)s') % {'email': instance.email})
else:
self.success_url = create_bank_transaction(instance, active_payment_type, self.request)
except:
# TODO We need to catch exception and log it to sentry
self._errors['payment_type'] = self.error_class([_("Error in connection with bank. Try again later.")])
def clean(self):
super(TeamPayForm, self).clean()
try:
active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
except:
active_payment_type = None
if active_payment_type and active_payment_type.payment_channel.is_bill: # Hard coded bill ids.
if self.cleaned_data.get('company_name', '') == '':
self._errors.update({'company_name': [_("Company Name required."), ]})
if self.cleaned_data.get('company_regnr', '') == '':
self._errors.update({'company_regnr': [_("Company registration number required."), ]})
if self.cleaned_data.get('company_address', '') == '':
self._errors.update({'company_address': [_("Company Address required."), ]})
if self.cleaned_data.get('company_juridical_address', '') == '':
self._errors.update({'company_juridical_address': [_("Company Juridical Address required."), ]})
return self.cleaned_data
def __init__(self, *args, **kwargs):
super(TeamPayForm, self).__init__(*args, **kwargs)
now = timezone.now()
competition = self.instance.distance.competition
payments = competition.activepaymentchannel_set.filter(from_date__lte=now, till_date__gte=now).select_related(
'payment_channel')
# If user have already requested bill, then we are not showing possibility to request one more.
if self.instance.invoice:
payments = payments.filter(payment_channel__is_bill=False)
self.fields['payment_type'].choices = [(obj.id, obj) for obj in payments]
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Div(
Div(
css_class="w100 bottom-margin--30",
),
Div(
Div(
HTML(_("Payment method")),
css_class="fs14 fw700 uppercase w100 bottom-margin--30"
),
Div(
Div(
Field('payment_type', wrapper_class="row row--gutters-20"),
css_class="w100"
),
css_class="input-wrap w100"
),
css_class="inner no-padding--560"
),
css_class="w100 border-top"
),
Div(
Div(
# company_name
Div(
Div(
Field(
"company_name",
css_class="input-field if--50 if--dark js-placeholder-up",
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_vat
Div(
Div(
Field(
"company_vat",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_regnr
Div(
Div(
Field(
"company_regnr",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_address
Div(
Div(
Field(
"company_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
# company_juridical_address
Div(
Div(
Field(
"company_juridical_address",
css_class="input-field if--50 if--dark js-placeholder-up"
),
css_class="input-wrap w100 bottom-margin--15"
),
css_class="col-xl-8 col-m-12 col-s-24"
),
'invoice_show_names',
css_class=""
),
css_class="invoice_fields"
)
)
| gpl-3.0 | -5,545,904,322,587,059,000 | 41.946667 | 593 | 0.506623 | false | 4.65799 | false | false | false |
saffsd/assignmentprint | assignmentprint.py | 1 | 15044 | """
Utility functions and classes for preparing project marking bundles
for student assignments.
Marco Lui <[email protected]>, November 2012
"""
import os, sys, csv, re
import tokenize, textwrap, token
import trace, threading
import imp
import contextlib
from cStringIO import StringIO
from pprint import pformat
import pep8
from collections import Sequence, Mapping, Sized
RE_FILENAME = re.compile(r'proj2-(?P<filename>\w+).py')
RE_DIRNAME = re.compile(r'proj2-(?P<dirname>\w+)')
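# These patterns recognise submissions named like 'proj2-<username>.py' (a
# single file) or a directory 'proj2-<username>' containing '<username>.py';
# e.g. 'proj2-jsmith.py' yields the username 'jsmith' (example name is
# hypothetical). See find_submission below.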
def as_module(path, name='submitted'):
module = imp.new_module(name)
with open(path) as f:
try:
# suppress stdout
sys.stdout = mystdout = StringIO()
exec f in module.__dict__
except Exception, e:
raise ImportError, "import failed: '{0}'".format(e)
finally:
sys.stdout = sys.__stdout__
return module, mystdout.getvalue()
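# Illustrative usage of as_module (the path below is hypothetical):
#
#   module, printed = as_module('submissions/proj2-jsmith/jsmith.py')
#   # 'module' behaves like an ordinary imported module, and 'printed'
#   # holds anything the submission wrote to stdout at import time.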
def item2strs(item, max_lines=None):
output = pformat(item)
if max_lines is None or len(output.splitlines()) <= max_lines:
retval = output.splitlines()
else:
if isinstance(item, Mapping):
itemlen = len(item)
retval = ["<{0} of len {1}>".format(type(item),itemlen)]
for i in item.items()[:max_lines-2]:
retval.append(str(i))
      retval.append('... ({0} more items)'.format(itemlen-max_lines+2))
elif isinstance(item, Sequence):
itemlen = len(item)
retval = ["<{0} of len {1}>".format(type(item),itemlen)]
for i in item[:max_lines-2]:
retval.append(str(i))
      retval.append('... ({0} more items)'.format(itemlen-max_lines+2))
else:
retval = ["<item with repr len {0}>".format(len(repr(item)))]
# Add the item type to the start
retval[0] = "({0}) {1}".format(type(item), retval[0])
return retval
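# Rough sketch of item2strs output (exact strings depend on pformat):
#
#   item2strs([0, 1, 2, 3, 4])
#   #   -> a single line such as "(<type 'list'>) [0, 1, 2, 3, 4]"
#   item2strs(range(100), max_lines=4)
#   #   -> a header describing the list and its length, a couple of leading
#   #      elements, then an '... (N more items)' marker line.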
def split_comments(line):
code = []
noncode = []
try:
for tk in tokenize.generate_tokens(StringIO(line).readline):
if tk[2][0] != 1:
break
if tk[0] == tokenize.COMMENT:
noncode.append(tk[:2])
else:
code.append(tk)
except tokenize.TokenError:
pass
retval = tokenize.untokenize(code).strip(), tokenize.untokenize(noncode).strip()
#retval = ''.join(c[1] for c in code), ''.join(c[1] for c in noncode)
return retval
def get_indent(code):
tokens = tokenize.generate_tokens(StringIO(code).readline)
tk = tokens.next()
indent = tk[1] if tk[0] == token.INDENT else ''
return indent
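# Quick sketch of the two helpers above (outputs shown approximately):
#
#   code, comment = split_comments("    x = 1  # set x")
#   # code    -> the statement text without the comment, e.g. 'x = 1'
#   # comment -> the trailing comment, e.g. '# set x'
#   get_indent("    x = 1")   # -> '    ' (the leading whitespace)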
def wrap_comment(line, width, add_indent=2):
"""
This assumes that line contains a (potentially whitespace-indented)
comment, and no actual code. It will assume anything before the
comment marker is padding, and will maintain the indent level
thereof.
"""
code, comm = split_comments(line)
indent = get_indent(line)
if len(indent) > width/2:
# Comment starts really far right, we shift it
# to start quarter way through the width
    indent = ' ' * (width // 4)
retval = textwrap.wrap(comm, width,
initial_indent= indent,
subsequent_indent= indent + '#' + ' '*add_indent,
)
return retval
def wrap_code(code, width, add_indent=' '):
"""
Attempts to wrap a single line of code, respecting token
boundaries.
"""
tokens = tokenize.generate_tokens(StringIO(code).readline)
indent = get_indent(code)
chunk_width = width - len(indent)
chunk_start = 0
chunk_end = 0
chunks = []
first_chunk = True
try:
for tk_type, tk_text, tk_start, tk_end, _ in tokens:
if tk_start[0] != tk_end[0]:
raise ValueError, "token spanning multiple lines"
tk_len = tk_end[1] - tk_start[1]
if first_chunk:
chunk_indent = '' # the indent is part of the tokens
else:
chunk_indent = indent + add_indent
chunk_width = width - len(chunk_indent)
if tk_end[1]-chunk_start >= chunk_width:
# this token starts a new chunk
chunk = chunk_indent+code[chunk_start:chunk_end]+'\\'
assert len(chunk) <= width
chunks.append(chunk)
chunk_start = tk_start[1]
first_chunk = False
chunk_end = tk_end[1]
assert len(chunk_indent+code[chunk_start:chunk_end]+'\\') <= width
except tokenize.TokenError:
# unmatched somethingorother, we don't really care as it
# may be matched on another line
pass
finally:
# flush remaining chunk
rest = code[chunk_start:]
if len(rest) == 1:
# if the token is only 1 character, it can replace the line continuation
chunks[-1] = chunks[-1][:-1] + rest
else:
chunk = chunk_indent + rest
assert len(chunk) <= width
chunks.append(chunk)
return chunks
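# Note: wrap_code and wrap_comment are combined by wrap_line below, and are
# also called directly from ProjectPrinter.display_code.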
def wrap_line(line, width):
"""
Attempt to intelligently wrap Python code to width
This also moves any comments to a line prior.
"""
if len(line) <= width:
# shortcut if the line is shorter than the width required
return [line]
_line = line.lstrip()
indent = len(line) - len(_line)
code, comm = split_comments(_line)
if code:
# there are comments, we output these first
if comm:
c = ' ' * indent + comm
retval = wrap_comment(c, width)
else:
retval = []
c = ' ' * indent + code
retval.extend(wrap_code(c, width))
return retval
elif comm:
# This line only contains comments. Wrap accordingly.
return wrap_comment(line, width)
else:
return ['']
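# Sketch of the wrappers in use; 'long_source_line' is a hypothetical string
# and the exact break points depend on tokenisation:
#
#   for chunk in wrap_line(long_source_line, 60):
#       print chunk    # each chunk fits within 60 characters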
def find_submission(path):
"""
Tries to find a submission in a given path.
Returns username, submission_path, else None
"""
if os.path.isdir(path):
m = RE_DIRNAME.search(path)
if m is not None:
dir_items = set(os.listdir(path))
username = m.group('dirname')
submission_name = username + '.py'
if submission_name in dir_items:
item_path = os.path.join(path, submission_name)
return username, item_path
elif os.path.isfile(path):
m = RE_FILENAME.search(path)
if m is not None:
username = m.group('filename')
return username, path
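# Illustrative walk over a directory of submissions (paths are hypothetical):
#
#   for entry in os.listdir('submissions'):
#       found = find_submission(os.path.join('submissions', entry))
#       if found is not None:
#           username, path = found
#           # hand 'path' to as_module() or ProjectPrinter.display_code()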
# from http://code.activestate.com/recipes/534166-redirectedio-context-manager-and-redirect_io-decor/
class RedirectedIO(object):
def __init__(self, target=None, mode='a+',
close_target=True):
try:
target = open(target, mode)
except TypeError:
if target is None:
target = StringIO()
self.target = target
self.close_target = close_target
def __enter__(self):
""" Redirect IO to self.target.
"""
self.original_stdout = sys.stdout
sys.stdout = self.target
return self.target
def __exit__(self, exc_type, exc_val, exc_tb):
""" Restore stdio and close the file.
"""
sys.stdout = self.original_stdout
if self.close_target:
self.target.close()
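# Minimal sketch of RedirectedIO: stdout is redirected to the given target
# (a filename or any writable object) for the duration of the block.
#
#   buf = StringIO()
#   with RedirectedIO(target=buf, close_target=False):
#       print 'captured'    # written to buf rather than the real stdout
#   # buf.getvalue() == 'captured\n'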
class ProjectPrinter(object):
"""
This class wraps a file-like object and provides
a series of methods for doing relevant output to
it.
"""
def __init__(self, target, pagewidth):
self.target = target
self.pagewidth = pagewidth
def writeln(self, line='', wrap=False):
if wrap:
self.target.write(textwrap.fill(line, width=self.pagewidth) + '\n')
else:
for l in line.splitlines():
self.target.write(textwrap.fill(l, width=self.pagewidth) + '\n')
def cwriteln(self, line):
"""
Write a centered line
"""
self.writeln("{0:^{1}}".format(line, self.pagewidth))
def hr(self, symbol='#'):
if len(symbol) != 1:
raise ValueError, "symbol must be a single character"
self.writeln(symbol * self.pagewidth)
def boxed_text(self, text, symbol='+', boxwidth=None, align='c', wrap=False):
if boxwidth is None:
boxwidth = self.pagewidth
if boxwidth < 0:
boxwidth = self.pagewidth + boxwidth
if self.pagewidth < boxwidth:
raise ValueError, "box wider than page"
if len(symbol) != 1:
raise ValueError, "symbol must be a single character"
if isinstance(text, basestring):
if wrap:
lines = textwrap.wrap(text, width=boxwidth-2*(len(symbol)+1))
else:
lines = text.splitlines()
else:
lines = text
self.cwriteln(symbol * boxwidth)
for line in lines:
if len(line) > boxwidth-2*(len(symbol)+1):
# line too long!
        _lines = textwrap.wrap(line, width=boxwidth-2*(len(symbol)+1), subsequent_indent='  ')
else:
_lines = [line]
for _line in _lines:
if align == 'c':
self.cwriteln('{0}{1:^{2}}{0}'.format(symbol, _line, boxwidth-2))
elif align == 'r':
self.cwriteln('{0}{1:>{2}} {0}'.format(symbol, _line, boxwidth-3))
else:
self.cwriteln('{0} {1:<{2}}{0}'.format(symbol, _line, boxwidth-3))
self.cwriteln(symbol * boxwidth)
def display_code(self, path):
"""
Display code with intelligent wrapping
"""
with open(path) as f:
for i, line in enumerate(f):
if len(line) > self.pagewidth - 6:
# Line too wide. Need to cleverly wrap it.
#_line = line.lstrip()
#indent = len(line) - len(_line)
indent = get_indent(line)
code, comm = split_comments(line)
if code:
if comm:
for l in wrap_comment(line, self.pagewidth-6):
self.writeln(' {0}'.format(l))
clines = wrap_code(indent + code, self.pagewidth - 6)
self.writeln('{0:>4}* {1}'.format(i+1, clines[0]))
for l in clines[1:]:
self.writeln(' {0}'.format(l))
else:
# only comments on this line
c_wrap = wrap_comment(line, self.pagewidth-6)
if c_wrap:
self.writeln( '{0:>4}: {1}'.format(i+1, c_wrap[0]) )
for l in c_wrap[1:]:
self.writeln(' {0}'.format(l))
"""
# We splice out comments
try:
tokens = list(tokenize.generate_tokens(StringIO(line).readline))
comments = ''.join(t[1] for t in tokens if t[0] == tokenize.COMMENT)
noncomments = [ (t[0],t[1]) for t in tokens if t[0] != tokenize.COMMENT ]
ncline = tokenize.untokenize(noncomments).rstrip()
except tokenize.TokenError:
# This happens with unmatched things - in particular triplequote
# we just pretend the line had no comments in this case
comments = ''
ncline = line
if ncline.lstrip():
# More than comments on this line
# Write the comments first, followed by the code
if comments.strip():
lead_gap = len(ncline) - len(ncline.lstrip())
comments = ' '*lead_gap + comments
c_wrap = wrap_comment(comments, self.pagewidth-6)
self.writeln(' {0}'.format(c_wrap[0]))
for l in c_wrap[1:]:
self.writeln(' {0}'.format(l))
if (len(ncline) + 6) > self.pagewidth:
# code is too long, must break
#self.writeln('line:{0} tokens:{1}'.format(len(ncline), len(noncomments)))
try:
broken = wrap_code(ncline, self.pagewidth-6)
except tokenize.TokenError:
# Can't tokenize, so we just wrap this with the same wrapping used
# for noncode and hope for the best.
broken = wrap_comment(ncline, self.pagewidth-6)
self.writeln('{0:>4}* {1}'.format(i+1, broken[0]))
for l in broken[1:]:
self.writeln(' {0}'.format(l))
else:
self.writeln('{0:>4}: {1}'.format(i+1, ncline))
else:
# Only comments on this line
c_wrap = wrap_comment(line, self.pagewidth-6)
self.writeln( '{0:>4}: {1}'.format(i+1, c_wrap[0]) )
for l in c_wrap[1:]:
self.writeln(' {0}'.format(l))
"""
else:
# Line fits on page
self.writeln( '{0:>4}: {1}'.format(i+1, line.rstrip()) )
def display_pep8(self, path, summary=True):
pep8_out = StringIO()
try:
with RedirectedIO(target=pep8_out, close_target=False):
pep8.process_options([path])
pep8.input_file(path)
error_stats = pep8.get_error_statistics()
warning_stats = pep8.get_warning_statistics()
val = pep8_out.getvalue().splitlines()
for line in [ x.split(':',1)[1] for x in val if ':' in x]:
self.writeln(line)
if summary:
self.writeln()
self.writeln("Summary:")
for e in error_stats:
self.writeln(e)
for w in warning_stats:
self.writeln(w)
self.writeln()
except tokenize.TokenError:
self.boxed_text(["PEP8 processing failed - check your source code"], symbol="#")
# adapted from http://code.activestate.com/recipes/473878/
class TimeOutExceeded(Exception): pass
class KThread(threading.Thread):
"""A subclass of threading.Thread, with a kill() method."""
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
self.result = None
def start(self):
"""Start the thread."""
self.__run_backup = self.run
self.run = self.__run # Force the Thread to install our trace.
threading.Thread.start(self)
def run(self):
# TODO: Capture STDOUT, STDERR
success = True
outstream = StringIO()
try:
with RedirectedIO(target=outstream, close_target=False):
val = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
except Exception, e:
val = sys.exc_info()
success = False
output = outstream.getvalue()
self.result = success, val, output
def __run(self):
"""Hacked run function, which installs the trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, why, arg):
if why == 'call':
return self.localtrace
else:
return None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
def timeout(func, args=(), kwargs={}, timeout_duration=10, default=None):
"""This function will spawn a thread and run the given function
using the args, kwargs and return the given default value if the
timeout_duration is exceeded.
"""
if isinstance(args, basestring):
args = eval(args)
if isinstance(kwargs, basestring):
kwargs = eval(kwargs)
t = KThread(target=func, args=args, kwargs=kwargs)
t.start()
t.join(timeout_duration)
if t.isAlive():
t.kill()
raise TimeOutExceeded()
else:
return t.result
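# Illustrative use of timeout() (comment sketch added for clarity; not part
# of the original module). Run a possibly slow callable and give up after a
# few seconds -- this assumes `time` is importable here:
#
#     def slow(seconds):
#         time.sleep(seconds)
#         return seconds
#
#     try:
#         success, value, output = timeout(slow, args=(2,), timeout_duration=5)
#     except TimeOutExceeded:
#         pass  # the call took too long and its thread was killed
#
# On success the tuple mirrors KThread.run(): (success flag, return value or
# exc_info on error, captured stdout).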
@contextlib.contextmanager
def working_directory(path):
prev_cwd = os.getcwd()
os.chdir(path)
yield
os.chdir(prev_cwd)
| gpl-3.0 | -6,263,825,766,030,460,000 | 30.276507 | 101 | 0.590335 | false | 3.655005 | false | false | false |
artificialnull/IshanBoot | aliasbot.py | 1 | 9126 | #!/usr/bin/python3
import requests
import json
import os
import time
import random as rand
import subprocess
#telegram bot stuff
url = "https://api.telegram.org/bot%s/%s"
token = open("token.txt").read().replace('\n', '')
print(url % (token, "getUpdates"))
path = os.path.dirname(__file__)
#globals
locked = []
aliases = {}
commands = {}
chat_id = 0
SCH_CHID = -1001032618176
LOG_CHID = -1001098108881
#requests stuff
ConnectionError = requests.exceptions.ConnectionError
def isCommand(text, command):
if text[:len(command)] != command:
return False
else:
return True
def stripCommand(text, command):
return text[len(command) + 1:]
def getUpdates():
try:
r = requests.get(
url % (token, "getUpdates"),
data={"offset": getUpdates.offset},
timeout=60
)
try:
r = json.loads(r.text)
except:
print("Loading error while getting updates")
return [], True
r = r['result']
if len(r) > 0:
getUpdates.offset = int(r[-1]['update_id']) + 1
except ConnectionError:
print("Connection error while getting updates")
return [], True
return r, False
getUpdates.offset = 0
def sendMessage(message, reply_id=False, markdown=True):
payload = {
"chat_id": chat_id,
"text": message,
"parse_mode": "Markdown",
"disable_web_page_preview": True
}
if reply_id:
payload['reply_to_message_id'] = reply_id
if not markdown:
del payload['parse_mode']
try:
tresponse = requests.post(
url % (token, "sendMessage"),
data=payload,
timeout=2
)
resp = json.loads(tresponse.text)
if not resp["ok"]:
return sendMessage(message, reply_id, False)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
print("Connection error while sending message")
return True
return False
def loadAliases():
aliases = {}
aliasFile = open(path + "/aliases.json").read()
aliases = json.loads(aliasFile)
return aliases
def saveAliases():
aliasFile = open(path + "/aliases.json", "w")
aliasFile.write(json.dumps(aliases, indent=4))
aliasFile.close()
def loadLocked():
locked = []
lfile = open(path + "/locked.txt").read()
for line in lfile.split('\n'):
if line != '':
locked.append(line)
return locked
def logMessage(message):
baseLM = "user: %s ; mesg: %s ; chid: %s\n"
if 'text' in message.keys():
filledLM = baseLM % (message['from']['first_name'],
message['text'],
message['chat']['id'])
logfile = open(path + "/logfile.txt", "a")
logfile.write(filledLM)
logfile.close()
if message['chat']['id'] == SCH_CHID:
payload = {
'chat_id': LOG_CHID,
'from_chat_id': SCH_CHID,
'disable_notification': True,
'message_id': message['message_id']
}
# try:
# tresponse = requests.post(url % (token, "forwardMessage"),
# data=payload, timeout=2)
# except:
# return
def alias(content, uid):
alias = content.split('=')[0]
while alias[0] == ' ':
alias = alias[1:]
while alias[-1] == ' ':
alias = alias[:-1]
alias = alias.replace(' ', '_')
value = '='.join(content.split('=')[1:])
if len(alias.split()) == 1:
if alias not in locked or uid == 204403520:
aliases[alias] = value
print("alias " + alias + "=" + value + " by " + name)
saveAliases()
sendMessage("Aliased " + alias + " to " + value, message_id)
else:
print("cannot unlock alias")
sendMessage("Alias is locked, sorry", message_id)
else:
print("alias malformed")
sendMessage("Alias must be a single term", message_id)
def unalias(content, uid):
alias = content
if alias not in locked:
if len(alias.split()) == 1 and alias in aliases.keys():
aliases[alias] = ''
print("del " + alias)
saveAliases()
sendMessage("Unaliased " + alias, message_id)
else:
print("unalias malformed")
sendMessage("Invalid alias", message_id)
else:
print("cannot unlock alias")
sendMessage("Alias is locked, sorry", message_id)
def random(content, uid):
randomAlias = rand.choice(list(aliases.keys()))
randomAliasStr = "/%s = %s" % (randomAlias, aliases[randomAlias])
print(randomAliasStr)
sendMessage(randomAliasStr)
def uptime(content, uid):
sendMessage('`' + subprocess.Popen('uptime', stdout=subprocess.PIPE).communicate()[0].decode("utf-8") + '`')
def welp(content, uid):
sendMessage("gg")
def rip(content, uid):
response = rand.choice(["me", "rip is right", "rip is me"])
sendMessage(response)
def amirite(content, uid):
if rand.randint(1, 10) == 4:
response = "yep"
else:
response = "¬_¬"
sendMessage(response)
def remind(content, uid):
global chat_id
chat_id = SCH_CHID
sendMessage("heres your periodic schedule reminder!!!\n" + aliases["schedule"])
def newdaypb(content, uid):
sendMessage(aliases["newdaypb"])
def queue(content, uid):
print("cue")
if rand.randint(1, 10) < 3:
print("Q")
sendMessage("u wot m8", message_id)
def stan(content, uid):
sendMessage('no', message_id)
commands = {
'/alias': alias,
'/unalias': unalias,
'/random': random,
'/time': uptime,
'w/elp': welp,
'/rip': rip,
'/amirite': amirite,
'/remindme': remind,
'/newdaypb': newdaypb,
'/q@IshanBot': queue,
'stan': stan,
'hi stan': stan
}
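# Adding another command only needs a handler with the same (content, uid)
# signature and an entry in the dict above -- an illustrative sketch, not
# part of the bot:
#
#   def eightball(content, uid):
#       sendMessage(rand.choice(["yes", "no", "ask again later"]), message_id)
#
#   commands['/8ball'] = eightball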
if __name__ == "__main__":
aliases = loadAliases()
locked = loadLocked()
print("Started")
loffset = getUpdates.offset - 1
while getUpdates.offset != loffset:
loffset = getUpdates.offset
getUpdates()
print("Updated to:", getUpdates.offset)
while __name__ == "__main__":
try:
r, err = getUpdates()
if len(r) != 0 and not err:
print("received updates")
elif err:
time.sleep(1)
for update in r:
message = update.get('message')
if message == None:
continue
logMessage(message)
message_id = message['message_id']
print(message_id)
chat = message['chat']
chat_id = chat['id']
user = message.get('from')
name = "@" + user.get('username')
if name == None:
name = user.get('first_name')
uid = user['id']
if chat_id == LOG_CHID:
try:
payload = {
'chat_id': LOG_CHID,
'user_id': uid
}
requests.post(
url % (token, "kickChatMember"),
data=payload,
timeout=2
)
continue
except ConnectionError:
pass
text = message.get('text', ' ')
found = False
for command in commands.keys():
if isCommand(text, command):
content = stripCommand(text, command)
found = True
commands[command](content, uid)
if found:
continue
if "/" in text:
terms = text.split()
response = ''
for term in terms:
if '/' == term[0]:
alias = ''
if '@' in term and term[1:].split('@')[-1] == "IshanBot":
alias = term[1:].split('@')[0]
else:
alias = term[1:]
"""
for key in aliases.keys():
if 'legendary' in aliases[key]:
print(key)
print([ord(c) for c in key])
print([ord(c) for c in alias])
print(alias == key)
"""
response += aliases.get(alias, '')
if response != '':
sendMessage(response + ' ' + name)
except KeyboardInterrupt:
print("Control menu:\n 0 - Quit\n 1 - Reload locks")
choice = int(input("> "))
if choice == 1:
locked = loadLocked()
else:
saveAliases()
raise SystemExit
except BaseException as e:
print(str(e))
| gpl-3.0 | -6,381,240,377,747,982,000 | 28.432258 | 112 | 0.49463 | false | 4.093315 | false | false | false |
AbhilashReddyM/GeometricMultigrid | mgd3d.py | 1 | 4921 | """
2017 (c) A. R. Malipeddi
3D geometric multigrid code for poissons equation in a cube.
- Finite difference method
- 7pt operator
- trilinear interpolation
- Two-color Gauss Seidel smoothing
"""
import numpy as np
def GSrelax(nx,ny,nz,u,f,iters=1,flag=1):
'''
Red-Black Gauss Seidel smoothing
flag : 1 = pre-sweep
2 = post-sweep
'''
dx=1.0/nx
dy=1.0/ny
dz=1.0/nz
Ax=1.0/dx**2
Ay=1.0/dy**2
Az=1.0/dz**2
Ap=1.0/(2.0*(1.0/dx**2+1.0/dy**2+1.0/dz**2))
#BCs. Needs to be generalized!
u[ 0,:,:] = -u[ 1,:,:]
u[-1,:,:] = -u[-2,:,:]
u[: ,0,:] = -u[:, 1,:]
u[:,-1,:] = -u[:,-2,:]
u[:,:, 0] = -u[:,:, 1]
u[:,:,-1] = -u[:,:,-2]
for it in range(iters):
c=0
for _ in [1,2]:
for i in range(1,nx+1):
cs=c
for j in range(1,ny+1):
for k in range(1+c,nz+1,2):
u[i,j,k]= Ap*( Ax*(u[i+1,j,k]+u[i-1,j,k])
+ Ay*(u[i,j+1,k]+u[i,j-1,k])
+ Az*(u[i,j,k+1]+u[i,j,k-1])
- f[i,j,k])
c=1-c
c=1-cs
c=1
#BCs. Needs to be generalized!
u[ 0,:,:] = -u[ 1,:,:]
u[-1,:,:] = -u[-2,:,:]
u[: ,0,:] = -u[:, 1,:]
u[:,-1,:] = -u[:,-2,:]
u[:,:, 0] = -u[:,:, 1]
u[:,:,-1] = -u[:,:,-2]
#if residual not needed
if(flag==2):
return u,None
res=np.zeros([nx+2,ny+2,nz+2])
for i in range(1,nx+1):
for j in range(1,ny+1):
for k in range(1,nz+1):
res[i,j,k]=f[i,j,k] - (Ax*(u[i+1,j,k]+u[i-1,j,k])
+ Ay*(u[i,j+1,k]+u[i,j-1,k])
+ Az*(u[i,j,k+1]+u[i,j,k-1])
- 2.0*(Ax+Ay+Az)*u[i,j,k])
return u,res
def restrict(nx,ny,nz,v):
'''
restrict 'v' to the coarser grid
'''
v_c=np.zeros([nx+2,ny+2,nz+2])
for i in range(1,nx+1):
for j in range(1,ny+1):
for k in range(1,nz+1):
v_c[i,j,k]=0.125*(v[2*i-1,2*j-1,2*k-1]+v[2*i,2*j-1,2*k-1]+v[2*i-1,2*j,2*k-1]+v[2*i,2*j,2*k-1]
+v[2*i-1,2*j-1,2*k ]+v[2*i,2*j-1,2*k ]+v[2*i-1,2*j,2*k ]+v[2*i,2*j,2*k ])
return v_c
def prolong(nx,ny,nz,v):
'''
interpolate correction to the fine grid
'''
v_f=np.zeros([2*nx+2,2*ny+2,2*nz+2])
a=27.0/64
b= 9.0/64
c= 3.0/64
d= 1.0/64
for i in range(1,nx+1):
for j in range(1,ny+1):
for k in range(1,nz+1):
v_f[2*i-1,2*j-1,2*k-1] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j-1,k] + v[i,j,k-1]) + c*(v[i-1,j-1,k] + v[i-1,j,k-1] + v[i,j-1,k-1]) + d*v[i-1,j-1,k-1]
v_f[2*i ,2*j-1,2*k-1] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j-1,k] + v[i,j,k-1]) + c*(v[i+1,j-1,k] + v[i+1,j,k-1] + v[i,j-1,k-1]) + d*v[i+1,j-1,k-1]
v_f[2*i-1,2*j ,2*k-1] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j+1,k] + v[i,j,k-1]) + c*(v[i-1,j+1,k] + v[i-1,j,k-1] + v[i,j+1,k-1]) + d*v[i-1,j+1,k-1]
v_f[2*i ,2*j ,2*k-1] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j+1,k] + v[i,j,k-1]) + c*(v[i+1,j+1,k] + v[i+1,j,k-1] + v[i,j+1,k-1]) + d*v[i+1,j+1,k-1]
v_f[2*i-1,2*j-1,2*k ] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j-1,k] + v[i,j,k+1]) + c*(v[i-1,j-1,k] + v[i-1,j,k+1] + v[i,j-1,k+1]) + d*v[i-1,j-1,k+1]
v_f[2*i ,2*j-1,2*k ] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j-1,k] + v[i,j,k+1]) + c*(v[i+1,j-1,k] + v[i+1,j,k+1] + v[i,j-1,k+1]) + d*v[i+1,j-1,k+1]
v_f[2*i-1,2*j ,2*k ] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j+1,k] + v[i,j,k+1]) + c*(v[i-1,j+1,k] + v[i-1,j,k+1] + v[i,j+1,k+1]) + d*v[i-1,j+1,k+1]
v_f[2*i ,2*j ,2*k ] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j+1,k] + v[i,j,k+1]) + c*(v[i+1,j+1,k] + v[i+1,j,k+1] + v[i,j+1,k+1]) + d*v[i+1,j+1,k+1]
return v_f
def V_cycle(nx,ny,nz,num_levels,u,f,level=1):
'''
V cycle
'''
if(level==num_levels):#bottom solve
u,res=GSrelax(nx,ny,nz,u,f,iters=100)
return u,res
#Step 1: Relax Au=f on this grid
u,res=GSrelax(nx,ny,nz,u,f,2)
#Step 2: Restrict residual to coarse grid
res_c=restrict(nx//2,ny//2,nz//2,res)
#Step 3:Solve A e_c=res_c on the coarse grid. (Recursively)
e_c=np.zeros_like(res_c)
e_c,res_c=V_cycle(nx//2,ny//2,nz//2,num_levels,e_c,res_c,level+1)
#Step 4: Interpolate(prolong) e_c to fine grid and add to u
u+=prolong(nx//2,ny//2,nz//2,e_c)
#Step 5: Relax Au=f on this grid
if(level==1):
u,res=GSrelax(nx,ny,nz,u,f,2,flag=1)
else:
u,res=GSrelax(nx,ny,nz,u,f,2,flag=2)
return u,res
def FMG(nx,ny,nz,num_levels,f,nv=1,level=1):
if(level==num_levels):#bottom solve
u=np.zeros([nx+2,ny+2,nz+2])
u,res=GSrelax(nx,ny,nz,u,f,iters=100)
return u,res
#Step 1: Restrict the rhs to a coarse grid
f_c=restrict(nx//2,ny//2,nz//2,f)
#Step 2: Solve the coarse grid problem using FMG
u_c,_=FMG(nx//2,ny//2,nz//2,num_levels,f_c,nv,level+1)
#Step 3: Interpolate u_c to the fine grid
u=prolong(nx//2,ny//2,nz//2,u_c)
#step 4: Execute 'nv' V-cycles
for _ in range(nv):
u,res=V_cycle(nx,ny,nz,num_levels-level,u,f)
return u,res
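# Example driver (an illustrative sketch added here; not part of the original
# module). Grid size, level count and the point-source right-hand side are
# arbitrary choices just to show how FMG() is called.
if __name__ == '__main__':
  nx = ny = nz = 16                  # cells per direction (power of two)
  num_levels = 3                     # coarsest grid is then 4x4x4
  # the routines above expect arrays padded with one ghost layer per side
  f = np.zeros([nx+2, ny+2, nz+2])
  f[nx//2, ny//2, nz//2] = 1.0       # point source at the cube centre
  u, res = FMG(nx, ny, nz, num_levels, f, nv=2)
  print('max |residual| after FMG: %g' % np.max(np.abs(res)))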
| mit | -4,327,896,811,227,233,300 | 28.291667 | 154 | 0.470839 | false | 1.799927 | false | false | false |
aalien/mib | mib.py | 1 | 7386 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# mib: Modular irc bot
# Copyright Antti Laine <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ircsocket import IrcSocket
from ircutils import regexpify
from parser import parse, IRCMsg
import config
import os
import re
import sys
class Mib:
""" Main class which handles most of the core functionality.
"""
def __init__(self):
""" Initialize variables and read config.
"""
sys.path.append('plugins')
self.loaded_plugins = {} # plugin name : module
self.cmd_callbacks = {} # command : set(function)
self.privmsg_cmd_callbacks = {} # command : set(function)
self.command_masks = {} # command : list(regexp)
self.plugins = set(config.LOAD_PLUGINS)
self.cmd_prefixes = set(config.CMD_PREFIXES)
self.nick = config.NICK
self.username = config.USERNAME
self.realname = config.REALNAME
self.server, self.port = config.SERVER
self.channels = config.CHANNELS
self.socket = IrcSocket(self.server, self.port, self.nick,
self.username, self.realname)
self.socket.register_readline_cb(self.parse_line)
for channel in self.channels:
self.socket.join(channel)
for plugin in self.plugins:
print self.load_plugin(plugin)[1]
def run(self):
""" Start socket's main loop.
"""
self.socket.run()
def clean(self):
for plugin in self.loaded_plugins.itervalues():
plugin.clean()
def parse_line(self, line):
""" Parse line and call callbacks registered for command.
"""
print line
parsed = parse(line)
if not parsed:
print 'Unable to parse line: "%s"' %(line)
return
# call registered functions
for function in self.cmd_callbacks.get(parsed.cmd, ()):
try:
function(parsed)
except Exception, e:
print 'Error from function', repr(function), ':', e
# call registered privmsg functions with pre-parsed line
if parsed.cmd == 'PRIVMSG':
cmd_prefix = parsed.postfix.split(' ', 1)[0]
postfix = parsed.postfix[len(cmd_prefix):].lstrip()
if cmd_prefix in self.cmd_prefixes:
print 'Found command prefix', cmd_prefix
cmd = postfix.lstrip().split(' ', 1)[0]
postfix = postfix[len(cmd):].lstrip()
stripped_parsed = IRCMsg(parsed.prefix, parsed.cmd,
parsed.params, postfix)
print "stripped_parsed = ", stripped_parsed
print 'Searching for command', cmd
for function in self.privmsg_cmd_callbacks.get(cmd, ()):
run = False
if cmd not in self.command_masks:
run = True
else:
print 'There are limitations for this command'
for regexp in self.command_masks[cmd]:
print 'Matching %s to %s' % (parsed.prefix,
regexp.pattern)
if regexp.match(parsed.prefix):
run = True
break
if run:
try:
print 'Executing command %s' % cmd
function(stripped_parsed)
except Exception, e:
print 'Error from function', repr(function), ':', e
def load_plugin(self, plugin, params=None):
""" str, ([]) -> (bool, str)
Loads plugin from plugins/<plugin>.py
Params will be given to plugin's constructor.
Returns a tuple with a boolean stating if the plugin
was loaded properly and a message telling what happened.
"""
if plugin in self.loaded_plugins:
return (False, 'Plugin %s already loaded' %(plugin))
if not os.path.exists(os.path.join('plugins', plugin + '.py')):
return (False, 'Plugin %s does not exists' %(plugin))
try:
module = __import__(plugin)
if params:
obj = module.init(self, params)
else:
obj = module.init(self)
success = True
except Exception, err:
success = False
print err
if success:
self.loaded_plugins[plugin] = obj
return (True, 'Loaded plugin %s' %(plugin))
else:
return (False, 'Failed to load plugin %s' %(plugin))
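    # A minimal plugin sketch (illustrative; not shipped with the bot). The
    # loader above only requires plugins/<name>.py to expose init(bot) or
    # init(bot, params) returning an object with a clean() method, e.g.:
    #
    #   # plugins/echo.py
    #   def init(bot):
    #       return Echo(bot)
    #
    #   class Echo(object):
    #       def __init__(self, bot):
    #           bot.register_privmsg_cmd('echo', self.on_echo)
    #       def on_echo(self, parsed):
    #           # parsed is an IRCMsg tuple: (prefix, cmd, params, postfix)
    #           pass
    #       def clean(self):
    #           pass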
def register_cmd(self, cmd, function):
""" Registers a function to be called when a line with
cmd is seen. Function must take one named tuple parameter.
Tuple contains line in parsed form with fields
(prefix, cmd, params, postfix)
"""
self.cmd_callbacks.setdefault(cmd, set()).add(function)
def register_privmsg_cmd(self, cmd, function):
""" Registers a function to be called when a PRIVMSG with
cmd is seen. Function must take one named tuple parameter.
Tuple contains line in parsed form with fields
(prefix, cmd, params,
postfix stripped from one of CMD_PREFIXES and cmd)
"""
self.privmsg_cmd_callbacks.setdefault(cmd, set()).add(function)
    def add_cmd_permission(self, cmd, mask, use_regexp=True):
        """ Creates a regular expression from the mask and adds it
            to the list of allowed regexps for the cmd.
            mask is an IRC mask, and will be changed into a corresponding
            regular expression unless use_regexp is False.
        """
        if use_regexp:
            mask = regexpify(mask)
        m = re.compile(mask)
        self.command_masks.setdefault(cmd, []).append(m)
def rm_cmd_permission(self, cmd, mask):
""" Creates a regular expression from the mask, and removes
the permission for that expression from cmd's list.
mask is an IRC mask, and will be changed into a corresponding
regular expression.
"""
mask = regexpify(mask)
if cmd in self.command_masks:
for index, regexp in enumerate(self.command_masks[cmd]):
if regexp.pattern == mask:
del self.command_masks[cmd][index]
break
if __name__ == "__main__":
mib = Mib()
try:
mib.run()
except Exception, e:
print 'ERROR: ', e
except:
pass
mib.clean()
print 'Quiting!'
| mit | 7,265,648,255,817,794,000 | 37.670157 | 79 | 0.558218 | false | 4.51467 | true | false | false |
team-vigir/vigir_behaviors | vigir_flexbe_states/src/vigir_flexbe_states/read_dynamic_parameter_state.py | 1 | 2586 | #!/usr/bin/env python
from flexbe_core import EventState, Logger
import rospy
from dynamic_reconfigure.client import Client
"""
Created on 11/03/2014
@author: Philipp Schillinger
"""
class ReadDynamicParameterState(EventState):
"""
Reads a given trajectory controller parameter.
"""
LEFT_ARM_WRX = ['left_arm_traj_controller', 'l_arm_wrx']
LEFT_ARM_WRY = ['left_arm_traj_controller', 'l_arm_wry']
LEFT_ARM_ELX = ['left_arm_traj_controller', 'l_arm_elx']
LEFT_ARM_ELY = ['left_arm_traj_controller', 'l_arm_ely']
LEFT_ARM_SHX = ['left_arm_traj_controller', 'l_arm_shx']
LEFT_ARM_SHZ = ['left_arm_traj_controller', 'l_arm_shz']
RIGHT_ARM_WRX = ['right_arm_traj_controller', 'r_arm_wrx']
RIGHT_ARM_WRY = ['right_arm_traj_controller', 'r_arm_wry']
RIGHT_ARM_ELX = ['right_arm_traj_controller', 'r_arm_elx']
RIGHT_ARM_ELY = ['right_arm_traj_controller', 'r_arm_ely']
RIGHT_ARM_SHX = ['right_arm_traj_controller', 'r_arm_shx']
RIGHT_ARM_SHZ = ['right_arm_traj_controller', 'r_arm_shz']
def __init__(self, param):
"""Constructor"""
super(ReadDynamicParameterState, self).__init__(outcomes=['read', 'failed'],
input_keys=['traj_controller'],
output_keys=['parameter_value'])
self._param = param
self._failed = False
self._clients = {}
self._waiting_for_response = []
self._parameter_value_list = []
def execute(self, userdata):
if self._failed:
return 'failed'
value_offset = 0
for i in range(len(self._clients.keys())):
if self._waiting_for_response[i]:
param_dict = self._clients.values()[i].get_configuration(0.1)
if param_dict is not None:
self._waiting_for_response[i] = False
value_list = []
for j in range(len(self._param.values()[i])):
value_list.append(param_dict[self._param.values()[i][j]])
self._parameter_value_list[value_offset:value_offset+len(value_list)] = value_list
value_offset += len(self._param.values()[i])
if all(not waiting for waiting in self._waiting_for_response):
userdata.parameter_value = self._parameter_value_list
return 'read'
def on_enter(self, userdata):
self._clients = {}
self._waiting_for_response = [True] * len(self._param.keys())
self._parameter_value_list = [None] * sum(map(len, self._param.values()))
try:
for server in self._param.keys():
self._clients[server] = Client("/trajectory_controllers/" + userdata.traj_controller[0] + "/" + server + "/" + userdata.traj_controller[1])
except Exception as e:
Logger.logwarn('Was unable to reach parameter server:\n%s' % str(e))
self._failed = True
| bsd-3-clause | 9,053,889,298,215,037,000 | 30.536585 | 143 | 0.664346 | false | 2.905618 | false | false | false |
will-iam/Variant | script/process/ergodicity_scaling.py | 1 | 4083 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
import __future__
import parser
import sys
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
import numpy as np
import operator
from collections import *
caseSize = (8192, 8192)
if parser.args.res:
maxAvailableNode = parser.args.res
else:
maxAvailableNode = 8
sizeDataDict = []
for p in range(0, int(np.log2(maxAvailableNode)) + 1):
filterDict = {'nSizeX' : caseSize[0], 'nSizeY' : caseSize[1], 'R' : 64 * 2**p}
    print(filterDict)
data = parser.getData(filterDict)
if len(data):
sizeDataDict.append(data)
if len(sizeDataDict) == 0:
print("No data found.")
sys.exit(1)
loopTimeDict = dict()
for data in sizeDataDict:
for key, value in data.items():
keyDict = parser.extractKey(key)
Nt = keyDict['Nt']
R = keyDict['R']
if keyDict['Ny'] != caseSize[0] or keyDict['Nx'] != caseSize[1]:
print("Error in collected data")
sys.exit(1)
for run in value:
nSDD = run['point'][0] * run['point'][1]
# On several nodes, select only pure SDD, which is the best result.
if R > 64 and nSDD < R:
continue
# Don't remove HyperThreading.
# We assume that hyperthreading with SDD leads to same results as with SDS.
#if R > 64 and nSDD == R and Nt > 1.0:
# continue
# On a single node, select only pure SDS
if R == 64 and nSDD > 1:
continue
loopT = run['loopTime'] * caseSize[0] * caseSize[1] * keyDict['Ni'] / 1000.
if R not in loopTimeDict.keys():
loopTimeDict[R] = list()
loopTimeDict[R].append(loopT)
# And now, we must plot that
fig = plt.figure(0, figsize=(9, 6))
ax = fig.add_subplot(111)
#ax = fig.add_subplot(211)
#ax.set_xscale('log', basex=2)
#ax.set_yscale('log')
maxSimulationNumber = 42
xArray = range(1, maxSimulationNumber + 1)
'''
#Perfect Scale
loopTimeDict[128] = [k / 2. for k in loopTimeDict[64]]
loopTimeDict[256] = [k / 4. for k in loopTimeDict[64]]
loopTimeDict[512] = [k / 8. for k in loopTimeDict[64]]
'''
for r in sorted(loopTimeDict):
nodeNeeded = r // 64
minT = np.min(loopTimeDict[r])
print("Min Time %s node(s) = %s" % (nodeNeeded, minT))
totalTimeArray = np.zeros(maxSimulationNumber)
for i in xArray:
totalTimeArray[i-1] = minT * (1 + (i * nodeNeeded - 1) // maxAvailableNode)
ax.plot(xArray, totalTimeArray, '-', label="Batch Size %s" % (r // 64))
parser.outputCurve("ergodicity_scaling-%s.dat" % (r//64), xArray, totalTimeArray)
'''
minSize = int(np.sqrt(np.min(syncTimeDict.keys())))
maxSize = int(np.sqrt(np.max(syncTimeDict.keys())))
nodeNumber = (caseSize[0] * caseSize[1] / (maxSize * maxSize))
'''
plt.title('%sx%s batch time with %s node(s) available at the same time.' % (caseSize[0], caseSize[1], maxAvailableNode))
plt.xlabel('Total number of simulation to run')
plt.ylabel('Loop Time')
plt.legend()
'''
bx = fig.add_subplot(212)
bx.set_xscale('log', basex=2)
bx.plot(sorted(sdsWeakDict), [np.min(v) for k, v in sorted(sdsWeakDict.items(), key=operator.itemgetter(0))], 'g+-', label="SDS scaling")
bx.plot(sorted(sddWeakDict), [np.min(v) for k, v in sorted(sddWeakDict.items())], 'b+-', label="SDD scaling")
#bx.plot(sorted(hybridWeakDict), [np.min(v) for k, v in sorted(hybridWeakDict.items())], 'y+-', label="Hybrid scaling")
bx.plot(sorted(sddWeakDict), [firstValueSDD for k in sorted(sddWeakDict.keys())], 'b--', label="SDD ideal")
bx.plot(sorted(sdsWeakDict), [firstValueSDS for k in sorted(sdsWeakDict.keys())], 'g--', label="SDS ideal")
for k in sdsWeakDict:
bx.plot(np.full(len(sdsWeakDict[k]), k), sdsWeakDict[k], 'g+')
for k in sddWeakDict:
bx.plot(np.full(len(sddWeakDict[k]), k), sddWeakDict[k], 'b+')
plt.title('Weak Scaling from %sx%s to %sx%s' % (initSize, initSize, initSize * 2**((maxPower-1) / 2), initSize * 2**((maxPower-1) / 2)) )
plt.xlabel('Core(s)')
plt.ylabel('Loop Time / iteration')
plt.legend()
'''
plt.show()
| mit | 3,501,576,982,939,167,000 | 30.898438 | 137 | 0.62552 | false | 2.93319 | false | false | false |
rfleschenberg/django-shop | example/myshop/migrations/polymorphic/0003_add_polymorphic.py | 1 | 9751 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import cms.models.fields
import djangocms_text_ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0013_urlconfrevision'),
('contenttypes', '0002_remove_content_type_name'),
('filer', '0002_auto_20150606_2003'),
('myshop', '0002_add_i18n'),
]
operations = [
migrations.CreateModel(
name='OperatingSystem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='Name')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('active', models.BooleanField(default=True, help_text='Is this product publicly visible.', verbose_name='Active')),
('product_name', models.CharField(max_length=255, verbose_name='Product Name')),
('slug', models.SlugField(unique=True, verbose_name='Slug')),
('order', models.PositiveIntegerField(verbose_name='Sort by', db_index=True)),
],
options={
'ordering': ('order',),
},
),
migrations.CreateModel(
name='ProductTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language_code', models.CharField(max_length=15, verbose_name='Language', db_index=True)),
('description', djangocms_text_ckeditor.fields.HTMLField(help_text='Description for the list view of products.', verbose_name='Description')),
],
),
migrations.CreateModel(
name='SmartPhone',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('product_code', models.CharField(unique=True, max_length=255, verbose_name='Product code')),
('unit_price', models.DecimalField(default='0', help_text='Net price for this product', max_digits=30, decimal_places=3)),
('storage', models.PositiveIntegerField(help_text='Internal storage in MB', verbose_name='Internal Storage')),
],
),
migrations.AlterUniqueTogether(
name='smartcardtranslation',
unique_together=set([]),
),
migrations.RemoveField(
model_name='smartcardtranslation',
name='master',
),
migrations.AlterModelOptions(
name='smartcard',
options={'verbose_name': 'Smart Card', 'verbose_name_plural': 'Smart Cards'},
),
migrations.RemoveField(
model_name='smartcard',
name='active',
),
migrations.RemoveField(
model_name='smartcard',
name='cms_pages',
),
migrations.RemoveField(
model_name='smartcard',
name='created_at',
),
migrations.RemoveField(
model_name='smartcard',
name='id',
),
migrations.RemoveField(
model_name='smartcard',
name='images',
),
migrations.RemoveField(
model_name='smartcard',
name='manufacturer',
),
migrations.RemoveField(
model_name='smartcard',
name='order',
),
migrations.RemoveField(
model_name='smartcard',
name='polymorphic_ctype',
),
migrations.RemoveField(
model_name='smartcard',
name='product_name',
),
migrations.RemoveField(
model_name='smartcard',
name='slug',
),
migrations.RemoveField(
model_name='smartcard',
name='updated_at',
),
migrations.AlterField(
model_name='cartitem',
name='product',
field=models.ForeignKey(to='myshop.Product'),
),
migrations.AlterField(
model_name='orderitem',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Product', blank=True, to='myshop.Product', null=True),
),
migrations.AlterField(
model_name='productimage',
name='product',
field=models.ForeignKey(to='myshop.Product'),
),
migrations.AlterField(
model_name='productpage',
name='product',
field=models.ForeignKey(to='myshop.Product'),
),
migrations.CreateModel(
name='Commodity',
fields=[
('product_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='myshop.Product')),
('unit_price', models.DecimalField(default='0', help_text='Net price for this product', max_digits=30, decimal_places=3)),
('product_code', models.CharField(unique=True, max_length=255, verbose_name='Product code')),
('placeholder', cms.models.fields.PlaceholderField(slotname='Commodity Details', editable=False, to='cms.Placeholder', null=True)),
],
options={
'verbose_name': 'Commodity',
'verbose_name_plural': 'Commodities',
},
bases=('myshop.product',),
),
migrations.CreateModel(
name='SmartPhoneModel',
fields=[
('product_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='myshop.Product')),
('battery_type', models.PositiveSmallIntegerField(verbose_name='Battery type', choices=[(1, 'Lithium Polymer (Li-Poly)'), (2, 'Lithium Ion (Li-Ion)')])),
('battery_capacity', models.PositiveIntegerField(help_text='Battery capacity in mAh', verbose_name='Capacity')),
('ram_storage', models.PositiveIntegerField(help_text='RAM storage in MB', verbose_name='RAM')),
('wifi_connectivity', models.PositiveIntegerField(help_text='WiFi Connectivity', verbose_name='WiFi', choices=[(1, '802.11 b/g/n')])),
('bluetooth', models.PositiveIntegerField(help_text='Bluetooth Connectivity', verbose_name='Bluetooth', choices=[(1, 'Bluetooth 4.0')])),
('gps', models.BooleanField(default=False, help_text='GPS integrated', verbose_name='GPS')),
('width', models.DecimalField(help_text='Width in mm', verbose_name='Width', max_digits=4, decimal_places=1)),
('height', models.DecimalField(help_text='Height in mm', verbose_name='Height', max_digits=4, decimal_places=1)),
('weight', models.DecimalField(help_text='Weight in gram', verbose_name='Weight', max_digits=5, decimal_places=1)),
('screen_size', models.DecimalField(help_text='Diagonal screen size in inch', verbose_name='Screen size', max_digits=4, decimal_places=2)),
('operating_system', models.ForeignKey(verbose_name='Operating System', to='myshop.OperatingSystem')),
],
options={
'verbose_name': 'Smart Phone',
'verbose_name_plural': 'Smart Phones',
},
bases=('myshop.product',),
),
migrations.DeleteModel(
name='SmartCardTranslation',
),
migrations.AddField(
model_name='producttranslation',
name='master',
field=models.ForeignKey(related_name='translations', to='myshop.Product', null=True),
),
migrations.AddField(
model_name='product',
name='cms_pages',
field=models.ManyToManyField(help_text='Choose list view this product shall appear on.', to='cms.Page', through='myshop.ProductPage'),
),
migrations.AddField(
model_name='product',
name='images',
field=models.ManyToManyField(to='filer.Image', through='myshop.ProductImage'),
),
migrations.AddField(
model_name='product',
name='manufacturer',
field=models.ForeignKey(verbose_name='Manufacturer', to='myshop.Manufacturer'),
),
migrations.AddField(
model_name='product',
name='polymorphic_ctype',
field=models.ForeignKey(related_name='polymorphic_myshop.product_set+', editable=False, to='contenttypes.ContentType', null=True),
),
migrations.AddField(
model_name='smartcard',
name='product_ptr',
field=models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, default=None, serialize=False, to='myshop.Product'),
preserve_default=False,
),
migrations.AddField(
model_name='smartphone',
name='product',
field=models.ForeignKey(verbose_name='Smart-Phone Model', to='myshop.SmartPhoneModel'),
),
migrations.AlterUniqueTogether(
name='producttranslation',
unique_together=set([('language_code', 'master')]),
),
]
| bsd-3-clause | -1,009,144,222,055,504,600 | 44.565421 | 169 | 0.573172 | false | 4.428247 | false | false | false |
elastic/elasticsearch-py | test_elasticsearch/test_types/async_types.py | 1 | 3095 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, AsyncGenerator, Dict
from elasticsearch import (
AIOHttpConnection,
AsyncElasticsearch,
AsyncTransport,
ConnectionPool,
)
from elasticsearch.helpers import (
async_bulk,
async_reindex,
async_scan,
async_streaming_bulk,
)
es = AsyncElasticsearch(
[{"host": "localhost", "port": 9443}],
transport_class=AsyncTransport,
)
t = AsyncTransport(
[{}],
connection_class=AIOHttpConnection,
connection_pool_class=ConnectionPool,
sniff_on_start=True,
sniffer_timeout=0.1,
sniff_timeout=1,
sniff_on_connection_fail=False,
max_retries=1,
retry_on_status={100, 400, 503},
retry_on_timeout=True,
send_get_body_as="source",
)
async def async_gen() -> AsyncGenerator[Dict[Any, Any], None]:
yield {}
async def async_scan_types() -> None:
async for _ in async_scan(
es,
query={"query": {"match_all": {}}},
request_timeout=10,
clear_scroll=True,
scroll_kwargs={"request_timeout": 10},
):
pass
async for _ in async_scan(
es,
raise_on_error=False,
preserve_order=False,
scroll="10m",
size=10,
request_timeout=10.0,
):
pass
async def async_streaming_bulk_types() -> None:
async for _ in async_streaming_bulk(es, async_gen()):
pass
async for _ in async_streaming_bulk(es, async_gen().__aiter__()):
pass
async for _ in async_streaming_bulk(es, [{}]):
pass
async for _ in async_streaming_bulk(es, ({},)):
pass
async def async_bulk_types() -> None:
_, _ = await async_bulk(es, async_gen())
_, _ = await async_bulk(es, async_gen().__aiter__())
_, _ = await async_bulk(es, [{}])
_, _ = await async_bulk(es, ({},))
async def async_reindex_types() -> None:
_, _ = await async_reindex(
es, "src-index", "target-index", query={"query": {"match": {"key": "val"}}}
)
_, _ = await async_reindex(
es, source_index="src-index", target_index="target-index", target_client=es
)
_, _ = await async_reindex(
es,
"src-index",
"target-index",
chunk_size=1,
scroll="10m",
scan_kwargs={"request_timeout": 10},
bulk_kwargs={"request_timeout": 10},
)
| apache-2.0 | 6,571,657,479,850,991,000 | 27.394495 | 83 | 0.625202 | false | 3.582176 | false | false | false |
gemagomez/keepnote | keepnote/gui/main_window.py | 1 | 52638 | """
KeepNote
Graphical User Interface for KeepNote Application
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import mimetypes
import os
import shutil
import subprocess
import sys
import time
import thread
import threading
import uuid
# pygtk imports
import pygtk
pygtk.require('2.0')
import gtk
import gobject
# keepnote imports
import keepnote
from keepnote import \
KeepNoteError, \
ensure_unicode, \
unicode_gtk, \
FS_ENCODING
from keepnote.notebook import \
NoteBookError, \
NoteBookVersionError
from keepnote import notebook as notebooklib
from keepnote import tasklib
from keepnote.gui import \
get_resource, \
get_resource_image, \
get_resource_pixbuf, \
Action, \
ToggleAction, \
add_actions, \
CONTEXT_MENU_ACCEL_PATH, \
FileChooserDialog, \
init_key_shortcuts, \
UIManager
from keepnote.gui.icons import \
lookup_icon_filename
from keepnote.gui import richtext
from keepnote.gui import \
dialog_image_new, \
dialog_drag_drop_test, \
dialog_wait, \
update_file_preview
from keepnote.gui.icon_menu import IconMenu
from keepnote.gui.three_pane_viewer import ThreePaneViewer
from keepnote.gui.tabbed_viewer import TabbedViewer
_ = keepnote.translate
CLIPBOARD_NAME = "CLIPBOARD"
class KeepNoteWindow (gtk.Window):
"""Main windows for KeepNote"""
def __init__(self, app, winid=None):
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
self._app = app # application object
self._winid = winid if winid else unicode(uuid.uuid4())
self._viewers = []
# window state
self._maximized = False # True if window is maximized
self._was_maximized = False # True if iconified and was maximized
self._iconified = False # True if window is minimized
self._tray_icon = None # True if tray icon is present
self._recent_notebooks = []
self._uimanager = UIManager()
self._accel_group = self._uimanager.get_accel_group()
self.add_accel_group(self._accel_group)
init_key_shortcuts()
self.init_layout()
self.setup_systray()
# load preferences for the first time
self.load_preferences(True)
def get_id(self):
return self._winid
def init_layout(self):
# init main window
self.set_title(keepnote.PROGRAM_NAME)
self.set_default_size(*keepnote.DEFAULT_WINDOW_SIZE)
self.set_icon_list(get_resource_pixbuf("keepnote-16x16.png"),
get_resource_pixbuf("keepnote-32x32.png"),
get_resource_pixbuf("keepnote-64x64.png"))
# main window signals
self.connect("error", lambda w,m,e,t: self.error(m,e,t))
self.connect("delete-event", lambda w,e: self._on_close())
self.connect("window-state-event", self._on_window_state)
self.connect("size-allocate", self._on_window_size)
#self._app.pref.changed.add(self._on_app_options_changed)
#====================================
# Dialogs
self.drag_test = dialog_drag_drop_test.DragDropTestDialog(self)
self.viewer = self.new_viewer()
#====================================
# Layout
# vertical box
main_vbox = gtk.VBox(False, 0)
self.add(main_vbox)
# menu bar
main_vbox.set_border_width(0)
self.menubar = self.make_menubar()
main_vbox.pack_start(self.menubar, False, True, 0)
# toolbar
main_vbox.pack_start(self.make_toolbar(), False, True, 0)
main_vbox2 = gtk.VBox(False, 0)
main_vbox2.set_border_width(1)
main_vbox.pack_start(main_vbox2, True, True, 0)
# viewer
self.viewer_box = gtk.VBox(False, 0)
main_vbox2.pack_start(self.viewer_box, True, True, 0)
# status bar
status_hbox = gtk.HBox(False, 0)
main_vbox.pack_start(status_hbox, False, True, 0)
# message bar
self.status_bar = gtk.Statusbar()
status_hbox.pack_start(self.status_bar, False, True, 0)
self.status_bar.set_property("has-resize-grip", False)
self.status_bar.set_size_request(300, -1)
# stats bar
self.stats_bar = gtk.Statusbar()
status_hbox.pack_start(self.stats_bar, True, True, 0)
#====================================================
# viewer
self.viewer_box.pack_start(self.viewer, True, True, 0)
# add viewer menus
self.viewer.add_ui(self)
def setup_systray(self):
"""Setup systray for window"""
# system tray icon
if gtk.gtk_version > (2, 10):
if not self._tray_icon:
self._tray_icon = gtk.StatusIcon()
self._tray_icon.set_from_pixbuf(
get_resource_pixbuf("keepnote-32x32.png"))
self._tray_icon.set_tooltip(keepnote.PROGRAM_NAME)
self._statusicon_menu = self.make_statusicon_menu()
self._tray_icon.connect("activate", self._on_tray_icon_activate)
self._tray_icon.connect('popup-menu',
self._on_systray_popup_menu)
self._tray_icon.set_property(
"visible", self._app.pref.get("window", "use_systray",
default=True))
else:
self._tray_icon = None
def _on_systray_popup_menu(self, status, button, time):
self._statusicon_menu.popup(None, None, None, button, time)
#==============================================
# viewers
def new_viewer(self):
"""Creates a new viewer for this window"""
#viewer = ThreePaneViewer(self._app, self)
viewer = TabbedViewer(self._app, self)
viewer.connect("error", lambda w,m,e: self.error(m, e, None))
viewer.connect("status", lambda w,m,b: self.set_status(m, b))
viewer.connect("window-request", self._on_window_request)
viewer.connect("current-node", self._on_current_node)
viewer.connect("modified", self._on_viewer_modified)
return viewer
def add_viewer(self, viewer):
"""Adds a viewer to the window"""
self._viewers.append(viewer)
def remove_viewer(self, viewer):
"""Removes a viewer from the window"""
self._viewers.remove(viewer)
def get_all_viewers(self):
"""Returns list of all viewers associated with window"""
return self._viewers
def get_all_notebooks(self):
"""Returns all notebooks loaded by all viewers"""
return set(filter(lambda n: n is not None,
(v.get_notebook() for v in self._viewers)))
#===============================================
# accessors
def get_app(self):
"""Returns application object"""
return self._app
def get_uimanager(self):
"""Returns the UIManager for the window"""
return self._uimanager
def get_viewer(self):
"""Returns window's viewer"""
return self.viewer
def get_accel_group(self):
"""Returns the accel group for the window"""
return self._accel_group
def get_notebook(self):
"""Returns the currently loaded notebook"""
return self.viewer.get_notebook()
def get_current_page(self):
"""Returns the currently selected page"""
return self.viewer.get_current_page()
#=========================================================
# main window gui callbacks
def _on_window_state(self, window, event):
"""Callback for window state"""
iconified = self._iconified
# keep track of maximized and minimized state
self._iconified = bool(event.new_window_state &
gtk.gdk.WINDOW_STATE_ICONIFIED)
# detect recent iconification
if not iconified and self._iconified:
# save maximized state before iconification
self._was_maximized = self._maximized
self._maximized = bool(event.new_window_state &
gtk.gdk.WINDOW_STATE_MAXIMIZED)
# detect recent de-iconification
if iconified and not self._iconified:
# explicitly maximize if not maximized
# NOTE: this is needed to work around a MS windows GTK bug
if self._was_maximized:
gobject.idle_add(self.maximize)
def _on_window_size(self, window, event):
"""Callback for resize events"""
# record window size if it is not maximized or minimized
if not self._maximized and not self._iconified:
self._app.pref.get("window")["window_size"] = self.get_size()
#def _on_app_options_changed(self):
# self.load_preferences()
def _on_tray_icon_activate(self, icon):
"""Try icon has been clicked in system tray"""
if self.is_active():
self.minimize_window()
else:
self.restore_window()
#=============================================================
# viewer callbacks
def _on_window_request(self, viewer, action):
"""Callback for requesting an action from the main window"""
if action == "minimize":
self.minimize_window()
elif action == "restore":
self.restore_window()
else:
raise Exception("unknown window request: " + str(action))
#=================================================
# Window manipulation
def minimize_window(self):
"""Minimize the window (block until window is minimized"""
if self._iconified:
return
# TODO: add timer in case minimize fails
def on_window_state(window, event):
if event.new_window_state & gtk.gdk.WINDOW_STATE_ICONIFIED:
gtk.main_quit()
sig = self.connect("window-state-event", on_window_state)
self.iconify()
gtk.main()
self.disconnect(sig)
def restore_window(self):
"""Restore the window from minimization"""
self.deiconify()
self.present()
def on_new_window(self):
"""Open a new window"""
win = self._app.new_window()
notebook = self.get_notebook()
if notebook:
self._app.ref_notebook(notebook)
win.set_notebook(notebook)
#==============================================
# Application preferences
def load_preferences(self, first_open=False):
"""Load preferences"""
p = self._app.pref
# notebook
window_size = p.get("window", "window_size",
default=keepnote.DEFAULT_WINDOW_SIZE)
window_maximized = p.get("window", "window_maximized", default=True)
self.setup_systray()
use_systray = p.get("window", "use_systray", default=True)
# window config for first open
if first_open:
self.resize(*window_size)
if window_maximized:
self.maximize()
minimize = p.get("window", "minimize_on_start", default=False)
if use_systray and minimize:
self.iconify()
# config window
skip = p.get("window", "skip_taskbar", default=False)
if use_systray:
self.set_property("skip-taskbar-hint", skip)
self.set_keep_above(p.get("window", "keep_above", default=False))
if p.get("window", "stick", default=False):
self.stick()
else:
self.unstick()
# other window wide properties
self._recent_notebooks = p.get("recent_notebooks", default=[])
self.set_recent_notebooks_menu(self._recent_notebooks)
self._uimanager.set_force_stock(
p.get("look_and_feel", "use_stock_icons", default=False))
self.viewer.load_preferences(self._app.pref, first_open)
def save_preferences(self):
"""Save preferences"""
p = self._app.pref
# save window preferences
p.set("window", "window_maximized", self._maximized)
p.set("recent_notebooks", self._recent_notebooks)
# let viewer save preferences
self.viewer.save_preferences(self._app.pref)
def set_recent_notebooks_menu(self, recent_notebooks):
"""Set the recent notebooks in the file menu"""
menu = self._uimanager.get_widget("/main_menu_bar/File/Open Recent Notebook")
# init menu
if menu.get_submenu() is None:
submenu = gtk.Menu()
submenu.show()
menu.set_submenu(submenu)
menu = menu.get_submenu()
# clear menu
menu.foreach(lambda x: menu.remove(x))
def make_filename(filename, maxsize=30):
if len(filename) > maxsize:
base = os.path.basename(filename)
pre = max(maxsize - len(base), 10)
return os.path.join(filename[:pre] + u"...", base)
else:
return filename
def make_func(filename):
return lambda w: self.open_notebook(filename)
# populate menu
for i, notebook in enumerate(recent_notebooks):
item = gtk.MenuItem(u"%d. %s" % (i+1, make_filename(notebook)))
item.connect("activate", make_func(notebook))
item.show()
menu.append(item)
def add_recent_notebook(self, filename):
"""Add recent notebook"""
if filename in self._recent_notebooks:
self._recent_notebooks.remove(filename)
self._recent_notebooks = [filename] + \
self._recent_notebooks[:keepnote.gui.MAX_RECENT_NOTEBOOKS]
self.set_recent_notebooks_menu(self._recent_notebooks)
#=============================================
# Notebook open/save/close UI
def on_new_notebook(self):
"""Launches New NoteBook dialog"""
dialog = FileChooserDialog(
_("New Notebook"), self,
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(_("Cancel"), gtk.RESPONSE_CANCEL,
_("New"), gtk.RESPONSE_OK),
app=self._app,
persistent_path="new_notebook_path")
response = dialog.run()
if response == gtk.RESPONSE_OK:
# create new notebook
if dialog.get_filename():
self.new_notebook(unicode_gtk(dialog.get_filename()))
dialog.destroy()
def on_open_notebook(self):
"""Launches Open NoteBook dialog"""
dialog = gtk.FileChooserDialog(
_("Open Notebook"), self,
action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
buttons=(_("Cancel"), gtk.RESPONSE_CANCEL,
_("Open"), gtk.RESPONSE_OK))
def on_folder_changed(filechooser):
folder = unicode_gtk(filechooser.get_current_folder())
if os.path.exists(os.path.join(folder, notebooklib.PREF_FILE)):
filechooser.response(gtk.RESPONSE_OK)
dialog.connect("current-folder-changed", on_folder_changed)
path = self._app.get_default_path("new_notebook_path")
if os.path.exists(path):
dialog.set_current_folder(path)
file_filter = gtk.FileFilter()
file_filter.add_pattern("*.nbk")
file_filter.set_name(_("Notebook (*.nbk)"))
dialog.add_filter(file_filter)
file_filter = gtk.FileFilter()
file_filter.add_pattern("*")
file_filter.set_name(_("All files (*.*)"))
dialog.add_filter(file_filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
path = dialog.get_current_folder()
if path:
self._app.pref.set("default_paths", "new_notebook_path",
os.path.dirname(path))
notebook_file = unicode_gtk(dialog.get_filename())
if notebook_file:
self.open_notebook(notebook_file)
dialog.destroy()
def _on_close(self):
"""Callback for window close"""
try:
# TODO: decide if a clipboard action is needed before
# closing down.
#clipboard = self.get_clipboard(selection=CLIPBOARD_NAME)
#clipboard.set_can_store(None)
#clipboard.store()
self._app.save()
self.close_notebook()
if self._tray_icon:
# turn off try icon
self._tray_icon.set_property("visible", False)
except Exception, e:
self.error("Error while closing", e, sys.exc_info()[2])
return False
def close(self):
"""Close the window"""
self._on_close()
self.emit("delete-event", None)
self.destroy()
def on_quit(self):
"""Quit the application"""
self._app.save()
self._app.quit()
#===============================================
# Notebook actions
def save_notebook(self, silent=False):
"""Saves the current notebook"""
try:
# save window information for all notebooks associated with this
# window
for notebook in self.get_all_notebooks():
p = notebook.pref.get("windows", "ids", define=True)
p[self._winid] = {
"viewer_type": self.viewer.get_name(),
"viewerid": self.viewer.get_id()}
# let the viewer save its information
self.viewer.save()
self.set_status(_("Notebook saved"))
except Exception, e:
if not silent:
self.error(_("Could not save notebook."), e, sys.exc_info()[2])
self.set_status(_("Error saving notebook"))
return
def reload_notebook(self):
"""Reload the current NoteBook"""
notebook = self.viewer.get_notebook()
if notebook is None:
self.error(_("Reloading only works when a notebook is open."))
return
filename = notebook.get_path()
self._app.close_all_notebook(notebook, False)
self.open_notebook(filename)
self.set_status(_("Notebook reloaded"))
def new_notebook(self, filename):
"""Creates and opens a new NoteBook"""
if self.viewer.get_notebook() is not None:
self.close_notebook()
try:
# make sure filename is unicode
filename = ensure_unicode(filename, FS_ENCODING)
notebook = notebooklib.NoteBook(filename)
notebook.create()
notebook.close()
self.set_status(_("Created '%s'") % notebook.get_title())
except NoteBookError, e:
self.error(_("Could not create new notebook."), e, sys.exc_info()[2])
self.set_status("")
return None
return self.open_notebook(filename, new=True)
def _load_notebook(self, filename):
"""Loads notebook in background with progress bar"""
notebook = self._app.get_notebook(filename, self)
if notebook is None:
return None
# check for indexing
# TODO: is this the best place for checking?
# There is a difference between normal incremental indexing
# and indexing due version updating.
# incremental updating (checking a few files that have changed on
# disk) should be done within notebook.load().
# Whole notebook re-indexing, triggered by version upgrade
# should be done separately, and with a different wait dialog
# clearly indicating that notebook loading is going to take
# longer than usual.
if notebook.index_needed():
self.update_index(notebook)
return notebook
def _restore_windows(self, notebook, open_here=True):
"""
Restore multiple windows for notebook
open_here -- if True, will open notebook in this window
Cases:
1. if notebook has no saved windows, just open notebook in this window
2. if notebook has 1 saved window
if open_here:
open it in this window
else:
if this window has no opened notebooks,
reassign its ids to the notebook and open it here
else
reassign notebooks saved ids to this window and viewer
3. if notebook has >1 saved windows, open them in their own windows
if this window has no notebook, reassign its id to one of the
saved ids.
"""
# init window lookup
win_lookup = dict((w.get_id(), w) for w in
self._app.get_windows())
def open_in_window(winid, viewerid, notebook):
win = win_lookup.get(winid, None)
if win is None:
# open new window
win = self._app.new_window()
win_lookup[winid] = win
win._winid = winid
if viewerid:
win.get_viewer().set_id(viewerid)
# set notebook
self._app.ref_notebook(notebook)
win.set_notebook(notebook)
# find out how many windows this notebook had last time
# init viewer if needed
windows = notebook.pref.get("windows", "ids", define=True)
notebook.pref.get("viewers", "ids", define=True)
if len(windows) == 0:
# no presistence info found, just open notebook in this window
self.set_notebook(notebook)
elif len(windows) == 1:
# restore a single window
winid, winpref = windows.items()[0]
viewerid = winpref.get("viewerid", None)
if viewerid is not None:
if len(self.get_all_notebooks()) == 0:
# no notebooks are open, so it is ok to reassign
# the viewer's id to match the notebook pref
self._winid = winid
self.viewer.set_id(viewerid)
self.set_notebook(notebook)
elif open_here:
# TODO: needs more testing
# notebooks are open, so reassign the notebook's pref to
# match the existing viewer
notebook.pref.set("windows", "ids",
{self._winid:
{"viewerid": self.viewer.get_id(),
"viewer_type": self.viewer.get_name()}})
notebook.pref.set(
"viewers", "ids", self.viewer.get_id(),
notebook.pref.get("viewers", "ids", viewerid,
define=True))
del notebook.pref.get("viewers", "ids")[viewerid]
self.set_notebook(notebook)
else:
# open in whatever window the notebook wants
open_in_window(winid, viewerid, notebook)
self._app.unref_notebook(notebook)
elif len(windows) > 1:
# get different kinds of window ids
restoring_ids = set(windows.keys())
new_ids = restoring_ids - set(win_lookup.keys())
if len(self.get_all_notebooks()) == 0:
# special case: if no notebooks opened, then make sure
# to reuse this window
if self._winid not in restoring_ids:
self._winid = iter(restoring_ids).next()
restoring_ids.remove(self._winid)
viewerid = windows[self._winid].get("viewerid", None)
if viewerid:
self.viewer.set_id(viewerid)
self.set_notebook(notebook)
# restore remaining windows
while len(restoring_ids) > 0:
winid = restoring_ids.pop()
viewerid = windows[winid].get("viewerid", None)
open_in_window(winid, viewerid, notebook)
self._app.unref_notebook(notebook)
def open_notebook(self, filename, new=False, open_here=True):
"""Opens a new notebook"""
try:
filename = notebooklib.normalize_notebook_dirname(
filename, longpath=False)
except Exception, e:
            self.error(_("Could not find notebook '%s'.") % filename, e,
sys.exc_info()[2])
notebook = None
else:
notebook = self._load_notebook(filename)
if notebook is None:
return
# setup notebook
self._restore_windows(notebook, open_here=open_here)
if not new:
self.set_status(_("Loaded '%s'") % notebook.get_title())
self.update_title()
# save notebook to recent notebooks
self.add_recent_notebook(filename)
return notebook
def close_notebook(self, notebook=None):
"""Close the NoteBook"""
if notebook is None:
notebook = self.get_notebook()
self.viewer.close_notebook(notebook)
self.set_status(_("Notebook closed"))
def _on_close_notebook(self, notebook):
"""Callback when notebook is closing"""
pass
def set_notebook(self, notebook):
"""Set the NoteBook for the window"""
self.viewer.set_notebook(notebook)
def update_index(self, notebook=None, clear=False):
"""Update notebook index"""
if notebook is None:
notebook = self.viewer.get_notebook()
if notebook is None:
return
def update(task):
# erase database first
# NOTE: I do this right now so that corrupt databases can be
# cleared out of the way.
if clear:
notebook.clear_index()
try:
for node in notebook.index_all():
# terminate if search is canceled
if task.aborted():
break
except Exception, e:
self.error(_("Error during index"), e, sys.exc_info()[2])
task.finish()
# launch task
self.wait_dialog(_("Indexing notebook"), _("Indexing..."),
tasklib.Task(update))
#=====================================================
# viewer callbacks
def update_title(self, node=None):
"""Set the modification state of the notebook"""
notebook = self.viewer.get_notebook()
if notebook is None:
self.set_title(keepnote.PROGRAM_NAME)
else:
title = notebook.get_attr("title", u"")
if node is None:
node = self.get_current_page()
if node is not None:
title += u": " + node.get_attr("title", "")
modified = notebook.save_needed()
if modified:
self.set_title(u"* %s" % title)
self.set_status(_("Notebook modified"))
else:
self.set_title(title)
def _on_current_node(self, viewer, node):
"""Callback for when viewer changes the current node"""
self.update_title(node)
def _on_viewer_modified(self, viewer, modified):
"""Callback for when viewer has a modified notebook"""
self.update_title()
#===========================================================
# page and folder actions
def get_selected_nodes(self):
"""
Returns list of selected nodes
"""
return self.viewer.get_selected_nodes()
def confirm_delete_nodes(self, nodes):
"""Confirm whether nodes should be deleted"""
# TODO: move to app?
# TODO: add note names to dialog
# TODO: assume one node is selected
# could make this a stand alone function/dialog box
for node in nodes:
if node.get_attr("content_type") == notebooklib.CONTENT_TYPE_TRASH:
self.error(_("The Trash folder cannot be deleted."), None)
return False
            if node.get_parent() is None:
self.error(_("The top-level folder cannot be deleted."), None)
return False
if len(nodes) > 1 or len(nodes[0].get_children()) > 0:
message = _("Do you want to delete this note and all of its children?")
else:
message = _("Do you want to delete this note?")
return self._app.ask_yes_no(message, _("Delete Note"),
parent=self.get_toplevel())
def on_empty_trash(self):
"""Empty Trash folder in NoteBook"""
if self.get_notebook() is None:
return
try:
self.get_notebook().empty_trash()
except NoteBookError, e:
self.error(_("Could not empty trash."), e, sys.exc_info()[2])
#=================================================
# action callbacks
def on_view_node_external_app(self, app, node=None, kind=None):
"""View a node with an external app"""
self._app.save()
# determine node to view
if node is None:
nodes = self.get_selected_nodes()
if len(nodes) == 0:
self.emit("error", _("No notes are selected."), None, None)
return
node = nodes[0]
try:
self._app.run_external_app_node(app, node, kind)
except KeepNoteError, e:
self.emit("error", e.msg, e, sys.exc_info()[2])
#=====================================================
# Cut/copy/paste
# forward cut/copy/paste to the correct widget
def on_cut(self):
"""Cut callback"""
widget = self.get_focus()
if gobject.signal_lookup("cut-clipboard", widget) != 0:
widget.emit("cut-clipboard")
def on_copy(self):
"""Copy callback"""
widget = self.get_focus()
if gobject.signal_lookup("copy-clipboard", widget) != 0:
widget.emit("copy-clipboard")
def on_copy_tree(self):
"""Copy tree callback"""
widget = self.get_focus()
if gobject.signal_lookup("copy-tree-clipboard", widget) != 0:
widget.emit("copy-tree-clipboard")
def on_paste(self):
"""Paste callback"""
widget = self.get_focus()
if gobject.signal_lookup("paste-clipboard", widget) != 0:
widget.emit("paste-clipboard")
def on_undo(self):
"""Undo callback"""
self.viewer.undo()
def on_redo(self):
"""Redo callback"""
self.viewer.redo()
#===================================================
# Misc.
def view_error_log(self):
"""View error in text editor"""
# windows locks open files
# therefore we should copy error log before viewing it
try:
filename = os.path.realpath(keepnote.get_user_error_log())
filename2 = filename + u".bak"
shutil.copy(filename, filename2)
# use text editor to view error log
self._app.run_external_app("text_editor", filename2)
except Exception, e:
self.error(_("Could not open error log") + ":\n" + str(e),
e, sys.exc_info()[2])
def view_config_files(self):
"""View config folder in a file explorer"""
try:
            # use file explorer to view the preference directory
filename = keepnote.get_user_pref_dir()
self._app.run_external_app("file_explorer", filename)
except Exception, e:
            self.error(_("Could not open preference files") + ":\n" + str(e),
e, sys.exc_info()[2])
#==================================================
# Help/about dialog
def on_about(self):
"""Display about dialog"""
def func(dialog, link, data):
try:
self._app.open_webpage(link)
except KeepNoteError, e:
self.error(e.msg, e, sys.exc_info()[2])
gtk.about_dialog_set_url_hook(func, None)
about = gtk.AboutDialog()
about.set_name(keepnote.PROGRAM_NAME)
about.set_version(keepnote.PROGRAM_VERSION_TEXT)
about.set_copyright(keepnote.COPYRIGHT)
about.set_logo(get_resource_pixbuf("keepnote-icon.png"))
about.set_website(keepnote.WEBSITE)
about.set_license(keepnote.LICENSE_NAME)
about.set_translator_credits(keepnote.TRANSLATOR_CREDITS)
license_file = keepnote.get_resource(u"rc", u"COPYING")
if os.path.exists(license_file):
about.set_license(open(license_file).read())
#about.set_authors(["Matt Rasmussen <[email protected]>"])
about.set_transient_for(self)
about.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
about.connect("response", lambda d,r: about.destroy())
about.show()
#===========================================
# Messages, warnings, errors UI/dialogs
def set_status(self, text, bar="status"):
"""Sets a status message in the status bar"""
if bar == "status":
self.status_bar.pop(0)
self.status_bar.push(0, text)
elif bar == "stats":
self.stats_bar.pop(0)
self.stats_bar.push(0, text)
else:
raise Exception("unknown bar '%s'" % bar)
def error(self, text, error=None, tracebk=None):
"""Display an error message"""
self._app.error(text, error, tracebk)
def wait_dialog(self, title, text, task, cancel=True):
"""Display a wait dialog"""
# NOTE: pause autosave while performing long action
self._app.pause_auto_save(True)
dialog = dialog_wait.WaitDialog(self)
dialog.show(title, text, task, cancel=cancel)
self._app.pause_auto_save(False)
#================================================
# Menus
def get_actions(self):
actions = map(lambda x: Action(*x),
[
("File", None, _("_File")),
("New Notebook", gtk.STOCK_NEW, _("_New Notebook..."),
"", _("Start a new notebook"),
lambda w: self.on_new_notebook()),
("Open Notebook", gtk.STOCK_OPEN, _("_Open Notebook..."),
"<control>O", _("Open an existing notebook"),
lambda w: self.on_open_notebook()),
("Open Recent Notebook", gtk.STOCK_OPEN,
_("Open Re_cent Notebook")),
("Reload Notebook", gtk.STOCK_REVERT_TO_SAVED,
_("_Reload Notebook"),
"", _("Reload the current notebook"),
lambda w: self.reload_notebook()),
("Save Notebook", gtk.STOCK_SAVE, _("_Save Notebook"),
"<control>S", _("Save the current notebook"),
lambda w: self._app.save()),
("Close Notebook", gtk.STOCK_CLOSE, _("_Close Notebook"),
"", _("Close the current notebook"),
lambda w: self._app.close_all_notebook(self.get_notebook())),
("Export", None, _("_Export Notebook")),
("Import", None, _("_Import Notebook")),
("Quit", gtk.STOCK_QUIT, _("_Quit"),
"<control>Q", _("Quit KeepNote"),
lambda w: self.on_quit()),
#=======================================
("Edit", None, _("_Edit")),
("Undo", gtk.STOCK_UNDO, None,
"<control>Z", None,
lambda w: self.on_undo()),
("Redo", gtk.STOCK_REDO, None,
"<control><shift>Z", None,
lambda w: self.on_redo()),
("Cut", gtk.STOCK_CUT, None,
"<control>X", None,
lambda w: self.on_cut()),
("Copy", gtk.STOCK_COPY, None,
"<control>C", None,
lambda w: self.on_copy()),
("Copy Tree", gtk.STOCK_COPY, None,
"<control><shift>C", None,
lambda w: self.on_copy_tree()),
("Paste", gtk.STOCK_PASTE, None,
"<control>V", None,
lambda w: self.on_paste()),
("Empty Trash", gtk.STOCK_DELETE, _("Empty _Trash"),
"", None,
lambda w: self.on_empty_trash()),
#========================================
("Search", None, _("_Search")),
("Search All Notes", gtk.STOCK_FIND, _("_Search All Notes"),
"<control>K", None,
lambda w: self.search_box.grab_focus()),
#=======================================
("Go", None, _("_Go")),
#========================================
("View", None, _("_View")),
("View Note in File Explorer", gtk.STOCK_OPEN,
_("View Note in File Explorer"),
"", None,
lambda w: self.on_view_node_external_app("file_explorer",
kind="dir")),
("View Note in Text Editor", gtk.STOCK_OPEN,
_("View Note in Text Editor"),
"", None,
lambda w: self.on_view_node_external_app("text_editor",
kind="page")),
("View Note in Web Browser", gtk.STOCK_OPEN,
_("View Note in Web Browser"),
"", None,
lambda w: self.on_view_node_external_app("web_browser",
kind="page")),
("Open File", gtk.STOCK_OPEN,
_("_Open File"),
"", None,
lambda w: self.on_view_node_external_app("file_launcher",
kind="file")),
#=========================================
("Tools", None, _("_Tools")),
("Update Notebook Index", None, _("_Update Notebook Index"),
"", None,
lambda w: self.update_index(clear=True)),
("KeepNote Preferences", gtk.STOCK_PREFERENCES, _("_Preferences"),
"", None,
lambda w: self._app.app_options_dialog.show(self)),
#=========================================
("Window", None, _("Window")),
("New Window", None, _("New Window"),
"", _("Open a new window"),
lambda w: self.on_new_window()),
("Close Window", None, _("Close Window"),
"", _("Close window"),
lambda w: self.close()),
#=========================================
("Help", None, _("_Help")),
("View Error Log...", gtk.STOCK_DIALOG_ERROR, _("View _Error Log..."),
"", None,
lambda w: self.view_error_log()),
("View Preference Files...", None, _("View Preference Files..."), "", None,
lambda w: self.view_config_files()),
("Drag and Drop Test...", None, _("Drag and Drop Test..."),
"", None,
lambda w: self.drag_test.on_drag_and_drop_test()),
("About", gtk.STOCK_ABOUT, _("_About"),
"", None,
lambda w: self.on_about())
]) + [
Action("Main Spacer Tool"),
Action("Search Box Tool", None, None, "", _("Search All Notes")),
Action("Search Button Tool", gtk.STOCK_FIND, None, "",
_("Search All Notes"),
lambda w: self.search_box.on_search_nodes())]
# make sure recent notebooks is always visible
recent = [x for x in actions
if x.get_property("name") == "Open Recent Notebook"][0]
recent.set_property("is-important", True)
return actions
def setup_menus(self, uimanager):
pass
def get_ui(self):
return ["""
<ui>
<!-- main window menu bar -->
<menubar name="main_menu_bar">
<menu action="File">
<menuitem action="New Notebook"/>
<placeholder name="Viewer"/>
<placeholder name="New"/>
<separator/>
<menuitem action="Open Notebook"/>
<menuitem action="Open Recent Notebook"/>
<menuitem action="Save Notebook"/>
<menuitem action="Close Notebook"/>
<menuitem action="Reload Notebook"/>
<menuitem action="Empty Trash"/>
<separator/>
<menu action="Export" />
<menu action="Import" />
<separator/>
<placeholder name="Extensions"/>
<separator/>
<menuitem action="Quit"/>
</menu>
<menu action="Edit">
<menuitem action="Undo"/>
<menuitem action="Redo"/>
<separator/>
<menuitem action="Cut"/>
<menuitem action="Copy"/>
<menuitem action="Copy Tree"/>
<menuitem action="Paste"/>
<separator/>
<placeholder name="Viewer"/>
<separator/>
<menuitem action="KeepNote Preferences"/>
</menu>
<menu action="Search">
<menuitem action="Search All Notes"/>
<placeholder name="Viewer"/>
</menu>
<placeholder name="Viewer"/>
<menu action="Go">
<placeholder name="Viewer"/>
</menu>
<menu action="Tools">
<placeholder name="Viewer"/>
<menuitem action="Update Notebook Index"/>
<placeholder name="Extensions"/>
</menu>
<menu action="Window">
<menuitem action="New Window"/>
<menuitem action="Close Window"/>
<placeholder name="Viewer Window"/>
</menu>
<menu action="Help">
<menuitem action="View Error Log..."/>
<menuitem action="View Preference Files..."/>
<menuitem action="Drag and Drop Test..."/>
<separator/>
<menuitem action="About"/>
</menu>
</menubar>
<!-- main window tool bar -->
<toolbar name="main_tool_bar">
<placeholder name="Viewer"/>
<toolitem action="Main Spacer Tool"/>
<toolitem action="Search Box Tool"/>
<toolitem action="Search Button Tool"/>
</toolbar>
<!-- popup menus -->
<menubar name="popup_menus">
</menubar>
</ui>
"""]
def get_actions_statusicon(self):
"""Set actions for StatusIcon menu and return."""
actions = map(lambda x: Action(*x),
[
("KeepNote Preferences", gtk.STOCK_PREFERENCES, _("_Preferences"),
"", None,
lambda w: self._app.app_options_dialog.show(self)),
("Quit", gtk.STOCK_QUIT, _("_Quit"),
"<control>Q", _("Quit KeepNote"),
lambda w: self.close()),
("About", gtk.STOCK_ABOUT, _("_About"),
"", None,
lambda w: self.on_about())
])
return actions
def get_ui_statusicon(self):
"""Create UI xml-definition for StatusIcon menu and return."""
return ["""
<ui>
<!-- statusicon_menu -->
<popup name="statusicon_menu">
<menuitem action="KeepNote Preferences"/>
<menuitem action="About"/>
<separator/>
<menuitem action="Quit"/>
</popup>
</ui>
"""]
def make_menubar(self):
"""Initialize the menu bar"""
#===============================
# ui manager
self._actiongroup = gtk.ActionGroup('MainWindow')
self._uimanager.insert_action_group(self._actiongroup, 0)
# setup menus
add_actions(self._actiongroup, self.get_actions())
for s in self.get_ui():
self._uimanager.add_ui_from_string(s)
self.setup_menus(self._uimanager)
# return menu bar
menubar = self._uimanager.get_widget('/main_menu_bar')
return menubar
def make_toolbar(self):
# configure toolbar
toolbar = self._uimanager.get_widget('/main_tool_bar')
toolbar.set_orientation(gtk.ORIENTATION_HORIZONTAL)
toolbar.set_style(gtk.TOOLBAR_ICONS)
toolbar.set_border_width(0)
try:
# NOTE: if this version of GTK doesn't have this size, then
# ignore it
toolbar.set_property("icon-size", gtk.ICON_SIZE_SMALL_TOOLBAR)
except:
pass
# separator (is there a better way to do this?)
spacer = self._uimanager.get_widget("/main_tool_bar/Main Spacer Tool")
spacer.remove(spacer.child)
spacer.set_expand(True)
# search box
self.search_box = SearchBox(self)
self.search_box.show()
w = self._uimanager.get_widget("/main_tool_bar/Search Box Tool")
w.remove(w.child)
w.add(self.search_box)
return toolbar
def make_statusicon_menu(self):
"""Initialize the StatusIcon menu."""
#===============================
# ui manager
self._actiongroup_statusicon = gtk.ActionGroup('StatusIcon')
self._tray_icon.uimanager = gtk.UIManager()
self._tray_icon.uimanager.insert_action_group(
self._actiongroup_statusicon, 0)
# setup menu
add_actions(self._actiongroup_statusicon,
self.get_actions_statusicon())
for s in self.get_ui_statusicon():
self._tray_icon.uimanager.add_ui_from_string(s)
self.setup_menus(self._tray_icon.uimanager)
# return menu
statusicon_menu = self._tray_icon.uimanager.get_widget(
'/statusicon_menu')
return statusicon_menu
gobject.type_register(KeepNoteWindow)
gobject.signal_new("error", KeepNoteWindow, gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE, (str, object, object))
class SearchBox (gtk.Entry):
def __init__(self, window):
gtk.Entry.__init__(self)
self._window = window
self.connect("changed", self._on_search_box_text_changed)
self.connect("activate", lambda w: self.on_search_nodes())
self.search_box_list = gtk.ListStore(gobject.TYPE_STRING,
gobject.TYPE_STRING)
self.search_box_completion = gtk.EntryCompletion()
self.search_box_completion.connect("match-selected",
self._on_search_box_completion_match)
self.search_box_completion.set_match_func(lambda c, k, i: True)
self.search_box_completion.set_model(self.search_box_list)
self.search_box_completion.set_text_column(0)
self.set_completion(self.search_box_completion)
def on_search_nodes(self):
"""Search nodes"""
# do nothing if notebook is not defined
if not self._window.get_notebook():
return
# TODO: add parsing grammar
# get words
words = [x.lower() for x in
unicode_gtk(self.get_text()).strip().split()]
# clear listview
self._window.get_viewer().start_search_result()
# queue for sending results between threads
from threading import Lock
from Queue import Queue
queue = Queue()
lock = Lock() # a mutex for the notebook (protect sqlite)
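        # The search runs as a background task (see search() below): the
        # worker thread pushes matching nodes onto 'queue', while gui_update(),
        # scheduled with gobject.idle_add(), drains the queue on the GTK main
        # loop and adds results to the viewer. 'alldone' is released once the
        # None sentinel is consumed, letting the worker wait for the GUI side
        # to finish before the task completes.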
# update gui with search result
def search(task):
alldone = Lock() # ensure gui and background sync up at end
alldone.acquire()
def gui_update():
lock.acquire()
more = True
try:
maxstep = 20
for i in xrange(maxstep):
# check if search is aborted
if task.aborted():
more = False
break
# skip if queue is empty
if queue.empty():
break
node = queue.get()
# no more nodes left, finish
if node is None:
more = False
break
# add result to gui
self._window.get_viewer().add_search_result(node)
except Exception, e:
self._window.error(_("Unexpected error"), e)
more = False
finally:
lock.release()
if not more:
alldone.release()
return more
gobject.idle_add(gui_update)
# init search
notebook = self._window.get_notebook()
try:
nodes = (notebook.get_node_by_id(nodeid)
for nodeid in
notebook.search_node_contents(" ".join(words)))
except:
keepnote.log_error()
# do search in thread
try:
lock.acquire()
for node in nodes:
if task.aborted():
break
lock.release()
if node:
queue.put(node)
lock.acquire()
lock.release()
queue.put(None)
except Exception, e:
                self._window.error(_("Unexpected error"), e)
# wait for gui thread to finish
# NOTE: if task is aborted, then gui_update stops itself for
# some reason, thus no need to acquire alldone.
if not task.aborted():
alldone.acquire()
# launch task
task = tasklib.Task(search)
self._window.wait_dialog(
_("Searching notebook"), _("Searching..."), task)
if task.exc_info()[0]:
e, t, tr = task.exc_info()
keepnote.log_error(e, tr)
self._window.get_viewer().end_search_result()
def focus_on_search_box(self):
"""Place cursor in search box"""
self.grab_focus()
def _on_search_box_text_changed(self, url_text):
self.search_box_update_completion()
def search_box_update_completion(self):
if not self._window.get_notebook():
return
text = unicode_gtk(self.get_text())
self.search_box_list.clear()
if len(text) > 0:
results = self._window.get_notebook().search_node_titles(text)[:10]
for nodeid, title in results:
self.search_box_list.append([title, nodeid])
def _on_search_box_completion_match(self, completion, model, iter):
if not self._window.get_notebook():
return
nodeid = model[iter][1]
node = self._window.get_notebook().get_node_by_id(nodeid)
if node:
self._window.get_viewer().goto_node(node, False)
| gpl-2.0 | -3,410,582,267,432,658,000 | 30.576485 | 87 | 0.520195 | false | 4.294175 | false | false | false |
heytcass/homeassistant-config | deps/cherrypy/_cpreqbody.py | 1 | 37427 | """Request body processing for CherryPy.
.. versionadded:: 3.2
Application authors have complete control over the parsing of HTTP request
entities. In short,
:attr:`cherrypy.request.body<cherrypy._cprequest.Request.body>`
is now always set to an instance of
:class:`RequestBody<cherrypy._cpreqbody.RequestBody>`,
and *that* class is a subclass of :class:`Entity<cherrypy._cpreqbody.Entity>`.
When an HTTP request includes an entity body, it is often desirable to
provide that information to applications in a form other than the raw bytes.
Different content types demand different approaches. Examples:
* For a GIF file, we want the raw bytes in a stream.
* An HTML form is better parsed into its component fields, and each text field
decoded from bytes to unicode.
* A JSON body should be deserialized into a Python dict or list.
When the request contains a Content-Type header, the media type is used as a
key to look up a value in the
:attr:`request.body.processors<cherrypy._cpreqbody.Entity.processors>` dict.
If the full media
type is not found, then the major type is tried; for example, if no processor
is found for the 'image/jpeg' type, then we look for a processor for the
'image' types altogether. If neither the full type nor the major type has a
matching processor, then a default processor is used
(:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>`). For most
types, this means no processing is done, and the body is left unread as a
raw byte stream. Processors are configurable in an 'on_start_resource' hook.
Some processors, especially those for the 'text' types, attempt to decode bytes
to unicode. If the Content-Type request header includes a 'charset' parameter,
this is used to decode the entity. Otherwise, one or more default charsets may
be attempted, although this decision is up to each processor. If a processor
successfully decodes an Entity or Part, it should set the
:attr:`charset<cherrypy._cpreqbody.Entity.charset>` attribute
on the Entity or Part to the name of the successful charset, so that
applications can easily re-encode or transcode the value if they wish.
If the Content-Type of the request entity is of major type 'multipart', then
the above parsing process, and possibly a decoding process, is performed for
each part.
For both the full entity and multipart parts, a Content-Disposition header may
be used to fill :attr:`name<cherrypy._cpreqbody.Entity.name>` and
:attr:`filename<cherrypy._cpreqbody.Entity.filename>` attributes on the
request.body or the Part.
.. _custombodyprocessors:
Custom Processors
=================
You can add your own processors for any specific or major MIME type. Simply add
it to the :attr:`processors<cherrypy._cprequest.Entity.processors>` dict in a
hook/tool that runs at ``on_start_resource`` or ``before_request_body``.
Here's the built-in JSON tool for an example::
def json_in(force=True, debug=False):
request = cherrypy.serving.request
def json_processor(entity):
\"""Read application/json data into request.json.\"""
if not entity.headers.get("Content-Length", ""):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
try:
request.json = json_decode(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
if force:
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an application/json content type')
request.body.processors['application/json'] = json_processor
We begin by defining a new ``json_processor`` function to stick in the
``processors`` dictionary. All processor functions take a single argument,
the ``Entity`` instance they are to process. It will be called whenever a
request is received (for those URI's where the tool is turned on) which
has a ``Content-Type`` of "application/json".
First, it checks for a valid ``Content-Length`` (raising 411 if not valid),
then reads the remaining bytes on the socket. The ``fp`` object knows its
own length, so it won't hang waiting for data that never arrives. It will
return when all data has been read. Then, we decode those bytes using
Python's built-in ``json`` module, and stick the decoded result onto
``request.json`` . If it cannot be decoded, we raise 400.
If the "force" argument is True (the default), the ``Tool`` clears the
``processors`` dict so that request entities of other ``Content-Types``
aren't parsed at all. Since there's no entry for those invalid MIME
types, the ``default_proc`` method of ``cherrypy.request.body`` is
called. But this does nothing by default (usually to provide the page
handler an opportunity to handle it.)
But in our case, we want to raise 415, so we replace
``request.body.default_proc``
with the error (``HTTPError`` instances, when called, raise themselves).
If we were defining a custom processor, we can do so without making a ``Tool``.
Just add the config entry::
request.body.processors = {'application/json': json_processor}
Note that you can only replace the ``processors`` dict wholesale this way,
not update the existing one.
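If you only need to add or override a single entry, do it from a hook instead.
A minimal sketch (the tool name below is arbitrary, not part of CherryPy)::
    def _add_json_processor():
        request = cherrypy.serving.request
        request.body.processors['application/json'] = json_processor
    cherrypy.tools.add_json_processor = cherrypy.Tool(
        'before_request_body', _add_json_processor)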
"""
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = 8192
import re
import sys
import tempfile
try:
from urllib import unquote_plus
except ImportError:
def unquote_plus(bs):
"""Bytes version of urllib.parse.unquote_plus."""
bs = bs.replace(ntob('+'), ntob(' '))
atoms = bs.split(ntob('%'))
for i in range(1, len(atoms)):
item = atoms[i]
try:
pct = int(item[:2], 16)
atoms[i] = bytes([pct]) + item[2:]
except ValueError:
pass
return ntob('').join(atoms)
import cherrypy
from cherrypy._cpcompat import text_or_bytes, ntob, ntou
from cherrypy.lib import httputil
# ------------------------------- Processors -------------------------------- #
def process_urlencoded(entity):
"""Read application/x-www-form-urlencoded data into entity.params."""
qs = entity.fp.read()
for charset in entity.attempt_charsets:
try:
params = {}
for aparam in qs.split(ntob('&')):
for pair in aparam.split(ntob(';')):
if not pair:
continue
atoms = pair.split(ntob('='), 1)
if len(atoms) == 1:
atoms.append(ntob(''))
key = unquote_plus(atoms[0]).decode(charset)
value = unquote_plus(atoms[1]).decode(charset)
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
except UnicodeDecodeError:
pass
else:
entity.charset = charset
break
else:
raise cherrypy.HTTPError(
400, "The request entity could not be decoded. The following "
"charsets were attempted: %s" % repr(entity.attempt_charsets))
# Now that all values have been successfully parsed and decoded,
# apply them to the entity.params dict.
for key, value in params.items():
if key in entity.params:
if not isinstance(entity.params[key], list):
entity.params[key] = [entity.params[key]]
entity.params[key].append(value)
else:
entity.params[key] = value
def process_multipart(entity):
"""Read all multipart parts into entity.parts."""
ib = ""
if 'boundary' in entity.content_type.params:
# http://tools.ietf.org/html/rfc2046#section-5.1.1
# "The grammar for parameters on the Content-type field is such that it
# is often necessary to enclose the boundary parameter values in quotes
# on the Content-type line"
ib = entity.content_type.params['boundary'].strip('"')
if not re.match("^[ -~]{0,200}[!-~]$", ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
ib = ('--' + ib).encode('ascii')
# Find the first marker
while True:
b = entity.readline()
if not b:
return
b = b.strip()
if b == ib:
break
# Read all parts
while True:
part = entity.part_class.from_fp(entity.fp, ib)
entity.parts.append(part)
part.process()
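        # entity.fp.done is set once the closing "--boundary--" marker has
        # been consumed, i.e. there are no further parts to read.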
if part.fp.done:
break
def process_multipart_form_data(entity):
"""Read all multipart/form-data parts into entity.parts or entity.params.
"""
process_multipart(entity)
kept_parts = []
for part in entity.parts:
if part.name is None:
kept_parts.append(part)
else:
if part.filename is None:
# It's a regular field
value = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
value = part
if part.name in entity.params:
if not isinstance(entity.params[part.name], list):
entity.params[part.name] = [entity.params[part.name]]
entity.params[part.name].append(value)
else:
entity.params[part.name] = value
entity.parts = kept_parts
def _old_process_multipart(entity):
"""The behavior of 3.2 and lower. Deprecated and will be changed in 3.3."""
process_multipart(entity)
params = entity.params
for part in entity.parts:
if part.name is None:
key = ntou('parts')
else:
key = part.name
if part.filename is None:
# It's a regular field
value = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
value = part
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
# -------------------------------- Entities --------------------------------- #
class Entity(object):
"""An HTTP request body, or MIME multipart body.
This class collects information about the HTTP request entity. When a
given entity is of MIME type "multipart", each part is parsed into its own
Entity instance, and the set of parts stored in
:attr:`entity.parts<cherrypy._cpreqbody.Entity.parts>`.
Between the ``before_request_body`` and ``before_handler`` tools, CherryPy
tries to process the request body (if any) by calling
:func:`request.body.process<cherrypy._cpreqbody.RequestBody.process>`.
This uses the ``content_type`` of the Entity to look up a suitable
processor in
:attr:`Entity.processors<cherrypy._cpreqbody.Entity.processors>`,
a dict.
If a matching processor cannot be found for the complete Content-Type,
it tries again using the major type. For example, if a request with an
entity of type "image/jpeg" arrives, but no processor can be found for
that complete type, then one is sought for the major type "image". If a
processor is still not found, then the
:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>` method
of the Entity is called (which does nothing by default; you can
override this too).
CherryPy includes processors for the "application/x-www-form-urlencoded"
type, the "multipart/form-data" type, and the "multipart" major type.
CherryPy 3.2 processes these types almost exactly as older versions.
Parts are passed as arguments to the page handler using their
``Content-Disposition.name`` if given, otherwise in a generic "parts"
argument. Each such part is either a string, or the
:class:`Part<cherrypy._cpreqbody.Part>` itself if it's a file. (In this
case it will have ``file`` and ``filename`` attributes, or possibly a
``value`` attribute). Each Part is itself a subclass of
Entity, and has its own ``process`` method and ``processors`` dict.
There is a separate processor for the "multipart" major type which is more
flexible, and simply stores all multipart parts in
:attr:`request.body.parts<cherrypy._cpreqbody.Entity.parts>`. You can
enable it with::
cherrypy.request.body.processors['multipart'] = _cpreqbody.process_multipart
in an ``on_start_resource`` tool.
"""
# http://tools.ietf.org/html/rfc2046#section-4.1.2:
# "The default character set, which must be assumed in the
# absence of a charset parameter, is US-ASCII."
# However, many browsers send data in utf-8 with no charset.
attempt_charsets = ['utf-8']
"""A list of strings, each of which should be a known encoding.
When the Content-Type of the request body warrants it, each of the given
encodings will be tried in order. The first one to successfully decode the
entity without raising an error is stored as
:attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
`HTTP/1.1 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
but ``['us-ascii', 'utf-8']`` for multipart parts.
"""
charset = None
"""The successful decoding; see "attempt_charsets" above."""
content_type = None
"""The value of the Content-Type request header.
If the Entity is part of a multipart payload, this will be the Content-Type
given in the MIME headers for this part.
"""
default_content_type = 'application/x-www-form-urlencoded'
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given. The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however, the MIME spec
declares that a part with no Content-Type defaults to "text/plain"
(see :class:`Part<cherrypy._cpreqbody.Part>`).
"""
filename = None
"""The ``Content-Disposition.filename`` header, if available."""
fp = None
"""The readable socket file object."""
headers = None
"""A dict of request/multipart header names and values.
This is a copy of the ``request.headers`` for the ``request.body``;
for multipart parts, it is the set of headers for that part.
"""
length = None
"""The value of the ``Content-Length`` header, if provided."""
name = None
"""The "name" parameter of the ``Content-Disposition`` header, if any."""
params = None
"""
If the request Content-Type is 'application/x-www-form-urlencoded' or
multipart, this will be a dict of the params pulled from the entity
body; that is, it will be the portion of request.params that come
from the message body (sometimes called "POST params", although they
can be sent with various HTTP method verbs). This value is set between
the 'before_request_body' and 'before_handler' hooks (assuming that
process_request_body is True)."""
processors = {'application/x-www-form-urlencoded': process_urlencoded,
'multipart/form-data': process_multipart_form_data,
'multipart': process_multipart,
}
"""A dict of Content-Type names to processor methods."""
parts = None
"""A list of Part instances if ``Content-Type`` is of major type
"multipart"."""
part_class = None
"""The class used for multipart parts.
You can replace this with custom subclasses to alter the processing of
multipart parts.
"""
def __init__(self, fp, headers, params=None, parts=None):
# Make an instance-specific copy of the class processors
# so Tools, etc. can replace them per-request.
self.processors = self.processors.copy()
self.fp = fp
self.headers = headers
if params is None:
params = {}
self.params = params
if parts is None:
parts = []
self.parts = parts
# Content-Type
self.content_type = headers.elements('Content-Type')
if self.content_type:
self.content_type = self.content_type[0]
else:
self.content_type = httputil.HeaderElement.from_str(
self.default_content_type)
# Copy the class 'attempt_charsets', prepending any Content-Type
# charset
dec = self.content_type.params.get("charset", None)
if dec:
self.attempt_charsets = [dec] + [c for c in self.attempt_charsets
if c != dec]
else:
self.attempt_charsets = self.attempt_charsets[:]
# Length
self.length = None
clen = headers.get('Content-Length', None)
# If Transfer-Encoding is 'chunked', ignore any Content-Length.
if (
clen is not None and
'chunked' not in headers.get('Transfer-Encoding', '')
):
try:
self.length = int(clen)
except ValueError:
pass
# Content-Disposition
self.name = None
self.filename = None
disp = headers.elements('Content-Disposition')
if disp:
disp = disp[0]
if 'name' in disp.params:
self.name = disp.params['name']
if self.name.startswith('"') and self.name.endswith('"'):
self.name = self.name[1:-1]
if 'filename' in disp.params:
self.filename = disp.params['filename']
if (
self.filename.startswith('"') and
self.filename.endswith('"')
):
self.filename = self.filename[1:-1]
# The 'type' attribute is deprecated in 3.2; remove it in 3.3.
type = property(
lambda self: self.content_type,
doc="A deprecated alias for "
":attr:`content_type<cherrypy._cpreqbody.Entity.content_type>`."
)
def read(self, size=None, fp_out=None):
return self.fp.read(size, fp_out)
def readline(self, size=None):
return self.fp.readline(size)
def readlines(self, sizehint=None):
return self.fp.readlines(sizehint)
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def next(self):
return self.__next__()
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None).
Return fp_out.
"""
if fp_out is None:
fp_out = self.make_file()
self.read(fp_out=fp_out)
return fp_out
def make_file(self):
"""Return a file-like object into which the request body will be read.
By default, this will return a TemporaryFile. Override as needed.
See also :attr:`cherrypy._cpreqbody.Part.maxrambytes`."""
return tempfile.TemporaryFile()
def fullvalue(self):
"""Return this entity as a string, whether stored in a file or not."""
if self.file:
# It was stored in a tempfile. Read it.
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
else:
value = self.value
value = self.decode_entity(value)
return value
    def decode_entity(self, value):
"""Return a given byte encoded value as a string"""
for charset in self.attempt_charsets:
try:
value = value.decode(charset)
except UnicodeDecodeError:
pass
else:
self.charset = charset
return value
else:
raise cherrypy.HTTPError(
400,
"The request entity could not be decoded. The following "
"charsets were attempted: %s" % repr(self.attempt_charsets)
)
def process(self):
"""Execute the best-match processor for the given media type."""
proc = None
ct = self.content_type.value
try:
proc = self.processors[ct]
except KeyError:
toptype = ct.split('/', 1)[0]
try:
proc = self.processors[toptype]
except KeyError:
pass
if proc is None:
self.default_proc()
else:
proc(self)
def default_proc(self):
"""Called if a more-specific processor is not found for the
``Content-Type``.
"""
# Leave the fp alone for someone else to read. This works fine
# for request.body, but the Part subclasses need to override this
# so they can move on to the next part.
pass
class Part(Entity):
"""A MIME part entity, part of a multipart entity."""
# "The default character set, which must be assumed in the absence of a
# charset parameter, is US-ASCII."
attempt_charsets = ['us-ascii', 'utf-8']
"""A list of strings, each of which should be a known encoding.
When the Content-Type of the request body warrants it, each of the given
encodings will be tried in order. The first one to successfully decode the
entity without raising an error is stored as
:attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
`HTTP/1.1 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
but ``['us-ascii', 'utf-8']`` for multipart parts.
"""
boundary = None
"""The MIME multipart boundary."""
default_content_type = 'text/plain'
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given. The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however (this class),
the MIME spec declares that a part with no Content-Type defaults to
"text/plain".
"""
# This is the default in stdlib cgi. We may want to increase it.
maxrambytes = 1000
"""The threshold of bytes after which point the ``Part`` will store
its data in a file (generated by
:func:`make_file<cherrypy._cprequest.Entity.make_file>`)
instead of a string. Defaults to 1000, just like the :mod:`cgi`
module in Python's standard library.
"""
def __init__(self, fp, headers, boundary):
Entity.__init__(self, fp, headers)
self.boundary = boundary
self.file = None
self.value = None
@classmethod
def from_fp(cls, fp, boundary):
headers = cls.read_headers(fp)
return cls(fp, headers, boundary)
@classmethod
def read_headers(cls, fp):
headers = httputil.HeaderMap()
while True:
line = fp.readline()
if not line:
# No more data--illegal end of headers
raise EOFError("Illegal end of headers.")
if line == ntob('\r\n'):
# Normal end of headers
break
if not line.endswith(ntob('\r\n')):
raise ValueError("MIME requires CRLF terminators: %r" % line)
if line[0] in ntob(' \t'):
# It's a continuation line.
v = line.strip().decode('ISO-8859-1')
else:
k, v = line.split(ntob(":"), 1)
k = k.strip().decode('ISO-8859-1')
v = v.strip().decode('ISO-8859-1')
existing = headers.get(k)
if existing:
v = ", ".join((existing, v))
headers[k] = v
return headers
def read_lines_to_boundary(self, fp_out=None):
"""Read bytes from self.fp and return or write them to a file.
If the 'fp_out' argument is None (the default), all bytes read are
returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like
object that supports the 'write' method; all bytes read will be
written to the fp, and that fp is returned.
"""
endmarker = self.boundary + ntob("--")
delim = ntob("")
prev_lf = True
lines = []
seen = 0
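        # Per RFC 2046, the line break preceding a boundary delimiter belongs
        # to the delimiter rather than to the part's data, so each line's
        # terminator is held back in 'delim' and only re-attached to the next
        # line if that line turns out not to be a boundary.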
while True:
line = self.fp.readline(1 << 16)
if not line:
raise EOFError("Illegal end of multipart body.")
if line.startswith(ntob("--")) and prev_lf:
strippedline = line.strip()
if strippedline == self.boundary:
break
if strippedline == endmarker:
self.fp.finish()
break
line = delim + line
if line.endswith(ntob("\r\n")):
delim = ntob("\r\n")
line = line[:-2]
prev_lf = True
elif line.endswith(ntob("\n")):
delim = ntob("\n")
line = line[:-1]
prev_lf = True
else:
delim = ntob("")
prev_lf = False
if fp_out is None:
lines.append(line)
seen += len(line)
if seen > self.maxrambytes:
fp_out = self.make_file()
for line in lines:
fp_out.write(line)
else:
fp_out.write(line)
if fp_out is None:
result = ntob('').join(lines)
return result
else:
fp_out.seek(0)
return fp_out
def default_proc(self):
"""Called if a more-specific processor is not found for the
``Content-Type``.
"""
if self.filename:
# Always read into a file if a .filename was given.
self.file = self.read_into_file()
else:
result = self.read_lines_to_boundary()
if isinstance(result, text_or_bytes):
self.value = result
else:
self.file = result
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None).
Return fp_out.
"""
if fp_out is None:
fp_out = self.make_file()
self.read_lines_to_boundary(fp_out=fp_out)
return fp_out
Entity.part_class = Part
try:
inf = float('inf')
except ValueError:
# Python 2.4 and lower
class Infinity(object):
def __cmp__(self, other):
return 1
def __sub__(self, other):
return self
inf = Infinity()
comma_separated_headers = [
'Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow',
'Cache-Control', 'Connection', 'Content-Encoding',
'Content-Language', 'Expect', 'If-Match',
'If-None-Match', 'Pragma', 'Proxy-Authenticate',
'Te', 'Trailer', 'Transfer-Encoding', 'Upgrade',
'Vary', 'Via', 'Warning', 'Www-Authenticate'
]
class SizedReader:
def __init__(self, fp, length, maxbytes, bufsize=DEFAULT_BUFFER_SIZE,
has_trailers=False):
# Wrap our fp in a buffer so peek() works
self.fp = fp
self.length = length
self.maxbytes = maxbytes
self.buffer = ntob('')
self.bufsize = bufsize
self.bytes_read = 0
self.done = False
self.has_trailers = has_trailers
def read(self, size=None, fp_out=None):
"""Read bytes from the request body and return or write them to a file.
A number of bytes less than or equal to the 'size' argument are read
off the socket. The actual number of bytes read are tracked in
self.bytes_read. The number may be smaller than 'size' when 1) the
client sends fewer bytes, 2) the 'Content-Length' request header
specifies fewer bytes than requested, or 3) the number of bytes read
exceeds self.maxbytes (in which case, 413 is raised).
If the 'fp_out' argument is None (the default), all bytes read are
returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like
object that supports the 'write' method; all bytes read will be
written to the fp, and None is returned.
"""
if self.length is None:
if size is None:
remaining = inf
else:
remaining = size
else:
remaining = self.length - self.bytes_read
if size and size < remaining:
remaining = size
if remaining == 0:
self.finish()
if fp_out is None:
return ntob('')
else:
return None
chunks = []
# Read bytes from the buffer.
if self.buffer:
if remaining is inf:
data = self.buffer
self.buffer = ntob('')
else:
data = self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
datalen = len(data)
remaining -= datalen
# Check lengths.
self.bytes_read += datalen
if self.maxbytes and self.bytes_read > self.maxbytes:
raise cherrypy.HTTPError(413)
# Store the data.
if fp_out is None:
chunks.append(data)
else:
fp_out.write(data)
# Read bytes from the socket.
while remaining > 0:
chunksize = min(remaining, self.bufsize)
try:
data = self.fp.read(chunksize)
except Exception:
e = sys.exc_info()[1]
if e.__class__.__name__ == 'MaxSizeExceeded':
# Post data is too big
raise cherrypy.HTTPError(
413, "Maximum request length: %r" % e.args[1])
else:
raise
if not data:
self.finish()
break
datalen = len(data)
remaining -= datalen
# Check lengths.
self.bytes_read += datalen
if self.maxbytes and self.bytes_read > self.maxbytes:
raise cherrypy.HTTPError(413)
# Store the data.
if fp_out is None:
chunks.append(data)
else:
fp_out.write(data)
if fp_out is None:
return ntob('').join(chunks)
def readline(self, size=None):
"""Read a line from the request body and return it."""
chunks = []
while size is None or size > 0:
chunksize = self.bufsize
if size is not None and size < self.bufsize:
chunksize = size
data = self.read(chunksize)
if not data:
break
pos = data.find(ntob('\n')) + 1
if pos:
chunks.append(data[:pos])
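                # Anything read past the newline is pushed back into the
                # buffer and subtracted from bytes_read so that a subsequent
                # read() returns it again.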
remainder = data[pos:]
self.buffer += remainder
self.bytes_read -= len(remainder)
break
else:
chunks.append(data)
return ntob('').join(chunks)
def readlines(self, sizehint=None):
"""Read lines from the request body and return them."""
if self.length is not None:
if sizehint is None:
sizehint = self.length - self.bytes_read
else:
sizehint = min(sizehint, self.length - self.bytes_read)
lines = []
seen = 0
while True:
line = self.readline()
if not line:
break
lines.append(line)
seen += len(line)
if seen >= sizehint:
break
return lines
def finish(self):
self.done = True
if self.has_trailers and hasattr(self.fp, 'read_trailer_lines'):
self.trailers = {}
try:
for line in self.fp.read_trailer_lines():
if line[0] in ntob(' \t'):
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(ntob(":"), 1)
except ValueError:
raise ValueError("Illegal header line.")
k = k.strip().title()
v = v.strip()
if k in comma_separated_headers:
                        existing = self.trailers.get(k)
if existing:
v = ntob(", ").join((existing, v))
self.trailers[k] = v
except Exception:
e = sys.exc_info()[1]
if e.__class__.__name__ == 'MaxSizeExceeded':
# Post data is too big
raise cherrypy.HTTPError(
413, "Maximum request length: %r" % e.args[1])
else:
raise
class RequestBody(Entity):
"""The entity of the HTTP request."""
bufsize = 8 * 1024
"""The buffer size used when reading the socket."""
# Don't parse the request body at all if the client didn't provide
# a Content-Type header. See
# https://github.com/cherrypy/cherrypy/issues/790
default_content_type = ''
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given. The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however, the MIME spec
declares that a part with no Content-Type defaults to "text/plain"
(see :class:`Part<cherrypy._cpreqbody.Part>`).
"""
maxbytes = None
"""Raise ``MaxSizeExceeded`` if more bytes than this are read from
the socket.
"""
def __init__(self, fp, headers, params=None, request_params=None):
Entity.__init__(self, fp, headers, params)
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
# When no explicit charset parameter is provided by the
# sender, media subtypes of the "text" type are defined
# to have a default charset value of "ISO-8859-1" when
# received via HTTP.
if self.content_type.value.startswith('text/'):
for c in ('ISO-8859-1', 'iso-8859-1', 'Latin-1', 'latin-1'):
if c in self.attempt_charsets:
break
else:
self.attempt_charsets.append('ISO-8859-1')
# Temporary fix while deprecating passing .parts as .params.
self.processors['multipart'] = _old_process_multipart
if request_params is None:
request_params = {}
self.request_params = request_params
def process(self):
"""Process the request entity based on its Content-Type."""
# "The presence of a message-body in a request is signaled by the
# inclusion of a Content-Length or Transfer-Encoding header field in
# the request's message-headers."
# It is possible to send a POST request with no body, for example;
# however, app developers are responsible in that case to set
# cherrypy.request.process_body to False so this method isn't called.
h = cherrypy.serving.request.headers
if 'Content-Length' not in h and 'Transfer-Encoding' not in h:
raise cherrypy.HTTPError(411)
self.fp = SizedReader(self.fp, self.length,
self.maxbytes, bufsize=self.bufsize,
has_trailers='Trailer' in h)
super(RequestBody, self).process()
# Body params should also be a part of the request_params
# add them in here.
request_params = self.request_params
for key, value in self.params.items():
# Python 2 only: keyword arguments must be byte strings (type
# 'str').
if sys.version_info < (3, 0):
if isinstance(key, unicode):
key = key.encode('ISO-8859-1')
if key in request_params:
if not isinstance(request_params[key], list):
request_params[key] = [request_params[key]]
request_params[key].append(value)
else:
request_params[key] = value
| mit | 8,550,716,814,100,659,000 | 35.765226 | 84 | 0.58538 | false | 4.25017 | false | false | false |
lykops/lykops | lykops/urls.py | 1 | 5817 | """lykops URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
# from django.contrib import admin
from library.connecter.database.mongo import Op_Mongo
from library.connecter.database.redis_api import Op_Redis
# from lykops import settings
from lykops.ansible.execute import Exec
from lykops.ansible.options import Options
from lykops.ansible.report import Report
from lykops.ansible.yaml import Yaml
from lykops.sysadmin.inventory import Inventory
# from lykops.sysadmin.privacy import Privacy
from lykops.sysadmin.user import User
from lykops.views import Login
mongoclient = Op_Mongo()
redisclient = Op_Redis()
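# NOTE: a single Op_Mongo/Op_Redis connection pair is created here at import
# time and shared by every view instance constructed in urlpatterns below.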
urlpatterns = [
# url(r'^admin/', admin.site.urls),
url(r'^$', User(mongoclient=mongoclient, redisclient=redisclient).summary, name='index'),
url(r'^login.html', Login(mongoclient=mongoclient, redisclient=redisclient).login, name='login'),
url(r'^logout.html', Login(mongoclient=mongoclient, redisclient=redisclient).logout, name='logout'),
url(r'^user/create_admin', Login(mongoclient=mongoclient, redisclient=redisclient).create_admin, name='create_admin'),
url(r'^user/detail', User(mongoclient=mongoclient, redisclient=redisclient).detail),
url(r'^user/list', User(mongoclient=mongoclient, redisclient=redisclient).summary, name='user_list'),
url(r'^user/add', User(mongoclient=mongoclient, redisclient=redisclient).add, name='user_add'),
url(r'^user/edit', User(mongoclient=mongoclient, redisclient=redisclient).edit),
url(r'^user/chgpwd', User(mongoclient=mongoclient, redisclient=redisclient).change_pwd),
url(r'^user/chgpvltwd', User(mongoclient=mongoclient, redisclient=redisclient).change_vaultpwd),
url(r'^user/del', User(mongoclient=mongoclient, redisclient=redisclient).delete),
url(r'^user/disable', User(mongoclient=mongoclient, redisclient=redisclient).disable),
url(r'^user/enable', User(mongoclient=mongoclient, redisclient=redisclient).enable),
url(r'^user/$', User(mongoclient=mongoclient, redisclient=redisclient).summary),
# url(r'^privacy/edit', Privacy(mongoclient=mongoclient, redisclient=redisclient).edit, name='privacy_edit'),
# url(r'^privacy/detail', Privacy(mongoclient=mongoclient, redisclient=redisclient).detail, name='privacy_detail'),
# url(r'^privacy/$', Privacy(mongoclient=mongoclient, redisclient=redisclient).detail),
    # This feature stores users' confidential data; it is not needed in this release, so it is not exposed for now.
url(r'^inventory/add$', Inventory(mongoclient=mongoclient, redisclient=redisclient).add, name='inventory_add'),
url(r'^inventory/list$', Inventory(mongoclient=mongoclient, redisclient=redisclient).summary, name='inventory_list'),
url(r'^inventory/$', Inventory(mongoclient=mongoclient, redisclient=redisclient).summary),
url(r'^inventory/detail$', Inventory(mongoclient=mongoclient, redisclient=redisclient).detail, name='inventory_detail'),
url(r'^inventory/edit$', Inventory(mongoclient=mongoclient, redisclient=redisclient).edit, name='inventory_edit'),
url(r'^inventory/del$', Inventory(mongoclient=mongoclient, redisclient=redisclient).delete, name='inventory_del'),
url(r'^ansible/$', Report(mongoclient=mongoclient, redisclient=redisclient).summary, name='ansible'),
url(r'^ansible/report/$', Report(mongoclient=mongoclient, redisclient=redisclient).summary, name='ansible_report'),
url(r'^ansible/report/list$', Report(mongoclient=mongoclient, redisclient=redisclient).summary, name='ansible_report_list'),
url(r'^ansible/report/detail$', Report(mongoclient=mongoclient, redisclient=redisclient).detail),
url(r'^ansible/yaml/add$', Yaml(mongoclient=mongoclient, redisclient=redisclient).add, name='ansible_yaml_add'),
url(r'^ansible/yaml/import$', Yaml(mongoclient=mongoclient, redisclient=redisclient).import_file, name='ansible_yaml_import'),
url(r'^ansible/yaml/list$', Yaml(mongoclient=mongoclient, redisclient=redisclient).summary, name='ansible_yaml_list'),
url(r'^ansible/yaml/detail$', Yaml(mongoclient=mongoclient, redisclient=redisclient).detail, name='ansible_yaml_detail'),
url(r'^ansible/yaml/edit$', Yaml(mongoclient=mongoclient, redisclient=redisclient).edit),
url(r'^ansible/yaml/$', Yaml(mongoclient=mongoclient, redisclient=redisclient).summary, name='ansible_yaml'),
url(r'^ansible/exec/adhoc$', Exec(mongoclient=mongoclient, redisclient=redisclient).adhoc, name='ansible_exec_adhoc'),
url(r'^ansible/exec/playbook$', Exec(mongoclient=mongoclient, redisclient=redisclient).playbook, name='ansible_exec_playbook'),
url(r'^ansible/option/$', Options(mongoclient=mongoclient, redisclient=redisclient).detail, name='ansible_option'),
url(r'^ansible/option/edit$', Options(mongoclient=mongoclient, redisclient=redisclient).edit),
url(r'^ansible/option/detail$', Options(mongoclient=mongoclient, redisclient=redisclient).detail),
# url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root':settings.STATICFILES_DIRS, 'show_indexes':False}),
# url(r'^file/(?P<path>.*)$', 'django.views.static.serve', {'document_root':settings.MEDIA_ROOT, 'show_indexes':False}),
]
| apache-2.0 | 4,648,493,441,235,367,000 | 63.595506 | 132 | 0.744651 | false | 3.179757 | false | false | false |
google/tmppy | _py2tmp/ir0_optimization/_expression_simplification.py | 1 | 17752 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
from _py2tmp.ir0 import ir, Transformation, is_expr_variadic, GlobalLiterals, select1st_literal
from _py2tmp.ir0_optimization._compute_non_expanded_variadic_vars import compute_non_expanded_variadic_vars
from _py2tmp.ir0_optimization._recalculate_template_instantiation_can_trigger_static_asserts_info import expr_can_trigger_static_asserts
class ExpressionSimplificationTransformation(Transformation):
def __init__(self) -> None:
super().__init__()
self.in_variadic_type_expansion = False
def transform_not_expr(self, not_expr: ir.NotExpr) -> ir.Expr:
expr = self.transform_expr(not_expr.inner_expr)
# !true => false
# !false => true
if isinstance(expr, ir.Literal):
assert isinstance(expr.value, bool)
return ir.Literal(not expr.value)
# !!x => x
if isinstance(expr, ir.NotExpr):
return expr.inner_expr
# !(x && y) => (!x || !y)
# !(x || y) => (!x && !y)
if isinstance(expr, ir.BoolBinaryOpExpr):
op = {
'&&': '||',
'||': '&&',
}[expr.op]
return self.transform_expr(
ir.BoolBinaryOpExpr(lhs=ir.NotExpr(expr.lhs), rhs=ir.NotExpr(expr.rhs), op=op))
# !(x == y) => x != y
# !(x != y) => x == y
# !(x < y) => x >= y
# !(x <= y) => x > y
# !(x > y) => x <= y
# !(x >= y) => x < y
if isinstance(expr, ir.ComparisonExpr) and expr.op in ('==', '!='):
op = {
'==': '!=',
'!=': '==',
'<': '>=',
'<=': '>',
'>': '<=',
'>=': '<',
}[expr.op]
return ir.ComparisonExpr(expr.lhs, expr.rhs, op)
return ir.NotExpr(expr)
def transform_unary_minus_expr(self, unary_minus: ir.UnaryMinusExpr) -> ir.Expr:
expr = self.transform_expr(unary_minus.inner_expr)
# -(3) => -3
if isinstance(expr, ir.Literal):
assert isinstance(expr.value, int)
return ir.Literal(-expr.value)
# -(x - y) => y - x
if isinstance(expr, ir.Int64BinaryOpExpr) and expr.op == '-':
return ir.Int64BinaryOpExpr(lhs=expr.rhs, rhs=expr.lhs, op='-')
return ir.UnaryMinusExpr(expr)
def transform_int64_binary_op_expr(self, binary_op: ir.Int64BinaryOpExpr) -> ir.Expr:
lhs = binary_op.lhs
rhs = binary_op.rhs
op = binary_op.op
# (x - y) => (x + -y)
# This pushes down the minus, so that e.g. (x - (-y)) => (x + y).
if op == '-':
rhs = ir.UnaryMinusExpr(rhs)
op = '+'
lhs = self.transform_expr(lhs)
rhs = self.transform_expr(rhs)
if op == '+' and isinstance(rhs, ir.UnaryMinusExpr):
# We could not push down the minus, so switch back to a subtraction.
op = '-'
rhs = rhs.inner_expr
if op == '+':
# 3 + 5 => 8
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value + rhs.value)
# 0 + x => x
if isinstance(lhs, ir.Literal) and lhs.value == 0:
return rhs
# x + 0 => x
if isinstance(rhs, ir.Literal) and rhs.value == 0:
return lhs
if op == '-':
# 8 - 5 => 3
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value - rhs.value)
# 0 - x => -x
if isinstance(lhs, ir.Literal) and lhs.value == 0:
return ir.UnaryMinusExpr(rhs)
# x - 0 => x
if isinstance(rhs, ir.Literal) and rhs.value == 0:
return lhs
if op == '*':
# 3 * 5 => 15
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value * rhs.value)
# 0 * x => 0
if isinstance(lhs, ir.Literal) and lhs.value == 0:
if self._can_remove_subexpression(rhs):
return ir.Literal(0)
# x * 0 => 0
if isinstance(rhs, ir.Literal) and rhs.value == 0:
if self._can_remove_subexpression(lhs):
return ir.Literal(0)
# 1 * x => x
if isinstance(lhs, ir.Literal) and lhs.value == 1:
return rhs
# x * 1 => x
if isinstance(rhs, ir.Literal) and rhs.value == 1:
return lhs
if op == '/':
# 16 / 3 => 5
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value // rhs.value)
# x / 1 => x
if isinstance(rhs, ir.Literal) and rhs.value == 1:
return lhs
if op == '%':
# 16 % 3 => 1
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value % rhs.value)
# x % 1 => 0
if isinstance(rhs, ir.Literal) and rhs.value == 1:
return ir.Literal(0)
return ir.Int64BinaryOpExpr(lhs, rhs, op)
def transform_bool_binary_op_expr(self, binary_op: ir.BoolBinaryOpExpr) -> ir.Expr:
lhs = binary_op.lhs
rhs = binary_op.rhs
op = binary_op.op
lhs = self.transform_expr(lhs)
rhs = self.transform_expr(rhs)
if op == '&&':
# true && false => false
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value and rhs.value)
# true && x => x
if isinstance(lhs, ir.Literal) and lhs.value is True:
return rhs
# x && true => x
if isinstance(rhs, ir.Literal) and rhs.value is True:
return lhs
# false && x => false
if isinstance(lhs, ir.Literal) and lhs.value is False:
if self._can_remove_subexpression(rhs):
return ir.Literal(False)
# x && false => false
if isinstance(rhs, ir.Literal) and rhs.value is False:
if self._can_remove_subexpression(lhs):
return ir.Literal(False)
if op == '||':
# true || false => true
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
return ir.Literal(lhs.value or rhs.value)
# false || x => x
if isinstance(lhs, ir.Literal) and lhs.value is False:
return rhs
# x || false => x
if isinstance(rhs, ir.Literal) and rhs.value is False:
return lhs
# true || x => true
if isinstance(lhs, ir.Literal) and lhs.value is True:
if self._can_remove_subexpression(rhs):
return ir.Literal(True)
# x || true => true
if isinstance(rhs, ir.Literal) and rhs.value is True:
if self._can_remove_subexpression(lhs):
return ir.Literal(True)
return ir.BoolBinaryOpExpr(lhs, rhs, op)
def transform_comparison_expr(self, comparison: ir.ComparisonExpr) -> ir.Expr:
lhs = comparison.lhs
rhs = comparison.rhs
op = comparison.op
lhs = self.transform_expr(lhs)
rhs = self.transform_expr(rhs)
if isinstance(lhs, ir.Literal) and isinstance(rhs, ir.Literal):
if op == '==':
return ir.Literal(lhs.value == rhs.value)
if op == '!=':
return ir.Literal(lhs.value != rhs.value)
if op == '<':
return ir.Literal(lhs.value < rhs.value)
if op == '<=':
return ir.Literal(lhs.value <= rhs.value)
if op == '>':
return ir.Literal(lhs.value > rhs.value)
if op == '>=':
return ir.Literal(lhs.value >= rhs.value)
if op in ('==', '!=') and self._is_syntactically_equal(lhs, rhs) and not expr_can_trigger_static_asserts(lhs):
if self._can_remove_subexpression(lhs) and self._can_remove_subexpression(rhs):
return {
'==': ir.Literal(True),
'!=': ir.Literal(False),
}[op]
if op in ('==', '!=') and isinstance(rhs, ir.Literal) and rhs.expr_type == ir.BoolType():
rhs, lhs = lhs, rhs
if op in ('==', '!=') and isinstance(lhs, ir.Literal) and lhs.expr_type == ir.BoolType():
return {
('==', True): lambda: rhs,
('==', False): lambda: self.transform_expr(ir.NotExpr(rhs)),
('!=', True): lambda: self.transform_expr(ir.NotExpr(rhs)),
('!=', False): lambda: rhs,
}[(op, lhs.value)]()
return ir.ComparisonExpr(lhs, rhs, op)
def transform_static_assert(self, static_assert: ir.StaticAssert):
expr = self.transform_expr(static_assert.expr)
if isinstance(expr, ir.Literal) and expr.value is True:
return
self.writer.write(ir.StaticAssert(expr=expr,
message=static_assert.message))
def _is_syntactically_equal(self, lhs: ir.Expr, rhs: ir.Expr):
if not lhs.is_same_expr_excluding_subexpressions(rhs):
return False
lhs_exprs = list(lhs.direct_subexpressions)
rhs_exprs = list(rhs.direct_subexpressions)
if len(lhs_exprs) != len(rhs_exprs):
return False
return all(self._is_syntactically_equal(lhs_expr, rhs_expr)
for lhs_expr, rhs_expr in zip(lhs_exprs, rhs_exprs))
def transform_variadic_type_expansion(self, expr: ir.VariadicTypeExpansion):
old_in_variadic_type_expansion = self.in_variadic_type_expansion
self.in_variadic_type_expansion = True
result = super().transform_variadic_type_expansion(expr)
self.in_variadic_type_expansion = old_in_variadic_type_expansion
return result
def transform_class_member_access(self, class_member_access: ir.ClassMemberAccess):
if (isinstance(class_member_access.inner_expr, ir.TemplateInstantiation)
and isinstance(class_member_access.inner_expr.template_expr, ir.AtomicTypeLiteral)):
if class_member_access.inner_expr.template_expr.cpp_type == 'GetFirstError':
args = self.transform_exprs(class_member_access.inner_expr.args, original_parent_element=class_member_access.inner_expr)
return self.transform_get_first_error(args)
if class_member_access.inner_expr.template_expr.cpp_type == 'std::is_same':
args = self.transform_exprs(class_member_access.inner_expr.args, original_parent_element=class_member_access.inner_expr)
return self.transform_is_same(args)
if class_member_access.inner_expr.template_expr.cpp_type.startswith('Select1st'):
args = self.transform_exprs(class_member_access.inner_expr.args, original_parent_element=class_member_access.inner_expr)
return self.transform_select1st(args)
return super().transform_class_member_access(class_member_access)
def _can_remove_subexpression(self, expr: ir.Expr):
# If we're in a variadic type expr, we can't remove variadic sub-exprs (not in general at least).
# E.g. BoolList<(F<Ts>::value || true)...> can't be optimized to BoolList<true>
if self.in_variadic_type_expansion and is_expr_variadic(expr):
return False
return True
def transform_get_first_error(self, args: Tuple[ir.Expr, ...]):
new_args = []
for arg in args:
if isinstance(arg, ir.AtomicTypeLiteral) and arg.cpp_type == 'void':
pass
elif (isinstance(arg, ir.VariadicTypeExpansion)
and isinstance(arg.inner_expr, ir.ClassMemberAccess)
and isinstance(arg.inner_expr.inner_expr, ir.TemplateInstantiation)
and isinstance(arg.inner_expr.inner_expr.template_expr, ir.AtomicTypeLiteral)
and arg.inner_expr.inner_expr.template_expr.cpp_type.startswith('Select1stType')
and len(arg.inner_expr.inner_expr.args) == 2
and isinstance(arg.inner_expr.inner_expr.args[0], ir.AtomicTypeLiteral)
and arg.inner_expr.inner_expr.args[0].cpp_type == 'void'):
# Select1stType*<void, expr>...
pass
else:
new_args.append(arg)
return ir.ClassMemberAccess(inner_expr=ir.TemplateInstantiation(template_expr=GlobalLiterals.GET_FIRST_ERROR,
args=tuple(new_args),
instantiation_might_trigger_static_asserts=False),
expr_type=ir.TypeType(),
member_name='type')
def transform_is_same(self, args: Tuple[ir.Expr, ...]):
assert len(args) == 2
lhs, rhs = args
list_template_names = {'List', 'BoolList', 'Int64List'}
if (isinstance(lhs, ir.TemplateInstantiation) and isinstance(lhs.template_expr, ir.AtomicTypeLiteral) and lhs.template_expr.cpp_type in list_template_names
and isinstance(rhs, ir.TemplateInstantiation) and isinstance(rhs.template_expr, ir.AtomicTypeLiteral) and rhs.template_expr.cpp_type in list_template_names
and lhs.template_expr.cpp_type == rhs.template_expr.cpp_type
and not any(isinstance(arg, ir.VariadicTypeExpansion) for arg in lhs.args)
and not any(isinstance(arg, ir.VariadicTypeExpansion) for arg in rhs.args)
and len(lhs.args) == len(rhs.args)
and lhs.args):
# std::is_same<List<X1, X2, X3>, List<Y1, Y2, Y3>>::value
# -> std::is_same<X1, Y1>::value && std::is_same<X2, Y2>::value && std::is_same<X3, Y3>::value
if lhs.template_expr.cpp_type == 'List':
result = None
for lhs_arg, rhs_arg in zip(lhs.args, rhs.args):
if result:
result = ir.BoolBinaryOpExpr(lhs=result,
rhs=self._create_is_same_expr(lhs_arg, rhs_arg),
op='&&')
else:
result = self._create_is_same_expr(lhs_arg, rhs_arg)
return self.transform_expr(result)
# std::is_same<IntList<n1, n2, n3>, IntList<m1, m2, m3>>::value
# -> (n1 == m1) && (n2 == m2) && (n3 == m3)
# (and same for BoolList)
result = None
for lhs_arg, rhs_arg in zip(lhs.args, rhs.args):
if result:
result = ir.BoolBinaryOpExpr(lhs=result,
rhs=ir.ComparisonExpr(lhs_arg, rhs_arg, op='=='),
op='&&')
else:
result = ir.ComparisonExpr(lhs_arg, rhs_arg, op='==')
return self.transform_expr(result)
return self._create_is_same_expr(lhs, rhs)
def _create_is_same_expr(self, lhs: ir.Expr, rhs: ir.Expr):
return ir.ClassMemberAccess(
inner_expr=ir.TemplateInstantiation(template_expr=GlobalLiterals.STD_IS_SAME,
args=(lhs, rhs),
instantiation_might_trigger_static_asserts=False),
expr_type=ir.BoolType(),
member_name='value')
def transform_select1st(self, args: Tuple[ir.Expr, ...]):
lhs, rhs = args
best_var = None
        # First preference goes to non-expanded variadic vars, to keep the Select1st* expression variadic if it currently is.
for var_name in compute_non_expanded_variadic_vars(rhs):
[best_var] = (var
for var in rhs.free_vars
if var.cpp_type == var_name)
break
# If there are none, then any non-variadic var is also ok.
if not best_var:
for var in rhs.free_vars:
if not var.is_variadic and isinstance(var.expr_type, (ir.BoolType, ir.Int64Type, ir.TypeType)):
best_var = var
break
if best_var:
rhs = best_var
return ir.ClassMemberAccess(inner_expr=ir.TemplateInstantiation(template_expr=select1st_literal(lhs.expr_type, rhs.expr_type),
args=(lhs, rhs),
instantiation_might_trigger_static_asserts=False),
expr_type=lhs.expr_type,
member_name='value')
| apache-2.0 | -2,463,025,841,349,026,000 | 44.634961 | 171 | 0.534193 | false | 3.903254 | false | false | false |
melmothx/jsonbot | jsb/plugs/wave/gadget.py | 1 | 4625 | # jsb.plugs.wave/gadget.py
#
#
## jsb imports
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.lib.persist import PlugPersist
gadgeturls = PlugPersist('gadgeturls')
gadgeturls.data['gadget'] = 'https://jsonbot.appspot.com/gadget.xml'
gadgeturls.data['poll'] = 'https://jsonbot.appspot.com/poll.xml'
gadgeturls.data['iframe'] = 'https://jsonbot.appspot.com/iframe.xml'
gadgeturls.data['loadiframe'] = 'https://jsonbot.appspot.com/loadiframe.xml'
def loadroot(event, url):
if event.rootblip:
from waveapi import element
event.rootblip.append(element.Gadget(url))
return True
else:
event.reply("can't find root blip.")
return False
def load(event, url):
if event.blip:
from waveapi import element
event.blip.append(element.Gadget(url))
return True
else:
event.reply("can't find root blip.")
return False
def handle_gadgetload(bot, event):
if event.bottype != "wave":
event.reply("this command only works in google wave.");
return
if not event.rest:
event.missing('<gadgetname>')
return
try:
url = gadgeturls.data[event.rest]
if load(event, url):
event.reply('loaded %s' % url)
except KeyError:
event.reply("we don't have a url for %s" % event.rest)
cmnds.add("gadget-load", handle_gadgetload, 'USER')
examples.add("gadget-load", "load a gadget into a blip", "gadget-load")
def handle_gadgetloadroot(bot, event):
if event.bottype != "wave":
event.reply("this command only works in google wave.");
return
if not event.rest:
event.missing('<gadgetname>')
return
try:
url = gadgeturls.data[event.rest]
if loadroot(event, url):
event.reply('loaded %s' % url)
except KeyError:
event.reply("we don't have a url for %s" % event.rest)
cmnds.add("gadget-loadroot", handle_gadgetloadroot, 'USER')
examples.add("gadget-loadroot", "load a gadget into the root blip", "gadget-loadroot")
def handle_gadgetiframe(bot, event):
if event.bottype != "wave":
event.reply("this command only works in google wave.");
return
if not event.rest:
event.missing('<url>')
return
try:
url = gadgeturls.data['loadiframe'] + "?&iframeurl=%s" % event.rest
event.reply('loading %s' % url)
load(event, url)
except KeyError:
event.reply("we don't have a iframe url")
cmnds.add("gadget-iframe", handle_gadgetiframe, 'USER')
examples.add("gadget-iframe", "load a url into a iframe", "gadget-iframe")
def handle_gadgetaddurl(bot, event):
try:
(name, url) = event.args
except ValueError:
event.missing('<name> <url>')
return
if not gadgeturls.data.has_key(name):
gadgeturls.data[name] = url
gadgeturls.save()
else:
event.reply("we already have a %s gadget" % name)
cmnds.add("gadget-addurl", handle_gadgetaddurl, 'USER')
examples.add("gadget-addurl", "store a gadget url", "gadget-addurl jsb https://jsonbot.appspot.com/iframe.xml")
def handle_gadgetdelurl(bot, event):
try:
(name, url) = event.args
except ValueError:
event.missing('<name> <url>')
return
    if gadgeturls.data.has_key(name):
        del gadgeturls.data[name]
        gadgeturls.save()
cmnds.add("gadget-delurl", handle_gadgetdelurl, 'OPER')
examples.add("gadget-delurl", "delete a gadget url", "gadget-delurl mygadget")
def handle_gadgetlist(bot, event):
result = []
for name, url in gadgeturls.data.iteritems():
result.append("%s - %s" % (name, url))
event.reply("available gadgets: ", result)
cmnds.add("gadget-list", handle_gadgetlist, 'USER')
examples.add("gadget-list", "list known gadget urls", "gadget-list")
def handle_gadgetconsole(bot, event):
if event.bottype != "wave":
event.reply("this command only works in google wave.");
return
wave = event.chan
if wave.data.feeds and wave.data.dotitle:
event.set_title("JSONBOT - %s #%s" % (" - ".join(wave.data.feeds), str(wave.data.nrcloned)))
from waveapi import element
#url = gadgeturls.data['loadiframe'] + "?&iframeurl=https://jsonbot.appspot.com"
#event.reply('loading %s' % url)
event.append("loading ...\n")
#load(event, "http://jsonbot.appspot.com/iframe.xml")
event.append(
element.Gadget('http://jsonbot.appspot.com/console.xml?gadget_cache=0'))
cmnds.add("gadget-console", handle_gadgetconsole, 'OPER')
examples.add("gadget-console", "load the console gadget", "gadget-console")
| mit | 9,067,515,519,668,668,000 | 28.647436 | 111 | 0.644108 | false | 3.200692 | false | false | false |
mellowizz/ocny_tax_info | qgis_show_ocny_tax_info.py | 1 | 1143 | from qgis.core import *
from qgis.gui import *
import mechanize
import cookielib
@qgsfunction(args='auto', group='Custom')
def show_tax_info(pin, feature, parent):
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
url = 'http://propertydata.orangecountygov.com/imate/propdetail.aspx'
# first 4 of PIN are town code: str(pin)[0:4]
# search = '/'.join([BASE_URL, 'viewlist.aspx?sort=printkey&swis={tcode}'])
# get cookie
br.open('http://www.co.orange.ny.us/content/124/1368/4136.aspx')
for link in br.links():
if 'index.aspx' in link.url:
br.follow_link(link)
break
swis = str(pin)[:6]
printkey = str(pin)[6:]
search_terms = 'swis={}&printkey={}'.format(swis, printkey)
full_url = '?'.join([url, search_terms])
response = br.open(full_url)
return response.read()
| gpl-3.0 | -1,176,792,807,719,864,800 | 26.214286 | 79 | 0.64392 | false | 3.05615 | false | false | false |
AlexWoo/pyed | pysys/pycmdserver.py | 1 | 1693 | from pyevent.event import event
from pyevent.tcpserver import tcpserver
from pyevent.tcpconnection import tcpconnection
class cmdserver(tcpserver):
def __init__(self, pesys):
self.evs = pesys.evs
self.tms = pesys.tms
self.log = pesys.log
self.proc = pesys.proc
self.proc.setcmdserver(self)
self.srvconf = pesys.conf.cmdserver
self.c = None
tcpserver.__init__(self, self.accepthandler, self.srvconf,
self.evs, self.tms)
def accepthandler(self, ev):
csock, _ = ev.sock.accept()
if self.c:
csock.close()
self.log.logInfo("CmdServer", "Cmdserver has cmd to process, close new cmdclient")
return
self.c = tcpconnection(csock, self.srvconf, self.evs, self.tms)
self.c.set_recvmsg(self.recvmsg)
self.c.set_broken(self.brokenhandler)
def recvmsg(self, c):
buf = self.c.read()
self.log.logInfo("CmdServer", "Send cmd[%s] to worker", buf.strip())
self.proc.sendcmd(buf)
self.ev = event(self.evs, self.tms)
self.ev.add_timer(5000, self.timeouthandler) # set cmd response timeout to 5s
def sendresp(self, buf, islast):
self.c.write(buf)
if islast:
self.c.close()
self.c = None
self.ev.del_timer()
def brokenhandler(self, c):
self.c = None
self.ev.del_timer()
self.log.logInfo("CmdServer", "Cmdclient link broken")
def timeouthandler(self, ev):
self.log.logInfo("CmdServer", "Wait for Worker response timeout")
self.c.close()
self.c = None
self.ev.del_timer()
| bsd-2-clause | -195,326,906,366,246,620 | 32.196078 | 94 | 0.594802 | false | 3.352475 | false | false | false |
NCI-Cloud/cloud-tools | check-defunct-instances.py | 1 | 1214 | #!/usr/bin/env python
#
# Take a list of instance UUIDs and check their status. If the last activity
# recorded for them is older than the given number of days (90 by default),
# mark them as defunct.
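#
# Illustrative invocation (a sketch only; it assumes parser_with_common_args()
# exposes the instance UUIDs as the positional "hosts" argument used in main()):
#   ./check-defunct-instances.py -d 120 11111111-2222-3333-4444-555555555555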
from util import get_nova_client, get_keystone_client
from util import get_instance, is_instance_to_be_expired
from util import output_report
from util import parser_with_common_args
def parse_args():
parser = parser_with_common_args()
parser.add_argument("-d", "--days", action='store', required=False,
                        type=int, default=90,
                        help=(
                            "Number of days before an instance is considered "
                            "defunct"
))
return parser.parse_args()
def main():
args = parse_args()
nc = get_nova_client()
kc = get_keystone_client()
instances = []
for uuid in args.hosts:
instance = get_instance(nc, uuid)
if instance is None:
print("Instance %s not found" % (uuid))
else:
if is_instance_to_be_expired(nc, instance, days=args.days):
instances.append(instance)
output_report(nc, kc, instances)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,662,251,379,629,455,400 | 27.904762 | 77 | 0.584843 | false | 4.019868 | false | false | false |
maeltac/hazanet | sense.py | 1 | 1881 |
import pdb
"""
Each sensor that uses this will follow these rules:
calling the sensor.startup() function will initialize and calibrate the sensor. It will return 'Green' on success, 'Red' on failure
calling sensor.read() will return a float for that tick
calling sensor.reset() will attempt to reset the sensor, returning 0 for success, 1 for failure, or 2 for wait
"""
class Sensor():
def startup(self,sentype):
#pdb.set_trace()
if sentype == 'RAD':
return RAD.startup(self,sentype)
elif sentype =='CO':
return CO.startup(self,sentype)
elif sentype =='CH4':
return CH4.startup(self,sentype)
elif sentype =='C6H6':
return C6H6.startup(self,sentype)
elif sentype =='C3H8':
return C3H8.startup(self,sentype)
else:
return 'Error Initializing'
def read(self):
return 0
def reset(self):
return 0
def supported(self):
supportlist = ['RAD', 'CO', 'CH4', 'C6H6', 'C3H8']
return supportlist
class RAD(Sensor):
def startup(self,sentype):
retstring = 'Sickly Green'
return retstring
def read(self):
return 0
def reset(self):
return 0
class CO(Sensor):
def startup(self,sentype):
return 'Blue'
def read(self):
return 0
def reset(self):
return 0
class CH4(Sensor):
def startup(self,sentype):
        return 'Nauseous'
def read(self):
return 0
def reset(self):
return 0
class C6H6(Sensor):
def startup(self, sentype):
return 'Toxic'
def read(self):
return 0
def reset(self):
return 0
class C3H8(Sensor):
def startup(self, sentype):
return 'On Fire'
def read(self):
return 0
def reset(self):
return 0
| apache-2.0 | -8,357,207,316,017,917,000 | 16.416667 | 127 | 0.5832 | false | 3.747012 | false | false | false |
Hossein-Noroozpour/PyHDM | hml/classification/HNearestNeighborsClassifier.py | 1 | 2014 | #!/usr/bin/python3.3
# coding=utf-8
"""
Module for K nearest neighbours.
"""
__author__ = 'Hossein Noroozpour Thany Abady'
#from math3d import sqrt
import numpy
class HNearestNeighboursClassifier():
"""
Class for K nearest neighbors algorithm.
"""
def __init__(self, n_neighbors=5, weight_function=lambda l: [1. / (d + .0001) for d in l], weight_name='i'):
self.n_neighbors = n_neighbors
self.weight_function = weight_function
self.train = None
self.target = None
self.weight_name = weight_name
def fit(self, train, target):
"""
:param train:
:param target:
"""
self.train = numpy.array(train)
self.target = target
return self
def predict(self, test):
"""
:param test:
"""
result = []
test = numpy.array(test)
for t in test:
distances = []
for r in self.train:
d = r - t
                distances.append(numpy.sqrt(d.dot(d)))  # Euclidean distance
weights = self.weight_function(distances)
wc = [(weights[i], self.target[i]) for i in range(len(self.target))]
wc.sort(key=lambda tup: tup[0], reverse=True)
v = dict()
for i in range(self.n_neighbors):
if v.get(wc[i][1]) is None:
v[wc[i][1]] = 1
else:
v[wc[i][1]] += 1
vote = 0
c = 0
for k in v.keys():
                if v[k] >= vote:
                    vote = v[k]
                    c = k
result.append(c)
return result
def __str__(self):
return 'K nearest neighbors classifier with n=' + str(self.n_neighbors) + ' and weight=' + str(self.weight_name)
def score(self, x, y):
"""
:param x:
:param y:
"""
p = self.predict(x)
c = 0
for i in range(len(y)):
if p[i] == y[i]:
c += 1
return float(c) / float(len(y)) | mit | 3,813,939,173,336,904,000 | 25.866667 | 120 | 0.478153 | false | 3.688645 | false | false | false |
ybonjour/nuus | services/indexing/Indexer.py | 1 | 3702 | __author__ = 'Yves Bonjour'
from Tokenizer import create_tokenizer
import redis
import uuid
def create_indexer(redis_host, redis_port):
tokenizer = create_tokenizer()
redis_db = redis.Redis(redis_host, redis_port)
store = RedisIndexStore(redis_db)
return Indexer(store, tokenizer)
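# Illustrative usage (a sketch; assumes a Redis server is reachable on the
# given host/port and that document ids are uuid.UUID values as used below):
#   indexer = create_indexer('localhost', 6379)
#   indexer.index('some text to index', doc_id)
#   posting = indexer.get_posting_list('text')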
class Indexer:
def __init__(self, store, tokenizer):
self.store = store
self.tokenizer = tokenizer
def index(self, text, document_id):
tokens = self.tokenizer.tokenize(text)
for token in tokens:
self.store.add(document_id, token)
def document_frequency_normalized(self, term):
return float(self.store.document_frequency(term)) / float(self.store.num_documents())
def term_document_frequency(self, document, term):
return self.store.term_document_frequency(document, term)
def get_posting_list(self, term):
return self.store.posting_list(term)
def get_terms(self, document):
return self.store.get_terms(document)
class MemoryIndexStore(object):
def __init__(self):
self.posting_lists = {}
self.documents = {}
def posting_list(self, term):
if term not in self.posting_lists:
return {}
return self.posting_lists[term]
def get_terms(self, document):
if document not in self.documents:
return []
return self.documents[document]
def document_frequency(self, term):
if term not in self.posting_lists:
return 0
return len(self.posting_lists[term])
def num_documents(self):
return len(self.documents)
def term_document_frequency(self, document, term):
if term not in self.posting_lists or document not in self.posting_lists[term]:
return 0
return self.posting_lists[term][document]
def add(self, document, term):
if term not in self.posting_lists:
self.posting_lists[term] = {}
if document not in self.posting_lists[term]:
self.posting_lists[term][document] = 0
self.posting_lists[term][document] += 1
if document not in self.documents:
self.documents[document] = set()
self.documents[document].add(term)
class RedisIndexStore(object):
def __init__(self, redis):
self.redis = redis
def posting_list(self, term):
return {uuid.UUID(document): int(self.redis.get(self._posting_key(term, document)))
for document in self.redis.smembers(self._term_key(term))}
def document_frequency(self, term):
return len(self.redis.smembers(self._term_key(term)))
def get_terms(self, document):
return self.redis.smembers(self._document_key(document))
def num_documents(self):
return len(self.redis.smembers(self._documents_key()))
def term_document_frequency(self, document, term):
tdf = self.redis.get(self._posting_key(term, document))
return int(tdf) if tdf else 0
def add(self, document, term):
self.redis.sadd(self._documents_key(), document)
self.redis.sadd(self._term_key(term), document)
self.redis.sadd(self._document_key(document), term)
self.redis.setnx(self._posting_key(term, document), 0)
self.redis.incr(self._posting_key(term, document))
def _documents_key(self):
return "documents"
def _document_key(self, document):
return "document:{document}".format(document=document)
def _term_key(self, term):
return "term:{term}".format(term=term)
def _posting_key(self, term, document):
return "posting:{term}:{document}".format(term=term, document=document) | mit | 5,113,585,835,216,535,000 | 29.105691 | 93 | 0.642896 | false | 3.820433 | false | false | false |
cria/microSICol | import_db.py | 1 | 7329 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Script to import XML data to current SICol database
# Note: This script must be executed from the root directory
# Author:Renato Arnellas Coelho renatoac at gmail dot com
import sys
import os
from xml.dom.minidom import Document,parse
def importSQLite(xml,sqlite_path='./db/sqlite.db'):
'''
xml = XML filename
  sqlite_path = path to the SQLite database file (the default is usually fine)
'''
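  # Illustrative call (a sketch; 'export.xml' is a placeholder for an XML file
  # produced by the matching export script):
  #   importSQLite('export.xml')  # loads rows into ./db/sqlite.db, emptying each table first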
from pysqlite2 import dbapi2 as sqlite
print "Connecting to SQLite database..."
if os.path.exists(sqlite_path):
#Connect
connect = sqlite.connect(sqlite_path,detect_types=sqlite.PARSE_COLNAMES,isolation_level=None)
cursor = connect.cursor()
print "Loading SQLite XML..."
doc = parse(xml)
tables = doc.getElementsByTagName('table')
for table in tables:
tablename = table.getAttribute('name')
print "Emptying table '%s'..." % tablename
rows = table.getElementsByTagName('row')
cursor.execute("DELETE FROM %s;" % tablename) #clear table first
print "Inserting values in table '%s'..." % tablename
### INSERT ITEM ###
for row in rows:
fields = row.getElementsByTagName('field')
colnames = []
colvalues = []
for field in fields:
colnames.append('`'+field.getAttribute('name')+'`')
coltype = field.getAttribute('type')
if coltype == 'integer':
colvalues.append(field.getAttribute('value'))
elif coltype == 'NULL':
colvalues.append("NULL")
else: #behaves as string
colvalues.append("'"+field.getAttribute('value').replace("'","\\'")+"'")
cursor.execute("INSERT INTO `%s` (%s) VALUES (%s);" % (tablename,",".join(colnames),",".join(colvalues) ) )
###################
#Close
cursor.close()
connect.close()
print "*** Import Finished ***"
raw_input()
else:
print "*** ERROR ***"
print "Unable to connect to SQLite database."
raw_input()
def importData(xml,host,user,pwd,dbname,port):
'''
xml = XML filename
host = MySQL host
user = MySQL root user
pwd = MySQL root password
dbname = MySQL database to be used
port = MySQL port number
'''
import MySQLdb as mysql
#Load file to Python XML object
print "Loading XML..."
doc = parse(xml)
print "Generating intermediate SQL import file..."
output = []
#Connect to database
output.append("USE %s;" % dbname)
#Set Global VARS
output.append("/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;")
output.append("/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;")
output.append("/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;")
output.append("/*!40101 SET NAMES utf8 */;")
output.append("/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;")
output.append("/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;")
output.append("/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;")
output.append("")
#Insert data in each table disabling key constrains
tables = doc.getElementsByTagName('table')
for table in tables:
tablename = table.getAttribute('name')
print "Reading table '%s'..." % tablename
rows = table.getElementsByTagName('row')
output.append("/*!40000 ALTER TABLE `%s` DISABLE KEYS */;" % tablename)
output.append("TRUNCATE TABLE `%s`;" % tablename) #clear table first
### INSERT ITEM ###
for row in rows:
fields = row.getElementsByTagName('field')
colnames = []
colvalues = []
for field in fields:
colnames.append('`'+field.getAttribute('name')+'`')
coltype = field.getAttribute('type')
if coltype == 'integer':
colvalues.append(field.getAttribute('value'))
elif coltype == 'NULL':
colvalues.append("NULL")
else: #behaves as string
colvalues.append("'"+field.getAttribute('value').replace("'","\\'")+"'")
output.append("INSERT INTO `%s`.`%s` (%s) VALUES (%s);" % (dbname,tablename,",".join(colnames),",".join(colvalues) ) )
###################
output.append("/*!40000 ALTER TABLE `%s` ENABLE KEYS */;" % tablename)
#Set Global VARS
output.append("")
output.append("/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;")
output.append("/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;")
output.append("/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;")
output.append("/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;")
output.append("/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;")
output.append("/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;")
output.append("/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;")
#Save SQL file
open('import.sql','w').write("\n".join(output).encode('utf-8'))
print "Running SQL import..."
sicol_path = os.getcwd()+os.sep+'db'+os.sep+'scripts'+os.sep
import platform
if platform.system() == "Windows" or platform.system() == "Microsoft":
mysql_path = [x for x in os.environ['PATH'].split(";") if x.lower().find('mysql') != -1]
else: #UNIX
pipe = os.popen("which mysql") #grab where MySQL is installed
mysql_path = pipe.read().strip()
if mysql_path == '' or mysql_path == []:
print "*********** ERROR ***********"
print "Please insert path to executable directory (mysql.exe) in OS 'PATH' variable."
raw_input() #Wait for user input...
else:
if platform.system() == "Windows" or platform.system() == "Microsoft":
#Ignore whether PATH ends with '\' or not
mysql_path = mysql_path[0]
if mysql_path[-1] != '\\': mysql_path += '\\'
mysql_path = '"' + mysql_path + 'mysql.exe"'
try:
bd_version = dbname.split("_")[1]
except Exception,e:
print "*********** ERROR ***********"
print "Please type \"sicol_v###\" where ### = version number."
raw_input() #Wait for user input...
return
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,os.getcwd()+os.sep+"import.sql") )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
return
print "*** Import Finished ***"
raw_input()
#If this script is called locally...
if __name__ == "__main__":
print "*** Import SICol Database ***"
opt = raw_input("Import MySQL data? (y/n)")[0].lower()
if opt == 'y':
import getpass
import os.path
host = raw_input("host=")
port = raw_input("port=")
root_login = raw_input("administrator login=")
root_pwd = getpass.getpass("administrator password=")
dbname = raw_input("database name=")
xml = raw_input("import XML filename=")
while not os.path.exists(xml) and xml != '':
print "*** ERROR ***"
print "Specified file does not exist!"
xml = raw_input("import XML filename=")
if xml != '':
importData(xml,host,root_login,root_pwd,dbname,port)
opt = raw_input("Import SQLite data? (y/n)")[0].lower()
if opt == 'y':
xml = raw_input("import XML filename=")
while not os.path.exists(xml) and xml != '':
print "*** ERROR ***"
print "Specified file does not exist!"
xml = raw_input("import XML filename=")
if xml != '':
importSQLite(xml)
| gpl-2.0 | -555,823,517,836,128,260 | 39.716667 | 127 | 0.617137 | false | 3.573379 | false | false | false |
GoogleCloudPlatform/professional-services | examples/bq-email-exports/export_query_results_function/main.py | 1 | 2698 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cloud Function for exporting BigQuery results from an anonymous table to GCS.
Triggered after BigQuery query is complete.
"""
import base64
import json
import logging
import os
import google.api_core.client_info
from google.cloud import bigquery
CLIENT_INFO = google.api_core.client_info.ClientInfo(
user_agent="google-pso-example/bq-email-exports")
def main(event, context):
"""Entrypoint for Cloud Function"""
data = base64.b64decode(event['data'])
upstream_bq_dts_obj = json.loads(data)
error = upstream_bq_dts_obj.get('errorStatus')
if error:
logging.error(
RuntimeError(f"Error in upstream query job: {error['message']}."))
else:
project_id = get_env('PROJECT_ID')
dataset_id = upstream_bq_dts_obj['destinationDatasetId']
table_name = upstream_bq_dts_obj['params'][
'destination_table_name_template']
schedule_time = upstream_bq_dts_obj['scheduleTime']
bq_client = bigquery.Client(client_info=CLIENT_INFO)
dataset_ref = bigquery.DatasetReference.from_string(
dataset_id, default_project=project_id)
table_ref = dataset_ref.table(table_name)
destination_uri = get_destination_uri(schedule_time)
extract_config = bigquery.ExtractJobConfig(
compression=get_env('COMPRESSION'),
destination_format=get_env('DEST_FMT'),
            field_delimiter=get_env('FIELD_DELIMITER'),
use_avro_logical_types=get_env('USE_AVRO_TYPES'))
bq_client.extract_table(table_ref,
destination_uri,
job_id_prefix="email_export_",
job_config=extract_config)
print(
f"Exporting {project_id}:{dataset_id}.{table_name} to {destination_uri}"
)
def get_destination_uri(schedule_time):
"""Returns destination GCS URI for export"""
return (f"gs://{get_env('BUCKET_NAME')}/"
f"{schedule_time}/{get_env('OBJECT_NAME')}")
def get_env(name):
"""Returns environment variable"""
return os.environ[name]
| apache-2.0 | -1,615,927,224,140,515,800 | 34.973333 | 84 | 0.659748 | false | 3.854286 | false | false | false |
hustodemon/spacewalk | backend/server/rhnServer/server_hardware.py | 1 | 34703 | #
# Copyright (c) 2008--2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# This file contains all the logic necessary to manipulate Hardware
# items - load, reload, instantiate and save
#
import string
import sys
from rhn.UserDictCase import UserDictCase
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnFault
from spacewalk.common.rhnTB import Traceback
from spacewalk.server import rhnSQL
def kudzu_mapping(dict=None):
""" this is a class we use to get the mapping for a kudzu entry """
# This is the generic mapping we need
mapping = {
'desc': 'description',
}
# error handling if we get passed weird stuff.
if not dict:
return mapping
if not type(dict) == type({}) and not isinstance(dict, UserDictCase):
return mapping
hw_bus = dict.get("bus")
# we need to have a bus type to be able to continue
if not hw_bus:
return mapping
hw_bus = string.lower(hw_bus)
extra = {}
if hw_bus == "ddc":
extra = {
"id": None,
"horizsyncmin": "prop1",
"horizsyncmax": "prop2",
"vertrefreshmin": "prop3",
"vertrefreshmax": "prop4",
"modes": None,
"mem": None,
}
elif hw_bus == "ide":
extra = {
"physical": "prop1",
"logical": "prop2",
}
elif hw_bus in ["isapnp", "isa"]:
extra = {
"pdeviceid": "prop1",
"deviceid": "prop2",
"compat": "prop3",
"native": None,
"active": None,
"cardnum": None, # XXX: fix me
"logdev": "prop4",
"io": "prop2",
"irq": "prop1",
"dma": "prop3",
"mem": "prop4",
}
elif hw_bus == "keyboard":
extra = {}
elif hw_bus == "psaux":
extra = {}
elif hw_bus == "parallel":
extra = {
'pnpmfr': 'prop1',
'pnpdesc': 'prop2',
'pnpmodel': 'prop3',
'pnpmodes': 'prop4',
'pinfo': None,
'pinfo.xres': None,
'pinfo.yres': None,
'pinfo.color': None,
'pinfo.ascii': None,
}
elif hw_bus == "pci":
extra = {
'vendorid': 'prop1',
'deviceid': 'prop2',
'subvendorid': 'prop3',
'subdeviceid': 'prop4',
'network.hwaddr': None,
'pcibus': None,
'pcidev': None,
'pcifn': None,
'pcidom': None,
}
elif hw_bus == "sbus":
extra = {
"monitor": "prop1",
"width": "prop2",
"height": "prop3",
"freq": "prop4",
}
elif hw_bus == "scsi":
extra = {
'host': 'prop1',
'id': 'prop2',
'channel': 'prop3',
'lun': 'prop4',
'generic': None,
}
elif hw_bus == "serial":
extra = {
'pnpmfr': 'prop1',
'pnpdesc': 'prop2',
'pnpmodel': 'prop3',
'pnpcompat': "prop4",
}
elif hw_bus == "usb":
extra = {
"vendorid": "prop1",
"deviceid": "prop2",
"usbclass": "prop3",
"usbbus": "prop4",
"usblevel": "pciType",
"usbdev": None,
"usbprod": None,
"usbsubclass": None,
"usbprotocol": None,
"usbport": None,
"usbmfr": None,
"productname": None,
"productrevision": None,
'network.hwaddr': None,
}
elif hw_bus == "firewire":
extra = {
'vendorid': 'prop1',
'deviceid': 'prop2',
'subvendorid': 'prop3',
'subdeviceid': 'prop4',
}
elif hw_bus == 'pcmcia':
extra = {
'vendorid': 'prop1',
'deviceid': 'prop2',
'function': 'prop3',
'slot': 'prop4',
'network.hwaddr': None,
}
mapping.update(extra)
return mapping
def cleanse_ip_addr(ip_addr):
""" Cleans up things like 127.00.00.01 """
if ip_addr is None:
return None
# Make sure it's a string
ip_addr = str(ip_addr)
    # If the ipaddr is empty, just return an empty str
if not len(ip_addr):
return ''
arr = ip_addr.split('.')
# lstrip will remove all leading zeros; if multiple zeros are present, it
# would remove too much, hence the or '0' here.
return '.'.join([x.lstrip('0') or '0' for x in arr])
class GenericDevice:
""" A generic device class """
table = "override-GenericDevice"
def __init__(self):
self.id = 0
self.status = 1 # just added
self.data = {}
# default to the hardware seq...
self.sequence = "rhn_hw_dev_id_seq"
self._autonull = ("description", "board")
def getid(self):
if self.id == 0:
self.id = rhnSQL.Sequence(self.sequence)()
return self.id
def must_save(self):
if self.id == 0 and self.status == 2: # deleted new item
return 0
if self.status == 0: # original item, unchanged
return 0
return 1
def save(self, sysid):
""" save data in the rhnDevice table """
log_debug(4, self.table, self.status, self.data)
if not self.must_save():
return 0
t = rhnSQL.Table(self.table, "id")
# check if we have to delete
if self.status == 2 and self.id:
# delete the entry
del t[self.id]
return 0
# set description to null if empty
self._null_columns([self.data], self._autonull)
# make sure we have a device id
devid = self.getid()
for k in self.data.keys():
if self.data[k] is None:
del self.data[k]
self.data["server_id"] = sysid
t[devid] = self.data
self.status = 0 # now it is saved
return 0
def reload(self, devid):
""" reload from rhnDevice table based on devid """
if not devid:
return -1
t = rhnSQL.Table(self.table, "id")
self.data = t[devid]
# clean up fields we don't want
if self.data:
for k in ["created", "modified"]:
if self.data.has_key(k):
del self.data[k]
self.id = devid
self.status = 0
return 0
def _null_columns(self, params, names=()):
""" Method searches for empty string in params dict with names
defined in names list and replaces them with None value which
is translated to NULL in SQL.
We do not allow empty strings in database for compatibility
reasons between Oracle and PostgreSQL.
"""
# list of dicts
for param in params:
for name in names:
if name in param and param[name] == '':
param[name] = None
class Device(GenericDevice):
""" This is the base Device class that supports instantiation from a
        dictionary. The __init__ takes the dictionary as its argument,
        together with a list of valid fields to recognize and with a mapping
        for dictionary keys into valid field names for self.data.
        The fields are required to know what fields we have in the
        table. The mapping allows transformation from whatever comes in to
        valid fields in the table. Looks complicated but it isn't -- gafton
"""
def __init__(self, fields, dict=None, mapping=None):
GenericDevice.__init__(self)
x = {}
for k in fields:
x[k] = None
self.data = UserDictCase(x)
if not dict:
return
# make sure we get a UserDictCase to work with
if type(dict) == type({}):
dict = UserDictCase(dict)
if mapping is None or type(mapping) == type({}):
mapping = UserDictCase(mapping)
if not isinstance(dict, UserDictCase) or \
not isinstance(mapping, UserDictCase):
log_error("Argument passed is not a dictionary", dict, mapping)
raise TypeError("Argument passed is not a dictionary",
dict, mapping)
# make sure we have a platform
for k in dict.keys():
if dict[k] == '':
dict[k] = None
if self.data.has_key(k):
self.data[k] = dict[k]
continue
if mapping.has_key(k):
# the mapping dict might tell us to lose some fields
if mapping[k] is not None:
self.data[mapping[k]] = dict[k]
else:
log_error("Unknown HW key =`%s'" % k,
dict.dict(), mapping.dict())
# The try-except is added just so that we can send e-mails
try:
raise KeyError("Don't know how to parse key `%s''" % k,
dict.dict())
except:
Traceback(mail=1)
# Ignore this key
continue
# clean up this data
try:
for k in self.data.keys():
if type(self.data[k]) == type("") and len(self.data[k]):
self.data[k] = string.strip(self.data[k])
if not len(self.data[k]):
continue
if self.data[k][0] == '"' and self.data[k][-1] == '"':
self.data[k] = self.data[k][1:-1]
except IndexError:
raise IndexError, "Can not process data = %s, key = %s" % (
repr(self.data), k), sys.exc_info()[2]
class HardwareDevice(Device):
""" A more specific device based on the Device class """
table = "rhnDevice"
def __init__(self, dict=None):
fields = ['class', 'bus', 'device', 'driver', 'detached',
'description', 'pcitype', 'prop1', 'prop2',
'prop3', 'prop4']
# get a processed mapping
mapping = kudzu_mapping(dict)
# ... and do little to no work
Device.__init__(self, fields, dict, mapping)
# use the hardware id sequencer
self.sequence = "rhn_hw_dev_id_seq"
class CPUDevice(Device):
""" A class for handling CPU - mirrors the rhnCPU structure """
table = "rhnCPU"
def __init__(self, dict=None):
fields = ['cpu_arch_id', 'architecture', 'bogomips', 'cache',
'family', 'mhz', 'stepping', 'flags', 'model',
'version', 'vendor', 'nrcpu', 'acpiVersion',
'apic', 'apmVersion', 'chipset', 'nrsocket']
mapping = {
"bogomips": "bogomips",
"cache": "cache",
"model": "model",
"platform": "architecture",
"type": "vendor",
"model_rev": "stepping",
"model_number": "family",
"model_ver": "version",
"model_version": "version",
"speed": "mhz",
"count": "nrcpu",
"socket_count": "nrsocket",
"other": "flags",
"desc": None,
'class': None,
}
# now instantiate this class
Device.__init__(self, fields, dict, mapping)
self.sequence = "rhn_cpu_id_seq"
if not dict:
return
if self.data.get("cpu_arch_id") is not None:
return # all fine, we have the arch
# if we don't have an architecture, guess it
if not self.data.has_key("architecture"):
log_error("hash does not have a platform member: %s" % dict)
raise AttributeError, "Expected a hash value for member `platform'"
# now extract the arch field, which has to come out of rhnCpuArch
arch = self.data["architecture"]
row = rhnSQL.Table("rhnCpuArch", "label")[arch]
if row is None or not row.has_key("id"):
log_error("Can not find arch %s in rhnCpuArch" % arch)
raise AttributeError, "Invalid architecture for CPU: `%s'" % arch
self.data["cpu_arch_id"] = row["id"]
del self.data["architecture"]
if self.data.has_key("nrcpu"): # make sure this is a number
try:
self.data["nrcpu"] = int(self.data["nrcpu"])
except:
self.data["nrcpu"] = 1
if self.data["nrcpu"] == 0:
self.data["nrcpu"] = 1
class NetworkInformation(Device):
""" This is a wrapper class for the Network Information (rhnServerNetwork) """
table = "rhnServerNetwork"
def __init__(self, dict=None):
fields = ["hostname", "ipaddr", "ip6addr"]
mapping = {'class': None}
Device.__init__(self, fields, dict, mapping)
self._autonull = ('ipaddr', 'ip6addr')
# use our own sequence
self.sequence = "rhn_server_net_id_seq"
# bugzilla: 129840 kudzu (rhpl) will sometimes pad octets
# with leading zeros, causing confusion; clean those up
self.data['ipaddr'] = cleanse_ip_addr(self.data['ipaddr'])
class NetIfaceInformation(Device):
key_mapping = {
'hwaddr': 'hw_addr',
'module': 'module',
}
def __init__(self, dict=None):
log_debug(4, dict)
self.ifaces = {}
self.db_ifaces = []
# parameters which are not allowed to be empty and set to NULL
self._autonull = ('hw_addr', 'module')
if not dict:
return
for name, info in dict.items():
if name == 'class':
# Ignore it
continue
if not isinstance(info, type({})):
raise rhnFault(53, "Unexpected format for interface %s" %
name)
vdict = {}
for key, mapping in self.key_mapping.items():
# Look at the mapping first; if not found, look for the key
if info.has_key(mapping):
k = mapping
else:
k = key
if not info.has_key(k):
raise rhnFault(53, "Unable to find required field %s"
% key)
val = info[k]
vdict[mapping] = val
if 'ipaddr' in info and info['ipaddr']:
vdict['ipv4'] = NetIfaceAddress4(
[{'ipaddr': info['ipaddr'], 'broadcast': info['broadcast'], 'netmask': info['netmask']}])
if 'ipv6' in info and info['ipv6']:
vdict['ipv6'] = NetIfaceAddress6(info["ipv6"])
self.ifaces[name] = vdict
def __str__(self):
return "<%s Class at %d: %s>\n" % (
self.__class__.__name__,
id(self), {
"self.ifaces": self.ifaces,
"self.db_ifaces": self.db_ifaces,
})
__repr__ = __str__
def save(self, server_id):
log_debug(4, self.ifaces)
self.reload(server_id)
log_debug(4, "Interfaces in DB", self.db_ifaces)
# Compute updates, deletes and inserts
inserts = []
updates = []
deletes = []
ifaces = self.ifaces.copy()
for iface in self.db_ifaces:
name = iface['name']
if not self.ifaces.has_key(name):
# To be deleted
deletes.append({'server_id': server_id, 'name': name})
continue
uploaded_iface = ifaces[name].copy()
del ifaces[name]
if _hash_eq(uploaded_iface, iface):
# Same value
continue
uploaded_iface.update({'name': name, 'server_id': server_id})
if 'ipv4' in uploaded_iface:
del(uploaded_iface['ipv4'])
if 'ipv6' in uploaded_iface:
del(uploaded_iface['ipv6'])
updates.append(uploaded_iface)
# Everything else in self.ifaces has to be inserted
for name, info in ifaces.items():
iface = {}
iface['name'] = name
iface['server_id'] = server_id
iface['hw_addr'] = info['hw_addr']
iface['module'] = info['module']
inserts.append(iface)
log_debug(4, "Deletes", deletes)
log_debug(4, "Updates", updates)
log_debug(4, "Inserts", inserts)
self._update(updates)
self._insert(inserts)
ifaces = self.ifaces.copy()
for name, info in ifaces.items():
if not 'ipv6' in info:
info['ipv6'] = NetIfaceAddress6()
info['ipv6'].save(self.get_server_id(server_id, name))
if not 'ipv4' in info:
info['ipv4'] = NetIfaceAddress4()
info['ipv4'].save(self.get_server_id(server_id, name))
        # delete addresses (if any) of deleted interfaces
for d in deletes:
interface = NetIfaceAddress6()
interface.save(self.get_server_id(server_id, d['name']))
interface = NetIfaceAddress4()
interface.save(self.get_server_id(server_id, d['name']))
self._delete(deletes)
return 0
def get_server_id(self, server_id, name):
""" retrieve id for given server_id and name """
h = rhnSQL.prepare("select id from rhnServerNetInterface where server_id=:server_id and name=:name")
h.execute(server_id=server_id, name=name)
row = h.fetchone_dict()
if row:
return row['id']
else:
return None
def _insert(self, params):
q = """insert into rhnServerNetInterface
(%s) values (%s)"""
self._null_columns(params, self._autonull)
columns = self.key_mapping.values() + ['server_id', 'name']
columns.sort()
bind_params = string.join(map(lambda x: ':' + x, columns), ", ")
h = rhnSQL.prepare(q % (string.join(columns, ", "), bind_params))
return _dml(h, params)
def _delete(self, params):
q = """delete from rhnServerNetInterface
where %s"""
columns = ['server_id', 'name']
wheres = map(lambda x: '%s = :%s' % (x, x), columns)
h = rhnSQL.prepare(q % string.join(wheres, " and "))
return _dml(h, params)
def _update(self, params):
q = """update rhnServerNetInterface
set %s
where %s"""
self._null_columns(params, self._autonull)
wheres = ['server_id', 'name']
wheres = map(lambda x: '%s = :%s' % (x, x), wheres)
wheres = string.join(wheres, " and ")
updates = self.key_mapping.values()
updates.sort()
updates = map(lambda x: '%s = :%s' % (x, x), updates)
updates = string.join(updates, ", ")
h = rhnSQL.prepare(q % (updates, wheres))
return _dml(h, params)
def reload(self, server_id):
h = rhnSQL.prepare("""
select *
from rhnServerNetInterface
where server_id = :server_id
""")
h.execute(server_id=server_id)
self.db_ifaces = []
while 1:
row = h.fetchone_dict()
if not row:
break
hval = {'primary_id': row['id'], 'name': row['name'], 'server_id': server_id}
for key in self.key_mapping.values():
hval[key] = row[key]
hval['ipv4'] = NetIfaceAddress4()
hval['ipv4'].reload(hval['primary_id'])
hval['ipv6'] = NetIfaceAddress6()
hval['ipv6'].reload(hval['primary_id'])
self.db_ifaces.append(hval)
self.status = 0
return 0
class NetIfaceAddress(Device):
key_mapping = {
'netmask': 'netmask',
'address': 'address',
}
    unique = ['address']  # to be overridden by child
    table = 'rhnServerNetAddress'  # to be overridden by child
def __init__(self, list_ifaces=None):
log_debug(4, list_ifaces)
self.ifaces = {}
self.db_ifaces = []
# parameters which are not allowed to be empty and set to NULL
self._autonull = ('address', 'netmask')
self.sequence = "rhn_srv_net_iface_id_seq"
if not list_ifaces:
return
for info in list_ifaces:
if not isinstance(info, type({})):
raise rhnFault(53, "Unexpected format for interface %s" %
info)
vdict = {}
for key, mapping in self.key_mapping.items():
# Look at the mapping first; if not found, look for the key
if info.has_key(mapping):
k = mapping
else:
k = key
if not info.has_key(k):
raise rhnFault(53, "Unable to find required field %s"
% (key))
val = info[k]
if mapping in ['ip_addr', 'netmask', 'broadcast', 'address']:
# bugzilla: 129840 kudzu (rhpl) will sometimes pad octets
# with leading zeros, causing confusion; clean those up
val = self.cleanse_ip_addr(val)
vdict[mapping] = val
self.ifaces[vdict['address']] = vdict
def __str__(self):
return "<%s Class at %d: %s>\n" % (
self.__class__.__name__,
id(self), {
"self.ifaces": self.ifaces,
"self.db_ifaces": self.db_ifaces,
})
__repr__ = __str__
def cleanse_ip_addr(self, val):
""" to be overriden by child """
return val
def save(self, interface_id):
log_debug(4, self.ifaces)
self.reload(interface_id)
log_debug(4, "Net addresses in DB", self.db_ifaces)
# Compute updates, deletes and inserts
inserts = []
updates = []
deletes = []
ifaces = self.ifaces.copy()
for iface in self.db_ifaces:
address = iface['address']
if not self.ifaces.has_key(iface['address']):
# To be deleted
# filter out params, which are not used in query
iface = dict((column, iface[column]) for column in self.unique)
deletes.append(iface)
continue
uploaded_iface = ifaces[address]
del ifaces[address]
            # FIXME this is inefficient for IPv4: if the row is present it will always be updated
if _hash_eq(uploaded_iface, iface):
# Same value
continue
uploaded_iface.update({'interface_id': interface_id})
updates.append(uploaded_iface)
# Everything else in self.ifaces has to be inserted
for name, iface in ifaces.items():
iface['address'] = iface['address']
iface['interface_id'] = interface_id
inserts.append(iface)
log_debug(4, "Deletes", deletes)
log_debug(4, "Updates", updates)
log_debug(4, "Inserts", inserts)
self._delete(deletes)
self._update(updates)
self._insert(inserts)
def _insert(self, params):
q = """insert into %s
(%s) values (%s)"""
self._null_columns(params, self._autonull)
columns = self.key_mapping.values() + ['interface_id']
columns.sort()
bind_params = string.join(map(lambda x: ':' + x, columns), ", ")
h = rhnSQL.prepare(q % (self.table, string.join(columns, ", "), bind_params))
return _dml(h, params)
def _delete(self, params):
q = """delete from %s
where %s"""
columns = self.unique
wheres = map(lambda x: '%s = :%s' % (x, x), columns)
h = rhnSQL.prepare(q % (self.table, string.join(wheres, " and ")))
return _dml(h, params)
def _update(self, params):
q = """update %s
set %s
where %s"""
self._null_columns(params, self._autonull)
wheres = self.unique
wheres = map(lambda x: '%s = :%s' % (x, x), wheres)
wheres = string.join(wheres, " and ")
updates = self.key_mapping.values()
updates.sort()
updates = map(lambda x: '%s = :%s' % (x, x), updates)
updates = string.join(updates, ", ")
h = rhnSQL.prepare(q % (self.table, updates, wheres))
return _dml(h, params)
def reload(self, interface_id):
h = rhnSQL.prepare("""
select *
from %s
where interface_id = :interface_id
order by interface_id
""" % self.table)
h.execute(interface_id=interface_id)
self.db_ifaces = []
while 1:
row = h.fetchone_dict()
if not row:
break
hval = {'interface_id': row['interface_id']}
for key in self.key_mapping.values():
hval[key] = row[key]
self.db_ifaces.append(hval)
self.status = 0
return 0
class NetIfaceAddress6(NetIfaceAddress):
""" IPv6 Network interface """
key_mapping = {
'netmask': 'netmask',
'addr': 'address',
'scope': 'scope',
}
table = 'rhnServerNetAddress6'
unique = ['interface_id', 'address', 'scope']
def __init__(self, addr_dict=None):
NetIfaceAddress.__init__(self, addr_dict)
self._autonull = ('address', 'netmask', 'scope')
class NetIfaceAddress4(NetIfaceAddress):
""" IPv4 Network interface """
key_mapping = {
'netmask': 'netmask',
'ipaddr': 'address',
'broadcast': 'broadcast',
}
table = 'rhnServerNetAddress4'
unique = ['interface_id']
def __init__(self, addr_dict=None):
NetIfaceAddress.__init__(self, addr_dict)
self._autonull = ('address', 'netmask', 'broadcast')
def cleanse_ip_addr(self, val):
return cleanse_ip_addr(val)
def _hash_eq(h1, h2):
""" Compares two hashes and return 1 if the first is a subset of the second """
log_debug(5, h1, h2)
for k, v in h1.items():
if not h2.has_key(k):
return 0
if h2[k] != v:
return 0
return 1
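# For example (illustrating the subset semantics above):
#   _hash_eq({'a': 1}, {'a': 1, 'b': 2})  ->  1   (every key/value of h1 is in h2)
#   _hash_eq({'a': 1}, {'a': 2})          ->  0   (value differs)
#   _hash_eq({'c': 3}, {'a': 1})          ->  0   (key missing from h2)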
def _dml(statement, params):
log_debug(5, params)
if not params:
return 0
params = _transpose(params)
rowcount = statement.executemany(**params)
log_debug(5, "Affected rows", rowcount)
return rowcount
def _transpose(hasharr):
""" Transpose the array of hashes into a hash of arrays """
if not hasharr:
return {}
keys = hasharr[0].keys()
result = {}
for k in keys:
result[k] = []
for hval in hasharr:
for k in keys:
if hval.has_key(k):
result[k].append(hval[k])
else:
result[k].append(None)
return result
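# For example (illustrating the transposition above):
#   _transpose([{'a': 1, 'b': 2}, {'a': 3}])  ->  {'a': [1, 3], 'b': [2, None]}
# Keys come from the first hash; hashes missing a key contribute None.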
class MemoryInformation(Device):
""" Memory information """
table = "rhnRAM"
def __init__(self, dict=None):
fields = ["ram", "swap"]
mapping = {"class": None}
Device.__init__(self, fields, dict, mapping)
# use our own sequence
self.sequence = "rhn_ram_id_seq"
if not dict:
return
# Sometimes we get sent a NNNNL number and we need to strip the L
for k in fields:
if not self.data.has_key(k):
continue
if self.data[k] in [None, "None", ""]:
self.data[k] = -1
self.data[k] = str(self.data[k])
if self.data[k][-1] == 'L':
self.data[k] = self.data[k][:-1]
class DMIInformation(Device):
""" DMI information """
table = "rhnServerDMI"
def __init__(self, dict=None):
fields = ["vendor", "system", "product", "asset", "board",
"bios_vendor", "bios_version", "bios_release"]
mapping = {"class": None}
Device.__init__(self, fields, dict, mapping)
# use our own sequence
self.sequence = "rhn_server_dmi_id_seq"
self._autonull = ("vendor", "system", "product", "asset", "board",
"bios_vendor", "bios_version", "bios_release")
if not dict:
return
# deal with hardware with insanely long dmi strings...
for key, value in self.data.items():
# Some of the values may be None
if value and isinstance(value, type("")):
self.data[key] = value[:256]
class InstallInformation(Device):
""" Install information """
table = "rhnServerInstallInfo"
def __init__(self, dict=None):
fields = ['install_method', 'iso_status', 'mediasum']
mapping = {
'class': None,
'installmethod': 'install_method',
'isostatus': 'iso_status',
'mediasum': 'mediasum',
}
Device.__init__(self, fields, dict, mapping)
self.sequence = 'rhn_server_install_info_id_seq'
class Hardware:
""" Support for the hardware items """
def __init__(self):
self.__hardware = {}
self.__loaded = 0
self.__changed = 0
def hardware_by_class(self, device_class):
return self.__hardware[device_class]
def add_hardware(self, hardware):
""" add new hardware """
log_debug(4, hardware)
if not hardware:
return -1
if type(hardware) == type({}):
hardware = UserDictCase(hardware)
if not isinstance(hardware, UserDictCase):
log_error("argument type is not hash: %s" % hardware)
raise TypeError, "This function requires a hash as an argument"
# validation is important
hw_class = hardware.get("class")
if hw_class is None:
return -1
hw_class = string.lower(hw_class)
class_type = None
if hw_class in ["video", "audio", "audio_hd", "usb", "other", "hd", "floppy",
"mouse", "modem", "network", "cdrom", "scsi",
"unspec", "scanner", "tape", "capture", "raid",
"socket", "keyboard", "printer", "firewire", "ide"]:
class_type = HardwareDevice
elif hw_class == "cpu":
class_type = CPUDevice
elif hw_class == "netinfo":
class_type = NetworkInformation
elif hw_class == "memory":
class_type = MemoryInformation
elif hw_class == "dmi":
class_type = DMIInformation
elif hw_class == "installinfo":
class_type = InstallInformation
elif hw_class == "netinterfaces":
class_type = NetIfaceInformation
else:
log_error("UNKNOWN CLASS TYPE `%s'" % hw_class)
# Same trick: try-except and raise the exception so that Traceback
# can send the e-mail
try:
                raise KeyError, "Unknown class type `%s' for hardware '%s'" % (
hw_class, hardware)
except:
Traceback(mail=1)
return
# create the new device
new_dev = class_type(hardware)
if self.__hardware.has_key(class_type):
_l = self.__hardware[class_type]
else:
_l = self.__hardware[class_type] = []
_l.append(new_dev)
self.__changed = 1
return 0
def delete_hardware(self, sysid=None):
""" This function deletes all hardware. """
log_debug(4, sysid)
if not self.__loaded:
self.reload_hardware_byid(sysid)
hardware = self.__hardware
if hardware == {}:
# nothing to delete
return 0
self.__changed = 1
for device_type in hardware.keys():
for hw in hardware[device_type]:
hw.status = 2 # deleted
# filter out the hardware that was just added and then
# deleted before saving
hardware[device_type] = filter(lambda a:
not (a.status == 2 and hasattr(a, "id") and a.id == 0),
hardware[device_type])
return 0
def save_hardware_byid(self, sysid):
"""Save the hardware list """
log_debug(3, sysid, "changed = %s" % self.__changed)
hardware = self.__hardware
if hardware == {}: # nothing loaded
return 0
if not self.__changed:
return 0
for device_type, hw_list in hardware.items():
for hw in hw_list:
hw.save(sysid)
self.__changed = 0
return 0
def __load_from_db(self, DevClass, sysid):
""" Load a certain hardware class from the database """
if not self.__hardware.has_key(DevClass):
self.__hardware[DevClass] = []
h = rhnSQL.prepare("select id from %s where server_id = :sysid" % DevClass.table)
h.execute(sysid=sysid)
rows = h.fetchall_dict() or []
for device in rows:
dev_id = device['id']
dev = DevClass()
dev.reload(dev_id)
self.__hardware[DevClass].append(dev)
def reload_hardware_byid(self, sysid):
""" load all hardware devices for a server """
log_debug(4, sysid)
if not sysid:
return -1
self.__hardware = {} # discard what was already loaded
# load from all hardware databases
self.__load_from_db(HardwareDevice, sysid)
self.__load_from_db(CPUDevice, sysid)
self.__load_from_db(DMIInformation, sysid)
self.__load_from_db(NetworkInformation, sysid)
self.__load_from_db(MemoryInformation, sysid)
self.__load_from_db(InstallInformation, sysid)
net_iface_info = NetIfaceInformation()
net_iface_info.reload(sysid)
self.__hardware[NetIfaceInformation] = [net_iface_info]
# now set the flag
self.__changed = 0
self.__loaded = 1
return 0
| gpl-2.0 | -6,922,134,149,411,319,000 | 32.757782 | 109 | 0.51788 | false | 3.919471 | false | false | false |
iffy/eliot | benchmarks/logwriter.py | 1 | 1041 | """
A benchmark for eliot.logwriter.
"""
import tempfile
import time
from twisted.internet.task import react
from twisted.python.filepath import FilePath
from eliot.logwriter import ThreadedFileWriter
LENGTH = 100
MESSAGES = 100000
def main(reactor):
print "Message size: %d bytes Num messages: %d" % (LENGTH, MESSAGES)
message = b"a" * LENGTH
fp = FilePath(tempfile.mktemp())
writer = ThreadedFileWriter(fp.open("ab"), reactor)
writer.startService()
start = time.time()
for i in range(MESSAGES):
writer(message)
d = writer.stopService()
def done(_):
elapsed = time.time() - start
kbSec = (LENGTH * MESSAGES) / (elapsed * 1024)
messagesSec = MESSAGES / elapsed
print "messages/sec: %s KB/sec: %s" % (messagesSec, kbSec)
d.addCallback(done)
def cleanup(result):
fp.restat()
print
print "File size: ", fp.getsize()
fp.remove()
d.addBoth(cleanup)
return d
if __name__ == '__main__':
react(main, [])
| apache-2.0 | -6,183,397,444,295,994,000 | 20.6875 | 74 | 0.622478 | false | 3.57732 | false | false | false |
hirofumi0810/tensorflow_end2end_speech_recognition | utils/dataset/xe.py | 1 | 5444 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Base class for loading dataset for the frame-wise model.
In this class, all data will be loaded at each step.
You can use the multi-GPU version.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from utils.dataset.base import Base
class DatasetBase(Base):
def __init__(self, *args, **kwargs):
super(DatasetBase, self).__init__(*args, **kwargs)
def __getitem__(self, index):
input_i = np.array(self.input_paths[index])
label_i = np.array(self.label_paths[index])
return (input_i, label_i)
def __len__(self):
if self.data_type == 'train':
return 18088388
elif self.data_type == 'dev_clean':
return 968057
elif self.data_type == 'dev_other':
return 919980
def __next__(self, batch_size=None):
"""Generate each mini-batch.
Args:
batch_size (int, optional): the size of mini-batch
Returns:
            A tuple of `((inputs, labels), is_new_epoch)`
                inputs: list of input data of size
                    `[num_gpu, B, input_size]`
                labels: list of target labels of size
                    `[num_gpu, B, num_classes]`
is_new_epoch (bool): If true, 1 epoch is finished
"""
if self.max_epoch is not None and self.epoch >= self.max_epoch:
raise StopIteration
# NOTE: max_epoch = None means infinite loop
if batch_size is None:
batch_size = self.batch_size
# reset
if self.is_new_epoch:
self.is_new_epoch = False
# Load the first block at each epoch
if self.iteration == 0 or self.is_new_epoch:
# Randomly sample block
block_index = random.sample(list(self.rest_block), 1)
self.rest_block -= set(block_index)
# Load block
self.inputs_block = np.array(list(
map(lambda path: np.load(path),
self.input_paths[block_index])))
# NOTE: `[1, num_frames_per_block, input_dim]`
self.inputs_block = self.inputs_block.reshape(
-1, self.inputs_block.shape[-1])
self.labels_block = np.array(list(
map(lambda path: np.load(path),
self.label_paths[block_index])))
# NOTE: `[1, num_frames_per_block, num_classes]`
self.labels_block = self.labels_block.reshape(
-1, self.labels_block.shape[-1])
self.rest_frames = set(range(0, len(self.inputs_block), 1))
# Load block if needed
if len(self.rest_frames) < batch_size and len(self.rest_block) != 0:
# Randomly sample block
if len(self.rest_block) > 1:
block_index = random.sample(list(self.rest_block), 1)
else:
# Last block in each epoch
block_index = list(self.rest_block)
self.rest_block -= set(block_index)
# tmp
rest_inputs_pre_block = self.inputs_block[list(self.rest_frames)]
rest_labels_pre_block = self.labels_block[list(self.rest_frames)]
self.inputs_block = np.array(list(
map(lambda path: np.load(path),
self.input_paths[block_index]))).reshape(-1, self.inputs_block.shape[-1])
self.labels_block = np.array(list(
map(lambda path: np.load(path),
self.label_paths[block_index]))).reshape(-1, self.labels_block.shape[-1])
# Concatenate
self.inputs_block = np.concatenate(
(rest_inputs_pre_block, self.inputs_block), axis=0)
self.labels_block = np.concatenate(
(rest_labels_pre_block, self.labels_block), axis=0)
self.rest_frames = set(range(0, len(self.inputs_block), 1))
# Randomly sample frames
if len(self.rest_frames) > batch_size:
frame_indices = random.sample(
list(self.rest_frames), batch_size)
else:
# Last mini-batch in each block
frame_indices = list(self.rest_frames)
# Shuffle selected mini-batch
random.shuffle(frame_indices)
self.rest_frames -= set(frame_indices)
if len(self.rest_block) == 0 and len(self.rest_frames) == 0:
self.reset()
self.is_new_epoch = True
self.epoch += 1
self.rest_block = set(range(0, len(self.input_paths), 1))
# Set values of each data in mini-batch
inputs = self.inputs_block[frame_indices]
labels = self.labels_block[frame_indices]
###############
# Multi-GPUs
###############
if self.num_gpu > 1:
# Now we split the mini-batch data by num_gpu
inputs = np.array_split(inputs, self.num_gpu, axis=0)
labels = np.array_split(labels, self.num_gpu, axis=0)
else:
inputs = inputs[np.newaxis, :, :]
labels = labels[np.newaxis, :, :]
self.iteration += len(frame_indices)
return (inputs, labels), self.is_new_epoch
| mit | -3,416,920,738,480,581,600 | 35.536913 | 93 | 0.549596 | false | 3.850071 | false | false | false |
cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price | data/25 verizon/getDataAutoQuery.py | 1 | 3761 | import webhose;
import time;
from datetime import datetime, timedelta
from lxml import html
import requests
import unirest
webhose.config(token='c6052904-f312-436b-a6d8-d915084ac866')
days_back = 30
date_days_ago = datetime.now() - timedelta(days=days_back)
organization = 'verizon'
lang = 'english'
country = 'US'
#set API Token
apiToken = 'c6052904-f312-436b-a6d8-d915084ac866'
# Build URL
#queryURL = 'https://webhose.io/search?token=' + apiToken + '&format=json&q=' + sentiment + '%3A%22' + organization + '%22&ts=1478565932339'
### UPDATE YOUR END POINT HERE - Verizon Positive
response = unirest.get("https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=language%3A(english)%20thread.country%3AUS%20organization.positive%3A%22Verizon%22&ts=1478579908230",
headers={
"Accept": "text/plain"
}
)
count = 1
results = response.body["totalResults"]
while results > 0:
fileName = 'verizon_pos_' + str(count) + '.json'
out0 = open(fileName, 'w')
out0.truncate()
out0.write(response.raw_body)
out0.write("\n")
out0.close()
count = count + 1
print response.body["next"]
url = 'https://webhose.io' + response.body["next"]
response = unirest.get(url,
headers={
"Accept": "text/plain"
}
)
results = response.body["totalResults"]
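# The same fetch-and-follow-"next" pagination loop is repeated below for the
# neutral and negative sentiment queries; only the webhose query string and the
# output file prefix change.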
## UPDATE YOUR END POINT HERE - Verizon Neutral
response = unirest.get("https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=language%3A(english)%20thread.country%3AUS%20organization.neutral%3A%22Verizon%22&ts=1478579995010",
headers={
"Accept": "text/plain"
}
)
count = 1
results = response.body["totalResults"]
while results > 0:
fileName = 'verizon_neu_' + str(count) + '.json'
out0 = open(fileName, 'w')
out0.truncate()
out0.write(response.raw_body)
out0.write("\n")
out0.close()
count = count + 1
print response.body["next"]
url = 'https://webhose.io' + response.body["next"]
response = unirest.get(url,
headers={
"Accept": "text/plain"
}
)
results = response.body["totalResults"]
## UPDATE YOUR END POINT HERE - Verizon Negative
response = unirest.get("https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=language%3A(english)%20thread.country%3AUS%20organization.negative%3A%22Verizon%22&ts=1478580006047",
headers={
"Accept": "text/plain"
}
)
count = 1
results = response.body["totalResults"]
while results > 0:
fileName = 'verizon_neg_' + str(count) + '.json'
out0 = open(fileName, 'w')
out0.truncate()
out0.write(response.raw_body)
out0.write("\n")
out0.close()
count = count + 1
print response.body["next"]
url = 'https://webhose.io' + response.body["next"]
response = unirest.get(url,
headers={
"Accept": "text/plain"
}
)
results = response.body["totalResults"]
'''
postiveData = webhose.search("organization.positive:\"" + topic +
"\" language:\"" + lang +
"\" thread.country:\"" + country +
"\" domain_rank:<100000", since=int(time.mktime(date_days_ago.timetuple())) )
negativeData = webhose.search("organization.negative:\"" + topic +
"\" language:\"" + lang +
"\" thread.country:\"" + country +
"\" format:\"" + "json" +
"\" domain_rank:<100000", since=int(time.mktime(date_days_ago.timetuple())) )
neutralData = webhose.search("organization.negative:\"" + topic +
"\" language:\"" + lang +
"\" thread.country:\"" + country +
"\" domain_rank:<100000", since=int(time.mktime(date_days_ago.timetuple())) )
page = requests.get('https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=organization.positive%3A%22Microsoft%22&ts=1478565802902')
#print page
#print page.content
#print negativeData.next
#tree = html.fromstring(page.content)
'''
| mit | 3,822,441,865,177,565,000 | 26.253623 | 208 | 0.686785 | false | 2.71945 | false | false | false |
jumpserver/jumpserver | apps/perms/serializers/asset/permission.py | 1 | 5347 | # -*- coding: utf-8 -*-
#
from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
from django.db.models import Prefetch, Q
from orgs.mixins.serializers import BulkOrgResourceModelSerializer
from perms.models import AssetPermission, Action
from assets.models import Asset, Node, SystemUser
from users.models import User, UserGroup
__all__ = [
'AssetPermissionSerializer',
'ActionsField',
]
class ActionsField(serializers.MultipleChoiceField):
def __init__(self, *args, **kwargs):
kwargs['choices'] = Action.CHOICES
super().__init__(*args, **kwargs)
def to_representation(self, value):
return Action.value_to_choices(value)
def to_internal_value(self, data):
if data is None:
return data
return Action.choices_to_value(data)
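# Note: ActionsField round-trips between Action's combined integer value and a
# list of choice keys: to_representation() expands the stored value via
# Action.value_to_choices(), while to_internal_value() collapses a submitted
# list of choices back into one value via Action.choices_to_value().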
class ActionsDisplayField(ActionsField):
def to_representation(self, value):
values = super().to_representation(value)
choices = dict(Action.CHOICES)
return [choices.get(i) for i in values]
class AssetPermissionSerializer(BulkOrgResourceModelSerializer):
actions = ActionsField(required=False, allow_null=True)
is_valid = serializers.BooleanField(read_only=True)
is_expired = serializers.BooleanField(read_only=True, label=_('Is expired'))
users_display = serializers.ListField(child=serializers.CharField(), label=_('Users name'), required=False)
user_groups_display = serializers.ListField(child=serializers.CharField(), label=_('User groups name'), required=False)
assets_display = serializers.ListField(child=serializers.CharField(), label=_('Assets name'), required=False)
nodes_display = serializers.ListField(child=serializers.CharField(), label=_('Nodes name'), required=False)
system_users_display = serializers.ListField(child=serializers.CharField(), label=_('System users name'), required=False)
class Meta:
model = AssetPermission
fields_mini = ['id', 'name']
fields_small = fields_mini + [
'is_active', 'is_expired', 'is_valid', 'actions',
'created_by', 'date_created', 'date_expired',
'date_start', 'comment'
]
fields_m2m = [
'users', 'users_display', 'user_groups', 'user_groups_display', 'assets', 'assets_display',
'nodes', 'nodes_display', 'system_users', 'system_users_display',
'users_amount', 'user_groups_amount', 'assets_amount',
'nodes_amount', 'system_users_amount',
]
fields = fields_small + fields_m2m
read_only_fields = ['created_by', 'date_created']
extra_kwargs = {
'is_expired': {'label': _('Is expired')},
'is_valid': {'label': _('Is valid')},
'actions': {'label': _('Actions')},
'users_amount': {'label': _('Users amount')},
'user_groups_amount': {'label': _('User groups amount')},
'assets_amount': {'label': _('Assets amount')},
'nodes_amount': {'label': _('Nodes amount')},
'system_users_amount': {'label': _('System users amount')},
}
@classmethod
def setup_eager_loading(cls, queryset):
""" Perform necessary eager loading of data. """
queryset = queryset.prefetch_related('users', 'user_groups', 'assets', 'nodes', 'system_users')
return queryset
def to_internal_value(self, data):
if 'system_users_display' in data:
            # Convert system_users_display into system_users
system_users = data.get('system_users', [])
system_users_display = data.pop('system_users_display')
for name in system_users_display:
system_user = SystemUser.objects.filter(name=name).first()
if system_user and system_user.id not in system_users:
system_users.append(system_user.id)
data['system_users'] = system_users
return super().to_internal_value(data)
def perform_display_create(self, instance, **kwargs):
        # Users
users_to_set = User.objects.filter(
Q(name__in=kwargs.get('users_display')) | Q(username__in=kwargs.get('users_display'))
).distinct()
instance.users.add(*users_to_set)
        # User groups
user_groups_to_set = UserGroup.objects.filter(name__in=kwargs.get('user_groups_display')).distinct()
instance.user_groups.add(*user_groups_to_set)
        # Assets
assets_to_set = Asset.objects.filter(
Q(ip__in=kwargs.get('assets_display')) | Q(hostname__in=kwargs.get('assets_display'))
).distinct()
instance.assets.add(*assets_to_set)
        # Nodes
nodes_to_set = Node.objects.filter(full_value__in=kwargs.get('nodes_display')).distinct()
instance.nodes.add(*nodes_to_set)
def create(self, validated_data):
display = {
'users_display' : validated_data.pop('users_display', ''),
'user_groups_display' : validated_data.pop('user_groups_display', ''),
'assets_display' : validated_data.pop('assets_display', ''),
'nodes_display' : validated_data.pop('nodes_display', '')
}
instance = super().create(validated_data)
self.perform_display_create(instance, **display)
return instance
| gpl-2.0 | -1,230,686,421,651,684,400 | 40.913386 | 125 | 0.622957 | false | 3.940044 | false | false | false |
brainstorm/bcbio-nextgen | tests/bcbio_vm/test_docker.py | 1 | 3019 | import os
import subprocess
import pytest
from tests.conftest import make_workdir
from tests.conftest import get_post_process_yaml
@pytest.marks('docker')
def test_docker(install_test_files, data_dir):
"""Run an analysis with code and tools inside a docker container.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
with make_workdir() as workdir:
cl = [
"bcbio_vm.py",
"--datadir=%s" % data_dir,
"run",
"--systemconfig=%s" % get_post_process_yaml(data_dir, workdir),
"--fcdir=%s" % os.path.join(
data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(data_dir, "run_info-bam.yaml")
]
subprocess.check_call(cl)
@pytest.marks('docker_ipython', 'docker')
def test_docker_ipython(install_test_files, data_dir):
"""Run an analysis with code and tools inside a docker container,
driven via IPython.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
with make_workdir() as workdir:
cl = [
"bcbio_vm.py",
"--datadir=%s" % data_dir,
"ipython",
"--systemconfig=%s" % get_post_process_yaml(data_dir, workdir),
"--fcdir=%s" % os.path.join(
data_dir, os.pardir, "100326_FC6107FAAXX"),
os.path.join(data_dir, "run_info-bam.yaml"),
"lsf", "localrun"
]
subprocess.check_call(cl)
class TestCWL():
""" Run simple CWL workflows.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
@pytest.marks('cwl_docker', 'cwl', 'docker')
    def test_2_cwl_docker(self, install_test_files, data_dir):
"""Create a common workflow language description and run on a
Docker installation.
"""
with make_workdir() as workdir:
cl = ["bcbio_vm.py", "cwl", "../data/automated/run_info-cwl.yaml",
"--systemconfig", get_post_process_yaml(data_dir, workdir)]
subprocess.check_call(cl)
cl = ["bcbio_vm.py", "cwlrun", "cwltool", "run_info-cwl-workflow"]
subprocess.check_call(cl)
print
print "To run with a CWL tool, cd test_automated_output and:"
print " ".join(cl)
@pytest.marks('speed2', 'cwl', 'cwl_local', 'install_required')
def test_1_cwl_local(self, install_test_files, data_dir):
"""Create a common workflow language description and run on local installation.
"""
with make_workdir() as workdir:
cl = ["bcbio_vm.py", "cwl", "../data/automated/run_info-cwl.yaml",
"--systemconfig", get_post_process_yaml(data_dir, workdir)]
subprocess.check_call(cl)
cl = ["bcbio_vm.py", "cwlrun", "cwltool", "run_info-cwl-workflow",
"--no-container"]
subprocess.check_call(cl)
print
print "To run with a CWL tool, cd test_automated_output and:"
print " ".join(cl)
| mit | -4,401,909,337,369,498,600 | 34.940476 | 87 | 0.576019 | false | 3.430682 | true | false | false |
chromium/chromium | buildtools/checkdeps/rules.py | 5 | 7044 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base classes to represent dependency rules, used by checkdeps.py"""
import os
import re
class Rule(object):
"""Specifies a single rule for an include, which can be one of
ALLOW, DISALLOW and TEMP_ALLOW.
"""
# These are the prefixes used to indicate each type of rule. These
# are also used as values for self.allow to indicate which type of
# rule this is.
ALLOW = '+'
DISALLOW = '-'
TEMP_ALLOW = '!'
def __init__(self, allow, directory, dependent_directory, source):
self.allow = allow
self._dir = directory
self._dependent_dir = dependent_directory
self._source = source
def __str__(self):
return '"%s%s" from %s.' % (self.allow, self._dir, self._source)
def AsDependencyTuple(self):
"""Returns a tuple (allow, dependent dir, dependee dir) for this rule,
which is fully self-sufficient to answer the question whether the dependent
is allowed to depend on the dependee, without knowing the external
context."""
return self.allow, self._dependent_dir or '.', self._dir or '.'
def ParentOrMatch(self, other):
"""Returns true if the input string is an exact match or is a parent
of the current rule. For example, the input "foo" would match "foo/bar"."""
return self._dir == other or self._dir.startswith(other + '/')
def ChildOrMatch(self, other):
"""Returns true if the input string would be covered by this rule. For
example, the input "foo/bar" would match the rule "foo"."""
return self._dir == other or other.startswith(self._dir + '/')
class MessageRule(Rule):
"""A rule that has a simple message as the reason for failing,
unrelated to directory or source.
"""
def __init__(self, reason):
super(MessageRule, self).__init__(Rule.DISALLOW, '', '', '')
self._reason = reason
def __str__(self):
return self._reason
def ParseRuleString(rule_string, source):
"""Returns a tuple of a character indicating what type of rule this
is, and a string holding the path the rule applies to.
"""
if not rule_string:
raise Exception('The rule string "%s" is empty\nin %s' %
(rule_string, source))
if not rule_string[0] in [Rule.ALLOW, Rule.DISALLOW, Rule.TEMP_ALLOW]:
raise Exception(
'The rule string "%s" does not begin with a "+", "-" or "!".' %
rule_string)
# If a directory is specified in a DEPS file with a trailing slash, then it
# will not match as a parent directory in Rule's [Parent|Child]OrMatch above.
# Ban them.
if rule_string[-1] == '/':
raise Exception(
'The rule string "%s" ends with a "/" which is not allowed.'
' Please remove the trailing "/".' % rule_string)
return rule_string[0], rule_string[1:]
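# For example (illustrating the parsing above), ParseRuleString('+base/memory',
# 'src/DEPS') returns ('+', 'base/memory'); a string such as 'base' raises
# because it lacks a '+', '-' or '!' prefix, and '+base/' raises because of the
# trailing slash.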
class Rules(object):
"""Sets of rules for files in a directory.
By default, rules are added to the set of rules applicable to all
dependee files in the directory. Rules may also be added that apply
only to dependee files whose filename (last component of their path)
matches a given regular expression; hence there is one additional
set of rules per unique regular expression.
"""
def __init__(self):
"""Initializes the current rules with an empty rule list for all
files.
"""
# We keep the general rules out of the specific rules dictionary,
# as we need to always process them last.
self._general_rules = []
# Keys are regular expression strings, values are arrays of rules
# that apply to dependee files whose basename matches the regular
# expression. These are applied before the general rules, but
# their internal order is arbitrary.
self._specific_rules = {}
def __str__(self):
result = ['Rules = {\n (apply to all files): [\n%s\n ],' % '\n'.join(
' %s' % x for x in self._general_rules)]
for regexp, rules in list(self._specific_rules.items()):
result.append(' (limited to files matching %s): [\n%s\n ]' % (
regexp, '\n'.join(' %s' % x for x in rules)))
result.append(' }')
return '\n'.join(result)
def AsDependencyTuples(self, include_general_rules, include_specific_rules):
"""Returns a list of tuples (allow, dependent dir, dependee dir) for the
specified rules (general/specific). Currently only general rules are
supported."""
def AddDependencyTuplesImpl(deps, rules, extra_dependent_suffix=""):
for rule in rules:
(allow, dependent, dependee) = rule.AsDependencyTuple()
tup = (allow, dependent + extra_dependent_suffix, dependee)
deps.add(tup)
deps = set()
if include_general_rules:
AddDependencyTuplesImpl(deps, self._general_rules)
if include_specific_rules:
for regexp, rules in list(self._specific_rules.items()):
AddDependencyTuplesImpl(deps, rules, "/" + regexp)
return deps
def AddRule(self, rule_string, dependent_dir, source, dependee_regexp=None):
"""Adds a rule for the given rule string.
Args:
rule_string: The include_rule string read from the DEPS file to apply.
source: A string representing the location of that string (filename, etc.)
so that we can give meaningful errors.
dependent_dir: The directory to which this rule applies.
dependee_regexp: The rule will only be applied to dependee files
whose filename (last component of their path)
matches the expression. None to match all
dependee files.
"""
rule_type, rule_dir = ParseRuleString(rule_string, source)
if not dependee_regexp:
rules_to_update = self._general_rules
else:
if dependee_regexp in self._specific_rules:
rules_to_update = self._specific_rules[dependee_regexp]
else:
rules_to_update = []
# Remove any existing rules or sub-rules that apply. For example, if we're
# passed "foo", we should remove "foo", "foo/bar", but not "foobar".
rules_to_update = [x for x in rules_to_update
if not x.ParentOrMatch(rule_dir)]
rules_to_update.insert(0, Rule(rule_type, rule_dir, dependent_dir, source))
if not dependee_regexp:
self._general_rules = rules_to_update
else:
self._specific_rules[dependee_regexp] = rules_to_update
def RuleApplyingTo(self, include_path, dependee_path):
"""Returns the rule that applies to |include_path| for a dependee
file located at |dependee_path|.
"""
dependee_filename = os.path.basename(dependee_path)
for regexp, specific_rules in list(self._specific_rules.items()):
if re.match(regexp, dependee_filename):
for rule in specific_rules:
if rule.ChildOrMatch(include_path):
return rule
for rule in self._general_rules:
if rule.ChildOrMatch(include_path):
return rule
return MessageRule('no rule applying.')
| bsd-3-clause | 3,845,343,916,866,982,000 | 36.870968 | 80 | 0.66113 | false | 3.90899 | false | false | false |
anhaidgroup/py_entitymatching | py_entitymatching/dask/dask_extract_features.py | 1 | 9597 | import logging
import os
import pandas as pd
import multiprocessing
import numpy as np
import dask
from dask.diagnostics import ProgressBar
from dask import delayed
from cloudpickle import cloudpickle
import tempfile
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
import py_entitymatching.utils.generic_helper as gh
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.feature.extractfeatures import get_feature_vals_by_cand_split
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \
get_num_cores, wrap
logger = logging.getLogger(__name__)
def dask_extract_feature_vecs(candset, attrs_before=None, feature_table=None,
attrs_after=None, verbose=False,
show_progress=True, n_chunks=1):
"""
WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK
This function extracts feature vectors from a DataFrame (typically a
labeled candidate set).
Specifically, this function uses feature
table, ltable and rtable (that is present in the `candset`'s
metadata) to extract feature vectors.
Args:
        candset (DataFrame): The input candidate set for which the feature
vectors should be extracted.
attrs_before (list): The list of attributes from the input candset,
that should be added before the feature vectors (defaults to None).
feature_table (DataFrame): A DataFrame containing a list of
features that should be used to compute the feature vectors (
defaults to None).
attrs_after (list): The list of attributes from the input candset
that should be added after the feature vectors (defaults to None).
verbose (boolean): A flag to indicate whether the debug information
should be displayed (defaults to False).
show_progress (boolean): A flag to indicate whether the progress of
extracting feature vectors must be displayed (defaults to True).
n_chunks (int): The number of partitions to split the candidate set. If it
is set to -1, the number of partitions will be set to the
number of cores in the machine.
Returns:
A pandas DataFrame containing feature vectors.
The DataFrame will have metadata ltable and rtable, pointing
to the same ltable and rtable as the input candset.
Also, the output
DataFrame will have three columns: key, foreign key ltable, foreign
key rtable copied from input candset to the output DataFrame. These
three columns precede the columns mentioned in `attrs_before`.
Raises:
AssertionError: If `candset` is not of type pandas
DataFrame.
AssertionError: If `attrs_before` has attributes that
are not present in the input candset.
        AssertionError: If `attrs_after` has attributes that
are not present in the input candset.
AssertionError: If `feature_table` is set to None.
AssertionError: If `n_chunks` is not of type
int.
Examples:
>>> import py_entitymatching as em
>>> from py_entitymatching.dask.dask_extract_features import dask_extract_feature_vecs
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> match_f = em.get_features_for_matching(A, B)
>>> # G is the labeled dataframe which should be converted into feature vectors
>>> H = dask_extract_feature_vecs(G, features=match_f, attrs_before=['title'], attrs_after=['gold_labels'])
"""
logger.warning(
"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.")
# Validate input parameters
# # We expect the input candset to be of type pandas DataFrame.
validate_object_type(candset, pd.DataFrame, error_prefix='Input cand.set')
# # If the attrs_before is given, Check if the attrs_before are present in
# the input candset
if attrs_before != None:
if not ch.check_attrs_present(candset, attrs_before):
logger.error(
                'The attributes mentioned in attrs_before are not present '
'in the input table')
raise AssertionError(
                'The attributes mentioned in attrs_before are not present '
'in the input table')
# # If the attrs_after is given, Check if the attrs_after are present in
# the input candset
if attrs_after != None:
if not ch.check_attrs_present(candset, attrs_after):
logger.error(
                'The attributes mentioned in attrs_after are not present '
'in the input table')
raise AssertionError(
                'The attributes mentioned in attrs_after are not present '
'in the input table')
# We expect the feature table to be a valid object
if feature_table is None:
logger.error('Feature table cannot be null')
raise AssertionError('The feature table cannot be null')
# Do metadata checking
# # Mention what metadata is required to the user
ch.log_info(logger, 'Required metadata: cand.set key, fk ltable, '
'fk rtable, '
'ltable, rtable, ltable key, rtable key', verbose)
# # Get metadata
ch.log_info(logger, 'Getting metadata from catalog', verbose)
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = \
cm.get_metadata_for_candset(
candset, logger, verbose)
# # Validate metadata
ch.log_info(logger, 'Validating metadata', verbose)
cm._validate_metadata_for_candset(candset, key, fk_ltable, fk_rtable,
ltable, rtable, l_key, r_key,
logger, verbose)
# Extract features
# id_list = [(row[fk_ltable], row[fk_rtable]) for i, row in
# candset.iterrows()]
# id_list = [tuple(tup) for tup in candset[[fk_ltable, fk_rtable]].values]
# # Set index for convenience
l_df = ltable.set_index(l_key, drop=False)
r_df = rtable.set_index(r_key, drop=False)
# # Apply feature functions
ch.log_info(logger, 'Applying feature functions', verbose)
col_names = list(candset.columns)
fk_ltable_idx = col_names.index(fk_ltable)
fk_rtable_idx = col_names.index(fk_rtable)
validate_object_type(n_chunks, int, 'Parameter n_chunks')
validate_chunks(n_chunks)
n_chunks = get_num_partitions(n_chunks, len(candset))
c_splits = np.array_split(candset, n_chunks)
pickled_obj = cloudpickle.dumps(feature_table)
feat_vals_by_splits = []
for i in range(len(c_splits)):
partial_result = delayed(get_feature_vals_by_cand_split)(pickled_obj,
fk_ltable_idx,
fk_rtable_idx, l_df,
r_df, c_splits[i],
False)
feat_vals_by_splits.append(partial_result)
feat_vals_by_splits = delayed(wrap)(feat_vals_by_splits)
if show_progress:
with ProgressBar():
feat_vals_by_splits = feat_vals_by_splits.compute(scheduler="processes",
num_workers=get_num_cores())
else:
feat_vals_by_splits = feat_vals_by_splits.compute(scheduler="processes",
num_workers=get_num_cores())
feat_vals = sum(feat_vals_by_splits, [])
# Construct output table
feature_vectors = pd.DataFrame(feat_vals, index=candset.index.values)
# # Rearrange the feature names in the input feature table order
feature_names = list(feature_table['feature_name'])
feature_vectors = feature_vectors[feature_names]
ch.log_info(logger, 'Constructing output table', verbose)
# print(feature_vectors)
# # Insert attrs_before
if attrs_before:
if not isinstance(attrs_before, list):
attrs_before = [attrs_before]
attrs_before = gh.list_diff(attrs_before, [key, fk_ltable, fk_rtable])
attrs_before.reverse()
for a in attrs_before:
feature_vectors.insert(0, a, candset[a])
# # Insert keys
feature_vectors.insert(0, fk_rtable, candset[fk_rtable])
feature_vectors.insert(0, fk_ltable, candset[fk_ltable])
feature_vectors.insert(0, key, candset[key])
# # insert attrs after
if attrs_after:
if not isinstance(attrs_after, list):
attrs_after = [attrs_after]
attrs_after = gh.list_diff(attrs_after, [key, fk_ltable, fk_rtable])
attrs_after.reverse()
col_pos = len(feature_vectors.columns)
for a in attrs_after:
feature_vectors.insert(col_pos, a, candset[a])
col_pos += 1
# Reset the index
# feature_vectors.reset_index(inplace=True, drop=True)
# # Update the catalog
cm.init_properties(feature_vectors)
cm.copy_properties(candset, feature_vectors)
# Finally, return the feature vectors
return feature_vectors
| bsd-3-clause | 4,681,506,796,973,360,000 | 38.012195 | 115 | 0.624153 | false | 4.129518 | false | false | false |
NuAoA/mopidy-alcd | mopidy_AdafruitLCD/Adafruit_LCD_frontend.py | 1 | 3199 | #!/usr/bin/env python
import logging
import traceback
import pykka
import mopidy
import sys
import re #todo: remove
import threading
from time import sleep
from mopidy import core
from .Adafruit_player import AdafruitPlayer
logger = logging.getLogger(__name__)
class AdafruitLCD(pykka.ThreadingActor, core.CoreListener):
def __init__(self,config,core):
super(AdafruitLCD,self).__init__()
self.core = core
self.player = AdafruitPlayer(core)
self.startup = threading.Thread(target=self.media_scan)
#self.player.run()
def media_scan(self):
media_list = []
timeout = 0
self.player.plate.smessage("Loading Media...")
sleep(2)
while self.player.running:
if timeout>=50 or self.player.inMenus:
if not self.player.inMenus:
if len(media_list)==0:
self.player.plate.smessage("No Media Found",line=1)
elif self.player.track!=None:
self.player.displaySongInfo()
break
update = False
list = self.core.library.browse(None).get()
for media in list:
if media.name in media_list:
pass
else:
media_list.append(media.name)
update = True
break
if not self.player.inMenus:
if len(media_list) > 0:
if update:
str = ""
for item in media_list:
if str != "":
str = item+", "+str
else:
str = item
self.player.plate.smessage(str.ljust(16),line=1)
sleep(1)
else:
sleep(5)
else:
sleep(5)
timeout+=1
def on_start(self):
logger.info("[ALCD] Starting AdafruitLCD")
self.player.start()
self.startup.start()
def on_stop(self):
logger.info("[ALCD] Stopping AdafruitLCD")
self.player.stop()
def track_playback_ended(self,tl_track, time_position):
logger.info("[ALCD] track playback ended")
self.player.track_playback_ended(tl_track.track)
def track_playback_started(self,tl_track):
try:
logger.info("[ALCD] Now playing:")
try:
for artist in tl_track.track.artists:
logger.info("[ALCD] >"+tl_track.track.name+ " by " +artist.name)
except:
traceback.print_exc()
self.player.updateCurrentTrack(tl_track.track)
except:
traceback.print_exc()
def playback_state_changed(self,old_state,new_state):
try:
#logger.info("[ALCD] Playback state changed from " + old_state + " to " + new_state)
self.player.updatePlaybackState(old_state,new_state)
except:
traceback.print_exc()
def print_tracks(self,tl_track_list):
for tltrack in tl_track_list:
logger.info("[ALCD] " + tltrack.track.name)
"""
def playlists_loaded(self):
logger.info("[ALCD] Playlists:")
try:
for playlist in self.core.playlists.playlists.get():
if re.search("spotify:user:spotify",playlist.uri):
self.core.tracklist.add(tracks=playlist.tracks)
self.core.playback.play()
except:
traceback.print_exc()
def tracklist_changed(self):
logger.info("[ALCD] Tracklist updated")
print(" Total: "+str(len(self.core.tracklist.tl_tracks.get())))
#self.print_tracks(self.core.tracklist.tl_tracks.get())
def track_playback_ended(self,tl_track,time_position):
logger.info("[ALCD] Playback Ended")
"""
| apache-2.0 | -27,827,435,575,448,490 | 24.388889 | 87 | 0.65708 | false | 2.902904 | false | false | false |
EdDev/vdsm | tests/virttests/vmstats_test.py | 1 | 18952 | #
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import copy
import uuid
import six
from vdsm.virt import vmstats
from testlib import VdsmTestCase as TestCaseBase
from testlib import permutations, expandPermutations
_FAKE_BULK_STATS = {
'f3243a90-2e9e-4061-b7b3-a6c585e14857': (
{
'state.state': 1,
'state.reason': 1,
'cpu.time': 13755069120,
'cpu.user': 3370000000,
'cpu.system': 6320000000,
'balloon.current': 4194304,
'balloon.maximum': 4194304,
'vcpu.current': 2,
'vcpu.maximum': 16,
'vcpu.0.state': 1,
'vcpu.0.time': 10910000000,
'vcpu.1.state': 1,
'vcpu.1.time': 0,
'net.count': 1,
'net.0.name': 'vnet0',
'net.0.rx.bytes': 0,
'net.0.rx.pkts': 0,
'net.0.rx.errs': 0,
'net.0.rx.drop': 0,
'net.0.tx.bytes': 0,
'net.0.tx.pkts': 0,
'net.0.tx.errs': 0,
'net.0.tx.drop': 0,
'block.count': 2,
'block.0.name': 'hdc',
'block.0.rd.reqs': 0,
'block.0.rd.bytes': 0,
'block.0.rd.times': 0,
'block.0.wr.reqs': 0,
'block.0.wr.bytes': 0,
'block.0.wr.times': 0,
'block.0.fl.reqs': 0,
'block.0.fl.times': 0,
'block.0.allocation': 0,
'block.1.name': 'vda',
'block.1.path': (
'/rhev'
'/data-center'
'/00000001-0001-0001-0001-0000000001e8'
'/bbed5784-b0ee-4a0a-aff2-801da0bcf39e'
'/images'
'/cbe82d1f-a0ba-4af2-af2f-788d15eef043'
'/7ba49d31-4fa7-49df-8df4-37a22de79f62'
),
'block.1.rd.reqs': 1,
'block.1.rd.bytes': 512,
'block.1.rd.times': 58991,
'block.1.wr.reqs': 0,
'block.1.wr.bytes': 0,
'block.1.wr.times': 0,
'block.1.fl.reqs': 0,
'block.1.fl.times': 0,
'block.1.allocation': 0,
'block.1.capacity': 42949672960,
},
{
'state.state': 1,
'state.reason': 1,
'cpu.time': 13755069120,
'cpu.user': 3370000000,
'cpu.system': 6320000000,
'balloon.current': 4194304,
'balloon.maximum': 4194304,
'vcpu.current': 2,
'vcpu.maximum': 16,
'vcpu.0.state': 1,
'vcpu.0.time': 10910000000,
'vcpu.1.state': 1,
'vcpu.1.time': 0,
'net.count': 2,
'net.0.name': 'vnet1',
'net.0.rx.bytes': 0,
'net.0.rx.pkts': 0,
'net.0.rx.errs': 0,
'net.0.rx.drop': 0,
'net.0.tx.bytes': 0,
'net.0.tx.pkts': 0,
'net.0.tx.errs': 0,
'net.0.tx.drop': 0,
'net.1.name': 'vnet0',
'net.1.rx.bytes': 1024,
'net.1.rx.pkts': 128,
'net.1.rx.errs': 0,
'net.1.rx.drop': 0,
'net.1.tx.bytes': 2048,
'net.1.tx.pkts': 256,
'net.1.tx.errs': 0,
'net.1.tx.drop': 0,
'block.count': 3,
'block.0.name': 'hdd',
'block.0.rd.reqs': 0,
'block.0.rd.bytes': 0,
'block.0.rd.times': 0,
'block.0.wr.reqs': 0,
'block.0.wr.bytes': 0,
'block.0.wr.times': 0,
'block.0.fl.reqs': 0,
'block.0.fl.times': 0,
'block.0.allocation': 0,
'block.1.name': 'vda',
'block.1.path': (
'/rhev'
'/data-center'
'/00000001-0001-0001-0001-0000000001e8'
'/bbed5784-b0ee-4a0a-aff2-801da0bcf39e'
'/images'
'/cbe82d1f-a0ba-4af2-af2f-788d15eef043'
'/7ba49d31-4fa7-49df-8df4-37a22de79f62'
),
'block.1.rd.reqs': 1,
'block.1.rd.bytes': 512,
'block.1.rd.times': 58991,
'block.1.wr.reqs': 0,
'block.1.wr.bytes': 0,
'block.1.wr.times': 0,
'block.1.fl.reqs': 0,
'block.1.fl.times': 0,
'block.1.allocation': 0,
'block.1.capacity': 42949672960,
'block.2.name': 'hdc',
'block.2.rd.reqs': 0,
'block.2.rd.bytes': 0,
'block.2.rd.times': 0,
'block.2.wr.reqs': 0,
'block.2.wr.bytes': 0,
'block.2.wr.times': 0,
'block.2.fl.reqs': 0,
'block.2.fl.times': 0,
'block.2.allocation': 0,
},
),
}
# on SR-IOV we have seen an unexpected net.count == 2 but data for only one nic.
_FAKE_BULK_STATS_SRIOV = {
'f3243a90-2e9e-4061-b7b3-a6c585e14857': (
{
'state.state': 1,
'state.reason': 1,
'cpu.time': 13755069120,
'cpu.user': 3370000000,
'cpu.system': 6320000000,
'balloon.current': 4194304,
'balloon.maximum': 4194304,
'vcpu.current': 2,
'vcpu.maximum': 16,
'vcpu.0.state': 1,
'vcpu.0.time': 10910000000,
'vcpu.1.state': 1,
'vcpu.1.time': 0,
'net.count': 2,
'net.1.name': 'vnet1',
'net.1.rx.bytes': 0,
'net.1.rx.pkts': 0,
'net.1.rx.errs': 0,
'net.1.rx.drop': 0,
'net.1.tx.bytes': 0,
'net.1.tx.pkts': 0,
'net.1.tx.errs': 0,
'net.1.tx.drop': 0,
'block.count': 2,
'block.0.name': 'hdc',
'block.0.rd.reqs': 0,
'block.0.rd.bytes': 0,
'block.0.rd.times': 0,
'block.0.wr.reqs': 0,
'block.0.wr.bytes': 0,
'block.0.wr.times': 0,
'block.0.fl.reqs': 0,
'block.0.fl.times': 0,
'block.0.allocation': 0,
'block.1.name': 'vda',
'block.1.path': (
'/rhev'
'/data-center'
'/00000001-0001-0001-0001-0000000001e8'
'/bbed5784-b0ee-4a0a-aff2-801da0bcf39e'
'/images'
'/cbe82d1f-a0ba-4af2-af2f-788d15eef043'
'/7ba49d31-4fa7-49df-8df4-37a22de79f62'
),
'block.1.rd.reqs': 1,
'block.1.rd.bytes': 512,
'block.1.rd.times': 58991,
'block.1.wr.reqs': 0,
'block.1.wr.bytes': 0,
'block.1.wr.times': 0,
'block.1.fl.reqs': 0,
'block.1.fl.times': 0,
'block.1.allocation': 0,
'block.1.capacity': 42949672960,
},
)
}
class VmStatsTestCase(TestCaseBase):
def setUp(self):
# just pick one sampling
self.samples = next(six.itervalues(_FAKE_BULK_STATS))
self.bulk_stats = self.samples[0]
self.interval = 10 # seconds
def assertNameIsAt(self, stats, group, idx, name):
self.assertEqual(stats['%s.%d.name' % (group, idx)], name)
def assertStatsHaveKeys(self, stats, keys):
for key in keys:
self.assertIn(key, stats)
def assertRepeatedStatsHaveKeys(self, items, stats, keys):
for item in items:
self.assertStatsHaveKeys(stats[item.name], keys)
@expandPermutations
class UtilsFunctionsTests(VmStatsTestCase):
# we should not test private functions, but this one is
# the cornerstone of bulk stats translation, so we make
    # one exception for the sake of practicality.
@permutations([['block', 'hdc'], ['net', 'vnet0']])
def test_find_existing(self, group, name):
indexes = vmstats._find_bulk_stats_reverse_map(
self.bulk_stats, group)
self.assertNameIsAt(
self.bulk_stats, group, indexes[name], name)
@permutations([['block'], ['net']])
def test_find_bogus(self, group):
name = 'inexistent'
indexes = vmstats._find_bulk_stats_reverse_map(
self.bulk_stats, group)
self.assertNotIn(name, indexes)
@permutations([['block', 'hdc'], ['net', 'vnet0']])
def test_index_can_change(self, group, name):
all_indexes = []
for bulk_stats in self.samples:
indexes = vmstats._find_bulk_stats_reverse_map(
bulk_stats, group)
self.assertNameIsAt(bulk_stats, group, indexes[name], name)
all_indexes.append(indexes)
# and indeed indexes must change
self.assertEqual(len(all_indexes), len(self.samples))
def test_network_missing(self):
# seen using SR-IOV
bulk_stats = next(six.itervalues(_FAKE_BULK_STATS_SRIOV))
indexes = vmstats._find_bulk_stats_reverse_map(
bulk_stats[0], 'net')
self.assertTrue(indexes)
@expandPermutations
class NetworkStatsTests(VmStatsTestCase):
# TODO: grab them from the schema
_EXPECTED_KEYS = (
'macAddr',
'name',
'speed',
'state',
'rxErrors',
'rxDropped',
'txErrors',
'txDropped',
'rx',
'tx',
'sampleTime',
)
def test_nic_have_all_keys(self):
nic = FakeNic(name='vnet0', model='virtio',
mac_addr='00:1a:4a:16:01:51')
testvm = FakeVM(nics=(nic,))
stats = vmstats._nic_traffic(
testvm,
nic.name, nic.nicModel, nic.macAddr,
self.bulk_stats, 0,
self.bulk_stats, 0,
self.interval)
self.assertStatsHaveKeys(stats, self._EXPECTED_KEYS)
def test_networks_have_all_keys(self):
nics = (
FakeNic(name='vnet0', model='virtio',
mac_addr='00:1a:4a:16:01:51'),
)
vm = FakeVM(nics=nics)
stats = {}
vmstats.networks(vm, stats,
self.bulk_stats, self.bulk_stats,
self.interval)
self.assertRepeatedStatsHaveKeys(nics, stats['network'],
self._EXPECTED_KEYS)
def test_networks_good_interval(self):
nics = (
FakeNic(name='vnet0', model='virtio',
mac_addr='00:1a:4a:16:01:51'),
)
vm = FakeVM(nics=nics)
stats = {}
self.assertTrue(
vmstats.networks(vm, stats,
self.bulk_stats, self.bulk_stats,
1)
)
@permutations([[-42], [0]])
def test_networks_bad_interval(self, interval):
nics = (
FakeNic(name='vnet0', model='virtio',
mac_addr='00:1a:4a:16:01:51'),
)
vm = FakeVM(nics=nics)
stats = {}
self.assertTrue(
vmstats.networks(vm, stats,
self.bulk_stats, self.bulk_stats,
0) is None
)
@permutations([
['net.0.rx.bytes'], ['net.0.rx.pkts'],
['net.0.rx.errs'], ['net.0.rx.drop'], ['net.0.tx.bytes'],
['net.0.tx.pkts'], ['net.0.tx.errs'], ['net.0.tx.drop'],
])
def test_networks_missing_key(self, key):
nics = (
FakeNic(name='vnet0', model='virtio',
mac_addr='00:1a:4a:16:01:51'),
)
vm = FakeVM(nics=nics)
vm.migrationPending = True
faulty_bulk_stats = {}
faulty_bulk_stats.update(self.bulk_stats)
del faulty_bulk_stats[key]
stats = {}
self.assertTrue(
vmstats.networks(vm, stats,
self.bulk_stats, faulty_bulk_stats,
1)
)
class DiskStatsTests(VmStatsTestCase):
# TODO: grab them from the schema
# Note: these are the minimal set Vdsm exported,
# no clear rationale for this subset.
_EXPECTED_KEYS = (
'truesize',
'apparentsize',
'readLatency',
'writeLatency',
'flushLatency',
'imageID',
# TODO: add test for 'lunGUID'
'readRate',
'writeRate',
'readOps',
'writeOps',
'readBytes',
'writtenBytes',
)
def test_disk_all_keys_present(self):
interval = 10 # seconds
drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
testvm = FakeVM(drives=drives)
stats = {}
stats_before = copy.deepcopy(self.bulk_stats)
stats_after = copy.deepcopy(self.bulk_stats)
_ensure_delta(stats_before, stats_after,
'block.0.rd.reqs', 1024)
_ensure_delta(stats_before, stats_after,
'block.0.rd.bytes', 128 * 1024)
vmstats.disks(testvm, stats,
stats_before, stats_after,
interval)
self.assertRepeatedStatsHaveKeys(drives, stats['disks'],
self._EXPECTED_KEYS)
def test_interval_zero(self):
interval = 0 # seconds
# with zero interval, we won't have {read,write}Rate
expected_keys = tuple(k for k in self._EXPECTED_KEYS
if k not in ('readRate', 'writeRate'))
drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
testvm = FakeVM(drives=drives)
stats = {}
self.assertNotRaises(vmstats.disks,
testvm, stats,
self.bulk_stats, self.bulk_stats,
interval)
self.assertRepeatedStatsHaveKeys(drives,
stats['disks'],
expected_keys)
def test_disk_missing_rate(self):
partial_stats = self._drop_stats(
('block.0.rd.bytes', 'block.1.rd.bytes',
'block.0.wr.bytes', 'block.1.wr.bytes'))
interval = 10 # seconds
drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
testvm = FakeVM(drives=drives)
stats = {}
self.assertNotRaises(vmstats.disks,
testvm, stats,
partial_stats, partial_stats,
interval)
def test_disk_missing_latency(self):
partial_stats = self._drop_stats(
('block.0.rd.times', 'block.1.rd.times',
'block.0.wr.reqs', 'block.1.wr.reqs'))
interval = 10 # seconds
drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
testvm = FakeVM(drives=drives)
stats = {}
self.assertNotRaises(vmstats.disks,
testvm, stats,
partial_stats, partial_stats,
interval)
def _drop_stats(self, keys):
partial_stats = copy.deepcopy(self.bulk_stats)
for key in keys:
del partial_stats[key]
return partial_stats
FIRST_CPU_SAMPLE = {'cpu.user': 4740000000, 'cpu.system': 6490000000}
LAST_CPU_SAMPLE = {'cpu.user': 4760000000, 'cpu.system': 6500000000}
@expandPermutations
class CpuStatsTests(VmStatsTestCase):
# all data stolen from Vdsm and/or virsh -r domstats
INTERVAL = 15. # seconds.
# [first, last]
# intentionally use only one sample, the other empty
@permutations([[{}, {}],
[{}, FIRST_CPU_SAMPLE],
[FIRST_CPU_SAMPLE, {}]])
def test_empty_samples(self, first, last):
stats = {}
res = vmstats.cpu(stats, {}, {}, self.INTERVAL)
self.assertEqual(stats,
{'cpuUser': 0.0, 'cpuSys': 0.0})
self.assertEqual(res, None)
def test_only_cpu_user_system(self):
stats = {}
res = vmstats.cpu(stats, FIRST_CPU_SAMPLE, LAST_CPU_SAMPLE,
self.INTERVAL)
self.assertEqual(stats, {
'cpuUser': 0.0,
'cpuSys': 0.2,
'cpuUsage': '11260000000',
})
self.assertEqual(res, None)
def test_update_all_keys(self):
stats = {}
first_sample = {'cpu.time': 24345584838}
first_sample.update(FIRST_CPU_SAMPLE)
last_sample = {'cpu.time': 24478198023}
last_sample.update(LAST_CPU_SAMPLE)
res = vmstats.cpu(stats, first_sample, last_sample,
self.INTERVAL)
self.assertEqual(stats, {
'cpuUser': 0.6840879,
'cpuSys': 0.2,
'cpuUsage': '11260000000',
})
self.assertNotEquals(res, None)
# helpers
def _ensure_delta(stats_before, stats_after, key, delta):
"""
Set stats_before[key] and stats_after[key] so that
stats_after[key] - stats_before[key] == abs(delta).
"""
stats_before[key] = 0
stats_after[key] = abs(delta)
class FakeNic(object):
def __init__(self, name, model, mac_addr):
self.name = name
self.nicModel = model
self.macAddr = mac_addr
class FakeDrive(object):
def __init__(self, name, size):
self.name = name
self.apparentsize = size
self.truesize = size
self.GUID = str(uuid.uuid4())
self.imageID = str(uuid.uuid4())
self.domainID = str(uuid.uuid4())
self.poolID = str(uuid.uuid4())
self.volumeID = str(uuid.uuid4())
def __contains__(self, item):
# isVdsmImage support
return item in ('imageID', 'domainID', 'poolID', 'volumeID')
class FakeVM(object):
def __init__(self, nics=None, drives=None):
self.id = str(uuid.uuid4())
self.nics = nics if nics is not None else []
self.drives = drives if drives is not None else []
self.migrationPending = False
@property
def monitorable(self):
return not self.migrationPending
def getNicDevices(self):
return self.nics
def getDiskDevices(self):
return self.drives
| gpl-2.0 | -2,565,157,551,980,856,000 | 31.067682 | 79 | 0.503271 | false | 3.453353 | true | false | false |
eniac/faas | factor/linalg.py | 1 | 5126 | import sys
import os
import re
import math
import utils
import logging
logger = logging.getLogger('Linalg')
cmd_logger = logging.getLogger('cmd')
# Exact configuration here will depend on the instance/hardware type.
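# For illustration, run_rankfile() emits one line per MPI rank of the form
#   rank <n>=<host> slot=<socket>:<first core>-<last core>[,<socket>:<range>...]
# e.g. "rank 0=node-0 slot=0:0-7" (the hostname and core range here are made-up
# values; the real ones come from the 'hosts' list and the mpi grid parameters).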
def run_rankfile(linalg_params):
logger.info("--- Generating rankfile ---")
machines = linalg_params['machines']
num_of_mpi = linalg_params['mpi_rows'] * linalg_params['mpi_cols']
num_of_mach = len(machines)
num_of_sock = linalg_params['phys_socks_per_machine']
num_of_cores_per_sock = linalg_params['phys_core_per_sock']
jobs_assigned_to_mach = 0
with open(linalg_params['rankfile'], 'wt', encoding='utf-8') as rfile:
for mach_no in range(0, num_of_mach):
if mach_no < num_of_mpi % num_of_mach:
num_of_jobs = num_of_mpi // num_of_mach + 1
else:
num_of_jobs = num_of_mpi // num_of_mach
cores_unassigned = num_of_cores_per_sock * num_of_sock
socket_counter = {}
for sock in range(0, num_of_sock):
socket_counter[sock] = 0
for job_id in range(0, num_of_jobs):
rank_no = jobs_assigned_to_mach + job_id
sock_no = job_id % num_of_sock
start_core = socket_counter[sock_no]
cores_to_use = int(math.ceil(cores_unassigned // (num_of_jobs - job_id)))
end_core = socket_counter[sock_no] + cores_to_use - 1
# Case for socket splitting
if end_core >= num_of_cores_per_sock:
core_needed = cores_to_use
slot_str = ""
while core_needed > 0:
sock = min(socket_counter, key=socket_counter.get)
core_use = (num_of_cores_per_sock - socket_counter[sock] if core_needed >= num_of_cores_per_sock - socket_counter[sock] else core_needed)
core_needed -= core_use
start_core = socket_counter[sock]
end_core = socket_counter[sock] + core_use - 1
slot_str += ("{sock}:{start}-{end},"
.format(sock=sock, start=socket_counter[sock], end=end_core))
socket_counter[sock] += core_use
slot_str = slot_str[0:-1]
rfile.write("rank {n}={mach} slot={slot}\n"
.format(n=rank_no, mach=machines[mach_no], slot=slot_str))
cores_unassigned -= cores_to_use
continue
rfile.write("rank {n}={mach} slot={sock}:{start}-{end}\n"
.format(n=rank_no, mach=machines[mach_no], sock=sock_no, start=start_core, end=end_core))
socket_counter[sock_no] += cores_to_use
cores_unassigned -= cores_to_use
jobs_assigned_to_mach += num_of_jobs
logger.info("--- End of generating rankfile ---")
def run_linalg(linalg_params):
logger.info("--- Beginning MSieve linear algebra ---")
linalg_cmd = "mpirun -np " + str(linalg_params['mpi_rows'] * linalg_params['mpi_cols'])
linalg_cmd += " -H " + ",".join(linalg_params['machines'])
linalg_cmd += " -rf " + linalg_params['rankfile']
linalg_cmd += " " + os.path.join(linalg_params['msievedir'], 'msieve')
linalg_cmd += " -nf " + linalg_params['fb_path']
linalg_cmd += (" -nc2 \"mpi_nrows={rows} mpi_ncols={cols} target_density={td}\""
.format(rows=linalg_params['mpi_rows'], cols=linalg_params['mpi_cols'], td=linalg_params['target_density']))
linalg_cmd += " -v -t " + str(linalg_params['threads'])
linalg_cmd += " -l " + linalg_params['log_path']
linalg_cmd += " -s " + linalg_params['dat_path']
linalg_cmd += " " + str(linalg_params['N'])
cmd_logger.info(linalg_cmd)
stdout, stderr, ret = utils.run_command(linalg_cmd, include_stdout=True, include_stderr=True, include_returncode=True, logger=logger)
if ret != 0:
logger.error("Received error code " + str(ret) + " from Msieve linear algebra. Exiting...")
sys.exit(1)
logger.info("--- End of MSieve linear algebra ---")
def run(parameters):
linalg_paths = ['tasks', 'msieve', 'linalg']
linalg_keys = {
"N": int,
"msievedir": str,
"mpi": str,
"hosts": str,
"target_density": int,
"phys_socks_per_machine": int,
"phys_core_per_sock": int,
"threads_per_core": int,
"threads": int,
"rankfile": str,
"fb_path": str,
"log_path": str,
"dat_path": str,
}
linalg_params = parameters.myparams(linalg_keys, linalg_paths)
linalg_params['machines'] = [ m.strip() for m in linalg_params['hosts'].split(',') if len(m) > 0 ]
linalg_params['mpi_rows'], linalg_params['mpi_cols'] = [ int(x) for x in linalg_params['mpi'].split("x") ]
# Create a rankfile based on current mpi configuration
run_rankfile(linalg_params)
# Run linear algebra
run_linalg(linalg_params)
| lgpl-3.0 | 764,551,282,516,274,000 | 41.716667 | 161 | 0.552282 | false | 3.532736 | false | false | false |