code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2–1.05M)
---|---|---|---|---|---
# encoding=utf-8
##
# VisibilityCheck.
# <p>
# Description of the test.
#
# @data INSTANCE_ID [String] instance id
##

from qtaste import *

import time

# Update in order to cope with the javaGUI extension declared in your testbed configuration.
javaguiMI = testAPI.getJavaGUI(INSTANCE_ID=testData.getValue("JAVAGUI_INSTANCE_NAME"))
subtitler = testAPI.getSubtitler()

importTestScript("TabbedPaneSelection")


def step1():
    """
    @step Description of the actions done for this step
    @expected Description of the expected result
    """
    doSubSteps(TabbedPaneSelection.changeTabByTitle)
    subtitler.setSubtitle("Click on the button to make the component invisible")
    time.sleep(1)
    javaguiMI.clickOnButton("VISIBILITY_BUTTON")
    time.sleep(1)
    if javaguiMI.isVisible("VISIBILITY_TEXT"):
        testAPI.stop(Status.FAIL, "The component should not be visible")
    try:
        subtitler.setSubtitle("Try to insert a value in the invisible text field", 10)
        javaguiMI.setText("VISIBILITY_TEXT", "pas bien")
        testAPI.stop(Status.FAIL, "The component should not be visible and setText() should fail")
    except Exception:
        javaguiMI.clickOnButton("VISIBILITY_BUTTON")


doStep(step1)
| remybaranx/qtaste | demo/TestSuites/PlayBack/VisibilityCheck/TestScript.py | Python | gpl-3.0 | 1,239 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from .settings import MARTOR_ENABLE_LABEL
from .widgets import MartorWidget
class MartorFormField(forms.CharField):

    def __init__(self, *args, **kwargs):
        # set up the editor without a label
        if not MARTOR_ENABLE_LABEL:
            kwargs['label'] = ''
        super(MartorFormField, self).__init__(*args, **kwargs)

        if not issubclass(self.widget.__class__, MartorWidget):
            self.widget = MartorWidget()
| agusmakmun/dracos-markdown-editor | martor/fields.py | Python | gpl-3.0 | 535 |
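A minimal usage sketch for the field above (the `PostForm` name and its fields are illustrative assumptions, not part of martor): any form declaring a `MartorFormField` gets the markdown editor widget automatically, even if another widget was passed in.

```python
# Hypothetical Django form; only MartorFormField comes from martor.
from django import forms
from martor.fields import MartorFormField


class PostForm(forms.Form):
    title = forms.CharField(max_length=100)
    # always rendered with MartorWidget, per __init__ above
    content = MartorFormField()
```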
import random
from pybugger import myaixterm
color = myaixterm
color.aix_init()
def string_constructor(args, foreground="normal", background="normal"):
    if foreground != "rainbow":
        foreground = "" if foreground == "normal" else color.aix_fg(foreground)
        background = "" if background == "normal" else color.aix_bg(background)
        res = foreground + background
        for arg in args:
            res += arg
        res = res + color.aix_normal()
        return res
    else:
        colors = color.get_all_colors()
        res = ""
        for arg in args:
            res += arg
        rainbow_string = ""
        for character in list(res):
            # pick a random foreground and background color per character
            foreground = color.aix_fg(colors[getRandomKey(colors)])
            background = color.aix_bg(colors[getRandomKey(colors)])
            rainbow_string += foreground + background + character
        rainbow_string += color.aix_normal()
        return rainbow_string


def getRandomKey(dictionary):
    return random.sample(list(dictionary), 1).pop()


def default(*args):
    """Format the arguments with the default foreground and background."""
    print(string_constructor(args))


def success(*args):
    """Format the arguments with a green foreground."""
    print(string_constructor(args, "green"))


def mega_success(*args):
    """Format the arguments with a white foreground and a green background."""
    print(string_constructor(args, "white", "green"))


def warning(*args):
    """Format the arguments with a yellow foreground."""
    print(string_constructor(args, "yellow"))


def mega_warning(*args):
    """Format the arguments with a black foreground and a yellow background."""
    print(string_constructor(args, "black", "fullyellow"))


def info(*args):
    """Format the arguments with a cyan foreground."""
    print(string_constructor(args, "cyan"))


def mega_info(*args):
    """Format the arguments with a white foreground and a cyan background."""
    print(string_constructor(args, "white", "cyan"))


def error(*args):
    """Format the arguments with a bright red foreground."""
    print(string_constructor(args, "brightred"))


def mega_error(*args):
    """Format the arguments with a white foreground and a red background."""
    print(string_constructor(args, "white", "red"))


def randomize(*args):
    """Format the arguments with a random foreground and background."""
    print(string_constructor(args, "rainbow"))


def inverted(*args):
    """Format the arguments with a black foreground and white background."""
    print(string_constructor(args, "black", "white"))


def custom(*args, delimiter='', fg="normal", bg="normal"):
    """Format the arguments with a custom foreground and background."""
    debug_str = delimiter.join(args)
    print(string_constructor(debug_str, fg, bg))


def test():
    """A test method to print out examples."""
    print("")
    print("pybugger.success(*lyric)")
    success("\"We're no strangers to love,")
    print("")
    print("pybugger.mega_success(*lyric)")
    mega_success("You know the rules and so do I")
    print("")
    print("pybugger.info(*lyric)")
    info("A full commitment's what I'm thinking of")
    print("")
    print("pybugger.mega_info(*lyric)")
    mega_info("You wouldn't get this from any other guy")
    print("")
    print("pybugger.warning(*lyric)")
    warning("I just wanna tell you how I'm feeling")
    print("")
    print("pybugger.mega_warning(*lyric)")
    mega_warning("Gotta make you understand,")
    print("")
    print("pybugger.error(*lyric)")
    error("Never gonna give you up")
    print("")
    print("pybugger.mega_error(*lyric)")
    mega_error("Never gonna let you down")
    print("")
    print("pybugger.randomize(*lyric)")
    randomize("Never gonna run around and desert you")
    print("")
    print('pybugger.custom(lyric, fg="color119", bg="color93")')
    custom("Never gonna make you cry", fg="color119", bg="color93")
    print("")
    print("pybugger.inverted(*lyric)")
    inverted("Never gonna say goodbye.")
    print("")
    print("pybugger.default(*lyric)")
    default("Never gonna tell a lie and hurt you.\"")
    print("")
| fareskalaboud/pybugger | pybugger/pybugger.py | Python | gpl-3.0 | 4,133 |
# -*- coding: utf-8 -*-
#
# This file is part of scoopy.
#
# Scoopy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scoopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scoopy. If not, see <http://www.gnu.org/licenses/>.
#
"""
.. module:: scoopy.oauth
.. moduleauthor:: Mathieu D. (MatToufoutu) <mattoufootu[at]gmail.com>
"""
import os
from time import time
from urllib import urlencode
try:
    from urlparse import parse_qsl
except ImportError:
    from cgi import parse_qsl

try:
    import cPickle as pickle
except ImportError:
    import pickle
import oauth2
__all__ = [
'REQUEST_TOKEN_URL',
'ACCESS_TOKEN_URL',
'AUTHORIZE_URL',
'OAuthException',
'OAuthRequestFailure',
'OAuthTokenError',
'OAuth',
]
BASE_URL = 'http://www.scoop.it'
REQUEST_TOKEN_URL = '%s/oauth/request' % BASE_URL
ACCESS_TOKEN_URL = '%s/oauth/access' % BASE_URL
AUTHORIZE_URL = '%s/oauth/authorize' % BASE_URL
class OAuthException(Exception):
    """
    Basic exception for OAuth related errors.
    """
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class OAuthRequestFailure(OAuthException):
    """
    Exception raised when a request fails.
    """
    pass


class OAuthTokenError(OAuthException):
    """
    Exception raised when a token isn't set and
    an operation requiring one is performed.
    """
    pass


class OAuth(object):
    """
    Helper class for all OAuth related actions.
    """
    signature_method = oauth2.SignatureMethod_HMAC_SHA1()

    def __init__(self, consumer_key, consumer_secret):
        """
        :param consumer_key: The application's API consumer key.
        :type consumer_key: str.
        :param consumer_secret: The application's API consumer secret.
        :type consumer_secret: str.
        """
        self.consumer = oauth2.Consumer(consumer_key, consumer_secret)
        self.client = oauth2.Client(self.consumer)
        self.token = None
        self.access_granted = False

    def save_token(self, filepath):
        if os.path.exists(filepath):
            os.remove(filepath)
        if self.token is None:
            raise OAuthTokenError('no token found, get one first')
        #TODO: if access is not granted, warn user the token saved will be a request_token
        db = {'oauth_token': self.token.key,
              'oauth_token_secret': self.token.secret}
        outfile = open(filepath, 'wb')
        try:
            pickle.dump(db, outfile, protocol=pickle.HIGHEST_PROTOCOL)
        finally:
            outfile.close()

    def load_token(self, filepath):
        infile = open(filepath, 'rb')
        try:
            db = pickle.load(infile)
        finally:
            infile.close()
        self.token = oauth2.Token(
            db['oauth_token'],
            db['oauth_token_secret']
        )
        self.client = oauth2.Client(self.consumer, self.token)

    def get_request_token(self):
        """
        Request a request_token from the server and store it
        on the instance.
        """
        response, content = self.client.request(REQUEST_TOKEN_URL)
        if response['status'] != '200':
            raise OAuthRequestFailure(
                "failed to get request_token (%s)" % response['status']
            )
        request_token = dict(parse_qsl(content))
        self.token = oauth2.Token(
            request_token['oauth_token'],
            request_token['oauth_token_secret']
        )

    def get_access_token_url(self, callback_url):
        """
        Generate the URL needed for the user to accept the application
        and return it.
        """
        if self.token is None:
            raise OAuthTokenError(
                "no request_token found, get one first"
            )
        #TODO: warn user if access already granted
        return "%s?oauth_token=%s&oauth_callback=%s" % (
            AUTHORIZE_URL,
            self.token.key,
            callback_url
        )

    def get_access_token(self, token_verifier):
        """
        Exchange the authorized request_token for an access token
        and store it on the instance.
        """
        self.token.set_verifier(token_verifier)
        self.client = oauth2.Client(self.consumer, self.token)
        response, content = self.client.request(ACCESS_TOKEN_URL, 'POST')
        if response['status'] != '200':
            raise OAuthRequestFailure(
                "failed to get access_token (%s)" % response['status']
            )
        self.access_granted = True
        access_token = dict(parse_qsl(content))
        self.token = oauth2.Token(
            access_token['oauth_token'],
            access_token['oauth_token_secret'],
        )
        self.client = oauth2.Client(self.consumer, self.token)

    def generate_request_params(self, params):
        """
        Given a dict of parameters, add the needed oauth_* parameters
        to it and return a URL-encoded string.
        """
        request_params = {
            'oauth_version': '1.0',
            'oauth_nonce': oauth2.generate_nonce(),
            'oauth_timestamp': int(time()),
            'oauth_token': self.token.key,
            'oauth_consumer_key': self.consumer.key,
        }
        for key, value in params.iteritems():
            request_params[key] = value
        return urlencode(request_params)

    def request(self, url, params, method='GET'):
        request_params = ''
        if method.lower() == 'get':
            if params:
                url += ('?' + urlencode(params))
        elif method.lower() == 'post':
            request_params = self.generate_request_params(params)
        else:
            raise OAuthRequestFailure("request method can only be 'GET' or 'POST'")
        return self.client.request(
            url,
            method=method,
            body=request_params,
            headers={'Accept-encoding': 'gzip'},
        )
| mattoufoutu/scoopy | scoopy/oauth.py | Python | gpl-3.0 | 6,386 |
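A sketch of the three-legged flow the class above wraps (consumer credentials, callback URL, and verifier below are placeholders, not real values): obtain a request token, send the user to the authorize URL, then trade the returned verifier for an access token. Python 2, to match the module.

```python
# Hypothetical walk through the OAuth dance; every credential value
# here is a placeholder.
from scoopy.oauth import OAuth

oauth = OAuth('CONSUMER_KEY', 'CONSUMER_SECRET')
oauth.get_request_token()
print oauth.get_access_token_url('http://example.com/callback')
# ...user authorizes in the browser and comes back with a verifier...
oauth.get_access_token('VERIFIER_FROM_CALLBACK')
oauth.save_token('/tmp/scoopy.token')  # persist for later sessions
```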
# Microcosmos: an antsy game
# Copyright (C) 2010 Cyril ADRIAN <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 exclusively.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
The Bugs model package provides bugs and their specific behaviour.
"""
from net.cadrian.microcosmos.model.bugs.antFemales import AntFemale, Target as AntFemaleTarget
from net.cadrian.microcosmos.model.bugs.antQueens import AntQueen
from net.cadrian.microcosmos.model.bugs.antSoldiers import AntSoldier
from net.cadrian.microcosmos.model.bugs.antWorkers import AntWorker
from net.cadrian.microcosmos.model.bugs.lice import Louse
| cadrian/microcosmos | src/net/cadrian/microcosmos/model/bugs/__init__.py | Python | gpl-3.0 | 1,120 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import importlib, os
from glob import glob
from cStringIO import StringIO
from common import write_if_changed
def discover():
    # find packages
    packages = {'horton': []}
    for fn in glob('../horton/*/__init__.py'):
        subpackage = fn.split('/')[2]
        if subpackage == 'test':
            continue
        packages['horton.%s' % subpackage] = []
    # find modules
    for package, modules in packages.iteritems():
        stub = package.replace('.', '/')
        for fn in sorted(glob('../%s/*.py' % stub) + glob('../%s/*.so' % stub)):
            module = fn.split('/')[-1][:-3]
            if module == '__init__':
                continue
            modules.append(module)
        for fn in sorted(glob('../%s/*.h' % stub)):
            module = fn.split('/')[-1]
            modules.append(module)
    return packages


def get_first_docline(module):
    m = importlib.import_module(module)
    if m.__doc__ is not None:
        lines = m.__doc__.split('\n')
        if len(lines) > 0:
            return lines[0]
    return 'FIXME! Write module docstring.'


def get_first_doxygenline(fn_h):
    with open('../%s' % fn_h) as f:
        for line in f:
            if line.startswith('// UPDATELIBDOCTITLE:'):
                return line[21:].strip()
    raise IOError('UPDATELIBDOCTITLE missing in %s' % fn_h)


def underline(line, char, f):
    print >> f, line
    print >> f, char*len(line)
    print >> f


def write_disclaimer(f):
    print >> f, '..'
    print >> f, '    This file is automatically generated. Do not make '
    print >> f, '    changes as these will be overwritten. Rather edit '
    print >> f, '    the documentation in the source code.'
    print >> f


def main():
    packages = discover()

    # Write new/updated rst files if needed
    fns_rst = []
    for package, modules in sorted(packages.iteritems()):
        # write the new file to a StringIO
        f1 = StringIO()
        write_disclaimer(f1)
        underline('``%s`` -- %s' % (package, get_first_docline(package)), '#', f1)
        print >> f1
        print >> f1, '.. automodule::', package
        print >> f1, '    :members:'
        print >> f1
        print >> f1, '.. toctree::'
        print >> f1, '    :maxdepth: 1'
        print >> f1, '    :numbered:'
        print >> f1
        for module in modules:
            f2 = StringIO()
            write_disclaimer(f2)
            if module.endswith('.h'):
                #full = package + '/' + module
                fn_h = package.replace('.', '/') + '/' + module
                underline('``%s`` -- %s' % (fn_h, get_first_doxygenline(fn_h)), '#', f2)
                print >> f2, '.. doxygenfile::', fn_h
                print >> f2, '    :project: horton'
                print >> f2
                print >> f2
            else:
                full = package + '.' + module
                underline('``%s`` -- %s' % (full, get_first_docline(full)), '#', f2)
                print >> f2, '.. automodule::', full
                print >> f2, '    :members:'
                print >> f2
                print >> f2
            # write if the contents have changed
            rst_name = 'mod_%s_%s' % (package.replace('.', '_'), module.replace('.', '_'))
            fn2_rst = 'lib/%s.rst' % rst_name
            fns_rst.append(fn2_rst)
            write_if_changed(fn2_rst, f2.getvalue())
            print >> f1, '    %s' % rst_name
        # write if the contents have changed
        fn1_rst = 'lib/pck_%s.rst' % package.replace('.', '_')
        fns_rst.append(fn1_rst)
        write_if_changed(fn1_rst, f1.getvalue())

    # Remove other rst files
    for fn_rst in glob('lib/*.rst'):
        if fn_rst not in fns_rst:
            print 'Removing %s' % fn_rst
            os.remove(fn_rst)


if __name__ == '__main__':
    main()
| crisely09/horton | doc/update_lib_doc.py | Python | gpl-3.0 | 4,657 |
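`write_if_changed` is imported from the repo's local `common` helper module, which is not shown here. A plausible minimal implementation (an assumption for the reader, not HORTON's actual code) only rewrites the .rst file when its content differs, so Sphinx skips unchanged pages. Python 2, to match the script above.

```python
# Hypothetical stand-in for common.write_if_changed.
import os


def write_if_changed(fn, content):
    if os.path.isfile(fn):
        with open(fn) as f:
            if f.read() == content:
                return  # unchanged: keep the old mtime
    print 'Writing %s' % fn
    with open(fn, 'w') as f:
        f.write(content)
```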
# coding=utf-8
# Copyright (C) 2014 Stefano Guglielmetti
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import smtplib, os, sys
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
#From address, to address, subject and message body
from_address = 'EMAIL_FROM_ADDRESS'
to_address = ['EMAIL_TO_ADDRESS']
email_subject = 'Alert!!! Zombies!!! Ahead!!!'
email_body = 'An intruder has been detected and needs to be eliminated!'
# Credentials (if needed)
username = 'YOUR_EMAIL_USERNAME'
password = 'YOUR_EMAIL_PASSWORD'
# The actual mail send
server = 'smtp.gmail.com:587'
def send_mail(send_from, send_to, subject, text, files=[], server="localhost"):
    assert type(send_to) == list
    assert type(files) == list

    msg = MIMEMultipart()
    msg['From'] = send_from
    msg['To'] = COMMASPACE.join(send_to)
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = subject

    msg.attach(MIMEText(text))

    for f in files:
        part = MIMEBase('application', "octet-stream")
        part.set_payload(open(f, "rb").read())
        Encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
        msg.attach(part)

    smtp = smtplib.SMTP(server)
    smtp.starttls()
    smtp.login(username, password)
    smtp.sendmail(send_from, send_to, msg.as_string())
    smtp.close()
send_mail(from_address, to_address, email_subject, email_body, [sys.argv[1]], server) #the first command line argument will be used as the image file name
| amicojeko/YouCantTouchThis | sendemail.py | Python | gpl-3.0 | 2,259 |
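Since send_mail takes explicit from/to/subject/body/files/server arguments, it is reusable beyond the intruder alert path; a hedged call sketch (addresses, attachment path, and server below are placeholders, and the module-level username/password must hold valid credentials):

```python
# Hypothetical direct call to send_mail; every value is a placeholder.
send_mail(
    '[email protected]',
    ['[email protected]'],
    'Daily report',
    'Snapshot attached.',
    ['/tmp/snapshot.jpg'],
    'smtp.gmail.com:587',
)
```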
# -*- coding: utf-8 -*-
"""
Validators for UI
@author: Jan Gabriel
@contact: [email protected]
"""
from enum import Enum
from PyQt5.QtGui import QColor
from PyQt5 import QtWidgets
class ValidationColors(Enum):
    """
    Holds colors for form validation.
    """
    white = QColor("#ffffff")
    red = QColor("#f6989d")
    grey = QColor("#A0A0A0")


class ValidationColorizer:
    """
    Provides simple coloring capability.
    """
    @classmethod
    def colorize(cls, field, color):
        """
        Color background of the field with specified color.
        :param field: Field handler.
        :param color: Desired color.
        :return:
        """
        color_name = color.name()
        class_name = field.__class__.__name__
        field.setStyleSheet('%s { background-color: %s }' % (
            class_name, color_name))

    @classmethod
    def colorize_frame(cls, field, color):
        """
        Color border of the field with specified color.
        :param field: Field handler.
        :param color: Desired color.
        :return:
        """
        color_name = color.name()
        class_name = field.__class__.__name__
        field.setStyleSheet('%s { border: 1px solid %s; border-radius: 3px; }' % (
            class_name, color_name))

    @classmethod
    def colorize_default(cls, field):
        """
        Convenience method for default coloring (white background for
        line edits, grey frame for combo boxes).
        :param field: Field handler.
        :return:
        """
        if isinstance(field, QtWidgets.QLineEdit):
            cls.colorize(field, ValidationColors.white.value)
        if isinstance(field, QtWidgets.QComboBox):
            cls.colorize_frame(field, ValidationColors.grey.value)

    @classmethod
    def colorize_red(cls, field):
        """
        Convenience method for red coloring.
        :param field: Field handler.
        :return:
        """
        if isinstance(field, QtWidgets.QLineEdit):
            cls.colorize(field, ValidationColors.red.value)
        if isinstance(field, QtWidgets.QComboBox):
            cls.colorize_frame(field, ValidationColors.red.value)


class PresetsValidationColorizer:
    """Validator for controls in a preset."""

    def __init__(self):
        self.controls = {}
        """dictionary of validated controls"""

    def add(self, key, control):
        """Add a control for validation."""
        self.controls[key] = control

    def colorize(self, errors):
        """Colorize associated controls and return True if all of them are valid."""
        valid = True
        for key, control in self.controls.items():
            if key in errors:
                control.setToolTip(errors[key])
                ValidationColorizer.colorize_red(control)
                valid = False
            else:
                ValidationColorizer.colorize_default(control)
                control.setToolTip("")
        return valid

    def reset_colorize(self):
        """Reset associated controls to their default colors."""
        for key, control in self.controls.items():
            ValidationColorizer.colorize_default(control)
            control.setToolTip("")

    def connect(self, validation_func):
        """Connect a validation function to the controls' change signals."""
        for key, control in self.controls.items():
            if isinstance(control, QtWidgets.QLineEdit):
                control.editingFinished.connect(validation_func)
            if isinstance(control, QtWidgets.QComboBox):
                control.currentIndexChanged.connect(validation_func)
| GeoMop/GeoMop | src/JobPanel/ui/validators/validation.py | Python | gpl-3.0 | 3,569 |
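A minimal wiring sketch for PresetsValidationColorizer (the "name"/"env" keys and the error dict are illustrative assumptions; a QApplication must exist before widgets are created):

```python
# Hypothetical usage of the colorizer above.
import sys
from PyQt5 import QtWidgets

app = QtWidgets.QApplication(sys.argv)

colorizer = PresetsValidationColorizer()
colorizer.add("name", QtWidgets.QLineEdit())
colorizer.add("env", QtWidgets.QComboBox())

# keys present in the dict turn red and get a tooltip; the rest reset
ok = colorizer.colorize({"name": "Name must not be empty"})
print(ok)  # False: at least one control is invalid
```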
# (C) 2012, Michael DeHaan, <[email protected]>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import json
TIME_FORMAT="%b %d %Y %H:%M:%S"
MSG_FORMAT="%(now)s - %(category)s - %(data)s\n\n"
if not os.path.exists("/var/log/ansible/hosts"):
    os.makedirs("/var/log/ansible/hosts")


def log(host, category, data):
    if type(data) == dict:
        if 'verbose_override' in data:
            # avoid logging extraneous data from facts
            data = 'omitted'
        else:
            invocation = data.pop('invocation', None)
            data = json.dumps(data)
            if invocation is not None:
                data = json.dumps(invocation) + " => %s " % data

    path = os.path.join("/var/log/ansible/hosts", host)
    now = time.strftime(TIME_FORMAT, time.localtime())
    fd = open(path, "a")
    fd.write(MSG_FORMAT % dict(now=now, category=category, data=data))
    fd.close()


class CallbackModule(object):
    """
    logs playbook results, per host, in /var/log/ansible/hosts
    """

    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        log(host, 'FAILED', res)

    def runner_on_ok(self, host, res):
        log(host, 'OK', res)

    def runner_on_error(self, host, msg):
        log(host, 'ERROR', msg)

    def runner_on_skipped(self, host, item=None):
        log(host, 'SKIPPED', '...')

    def runner_on_unreachable(self, host, res):
        log(host, 'UNREACHABLE', res)

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        pass

    def runner_on_async_failed(self, host, res, jid):
        log(host, 'ASYNC_FAILED', res)

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        log(host, 'IMPORTED', imported_file)

    def playbook_on_not_import_for_host(self, host, missing_file):
        log(host, 'NOTIMPORTED', missing_file)

    def playbook_on_play_start(self, pattern):
        pass

    def playbook_on_stats(self, stats):
        pass
| shlomozippel/ansible | plugins/callbacks/log_plays.py | Python | gpl-3.0 | 3,121 |
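For reference, one rendered entry in /var/log/ansible/hosts/&lt;host&gt; combines TIME_FORMAT and MSG_FORMAT; a sketch with made-up values:

```python
# Reconstructing a single log entry (timestamp and payload are
# illustrative, not captured output):
entry = MSG_FORMAT % dict(now='Jan 01 2014 12:00:00',
                          category='OK',
                          data='{"ping": "pong"}')
print(entry)
# Jan 01 2014 12:00:00 - OK - {"ping": "pong"}
```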
# This file is part of Gem.
#
# Gem is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gem is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gem. If not, see <http://www.gnu.org/licenses/\>.
from gem.api import Location
from enum import Enum
LOG_TAG = "player"
def player_position_update(player, location, warped):
    profile = player.profile
    profile.location = location
| kaye64/gem | content/player.py | Python | gpl-3.0 | 838 |
#!/usr/bin/env python3
import sys
import os
from distutils.core import run_setup
# build_ext is subclassed, so we import it with a _ to avoid a collision
from distutils.command.build_ext import build_ext as _build_ext
from setuptools import setup, Extension, find_packages
PKG_NAME = "piqueserver"
extra_args = sys.argv[2:]
with open('README.rst') as f:
    long_description = f.read()
# load version info from the piqueserver module manually
here = os.path.abspath(os.path.dirname(__file__))
version = {}
with open(os.path.join(here, 'piqueserver/version.py')) as f:
    exec(f.read(), version)
ext_names = [
'pyspades.vxl',
'pyspades.bytes',
'pyspades.packet',
'pyspades.contained',
'pyspades.common',
'pyspades.world',
'pyspades.loaders',
'pyspades.mapmaker'
]
static = os.environ.get('STDCPP_STATIC') == "1"
if static:
    print("Linking the build statically.")
linetrace = os.environ.get('CYTHON_TRACE') == '1'
# Compile the server with support for
# AddressSanitizer/UndefinedBehaviourSanitizer
USE_ASAN = os.environ.get('USE_ASAN') == '1'
USE_UBSAN = os.environ.get('USE_UBSAN') == '1'
ext_modules = []
for name in ext_names:
    extra = {
        "define_macros": [],
        "extra_link_args": [],
        "extra_compile_args": [],
    }  # type: dict

    if static:
        extra['extra_link_args'].extend(
            ['-static-libstdc++', '-static-libgcc'])

    if USE_ASAN:
        extra["extra_link_args"].append("-lasan")
        extra["extra_compile_args"].append("-fsanitize=address")
    if USE_UBSAN:
        extra["extra_link_args"].append("-lubsan")
        extra["extra_compile_args"].append("-fsanitize=undefined")

    if name in ['pyspades.vxl', 'pyspades.world', 'pyspades.mapmaker']:
        extra["extra_compile_args"].append('-std=c++11')

    if linetrace:
        extra['define_macros'].append(('CYTHON_TRACE', '1'))

    ext_modules.append(Extension(name, ['./%s.pyx' % name.replace('.', '/')],
                                 language='c++', include_dirs=['./pyspades'],
                                 **extra))
class build_ext(_build_ext):
    def run(self):
        from Cython.Build import cythonize
        if USE_ASAN:
            from Cython.Compiler import Options
            # make asan/valgrind's memory leak results better
            Options.generate_cleanup_code = True
        compiler_directives = {'language_level': 3, 'embedsignature': True}
        if linetrace:
            compiler_directives['linetrace'] = True
        self.extensions = cythonize(self.extensions, compiler_directives=compiler_directives)
        _build_ext.run(self)
run_setup(os.path.join(os.getcwd(), "setup.py"),
['build_py'] + extra_args)
setup(
name=PKG_NAME,
packages=find_packages(exclude=("tests", "tests.*")),
version=version['__version__'],
description='Open-Source server implementation for Ace of Spades ',
author=('Originally MatPow2 and PySnip contributors,'
'now, StackOverflow and piqueserver authors'),
author_email='[email protected]',
maintainer='noway421',
maintainer_email='[email protected]',
license='GNU General Public License v3',
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/piqueserver/piqueserver",
keywords=['ace of spades', 'aos', 'server',
'pyspades', 'pysnip', 'piqueserver'],
python_requires=">=3.6.0",
classifiers=[
'Intended Audience :: System Administrators',
'Development Status :: 5 - Production/Stable',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows',
'Environment :: Console',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Framework :: Twisted',
'Topic :: Games/Entertainment',
'Topic :: Games/Entertainment :: First Person Shooters',
],
platforms="Darwin, Unix, Win32",
setup_requires=['Cython>=0.27,<1'],
install_requires=[
'pypiwin32;platform_system=="Windows"',
'Cython>=0.27,<1',
'Twisted[tls]',
'Jinja2>=2,<4',
'Pillow>=5.1.0,<10',
'aiohttp>=3.3.0,<3.8.0',
'pyenet',
'toml',
'packaging>=19.0'
],
extras_require={
'from': ['geoip2>=2.9,<5.0'],
'ssh': ['Twisted[tls,conch]'],
},
entry_points={
'console_scripts': [
'%s=%s.run:main' % (PKG_NAME, PKG_NAME)
],
},
package_data={"%s.web" % PKG_NAME: ["templates/status.html"]},
include_package_data=True,
ext_modules=ext_modules,
cmdclass={'build_ext': build_ext},
)
| piqueserver/piqueserver | setup.py | Python | gpl-3.0 | 4,880 |
# Bulletproof Arma Launcher
# Copyright (C) 2016 Sascha Ebert
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import unittest
import time
import os
import shutil
import sys
import json
from multiprocessing import Pipe
from datetime import datetime
from datetime import timedelta
from mock import patch, Mock
from kivy.clock import Clock
from nose.plugins.attrib import attr
from utils.process import Process
def worker_func(con):
    con.send('test1')
    con.send('test2')


class EventBridgeTest(unittest.TestCase):

    def setUp(self):
        # To fix the Windows forking system it's necessary to point __main__ to
        # the module we want to execute in the forked process
        self.old_main = sys.modules["__main__"]
        self.old_main_file = sys.modules["__main__"].__file__
        sys.modules["__main__"] = sys.modules["tests.utils.event_bridge_test"]
        sys.modules["__main__"].__file__ = sys.modules["tests.utils.event_bridge_test"].__file__

    def tearDown(self):
        sys.modules["__main__"] = self.old_main
        sys.modules["__main__"].__file__ = self.old_main_file

    def test_connection_can_hold_more_than_one_msg(self):
        parent_conn, child_conn = Pipe()
        p = Process(target=worker_func, args=(child_conn,))
        p.start()
        # time.sleep(2)
        self.assertEqual(parent_conn.recv(), 'test1')
        self.assertEqual(parent_conn.recv(), 'test2')
        p.join()
| overfl0/Bulletproof-Arma-Launcher | tests/utils/event_bridge_test.py | Python | gpl-3.0 | 1,873 |
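The setUp/tearDown pair exists because Windows re-creates the child process by re-importing `__main__`, so the pickled `worker_func` reference must resolve against the test module. A stripped-down sketch of the same trick outside unittest (a hedged illustration, not part of the launcher):

```python
# Minimal sketch of the __main__ re-pointing workaround used above.
import sys
from multiprocessing import Process


def spawn(target, *args):
    saved = sys.modules["__main__"]
    saved_file = saved.__file__
    mod = sys.modules[target.__module__]
    # make the child resolve `target` against its defining module
    sys.modules["__main__"] = mod
    sys.modules["__main__"].__file__ = mod.__file__
    try:
        p = Process(target=target, args=args)
        p.start()
        return p
    finally:
        sys.modules["__main__"] = saved
        sys.modules["__main__"].__file__ = saved_file
```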
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from nasa_r2_common_msgs/JointStatusArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import nasa_r2_common_msgs.msg
import std_msgs.msg
class JointStatusArray(genpy.Message):
_md5sum = "db132c4fff9528f41c0236d435100eda"
_type = "nasa_r2_common_msgs/JointStatusArray"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
JointStatus[] status
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: nasa_r2_common_msgs/JointStatus
string publisher
string joint
uint32 registerValue
bool coeffsLoaded
bool bridgeEnabled
bool motorEnabled
bool brakeReleased
bool motorPowerDetected
bool embeddedMotCom
bool jointFaulted
"""
__slots__ = ['header','status']
_slot_types = ['std_msgs/Header','nasa_r2_common_msgs/JointStatus[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,status
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(JointStatusArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = []
else:
self.header = std_msgs.msg.Header()
self.status = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.status)
buff.write(_struct_I.pack(length))
for val1 in self.status:
_x = val1.publisher
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.joint
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_struct_I7B.pack(_x.registerValue, _x.coeffsLoaded, _x.bridgeEnabled, _x.motorEnabled, _x.brakeReleased, _x.motorPowerDetected, _x.embeddedMotCom, _x.jointFaulted))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.status = []
for i in range(0, length):
val1 = nasa_r2_common_msgs.msg.JointStatus()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.publisher = str[start:end].decode('utf-8')
else:
val1.publisher = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.joint = str[start:end].decode('utf-8')
else:
val1.joint = str[start:end]
_x = val1
start = end
end += 11
(_x.registerValue, _x.coeffsLoaded, _x.bridgeEnabled, _x.motorEnabled, _x.brakeReleased, _x.motorPowerDetected, _x.embeddedMotCom, _x.jointFaulted,) = _struct_I7B.unpack(str[start:end])
val1.coeffsLoaded = bool(val1.coeffsLoaded)
val1.bridgeEnabled = bool(val1.bridgeEnabled)
val1.motorEnabled = bool(val1.motorEnabled)
val1.brakeReleased = bool(val1.brakeReleased)
val1.motorPowerDetected = bool(val1.motorPowerDetected)
val1.embeddedMotCom = bool(val1.embeddedMotCom)
val1.jointFaulted = bool(val1.jointFaulted)
self.status.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.status)
buff.write(_struct_I.pack(length))
for val1 in self.status:
_x = val1.publisher
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.joint
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_struct_I7B.pack(_x.registerValue, _x.coeffsLoaded, _x.bridgeEnabled, _x.motorEnabled, _x.brakeReleased, _x.motorPowerDetected, _x.embeddedMotCom, _x.jointFaulted))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.status = []
for i in range(0, length):
val1 = nasa_r2_common_msgs.msg.JointStatus()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.publisher = str[start:end].decode('utf-8')
else:
val1.publisher = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.joint = str[start:end].decode('utf-8')
else:
val1.joint = str[start:end]
_x = val1
start = end
end += 11
(_x.registerValue, _x.coeffsLoaded, _x.bridgeEnabled, _x.motorEnabled, _x.brakeReleased, _x.motorPowerDetected, _x.embeddedMotCom, _x.jointFaulted,) = _struct_I7B.unpack(str[start:end])
val1.coeffsLoaded = bool(val1.coeffsLoaded)
val1.bridgeEnabled = bool(val1.bridgeEnabled)
val1.motorEnabled = bool(val1.motorEnabled)
val1.brakeReleased = bool(val1.brakeReleased)
val1.motorPowerDetected = bool(val1.motorPowerDetected)
val1.embeddedMotCom = bool(val1.embeddedMotCom)
val1.jointFaulted = bool(val1.jointFaulted)
self.status.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_I7B = struct.Struct("<I7B")
| mkhuthir/catkin_ws | src/chessbot/devel/lib/python2.7/dist-packages/nasa_r2_common_msgs/msg/_JointStatusArray.py | Python | gpl-3.0 | 10,820 |
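As an autogenerated genpy message the class above is normally exercised through rospy, but a standalone serialize/deserialize roundtrip looks roughly like this (a hedged sketch: it assumes nasa_r2_common_msgs and std_msgs are importable, and the field values are made up):

```python
# Hypothetical roundtrip through the generated (de)serializers.
from io import BytesIO
from nasa_r2_common_msgs.msg import JointStatus, JointStatusArray

msg = JointStatusArray()
msg.header.frame_id = 'r2/robot_base'
msg.status = [JointStatus(publisher='demo', joint='r2/left_arm/joint0',
                          registerValue=0, coeffsLoaded=True,
                          bridgeEnabled=True, motorEnabled=True,
                          brakeReleased=False, motorPowerDetected=True,
                          embeddedMotCom=False, jointFaulted=False)]

buff = BytesIO()
msg.serialize(buff)

copy = JointStatusArray()
copy.deserialize(buff.getvalue())
assert copy.status[0].joint == 'r2/left_arm/joint0'
```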
#!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <[email protected]>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file ComponentModelTest.py
# unittests for field Tags running Tango Server
#
import unittest
import os
import sys
import random
import struct
import binascii
import time
from PyQt5.QtCore import (
Qt, QAbstractItemModel, QModelIndex,)
from PyQt5.QtXml import QDomDocument
from nxsconfigtool.ComponentModel import ComponentModel
from nxsconfigtool.ComponentItem import ComponentItem
# if 64-bit machine
IS64BIT = (struct.calcsize("P") == 8)
if sys.version_info > (3,):
    long = int
    unicode = str
# test fixture
class ComponentModelTest(unittest.TestCase):
# constructor
# \param methodName name of the test method
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
self._bint = "int64" if IS64BIT else "int32"
self._buint = "uint64" if IS64BIT else "uint32"
self._bfloat = "float64" if IS64BIT else "float32"
# MessageBox text
self.text = None
# MessageBox title
self.title = None
# action status
self.performed = False
try:
self.__seed = long(binascii.hexlify(os.urandom(16)), 16)
except NotImplementedError:
self.__seed = long(time.time() * 256)
# self.__seed = 105186230414225794971485160270620812570
self.__rnd = random.Random(self.__seed)
# test starter
# \brief Common set up
def setUp(self):
print("\nsetting up...")
print("SEED = %s" % self.__seed)
# test closer
# \brief Common tear down
def tearDown(self):
print("tearing down ...")
# constructor test
# \brief It tests default settings
def test_constructor(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
qdn.appendChild(kds[-1])
allAttr = False
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
self.assertEqual(cd.parent, None)
self.assertEqual(cd.childNumber(), 0)
self.assertEqual(cd.node.nodeName(), "#document")
ci = cd.child(0)
self.assertEqual(ci.parent, cd)
self.assertEqual(ci.node, qdn)
self.assertEqual(ci.childNumber(), 0)
self.assertEqual(ci.node.nodeName(), nname)
for k in range(nkids):
self.assertTrue(isinstance(ci.child(k), ComponentItem))
self.assertTrue(isinstance(ci.child(k).parent, ComponentItem))
self.assertEqual(ci.child(k).childNumber(), k)
self.assertEqual(ci.child(k).node, kds[k])
self.assertEqual(ci.child(k).parent.node, qdn)
self.assertEqual(ci.child(k).node.nodeName(), "kid%s" % k)
self.assertEqual(ci.child(k).parent, ci)
# constructor test
# \brief It tests default settings
def test_headerData(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
qdn.appendChild(kds[-1])
allAttr = False
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
self.assertEqual(cm.headerData(0, Qt.Vertical), None)
hd = cm.headerData(0, Qt.Horizontal)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Name')
hd = cm.headerData(0, Qt.Horizontal, Qt.DisplayRole)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Name')
hd = cm.headerData(1, Qt.Horizontal)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Type')
hd = cm.headerData(1, Qt.Horizontal, Qt.DisplayRole)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Type')
hd = cm.headerData(2, Qt.Horizontal)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Value')
hd = cm.headerData(2, Qt.Horizontal, Qt.DisplayRole)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Value')
hd = cm.headerData(3, Qt.Horizontal)
self.assertEqual(hd, None)
hd = cm.headerData(3, Qt.Horizontal, Qt.DisplayRole)
hd = cm.headerData(-1, Qt.Horizontal)
self.assertEqual(hd, None)
hd = cm.headerData(-1, Qt.Horizontal, Qt.DisplayRole)
self.assertEqual(hd, None)
cm.setAttributeView(True)
hd = cm.headerData(1, Qt.Horizontal)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Attributes')
hd = cm.headerData(1, Qt.Horizontal, Qt.DisplayRole)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Attributes')
cm.setAttributeView(False)
hd = cm.headerData(1, Qt.Horizontal)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Type')
hd = cm.headerData(1, Qt.Horizontal, Qt.DisplayRole)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Type')
allAttr = True
cm = ComponentModel(doc, allAttr)
hd = cm.headerData(1, Qt.Horizontal)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Attributes')
hd = cm.headerData(1, Qt.Horizontal, Qt.DisplayRole)
self.assertTrue(isinstance(hd, str))
self.assertEqual(hd, 'Attributes')
# constructor test
# \brief It tests default settings
def test_data(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
qdn.appendChild(kds[-1])
allAttr = False
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
self.assertEqual(cm.headerData(0, Qt.Vertical), None)
dt = cm.data(QModelIndex())
self.assertEqual(dt, None)
for role in range(1, 5):
dt = cm.data(cm.rootIndex, role)
self.assertEqual(dt, None)
dt = cm.data(cm.rootIndex)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, '#document')
dt = cm.data(cm.rootIndex, Qt.DisplayRole)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, '#document')
# constructor test
# \brief It tests default settings
def test_data_name(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = False
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
self.assertEqual(cm.headerData(0, Qt.Vertical), None)
ri = cm.rootIndex
di = cm.index(0, 0, ri)
ci = cd.child(0)
for n in range(nkids):
# kd =
ci.child(n)
ki0 = cm.index(n, 0, di)
dt = cm.data(ki0)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, 'kid%s: myname%s' % (n, n))
ki1 = cm.index(n, 1, di)
dt = cm.data(ki1)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(str(dt).strip(), 'mytype%s' % n)
ki2 = cm.index(n, 2, di)
dt = cm.data(ki2)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(str(dt).strip(), '')
ki2 = cm.index(n, -1, di)
dt = cm.data(ki2)
# self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, None)
ki2 = cm.index(n, 3, di)
dt = cm.data(ki2)
# self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, None)
def test_data_name_attr(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = False
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
self.assertEqual(cm.headerData(0, Qt.Vertical), None)
ri = cm.rootIndex
di = cm.index(0, 0, ri)
ci = cd.child(0)
for n in range(nkids):
# kd =
ci.child(n)
cm.setAttributeView(False)
ki0 = cm.index(n, 0, di)
dt = cm.data(ki0)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, 'kid%s: myname%s' % (n, n))
ki1 = cm.index(n, 1, di)
dt = cm.data(ki1)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(str(dt).strip(), 'mytype%s' % n)
ki2 = cm.index(n, 2, di)
dt = cm.data(ki2)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(str(dt).strip(), '')
ki2 = cm.index(n, -1, di)
dt = cm.data(ki2)
# self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, None)
ki2 = cm.index(n, 3, di)
dt = cm.data(ki2)
# self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, None)
cm.setAttributeView(True)
ki0 = cm.index(n, 0, di)
dt = cm.data(ki0)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, 'kid%s: myname%s' % (n, n))
ki1 = cm.index(n, 1, di)
dt = cm.data(ki1)
self.assertTrue(isinstance(dt, (unicode, str)))
s1 = set(str(dt).strip().split(" "))
s2 = set(('units="myunits%s" type="mytype%s" name="myname%s"' %
(n, n, n)).split(" "))
self.assertEqual(s1, s2)
ki2 = cm.index(n, 2, di)
dt = cm.data(ki2)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(str(dt).strip(), '')
def test_data_name_attr_true(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = True
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
self.assertEqual(cm.headerData(0, Qt.Vertical), None)
ri = cm.rootIndex
di = cm.index(0, 0, ri)
ci = cd.child(0)
for n in range(nkids):
# kd =
ci.child(n)
ki0 = cm.index(n, 0, di)
dt = cm.data(ki0)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, 'kid%s: myname%s' % (n, n))
ki1 = cm.index(n, 1, di)
dt = cm.data(ki1)
self.assertTrue(isinstance(dt, (unicode, str)))
s1 = set(str(dt).strip().split(" "))
s2 = set(('units="myunits%s" type="mytype%s" name="myname%s"' %
(n, n, n)).split(" "))
self.assertEqual(s1, s2)
ki2 = cm.index(n, 2, di)
dt = cm.data(ki2)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(str(dt).strip(), '')
def test_data_name_text(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = True
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
self.assertEqual(cm.headerData(0, Qt.Vertical), None)
ri = cm.rootIndex
di = cm.index(0, 0, ri)
# ci =
cd.child(0)
for n in range(nkids):
allAttr = not allAttr
cm.setAttributeView(allAttr)
ki = cm.index(n, 0, di)
ti = cm.index(0, 0, ki)
dt = cm.data(ti)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(dt, '#text')
ti = cm.index(0, 1, ki)
dt = cm.data(ti)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(str(dt).strip(), '')
ti = cm.index(0, 2, ki)
dt = cm.data(ti)
self.assertTrue(isinstance(dt, (unicode, str)))
self.assertEqual(str(dt).strip(), 'Text %s' % n)
def test_flags(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = True
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
self.assertEqual(cm.flags(QModelIndex()), Qt.ItemIsEnabled)
ri = cm.rootIndex
self.assertEqual(
cm.flags(ri),
Qt.ItemFlags(QAbstractItemModel.flags(cm, ri) |
Qt.ItemIsEnabled | Qt.ItemIsSelectable))
di = cm.index(0, 0, ri)
self.assertEqual(
cm.flags(di),
Qt.ItemFlags(QAbstractItemModel.flags(cm, di) |
Qt.ItemIsEnabled | Qt.ItemIsSelectable))
for n in range(nkids):
allAttr = not allAttr
cm.setAttributeView(allAttr)
ki = cm.index(n, 0, di)
self.assertEqual(
cm.flags(ki),
Qt.ItemFlags(QAbstractItemModel.flags(cm, ki) |
Qt.ItemIsEnabled | Qt.ItemIsSelectable))
ki = cm.index(n, 1, di)
self.assertEqual(
cm.flags(ki),
Qt.ItemFlags(QAbstractItemModel.flags(cm, ki) |
Qt.ItemIsEnabled | Qt.ItemIsSelectable))
ki = cm.index(n, 2, di)
self.assertEqual(
cm.flags(ki),
Qt.ItemFlags(QAbstractItemModel.flags(cm, ki) |
Qt.ItemIsEnabled | Qt.ItemIsSelectable))
ki = cm.index(n, 3, di)
self.assertEqual(cm.flags(ki), Qt.ItemIsEnabled)
ki = cm.index(n, 0, di)
ti = cm.index(0, 0, ki)
self.assertEqual(
cm.flags(ti),
Qt.ItemFlags(QAbstractItemModel.flags(cm, ti) |
Qt.ItemIsEnabled | Qt.ItemIsSelectable))
ti = cm.index(0, 1, ki)
self.assertEqual(
cm.flags(ti),
Qt.ItemFlags(QAbstractItemModel.flags(cm, ti) |
Qt.ItemIsEnabled | Qt.ItemIsSelectable))
ti = cm.index(0, 2, ki)
self.assertEqual(
cm.flags(ti),
Qt.ItemFlags(QAbstractItemModel.flags(cm, ti) |
Qt.ItemIsEnabled | Qt.ItemIsSelectable))
ti = cm.index(0, 3, ki)
self.assertEqual(cm.flags(ti), Qt.ItemIsEnabled)
def test_index(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = True
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
ri = cm.rootIndex
di = cm.index(0, 0, ri)
self.assertTrue(isinstance(di, QModelIndex))
self.assertEqual(di.row(), 0)
self.assertEqual(di.column(), 0)
self.assertEqual(di.internalPointer().node, qdn)
self.assertEqual(di.internalPointer().parent.node, doc)
iv = cm.index(0, 0)
self.assertTrue(isinstance(iv, QModelIndex))
self.assertEqual(iv.row(), 0)
self.assertEqual(iv.column(), 0)
self.assertEqual(iv, di)
self.assertEqual(iv.internalPointer(), di.internalPointer())
iv = cm.index(0, 0, QModelIndex())
self.assertTrue(isinstance(iv, QModelIndex))
self.assertEqual(iv.row(), 0)
self.assertEqual(iv.column(), 0)
self.assertEqual(iv, di)
self.assertEqual(iv.internalPointer(), di.internalPointer())
for n in range(nkids):
allAttr = not allAttr
cm.setAttributeView(allAttr)
ki = cm.index(n, 0, di)
self.assertTrue(isinstance(ki, QModelIndex))
self.assertEqual(ki.row(), n)
self.assertEqual(ki.column(), 0)
self.assertEqual(ki.internalPointer().node, kds[n])
self.assertEqual(ki.internalPointer().parent.node, qdn)
ki = cm.index(n, 1, di)
self.assertTrue(isinstance(ki, QModelIndex))
self.assertEqual(ki.row(), n)
self.assertEqual(ki.column(), 1)
self.assertEqual(ki.internalPointer().node, kds[n])
self.assertEqual(ki.internalPointer().parent.node, qdn)
ki = cm.index(n, 2, di)
self.assertTrue(isinstance(ki, QModelIndex))
self.assertEqual(ki.row(), n)
self.assertEqual(ki.column(), 2)
self.assertEqual(ki.internalPointer().node, kds[n])
self.assertEqual(ki.internalPointer().parent.node, qdn)
ki = cm.index(n, 3, di)
self.assertTrue(isinstance(ki, QModelIndex))
self.assertEqual(ki.row(), -1)
self.assertEqual(ki.column(), -1)
self.assertEqual(ki.internalPointer(), None)
ki = cm.index(n, 0, di)
ti = cm.index(0, 0, ki)
self.assertTrue(isinstance(ti, QModelIndex))
self.assertEqual(ti.row(), 0)
self.assertEqual(ti.column(), 0)
self.assertEqual(ti.internalPointer().node, tkds[n])
self.assertEqual(ti.internalPointer().parent.node, kds[n])
ti = cm.index(0, 1, ki)
self.assertTrue(isinstance(ti, QModelIndex))
self.assertEqual(ti.row(), 0)
self.assertEqual(ti.column(), 1)
self.assertEqual(ti.internalPointer().node, tkds[n])
self.assertEqual(ti.internalPointer().parent.node, kds[n])
ti = cm.index(0, 2, ki)
self.assertTrue(isinstance(ti, QModelIndex))
self.assertEqual(ti.row(), 0)
self.assertEqual(ti.column(), 2)
self.assertEqual(ti.internalPointer().node, tkds[n])
self.assertEqual(ti.internalPointer().parent.node, kds[n])
ti = cm.index(0, 3, ki)
self.assertTrue(isinstance(ti, QModelIndex))
self.assertEqual(ti.row(), -1)
self.assertEqual(ti.column(), -1)
self.assertEqual(ti.internalPointer(), None)
def test_parent(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = True
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
ri = cm.rootIndex
pri = cm.parent(ri)
self.assertTrue(isinstance(pri, QModelIndex))
self.assertEqual(pri.row(), -1)
self.assertEqual(pri.column(), -1)
self.assertEqual(pri.internalPointer(), None)
# avoids showing #document
di = cm.index(0, 0, ri)
pdi = cm.parent(di)
self.assertTrue(isinstance(pdi, QModelIndex))
self.assertEqual(pdi.row(), -1)
self.assertEqual(pdi.column(), -1)
self.assertEqual(pdi.internalPointer(), None)
iv = cm.index(0, 0)
piv = cm.parent(iv)
self.assertTrue(isinstance(piv, QModelIndex))
self.assertEqual(pdi.row(), -1)
self.assertEqual(pdi.column(), -1)
self.assertEqual(pdi.internalPointer(), None)
for n in range(nkids):
allAttr = not allAttr
cm.setAttributeView(allAttr)
ki = cm.index(n, 0, di)
pki = cm.parent(ki)
self.assertEqual(pki, di)
ki = cm.index(n, 1, di)
pki = cm.parent(ki)
self.assertEqual(pki, di)
ki = cm.index(n, 2, di)
pki = cm.parent(ki)
self.assertEqual(pki, di)
ki = cm.index(n, 3, di)
pki = cm.parent(ki)
self.assertTrue(isinstance(pki, QModelIndex))
self.assertEqual(pki.row(), -1)
self.assertEqual(pki.column(), -1)
self.assertEqual(pki.internalPointer(), None)
ki = cm.index(n, 0, di)
ti = cm.index(0, 0, ki)
pti = cm.parent(ti)
self.assertEqual(pti, ki)
ti = cm.index(0, 1, ki)
pti = cm.parent(ti)
self.assertEqual(pti, ki)
ti = cm.index(0, 2, ki)
pti = cm.parent(ti)
self.assertEqual(pti, ki)
ti = cm.index(0, 3, ki)
pti = cm.parent(ti)
self.assertTrue(isinstance(pti, QModelIndex))
self.assertEqual(pti.row(), -1)
self.assertEqual(pti.column(), -1)
self.assertEqual(pti.internalPointer(), None)
def test_rowCount(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = True
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
ri = cm.rootIndex
self.assertEqual(cm.rowCount(ri), 1)
# avoids showing #document
di = cm.index(0, 0, ri)
self.assertEqual(cm.rowCount(di), nkids)
iv = cm.index(0, 0)
self.assertEqual(cm.rowCount(iv), nkids)
for n in range(nkids):
allAttr = not allAttr
cm.setAttributeView(allAttr)
ki = cm.index(n, 0, di)
self.assertEqual(cm.rowCount(ki), 1)
ki = cm.index(n, 1, di)
self.assertEqual(cm.rowCount(ki), 0)
ki = cm.index(n, 2, di)
self.assertEqual(cm.rowCount(ki), 0)
# invalid index
ki = cm.index(n, 3, di)
self.assertEqual(cm.rowCount(ki), 1)
ki = cm.index(n, 0, di)
ti = cm.index(0, 0, ki)
self.assertEqual(cm.rowCount(ti), 0)
ti = cm.index(0, 1, ki)
self.assertEqual(cm.rowCount(ti), 0)
ti = cm.index(0, 2, ki)
self.assertEqual(cm.rowCount(ti), 0)
ti = cm.index(0, 3, ki)
self.assertEqual(cm.rowCount(ti), 1)
def test_columnCount(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = True
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
ri = cm.rootIndex
self.assertEqual(cm.columnCount(ri), 3)
# avoids showing #document
di = cm.index(0, 0, ri)
self.assertEqual(cm.columnCount(di), 3)
iv = cm.index(0, 0)
self.assertEqual(cm.columnCount(iv), 3)
for n in range(nkids):
allAttr = not allAttr
cm.setAttributeView(allAttr)
ki = cm.index(n, 0, di)
self.assertEqual(cm.columnCount(ki), 3)
ki = cm.index(n, 1, di)
self.assertEqual(cm.columnCount(ki), 3)
ki = cm.index(n, 2, di)
self.assertEqual(cm.columnCount(ki), 3)
# invalid index
ki = cm.index(n, 3, di)
self.assertEqual(cm.columnCount(ki), 3)
ki = cm.index(n, 0, di)
ti = cm.index(0, 0, ki)
self.assertEqual(cm.columnCount(ti), 3)
ti = cm.index(0, 1, ki)
self.assertEqual(cm.columnCount(ti), 3)
ti = cm.index(0, 2, ki)
self.assertEqual(cm.columnCount(ti), 3)
ti = cm.index(0, 3, ki)
self.assertEqual(cm.columnCount(ti), 3)
def test_insertItem(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = True
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
ri = cm.rootIndex
self.assertEqual(cm.columnCount(ri), 3)
# avoids showing #document
di = cm.index(0, 0, ri)
self.assertEqual(cm.columnCount(di), 3)
iv = cm.index(0, 0)
self.assertEqual(cm.columnCount(iv), 3)
ci = di.internalPointer()
self.assertEqual(ci.node, qdn)
self.assertEqual(ci.childNumber(), 0)
self.assertEqual(ci.node.nodeName(), nname)
for k in range(nkids):
ks = ci.child(k)
self.assertTrue(isinstance(ks, ComponentItem))
self.assertTrue(isinstance(ks.parent, ComponentItem))
self.assertEqual(ks.childNumber(), k)
self.assertEqual(ks.node, kds[k])
self.assertEqual(ks.parent.node, qdn)
self.assertEqual(ks.node.nodeName(), "kid%s" % k)
self.assertEqual(ks.parent, ci)
self.assertTrue(isinstance(ks.child(0), ComponentItem))
self.assertTrue(isinstance(ks.child(0).parent, ComponentItem))
self.assertEqual(ks.child(0).childNumber(), 0)
self.assertEqual(ks.child(0).node, tkds[k])
self.assertEqual(ks.child(0).parent.node, ks.node)
self.assertEqual(ks.child(0).node.nodeName(), "#text")
self.assertEqual(
ks.child(0).node.toText().data(), '\nText\n %s\n' % k)
self.assertEqual(ks.child(0).parent, ks)
insd = self.__rnd.randint(0, nkids - 1)
inkd = doc.createElement("insertedkid")
self.assertTrue(not cm.insertItem(insd, inkd, QModelIndex()))
self.assertTrue(cm.insertItem(insd, inkd, di))
for k in range(nkids+1):
ks = ci.child(k)
if k == insd:
self.assertTrue(isinstance(ks, ComponentItem))
self.assertTrue(isinstance(ks.parent, ComponentItem))
self.assertEqual(ks.childNumber(), k)
self.assertEqual(ks.node, inkd)
self.assertEqual(ks.parent.node, qdn)
self.assertEqual(ks.node.nodeName(), "insertedkid")
self.assertEqual(ks.parent, ci)
continue
kk = k if k < insd else k - 1
self.assertTrue(isinstance(ks, ComponentItem))
self.assertTrue(isinstance(ks.parent, ComponentItem))
self.assertEqual(ks.childNumber(), k)
self.assertEqual(ks.node, kds[kk])
self.assertEqual(ks.parent.node, qdn)
self.assertEqual(ks.node.nodeName(), "kid%s" % kk)
self.assertEqual(ks.parent, ci)
self.assertTrue(isinstance(ks.child(0), ComponentItem))
self.assertTrue(isinstance(ks.child(0).parent, ComponentItem))
self.assertEqual(ks.child(0).childNumber(), 0)
self.assertEqual(ks.child(0).node, tkds[kk])
self.assertEqual(ks.child(0).parent.node, ks.node)
self.assertEqual(ks.child(0).node.nodeName(), "#text")
self.assertEqual(
ks.child(0).node.toText().data(), '\nText\n %s\n' % kk)
self.assertEqual(ks.child(0).parent, ks)
def test_appendItem(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = True
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
ri = cm.rootIndex
self.assertEqual(cm.columnCount(ri), 3)
# avoids showing #document
di = cm.index(0, 0, ri)
self.assertEqual(cm.columnCount(di), 3)
iv = cm.index(0, 0)
self.assertEqual(cm.columnCount(iv), 3)
ci = di.internalPointer()
self.assertEqual(ci.node, qdn)
self.assertEqual(ci.childNumber(), 0)
self.assertEqual(ci.node.nodeName(), nname)
for k in range(nkids):
ks = ci.child(k)
self.assertTrue(isinstance(ks, ComponentItem))
self.assertTrue(isinstance(ks.parent, ComponentItem))
self.assertEqual(ks.childNumber(), k)
self.assertEqual(ks.node, kds[k])
self.assertEqual(ks.parent.node, qdn)
self.assertEqual(ks.node.nodeName(), "kid%s" % k)
self.assertEqual(ks.parent, ci)
self.assertTrue(isinstance(ks.child(0), ComponentItem))
self.assertTrue(isinstance(ks.child(0).parent, ComponentItem))
self.assertEqual(ks.child(0).childNumber(), 0)
self.assertEqual(ks.child(0).node, tkds[k])
self.assertEqual(ks.child(0).parent.node, ks.node)
self.assertEqual(ks.child(0).node.nodeName(), "#text")
self.assertEqual(
ks.child(0).node.toText().data(), '\nText\n %s\n' % k)
self.assertEqual(ks.child(0).parent, ks)
inkd = doc.createElement("insertedkid")
self.assertTrue(not cm.appendItem(inkd, QModelIndex()))
self.assertTrue(cm.appendItem(inkd, di))
for k in range(nkids):
ks = ci.child(k)
self.assertTrue(isinstance(ks, ComponentItem))
self.assertTrue(isinstance(ks.parent, ComponentItem))
self.assertEqual(ks.childNumber(), k)
self.assertEqual(ks.node, kds[k])
self.assertEqual(ks.parent.node, qdn)
self.assertEqual(ks.node.nodeName(), "kid%s" % k)
self.assertEqual(ks.parent, ci)
self.assertTrue(isinstance(ks.child(0), ComponentItem))
self.assertTrue(isinstance(ks.child(0).parent, ComponentItem))
self.assertEqual(ks.child(0).childNumber(), 0)
self.assertEqual(ks.child(0).node, tkds[k])
self.assertEqual(ks.child(0).parent.node, ks.node)
self.assertEqual(ks.child(0).node.nodeName(), "#text")
self.assertEqual(
ks.child(0).node.toText().data(), '\nText\n %s\n' % k)
self.assertEqual(ks.child(0).parent, ks)
# print k, ks.childNumber()
k = nkids
ks = ci.child(k)
self.assertTrue(isinstance(ks, ComponentItem))
self.assertTrue(isinstance(ks.parent, ComponentItem))
self.assertEqual(ks.childNumber(), k)
self.assertEqual(ks.node, inkd)
self.assertEqual(ks.parent.node, qdn)
self.assertEqual(ks.node.nodeName(), "insertedkid")
self.assertEqual(ks.parent, ci)
def test_removeItem(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
tkds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
kds[-1].setAttribute("name", "myname%s" % n)
kds[-1].setAttribute("type", "mytype%s" % n)
kds[-1].setAttribute("units", "myunits%s" % n)
qdn.appendChild(kds[-1])
tkds.append(doc.createTextNode("\nText\n %s\n" % n))
kds[-1].appendChild(tkds[-1])
# print doc
allAttr = True
cm = ComponentModel(doc, allAttr)
self.assertTrue(isinstance(cm, QAbstractItemModel))
self.assertTrue(isinstance(cm.rootIndex, QModelIndex))
cd = cm.rootIndex.internalPointer()
self.assertTrue(isinstance(cd, ComponentItem))
self.assertEqual(cm.rootIndex.row(), 0)
self.assertEqual(cm.rootIndex.column(), 0)
ri = cm.rootIndex
self.assertEqual(cm.columnCount(ri), 3)
# avoids showing #document
di = cm.index(0, 0, ri)
self.assertEqual(cm.columnCount(di), 3)
iv = cm.index(0, 0)
self.assertEqual(cm.columnCount(iv), 3)
ci = di.internalPointer()
self.assertEqual(ci.node, qdn)
self.assertEqual(ci.childNumber(), 0)
self.assertEqual(ci.node.nodeName(), nname)
for k in range(nkids):
ks = ci.child(k)
self.assertTrue(isinstance(ks, ComponentItem))
self.assertTrue(isinstance(ks.parent, ComponentItem))
self.assertEqual(ks.childNumber(), k)
self.assertEqual(ks.node, kds[k])
self.assertEqual(ks.parent.node, qdn)
self.assertEqual(ks.node.nodeName(), "kid%s" % k)
self.assertEqual(ks.parent, ci)
self.assertTrue(isinstance(ks.child(0), ComponentItem))
self.assertTrue(isinstance(ks.child(0).parent, ComponentItem))
self.assertEqual(ks.child(0).childNumber(), 0)
self.assertEqual(ks.child(0).node, tkds[k])
self.assertEqual(ks.child(0).parent.node, ks.node)
self.assertEqual(ks.child(0).node.nodeName(), "#text")
self.assertEqual(
ks.child(0).node.toText().data(), '\nText\n %s\n' % k)
self.assertEqual(ks.child(0).parent, ks)
rmvd = self.__rnd.randint(0, nkids - 1)
# rmkd =
ci.child(rmvd)
self.assertTrue(not cm.removeItem(rmvd, QModelIndex()))
self.assertTrue(cm.removeItem(rmvd, di))
for k in range(nkids):
if k == rmvd:
continue
kk = k if k < rmvd else k - 1
ks = ci.child(kk)
self.assertTrue(isinstance(ks, ComponentItem))
self.assertTrue(isinstance(ks.parent, ComponentItem))
self.assertEqual(ks.childNumber(), kk)
self.assertEqual(ks.node, kds[k])
self.assertEqual(ks.parent.node, qdn)
self.assertEqual(ks.node.nodeName(), "kid%s" % k)
self.assertEqual(ks.parent, ci)
self.assertTrue(isinstance(ks.child(0), ComponentItem))
self.assertTrue(isinstance(ks.child(0).parent, ComponentItem))
self.assertEqual(ks.child(0).childNumber(), 0)
self.assertEqual(ks.child(0).node, tkds[k])
self.assertEqual(ks.child(0).parent.node, ks.node)
self.assertEqual(ks.child(0).node.nodeName(), "#text")
self.assertEqual(
ks.child(0).node.toText().data(), '\nText\n %s\n' % k)
self.assertEqual(ks.child(0).parent, ks)
if __name__ == '__main__':
unittest.main()
| nexdatas/configtool | test/ComponentModel_test.py | Python | gpl-3.0 | 46,177 |
# Copyright 2015-2021 Florian Bruhin (The Compiler) <[email protected]>
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.misc.msgbox."""
import pytest
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QMessageBox, QWidget
from qutebrowser.misc import msgbox
from qutebrowser.utils import utils
@pytest.fixture(autouse=True)
def patch_args(fake_args):
fake_args.no_err_windows = False
def test_attributes(qtbot):
"""Test basic QMessageBox attributes."""
title = 'title'
text = 'text'
parent = QWidget()
qtbot.add_widget(parent)
icon = QMessageBox.Critical
buttons = QMessageBox.Ok | QMessageBox.Cancel
box = msgbox.msgbox(parent=parent, title=title, text=text, icon=icon,
buttons=buttons)
qtbot.add_widget(box)
if not utils.is_mac:
assert box.windowTitle() == title
assert box.icon() == icon
assert box.standardButtons() == buttons
assert box.text() == text
assert box.parent() is parent
@pytest.mark.parametrize('plain_text, expected', [
(True, Qt.PlainText),
(False, Qt.RichText),
(None, Qt.AutoText),
])
def test_plain_text(qtbot, plain_text, expected):
box = msgbox.msgbox(parent=None, title='foo', text='foo',
icon=QMessageBox.Information, plain_text=plain_text)
qtbot.add_widget(box)
assert box.textFormat() == expected
def test_finished_signal(qtbot):
"""Make sure we can pass a slot to be called when the dialog finished."""
signal_triggered = False
def on_finished():
nonlocal signal_triggered
signal_triggered = True
box = msgbox.msgbox(parent=None, title='foo', text='foo',
icon=QMessageBox.Information, on_finished=on_finished)
qtbot.add_widget(box)
with qtbot.waitSignal(box.finished):
box.accept()
assert signal_triggered
def test_information(qtbot):
box = msgbox.information(parent=None, title='foo', text='bar')
qtbot.add_widget(box)
if not utils.is_mac:
assert box.windowTitle() == 'foo'
assert box.text() == 'bar'
assert box.icon() == QMessageBox.Information
def test_no_err_windows(fake_args, capsys):
fake_args.no_err_windows = True
box = msgbox.information(parent=None, title='foo', text='bar')
box.exec() # should do nothing
out, err = capsys.readouterr()
assert not out
assert err == 'Message box: foo; bar\n'
| forkbong/qutebrowser | tests/unit/misc/test_msgbox.py | Python | gpl-3.0 | 3,129 |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InlineQueryResultMpeg4Gif."""
from telegram import InlineQueryResult
class InlineQueryResultCachedMpeg4Gif(InlineQueryResult):
"""
Represents a link to a video animation (H.264/MPEG-4 AVC video without sound) stored on the
Telegram servers. By default, this animated MPEG-4 file will be sent by the user with an
optional caption. Alternatively, you can use :attr:`input_message_content` to send a message
with the specified content instead of the animation.
Attributes:
type (:obj:`str`): 'mpeg4_gif'.
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file.
title (:obj:`str`): Optional. Title for the result.
caption (:obj:`str`): Optional. Caption, 0-200 characters
parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the
message to be sent instead of the MPEG-4 file.
Args:
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file.
title (:obj:`str`, optional): Title for the result.
caption (:obj:`str`, optional): Caption, 0-200 characters
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
reply_markup (:class:`telegram.InlineKeyboardMarkup`, optional): Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`, optional): Content of the
message to be sent instead of the MPEG-4 file.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self,
id,
mpeg4_file_id,
title=None,
caption=None,
reply_markup=None,
input_message_content=None,
parse_mode=None,
**kwargs):
# Required
super(InlineQueryResultCachedMpeg4Gif, self).__init__('mpeg4_gif', id)
self.mpeg4_file_id = mpeg4_file_id
# Optionals
if title:
self.title = title
if caption:
self.caption = caption
if parse_mode:
self.parse_mode = parse_mode
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
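# Usage sketch (hypothetical handler; ``bot``, ``update`` and the file id are
# placeholders, not names defined in this module). A cached MPEG-4 result is
# normally built inside an inline-query callback and handed to
# bot.answer_inline_query():
#
#     def inline_query(bot, update):
#         results = [InlineQueryResultCachedMpeg4Gif(
#             id='1', mpeg4_file_id='<telegram-file-id>', caption='demo')]
#         bot.answer_inline_query(update.inline_query.id, results)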
| noam09/deluge-telegramer | telegramer/include/telegram/inline/inlinequeryresultcachedmpeg4gif.py | Python | gpl-3.0 | 3,901 |
from django import template
from django.core.urlresolvers import reverse
from django.utils.http import urlquote_plus
from apps.cms.models import Content
register = template.Library()
@register.simple_tag (takes_context=True)
def cms_content (context, key):
request = context['request']
can_edit = request.user.has_perm ('cms.change_content')
try:
obj = Content.objects.get (name=key)
except Content.DoesNotExist:
if not can_edit: return ''
url = reverse ('admin:cms_content_add') + '?name=' + key
return '<div class="small gray"><a href="%s">[add text]</a></div>' % url
if not can_edit:
return obj.content
url = reverse ('content-update', args=[obj.pk])
url += '?next=%s' % urlquote_plus (request.get_full_path())
    return obj.content + '<a href="%s" accesskey="e" class="admin-edit-link">Edit</a>' % url
# Note: returned string is automatically marked as safe
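# Template usage sketch (assumed template and key name; any string key works):
#
#     {% load cms %}
#     {% cms_content 'front-page-intro' %}
#
# Editors holding cms.change_content additionally get the inline edit link
# appended to the content; everyone else sees the stored content only.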
| normalnorway/normal.no | django/apps/cms/templatetags/cms.py | Python | gpl-3.0 | 942 |
# Copyright (c) 2014 RainMachine, Green Electronics LLC
# All rights reserved.
# Authors: Nicu Pavel <[email protected]>
# Codrin Juravle <[email protected]>
from datetime import datetime, timedelta, tzinfo
from math import sin, cos, asin, acos, sqrt
import time, calendar
import ctypes,os, fcntl, errno
from RMUtilsFramework.rmLogging import log
ZERO = timedelta(0)
Y2K38_MAX_YEAR = 2037
Y2K38_MAX_TIMESTAMP = 2147483647
# For monotonic time
class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long)
]
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
utc_t0 = datetime(1970, 1, 1, tzinfo=utc)
def rmYMDToTimestamp(year, month, day):
if year > Y2K38_MAX_YEAR: #Y2K38
year = Y2K38_MAX_YEAR
try:
return int(datetime(year, month, day).strftime("%s"))
except ValueError:
return int(time.mktime(datetime(year, month, day).timetuple())) # Windows platform doesn't have strftime(%s)
def rmYMDFromTimestamp(timestamp):
if timestamp > Y2K38_MAX_TIMESTAMP: #Y2K38
timestamp = Y2K38_MAX_TIMESTAMP
d = datetime.fromtimestamp(timestamp)
return d.year, d.month, d.day
def rmTimestampToDate(timestamp):
if timestamp > Y2K38_MAX_TIMESTAMP: #Y2K38
timestamp = Y2K38_MAX_TIMESTAMP
return datetime.fromtimestamp(timestamp)
def rmTimestampToDateAsString(timestamp, format = None):
if timestamp > Y2K38_MAX_TIMESTAMP: #Y2K38
timestamp = Y2K38_MAX_TIMESTAMP
if format:
return datetime.fromtimestamp(timestamp).strftime(format)
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def rmCurrentTimestampToDateAsString(format = None):
timestamp = int(time.time())
if format:
return datetime.fromtimestamp(timestamp).strftime(format)
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def rmTimestampToUtcDateAsString(timestamp, format = None):
if timestamp > Y2K38_MAX_TIMESTAMP: #Y2K38
timestamp = Y2K38_MAX_TIMESTAMP
if format:
return datetime.utcfromtimestamp(timestamp).strftime(format)
return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def rmTimestampFromDateAsString(dateString, format):
return int(datetime.strptime(dateString, format).strftime("%s"))
# Converts a date string in UTC format to a local timestamp (ex: 2019-05-20T12:00:00Z)
def rmTimestampFromUTCDateAsString(dateString, format):
dt = datetime.strptime(dateString, format)
return int((dt - datetime.utcfromtimestamp(0)).total_seconds())
def rmTimestampFromDateAsStringWithOffset(dateString):
# format in form of 2015-04-24T08:00:00-04:00 converted to UTC timestamp
if dateString is None:
return None
try:
sign = int(dateString[19:20] + '1')
(hour, minute) = [int(s) for s in dateString[20:].split(':')]
offset = sign * (hour * 60 * 60 + minute * 60)
except:
return None
try:
start_time = datetime.strptime(dateString[:19], "%Y-%m-%dT%H:%M:%S")
timestamp = int(calendar.timegm(start_time.timetuple())) - offset
except:
return None
return timestamp
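# Worked example (the value follows from the arithmetic above):
# "2015-04-24T08:00:00-04:00" is 12:00:00 UTC, so
#   rmTimestampFromDateAsStringWithOffset("2015-04-24T08:00:00-04:00")
# returns 1429876800; malformed strings return None instead of raising.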
def rmTimestampToYearMonthDay(timestamp):
d = datetime.fromtimestamp(timestamp)
return d.year, d.month, d.day
def rmNowToYearMonthDay():
d = datetime.now()
return d.year, d.month, d.day
def rmNormalizeTimestamp(timestamp):
return int(datetime.fromtimestamp(timestamp).strftime('%s'))
def rmTimestampToDayOfYear(timestamp):
if timestamp is None:
timestamp = rmCurrentDayTimestamp()
d = datetime.fromtimestamp(timestamp).timetuple()
return d.tm_yday
def rmNowDateTime():
return datetime.now()
def rmCurrentTimestamp():
return int(time.time())
def rmCurrentDayTimestamp():
return rmGetStartOfDay(int(time.time()))
def rmCurrentMinuteTimestamp():
timestamp = int(time.time())
return timestamp - (timestamp % 60)
def rmGetStartOfDay(timestamp):
tuple = datetime.fromtimestamp(timestamp).timetuple()
return int(datetime(tuple.tm_year, tuple.tm_mon, tuple.tm_mday).strftime("%s"))
def rmGetStartOfDayUtc(timestamp):
tuple = datetime.utcfromtimestamp(timestamp).timetuple()
dt = datetime(tuple.tm_year, tuple.tm_mon, tuple.tm_mday, tzinfo=utc)
return int((dt-utc_t0).total_seconds())
def rmTimestampIsLeapYear(timestamp):
d = datetime.fromtimestamp(timestamp)
#try:
# datetime(d.year, 2, 29)
# return True
#except ValueError:
# return False
if d.year % 400 == 0:
return True
elif d.year % 100 == 0:
return False
elif d.year % 4 == 0:
return True
return False
def rmConvertDateStringToFormat(dateString, inputFormat, outputFormat):
return datetime.strptime(dateString, inputFormat).strftime(outputFormat)
def rmDayRange(startDayTimestamp, numDays):
d = datetime.fromtimestamp(startDayTimestamp)
if numDays >=0:
dateList = [int(time.mktime( (d + timedelta(days=x)).timetuple() )) for x in range(0, numDays)]
else:
numDays = -numDays
dateList = [int(time.mktime( (d - timedelta(days=x)).timetuple() )) for x in range(0, numDays)]
return dateList
def rmDeltaDayFromTimestamp(startDayTimeStamp, deltaDays):
d = datetime.fromtimestamp(startDayTimeStamp)
if deltaDays < 0:
d = d - timedelta(days=-deltaDays)
else:
d = d + timedelta(days=deltaDays)
return int(time.mktime(d.timetuple()))
def rmGetNumberOfDaysBetweenTimestamps(startTimestamp, endTimestamp):
d1 = datetime.fromtimestamp(startTimestamp)
d2 = datetime.fromtimestamp(endTimestamp)
delta = d2-d1
return delta.days
# Sunrise and sunset for specific location and elevation
def computeSuntransitAndDayLenghtForDayTs(ts, lat, lon, elevation):
ts = rmGetStartOfDayUtc(ts)
n = julianDayFromTimestamp(ts)
J = __computeMeanSolarNoon(n, lon)
    M = __computeSolarMeanAnomaly(J)
C = __equationOfTheCenter(M)
L = __computeEclipticLongitude(M, C)
Jtr = computeSolarTransit(J, M, L)
delta = __computeSinSunDeclination(L)
w0 = computeHourAngle(lat, delta, elevation)
return Jtr, w0
def rmGetSunsetTimestampForDayTimestamp(ts, lat, lon, elevation):
    if lat is None or lon is None:
        log.debug("Latitude or longitude is not set. Returning same timestamp")
        return ts
    Jtr, w0 = computeSuntransitAndDayLenghtForDayTs(ts, lat, -lon, elevation)
    Jset = Jtr + w0/360
    tsJset = julianDayToUTC(Jset)
    return tsJset
def rmGetSunriseTimestampForDayTimestamp(ts, lat, lon, elevation):
if lat is None or lon is None:
log.debug("Latitude or longitude is not set. Returning same timestamp")
return ts
Jtr, w0 = computeSuntransitAndDayLenghtForDayTs(ts, lat, -lon, elevation)
Jrise = Jtr-w0/360
tsJrise = julianDayToUTC(Jrise)
return tsJrise
def julianDayFromTimestamp(ts):
ts = rmGetStartOfDayUtc(ts) + 12*3600
JD = float(ts)/86400 + 2440587.5
return JD - 2451545.0 + 0.0008
def julianDayToUTC(JD):
return (JD - 2440587.5)*86400
def __cosa(degree):
radian = degree/180*3.14159265359
return cos(radian)
def __sina(degree):
radian = degree/180*3.14159265359
return sin(radian)
def __acosa(x):
if abs(x) > 1:
return 180. if x< 0 else 0.
radian = acos(x)
return radian/3.14159265359*180.
def __asina(x):
if abs(x) > 1:
return -90. if x< 0 else 90.
radian = asin(x)
return radian/(3.14159265359)*180.
def __computeMeanSolarNoon(jd, wlon):
J = wlon/360 + jd
return J
def __computeSolarMeanAnomaly(solarNoon):  # degrees
return (357.5291 + 0.98560028*solarNoon)%360
def __equationOfTheCenter(solarMeanAnomaly): # constant from sine
M = solarMeanAnomaly
return 1.9148*__sina(M) + 0.0200*__sina(2*M) + 0.0003*__sina(3*M)
def __computeEclipticLongitude(solarMeanAnomaly, eqCenter):  # degrees (sum of sines)
L = (solarMeanAnomaly + eqCenter + 180 + 102.9372) % 360
return L
def computeSolarTransit(meanSolarNoon, solarMeanAnomaly, eclipticLongitude):  # subtract sine corrections from solar noon
Jtr = 2451545.0 + meanSolarNoon + (0.0053*__sina(solarMeanAnomaly) - 0.0069*__sina(2*eclipticLongitude))
return Jtr
def __computeSinSunDeclination(L):
delta = __sina(L)*__sina(23.439 )
return delta
def computeHourAngle(nlat, sdelta, elevation):
if elevation < 0:
elevation = 0
elevCoef = -2.076*sqrt(elevation)/60
cosw0 = (__sina(-0.83+elevCoef) - __sina(nlat)*sdelta)/ ( sqrt(1-sdelta*sdelta) * __cosa(nlat))
return __acosa(cosw0)
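# Usage sketch (coordinates/elevation below are assumed values for Berlin):
#   dayTs = rmCurrentDayTimestamp()
#   rise = rmGetSunriseTimestampForDayTimestamp(dayTs, 52.52, 13.40, 34)
#   sets = rmGetSunsetTimestampForDayTimestamp(dayTs, 52.52, 13.40, 34)
#   log.info(rmTimestampToUtcDateAsString(int(rise)))
# Both results are UTC timestamps; the standard sunrise-equation algorithm
# implemented above is typically accurate to about a minute at mid latitudes.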
def rmNTPFetch(server = "pool.ntp.org", withRequestDrift = False):
import struct
from socket import socket, AF_INET, SOCK_DGRAM
requestPacket = '\x1b' + 47 * '\0'
startTime = time.time()
try:
sock = socket(AF_INET, SOCK_DGRAM)
sock.settimeout(5)
except Exception, e:
log.error("NTPFetch: Can't create socket")
return None
try:
sock.sendto(requestPacket, (server, 123))
data, ip = sock.recvfrom(1024)
except Exception, e:
#log.error("NTPFetch: Error receiving data: %s" % e)
return None
try:
if data:
timestamp = struct.unpack('!12I', data)[10]
timestamp -= 2208988800L # = date in sec since epoch
# http://stackoverflow.com/questions/1599060/how-can-i-get-an-accurate-utc-time-with-python
if withRequestDrift:
reqTime = time.time() - startTime
timestamp += reqTime / 2
return timestamp
except:
log.error("NTPFetch: Conversion failed.")
return None
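# Usage sketch: query the default pool and fall back to the local clock when
# the request fails (rmNTPFetch returns None on any error):
#   ntpTs = rmNTPFetch()
#   if ntpTs is None:
#       ntpTs = rmCurrentTimestamp()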
def getAlarmElapsedRealTime():
### DEPRECATED: This method was used on Android to get the UP_TIME (replaced by monotonicTime())
elapsedTime = -1
try:
alarmFile = open("/dev/alarm", 'r')
if alarmFile:
t = timespec()
# ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME) = 0x40086134
result = fcntl.ioctl(alarmFile.fileno(), 0x40086134, t)
if result == 0:
elapsedTime = t.tv_sec
alarmFile.close()
except Exception, e:
log.error(e)
return elapsedTime
class rmMonotonicTime:
CLOCK_MONOTONIC_RAW = 4 # see <linux/time.h>
def __init__(self, fallback = True):
self.fallback = fallback
self.clock_gettime = None
self.get = None
self.monotonicInit()
def monotonicInit(self):
try:
from RMOSGlue.rmOSPlatform import RMOSPlatform
if RMOSPlatform().AUTODETECTED == RMOSPlatform.ANDROID:
librt = ctypes.CDLL('libc.so', use_errno=True)
log.info("Initialised Android monotonic clock")
elif RMOSPlatform().AUTODETECTED == RMOSPlatform.OPENWRT:
librt = ctypes.CDLL('librt.so.0', use_errno=True)
log.info("Initialised OpenWRT monotonic clock")
else:
librt = ctypes.CDLL('librt.so.1', use_errno=True)
log.info("Initialised generic monotonic clock")
self.clock_gettime = librt.clock_gettime
self.clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)]
self.get = self.monotonicTime
except Exception, e:
self.get = self.monotonicFallback
log.error("Cannot initialise monotonicClock will use fallback time.time() method !")
def monotonicFallback(self, asSeconds = True):
if asSeconds:
return int(time.time())
return time.time()
def monotonicTime(self, asSeconds = True):
t = timespec()
if self.clock_gettime(rmMonotonicTime.CLOCK_MONOTONIC_RAW , ctypes.pointer(t)) != 0:
errno_ = ctypes.get_errno()
if self.fallback:
log.info("Monotonic Clock Error ! Reverting to time.time() fallback")
return self.monotonicFallback(asSeconds)
else:
raise OSError(errno_, os.strerror(errno_))
if asSeconds:
return t.tv_sec
return t.tv_sec + t.tv_nsec * 1e-9
#-----------------------------------------------------------------------------------------------
#
#
#
globalMonotonicTime = rmMonotonicTime()
| sprinkler/rainmachine-developer-resources | sdk-parsers/RMUtilsFramework/rmTimeUtils.py | Python | gpl-3.0 | 12,413 |
#!/usr/bin/python
# -*- coding: utf8 -*-
from pprint import pprint
import sys,os
import random
import json
import gzip
import boto3
session = boto3.Session(
region_name='cn-north-1',
aws_access_key_id='xxxxxxxxxxxxxx',
aws_secret_access_key='xxxxxxxxxxxxxxxxxxxxxx'
)
sns = session.resource('sns')
sns_client = session.client('sns')
A=sns.create_topic(Name='abc').arn
#print(A)
#res = sns_client.list_topics()
#pprint(res)
#message={"create-time":"2019-04-23-15-59-04","synctoken":"1556035144","md5":"b7a7f68fad03bfe97ae401a6a126192e","url":"https://ip-ranges.amazonaws.com/ip-ranges.json"}
message={"create-time":"2019-04-23-15-59-04","synctoken":"1556035144","md5":"xxxxxxxxxx","url":"https://ip-ranges.amazonaws.com/ip-ranges.json"}
data={'default': json.dumps(message)}
print(json.dumps(data))
res = sns_client.publish(
TopicArn='arn:aws-cn:sns:cn-north-1:048912060910:AmazonIpSpaceChangedTest',
Message=json.dumps(data),
MessageStructure='json'
)
pprint(res)
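# Note: the payload above mirrors the shape of the AWS "AmazonIpSpaceChanged"
# notification, so a subscriber (for example a test Lambda) can be exercised
# without waiting for a real ip-ranges.json update; the md5 value, the
# credentials and the topic ARN are placeholders for this demo.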
| iceflow/aws-demo | sns/sns.py | Python | gpl-3.0 | 1,021 |
from tasty.types.driver import TestDriver
__params__ = {'la': 32, 'lb': 32}
driver = TestDriver()
def protocol(client, server, params):
la = params['la']
lb = params['lb']
client.a = Unsigned(bitlen=la).input(src=driver, desc='a')
client.b = Unsigned(bitlen=lb).input(src=driver, desc='b')
client.ga = Garbled(val=client.a)
client.gb = Garbled(val=client.b)
client.gc = client.ga + client.gb
server.gc <<= client.gc
server.c = Unsigned(val=server.gc)
server.c.output(dest=driver, desc='c')
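# Flow sketch: both inputs stay on the client and are converted to garbled
# values, the addition happens inside the garbled circuit, and only the
# garbled sum gc crosses the wire (server.gc <<= client.gc) before the server
# decodes it back to a plain Unsigned for output.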
| tastyproject/tasty | tasty/tests/functional/protocols/add/garbled_client_client_server/protocol_final.py | Python | gpl-3.0 | 531 |
#!/usr/bin/env python
import sys,sqlite3,os
if __name__ == "__main__":
if len(sys.argv) != 3:
print ("usage: update_database database_name division")
print ("\t\tdatabase_name = any database name that will be referred to later")
print ("\t\tdivision = the division as recognized by NCBI (used for downloading)")
print ("\t\t\texample: use pln for the plant division")
sys.exit(0)
database = sys.argv[1]
div = sys.argv[2]
    if not os.path.exists(database):
        print("database file doesn't exist -- run load_database instead")
sys.exit(0)
con = sqlite3.connect(database)
print("downloading taxonomy")
os.system("wget ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz")
os.system("tar -xzvf taxdump.tar.gz")
os.system("./update_names_dmp_pysqlite "+database+" names.dmp nodes.dmp")
os.system("./rebuild_tree_pysqlite "+database)
print("downloading sequences")
#os.system("wget ftp://ftp.ncbi.nih.gov/genbank/gb"+div+"*.seq.gz")
os.system("gunzip -d gb"+div+"*.seq.gz")
print("loading sequences")
os.system("./ungz_send_to_update_all_gb_files "+database+" . "+div)
#merge old ids with new ids in sequences
print("merging old ids with new ids")
os.system("./merge_old_names_in_sequences "+database+" merged.dmp")
print("done updating "+database)
con.close()
| chinchliff/autophy | other_scripts/src_stephen/update_database.py | Python | gpl-3.0 | 1,302 |
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWSumm.
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwsumm.data`
"""
import os.path
import operator
import tempfile
import shutil
from collections import OrderedDict
from urllib.request import urlopen
import pytest
from numpy import (arange, testing as nptest)
from lal.utils import CacheEntry
from glue.lal import Cache
from gwpy.timeseries import TimeSeries
from gwpy.detector import Channel
from gwpy.segments import (Segment, SegmentList)
from gwsumm import (data, globalv)
from gwsumm.data import (utils, mathutils)
from .common import empty_globalv_CHANNELS
__author__ = 'Duncan Macleod <[email protected]>'
LOSC_DATA = {
'H1:LOSC-STRAIN': ['https://losc.ligo.org/s/events/GW150914/'
'H-H1_LOSC_4_V1-1126259446-32.gwf'],
'L1:LOSC-STRAIN': ['https://losc.ligo.org/s/events/GW150914/'
'L-L1_LOSC_4_V1-1126259446-32.gwf'],
}
LOSC_SEGMENTS = SegmentList([Segment(1126259446, 1126259478)])
def download(remote, target=None):
"""Download a file
"""
if target is None:
suffix = os.path.splitext(remote)[1]
_, target = tempfile.mkstemp(suffix=suffix, prefix='gwsumm-tests-')
response = urlopen(remote)
with open(target, 'wb') as f:
f.write(response.read())
return target
class TestData(object):
"""Tests for :mod:`gwsumm.data`:
"""
@classmethod
def setup_class(cls):
cls.FRAMES = {}
cls._tempdir = tempfile.mkdtemp(prefix='gwsumm-test-data-')
# get data
for channel in LOSC_DATA:
cls.FRAMES[channel] = Cache()
for gwf in LOSC_DATA[channel]:
target = os.path.join(cls._tempdir, os.path.basename(gwf))
download(gwf, target)
cls.FRAMES[channel].append(CacheEntry.from_T050017(target))
@classmethod
def teardown_class(cls):
# remove the temporary data
shutil.rmtree(cls._tempdir)
# -- test utilities -------------------------
def test_find_frame_type(self):
channel = Channel('L1:TEST-CHANNEL')
assert data.find_frame_type(channel) == 'L1_R'
channel = Channel('C1:TEST-CHANNEL')
assert data.find_frame_type(channel) == 'R'
channel = Channel('H1:TEST-CHANNEL.rms,s-trend')
assert data.find_frame_type(channel) == 'H1_T'
channel = Channel('H1:TEST-CHANNEL.rms,m-trend')
assert data.find_frame_type(channel) == 'H1_M'
channel = Channel('H1:TEST-CHANNEL.rms,reduced')
assert data.find_frame_type(channel) == 'H1_LDAS_C02_L2'
channel = Channel('H1:TEST-CHANNEL.rms,online')
assert data.find_frame_type(channel) == 'H1_lldetchar'
def test_get_channel_type(self):
assert data.get_channel_type('L1:TEST-CHANNEL') == 'adc'
assert data.get_channel_type('G1:DER_DATA_HL') == 'proc'
assert data.get_channel_type('H1:GDS-CALIB_STRAIN') == 'proc'
assert data.get_channel_type('V1:GDS-CALIB_STRAIN') == 'adc'
@empty_globalv_CHANNELS
def test_make_globalv_key(self):
fftparams = utils.get_fftparams(
'L1:TEST-CHANNEL',
stride=123.456,
window='test-window',
method='scipy-welch',
)
key = utils.make_globalv_key('L1:TEST-CHANNEL', fftparams)
assert key == ';'.join([
'L1:TEST-CHANNEL', # channel
'scipy-welch', # method
'', # fftlength
'', # overlap
'test-window', # window
'123.456', # stride
'', # FFT scheme
])
def test_get_fftparams(self):
fftparams = utils.get_fftparams('L1:TEST-CHANNEL')
assert isinstance(fftparams, utils.FftParams)
for key in utils.FFT_PARAMS:
assert (getattr(fftparams, key) is
utils.DEFAULT_FFT_PARAMS.get(key, None))
fftparams = utils.get_fftparams('L1:TEST-CHANNEL', window='hanning',
overlap=0)
assert fftparams.window == 'hanning'
assert fftparams.overlap == 0
with pytest.raises(ZeroDivisionError):
utils.get_fftparams(None, stride=0)
@pytest.mark.parametrize('definition, math', [
(
'L1:TEST + L1:TEST2',
([('L1:TEST', 'L1:TEST2'), ([], [])], [operator.add]),
),
(
'L1:TEST + L1:TEST2 * 2',
([('L1:TEST', 'L1:TEST2'), ([], [(operator.mul, 2)])],
[operator.add]),
),
(
'L1:TEST * 2 + L1:TEST2 ^ 5',
([('L1:TEST', 'L1:TEST2'),
([(operator.mul, 2)], [(operator.pow, 5)])],
[operator.add]),
),
])
def test_parse_math_definition(self, definition, math):
chans, operators = mathutils.parse_math_definition(definition)
assert chans == OrderedDict(list(zip(*math[0])))
assert operators == math[1]
# -- test add/get methods -------------------
def test_add_timeseries(self):
a = TimeSeries([1, 2, 3, 4, 5], name='test name', epoch=0,
sample_rate=1)
# test simple add using 'name'
data.add_timeseries(a)
assert 'test name' in globalv.DATA
assert len(globalv.DATA['test name']) == 1
assert globalv.DATA['test name'][0] is a
# test add using key kwarg
data.add_timeseries(a, key='test key')
assert globalv.DATA['test key'][0] is a
# test add to existing key with coalesce
b = TimeSeries([6, 7, 8, 9, 10], name='test name 2', epoch=5,
sample_rate=1)
data.add_timeseries(b, key='test key', coalesce=True)
assert len(globalv.DATA['test key']) == 1
nptest.assert_array_equal(globalv.DATA['test key'][0].value,
arange(1, 11))
def test_get_timeseries(self):
# empty globalv.DATA
globalv.DATA = type(globalv.DATA)()
# test simple get after add
a = TimeSeries([1, 2, 3, 4, 5], name='test name', epoch=0,
sample_rate=1)
data.add_timeseries(a)
b, = data.get_timeseries('test name', [(0, 5)], nproc=1)
nptest.assert_array_equal(a.value, b.value)
assert a.sample_rate.value == b.sample_rate.value
# test more complicated add with a cache
a, = data.get_timeseries('H1:LOSC-STRAIN', LOSC_SEGMENTS,
cache=self.FRAMES['H1:LOSC-STRAIN'],
nproc=1)
b, = data.get_timeseries('H1:LOSC-STRAIN', LOSC_SEGMENTS,
nproc=1)
nptest.assert_array_equal(a.value, b.value)
@empty_globalv_CHANNELS
def test_get_spectrogram(self):
with pytest.raises(TypeError):
data.get_spectrogram('H1:LOSC-STRAIN', LOSC_SEGMENTS,
cache=self.FRAMES['H1:LOSC-STRAIN'],
nproc=1)
data.get_spectrogram('H1:LOSC-STRAIN', LOSC_SEGMENTS,
cache=self.FRAMES['H1:LOSC-STRAIN'],
stride=4, fftlength=2, overlap=1,
nproc=1)
def test_get_spectrum(self):
a, _, _ = data.get_spectrum('H1:LOSC-STRAIN', LOSC_SEGMENTS,
cache=self.FRAMES['H1:LOSC-STRAIN'],
nproc=1)
b, _, _ = data.get_spectrum('H1:LOSC-STRAIN', LOSC_SEGMENTS,
format='asd',
cache=self.FRAMES['H1:LOSC-STRAIN'],
nproc=1)
nptest.assert_array_equal(a.value ** (1/2.), b.value)
def test_get_coherence_spectrogram(self):
cache = Cache([e for c in self.FRAMES for e in self.FRAMES[c]])
data.get_coherence_spectrogram(
('H1:LOSC-STRAIN', 'L1:LOSC-STRAIN'), LOSC_SEGMENTS, cache=cache,
stride=4, fftlength=2, overlap=1, nproc=1,
)
def test_get_coherence_spectrum(self):
cache = Cache([e for c in self.FRAMES for e in self.FRAMES[c]])
data.get_coherence_spectrogram(
('H1:LOSC-STRAIN', 'L1:LOSC-STRAIN'), LOSC_SEGMENTS, cache=cache,
stride=4, fftlength=2, overlap=1, nproc=1,
)
| duncanmmacleod/gwsumm | gwsumm/tests/test_data.py | Python | gpl-3.0 | 9,035 |
# Copyright (C) 2012 Prayush Kumar
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import lal
import numpy
from numpy import sqrt, log, float128
from pycuda.elementwise import ElementwiseKernel
from pycbc.libutils import pkg_config_header_strings
from pycbc.types import FrequencySeries, zeros, Array, complex64
preamble = """
#include <lal/LALConstants.h>
"""
phenomC_text = """
/* ********* Main paper : Phys Rev D82, 064016 (2010) ********* */
const double f = (double) (i + kmin ) * delta_f;
const double fd = (double) m_sec * f;
const double v = (double) cbrt(piM*f);
const double v2 = v * v;
const double v3 = v * v * v;
const double v4 = v2 * v2;
const double v5 = v2 * v3;
const double v6 = v3 * v3;
const double v7 = v3 * v4;
const double w = (double) cbrt( m_sec * f );
const double w3 = (double) w * w * w;
/* ******************************************************* */
/* *********************** Phasing *********************** */
/* This is defined in Eq 5.1 - 5.9, 3.13 of the main paper */
/* ******************************************************* */
double phSPA = 1. + pfa2 * v2 + pfa3 * v3 + pfa4 * v4 +
(1. + log(v3)) * pfa5 * v5 + (pfa6 + pfa6log * log(v3))*v6 +
pfa7 * v7;
phSPA *= (pfaN / v5);
phSPA -= (LAL_PI/4.0);
double phPM = (a1/(w3 * w * w)) + (a2/w3) + (a3/w) + a4 + (a5 * w * w) +(a6 * w3);
phPM /= eta;
double phRD = b1 + b2*fd;
double wPlusf1 = 0.5*(1. + tanh( (4*(fd - Mf1)/d1) ));
double wMinusf1 = 0.5*(1. - tanh( (4*(fd - Mf1)/d1) ));
double wPlusf2 = 0.5*(1. + tanh( (4*(fd - Mf2)/d2) ));
double wMinusf2 = 0.5*(1. - tanh( (4*(fd - Mf2)/d2) ));
double phasing = (phSPA * ((double) wMinusf1)) + (phPM * ((double) wPlusf1 * wMinusf2)) +
(phRD * ((double) wPlusf2));
/* ******************************************************* */
/* ********************** Amplitude **************** */
/* *** This is defined in Eq 5.11 - 5.13, 3.10, 3.6 ****** */
/* ******************************************************* */
double xdot = 1. + xdota2 * v2 + xdota3 * v3 + xdota4 * v4 + xdota5 * v5 +
(xdota6 + xdota6log * log(v2)) * v6 + xdota7 * v7;
xdot *= (xdotaN * v5 * v5);
double omgdot = 0.0, ampfac = 0.0;
double ampSPA = 0.0, ampSPAre = 0.0, ampSPAim = 0.0;
/* If xdot becomes negative, take ampSPA = 0.0 */
/* This is valid because it becomes negative much after ISCO */
if( xdot > 0.0 )
{
omgdot = 1.5 * v * xdot;
ampfac = sqrt( LAL_PI / omgdot );
ampSPAre = ampfac * AN * v2 * (1. + A2 * v2 + A3 * v3 + A4 * v4 +
A5 * v5 + (A6 + A6log * log(v2)) * v6);
ampSPAim = ampfac * AN * v2 * (A5imag * v5 + A6imag * v6);
ampSPA = sqrt( ampSPAre * ampSPAre + ampSPAim * ampSPAim );
}
double ampPM = ampSPA + (g1 * pow(fd, 5./6.));
const double sig = Mfrd * del2 / Q;
double sig2 = sig * sig;
double L = sig2 / ((fd - Mfrd) * (fd - Mfrd) + sig2/4.);
double ampRD = del1 * L * pow( fd, -7./6.);
double wPlusf0 = 0.5*(1. + tanh( (4*(fd - Mf0)/d0) ));
double wMinusf0 = 0.5*(1. - tanh( (4*(fd - Mf0)/d0) ));
double amplitude = (ampPM * ((double) wMinusf0)) + (ampRD * ((double) wPlusf0));
amplitude /= distance;
/* ************** htilde **************** */
htilde[i]._M_re = amplitude * cos( phasing );
htilde[i]._M_im = -1.0 * amplitude * sin( phasing );
"""
phenomC_kernel = ElementwiseKernel("""pycuda::complex<double> *htilde, int kmin, double delta_f,
double eta, double Xi, double distance,
double m_sec, double piM, double Mfrd,
double pfaN, double pfa2, double pfa3, double pfa4,
double pfa5, double pfa6, double pfa6log, double pfa7,
double a1, double a2, double a3, double a4,
double a5, double a6, double b1, double b2,
double Mf1, double Mf2, double Mf0,
double d1, double d2, double d0,
double xdota2, double xdota3, double xdota4,
double xdota5, double xdota6, double xdota6log,
double xdota7, double xdotaN, double AN,
double A2, double A3, double A4, double A5,
double A5imag, double A6, double A6log, double A6imag,
double g1, double del1, double del2, double Q""",
phenomC_text, "phenomC_kernel",
preamble=preamble, options=pkg_config_header_strings(['lal']))
def FinalSpin( Xi, eta ):
"""Computes the spin of the final BH that gets formed after merger. This is done usingn Eq 5-6 of arXiv:0710.3345"""
s4 = -0.129
s5 = -0.384
t0 = -2.686
t2 = -3.454
t3 = 2.353
etaXi = eta * Xi
eta2 = eta*eta
finspin = (Xi + s4*Xi*etaXi + s5*etaXi*eta + t0*etaXi + 2.*(3.**0.5)*eta + t2*eta2 + t3*eta2*eta)
if finspin > 1.0:
raise ValueError("Value of final spin > 1.0. Aborting")
else:
return finspin
def fRD( a, M):
"""Calculate the ring-down frequency for the final Kerr BH. Using Eq. 5.5 of Main paper"""
f = (lal.C_SI**3.0 / (2.0*lal.PI*lal.G_SI*M*lal.MSUN_SI)) * (1.5251 - 1.1568*(1.0-a)**0.1292)
return f
def Qa( a ):
"""Calculate the quality factor of ring-down, using Eq 5.6 of Main paper"""
return (0.7 + 1.4187*(1.0-a)**-0.4990)
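# Worked example (numbers follow from the fits above, rounded): an equal-mass,
# non-spinning binary (eta = 0.25, Xi = 0) gives
#   FinalSpin(0., 0.25) = 2.*sqrt(3.)*0.25 - 3.454*0.25**2 + 2.353*0.25**3
# which is about 0.687 -- the familiar ~0.69 remnant spin -- and
# fRD(0.687, 20.) then comes out at roughly 850 Hz for a 20 Msun total mass.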
# The tanh windows of Eq 5.8 (wPlusf*/wMinusf*) are evaluated inline in the CUDA kernel above
def imrphenomc_tmplt(**kwds):
""" Return an IMRPhenomC waveform using CUDA to generate the phase and amplitude
Main Paper: arXiv:1005.3306
"""
# Pull out the input arguments
f_min = float128(kwds['f_lower'])
f_max = float128(kwds['f_final'])
delta_f = float128(kwds['delta_f'])
distance = float128(kwds['distance'])
mass1 = float128(kwds['mass1'])
mass2 = float128(kwds['mass2'])
spin1z = float128(kwds['spin1z'])
spin2z = float128(kwds['spin2z'])
if 'out' in kwds:
out = kwds['out']
else:
out = None
# Calculate binary parameters
M = mass1 + mass2
eta = mass1 * mass2 / (M * M)
Xi = (mass1 * spin1z / M) + (mass2 * spin2z / M)
Xisum = 2.*Xi
Xiprod = Xi*Xi
Xi2 = Xi*Xi
    m_sec = M * lal.MTSUN_SI
    piM = lal.PI * m_sec
## The units of distance given as input is taken to pe Mpc. Converting to SI
distance *= (1.0e6 * lal.PC_SI / (2. * sqrt(5. / (64.*lal.PI)) * M * lal.MRSUN_SI * M * lal.MTSUN_SI))
# Check if the value of f_max is correctly given, else replace with the fCut
# used in the PhenomB code in lalsimulation. The various coefficients come
# from Eq.(4.18) of http://arxiv.org/pdf/0710.2335 and
# Table I of http://arxiv.org/pdf/0712.0343
if not f_max:
f_max = (1.7086 * eta * eta - 0.26592 * eta + 0.28236) / piM
# Transform the eta, chi to Lambda parameters, using Eq 5.14, Table II of Main
# paper.
z101 = -2.417e-03
z102 = -1.093e-03
z111 = -1.917e-02
z110 = 7.267e-02
z120 = -2.504e-01
z201 = 5.962e-01
z202 = -5.600e-02
z211 = 1.520e-01
z210 = -2.970e+00
z220 = 1.312e+01
z301 = -3.283e+01
z302 = 8.859e+00
z311 = 2.931e+01
z310 = 7.954e+01
z320 = -4.349e+02
z401 = 1.619e+02
z402 = -4.702e+01
z411 = -1.751e+02
z410 = -3.225e+02
z420 = 1.587e+03
z501 = -6.320e+02
z502 = 2.463e+02
z511 = 1.048e+03
z510 = 3.355e+02
z520 = -5.115e+03
z601 = -4.809e+01
z602 = -3.643e+02
z611 = -5.215e+02
z610 = 1.870e+03
z620 = 7.354e+02
z701 = 4.149e+00
z702 = -4.070e+00
z711 = -8.752e+01
z710 = -4.897e+01
z720 = 6.665e+02
z801 = -5.472e-02
z802 = 2.094e-02
z811 = 3.554e-01
z810 = 1.151e-01
z820 = 9.640e-01
z901 = -1.235e+00
z902 = 3.423e-01
z911 = 6.062e+00
z910 = 5.949e+00
z920 = -1.069e+01
eta2 = eta*eta
Xi2 = Xiprod
# Calculate alphas, gamma, deltas from Table II and Eq 5.14 of Main paper
a1 = z101 * Xi + z102 * Xi2 + z111 * eta * Xi + z110 * eta + z120 * eta2
a2 = z201 * Xi + z202 * Xi2 + z211 * eta * Xi + z210 * eta + z220 * eta2
a3 = z301 * Xi + z302 * Xi2 + z311 * eta * Xi + z310 * eta + z320 * eta2
a4 = z401 * Xi + z402 * Xi2 + z411 * eta * Xi + z410 * eta + z420 * eta2
a5 = z501 * Xi + z502 * Xi2 + z511 * eta * Xi + z510 * eta + z520 * eta2
a6 = z601 * Xi + z602 * Xi2 + z611 * eta * Xi + z610 * eta + z620 * eta2
g1 = z701 * Xi + z702 * Xi2 + z711 * eta * Xi + z710 * eta + z720 * eta2
del1 = z801 * Xi + z802 * Xi2 + z811 * eta * Xi + z810 * eta + z820 * eta2
del2 = z901 * Xi + z902 * Xi2 + z911 * eta * Xi + z910 * eta + z920 * eta2
# Get the spin of the final BH
afin = FinalSpin( Xi, eta )
Q = Qa( abs(afin) )
# Get the fRD
frd = fRD( abs(afin), M)
Mfrd = frd * m_sec
# Define the frequencies where SPA->PM->RD
f1 = 0.1 * frd
Mf1 = m_sec * f1
f2 = frd
Mf2 = m_sec * f2
d1 = 0.005
d2 = 0.005
f0 = 0.98 * frd
Mf0 = m_sec * f0
d0 = 0.015
# Now use this frequency for calculation of betas
# calculate beta1 and beta2, that appear in Eq 5.7 in the main paper.
b2 = ((-5./3.)* a1 * pow(Mfrd,(-8./3.)) - a2/(Mfrd*Mfrd) - \
(a3/3.)*pow(Mfrd,(-4./3.)) + (2./3.)* a5 * pow(Mfrd,(-1./3.)) + a6)/eta
psiPMrd = (a1 * pow(Mfrd,(-5./3.)) + a2/Mfrd + a3 * pow(Mfrd,(-1./3.)) + \
a4 + a5 * pow(Mfrd,(2./3.)) + a6 * Mfrd)/eta
b1 = psiPMrd - (b2 * Mfrd)
### Calculate the PN coefficients, Eq A3 - A5 of main paper ###
pfaN = 3.0/(128.0 * eta)
pfa2 = (3715./756.) + (55.*eta/9.0)
pfa3 = -16.0*lal.PI + (113./3.)*Xi - 38.*eta*Xisum/3.
pfa4 = (152.93365/5.08032) - 50.*Xi2 + eta*(271.45/5.04 + 1.25*Xiprod) + \
3085.*eta2/72.
pfa5 = lal.PI*(386.45/7.56 - 65.*eta/9.) - \
Xi*(735.505/2.268 + 130.*eta/9.) + Xisum*(1285.0*eta/8.1 + 170.*eta2/9.) - \
10.*Xi2*Xi/3. + 10.*eta*Xi*Xiprod
pfa6 = 11583.231236531/4.694215680 - 640.0*lal.PI*lal.PI/3. - \
6848.0*lal.GAMMA/21. - 684.8*log(64.)/6.3 + \
eta*(2255.*lal.PI*lal.PI/12. - 15737.765635/3.048192) + \
76.055*eta2/1.728 - (127.825*eta2*eta/1.296) + \
2920.*lal.PI*Xi/3. - (175. - 1490.*eta)*Xi2/3. - \
(1120.*lal.PI/3. - 1085.*Xi/3.)*eta*Xisum + \
(269.45*eta/3.36 - 2365.*eta2/6.)*Xiprod
pfa6log = -6848./63.
pfa7 = lal.PI*(770.96675/2.54016 + 378.515*eta/1.512 - 740.45*eta2/7.56) - \
Xi*(20373.952415/3.048192 + 1509.35*eta/2.24 - 5786.95*eta2/4.32) + \
Xisum*(4862.041225*eta/1.524096 + 1189.775*eta2/1.008 - 717.05*eta2*eta/2.16 - 830.*eta*Xi2/3. + 35.*eta2*Xiprod/3.) - \
560.*lal.PI*Xi2 + 20.*lal.PI*eta*Xiprod + \
Xi2*Xi*(945.55/1.68 - 85.*eta) + Xi*Xiprod*(396.65*eta/1.68 + 255.*eta2)
xdotaN = 64.*eta/5.
xdota2 = -7.43/3.36 - 11.*eta/4.
xdota3 = 4.*lal.PI - 11.3*Xi/1.2 + 19.*eta*Xisum/6.
xdota4 = 3.4103/1.8144 + 5*Xi2 + eta*(13.661/2.016 - Xiprod/8.) + 5.9*eta2/1.8
xdota5 = -lal.PI*(41.59/6.72 + 189.*eta/8.) - Xi*(31.571/1.008 - 116.5*eta/2.4) + \
Xisum*(21.863*eta/1.008 - 79.*eta2/6.) - 3*Xi*Xi2/4. + \
9.*eta*Xi*Xiprod/4.
xdota6 = 164.47322263/1.39708800 - 17.12*lal.GAMMA/1.05 + \
16.*lal.PI*lal.PI/3 - 8.56*log(16.)/1.05 + \
eta*(45.1*lal.PI*lal.PI/4.8 - 561.98689/2.17728) + \
5.41*eta2/8.96 - 5.605*eta*eta2/2.592 - 80.*lal.PI*Xi/3. + \
eta*Xisum*(20.*lal.PI/3. - 113.5*Xi/3.6) + \
Xi2*(64.153/1.008 - 45.7*eta/3.6) - \
Xiprod*(7.87*eta/1.44 - 30.37*eta2/1.44)
xdota6log = -856./105.
xdota7 = -lal.PI*(4.415/4.032 - 358.675*eta/6.048 - 91.495*eta2/1.512) - \
Xi*(252.9407/2.7216 - 845.827*eta/6.048 + 415.51*eta2/8.64) + \
Xisum*(158.0239*eta/5.4432 - 451.597*eta2/6.048 + 20.45*eta2*eta/4.32 + 107.*eta*Xi2/6. - 5.*eta2*Xiprod/24.) + \
12.*lal.PI*Xi2 - Xi2*Xi*(150.5/2.4 + eta/8.) + \
Xi*Xiprod*(10.1*eta/2.4 + 3.*eta2/8.)
AN = 8.*eta*sqrt(lal.PI/5.)
A2 = (-107. + 55.*eta)/42.
A3 = 2.*lal.PI - 4.*Xi/3. + 2.*eta*Xisum/3.
A4 = -2.173/1.512 - eta*(10.69/2.16 - 2.*Xiprod) + 2.047*eta2/1.512
A5 = -10.7*lal.PI/2.1 + eta*(3.4*lal.PI/2.1)
A5imag = -24.*eta
A6 = 270.27409/6.46800 - 8.56*lal.GAMMA/1.05 + \
2.*lal.PI*lal.PI/3. + \
eta*(4.1*lal.PI*lal.PI/9.6 - 27.8185/3.3264) - \
20.261*eta2/2.772 + 11.4635*eta*eta2/9.9792 - \
4.28*log(16.)/1.05
A6log = -428./105.
A6imag = 4.28*lal.PI/1.05
### Define other parameters needed by waveform generation ###
kmin = int(f_min / delta_f)
kmax = int(f_max / delta_f)
    n = kmax + 1
if not out:
htilde = FrequencySeries(zeros(n,dtype=numpy.complex128), delta_f=delta_f, copy=False)
else:
if type(out) is not Array:
raise TypeError("Output must be an instance of Array")
if len(out) < kmax:
raise TypeError("Output array is too small")
if out.dtype != complex64:
raise TypeError("Output array is the wrong dtype")
htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
phenomC_kernel(htilde.data[kmin:kmax], kmin, delta_f, eta, Xi, distance,
m_sec, piM, Mfrd,
pfaN, pfa2, pfa3, pfa4, pfa5, pfa6, pfa6log, pfa7,
a1, a2, a3, a4, a5, a6, b1, b2,
Mf1, Mf2, Mf0, d1, d2, d0,
xdota2, xdota3, xdota4, xdota5, xdota6, xdota6log,
xdota7, xdotaN, AN, A2, A3, A4, A5,
A5imag, A6, A6log, A6imag,
g1, del1, del2, Q )
hp = htilde
hc = htilde * 1j
return hp, hc
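# Minimal usage sketch (parameter values are hypothetical; it is assumed that
# this template is reachable through pycbc's generic frequency-domain
# waveform interface):
#
#     from pycbc.waveform import get_fd_waveform
#     hp, hc = get_fd_waveform(approximant="IMRPhenomC",
#                              mass1=10.0, mass2=10.0,
#                              spin1z=0.0, spin2z=0.0,
#                              distance=100.0, f_lower=20.0,
#                              delta_f=1.0/256)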
| hagabbar/pycbc_copy | pycbc/waveform/pycbc_phenomC_tmplt.py | Python | gpl-3.0 | 14,998 |
#
# This file is part of Freedom Maker.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Basic image builder using vmdebootstrap.
"""
import json
import logging
import shutil
import subprocess
logger = logging.getLogger(__name__)
class VmdebootstrapBuilderBackend():
"""Build an image using vmdebootstrap tool."""
def __init__(self, builder):
"""Initialize the builder."""
self.builder = builder
self.parameters = []
self.environment = []
self.execution_wrapper = []
def make_image(self):
"""Create a disk image."""
if self.builder.should_skip_step(self.builder.image_file):
logger.info('Image exists, skipping build - %s',
self.builder.image_file)
return
temp_image_file = self.builder.get_temp_image_file()
logger.info('Building image in temporary file - %s', temp_image_file)
self.execution_wrapper = ['sudo', '-H']
self.parameters = [
'--hostname',
self.builder.arguments.hostname,
'--image',
temp_image_file,
'--size',
self.builder.arguments.image_size,
'--mirror',
self.builder.arguments.build_mirror,
'--distribution',
self.builder.arguments.distribution,
'--arch',
self.builder.architecture,
'--lock-root-password',
'--log',
self.builder.log_file,
'--log-level',
self.builder.arguments.log_level,
'--verbose',
'--customize',
self.builder.customization_script,
]
self.environment = {
'MIRROR': self.builder.arguments.mirror,
'BUILD_MIRROR': self.builder.arguments.build_mirror,
'MACHINE': self.builder.machine,
'SOURCE': 'true'
if self.builder.arguments.download_source else 'false',
'SOURCE_IN_IMAGE': 'true'
if self.builder.arguments.include_source else 'false',
'SUITE': self.builder.arguments.distribution,
'ENABLE_NONFREE': 'no' if self.builder.free else 'yes',
}
self.process_variant()
self.process_architecture()
self.process_boot_loader()
self.process_kernel_flavor()
self.process_filesystems()
self.process_packages()
self.process_custom_packages()
self.process_environment()
command = self.execution_wrapper + [
self.builder.arguments.vmdebootstrap
] + self.parameters
try:
self.builder._run(command)
finally:
self._cleanup_vmdebootstrap(temp_image_file)
logger.info('Moving file: %s -> %s', temp_image_file,
self.builder.image_file)
self.builder._run(
['sudo', 'mv', temp_image_file, self.builder.image_file])
def _cleanup_vmdebootstrap(self, image_file):
"""Cleanup those that vmdebootstrap is supposed to have cleaned up."""
# XXX: Remove this when vmdebootstrap removes kpartx mappings properly
# after a successful build.
process = subprocess.run(['/sbin/losetup', '--json'],
stdout=subprocess.PIPE,
check=True)
output = process.stdout.decode()
if not output:
return
loop_data = json.loads(output)
loop_device = None
for device_data in loop_data['loopdevices']:
if image_file == device_data['back-file']:
loop_device = device_data['name']
break
if not loop_device:
return
partition_devices = [
'/dev/mapper/' + loop_device.split('/')[-1] + 'p' + str(number)
for number in range(1, 4)
]
# Don't log command, ignore errors, force
for device in partition_devices:
subprocess.run(['sudo', 'dmsetup', 'remove', '-f', device])
subprocess.run(['sudo', 'losetup', '-d', loop_device])
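    # The parsing above assumes `losetup --json` output shaped roughly like
    # the following (device name and backing file path are illustrative):
    #
    #     {"loopdevices": [
    #         {"name": "/dev/loop0", "back-file": "/tmp/image.img", ...}
    #     ]}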
def process_variant(self):
"""Add paramaters for deboostrap variant."""
if self.builder.debootstrap_variant:
self.parameters += [
'--debootstrapopts',
'variant=' + self.builder.debootstrap_variant
]
def process_architecture(self):
"""Add parameters specific to the architecture."""
if self.builder.architecture not in ('i386', 'amd64'):
self.parameters += ['--foreign', '/usr/bin/qemu-arm-static']
# Using taskset to pin build process to single core. This
# is a workaround for a qemu-user-static issue that causes
# builds to hang. (See Debian bug #769983 for details.)
self.execution_wrapper = \
['taskset', '0x01'] + self.execution_wrapper
def process_boot_loader(self):
"""Add parameters related to boot loader."""
option_map = {
'grub': ['--grub'],
'u-boot': ['--no-extlinux'],
None: ['--no-extlinux']
}
self.parameters += option_map[self.builder.boot_loader]
if self.builder.boot_loader == 'u-boot':
self.parameters += [
'--package', 'u-boot-tools', '--package', 'u-boot'
]
if self.builder.boot_size:
self.parameters += ['--bootsize', self.builder.boot_size]
if self.builder.boot_offset:
self.parameters += ['--bootoffset', self.builder.boot_offset]
def process_kernel_flavor(self):
"""Add parameters for kernel flavor."""
if self.builder.kernel_flavor == 'default':
return
if self.builder.kernel_flavor is None:
self.parameters += ['--no-kernel']
return
self.parameters += [
'--kernel-package', 'linux-image-' + self.builder.kernel_flavor
]
def process_filesystems(self):
"""Add parameters necessary for file systems."""
self.parameters += ['--roottype', self.builder.root_filesystem_type]
if self.builder.boot_filesystem_type:
self.parameters += [
'--boottype', self.builder.boot_filesystem_type
]
if 'btrfs' in [
self.builder.root_filesystem_type,
self.builder.boot_filesystem_type
]:
self.builder.packages += ['btrfs-progs']
def process_packages(self):
"""Add parameters for additional packages to install in image."""
for package in self.builder.packages + (self.builder.arguments.package
or []):
self.parameters += ['--package', package]
def process_custom_packages(self):
"""Add parameters for custom DEB packages to install in image."""
for package in (self.builder.arguments.custom_package or []):
if 'plinth_' in package:
self.environment['CUSTOM_PLINTH'] = package
elif 'freedombox-setup_' in package:
self.environment['CUSTOM_SETUP'] = package
else:
self.parameters += ['--custom-package', package]
def process_environment(self):
"""Add environment we wish to pass to the command wrapper: sudo."""
for key, value in self.environment.items():
self.execution_wrapper += [key + '=' + value]
| peacekeeper/freedom-maker | freedommaker/vmdebootstrap.py | Python | gpl-3.0 | 8,089 |
"""
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Simple starter for stand-alone RESTx server.
"""
import os
import sys
import time
import getopt
# RESTx imports
import restx.settings as settings
import restx.logger as logger
from restx.core import RequestDispatcher
from restx.platform_specifics import *
from org.mulesoft.restx import Settings
from org.mulesoft.restx.util import Url
from org.mulesoft.restx.component.api import *
def print_help():
print \
"""
RESTx server (c) 2010 MuleSoft
Usage: jython starter.py [options]
Options:
-h, --help
Print this help screen.
-P, --port <num>
Port on which the server listens for requests.
-p, --pidfile <filename>
If specified, the PID of the server is stored in <filename>.
-l, --logfile <filename>
If specified, the filename for the logfile. If not specified,
output will go to the console.
-r, --rootdir <dirname>
Root directory of the RESTx install
"""
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "hl:P:p:r:", ["help", "logfile=", "port=", "pidfile=", "rootdir="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
print_help()
sys.exit(1)
port = settings.LISTEN_PORT
for o, a in opts:
if o in ("-p", "--pidfile"):
# Writing our process ID
pid = os.getpid()
f = open(a, "w")
f.write(str(pid))
f.close()
elif o in ("-h", "--help"):
print_help()
sys.exit(0)
elif o in ("-P", "--port"):
port = int(a)
elif o in ("-r", "--rootdir"):
rootdir = str(a)
settings.set_root_dir(rootdir)
elif o in ("-l", "--logfile"):
logger.set_logfile(a)
my_server = HttpServer(port, RequestDispatcher())
| yyamano/RESTx | src/python/starter.py | Python | gpl-3.0 | 2,796 |
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
from collections import defaultdict
from contextlib import ExitStack
import functools
from pathlib import Path
from ..helpers import TemporaryObject
from ..korlib import ConsoleToggler
from PyHSPlasma import *
from . import animation
from . import camera
from . import decal
from . import explosions
from . import etlight
from . import image
from . import locman
from . import logger
from . import manager
from . import mesh
from . import outfile
from . import physics
from . import rtlight
from . import utils
class Exporter:
def __init__(self, op):
self._op = op # Blender export operator
self._objects = []
self.actors = set()
self.want_node_trees = defaultdict(set)
self.exported_nodes = {}
def run(self):
log = logger.ExportVerboseLogger if self._op.verbose else logger.ExportProgressLogger
with ConsoleToggler(self._op.show_console), log(self._op.filepath) as self.report, ExitStack() as self.exit_stack:
# Step 0: Init export resmgr and stuff
self.mgr = manager.ExportManager(self)
self.mesh = mesh.MeshConverter(self)
self.physics = physics.PhysicsConverter(self)
self.light = rtlight.LightConverter(self)
self.animation = animation.AnimationConverter(self)
self.output = outfile.OutputFiles(self, self._op.filepath)
self.camera = camera.CameraConverter(self)
self.image = image.ImageCache(self)
self.locman = locman.LocalizationConverter(self)
self.decal = decal.DecalConverter(self)
self.oven = etlight.LightBaker(mesh=self.mesh, report=self.report)
# Step 0.8: Init the progress mgr
self.mesh.add_progress_presteps(self.report)
self.report.progress_add_step("Collecting Objects")
self.report.progress_add_step("Verify Competence")
self.report.progress_add_step("Touching the Intangible")
self.report.progress_add_step("Harvesting Actors")
if self._op.lighting_method != "skip":
etlight.LightBaker.add_progress_steps(self.report)
self.report.progress_add_step("Exporting Scene Objects")
self.report.progress_add_step("Exporting Logic Nodes")
self.report.progress_add_step("Finalizing Plasma Logic")
self.report.progress_add_step("Handling Snakes")
self.report.progress_add_step("Exporting Textures")
self.report.progress_add_step("Composing Geometry")
self.report.progress_add_step("Saving Age Files")
self.report.progress_start("EXPORTING AGE")
# Step 0.9: Apply modifiers to all meshes temporarily.
with self.mesh:
# Step 1: Create the age info and the pages
self._export_age_info()
# Step 2: Gather a list of objects that we need to export, given what the user has told
# us to export (both in the Age and Object Properties)... fun
self._collect_objects()
# Step 2.1: Run through all the objects we collected in Step 2 and make sure there
# is no ruddy funny business going on.
self._check_sanity()
# Step 2.2: Run through all the objects again and ask them to "pre_export" themselves.
# In other words, generate any ephemeral Blender objects that need to be exported.
self._pre_export_scene_objects()
# Step 2.5: Run through all the objects we collected in Step 2 and see if any relationships
# that the artist made requires something to have a CoordinateInterface
self._harvest_actors()
# Step 2.9: It is assumed that static lighting is available for the mesh exporter.
# Indeed, in PyPRP it was a manual step. So... BAKE NAO!
self._bake_static_lighting()
# Step 3: Export all the things!
self._export_scene_objects()
# Step 3.1: Ensure referenced logic node trees are exported
self._export_referenced_node_trees()
# Step 3.2: Now that all Plasma Objects (save Mipmaps) are exported, we do any post
# processing that needs to inspect those objects
self._post_process_scene_objects()
# Step 3.3: Ensure any helper Python files are packed
self._pack_ancillary_python()
# Step 4: Finalize...
self.mesh.material.finalize()
self.mesh.finalize()
# Step 5: FINALLY. Let's write the PRPs and crap.
self._save_age()
# Step 5.1: Save out the export report.
# If the export fails and this doesn't save, we have bigger problems than
# these little warnings and notices.
self.report.progress_end()
self.report.save()
# Step 5.2: If any nonfatal errors were encountered during the export, we will
# raise them here, now that everything is finished, to draw attention
# to whatever the problem might be.
self.report.raise_errors()
def _bake_static_lighting(self):
if self._op.lighting_method != "skip":
self.oven.bake_static_lighting(self._objects)
def _collect_objects(self):
scene = bpy.context.scene
self.report.progress_advance()
self.report.progress_range = len(scene.objects)
inc_progress = self.report.progress_increment
# Grab a naive listing of enabled pages
age = scene.world.plasma_age
pages_enabled = frozenset((page.name for page in age.pages if page.enabled and self._op.version in page.version))
all_pages = frozenset((page.name for page in age.pages))
# Because we can have an unnamed or a named default page, we need to see if that is enabled...
for page in age.pages:
if page.seq_suffix == 0:
default_enabled = page.enabled
default_inited = True
break
else:
default_enabled = True
default_inited = False
# Now we loop through the objects with some considerations:
# - The default page may or may not be defined. If it is, it can be disabled. If not, it
# can only ever be enabled.
# - Don't create the Default page unless it is used (implicit or explicit). It is a failure
# to export a useless file.
# - Any arbitrary page can be disabled, so check our frozenset.
# - Also, someone might have specified an invalid page, so keep track of that.
error = explosions.UndefinedPageError()
for obj in scene.objects:
if obj.plasma_object.enabled:
page = obj.plasma_object.page
if not page and not default_inited:
self.mgr.create_page(self.age_name, "Default", 0)
default_inited = True
if (default_enabled and not page) or (page in pages_enabled):
self._objects.append(obj)
elif page not in all_pages:
error.add(page, obj.name)
inc_progress()
error.raise_if_error()
def _check_sanity(self):
self.report.progress_advance()
self.report.progress_range = len(self._objects)
inc_progress = self.report.progress_increment
self.report.msg("\nEnsuring Age is sane...")
for bl_obj in self._objects:
for mod in bl_obj.plasma_modifiers.modifiers:
fn = getattr(mod, "sanity_check", None)
if fn is not None:
fn()
inc_progress()
self.report.msg("... Age is grinning and holding a spatula. Must be OK, then.")
def _export_age_info(self):
# Make life slightly easier...
age_info = bpy.context.scene.world.plasma_age
age_name = self.age_name
mgr = self.mgr
# Generate the plAgeInfo
mgr.AddAge(age_info.export(self))
# Create all the pages we need
ver = self._op.version
for page in age_info.pages:
if page.enabled and ver in page.version:
mgr.create_page(age_name, page.name, page.seq_suffix)
mgr.create_builtins(age_name, age_info.use_texture_page)
def _export_actor(self, so, bo):
"""Exports a Coordinate Interface if we need one"""
if self.has_coordiface(bo):
self._export_coordinate_interface(so, bo)
# If this object has a parent, then we will need to go upstream and add ourselves to the
# parent's CoordinateInterface... Because life just has to be backwards.
parent = bo.parent
if parent is not None:
if parent.plasma_object.enabled:
self.report.msg("Attaching to parent SceneObject '{}'", parent.name, indent=1)
parent_ci = self._export_coordinate_interface(None, parent)
parent_ci.addChild(so.key)
else:
self.report.warn("You have parented Plasma Object '{}' to '{}', which has not been marked for export. \
The object may not appear in the correct location or animate properly.".format(
bo.name, parent.name))
def _export_coordinate_interface(self, so, bl):
"""Ensures that the SceneObject has a CoordinateInterface"""
if so is None:
so = self.mgr.find_create_object(plSceneObject, bl=bl)
if so.coord is None:
ci_cls = bl.plasma_object.ci_type
ci = self.mgr.add_object(ci_cls, bl=bl, so=so)
# Now we have the "fun" work of filling in the CI
ci.localToWorld = utils.matrix44(bl.matrix_basis)
ci.worldToLocal = ci.localToWorld.inverse()
ci.localToParent = utils.matrix44(bl.matrix_local)
ci.parentToLocal = ci.localToParent.inverse()
return ci
return so.coord.object
def _export_scene_objects(self):
self.report.progress_advance()
self.report.progress_range = len(self._objects)
inc_progress = self.report.progress_increment
log_msg = self.report.msg
for bl_obj in self._objects:
log_msg("\n[SceneObject '{}']".format(bl_obj.name))
# First pass: do things specific to this object type.
# note the function calls: to export a MESH, it's _export_mesh_blobj
export_fn = "_export_{}_blobj".format(bl_obj.type.lower())
try:
export_fn = getattr(self, export_fn)
except AttributeError:
self.report.warn("""'{}' is a Plasma Object of Blender type '{}'
... And I have NO IDEA what to do with that! Tossing.""".format(bl_obj.name, bl_obj.type))
continue
log_msg("Blender Object '{}' of type '{}'".format(bl_obj.name, bl_obj.type), indent=1)
# Create a sceneobject if one does not exist.
# Before we call the export_fn, we need to determine if this object is an actor of any
# sort, and barf out a CI.
sceneobject = self.mgr.find_create_object(plSceneObject, bl=bl_obj)
self._export_actor(sceneobject, bl_obj)
export_fn(sceneobject, bl_obj)
# And now we puke out the modifiers...
for mod in bl_obj.plasma_modifiers.modifiers:
log_msg("Exporting '{}' modifier".format(mod.bl_label), indent=1)
mod.export(self, bl_obj, sceneobject)
inc_progress()
def _export_camera_blobj(self, so, bo):
# Hey, guess what? Blender's camera data is utter crap!
# NOTE: Animation export is dependent on camera type, so we'll do that later.
camera = bo.data.plasma_camera
self.camera.export_camera(so, bo, camera.camera_type, camera.settings, camera.transitions)
def _export_empty_blobj(self, so, bo):
self.animation.convert_object_animations(bo, so)
def _export_lamp_blobj(self, so, bo):
self.animation.convert_object_animations(bo, so)
self.light.export_rtlight(so, bo)
def _export_mesh_blobj(self, so, bo):
self.animation.convert_object_animations(bo, so)
if bo.data.materials:
self.mesh.export_object(bo, so)
else:
self.report.msg("No material(s) on the ObData, so no drawables", indent=1)
def _export_font_blobj(self, so, bo):
self.animation.convert_object_animations(bo, so)
with utils.temporary_mesh_object(bo) as meshObj:
if bo.data.materials:
self.mesh.export_object(meshObj, so)
else:
self.report.msg("No material(s) on the ObData, so no drawables", indent=1)
def _export_referenced_node_trees(self):
self.report.progress_advance()
self.report.progress_range = len(self.want_node_trees)
inc_progress = self.report.progress_increment
self.report.msg("\nChecking Logic Trees...")
for tree_name, references in self.want_node_trees.items():
self.report.msg("NodeTree '{}'", tree_name, indent=1)
tree = bpy.data.node_groups[tree_name]
for bo, so in references:
tree.export(self, bo, so)
inc_progress()
def _harvest_actors(self):
self.report.progress_advance()
self.report.progress_range = len(self._objects) + len(bpy.data.textures)
inc_progress = self.report.progress_increment
for bl_obj in self._objects:
for mod in bl_obj.plasma_modifiers.modifiers:
if mod.enabled:
self.actors.update(mod.harvest_actors())
inc_progress()
# This is a little hacky, but it's an edge case... I guess?
# We MUST have CoordinateInterfaces for EnvironmentMaps (DCMs, bah)
for texture in bpy.data.textures:
envmap = getattr(texture, "environment_map", None)
if envmap is not None:
viewpt = envmap.viewpoint_object
if viewpt is not None:
self.actors.add(viewpt.name)
inc_progress()
def has_coordiface(self, bo):
if bo.type in {"CAMERA", "EMPTY", "LAMP"}:
return True
if bo.parent is not None:
return True
if bo.name in self.actors:
return True
if bo.plasma_object.has_transform_animation:
return True
for mod in bo.plasma_modifiers.modifiers:
if mod.enabled:
if mod.requires_actor:
return True
return False
def _post_process_scene_objects(self):
self.report.progress_advance()
self.report.progress_range = len(self._objects)
inc_progress = self.report.progress_increment
self.report.msg("\nPost-Processing SceneObjects...")
mat_mgr = self.mesh.material
for bl_obj in self._objects:
sceneobject = self.mgr.find_object(plSceneObject, bl=bl_obj)
if sceneobject is None:
# no SO? fine then. turd.
continue
# Synchronization is applied for the root SO and all animated layers (WTF)
            # So, we have to keep in mind shared layers (whee) in the synch options code
net = bl_obj.plasma_net
net.propagate_synch_options(sceneobject, sceneobject)
for mat in mat_mgr.get_materials(bl_obj):
for layer in mat.object.layers:
layer = layer.object
if isinstance(layer, plLayerAnimation):
net.propagate_synch_options(sceneobject, layer)
# Modifiers don't have to expose post-processing, but if they do, run it
for mod in bl_obj.plasma_modifiers.modifiers:
proc = getattr(mod, "post_export", None)
if proc is not None:
self.report.msg("Post processing '{}' modifier '{}'", bl_obj.name, mod.bl_label, indent=1)
proc(self, bl_obj, sceneobject)
inc_progress()
def _pre_export_scene_objects(self):
self.report.progress_advance()
self.report.progress_range = len(self._objects)
inc_progress = self.report.progress_increment
self.report.msg("\nGenerating export dependency objects...")
        # New objects may be generated during this process; they will be appended at the end.
new_objects = []
@functools.singledispatch
def handle_temporary(temporary, parent):
raise RuntimeError("Temporary object of type '{}' generated by '{}' was unhandled".format(temporary.__class__, parent.name))
@handle_temporary.register(bpy.types.Object)
def _(temporary, parent):
self.exit_stack.enter_context(TemporaryObject(temporary, bpy.data.objects.remove))
self.report.msg("'{}': generated Object '{}' (Plasma Object: {})", parent.name,
temporary.name, temporary.plasma_object.enabled, indent=1)
if temporary.plasma_object.enabled:
new_objects.append(temporary)
# If the object is marked as a Plasma Object, be sure that we go into the same page
# as the requestor, unless the modifier decided it knows better.
if not temporary.plasma_object.property_set("page"):
temporary.plasma_object.page = parent.plasma_object.page
# Wow, recursively generated objects. Aren't you special?
for mod in temporary.plasma_modifiers.modifiers:
mod.sanity_check()
do_pre_export(temporary)
@handle_temporary.register(bpy.types.NodeTree)
def _(temporary, parent):
self.exit_stack.enter_context(TemporaryObject(temporary, bpy.data.node_groups.remove))
self.report.msg("'{}' generated NodeTree '{}'", parent.name, temporary.name)
if temporary.bl_idname == "PlasmaNodeTree":
parent_so = self.mgr.find_create_object(plSceneObject, bl=parent)
self.want_node_trees[temporary.name].add((parent, parent_so))
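        # handle_temporary dispatches on the runtime type of its first argument
        # (functools.singledispatch): a generated bpy.types.Object takes the
        # first registered branch, a bpy.types.NodeTree the second, and any
        # other type falls through to the base implementation, which raises.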
def do_pre_export(bo):
for mod in bo.plasma_modifiers.modifiers:
for i in filter(None, mod.pre_export(self, bo)):
handle_temporary(i, bo)
for bl_obj in self._objects:
do_pre_export(bl_obj)
inc_progress()
self.report.msg("... {} new object(s) were generated!", len(new_objects))
self._objects += new_objects
def _pack_ancillary_python(self):
texts = bpy.data.texts
self.report.progress_advance()
self.report.progress_range = len(texts)
inc_progress = self.report.progress_increment
for i in texts:
if i.name.endswith(".py") and self.output.want_py_text(i):
self.output.add_python_code(i.name, text_id=i)
inc_progress()
def _save_age(self):
self.report.progress_advance()
self.report.msg("\nWriting Age data...")
# If something bad happens in the final flush, it would be a shame to
# simply toss away the potentially freshly regenerated texture cache.
try:
self.locman.save()
self.mgr.save_age()
self.output.save()
finally:
self.image.save()
@property
def age_name(self):
if self._op.dat_only:
return Path(self._op.filepath).stem
else:
return bpy.context.scene.world.plasma_age.age_name
@property
def dat_only(self):
return self._op.dat_only
@property
def envmap_method(self):
return bpy.context.scene.world.plasma_age.envmap_method
@property
def python_method(self):
return bpy.context.scene.world.plasma_age.python_method
@property
def texcache_path(self):
age = bpy.context.scene.world.plasma_age
filepath = age.texcache_path
try:
valid_path = filepath and Path(filepath).is_file()
except OSError:
valid_path = False
if not valid_path:
filepath = bpy.context.blend_data.filepath
if not filepath:
filepath = self._op.filepath
filepath = str(Path(filepath).with_suffix(".ktc"))
age.texcache_path = filepath
return filepath
@property
def texcache_method(self):
return bpy.context.scene.world.plasma_age.texcache_method
| Hoikas/korman | korman/exporter/convert.py | Python | gpl-3.0 | 21,736 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-07-13 05:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('waterlevel', '0004_auto_20160531_1218'),
]
operations = [
migrations.AlterModelOptions(
name='watergate',
options={'get_latest_by': 'pk', 'ordering': ['-pk'], 'verbose_name_plural': 'Water gates'},
),
migrations.AlterModelOptions(
name='waterlevelreport',
options={'get_latest_by': 'pk', 'ordering': ['-pk'], 'verbose_name_plural': 'Water level reports'},
),
]
| geoenvo/opendims | opendims/waterlevel/migrations/0005_auto_20160713_1247.py | Python | gpl-3.0 | 673 |
from math import log
sum_of_logs = log(2)  # seed with the log of the only even prime
n = int(input('Choose a number: '))
# Accumulate log(p) for every prime p <= n (Chebyshev's theta function),
# testing only odd candidates by trial division.
for testing_number in range(3, n + 1, 2):
    for checking_number in range(2, testing_number):
        if testing_number % checking_number == 0:
            break
    else:
        sum_of_logs = sum_of_logs + log(testing_number)
print(sum_of_logs/n)
print('The sum of the logs is:', sum_of_logs)
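# By the prime number theorem, sum_of_logs / n (Chebyshev's theta over n)
# tends to 1 as n grows, which makes the first printed ratio a sanity check.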
| Safuya/python_project_euler | prime_numbers3.py | Python | gpl-3.0 | 441 |
#!/usr/bin/env python
"""
@file CSV2polyconvertXML.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2008-07-17
@version $Id: CSV2polyconvertXML.py 22608 2017-01-17 06:28:54Z behrisch $
Converts a given CSV-file that contains a list of pois to
an XML-file that may be read by POLYCONVERT.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
if len(sys.argv) < 4:
print("Error: Missing argument(s)")
print(
"Call: CSV2polyconvertXML.py <CSV_FILE> <OUTPUT_FILE> <VALUENAME>[,<VALUENAME>]*")
print(" The values within the csv-file are supposed to be divided by ';'.")
print(
" <VALUENAME>s give the attribute names in order of their appearence within the csv-file .")
exit()
names = sys.argv[3].split(',')
inpf = open(sys.argv[1])
outf = open(sys.argv[2], "w")
outf.write("<pois>\n")
for line in inpf:
    line = line.strip()
    if len(line) == 0 or line[0] == '#':
        continue
    vals = line.split(';')
outf.write(" <poi")
for i, n in enumerate(names):
outf.write(' ' + n + '="' + vals[i] + '"')
outf.write("/>\n")
outf.write("</pois>\n")
inpf.close()
outf.close()
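# Illustrative run (file names and values are hypothetical):
#
#     python CSV2polyconvertXML.py pois.csv pois.poi.xml id,x,y
#
# turns a CSV line "p1;100.0;200.0" into:
#
#     <poi id="p1" x="100.0" y="200.0"/>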
| 702nADOS/sumo | tools/shapes/CSV2polyconvertXML.py | Python | gpl-3.0 | 1,565 |
#/* bigbob9.py - demo ranger interface
# * K. Nickels 7/24/13
# */
# Check with "locate playerc.py"
import sys,os
sys.path.append('/usr/local/lib/python2.7/site-packages/')
sys.path.append('/usr/local/lib64/python2.7/site-packages/')
import math
from playerc import *
# /* Create a client and connect it to the server. */
robot = playerc_client(None, 'localhost',6665)
if robot.connect():
raise Exception(playerc_error_str())
# /* Create and subscribe to a position2d device. */
sonarProxy = playerc_ranger(robot,0)
if sonarProxy.subscribe(PLAYERC_OPEN_MODE):
raise Exception(playerc_error_str())
toothProxy = playerc_ranger(robot,1)
if toothProxy.subscribe(PLAYERC_OPEN_MODE):
raise Exception(playerc_error_str())
laserProxy = playerc_ranger(robot,2)
if laserProxy.subscribe(PLAYERC_OPEN_MODE):
raise Exception(playerc_error_str())
# /* read from the proxies */
sonarProxy.get_geom()
toothProxy.get_geom()
laserProxy.get_geom()
robot.read()
print "max range = ", laserProxy.max_range
print "%d sonar ranges: "% sonarProxy.ranges_count
for i in range(sonarProxy.ranges_count):
print "%.3f, " % sonarProxy.ranges[i],
print "."
print "%d tooth laser ranges: "% toothProxy.ranges_count
for i in range(toothProxy.ranges_count):
print "%.3f, " % laserProxy.ranges[i],
print "."
print "%d laser ranges: "% laserProxy.ranges_count
for i in range(laserProxy.ranges_count):
print "%.3f, " % laserProxy.ranges[i],
print "."
# Clean up
sonarProxy.unsubscribe()
toothProxy.unsubscribe()
laserProxy.unsubscribe()
robot.disconnect()
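# This demo expects a running Player server exposing three ranger devices on
# localhost:6665, e.g. started from a Stage world (the configuration file
# name below is hypothetical):
#
#     player bigbob9.cfg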
| lsa-pucrs/Player-Stage-Manual | code/Ch9.3/bigbob9_c.py | Python | gpl-3.0 | 1,538 |
from django.shortcuts import render,HttpResponse
from .models import Employee
from .models import Record
import datetime
import calendar
def wage_list(request):
return render(request,'app/wage_list.html',{})
def get_data(request):
date_from_user = str(request.POST.get('datepicker'))
    date_from_user = date_from_user.split(' ')  # [0] is the month, [1] is the year
print(date_from_user)
filtered_record = Record.objects.filter(date__year=date_from_user[1],date__month=date_from_user[0])
employees_list = list(filtered_record)
print(type(employees_list))
temp=[]
employees = Employee.objects.all()
e = Employee.objects.all()
    # To find no of working days (use the month/year the user picked, not today's date)
    chosen_month = int(date_from_user[0])
    chosen_year = int(date_from_user[1])
    no_of_sundays = sum( [ 1 for i in calendar.monthcalendar( chosen_year, chosen_month ) if i[6]!=0 ] )
    no_of_saturdays = sum( [ 1 for i in calendar.monthcalendar( chosen_year, chosen_month ) if i[5]!=0 ] )
    tupl = calendar.monthrange(chosen_year,chosen_month)
    total_days_in_month = int(tupl[1])
    total_working_days = int(total_days_in_month - ( no_of_sundays + no_of_saturdays))
# End to find no of working days
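    # Example (illustrative): July 2016 has 31 days with 5 Saturdays and
    # 5 Sundays, so total_working_days = 31 - (5 + 5) = 21.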
    # To find net payable salary
no_of_employees = filtered_record.count()
salary_payable=[]
salary=[]
net_payable = []
no_of_holiday = []
salary_deducted = []
net_final_payable = []
total_ot_hrs = []
Ot_Salary = []
salary_per_day=[]
days_attended = []
esi_cutting = []
esi = 1.75
i=0
int_array=[]
name_of_employee=[]
for counter in range(no_of_employees):
int_array.append(int(counter))
emp_data = []
while i<no_of_employees:
salary.append( int(filtered_record[i].Employee.pay_per_month) ) # Salary per month
salary_per_day.append(( round(int(filtered_record[i].Employee.pay_per_month)/total_working_days,2) ))
# name_of_employee.append(filtered_record[i].Employee.first_name)
temp.append(employees_list[i].Employee)
m=filtered_record[i].Employee.record_set.all()
no_of_holiday.append(str(int(m[0].no_of_holidays) + float(float(m[0].no_of_hours_absent)/8) ))
days_attended.append(float(total_working_days) - float(no_of_holiday[i]))
salary_payable.append( round(salary_per_day[i] * days_attended[i],2))
esi_cutting.append( round(salary_payable[i]*0.0175,2) )
net_salary = round(salary_payable[i] - salary_payable[i]*0.0175,2)
net_payable.append(net_salary)
# salary_deducted.append( round((int(m[0].no_of_holidays) + int(int(m[0].no_of_hours_absent)/8))*salary_per_day[i],2 ))
salary_deducted.append( round((int(int(m[0].no_of_hours_absent)/8))*salary_per_day[i],2 ))
total_ot_hrs.append(int(m[0].no_of_ot_hours))
Ot_Salary.append( round((int(m[0].no_of_ot_hours)/8)*salary_per_day[i],2) )
net_final_payable.append( round(Ot_Salary[i] + net_payable[i] -salary_deducted[i],2) )
emp_data.append({
'name_of_employee':temp[-1],
'salary':salary[-1],
'salary_payable' : salary_payable[-1],
'no_of_holiday' : no_of_holiday[-1],
'esi_cutting' : esi_cutting[-1],
'net_payable' : net_payable[-1],
'salary_deducted' : salary_deducted[-1],
'total_ot_hrs' : total_ot_hrs[-1],
'Ot_Salary' : Ot_Salary[-1],
'net_final_payable' : net_final_payable[-1],
})
# print(no_of_holiday)
i+=1
    # End to find net payable salary
return render(request,'app/wage_list.html',{'employees': employees_list,'twd':total_working_days,'no_of_employees':no_of_employees,'salary':salary,
'salary_payable':salary_payable,'net_payable':net_payable,'no_of_holiday':no_of_holiday,'salary_deducted':salary_deducted,
'total_ot_hrs':total_ot_hrs,'Ot_Salary':Ot_Salary,'net_final_payable':net_final_payable,'esi':esi,
'esi_cutting':esi_cutting,'filtered_record':filtered_record,'int_array':int_array,'emp_data':emp_data})
# Make ESI new variable
def index_data(request):
    return render(request,'app/wage_list.html',{})
| piyushhajare/Wage_Management | app/views.py | Python | gpl-3.0 | 3832 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# scikit-survival documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import datetime
import inspect
import os
from pathlib import Path
import re
import sys
from nbconvert.preprocessors import Preprocessor
import nbsphinx
from setuptools_scm import get_version
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
# https://docs.readthedocs.io/en/latest/faq.html?highlight=environ#how-do-i-change-behavior-for-read-the-docs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
if on_rtd:
sys.path.insert(0, os.path.abspath(os.path.pardir))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.8'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.extlinks',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'nbsphinx',
]
autosummary_generate = True
autodoc_default_options = {
'members': None,
'inherited-members': None,
}
# Napoleon settings
napoleon_google_docstring = False
napoleon_include_init_with_doc = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'scikit-survival'
current_year = datetime.utcnow().year
copyright = f'2015-{current_year}, Sebastian Pölsterl and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
if on_rtd:
release = get_version(root='..', relative_to=__file__)
else:
import sksurv
release = sksurv.__version__
# The short X.Y.Z version.
version = '.'.join(release.split('.')[:3])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# The default language to highlight source code in.
highlight_language = 'none'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**/README.*', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
nbsphinx_execute = 'never'
nbsphinx_prolog = r"""
{% set docname = "doc/" + env.doc2path(env.docname, base=None) %}
{% set notebook = env.doc2path(env.docname, base=None)|replace("user_guide/", "notebooks/") %}
{% set branch = 'master' if 'dev' in env.config.release else 'v{}'.format(env.config.release) %}
.. raw:: html
<div class="admonition note" style="line-height: 150%;">
This page was generated from
<a class="reference external" href="https://github.com/sebp/scikit-survival/blob/{{ branch|e }}/{{ docname|e }}">{{ docname|e }}</a>.<br/>
Interactive online version:
<span style="white-space: nowrap;"><a href="https://mybinder.org/v2/gh/sebp/scikit-survival/{{ branch|e }}?urlpath=lab/tree/{{ notebook|e }}"><img alt="Binder badge" src="https://mybinder.org/badge_logo.svg" style="vertical-align:text-bottom"></a>.</span>
</div>
"""
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"github_url": "https://github.com/sebp/scikit-survival",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "scikit-survival {0}".format(version)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
html_css_files = ['custom.css']
html_js_files = ['buttons.js']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
extlinks = {
'issue': ('https://github.com/sebp/scikit-survival/issues/%s', '#'),
}
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
Adapted from scipy.
"""
import sksurv
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except AttributeError:
return None
try:
fn = inspect.getsourcefile(obj)
except TypeError:
fn = None
if fn is None and hasattr(obj, '__module__'):
fn = inspect.getsourcefile(sys.modules[obj.__module__])
if fn is None:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except ValueError:
lineno = None
if lineno:
linespec = '#L%d-L%d' % (lineno, lineno + len(source) - 1)
else:
linespec = ''
startdir = Path(sksurv.__file__).parent.parent.absolute()
if not fn.startswith(str(startdir)): # not in sksurv
return None
fn = '/'.join(Path(fn).relative_to(startdir).parts)
if fn.startswith('sksurv/'):
m = re.match(r'^.*dev[0-9]+\+g([a-f0-9]+)$', release)
if m:
branch = m.group(1)
elif 'dev' in release:
branch = 'master'
else:
branch = 'v{}'.format(release)
return 'https://github.com/sebp/scikit-survival/blob/{branch}/{filename}{linespec}'.format(
branch=branch,
filename=fn,
linespec=linespec
)
else:
return None
class RTDUrlPreprocessor(Preprocessor):
"""Convert URLs to RTD in notebook to relative urls."""
URL_PATTERN = re.compile(
r'\(https://scikit-survival\.readthedocs\.io/.+?/.+?/([-._a-zA-Z0-9/]+)/(.+?)\.html.*?\)'
)
DOC_DIR = Path(__file__).parent
def preprocess_cell(self, cell, resources, index):
# path of notebook directory, relative to conf.py
nb_path = Path(resources['metadata']['path']).relative_to(self.DOC_DIR)
to_root = [os.pardir] * len(nb_path.parts)
if cell.cell_type == 'markdown':
text = cell.source
replace = []
for match in self.URL_PATTERN.finditer(text):
path = to_root[:]
path.append(match.group(1))
rel_url = "/".join(path)
filename = match.group(2)
replace.append((match.group(0), '({}/{}.rst)'.format(rel_url, filename)))
for s, r in replace:
text = text.replace(s, r)
cell.source = text
return cell, resources
return cell, resources
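    # Illustrative transformation (the URL is hypothetical): for a notebook
    # living in doc/user_guide/, a markdown link such as
    #   (https://scikit-survival.readthedocs.io/en/stable/api/generated/sksurv.svm.FastSurvivalSVM.html)
    # is rewritten to the relative form
    #   (../api/generated/sksurv.svm.FastSurvivalSVM.rst)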
def _from_notebook_node(self, nb, resources, **kwargs):
filters = [RTDUrlPreprocessor(), ]
for f in filters:
nb, resources = f.preprocess(nb, resources=resources)
return nbsphinx_from_notebook_node(self, nb, resources=resources, **kwargs)
# see https://github.com/spatialaudio/nbsphinx/issues/305#issuecomment-506748814-permalink
nbsphinx_from_notebook_node = nbsphinx.Exporter.from_notebook_node
nbsphinx.Exporter.from_notebook_node = _from_notebook_node
# ------------------------
# Mock dependencies on RTD
# ------------------------
if on_rtd:
MOCK_MODULES = [
# external dependencies
'ecos',
'joblib',
'numexpr',
'numpy',
'osqp',
'pandas',
'pandas.api.types',
'scipy',
'scipy.integrate',
'scipy.io.arff',
'scipy.linalg',
'scipy.optimize',
'scipy.sparse',
'scipy.special',
'scipy.stats',
'sklearn',
'sklearn.base',
'sklearn.dummy',
'sklearn.ensemble',
'sklearn.ensemble._base',
'sklearn.ensemble._forest',
'sklearn.ensemble._gb',
'sklearn.ensemble._gb_losses',
'sklearn.ensemble._gradient_boosting',
'sklearn.ensemble.base',
'sklearn.ensemble.forest',
'sklearn.ensemble.gradient_boosting',
'sklearn.exceptions',
'sklearn.externals.joblib',
'sklearn.linear_model',
'sklearn.metrics',
'sklearn.metrics.pairwise',
'sklearn.model_selection',
'sklearn.pipeline',
'sklearn.preprocessing',
'sklearn.svm',
'sklearn.tree',
'sklearn.tree._classes',
'sklearn.tree._splitter',
'sklearn.tree._tree',
'sklearn.tree.tree',
'sklearn.utils',
'sklearn.utils._joblib',
'sklearn.utils.extmath',
'sklearn.utils.fixes',
'sklearn.utils.metaestimators',
'sklearn.utils.validation',
# our C modules
'sksurv.bintrees._binarytrees',
'sksurv.ensemble._coxph_loss',
'sksurv.kernels._clinical_kernel',
'sksurv.linear_model._coxnet',
'sksurv.svm._minlip',
'sksurv.svm._prsvm',
'sksurv.tree._criterion']
from unittest.mock import Mock
class MockModule(Mock):
"""mock imports"""
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper() and name[0] != "_":
# Not very good, we assume Uppercase names are classes...
mocktype = type(name, (), {})
mocktype.__module__ = __name__
return mocktype
else:
return MockModule()
sys.modules.update((mod_name, MockModule()) for mod_name in MOCK_MODULES)
else:
from sklearn.ensemble._gb import BaseGradientBoosting
# Remove inherited API doc to avoid sphinx's duplicate object description error
BaseGradientBoosting.feature_importances_.__doc__ = None
| sebp/scikit-survival | doc/conf.py | Python | gpl-3.0 | 12,460 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def plot_decision_regions(X, y, clf, res=0.02):
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, res),
np.arange(y_min, y_max, res))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
plt.scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
class Perceptron(object):
def __init__(self, eta=0.01, epochs=50):
self.eta = eta
self.epochs = epochs
def train(self, X, y):
self.w_ = np.zeros(1 + X.shape[1])
self.errors_ = []
for _ in range(self.epochs):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
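# The training loop implements the classic perceptron rule
# w <- w + eta * (t - y_hat) * x (and likewise for the bias w_[0]).
# E.g. with eta = 0.1, target t = 1 and prediction y_hat = -1, each weight
# moves by 0.2 * x_i; correctly classified points leave the weights unchanged.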
# Correct perceptron outputs for the given training set
y = np.array([[1],[1],[1],[1],[-1],[-1],[-1],[-1]]).reshape(8,1)
# Input data array for the perceptron
X = np.array([[0,3],[1,2],[2,2],[4,0],[-1,2],[2,0],[3,-1],[4,-1]]).reshape(8,2)
ppn = Perceptron(epochs=10, eta=0.1)
ppn.train(X, y)
plot_decision_regions(X, y, clf=ppn)
plt.title('Perceptron')
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
plt.plot(range(1, len(ppn.errors_)+1), ppn.errors_, marker='o')
plt.xlabel('Iterations')
plt.ylabel('Misclassifications')
plt.show()
| wyzekid/Python_Projects | Perceptron/Rosenblatt_perceptron.py | Python | gpl-3.0 | 1,928 |
#!/usr/bin/env python
"""
segmentation-fold can predict RNA 2D structures including K-turns.
Copyright (C) 2012-2016 Youri Hoogstrate
This file is part of segmentation-fold and originally taken from
yh-kt-fold.
segmentation-fold is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
segmentation-fold is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
class RNA:
    def __init__(self, name, sequence, organism, structures):
self.name = name
self.organism = organism
self.sequence = sequence
self.structures = structures
def get_sequence(self):
return self.sequence
def get_structures(self):
return self.structures
def get_unique_associated_segments(self):
segments = []
for structure in self.structures:
for associated_segment in structure['associated_segments']:
segments.append(associated_segment)
return list(set(segments))
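# Minimal usage sketch (all field values are hypothetical; only the
# 'associated_segments' key is required by get_unique_associated_segments):
#
#     rna = RNA("U4", "AGCUAGCU", "Homo sapiens",
#               [{"associated_segments": ["Kt-U4"]},
#                {"associated_segments": ["Kt-U4", "Kt-15"]}])
#     rna.get_unique_associated_segments()  # -> ['Kt-U4', 'Kt-15'] (set order)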
| yhoogstrate/segmentation-fold | scripts/energy-estimation-utility/segmentation_fold_utils/RNA.py | Python | gpl-3.0 | 1,466 |
#!/usr/bin/python
# coding: utf8
#
# cookiecutter-py documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from setup import get_distribution_info
project_metadata = get_distribution_info()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.5'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
"sphinx_autodoc_typehints",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = project_metadata["name"]
author = project_metadata["author"]
copyright = '2016, {!s}'.format(author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = project_metadata["version"]
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'cookiecutter-pydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cookiecutter-py.tex', 'cookiecutter-py Documentation',
'Charles Bouchard-Légaré', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cookiecutter-py', 'cookiecutter-py Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cookiecutter-py', 'cookiecutter-py Documentation',
author, 'cookiecutter-py', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| antoinedube/numeric-cookiecutter | docs/conf.py | Python | gpl-3.0 | 9,486 |
from common.common_consts.telem_categories import TelemCategoryEnum
from infection_monkey.telemetry.base_telem import BaseTelem
class ScanTelem(BaseTelem):
def __init__(self, machine):
"""
Default scan telemetry constructor
:param machine: Scanned machine
"""
super(ScanTelem, self).__init__()
self.machine = machine
telem_category = TelemCategoryEnum.SCAN
def get_data(self):
return {"machine": self.machine.as_dict(), "service_count": len(self.machine.services)}
| guardicore/monkey | monkey/infection_monkey/telemetry/scan_telem.py | Python | gpl-3.0 | 537 |
from django.contrib.auth import get_user_model
# from django.urls import reverse
from model_bakery import baker
from tenant_schemas.test.cases import TenantTestCase
from tenant_schemas.test.client import TenantClient
# from siteconfig.models import SiteConfig
from hackerspace_online.tests.utils import ViewTestUtilsMixin
from djcytoscape.models import CytoScape
User = get_user_model()
class ViewTests(ViewTestUtilsMixin, TenantTestCase):
def setUp(self):
self.client = TenantClient(self.tenant)
# need a teacher and a student with known password so tests can log in as each, or could use force_login()?
self.test_password = "password"
# need a teacher before students can be created or the profile creation will fail when trying to notify
self.test_teacher = User.objects.create_user('test_teacher', password=self.test_password, is_staff=True)
self.test_student1 = User.objects.create_user('test_student', password=self.test_password)
self.map = baker.make('djcytoscape.CytoScape')
def test_all_page_status_codes_for_anonymous(self):
''' If not logged in then all views should redirect to home page '''
self.assertRedirectsLogin('djcytoscape:index')
self.assertRedirectsLogin('djcytoscape:primary')
self.assertRedirectsLogin('djcytoscape:quest_map', args=[1])
self.assertRedirectsLogin('djcytoscape:quest_map_personalized', args=[1, 1])
self.assertRedirectsLogin('djcytoscape:quest_map_interlink', args=[1, 1, 1])
self.assertRedirectsLogin('djcytoscape:list')
self.assertRedirectsAdmin('djcytoscape:regenerate', args=[1])
self.assertRedirectsAdmin('djcytoscape:regenerate_all')
self.assertRedirectsAdmin('djcytoscape:generate_map', kwargs={'quest_id': 1, 'scape_id': 1})
self.assertRedirectsAdmin('djcytoscape:generate_unseeded')
self.assertRedirectsAdmin('djcytoscape:update', args=[1])
self.assertRedirectsAdmin('djcytoscape:delete', args=[1])
def test_all_page_status_codes_for_students(self):
success = self.client.login(username=self.test_student1.username, password=self.test_password)
self.assertTrue(success)
self.assert200('djcytoscape:index')
self.assert200('djcytoscape:quest_map_personalized', args=[self.map.id, self.test_student1.id])
# need to build interlinked maps to test this. Do in own test
# self.assert200('djcytoscape:quest_map_interlink', args=[1, 1, 1])
self.assert200('djcytoscape:list')
self.assert200('djcytoscape:primary')
self.assert200('djcytoscape:quest_map', args=[self.map.id])
self.assertRedirectsAdmin('djcytoscape:update', args=[self.map.id])
self.assertRedirectsAdmin('djcytoscape:delete', args=[self.map.id])
self.assertRedirectsAdmin('djcytoscape:regenerate', args=[self.map.id])
self.assertRedirectsAdmin('djcytoscape:regenerate_all')
self.assertRedirectsAdmin('djcytoscape:generate_map', kwargs={'quest_id': 1, 'scape_id': 1})
self.assertRedirectsAdmin('djcytoscape:generate_unseeded')
def test_all_page_status_codes_for_teachers(self):
# log in a teacher
success = self.client.login(username=self.test_teacher.username, password=self.test_password)
self.assertTrue(success)
self.assert200('djcytoscape:index')
self.assert200('djcytoscape:quest_map_personalized', args=[self.map.id, self.test_student1.id])
# need to build interlinked maps to test this. Do in own test
# self.assert200('djcytoscape:quest_map_interlink', args=[1, 1, 1])
self.assert200('djcytoscape:list')
self.assert200('djcytoscape:primary')
self.assert200('djcytoscape:quest_map', args=[self.map.id])
self.assert200('djcytoscape:update', args=[self.map.id])
self.assert200('djcytoscape:delete', args=[self.map.id])
# These will need their own tests:
# self.assert200('djcytoscape:regenerate', args=[self.map.id])
# self.assert200('djcytoscape:regenerate_all')
# self.assert200('djcytoscape:generate_map', kwargs={'quest_id': 1, 'scape_id': 1})
# self.assert200('djcytoscape:generate_unseeded')
class PrimaryViewTests(ViewTestUtilsMixin, TenantTestCase):
def test_initial_map_generated_on_first_view(self):
# shouldn't be any maps from the start
self.assertFalse(CytoScape.objects.exists())
        # log in as anyone
self.client = TenantClient(self.tenant)
anyone = User.objects.create_user('anyone', password="password")
success = self.client.login(username=anyone.username, password="password")
self.assertTrue(success)
# Access the primary map view
self.assert200('djcytoscape:primary')
# Should have generated the "Main" map
self.assertEqual(CytoScape.objects.count(), 1)
self.assertTrue(CytoScape.objects.filter(name="Main").exists())
| timberline-secondary/hackerspace | src/djcytoscape/tests/test_views.py | Python | gpl-3.0 | 5,010 |
from flask import g
from flask.ext.restplus import Namespace, reqparse, marshal
from app.api.attendees import TICKET
from app.api.microlocations import MICROLOCATION
from app.api.sessions import SESSION
from app.api.speakers import SPEAKER
from app.api.sponsors import SPONSOR
from app.api.tracks import TRACK
from app.helpers.data import save_to_db, record_activity
from app.models.call_for_papers import CallForPaper as EventCFS
from app.models.event import Event as EventModel
from app.models.event_copyright import EventCopyright
from app.models.role import Role
from app.models.social_link import SocialLink as SocialLinkModel
from app.models.user import ORGANIZER
from app.models.users_events_roles import UsersEventsRoles
from helpers.special_fields import EventTypeField, EventTopicField, \
EventPrivacyField, EventSubTopicField, EventStateField
from app.api.helpers import custom_fields as fields
from app.api.helpers.helpers import requires_auth, parse_args, \
can_access, fake_marshal_with, fake_marshal_list_with, erase_from_dict
from app.api.helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \
PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, BaseDAO, ServiceDAO
from app.api.helpers.utils import Resource, ETAG_HEADER_DEFN
api = Namespace('events', description='Events')
EVENT_CREATOR = api.model('EventCreator', {
'id': fields.Integer(),
'email': fields.Email()
})
EVENT_COPYRIGHT = api.model('EventCopyright', {
'holder': fields.String(),
'holder_url': fields.Uri(),
'licence': fields.String(),
'licence_url': fields.Uri(),
'year': fields.Integer(),
'logo': fields.String()
})
EVENT_CFS = api.model('EventCFS', {
'announcement': fields.String(),
'start_date': fields.DateTime(),
'end_date': fields.DateTime(),
'timezone': fields.String(),
'privacy': EventPrivacyField() # [public, private]
})
EVENT_VERSION = api.model('EventVersion', {
'event_ver': fields.Integer(),
'sessions_ver': fields.Integer(),
'speakers_ver': fields.Integer(),
'tracks_ver': fields.Integer(),
'sponsors_ver': fields.Integer(),
'microlocations_ver': fields.Integer()
})
SOCIAL_LINK = api.model('SocialLink', {
'id': fields.Integer(),
'name': fields.String(required=True),
'link': fields.String(required=True)
})
SOCIAL_LINK_POST = api.clone('SocialLinkPost', SOCIAL_LINK)
del SOCIAL_LINK_POST['id']
EVENT = api.model('Event', {
'id': fields.Integer(required=True),
'identifier': fields.String(),
'name': fields.String(required=True),
'event_url': fields.Uri(),
'email': fields.Email(),
'logo': fields.Upload(),
'start_time': fields.DateTime(required=True),
'end_time': fields.DateTime(required=True),
'timezone': fields.String(),
'latitude': fields.Float(),
'longitude': fields.Float(),
'background_image': fields.Upload(attribute='background_url'),
'description': fields.String(),
'location_name': fields.String(),
'searchable_location_name': fields.String(),
'organizer_name': fields.String(),
'organizer_description': fields.String(),
'state': EventStateField(default='Draft'),
'type': EventTypeField(),
'topic': EventTopicField(),
'sub_topic': EventSubTopicField(),
'privacy': EventPrivacyField(),
'ticket_url': fields.Uri(),
'creator': fields.Nested(EVENT_CREATOR, allow_null=True),
'copyright': fields.Nested(EVENT_COPYRIGHT, allow_null=True),
'schedule_published_on': fields.DateTime(),
'code_of_conduct': fields.String(),
'social_links': fields.List(fields.Nested(SOCIAL_LINK), attribute='social_link'),
'call_for_papers': fields.Nested(EVENT_CFS, allow_null=True),
'version': fields.Nested(EVENT_VERSION),
'has_session_speakers': fields.Boolean(default=False),
'thumbnail': fields.Uri(),
'large': fields.Uri()
})
EVENT_COMPLETE = api.clone('EventComplete', EVENT, {
'sessions': fields.List(fields.Nested(SESSION), attribute='session'),
'microlocations': fields.List(fields.Nested(MICROLOCATION), attribute='microlocation'),
'tracks': fields.List(fields.Nested(TRACK), attribute='track'),
'sponsors': fields.List(fields.Nested(SPONSOR), attribute='sponsor'),
'speakers': fields.List(fields.Nested(SPEAKER), attribute='speaker'),
'tickets': fields.List(fields.Nested(TICKET), attribute='tickets'),
})
EVENT_PAGINATED = api.clone('EventPaginated', PAGINATED_MODEL, {
'results': fields.List(fields.Nested(EVENT))
})
EVENT_POST = api.clone('EventPost', EVENT)
del EVENT_POST['id']
del EVENT_POST['creator']
del EVENT_POST['social_links']
del EVENT_POST['version']
# ###################
# Data Access Objects
# ###################
class SocialLinkDAO(ServiceDAO):
"""
Social Link DAO
"""
version_key = 'event_ver'
class EventDAO(BaseDAO):
"""
Event DAO
"""
version_key = 'event_ver'
def fix_payload(self, data):
"""
Fixes the payload data.
        Converts string datetime fields into datetime objects.
"""
datetime_fields = ['start_time', 'end_time', 'schedule_published_on']
for f in datetime_fields:
if f in data:
data[f] = EVENT_POST[f].from_str(data.get(f))
# cfs datetimes
if data.get('call_for_papers'):
for _ in ['start_date', 'end_date']:
if _ in data['call_for_papers']:
data['call_for_papers'][_] = EVENT_CFS[_].from_str(
data['call_for_papers'].get(_))
return data
def create(self, data, url):
data = self.validate(data)
payload = self.fix_payload(data)
# save copyright info
payload['copyright'] = CopyrightDAO.create(payload.get('copyright', {}), validate=False)
# save cfs info
if payload.get('call_for_papers'): # don't create if call_for_papers==null
payload['call_for_papers'] = CFSDAO.create(payload['call_for_papers'], validate=False)
# save event
new_event = self.model(**payload)
new_event.creator = g.user
save_to_db(new_event, "Event saved")
# set organizer
role = Role.query.filter_by(name=ORGANIZER).first()
uer = UsersEventsRoles(g.user, new_event, role)
save_to_db(uer, 'UER saved')
# Return created resource with a 201 status code and its Location
# (url) in the header.
resource_location = url + '/' + str(new_event.id)
return self.get(new_event.id), 201, {'Location': resource_location}
def update(self, event_id, data):
data = self.validate_put(data)
payload = self.fix_payload(data)
# get event
event = self.get(event_id)
# update copyright if key exists
if 'copyright' in payload:
CopyrightDAO.update(event.copyright.id, payload['copyright']
if payload['copyright'] else {})
payload.pop('copyright')
# update cfs
if 'call_for_papers' in payload:
cfs_data = payload.get('call_for_papers')
if event.call_for_papers:
if cfs_data: # update existing
CFSDAO.update(event.call_for_papers.id, cfs_data)
else: # delete if null
CFSDAO.delete(event.call_for_papers.id)
elif cfs_data: # create new (only if data exists)
CFSDAO.create(cfs_data, validate=False)
payload.pop('call_for_papers')
# master update
return BaseDAO.update(self, event_id, payload, validate=False)
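# A minimal payload sketch accepted by EventDAO.create() (values are
# illustrative; datetime fields arrive as strings and are converted by
# fix_payload() above):
#
#     payload = {
#         'name': 'Demo Event',
#         'start_time': '2016-06-01T09:00:00',
#         'end_time': '2016-06-01T18:00:00',
#         'copyright': {'holder': 'Example Org', 'year': 2016},
#     }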
LinkDAO = SocialLinkDAO(SocialLinkModel, SOCIAL_LINK_POST)
DAO = EventDAO(EventModel, EVENT_POST)
CopyrightDAO = BaseDAO(EventCopyright, EVENT_COPYRIGHT)
CFSDAO = BaseDAO(EventCFS, EVENT_CFS) # CFS = Call For Speakers
# DEFINE PARAMS
EVENT_PARAMS = {
'location': {},
'contains': {
'description': 'Contains the string in name and description'
},
'state': {},
'privacy': {},
'type': {},
'topic': {},
'sub_topic': {},
'start_time_gt': {},
'start_time_lt': {},
'end_time_gt': {},
'end_time_lt': {},
'time_period': {},
'include': {
'description': 'Comma separated list of additional fields to load. '
                       'Supported: sessions,tracks,microlocations,speakers,sponsors'
},
}
SINGLE_EVENT_PARAMS = {
'include': {
'description': 'Comma separated list of additional fields to load. '
                       'Supported: sessions,tracks,microlocations,speakers,sponsors,tickets'
},
}
def get_extended_event_model(includes=None):
if includes is None:
includes = []
included_fields = {}
if 'sessions' in includes:
included_fields['sessions'] = fields.List(fields.Nested(SESSION), attribute='session')
if 'tracks' in includes:
included_fields['tracks'] = fields.List(fields.Nested(TRACK), attribute='track')
if 'microlocations' in includes:
included_fields['microlocations'] = fields.List(fields.Nested(MICROLOCATION), attribute='microlocation')
if 'sponsors' in includes:
included_fields['sponsors'] = fields.List(fields.Nested(SPONSOR), attribute='sponsor')
if 'speakers' in includes:
included_fields['speakers'] = fields.List(fields.Nested(SPEAKER), attribute='speaker')
if 'tickets' in includes:
included_fields['tickets'] = fields.List(fields.Nested(TICKET), attribute='tickets')
return EVENT.extend('ExtendedEvent', included_fields)
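# Usage sketch: marshal a single event with selected relations included
# (`event_id` is assumed to reference an existing event):
#
#     model = get_extended_event_model(['sessions', 'tracks'])
#     data = marshal(DAO.get(event_id), model)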
# DEFINE RESOURCES
class EventResource():
"""
Event Resource Base class
"""
event_parser = reqparse.RequestParser()
event_parser.add_argument('location', type=unicode, dest='__event_search_location')
event_parser.add_argument('contains', type=unicode, dest='__event_contains')
event_parser.add_argument('state', type=str)
event_parser.add_argument('privacy', type=str)
event_parser.add_argument('type', type=str)
event_parser.add_argument('topic', type=str)
event_parser.add_argument('sub_topic', type=str)
event_parser.add_argument('start_time_gt', dest='__event_start_time_gt')
event_parser.add_argument('start_time_lt', dest='__event_start_time_lt')
event_parser.add_argument('end_time_gt', dest='__event_end_time_gt')
event_parser.add_argument('end_time_lt', dest='__event_end_time_lt')
event_parser.add_argument('time_period', type=str, dest='__event_time_period')
event_parser.add_argument('include', type=str)
class SingleEventResource():
event_parser = reqparse.RequestParser()
event_parser.add_argument('include', type=str)
@api.route('/<int:event_id>')
@api.param('event_id')
@api.response(404, 'Event not found')
class Event(Resource, SingleEventResource):
@api.doc('get_event', params=SINGLE_EVENT_PARAMS)
@api.header(*ETAG_HEADER_DEFN)
@fake_marshal_with(EVENT_COMPLETE) # Fake marshal decorator to add response model to swagger doc
def get(self, event_id):
"""Fetch an event given its id"""
includes = parse_args(self.event_parser).get('include', '').split(',')
return marshal(DAO.get(event_id), get_extended_event_model(includes))
@requires_auth
@can_access
@api.doc('delete_event')
@api.marshal_with(EVENT)
def delete(self, event_id):
"""Delete an event given its id"""
event = DAO.delete(event_id)
record_activity('delete_event', event_id=event_id)
return event
@requires_auth
@can_access
@api.doc('update_event', responses=PUT_RESPONSES)
@api.marshal_with(EVENT)
@api.expect(EVENT_POST)
def put(self, event_id):
"""Update an event given its id"""
event = DAO.update(event_id, self.api.payload)
record_activity('update_event', event_id=event_id)
return event
@api.route('/<int:event_id>/event')
@api.param('event_id')
@api.response(404, 'Event not found')
class EventWebapp(Resource, SingleEventResource):
@api.doc('get_event_for_webapp')
@api.header(*ETAG_HEADER_DEFN)
@fake_marshal_with(EVENT_COMPLETE) # Fake marshal decorator to add response model to swagger doc
def get(self, event_id):
"""Fetch an event given its id.
Alternate endpoint for fetching an event.
"""
includes = parse_args(self.event_parser).get('include', '').split(',')
return marshal(DAO.get(event_id), get_extended_event_model(includes))
@api.route('')
class EventList(Resource, EventResource):
@api.doc('list_events', params=EVENT_PARAMS)
@api.header(*ETAG_HEADER_DEFN)
@fake_marshal_list_with(EVENT_COMPLETE)
def get(self):
"""List all events"""
parsed_args = parse_args(self.event_parser)
includes = parsed_args.get('include', '').split(',')
erase_from_dict(parsed_args, 'include')
return marshal(DAO.list(**parsed_args), get_extended_event_model(includes))
@requires_auth
@api.doc('create_event', responses=POST_RESPONSES)
@api.marshal_with(EVENT)
@api.expect(EVENT_POST)
def post(self):
"""Create an event"""
item = DAO.create(self.api.payload, self.api.url_for(self))
record_activity('create_event', event_id=item[0].id)
return item
@api.route('/page')
class EventListPaginated(Resource, PaginatedResourceBase, EventResource):
@api.doc('list_events_paginated', params=PAGE_PARAMS)
@api.doc(params=EVENT_PARAMS)
@api.header(*ETAG_HEADER_DEFN)
@api.marshal_with(EVENT_PAGINATED)
def get(self):
"""List events in a paginated manner"""
args = self.parser.parse_args()
return DAO.paginated_list(args=args, **parse_args(self.event_parser))
@api.route('/<int:event_id>/links')
@api.param('event_id')
class SocialLinkList(Resource):
@api.doc('list_social_links')
@api.header(*ETAG_HEADER_DEFN)
@api.marshal_list_with(SOCIAL_LINK)
def get(self, event_id):
"""List all social links"""
return LinkDAO.list(event_id)
@requires_auth
@can_access
@api.doc('create_social_link', responses=POST_RESPONSES)
@api.marshal_with(SOCIAL_LINK)
@api.expect(SOCIAL_LINK_POST)
def post(self, event_id):
"""Create a social link"""
return LinkDAO.create(
event_id,
self.api.payload,
self.api.url_for(self, event_id=event_id)
)
@api.route('/<int:event_id>/links/<int:link_id>')
class SocialLink(Resource):
@requires_auth
@can_access
@api.doc('delete_social_link')
@api.marshal_with(SOCIAL_LINK)
def delete(self, event_id, link_id):
"""Delete a social link given its id"""
return LinkDAO.delete(event_id, link_id)
@requires_auth
@can_access
@api.doc('update_social_link', responses=PUT_RESPONSES)
@api.marshal_with(SOCIAL_LINK)
@api.expect(SOCIAL_LINK_POST)
def put(self, event_id, link_id):
"""Update a social link given its id"""
return LinkDAO.update(event_id, link_id, self.api.payload)
@api.hide
@api.header(*ETAG_HEADER_DEFN)
@api.marshal_with(SOCIAL_LINK)
def get(self, event_id, link_id):
"""Fetch a social link given its id"""
return LinkDAO.get(event_id, link_id)
| arpitn30/open-event-orga-server | app/api/events.py | Python | gpl-3.0 | 15,239 |
#!/usr/bin/env python3
# ReText
# Copyright 2011-2012 Dmitry Shachnev
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import sys
import signal
from ReText import *
from ReText.window import ReTextWindow
def main():
app = QApplication(sys.argv)
app.setOrganizationName("ReText project")
app.setApplicationName("ReText")
RtTranslator = QTranslator()
for path in datadirs:
if RtTranslator.load('retext_'+QLocale.system().name(), path+'/locale'):
break
QtTranslator = QTranslator()
QtTranslator.load("qt_"+QLocale.system().name(), QLibraryInfo.location(QLibraryInfo.TranslationsPath))
app.installTranslator(RtTranslator)
app.installTranslator(QtTranslator)
if settings.contains('appStyleSheet'):
stylename = readFromSettings('appStyleSheet', str)
sheetfile = QFile(stylename)
sheetfile.open(QIODevice.ReadOnly)
app.setStyleSheet(QTextStream(sheetfile).readAll())
sheetfile.close()
window = ReTextWindow()
window.show()
fileNames = [QFileInfo(arg).canonicalFilePath() for arg in sys.argv[1:]]
for fileName in fileNames:
try:
fileName = QString.fromUtf8(fileName)
except:
# Not needed for Python 3
pass
if QFile.exists(fileName):
window.openFileWrapper(fileName)
signal.signal(signal.SIGINT, lambda sig, frame: window.close())
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| codemedic/retext | retext.py | Python | gpl-3.0 | 1,988 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for user models.
"""
from django.contrib.auth.models import User, Group
from django.test import TestCase
from weblate.accounts.models import AutoGroup
class AutoGroupTest(TestCase):
@staticmethod
def create_user():
return User.objects.create_user('test1', '[email protected]', 'pass')
def test_default(self):
user = self.create_user()
self.assertEqual(user.groups.count(), 1)
def test_none(self):
AutoGroup.objects.all().delete()
user = self.create_user()
self.assertEqual(user.groups.count(), 0)
def test_matching(self):
AutoGroup.objects.create(
match='^.*@weblate.org',
group=Group.objects.get(name='Guests')
)
user = self.create_user()
self.assertEqual(user.groups.count(), 2)
def test_nonmatching(self):
AutoGroup.objects.create(
match='^.*@example.net',
group=Group.objects.get(name='Guests')
)
user = self.create_user()
self.assertEqual(user.groups.count(), 1)
| dtschan/weblate | weblate/accounts/tests/test_models.py | Python | gpl-3.0 | 1,853 |
import locale
import os
import re
import subprocess
import sys
import platform
import time
# init libs
PROGRAM_DIR = os.path.dirname(os.path.normpath(os.path.abspath(os.path.join(__file__, os.pardir))))
LIBS_DIR = os.path.join(PROGRAM_DIR, 'libs')
sys.path.insert(0, LIBS_DIR)
# init preliminaries
SYS_ARGV = sys.argv[1:]
APP_FILENAME = sys.argv[0]
APP_NAME = os.path.basename(APP_FILENAME)
LOG_DIR = os.path.join(PROGRAM_DIR, 'logs')
LOG_FILE = os.path.join(LOG_DIR, 'nzbtomedia.log')
PID_FILE = os.path.join(LOG_DIR, 'nzbtomedia.pid')
CONFIG_FILE = os.path.join(PROGRAM_DIR, 'autoProcessMedia.cfg')
CONFIG_SPEC_FILE = os.path.join(PROGRAM_DIR, 'autoProcessMedia.cfg.spec')
CONFIG_MOVIE_FILE = os.path.join(PROGRAM_DIR, 'autoProcessMovie.cfg')
CONFIG_TV_FILE = os.path.join(PROGRAM_DIR, 'autoProcessTv.cfg')
TEST_FILE = os.path.join(os.path.join(PROGRAM_DIR, 'tests'), 'test.mp4')
MYAPP = None
from core.autoProcess.autoProcessComics import autoProcessComics
from core.autoProcess.autoProcessGames import autoProcessGames
from core.autoProcess.autoProcessMovie import autoProcessMovie
from core.autoProcess.autoProcessMusic import autoProcessMusic
from core.autoProcess.autoProcessTV import autoProcessTV
from core import logger, versionCheck, nzbToMediaDB
from core.nzbToMediaConfig import config
from core.nzbToMediaUtil import category_search, sanitizeName, copy_link, parse_args, flatten, getDirs, \
rmReadOnly,rmDir, pause_torrent, resume_torrent, remove_torrent, listMediaFiles, \
extractFiles, cleanDir, update_downloadInfoStatus, get_downloadInfo, WakeUp, makeDir, cleanDir, \
create_torrent_class, listMediaFiles, RunningProcess
from core.transcoder import transcoder
from core.databases import mainDB
# Client Agents
NZB_CLIENTS = ['sabnzbd','nzbget']
TORRENT_CLIENTS = ['transmission', 'deluge', 'utorrent', 'rtorrent', 'other']
# sabnzbd constants
SABNZB_NO_OF_ARGUMENTS = 8
SABNZB_0717_NO_OF_ARGUMENTS = 9
# sickbeard fork/branch constants
FORKS = {}
FORK_DEFAULT = "default"
FORK_FAILED = "failed"
FORK_FAILED_TORRENT = "failed-torrent"
FORK_SICKRAGE = "sickrage"
FORKS[FORK_DEFAULT] = {"dir": None}
FORKS[FORK_FAILED] = {"dirName": None, "failed": None}
FORKS[FORK_FAILED_TORRENT] = {"dir": None, "failed": None, "process_method": None}
FORKS[FORK_SICKRAGE] = {"dir": None, "failed": None, "process_method": None, "force": None}
ALL_FORKS = {"dir": None, "dirName": None, "failed": None, "process_method": None, "force": None}
SICKBEARD_FAILED = [FORK_FAILED, FORK_FAILED_TORRENT, FORK_SICKRAGE]
SICKBEARD_TORRENT = [FORK_FAILED_TORRENT, FORK_SICKRAGE]
# NZBGet Exit Codes
NZBGET_POSTPROCESS_PARCHECK = 92
NZBGET_POSTPROCESS_SUCCESS = 93
NZBGET_POSTPROCESS_ERROR = 94
NZBGET_POSTPROCESS_NONE = 95
CFG = None
LOG_DEBUG = None
LOG_DB = None
LOG_ENV = None
LOG_GIT = None
SYS_ENCODING = None
AUTO_UPDATE = None
NZBTOMEDIA_VERSION = None
NEWEST_VERSION = None
NEWEST_VERSION_STRING = None
VERSION_NOTIFY = None
GIT_PATH = None
GIT_USER = None
GIT_BRANCH = None
GIT_REPO = None
FORCE_CLEAN = None
SAFE_MODE = None
NZB_CLIENTAGENT = None
SABNZBDHOST = None
SABNZBDPORT = None
SABNZBDAPIKEY = None
NZB_DEFAULTDIR = None
TORRENT_CLIENTAGENT = None
TORRENT_CLASS = None
USELINK = None
OUTPUTDIRECTORY = None
NOFLATTEN = []
DELETE_ORIGINAL = None
TORRENT_DEFAULTDIR = None
REMOTEPATHS = []
UTORRENTWEBUI = None
UTORRENTUSR = None
UTORRENTPWD = None
TRANSMISSIONHOST = None
TRANSMISSIONPORT = None
TRANSMISSIONUSR = None
TRANSMISSIONPWD = None
DELUGEHOST = None
DELUGEPORT = None
DELUGEUSR = None
DELUGEPWD = None
PLEXSSL = None
PLEXHOST = None
PLEXPORT = None
PLEXTOKEN = None
PLEXSEC = []
EXTCONTAINER = []
COMPRESSEDCONTAINER = []
MEDIACONTAINER = []
AUDIOCONTAINER = []
METACONTAINER = []
SECTIONS = []
CATEGORIES = []
GETSUBS = False
TRANSCODE = None
CONCAT = None
FFMPEG_PATH = None
DUPLICATE = None
IGNOREEXTENSIONS = []
VEXTENSION = None
OUTPUTVIDEOPATH = None
PROCESSOUTPUT = False
GENERALOPTS = []
ALANGUAGE = None
AINCLUDE = False
SLANGUAGES = []
SINCLUDE = False
SUBSDIR = None
ALLOWSUBS = False
SEXTRACT = False
SEMBED = False
BURN = False
DEFAULTS = None
VCODEC = None
VCODEC_ALLOW = []
VPRESET = None
VFRAMERATE = None
VBITRATE = None
VLEVEL = None
VCRF = None
VRESOLUTION = None
ACODEC = None
ACODEC_ALLOW = []
ACHANNELS = None
ABITRATE = None
ACODEC2 = None
ACODEC2_ALLOW = []
ACHANNELS2 = None
ABITRATE2 = None
ACODEC3 = None
ACODEC3_ALLOW = []
ACHANNELS3 = None
ABITRATE3 = None
SCODEC = None
OUTPUTFASTSTART = None
OUTPUTQUALITYPERCENT = None
FFMPEG = None
SEVENZIP = None
FFPROBE = None
CHECK_MEDIA = None
NICENESS = []
HWACCEL = False
PASSWORDSFILE = None
DOWNLOADINFO = None
GROUPS = None
USER_SCRIPT_MEDIAEXTENSIONS = None
USER_SCRIPT = None
USER_SCRIPT_PARAM = None
USER_SCRIPT_SUCCESSCODES = None
USER_SCRIPT_CLEAN = None
USER_DELAY = None
USER_SCRIPT_RUNONCE = None
__INITIALIZED__ = False
def initialize(section=None):
global NZBGET_POSTPROCESS_ERROR, NZBGET_POSTPROCESS_NONE, NZBGET_POSTPROCESS_PARCHECK, NZBGET_POSTPROCESS_SUCCESS, \
NZBTOMEDIA_TIMEOUT, FORKS, FORK_DEFAULT, FORK_FAILED_TORRENT, FORK_FAILED, SICKBEARD_TORRENT, SICKBEARD_FAILED, \
NZBTOMEDIA_BRANCH, NZBTOMEDIA_VERSION, NEWEST_VERSION, NEWEST_VERSION_STRING, VERSION_NOTIFY, SYS_ARGV, CFG, \
SABNZB_NO_OF_ARGUMENTS, SABNZB_0717_NO_OF_ARGUMENTS, CATEGORIES, TORRENT_CLIENTAGENT, USELINK, OUTPUTDIRECTORY, \
NOFLATTEN, UTORRENTPWD, UTORRENTUSR, UTORRENTWEBUI, DELUGEHOST, DELUGEPORT, DELUGEUSR, DELUGEPWD, VLEVEL, \
TRANSMISSIONHOST, TRANSMISSIONPORT, TRANSMISSIONPWD, TRANSMISSIONUSR, COMPRESSEDCONTAINER, MEDIACONTAINER, \
METACONTAINER, SECTIONS, ALL_FORKS, TEST_FILE, GENERALOPTS, LOG_GIT, GROUPS, SEVENZIP, CONCAT, VCRF, \
__INITIALIZED__, AUTO_UPDATE, APP_FILENAME, USER_DELAY, APP_NAME, TRANSCODE, DEFAULTS, GIT_PATH, GIT_USER, \
GIT_BRANCH, GIT_REPO, SYS_ENCODING, NZB_CLIENTAGENT, SABNZBDHOST, SABNZBDPORT, SABNZBDAPIKEY, \
DUPLICATE, IGNOREEXTENSIONS, VEXTENSION, OUTPUTVIDEOPATH, PROCESSOUTPUT, VCODEC, VCODEC_ALLOW, VPRESET, \
VFRAMERATE, LOG_DB, VBITRATE, VRESOLUTION, ALANGUAGE, AINCLUDE, ACODEC, ACODEC_ALLOW, ABITRATE, \
ACODEC2, ACODEC2_ALLOW, ABITRATE2, ACODEC3, ACODEC3_ALLOW, ABITRATE3, ALLOWSUBS, SEXTRACT, SEMBED, SLANGUAGES, \
SINCLUDE, SUBSDIR, SCODEC, OUTPUTFASTSTART, OUTPUTQUALITYPERCENT, BURN, GETSUBS, HWACCEL, LOG_DIR, LOG_FILE, \
NICENESS, LOG_DEBUG, FORCE_CLEAN, FFMPEG_PATH, FFMPEG, FFPROBE, AUDIOCONTAINER, EXTCONTAINER, TORRENT_CLASS, \
DELETE_ORIGINAL, PASSWORDSFILE, USER_DELAY, USER_SCRIPT, USER_SCRIPT_CLEAN, USER_SCRIPT_MEDIAEXTENSIONS, \
USER_SCRIPT_PARAM, USER_SCRIPT_RUNONCE, USER_SCRIPT_SUCCESSCODES, DOWNLOADINFO, CHECK_MEDIA, SAFE_MODE, \
TORRENT_DEFAULTDIR, NZB_DEFAULTDIR, REMOTEPATHS, LOG_ENV, PID_FILE, MYAPP, ACHANNELS, ACHANNELS2, ACHANNELS3, \
PLEXSSL, PLEXHOST, PLEXPORT, PLEXTOKEN, PLEXSEC
if __INITIALIZED__:
return False
if os.environ.has_key('NTM_LOGFILE'):
LOG_FILE = os.environ['NTM_LOGFILE']
LOG_DIR = os.path.split(LOG_FILE)[0]
if not makeDir(LOG_DIR):
print("No log folder, logging to screen only")
MYAPP = RunningProcess()
while MYAPP.alreadyrunning():
print("Waiting for existing session to end")
time.sleep(30)
try:
locale.setlocale(locale.LC_ALL, "")
SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
pass
# For OSes that are poorly configured I'll just randomly force UTF-8
if not SYS_ENCODING or SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
SYS_ENCODING = 'UTF-8'
if not hasattr(sys, "setdefaultencoding"):
reload(sys)
try:
# pylint: disable=E1101
# On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError
sys.setdefaultencoding(SYS_ENCODING)
except:
print 'Sorry, you MUST add the nzbToMedia folder to the PYTHONPATH environment variable'
print 'or find another way to force Python to use ' + SYS_ENCODING + ' for string encoding.'
if os.environ.has_key('NZBOP_SCRIPTDIR'):
sys.exit(NZBGET_POSTPROCESS_ERROR)
else:
sys.exit(1)
# init logging
logger.ntm_log_instance.initLogging()
# run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options.
if not config.migrate():
logger.error("Unable to migrate config file %s, exiting ..." % (CONFIG_FILE))
if os.environ.has_key('NZBOP_SCRIPTDIR'):
pass # We will try and read config from Environment.
else:
sys.exit(-1)
# run migrate to convert NzbGet data from old cfg style to new cfg style
if os.environ.has_key('NZBOP_SCRIPTDIR'):
CFG = config.addnzbget()
else: # load newly migrated config
logger.info("Loading config from [%s]" % (CONFIG_FILE))
CFG = config()
# Enable/Disable DEBUG Logging
LOG_DEBUG = int(CFG['General']['log_debug'])
LOG_DB = int(CFG['General']['log_db'])
LOG_ENV = int(CFG['General']['log_env'])
LOG_GIT = int(CFG['General']['log_git'])
if LOG_ENV:
for item in os.environ:
logger.info("%s: %s" % (item, os.environ[item]), "ENVIRONMENT")
# initialize the main SB database
nzbToMediaDB.upgradeDatabase(nzbToMediaDB.DBConnection(), mainDB.InitialSchema)
# Set Version and GIT variables
NZBTOMEDIA_VERSION = '10.11'
VERSION_NOTIFY = int(CFG['General']['version_notify'])
AUTO_UPDATE = int(CFG['General']['auto_update'])
GIT_REPO = 'nzbToMedia'
GIT_PATH = CFG['General']['git_path']
GIT_USER = CFG['General']['git_user'] or 'clinton-hall'
GIT_BRANCH = CFG['General']['git_branch'] or 'master'
FORCE_CLEAN = int(CFG["General"]["force_clean"])
FFMPEG_PATH = CFG["General"]["ffmpeg_path"]
CHECK_MEDIA = int(CFG["General"]["check_media"])
SAFE_MODE = int(CFG["General"]["safe_mode"])
# Check for updates via GitHUB
if versionCheck.CheckVersion().check_for_new_version():
if AUTO_UPDATE == 1:
logger.info("Auto-Updating nzbToMedia, Please wait ...")
updated = versionCheck.CheckVersion().update()
if updated:
# restart nzbToMedia
try:
del MYAPP
except: pass
restart()
else:
logger.error("Update wasn't successful, not restarting. Check your log for more information.")
# Set Current Version
logger.info(
'nzbToMedia Version:' + NZBTOMEDIA_VERSION + ' Branch:' + GIT_BRANCH + ' (' + platform.system() + ' ' + platform.release() + ')')
if int(CFG["WakeOnLan"]["wake"]) == 1:
WakeUp()
NZB_CLIENTAGENT = CFG["Nzb"]["clientAgent"] # sabnzbd
SABNZBDHOST = CFG["Nzb"]["sabnzbd_host"]
SABNZBDPORT = int(CFG["Nzb"]["sabnzbd_port"])
SABNZBDAPIKEY = CFG["Nzb"]["sabnzbd_apikey"]
NZB_DEFAULTDIR = CFG["Nzb"]["default_downloadDirectory"]
GROUPS = CFG["Custom"]["remove_group"]
if isinstance(GROUPS, str): GROUPS = GROUPS.split(',')
if GROUPS == ['']: GROUPS = None
TORRENT_CLIENTAGENT = CFG["Torrent"]["clientAgent"] # utorrent | deluge | transmission | rtorrent | vuze |other
USELINK = CFG["Torrent"]["useLink"] # no | hard | sym
OUTPUTDIRECTORY = CFG["Torrent"]["outputDirectory"] # /abs/path/to/complete/
TORRENT_DEFAULTDIR = CFG["Torrent"]["default_downloadDirectory"]
CATEGORIES = (CFG["Torrent"]["categories"]) # music,music_videos,pictures,software
NOFLATTEN = (CFG["Torrent"]["noFlatten"])
if isinstance(NOFLATTEN, str): NOFLATTEN = NOFLATTEN.split(',')
if isinstance(CATEGORIES, str): CATEGORIES = CATEGORIES.split(',')
DELETE_ORIGINAL = int(CFG["Torrent"]["deleteOriginal"])
UTORRENTWEBUI = CFG["Torrent"]["uTorrentWEBui"] # http://localhost:8090/gui/
UTORRENTUSR = CFG["Torrent"]["uTorrentUSR"] # mysecretusr
UTORRENTPWD = CFG["Torrent"]["uTorrentPWD"] # mysecretpwr
TRANSMISSIONHOST = CFG["Torrent"]["TransmissionHost"] # localhost
TRANSMISSIONPORT = int(CFG["Torrent"]["TransmissionPort"])
TRANSMISSIONUSR = CFG["Torrent"]["TransmissionUSR"] # mysecretusr
TRANSMISSIONPWD = CFG["Torrent"]["TransmissionPWD"] # mysecretpwr
DELUGEHOST = CFG["Torrent"]["DelugeHost"] # localhost
DELUGEPORT = int(CFG["Torrent"]["DelugePort"]) # 8084
DELUGEUSR = CFG["Torrent"]["DelugeUSR"] # mysecretusr
DELUGEPWD = CFG["Torrent"]["DelugePWD"] # mysecretpwr
REMOTEPATHS = CFG["Network"]["mount_points"] or []
if REMOTEPATHS:
if isinstance(REMOTEPATHS, list): REMOTEPATHS = ','.join(REMOTEPATHS) # fix in case this imported as list.
REMOTEPATHS = [ tuple(item.split(',')) for item in REMOTEPATHS.split('|') ] # /volume1/Public/,E:\|/volume2/share/,\\NAS\
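        # e.g. the inline example above parses to
        # [('/volume1/Public/', 'E:\'), ('/volume2/share/', '\\NAS\')]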
PLEXSSL = int(CFG["Plex"]["plex_ssl"])
PLEXHOST = CFG["Plex"]["plex_host"]
PLEXPORT = CFG["Plex"]["plex_port"]
PLEXTOKEN = CFG["Plex"]["plex_token"]
PLEXSEC = CFG["Plex"]["plex_sections"] or []
if PLEXSEC:
if isinstance(PLEXSEC, list): PLEXSEC = ','.join(PLEXSEC) # fix in case this imported as list.
PLEXSEC = [ tuple(item.split(',')) for item in PLEXSEC.split('|') ]
devnull = open(os.devnull, 'w')
try:
subprocess.Popen(["nice"], stdout=devnull, stderr=devnull).communicate()
NICENESS.extend(['nice', '-n%s' % (int(CFG["Posix"]["niceness"]))])
except: pass
try:
subprocess.Popen(["ionice"], stdout=devnull, stderr=devnull).communicate()
try:
NICENESS.extend(['ionice', '-c%s' % (int(CFG["Posix"]["ionice_class"]))])
except: pass
try:
if 'ionice' in NICENESS:
NICENESS.extend(['-n%s' % (int(CFG["Posix"]["ionice_classdata"]))])
else:
NICENESS.extend(['ionice', '-n%s' % (int(CFG["Posix"]["ionice_classdata"]))])
except: pass
except: pass
devnull.close()
COMPRESSEDCONTAINER = [re.compile('.r\d{2}$', re.I),
re.compile('.part\d+.rar$', re.I),
re.compile('.rar$', re.I)]
COMPRESSEDCONTAINER += [re.compile('%s$' % ext, re.I) for ext in CFG["Extensions"]["compressedExtensions"]]
MEDIACONTAINER = CFG["Extensions"]["mediaExtensions"]
AUDIOCONTAINER = CFG["Extensions"]["audioExtensions"]
METACONTAINER = CFG["Extensions"]["metaExtensions"] # .nfo,.sub,.srt
if isinstance(COMPRESSEDCONTAINER, str): COMPRESSEDCONTAINER = COMPRESSEDCONTAINER.split(',')
if isinstance(MEDIACONTAINER, str): MEDIACONTAINER = MEDIACONTAINER.split(',')
if isinstance(AUDIOCONTAINER, str): AUDIOCONTAINER = AUDIOCONTAINER.split(',')
if isinstance(METACONTAINER, str): METACONTAINER = METACONTAINER.split(',')
GETSUBS = int(CFG["Transcoder"]["getSubs"])
TRANSCODE = int(CFG["Transcoder"]["transcode"])
DUPLICATE = int(CFG["Transcoder"]["duplicate"])
CONCAT = int(CFG["Transcoder"]["concat"])
IGNOREEXTENSIONS = (CFG["Transcoder"]["ignoreExtensions"])
if isinstance(IGNOREEXTENSIONS, str): IGNOREEXTENSIONS = IGNOREEXTENSIONS.split(',')
OUTPUTFASTSTART = int(CFG["Transcoder"]["outputFastStart"])
GENERALOPTS = (CFG["Transcoder"]["generalOptions"])
if isinstance(GENERALOPTS, str): GENERALOPTS = GENERALOPTS.split(',')
if GENERALOPTS == ['']: GENERALOPTS = []
if not '-fflags' in GENERALOPTS: GENERALOPTS.append('-fflags')
if not '+genpts' in GENERALOPTS: GENERALOPTS.append('+genpts')
try:
OUTPUTQUALITYPERCENT = int(CFG["Transcoder"]["outputQualityPercent"])
except: pass
OUTPUTVIDEOPATH = CFG["Transcoder"]["outputVideoPath"]
PROCESSOUTPUT = int(CFG["Transcoder"]["processOutput"])
ALANGUAGE = CFG["Transcoder"]["audioLanguage"]
AINCLUDE = int(CFG["Transcoder"]["allAudioLanguages"])
SLANGUAGES = CFG["Transcoder"]["subLanguages"]
if isinstance(SLANGUAGES, str): SLANGUAGES = SLANGUAGES.split(',')
if SLANGUAGES == ['']: SLANGUAGES = []
SINCLUDE = int(CFG["Transcoder"]["allSubLanguages"])
SEXTRACT = int(CFG["Transcoder"]["extractSubs"])
SEMBED = int(CFG["Transcoder"]["embedSubs"])
SUBSDIR = CFG["Transcoder"]["externalSubDir"]
VEXTENSION = CFG["Transcoder"]["outputVideoExtension"].strip()
VCODEC = CFG["Transcoder"]["outputVideoCodec"].strip()
VCODEC_ALLOW = CFG["Transcoder"]["VideoCodecAllow"].strip()
if isinstance(VCODEC_ALLOW, str): VCODEC_ALLOW = VCODEC_ALLOW.split(',')
if VCODEC_ALLOW == ['']: VCODEC_ALLOW = []
VPRESET = CFG["Transcoder"]["outputVideoPreset"].strip()
try:
VFRAMERATE = float(CFG["Transcoder"]["outputVideoFramerate"].strip())
except: pass
try:
VCRF = int(CFG["Transcoder"]["outputVideoCRF"].strip())
except: pass
try:
VLEVEL = CFG["Transcoder"]["outputVideoLevel"].strip()
except: pass
try:
VBITRATE = int((CFG["Transcoder"]["outputVideoBitrate"].strip()).replace('k','000'))
except: pass
VRESOLUTION = CFG["Transcoder"]["outputVideoResolution"]
ACODEC = CFG["Transcoder"]["outputAudioCodec"].strip()
ACODEC_ALLOW = CFG["Transcoder"]["AudioCodecAllow"].strip()
if isinstance(ACODEC_ALLOW, str): ACODEC_ALLOW = ACODEC_ALLOW.split(',')
if ACODEC_ALLOW == ['']: ACODEC_ALLOW = []
try:
ACHANNELS = int(CFG["Transcoder"]["outputAudioChannels"].strip())
except: pass
try:
ABITRATE = int((CFG["Transcoder"]["outputAudioBitrate"].strip()).replace('k','000'))
except: pass
ACODEC2 = CFG["Transcoder"]["outputAudioTrack2Codec"].strip()
ACODEC2_ALLOW = CFG["Transcoder"]["AudioCodec2Allow"].strip()
if isinstance(ACODEC2_ALLOW, str): ACODEC2_ALLOW = ACODEC2_ALLOW.split(',')
if ACODEC2_ALLOW == ['']: ACODEC2_ALLOW = []
try:
ACHANNELS2 = int(CFG["Transcoder"]["outputAudioTrack2Channels"].strip())
except: pass
try:
ABITRATE2 = int((CFG["Transcoder"]["outputAudioTrack2Bitrate"].strip()).replace('k','000'))
except: pass
ACODEC3 = CFG["Transcoder"]["outputAudioOtherCodec"].strip()
ACODEC3_ALLOW = CFG["Transcoder"]["AudioOtherCodecAllow"].strip()
if isinstance(ACODEC3_ALLOW, str): ACODEC3_ALLOW = ACODEC3_ALLOW.split(',')
if ACODEC3_ALLOW == ['']: ACODEC3_ALLOW = []
try:
ACHANNELS3 = int(CFG["Transcoder"]["outputAudioOtherChannels"].strip())
except: pass
try:
ABITRATE3 = int((CFG["Transcoder"]["outputAudioOtherBitrate"].strip()).replace('k','000'))
except: pass
SCODEC = CFG["Transcoder"]["outputSubtitleCodec"].strip()
BURN = int(CFG["Transcoder"]["burnInSubtitle"].strip())
DEFAULTS = CFG["Transcoder"]["outputDefault"].strip()
HWACCEL = int(CFG["Transcoder"]["hwAccel"])
allow_subs = ['.mkv','.mp4', '.m4v', 'asf', 'wma', 'wmv']
codec_alias = {
'libx264':['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'],
'libmp3lame':['libmp3lame', 'mp3'],
'libfaac':['libfaac', 'aac', 'faac']
}
transcode_defaults = {
'iPad':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':None, 'ACHANNELS':2,
'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'iPad-1080p':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':'1920:1080','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':None, 'ACHANNELS':2,
'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'iPad-720p':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':'1280:720','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':None, 'ACHANNELS':2,
'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'Apple-TV':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':'1280:720','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'ac3','ACODEC_ALLOW':['ac3'],'ABITRATE':None, 'ACHANNELS':6,
'ACODEC2':'aac','ACODEC2_ALLOW':['libfaac'],'ABITRATE2':None, 'ACHANNELS2':2,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'iPod':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':'1280:720','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2,
'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'iPhone':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':'460:320','VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2,
'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'PS3':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'ac3','ACODEC_ALLOW':['ac3'],'ABITRATE':None, 'ACHANNELS':6,
'ACODEC2':'aac','ACODEC2_ALLOW':['libfaac'],'ABITRATE2':None, 'ACHANNELS2':2,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'xbox':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'ac3','ACODEC_ALLOW':['ac3'],'ABITRATE':None, 'ACHANNELS':6,
'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'Roku-480p':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2,
'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'Roku-720p':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':128000, 'ACHANNELS':2,
'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'Roku-1080p':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'],
'ACODEC':'aac','ACODEC_ALLOW':['libfaac'],'ABITRATE':160000, 'ACHANNELS':2,
'ACODEC2':'ac3','ACODEC2_ALLOW':['ac3'],'ABITRATE2':None, 'ACHANNELS2':6,
'ACODEC3':None,'ACODEC3_ALLOW':[],'ABITRATE3':None, 'ACHANNELS3':None,
'SCODEC':'mov_text'
},
'mkv':{
'VEXTENSION':'.mkv','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':None,'VLEVEL':None,
'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'],
'ACODEC':'dts','ACODEC_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE':None, 'ACHANNELS':8,
'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None,
'ACODEC3':'ac3','ACODEC3_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE3':None, 'ACHANNELS3':8,
'SCODEC':'mov_text'
},
'mp4-scene-release':{
'VEXTENSION':'.mp4','VCODEC':'libx264','VPRESET':None,'VFRAMERATE':None,'VBITRATE':None,'VCRF':19,'VLEVEL':'3.1',
'VRESOLUTION':None,'VCODEC_ALLOW':['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'],
'ACODEC':'dts','ACODEC_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE':None, 'ACHANNELS':8,
'ACODEC2':None,'ACODEC2_ALLOW':[],'ABITRATE2':None, 'ACHANNELS2':None,
'ACODEC3':'ac3','ACODEC3_ALLOW':['libfaac', 'dts', 'ac3', 'mp2', 'mp3'],'ABITRATE3':None, 'ACHANNELS3':8,
'SCODEC':'mov_text'
}
}
if DEFAULTS and DEFAULTS in transcode_defaults:
VEXTENSION = transcode_defaults[DEFAULTS]['VEXTENSION']
VCODEC = transcode_defaults[DEFAULTS]['VCODEC']
VPRESET = transcode_defaults[DEFAULTS]['VPRESET']
VFRAMERATE = transcode_defaults[DEFAULTS]['VFRAMERATE']
VBITRATE = transcode_defaults[DEFAULTS]['VBITRATE']
VRESOLUTION = transcode_defaults[DEFAULTS]['VRESOLUTION']
VCRF = transcode_defaults[DEFAULTS]['VCRF']
VLEVEL = transcode_defaults[DEFAULTS]['VLEVEL']
VCODEC_ALLOW = transcode_defaults[DEFAULTS]['VCODEC_ALLOW']
ACODEC = transcode_defaults[DEFAULTS]['ACODEC']
ACODEC_ALLOW = transcode_defaults[DEFAULTS]['ACODEC_ALLOW']
ACHANNELS = transcode_defaults[DEFAULTS]['ACHANNELS']
ABITRATE = transcode_defaults[DEFAULTS]['ABITRATE']
ACODEC2 = transcode_defaults[DEFAULTS]['ACODEC2']
ACODEC2_ALLOW = transcode_defaults[DEFAULTS]['ACODEC2_ALLOW']
ACHANNELS2 = transcode_defaults[DEFAULTS]['ACHANNELS2']
ABITRATE2 = transcode_defaults[DEFAULTS]['ABITRATE2']
ACODEC3 = transcode_defaults[DEFAULTS]['ACODEC3']
ACODEC3_ALLOW = transcode_defaults[DEFAULTS]['ACODEC3_ALLOW']
ACHANNELS3 = transcode_defaults[DEFAULTS]['ACHANNELS3']
ABITRATE3 = transcode_defaults[DEFAULTS]['ABITRATE3']
SCODEC = transcode_defaults[DEFAULTS]['SCODEC']
transcode_defaults = {} # clear memory
    if DEFAULTS in ['mp4-scene-release'] and not OUTPUTQUALITYPERCENT:  # check the preset name (transcode_defaults was cleared above)
OUTPUTQUALITYPERCENT = 100
if VEXTENSION in allow_subs:
ALLOWSUBS = 1
if not VCODEC_ALLOW and VCODEC: VCODEC_ALLOW.extend([VCODEC])
for codec in VCODEC_ALLOW:
if codec in codec_alias:
extra = [ item for item in codec_alias[codec] if item not in VCODEC_ALLOW ]
VCODEC_ALLOW.extend(extra)
if not ACODEC_ALLOW and ACODEC: ACODEC_ALLOW.extend([ACODEC])
for codec in ACODEC_ALLOW:
if codec in codec_alias:
extra = [ item for item in codec_alias[codec] if item not in ACODEC_ALLOW ]
ACODEC_ALLOW.extend(extra)
if not ACODEC2_ALLOW and ACODEC2: ACODEC2_ALLOW.extend([ACODEC2])
for codec in ACODEC2_ALLOW:
if codec in codec_alias:
extra = [ item for item in codec_alias[codec] if item not in ACODEC2_ALLOW ]
ACODEC2_ALLOW.extend(extra)
if not ACODEC3_ALLOW and ACODEC3: ACODEC3_ALLOW.extend([ACODEC3])
for codec in ACODEC3_ALLOW:
if codec in codec_alias:
extra = [ item for item in codec_alias[codec] if item not in ACODEC3_ALLOW ]
ACODEC3_ALLOW.extend(extra)
codec_alias = {} # clear memory
PASSWORDSFILE = CFG["passwords"]["PassWordFile"]
# Setup FFMPEG, FFPROBE and SEVENZIP locations
if platform.system() == 'Windows':
FFMPEG = os.path.join(FFMPEG_PATH, 'ffmpeg.exe')
FFPROBE = os.path.join(FFMPEG_PATH, 'ffprobe.exe')
SEVENZIP = os.path.join(PROGRAM_DIR, 'core', 'extractor', 'bin', platform.machine(), '7z.exe')
        if not (os.path.isfile(FFMPEG)):  # not found; disable transcoding
FFMPEG = None
logger.warning("Failed to locate ffmpeg.exe. Transcoding disabled!")
logger.warning("Install ffmpeg with x264 support to enable this feature ...")
if not (os.path.isfile(FFPROBE)):
FFPROBE = None
if CHECK_MEDIA:
logger.warning("Failed to locate ffprobe.exe. Video corruption detection disabled!")
logger.warning("Install ffmpeg with x264 support to enable this feature ...")
else:
try:
SEVENZIP = subprocess.Popen(['which', '7z'], stdout=subprocess.PIPE).communicate()[0].strip()
except: pass
if not SEVENZIP:
try:
SEVENZIP = subprocess.Popen(['which', '7zr'], stdout=subprocess.PIPE).communicate()[0].strip()
except: pass
if not SEVENZIP:
try:
SEVENZIP = subprocess.Popen(['which', '7za'], stdout=subprocess.PIPE).communicate()[0].strip()
except: pass
if not SEVENZIP:
SEVENZIP = None
logger.warning("Failed to locate 7zip. Transcosing of disk images and extraction of .7z files will not be possible!")
if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffmpeg')) or os.access(os.path.join(FFMPEG_PATH, 'ffmpeg'), os.X_OK):
FFMPEG = os.path.join(FFMPEG_PATH, 'ffmpeg')
elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avconv')) or os.access(os.path.join(FFMPEG_PATH, 'avconv'), os.X_OK):
FFMPEG = os.path.join(FFMPEG_PATH, 'avconv')
else:
try:
FFMPEG = subprocess.Popen(['which', 'ffmpeg'], stdout=subprocess.PIPE).communicate()[0].strip()
except: pass
if not FFMPEG:
try:
FFMPEG = subprocess.Popen(['which', 'avconv'], stdout=subprocess.PIPE).communicate()[0].strip()
except: pass
if not FFMPEG:
FFMPEG = None
logger.warning("Failed to locate ffmpeg. Transcoding disabled!")
logger.warning("Install ffmpeg with x264 support to enable this feature ...")
if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffprobe')) or os.access(os.path.join(FFMPEG_PATH, 'ffprobe'), os.X_OK):
FFPROBE = os.path.join(FFMPEG_PATH, 'ffprobe')
elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avprobe')) or os.access(os.path.join(FFMPEG_PATH, 'avprobe'), os.X_OK):
FFPROBE = os.path.join(FFMPEG_PATH, 'avprobe')
else:
try:
FFPROBE = subprocess.Popen(['which', 'ffprobe'], stdout=subprocess.PIPE).communicate()[0].strip()
except: pass
if not FFPROBE:
try:
FFPROBE = subprocess.Popen(['which', 'avprobe'], stdout=subprocess.PIPE).communicate()[0].strip()
except: pass
if not FFPROBE:
FFPROBE = None
if CHECK_MEDIA:
logger.warning("Failed to locate ffprobe. Video corruption detection disabled!")
logger.warning("Install ffmpeg with x264 support to enable this feature ...")
    # check for a script-defined section; if None, fall back to all enabled sections
SECTIONS = CFG[tuple(x for x in CFG if CFG[x].sections and CFG[x].isenabled()) if not section else (section,)]
for section,subsections in SECTIONS.items():
CATEGORIES.extend([subsection for subsection in subsections if CFG[section][subsection].isenabled()])
CATEGORIES = list(set(CATEGORIES))
# create torrent class
TORRENT_CLASS = create_torrent_class(TORRENT_CLIENTAGENT)
    # finished initializing
return True
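# Typical entry-point usage from the nzbTo* wrapper scripts (a sketch;
# initialize() returns False if the module was already initialized):
#
#     import core
#     core.initialize()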
def restart():
install_type = versionCheck.CheckVersion().install_type
status = 0
popen_list = []
if install_type in ('git', 'source'):
popen_list = [sys.executable, APP_FILENAME]
if popen_list:
popen_list += SYS_ARGV
logger.log(u"Restarting nzbToMedia with " + str(popen_list))
logger.close()
p = subprocess.Popen(popen_list, cwd=os.getcwd())
p.wait()
status = p.returncode
os._exit(status)
| grantsewell/nzbToMedia | core/__init__.py | Python | gpl-3.0 | 33,961 |
# -*- coding: utf-8 -*-
# Config file handling module
# Copyright (C) 2014 Yury Gavrilov <[email protected]>
# This file is part of VKBuddy.
# VKBuddy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# VKBuddy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with VKBuddy. If not, see <http://www.gnu.org/licenses/>.
import yaml
class IncorrectConfig(Exception): pass
class BareConfig:
def __init__(self):
self.config = {}
self.required_list = []
def add_parameter(self, name, required=False, description='',
default=None, typ=str):
if required:
self.required_list.append(name)
self.config[name] = {
'description': description,
'default': default,
'type': typ
}
class Config:
def __init__(self, filename, bare):
        # safe_load avoids constructing arbitrary Python objects from the YAML
        with open(filename, 'r') as cfile:
            self.__config = yaml.safe_load(cfile)
self.bare = bare
if not self.__config:
self.__config = {}
for param in bare.required_list:
            if param not in self.__config:
raise IncorrectConfig(
'Required parameter \'{}\' not found'.format(param)
)
def __getitem__(self, item):
if item in self.__config:
if item in self.bare.config:
return self.bare.config[item]['type'](self.__config[item])
else:
return self.__config[item]
elif item in self.bare.config:
return self.bare.config[item]['default']
else:
raise KeyError(item)
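# Illustrative usage sketch (the parameter names here are hypothetical):
#
#     bare = BareConfig()
#     bare.add_parameter('token', required=True, description='API token')
#     bare.add_parameter('debug', default=False, typ=bool)
#     cfg = Config('vkbuddy.yaml', bare)
#     cfg['token']   # value from file, coerced via typ (required at load time)
#     cfg['debug']   # falls back to the declared default when absent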
| gavyur/vkbuddy | config.py | Python | gpl-3.0 | 2,070 |
"""
Copyright (C) 2017 João Barroca <[email protected]>
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Importing the libraries
from domainIndependent import *
from operator import itemgetter
import itertools
# General Graph Search (with goal-state verification only after choosing a leaf node!)
def gs(problem, strategy):
node = {'state': problem.initialState, 'parent': [], 'actions': [], 'g': 0, 'f': 0}
frontier = [node]
exploredSet = []
iterCounter = itertools.count(start = 0)
nodesCounter = itertools.count(start = 1)
iteration = 0
generatedNodes = 1
while True:
        # when there are no more nodes to explore and no solution has been found, return failure
if not frontier:
iteration = next(iterCounter)
return None, iteration, len(frontier), generatedNodes
        # choose the node with the lowest cost: sort in descending order of f
        # and take the last element, so that among ties we pick the node that
        # was most recently appended to the frontier
sortedFrontier = sorted(frontier, key = itemgetter('f'), reverse = True)
node = sortedFrontier[-1]
# and remove it from the frontier
frontier.remove(node)
# checks if the chosen node its a goal state before expand it
if problem.goalState(node) is True:
iteration = next(iterCounter)
return execute(node), iteration, len(frontier), generatedNodes
iteration = next(iterCounter)
# Debugging process
#gsDebug(iteration, node, frontier)
# adds the node being explored to the explored set
exploredSet.append(node)
# expand the node and get the child nodes
childNodes = childNodesGetter(problem, node, strategy)
for child in childNodes:
generatedNodes = next(nodesCounter)
# checks if the child node has already been explored or if it is already in the frontier
if not inExploredList(problem, child, exploredSet) and not inFrontier(problem, child, frontier):
frontier.append(child)
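# Note: re-sorting the frontier on every iteration costs O(n log n) per
# expansion. A binary heap would reduce this to O(log n) per push/pop; a
# minimal sketch (tie-breaking then becomes oldest-first rather than the
# newest-first behaviour above):
#
#     import heapq, itertools
#     tie = itertools.count()
#     heap = [(node['f'], next(tie), node)]
#     f, _, node = heapq.heappop(heap)                  # lowest-cost node
#     heapq.heappush(heap, (child['f'], next(tie), child))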
| joaobarroca93/AI | Space-Station-Assembly/search.py | Python | gpl-3.0 | 2,739 |
"""
Unit tests for nyx.panel.graph.
"""
import datetime
import unittest
import stem.control
import nyx.curses
import nyx.panel.graph
import test
from test import require_curses
from mock import patch
EXPECTED_BLANK_GRAPH = """
Download:
0 b
0 b
5s 10 15
""".rstrip()
EXPECTED_ACCOUNTING = """
Accounting (awake) Time to reset: 01:02
37.7 Kb / 842.0 Kb 16.0 Kb / 74.1 Kb
""".strip()
EXPECTED_GRAPH = """
Download:
5 Kb *
*
2 Kb ** *
* ****
0 b *********
5s 10 15
""".rstrip()
class TestGraphPanel(unittest.TestCase):
def test_x_axis_labels(self):
test_inputs = {
0: {},
7: {},
10: {5: '25s'},
15: {5: '25s', 10: '50'},
20: {5: '25s', 10: '50', 15: '1m'},
25: {5: '25s', 10: '50', 15: '1m', 20: '1.6'},
45: {5: '25s', 10: '50', 15: '1m', 20: '1.6', 25: '2.0', 30: '2.5', 35: '2.9', 40: '3.3'},
80: {10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'}, # spaced more since wide
}
for width, expected in test_inputs.items():
self.assertEqual(expected, nyx.panel.graph._x_axis_labels(nyx.panel.graph.Interval.FIVE_SECONDS, width))
test_inputs = {
nyx.panel.graph.Interval.EACH_SECOND: {
10: '10s', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1m', 70: '1.1'
}, nyx.panel.graph.Interval.FIVE_SECONDS: {
10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'
}, nyx.panel.graph.Interval.THIRTY_SECONDS: {
10: '5m', 20: '10', 30: '15', 40: '20', 50: '25', 60: '30', 70: '35'
}, nyx.panel.graph.Interval.MINUTELY: {
10: '10m', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1h', 70: '1.1'
}, nyx.panel.graph.Interval.FIFTEEN_MINUTE: {
10: '2h', 20: '5', 30: '7', 40: '10', 50: '12', 60: '15', 70: '17'
}, nyx.panel.graph.Interval.THIRTY_MINUTE: {
10: '5h', 20: '10', 30: '15', 40: '20', 50: '1d', 60: '1.2', 70: '1.4'
}, nyx.panel.graph.Interval.HOURLY: {
10: '10h', 20: '20', 30: '1d', 40: '1.6', 50: '2.0', 60: '2.5', 70: '2.9'
}, nyx.panel.graph.Interval.DAILY: {
10: '10d', 20: '20', 30: '30', 40: '40', 50: '50', 60: '60', 70: '70'
},
}
for interval, expected in test_inputs.items():
self.assertEqual(expected, nyx.panel.graph._x_axis_labels(interval, 80))
def test_y_axis_labels(self):
data = nyx.panel.graph.ConnectionStats()
# check with both even and odd height since that determines an offset in the middle
self.assertEqual({2: '10', 4: '7', 6: '5', 9: '2', 11: '0'}, nyx.panel.graph._y_axis_labels(12, data.primary, 0, 10))
self.assertEqual({2: '10', 4: '6', 6: '3', 8: '0'}, nyx.panel.graph._y_axis_labels(9, data.primary, 0, 10))
# check where the min and max are the same
self.assertEqual({2: '0', 11: '0'}, nyx.panel.graph._y_axis_labels(12, data.primary, 0, 0))
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_subgraph_blank(self, tor_controller_mock):
tor_controller_mock().get_info.return_value = None
data = nyx.panel.graph.BandwidthStats()
rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
self.assertEqual(EXPECTED_BLANK_GRAPH, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_subgraph(self, tor_controller_mock):
tor_controller_mock().get_info.return_value = '543,543 421,421 551,551 710,710 200,200 175,175 188,188 250,250 377,377'
data = nyx.panel.graph.BandwidthStats()
rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
self.assertEqual(EXPECTED_GRAPH, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_accounting_stats(self, tor_controller_mock):
tor_controller_mock().is_alive.return_value = True
accounting_stat = stem.control.AccountingStats(
1410723598.276578,
'awake',
datetime.datetime(2014, 9, 14, 19, 41),
62,
4837, 102944, 107781,
2050, 7440, 9490,
)
rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, accounting_stat)
self.assertEqual(EXPECTED_ACCOUNTING, rendered.content)
@require_curses
@patch('nyx.panel.graph.tor_controller')
def test_draw_accounting_stats_disconnected(self, tor_controller_mock):
tor_controller_mock().is_alive.return_value = False
rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, None)
self.assertEqual('Accounting: Connection Closed...', rendered.content)
| sammyshj/nyx | test/panel/graph.py | Python | gpl-3.0 | 4,813 |
import time
import bluetooth
from h7PolarDataPoints import h7PolarDataPoint
from h7PolarDataPointReader import h7PolarDataPointReader
if __name__ == '__main__':
    # bind the instance to a distinct name so it does not shadow the class
    reader = h7PolarDataPointReader()
    reader.start()

    while True:
        dataPoint = reader.readNextDataPoint()
        print(dataPoint)
| lukefrasera/fatigueBSN | scripts/include/test_h7PolarRead.py | Python | gpl-3.0 | 363 |
#!/usr/bin/env python
#
# Copyright (C) 2017 - Massachusetts Institute of Technology (MIT)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This code tests module imports from SEAS.
"""
import os
import sys
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(DIR, '../..'))
#plotting
import SEAS_Utils.common_utils.data_plotter as plt
#timer
from SEAS_Utils.common_utils.timer import simple_timer
#dbm
import SEAS_Utils.common_utils.db_management2 as dbm
#config
import SEAS_Utils.common_utils.configurable as config
#DIR
from SEAS_Utils.common_utils.DIRs import Simulation_DB
#constants
from SEAS_Utils.common_utils.constants import *
if __name__ == "__main__":
pass
| azariven/BioSig_SEAS | bin/test/test_SEAS_import.py | Python | gpl-3.0 | 1,357 |
# -*- coding: utf-8 -*-
#
# This file is part of the osmxapi Python module.
#
# osmxapi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# osmxapi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with osmxapi. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright: © 2009-2010 Etienne Chové <[email protected]>
# Copyright: © 2012 Morten Kjeldgaard <[email protected]>
# License: GPL-3+
__version__ = '0.1'
import xml.dom.minidom
import dom, http
import os.path
class OsmXapi:
    def __init__(self, api="www.overpass-api.de", base="api", debug=False):
self.debug = debug
self.base = os.path.join('/', base, 'xapi')
self.http = http.Http(api, debug)
#.
def nodeGet(self, query=None, raw=None):
""" Returns NodeData for query """
if not query:
return None
#.
uri = self.base+"?node"+repr(query)
data = self.http.get(uri)
if raw: return data
if not data: return data
data = xml.dom.minidom.parseString(data)
data = data.getElementsByTagName("osm")[0].getElementsByTagName("node")
nodelist = []
for n in data:
nodelist.append(dom.parseNode(n))
#.
return nodelist
#.
def wayGet(self, query=None, raw=None):
"""Returns way data for query"""
if not query:
return None
#.
uri = self.base+"?way"+repr(query)
data = self.http.get(uri)
if raw: return data
if not data: return data
data = xml.dom.minidom.parseString(data)
data = data.getElementsByTagName("osm")[0].getElementsByTagName("way")
waylist = []
for w in data:
waylist.append(dom.parseWay(w))
#.
return waylist
#.
def relationGet(self, query=None, raw=None):
"""Return relation data for query"""
uri = self.base+"?relation"+repr(query)
data = self.http.get(uri)
if raw: return data
data = xml.dom.minidom.parseString(data)
data = data.getElementsByTagName("osm")[0].getElementsByTagName("relation")
relationlist = []
for r in data:
relationlist.append(dom.parseRelation(r))
#.
return relationlist
#.
def anyGet(self, query=None, raw=None):
"""Return any data for query"""
uri = self.base+"?*"+repr(query)
data = self.http.get(uri)
if raw: return data
data = xml.dom.minidom.parseString(data)
anydict = {}
for e in "node", "way", "relation":
d = data.getElementsByTagName("osm")[0].getElementsByTagName(e)
anylist = []
for a in d:
if e == "node":
anylist.append(dom.parseNode(a))
#.
if e == "way":
anylist.append(dom.parseWay(a))
#.
if e == "relation":
anylist.append(dom.parseRelation(a))
#.
#.
anydict[e] = anylist
#.
return anydict
#.
#.
if __name__ == '__main__':
from xapiquery import XapiQuery
xapi = OsmXapi(debug = True)
    uniparken = XapiQuery(lats=56.1618032,
                          lonw=10.1891327,
                          latn=56.1719343,
                          lone=10.212822)
uniparken[u'amenity'] = u'parking'
N = xapi.nodeGet(uniparken)
    print(N)
W = xapi.wayGet(uniparken)
    print(W)
A = xapi.anyGet(uniparken)
    print(A)
#.
| jmwenda/osmxapi | osmxapi/__init__.py | Python | gpl-3.0 | 4,014 |
# Natural Language Toolkit: Parsers
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
NLTK Parsers
Classes and interfaces for producing tree structures that represent
the internal organization of a text. This task is known as "parsing"
the text, and the resulting tree structures are called the text's
"parses". Typically, the text is a single sentence, and the tree
structure represents the syntactic structure of the sentence.
However, parsers can also be used in other domains. For example,
parsers can be used to derive the morphological structure of the
morphemes that make up a word, or to derive the discourse structure
for a set of utterances.
Sometimes, a single piece of text can be represented by more than one
tree structure. Texts represented by more than one tree structure are
called "ambiguous" texts. Note that there are actually two ways in
which a text can be ambiguous:
- The text has multiple correct parses.
- There is not enough information to decide which of several
candidate parses is correct.
However, the parser module does *not* distinguish these two types of
ambiguity.
The parser module defines ``ParserI``, a standard interface for parsing
texts; and two simple implementations of that interface,
``ShiftReduceParser`` and ``RecursiveDescentParser``. It also contains
three sub-modules for specialized kinds of parsing:
- ``nltk.parser.chart`` defines chart parsing, which uses dynamic
programming to efficiently parse texts.
- ``nltk.parser.probabilistic`` defines probabilistic parsing, which
associates a probability with each parse.
"""
from nltk.parse.api import ParserI
from nltk.parse.chart import (ChartParser, SteppingChartParser, TopDownChartParser,
BottomUpChartParser, BottomUpLeftCornerChartParser,
LeftCornerChartParser)
from nltk.parse.featurechart import (FeatureChartParser, FeatureTopDownChartParser,
FeatureBottomUpChartParser,
FeatureBottomUpLeftCornerChartParser)
from nltk.parse.earleychart import (IncrementalChartParser, EarleyChartParser,
IncrementalTopDownChartParser,
IncrementalBottomUpChartParser,
IncrementalBottomUpLeftCornerChartParser,
IncrementalLeftCornerChartParser,
FeatureIncrementalChartParser,
FeatureEarleyChartParser,
FeatureIncrementalTopDownChartParser,
FeatureIncrementalBottomUpChartParser,
FeatureIncrementalBottomUpLeftCornerChartParser)
from nltk.parse.pchart import (BottomUpProbabilisticChartParser, InsideChartParser,
RandomChartParser, UnsortedChartParser,
LongestChartParser)
from nltk.parse.recursivedescent import (RecursiveDescentParser,
SteppingRecursiveDescentParser)
from nltk.parse.shiftreduce import (ShiftReduceParser, SteppingShiftReduceParser)
from nltk.parse.util import load_parser, TestGrammar, extract_test_sentences
from nltk.parse.viterbi import ViterbiParser
from nltk.parse.dependencygraph import DependencyGraph
from nltk.parse.projectivedependencyparser import (ProjectiveDependencyParser,
ProbabilisticProjectiveDependencyParser)
from nltk.parse.nonprojectivedependencyparser import (NonprojectiveDependencyParser,
NaiveBayesDependencyScorer,
ProbabilisticNonprojectiveParser)
from nltk.parse.malt import MaltParser
from nltk.parse.evaluate import DependencyEvaluator
from nltk.parse.transitionparser import TransitionParser
from nltk.parse.bllip import BllipParser
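# Illustrative sketch of one of the parsers re-exported above (the toy
# grammar is made up for demonstration):
#
#     import nltk
#     grammar = nltk.CFG.fromstring("""
#         S -> NP VP
#         NP -> 'John' | 'Mary'
#         VP -> V NP
#         V -> 'loves'
#     """)
#     parser = RecursiveDescentParser(grammar)
#     for tree in parser.parse(['John', 'loves', 'Mary']):
#         print(tree)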
| adazey/Muzez | libs/nltk/parse/__init__.py | Python | gpl-3.0 | 4,269 |
#!/usr/bin/python
"""
@author: Manuel F Martinez <[email protected]>
@organization: Bashlinux
@copyright: Copyright (c) 2012 Bashlinux
@license: GNU GPL v3
"""
import usb.core
import usb.util
import serial
import socket
from .escpos import *
from .constants import *
from .exceptions import *
class Usb(Escpos):
""" Define USB printer """
def __init__(self, idVendor, idProduct, interface=0, in_ep=0x82, out_ep=0x01):
"""
@param idVendor : Vendor ID
@param idProduct : Product ID
@param interface : USB device interface
@param in_ep : Input end point
@param out_ep : Output end point
"""
self.idVendor = idVendor
self.idProduct = idProduct
self.interface = interface
self.in_ep = in_ep
self.out_ep = out_ep
self.open()
def open(self):
""" Search device on USB tree and set is as escpos device """
self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
        if self.device is None:
            # fail fast here rather than raising AttributeError on None below
            raise IOError("Cable isn't plugged in")
check_driver = None
try:
check_driver = self.device.is_kernel_driver_active(0)
except NotImplementedError:
pass
if check_driver is None or check_driver:
try:
self.device.detach_kernel_driver(0)
except usb.core.USBError as e:
if check_driver is not None:
print("Could not detatch kernel driver: %s" % str(e))
try:
self.device.set_configuration()
self.device.reset()
except usb.core.USBError as e:
print("Could not set configuration: %s" % str(e))
def _raw(self, msg):
""" Print any command sent in raw format """
self.device.write(self.out_ep, msg, self.interface)
def __del__(self):
""" Release USB interface """
if self.device:
usb.util.dispose_resources(self.device)
self.device = None
class Serial(Escpos):
""" Define Serial printer """
def __init__(self, devfile="/dev/ttyS0", baudrate=9600, bytesize=8, timeout=1,
parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
                 xonxoff=False, dsrdtr=True):
"""
@param devfile : Device file under dev filesystem
@param baudrate : Baud rate for serial transmission
@param bytesize : Serial buffer size
@param timeout : Read/Write timeout
@param parity : Parity checking
@param stopbits : Number of stop bits
@param xonxoff : Software flow control
@param dsrdtr : Hardware flow control (False to enable RTS/CTS)
"""
self.devfile = devfile
self.baudrate = baudrate
self.bytesize = bytesize
self.timeout = timeout
self.parity = parity
self.stopbits = stopbits
self.xonxoff = xonxoff
self.dsrdtr = dsrdtr
self.open()
def open(self):
""" Setup serial port and set is as escpos device """
self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate,
bytesize=self.bytesize, parity=self.parity,
stopbits=self.stopbits, timeout=self.timeout,
xonxoff=self.xonxoff, dsrdtr=self.dsrdtr)
if self.device is not None:
print("Serial printer enabled")
else:
print("Unable to open serial printer on: %s" % self.devfile)
def _raw(self, msg):
""" Print any command sent in raw format """
self.device.write(msg)
def __del__(self):
""" Close Serial interface """
if self.device is not None:
self.device.close()
class Network(Escpos):
""" Define Network printer """
    def __init__(self, host, port=9100):
"""
@param host : Printer's hostname or IP address
@param port : Port to write to
"""
self.host = host
self.port = port
self.open()
def open(self):
""" Open TCP socket and set it as escpos device """
self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.device.connect((self.host, self.port))
if self.device is None:
print("Could not open socket for %s" % self.host)
def _raw(self, msg):
""" Print any command sent in raw format """
if isinstance(msg, str):
self.device.send(msg.encode())
else:
self.device.send(msg)
def __del__(self):
""" Close TCP connection """
self.device.close()
def close(self):
self.__del__()
class File(Escpos):
""" Define Generic file printer """
def __init__(self, devfile="/dev/usb/lp0"):
"""
@param devfile : Device file under dev filesystem
"""
self.devfile = devfile
self.open()
def open(self):
""" Open system file """
self.device = open(self.devfile, "wb")
if self.device is None:
print("Could not open the specified file %s" % self.devfile)
def _raw(self, msg):
""" Print any command sent in raw format """
        self.device.write(msg)
def __del__(self):
""" Close system file """
self.device.close()
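# Illustrative usage (the address is made up, and text()/cut() are helpers
# assumed from the wider Escpos base class rather than defined in this file):
#
#     printer = Network("192.168.1.100")
#     printer.text("Hello, world\n")
#     printer.cut()
#     printer.close()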
| lrks/python-escpos | escpos/printer.py | Python | gpl-3.0 | 5,425 |
#!/usr/bin/env python
"""
crate_anon/linkage/bulk_hash.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal ([email protected]).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
Tool to hash multiple IDs from the command line.
Test code to look at different types of digest:
.. code-block:: python
import hashlib
import hmac
msg = "This is an ex-parrot!"
key = "voom"
key_bytes = str(key).encode('utf-8')
msg_bytes = str(msg).encode('utf-8')
digestmod = hashlib.sha256
hmac_obj = hmac.new(key=key_bytes, msg=msg_bytes, digestmod=digestmod)
# These are the two default kinds of digest:
print(hmac_obj.digest()) # 8-bit binary
print(hmac_obj.hexdigest()) # hexadecimal
# Hex carries 4 bits per character. There are other possibilities,
# notably:
# - Base64 with 6 bits per character;
# - Base32 with 5 bits per character.
"""
import argparse
import logging
from typing import Optional, TextIO
from cardinal_pythonlib.file_io import (
gen_noncomment_lines,
smart_open,
writeline_nl,
)
from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger
from cardinal_pythonlib.hash import (
HashMethods,
make_hasher,
)
log = logging.getLogger(__name__)
def get_first_noncomment_line(filename: str) -> Optional[str]:
try:
with open(filename) as f:
return next(gen_noncomment_lines(f))
except StopIteration:
return None
def bulk_hash(input_filename: str,
output_filename: str,
hash_method: str,
key: str,
keep_id: bool = True):
"""
Hash lines from one file to another.
Args:
input_filename:
input filename, or "-" for stdin
output_filename:
output filename, or "-" for stdin
hash_method:
method to use; e.g. ``HMAC_SHA256``
key:
secret key for hasher
keep_id:
produce CSV with ``hash,id`` pairs, rather than just lines with
the hashes?
Note that the hash precedes the ID with the ``keep_id`` option, which
works best if the ID might contain commas.
"""
log.info(f"Reading from: {input_filename}")
log.info(f"Writing to: {output_filename}")
log.info(f"Using hash method: {hash_method}")
log.info(f"keep_id: {keep_id}")
log.debug(f"Using key: {key!r}") # NB security warning in help
hasher = make_hasher(hash_method=hash_method, key=key)
with smart_open(input_filename, "rt") as i: # type: TextIO
with smart_open(output_filename, "wt") as o: # type: TextIO
for line in gen_noncomment_lines(i):
hashed = hasher.hash(line) if line else ""
outline = f"{hashed},{line}" if keep_id else hashed
# log.debug(f"{line!r} -> {hashed!r}")
writeline_nl(o, outline)
def main() -> None:
"""
Command-line entry point.
"""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="Hash IDs in bulk, using a cryptographic hash function.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'infile', type=str,
help="Input file, or '-' for stdin. "
"Use one line per thing to be hashed. "
"Comments (marked with '#') and blank lines are ignored. "
"Lines have whitespace stripped left and right.")
parser.add_argument(
'--outfile', type=str, default="-",
help="Output file, or '-' for stdout. "
"One line will be written for every input line. "
"Blank lines will be written for commented or blank input.")
parser.add_argument(
'--key', type=str,
help="Secret key for hasher (warning: may be visible in process list; "
"see also --keyfile)")
parser.add_argument(
'--keyfile', type=str,
help="File whose first noncomment line contains the secret key for "
"the hasher. (It will be whitespace-stripped right and left.)")
parser.add_argument(
'--method', choices=[HashMethods.HMAC_MD5,
HashMethods.HMAC_SHA256,
HashMethods.HMAC_SHA512],
default=HashMethods.HMAC_MD5,
help="Hash method")
parser.add_argument(
'--keepid', action="store_true",
help="Produce CSV output with (hash,id) rather than just the hash")
parser.add_argument(
'--verbose', '-v', action="store_true",
help="Be verbose (NB will write key to stderr)")
args = parser.parse_args()
main_only_quicksetup_rootlogger(logging.DEBUG if args.verbose
else logging.INFO)
assert bool(args.key) != bool(args.keyfile), (
"Specify either --key or --keyfile (and not both)."
)
if args.keyfile:
key = get_first_noncomment_line(args.keyfile)
assert key, f"No key found in keyfile: {args.keyfile}"
else:
key = args.key
bulk_hash(
input_filename=args.infile,
output_filename=args.outfile,
hash_method=args.method,
key=key,
keep_id=args.keepid,
)
if __name__ == "__main__":
main()
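# Example invocation (illustrative; --method takes one of the HashMethods
# choices declared above):
#
#     bulk_hash.py ids.txt --keyfile secret.key --outfile hashed.csv --keepid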
| RudolfCardinal/crate | crate_anon/linkage/bulk_hash.py | Python | gpl-3.0 | 6,042 |
# This module has moved to zope.annotation.attribute
# and will go away in Zope 3.5
import zope.deprecation
zope.deprecation.moved(
'zope.annotation.attribute',
"Zope 3.5",
)
| Donkyhotay/MoonPy | zope/app/annotation/attribute.py | Python | gpl-3.0 | 187 |
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.contrib.auth.models import User
from application.serializers import UserSerializer, ApplicationSerializer, ApplicationListSerializer
from rest_framework import viewsets, status
from application.models import Application, validate_tc
from django.core.exceptions import ValidationError
from django.http import Http404
from rest_framework import permissions
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.parsers import FileUploadParser
class ApplicationList(APIView):
def get(self, request, format=None):
applications = Application.objects.all()
serializer = ApplicationListSerializer(applications, many=True)
return Response(serializer.data)
class ApplicationDetail(APIView):
def get_object(self, pk):
try:
return Application.objects.get(pk=pk)
except Application.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
applications = self.get_object(pk)
serializer = ApplicationSerializer(applications)
return Response(serializer.data)
def put(self, request, pk, format=None):
application = self.get_object(pk)
serializer = ApplicationSerializer(application, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
application = self.get_object(pk)
application.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class ApplicationCreate(APIView):
def post(self, request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
Application.objects.create(user=User.objects.get(pk=serializer.data['id']))
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class FileUpload(APIView):
parser_classes = (FileUploadParser,)
def post(self, request, format=None):
file_obj = request.FILES['file']
# do some stuff with uploaded file
return Response(status=204)
@api_view(['GET'])
def tc_validator(request, tc):
    if User.objects.filter(username=tc).exists():
return Response({'tc': tc}, status=status.HTTP_302_FOUND)
try:
validate_tc(tc)
return Response({'tc': tc}, status=status.HTTP_200_OK)
    except ValidationError:
return Response({'tc': tc}, status=status.HTTP_400_BAD_REQUEST)
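# Illustrative URL wiring for the views above (route patterns and the urls.py
# module are hypothetical, not part of this file):
#
#     # urls.py
#     urlpatterns = [
#         url(r'^applications/$', ApplicationList.as_view()),
#         url(r'^applications/(?P<pk>[0-9]+)/$', ApplicationDetail.as_view()),
#         url(r'^applications/create/$', ApplicationCreate.as_view()),
#         url(r'^validate/(?P<tc>[0-9]+)/$', tc_validator),
#     ]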
| mkmeral/TevitolApplication | application/backend/application/views.py | Python | gpl-3.0 | 2,676 |
from .sample_filter import SampleFilter, GtFilter
from .sv_gt_filter import SvGtFilter
import logging
from collections import OrderedDict, defaultdict
class FamilyFilter(object):
'''
Determine whether variants/alleles fit given inheritance
patterns for families.
'''
def __init__(self, ped, vcf, infer_inheritance=True, g2p=None,
check_g2p_consequence=None, force_inheritance=None,
logging_level=logging.WARNING):
'''
Initialize with Family object from ped_file.py and a
VcfReader object from vcf_reader.py. You may also specify an
inheritance pattern (either 'recessive' or 'dominant'). If
inheritance_pattern is not specified an attempt is made to
infer an appropriate inheritance pattern based on the family
structure and affecteds.
Args:
ped: A PedFile object from ped_file.py. Must contain
at least one affected individual.
vcf: A VcfReader object containing data from at least
some of the affected individuals in the given
family.
infer_inheritance:
If True, infer possible inheritance patterns
for each family in the PedFile. Inferred patterns
are stored in self.inheritance_patterns dict
(keys are families, values are lists of
inheritance patterns).
g2p: G2P object from vase.g2p for filtering on
presence and inheritance requirements from a G2P
file.
check_g2p_consequence:
If using a G2P object for gene filtering, also
filter on consequence type as described for each
gene. Note that the mapping of mutation
consequence to consequence type is quite crude
and should be used with caution (see the
mutation_to_csq dict in vase/g2p.py for the
mappings used).
force_inheritance:
Optionally specify an inheritance pattern to
test for each family - either 'dominant' or
'recessive' is allowed. If infer_inheritance is
True, these patterns will be tested in addition
to inferred patterns.
logging_level:
The level at which logging messages are
displayed. Defaults to logging.WARNING
'''
self.logger = self._get_logger(logging_level)
self.affected = tuple(ped.get_affected())
self.unaffected = tuple(ped.get_unaffected())
self.obligate_carriers = dict()
self.ped = ped
self.vcf = vcf
self.g2p = g2p
self.check_g2p_consequence = check_g2p_consequence
if not self.affected:
raise RuntimeError("No affected individuals found in PED file '{}'"
.format(ped.filename))
self.vcf_affected = list(x for x in self.affected
if x in self.vcf.header.samples)
if not self.vcf_affected:
raise RuntimeError("No affected individuals in PED file '{}'"
.format(ped.filename) + " found in VCF " +
"'{}'".format(vcf.filename))
self.vcf_unaffected = list(x for x in self.unaffected
if x in self.vcf.header.samples)
self.vcf_samples = self.vcf_affected + self.vcf_unaffected
self.inheritance_patterns = defaultdict(list)
if infer_inheritance:
self._infer_inheritance()
if force_inheritance:
if force_inheritance not in ('dominant', 'recessive'):
raise RuntimeError("Unrecognised inheritance pattern " +
"specified with 'force_inheritance' " +
"argument. Valid options are 'dominant' " +
"or 'recessive'.")
for fid in self.ped.families:
self.inheritance_patterns[fid].append(force_inheritance)
def _infer_inheritance(self):
'''
Simplistic method for determining likely relevant
inheritance pattern. For affected individuals in a family
a check is made whether parents or grandparents are also
affected. Currently only dominant or recessive inheritance
is inferred, no attempt to infer X-linked or mitochondrial
inheritance is made and it will not spot pseudodominance.
'''
for fid, fam in self.ped.families.items():
n_affected = 0
no_parents = True
both_pars_unaffected = False
dominant = False
denovo = False
recessive = False
self.logger.info("Assessing inheritance pattern of family {}"
.format(fid))
f_aff = tuple(fam.get_affected())
obligate_carriers = set()
if not f_aff:
continue
for iid in f_aff:
self.logger.info("Checking affected individual {}".format(iid))
n_affected += 1
indv = fam.individuals[iid]
if not indv.parents:
self.logger.info("No parents for affected individual {}"
.format(iid))
continue
no_parents = False
p_unaff = 0
for par in indv.parents:
# is parent affected
if par not in fam.individuals:
if par in self.vcf.header.samples:
self.logger.warn("Family '{}' parent '{}' ".format(
fid, par) + "not specified in " +
"PED, but present in VCF - " +
"assuming unaffected")
self.vcf_samples.append(par)
self.vcf_unaffected.append(par)
p_unaff += 1
continue
parent = fam.individuals[par]
par_to_child = False
gpar_to_child = False
if parent.is_affected():
self.logger.info("Apparent vertical transmission " +
"from {} -> {}" .format(par, iid))
par_to_child = True
else:
p_unaff += 1
for gpar in parent.parents:
if fam.individuals[gpar].is_affected():
gpar_to_child = True
msg = "Apparent vertical transmission "
if par_to_child:
msg += ("from {} -> {} -> {}"
.format(gpar, par, iid))
else:
msg += ("with partial penetrance from " +
"{} -> ({}) -> {}"
.format(gpar, par, iid))
obligate_carriers.add(par)
self.logger.info(msg)
if par_to_child or gpar_to_child:
dominant = True
if p_unaff == 2:
both_pars_unaffected = True
if not dominant:
recessive = True
if no_parents or not both_pars_unaffected:
# missing information on one/both parents - could be dominant
dominant = True
if recessive and n_affected == 1 and not no_parents:
f_par = fam.individuals[f_aff[0]].parents
if len(f_par) != 2:
self.logger.info("Can not analyze {} under ".format(fid) +
"a de novo model due to missing parents" +
" in ped")
dominant = True
elif (f_par[0] not in self.vcf.header.samples or
f_par[1] not in self.vcf.header.samples):
self.logger.info("Can not analyze {} under ".format(fid) +
"a de novo model due to missing parents" +
" in VCF")
else:
denovo = True
elif recessive and n_affected > 1:
# we can entertain apparent de novos due to somatic mosaicism
# if all affecteds share a parent
pars = fam.individuals[f_aff[0]].parents
shared_pars = None
if len(pars) != 2:
self.logger.info("Can not analyze {} under ".format(fid) +
"a de novo model due to missing parents" +
" in ped")
dominant = True
else:
shared_pars = set(pars)
for i in range(1, len(f_aff)):
ipars = self.ped.individuals[f_aff[i]].parents
if ipars is None:
break
shared_pars = shared_pars.intersection(ipars)
if not shared_pars:
break
if shared_pars:
denovo = True
for par in shared_pars:
if par not in self.vcf_samples:
self.logger.info("Can not analyze {}".format(fid) +
"under a de novo model due to " +
"missing parents in VCF")
denovo = False
break
self.inheritance_patterns[fid] = []
if recessive:
self.logger.info("Family '{}' " .format(fid) + "can be " +
"analysed under a recessive model")
self.inheritance_patterns[fid].append('recessive')
if denovo:
dmodel = "de novo"
if n_affected > 1:
dmodel += " (with germline mosaicism)"
self.logger.info("Family '{}' " .format(fid) + "can be " +
"analysed under a {} model" .format(dmodel))
self.inheritance_patterns[fid].append('de_novo')
if dominant:
self.logger.info("Family '{}' " .format(fid) + "can be " +
"analysed under a dominant model")
self.inheritance_patterns[fid].append('dominant')
self.obligate_carriers[fid] = tuple(obligate_carriers)
def _get_logger(self, logging_level):
logger = logging.getLogger(__name__)
if not logger.hasHandlers():
logger.setLevel(logging_level)
formatter = logging.Formatter(
'[%(asctime)s] %(name)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logger.level)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
class InheritanceFilter(object):
'''
Parent class for RecessiveFilter/DominantFilter/DeNovoFilter
object.
'''
def __init__(self, family_filter, gt_args, min_families=1,
report_file=None, snpeff_mode=False):
'''
Create genotype filter objects and initialise family filtering
arguments.
Args:
family_filter:
Parent FamilyFilter object, initialized with
VCF and PED files.
gt_args:
A dict of arguments to use for filtering
genotypes. These should all correspond to
arguments to provide to SampleFilter objects.
min_families:
Require at least this many families to have
qualifying alleles in a feature before
outputting. Default=1.
report_file:
                Deprecated. Use vase_reporter after inheritance
                filtering to process VCFs instead.
snpeff_mode:
Use SnpEff annotations instead of VEP annotations
from input VCF.
'''
self.family_filter = family_filter
self.min_families = min_families
self.ped = family_filter.ped
self.samples = family_filter.vcf_samples
self.unaffected = family_filter.vcf_unaffected
self.gt_filter = GtFilter(family_filter.vcf,
gq=gt_args.get('gq'),
dp=gt_args.get('dp'),
max_dp=gt_args.get('max_dp'),
het_ab=gt_args.get('het_ab'),
hom_ab=gt_args.get('hom_ab'))
self._gt_fields = set(self.gt_filter.fields)
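        # Control-sample genotype thresholds fall back to the corresponding
        # case-sample values whenever they are not supplied explicitly.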
if gt_args.get('min_control_gq') is None:
gt_args['min_control_gq'] = gt_args.get('gq')
if gt_args.get('min_control_dp') is None:
gt_args['min_control_dp'] = gt_args.get('dp')
if gt_args.get('max_control_dp') is None:
gt_args['max_control_dp'] = gt_args.get('max_dp')
if gt_args.get('control_het_ab') is None:
gt_args['control_het_ab'] = gt_args.get('het_ab')
if gt_args.get('control_hom_ab') is None:
gt_args['control_hom_ab'] = gt_args.get('hom_ab')
self.con_gt_filter = GtFilter(family_filter.vcf,
gq=gt_args.get('min_control_gq'),
dp=gt_args.get('min_control_dp'),
max_dp=gt_args.get('max_control_dp'),
het_ab=gt_args.get('control_het_ab'),
hom_ab=gt_args.get('control_hom_ab'),
ref_ab_filter=gt_args.get('con_ref_ab'))
self._gt_fields.update(self.con_gt_filter.fields)
if gt_args.get('sv_min_control_gq') is None:
gt_args['sv_min_control_gq'] = gt_args.get('sv_gq')
if gt_args.get('sv_min_control_dp') is None:
gt_args['sv_min_control_dp'] = gt_args.get('sv_dp')
if gt_args.get('sv_max_control_dp') is None:
gt_args['sv_max_control_dp'] = gt_args.get('sv_max_dp')
if gt_args.get('sv_control_het_ab') is None:
gt_args['sv_control_het_ab'] = gt_args.get('sv_het_ab')
if gt_args.get('sv_control_hom_ab') is None:
gt_args['sv_control_hom_ab'] = gt_args.get('sv_hom_ab')
if gt_args.get('control_del_dhffc') is None:
gt_args['control_del_dhffc'] = gt_args.get('del_dhffc')
if gt_args.get('control_dup_dhbfc') is None:
gt_args['control_dup_dhbfc'] = gt_args.get('dup_dhbfc')
self.sv_gt_filter = SvGtFilter(family_filter.vcf,
gq=gt_args.get('sv_gq'),
dp=gt_args.get('sv_dp'),
max_dp=gt_args.get('sv_max_dp'),
het_ab=gt_args.get('sv_het_ab'),
hom_ab=gt_args.get('sv_hom_ab'),
del_dhffc=gt_args.get('del_dhffc'),
dup_dhbfc=gt_args.get('dup_dhbfc'))
self._sv_gt_fields = set(self.sv_gt_filter.fields)
self.sv_con_gt_filter = SvGtFilter(
family_filter.vcf,
gq=gt_args.get('sv_min_control_gq'),
dp=gt_args.get('sv_min_control_dp'),
max_dp=gt_args.get('sv_max_control_dp'),
het_ab=gt_args.get('sv_control_het_ab'),
hom_ab=gt_args.get('sv_control_hom_ab'),
ref_ab_filter=gt_args.get('sv_con_ref_ab'),
del_dhffc=gt_args.get('control_del_dhffc'),
dup_dhbfc=gt_args.get('control_dup_dhbfc'))
self._sv_gt_fields.update(self.sv_con_gt_filter.fields)
self._prev_coordinate = (None, None) # to ensure records are processed
self._processed_contigs = set() # in coordinate order
if snpeff_mode:
try:
self._csq_header = self.family_filter.vcf.header.ann_fields
except KeyError:
self._csq_header = None # only required for report file
self.csq_attribute = 'ANN'
self.feature_label = 'Feature_ID'
else:
try:
self._csq_header = self.family_filter.vcf.header.csq_fields
except KeyError:
self._csq_header = None # only required for report file
self.csq_attribute = 'CSQ'
self.feature_label = 'Feature'
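        # NOTE: self.report_file is assigned by the child class before it
        # calls super().__init__() (see RecessiveFilter and DominantFilter).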
if self.report_file:
self._write_report_header()
def get_header_fields(self):
'''
Return dict of dicts with INFO header field names as keys
and dicts of features as values. These are suitable for
handing to VcfHeader class's add_header_field() method.
Each INFO field must be defined in self.header_fields in
the child class, which should be a list of tuples where
        each tuple consists of the name and description of the
field.
'''
hf = dict()
for f in self.header_fields:
hf[f[0]] = {'Number': 'A',
'Type': 'String',
'Description': f[1]}
return hf
def confirm_heterozygous(self, record, samples):
for s in samples:
if len(set(record.samples[s]['GT'])) != 2:
return False
return True
def _get_allele_counts(self, allele, rec):
a_counts = dict()
gt_filter_args = dict()
if rec.IS_SV:
gt_filter = self.sv_gt_filter
control_filter = self.sv_con_gt_filter
gt_filter_args['svtype'] = rec.record.info.get('SVTYPE', '')
else:
gt_filter = self.gt_filter
control_filter = self.con_gt_filter
for samp in self.unaffected:
if control_filter.gt_is_ok(rec.record.samples, samp, allele,
**gt_filter_args):
a_counts[samp] = rec.record.samples[samp]['GT'].count(allele)
else:
a_counts[samp] = None
if (rec.record.samples[samp]['GT'] == (0, 0) and
control_filter.ad_over_threshold is not None):
if control_filter.ad_over_threshold(rec.record.samples, samp,
allele):
a_counts[samp] = 1
for samp in self.affected:
if gt_filter.gt_is_ok(rec.record.samples, samp, allele,
**gt_filter_args):
a_counts[samp] = rec.record.samples[samp]['GT'].count(allele)
else:
a_counts[samp] = None
return a_counts
def _check_sorted(self, record):
if self._prev_coordinate[0] != record.chrom:
if record.chrom in self._processed_contigs:
raise RuntimeError("Input must be sorted by chromosome and " +
"position for recessive filtering. " +
"Contig '{}' " .format(record.chrom) +
"encountered before and after contig " +
"'{}'." .format(self._prev_coordinate[0]))
if self._prev_coordinate[0] is not None:
self._processed_contigs.add(self._prev_coordinate[0])
elif record.pos < self._prev_coordinate[1]:
raise RuntimeError("Input must be sorted by chromosome and " +
"position for inheritance filtering. " +
"Encountered position {}:{} after {}:{}"
.format(record.chrom, record.pos,
self._prev_coordinate[0],
self._prev_coordinate[1]))
self._prev_coordinate = (record.chrom, record.pos)
def process_record(self, record):
'''Return True if record should be printed/kept'''
return NotImplementedError("process_record method should be " +
"overriden by child class!")
def _write_report_header(self):
if self._csq_header is not None:
header = str.join("\t", (x for x in self._csq_header if x !=
'Allele'))
header += "\tALT_No.\t" + str.join("\t", self.annot_fields)
header += "\tCHROM\tPOS\tID\tREF\tALT\tALLELE\tQUAL\tFILTER"
self.report_file.write(header + "\n")
def check_g2p(self, record, ignore_csq, inheritance, csqs=None):
if self.family_filter.g2p:
if csqs is None:
csqs = getattr(record, self.csq_attribute)
if self.family_filter.check_g2p_consequence:
fail = (not x for x in
self.family_filter.g2p.csq_and_allelic_requirement_met(
csqs, inheritance))
else:
fail = (not x for x in
self.family_filter.g2p.allelic_requirement_met(
csqs, inheritance))
if ignore_csq:
ignore_csq = [x or y for x, y in zip(ignore_csq, fail)]
else:
ignore_csq = list(fail)
return ignore_csq
class RecessiveFilter(InheritanceFilter):
'''
This class assumes that each family has a shared biallelic
genetic cause of disease. It will not cope with phenocopies,
pseudodominance or other more complicated inheritance patterns.
'''
def __init__(self, family_filter, gt_args, min_families=1,
snpeff_mode=False, strict=False, exclude_denovo=False,
report_file=None):
'''
Args:
family_filter:
FamilyFilter object
gt_args:
A dict of arguments to use for filtering
genotypes. These should all correspond to
arguments to provide to SampleFilter objects.
min_families:
Require at least this many families to have a
qualifying biallelic combination of alleles in
a feature before outputting. Default=1.
snpeff_mode:
Use SnpEff annotations instead of VEP annotations
from input VCF.
strict: If True, for any affected sample with
parents, require confirmation of parental
genotypes. If either parent genotype is a
no-call for a record, then the record will
be ignored. Default=False.
exclude_denovo:
If True, where there is data available from
both parents for an affected individual
ignore apparent de novo occuring alleles.
Default=False.
report_file:
Output filehandle for writing summaries of
segregating variants to. Default=None.
'''
self.prefix = "VASE_biallelic"
self.header_fields = [
("VASE_biallelic_homozygous",
'Samples that carry homozygous biallelic changes ' +
' parsed by {}' .format(type(self).__name__)),
("VASE_biallelic_compound_het",
'Samples that carry compound heterozygous biallelic changes ' +
'parsed by {}'.format(type(self).__name__)),
("VASE_biallelic_de_novo",
'Samples that carry biallelic alleles that appear to have ' +
'arisen de novo'),
('VASE_biallelic_families',
'Family IDs for VASE_biallelic alleles'),
("VASE_biallelic_features",
'Features (e.g. transcripts) that contain qualifying ' +
'biallelic variants parsed by {}' .format(
type(self).__name__))]
self.annot_fields = ('homozygous', 'compound_het', 'de_novo',
'families', 'features')
self.report_file = report_file
super().__init__(family_filter, gt_args, min_families=min_families,
snpeff_mode=snpeff_mode, report_file=report_file)
self.families = tuple(x for x in
self.family_filter.inheritance_patterns
if 'recessive' in
self.family_filter.inheritance_patterns[x])
self.affected = tuple(x for x in family_filter.vcf_affected if
self.ped.individuals[x].fid in self.families)
self._fam_to_aff = dict()
for fid in self.families:
self._fam_to_aff[fid] = set(x for x in
self.ped.families[fid].get_affected()
if x in self.affected)
self.family_filter.logger.info("Analysing family {} ".format(fid) +
"under a recessive model")
self.strict = strict
self.exclude_denovo = exclude_denovo
self._potential_recessives = dict()
self._current_features = set()
self._processed_features = set()
def process_record(self, record, ignore_alleles=[], ignore_csq=[]):
'''
Returns True if record should be stored for checking against
other records overlapping the same features to see if they
constitute biallelic variation.
Stores potential recessive records per allele for
segregation checking once overlapping features have been
traversed.
Args:
record: VaseRecord
ignore_alleles:
List of booleans indicating for each ALT in
order whether it should be ignored in relation
to possible recessive variation (e.g. if MAF is
too high, no likely pathogenic consequence
etc.). This will normally have been generated
by VaseRunner via VcfFilter and/or VepFilter
classes.
ignore_csq:
List of booleans indicating for each CSQ in
order whether it should be ignored in relation
to possible recessive variation. This should
normally have been generated by a corresponding
VepFilter object.
'''
stored = False
self._check_sorted(record.record)
record_csqs = getattr(record, self.csq_attribute)
self._current_features = set(c[self.feature_label] for c in record_csqs
if c[self.feature_label] != '')
ignore_csq = self.check_g2p(record, ignore_csq, 'recessive',
csqs=record_csqs)
if ignore_csq and all(ignore_csq):
return False
gt_filter_args = dict()
if record.IS_SV:
gt_filter = self.sv_gt_filter
control_filter = self.sv_con_gt_filter
gt_filter_args['svtype'] = record.info.get('SVTYPE', '')
else:
gt_filter = self.gt_filter
control_filter = self.con_gt_filter
skip_fam = set()
added_prs = OrderedDict()
for i in range(len(record.alts)):
if ignore_alleles and ignore_alleles[i]:
continue
alt = i + 1
skip_allele = False
fams_with_allele = []
for un in self.unaffected:
if record.samples[un]['GT'] == (alt, alt):
if control_filter.gt_is_ok(record.samples, un, alt,
**gt_filter_args):
# hom in a control - skip allele
skip_allele = True
break
if skip_allele:
continue
for fid in self.families:
if fid in skip_fam:
continue
have_allele = set() # affecteds carrying this allele
for aff in self._fam_to_aff[fid]:
# check all affecteds carry this allele
if (alt in record.samples[aff]['GT'] and
gt_filter.gt_is_ok(record.samples, aff, alt,
**gt_filter_args)):
have_allele.add(aff)
else:
break
if have_allele == self._fam_to_aff[fid]:
# all affecteds in family carry allele
fams_with_allele.append(fid)
if fams_with_allele:
# store record and consequences
try:
csqs = []
for j in range(len(record_csqs)):
if ignore_csq and ignore_csq[j]:
continue
if record_csqs[j]['alt_index'] == alt:
# store record and csq details
csqs.append(record_csqs[j])
if csqs:
stored = True
alt_counts = self._get_allele_counts(alt, record)
pr = PotentialSegregant(
record=record, allele=alt, csqs=csqs,
allele_counts=alt_counts,
families=fams_with_allele,
feature_label=self.feature_label)
for feat in pr.features:
if feat in added_prs:
added_prs[feat][pr.alt_id] = pr
else:
added_prs[feat] = OrderedDict(
[(pr.alt_id, pr)])
if feat in self._potential_recessives:
self._potential_recessives[feat][pr.alt_id] = pr
else:
self._potential_recessives[feat] = OrderedDict(
[(pr.alt_id, pr)])
except KeyError:
raise RuntimeError("Could not identify CSQ or ANN " +
"fields in VCF header. Please ensure " +
"your input is annotated with " +
"Ensembl's VEP to perform recessive " +
"filtering")
return stored
def process_potential_recessives(self, final=False):
'''
Check whether stored PotentialSegregant alleles make up
biallelic variation in the same transcript for affected
individuals/families. Adds labels to INFO fields of VCF
records and returns an OrderedDict of 'var_ids' to
lists of PotentialSegregant objects that appear to
segregate consistent with recessive inheritance.
Clears the cache of stored PotentialSegregant alleles.
'''
        segregating = OrderedDict() # key=alt_id, val=SegregatingVariant
for feat, prs in self._potential_recessives.items():
if not final and feat in self._current_features:
continue
            feat_segregating = [] # list of tuples of values for creating SegregatingVariant
un_hets = defaultdict(list) # store het alleles carried by each unaffected
aff_hets = defaultdict(list) # store het alleles carried by each affected
biallelics = defaultdict(list) # store biallelic combinations for affecteds
for pid, p in prs.items():
for un in self.unaffected:
if p.allele_counts[un] == 1: # already checked for homs when adding
# store allele carried in this unaffected
un_hets[un].append(pid)
for aff in (x for x in self.affected
if self.ped.fid_from_iid(x) in p.families):
if p.allele_counts[aff] == 1:
aff_hets[aff].append(pid)
elif p.allele_counts[aff] == 2:
biallelics[aff].append(tuple([pid]))
incompatibles = [] # create a list of sets of incompatible hets
for hets in un_hets.values():
if len(hets):
incompatibles.append(set(hets))
for aff, hets in aff_hets.items():
for i in range(len(hets)):
for j in range(i+1, len(hets)):
incomp = False
for iset in incompatibles:
if iset.issuperset([hets[i], hets[j]]):
incomp = True
break
if not incomp:
if not prs[hets[i]].record.in_cis_with(sample=aff,
allele=prs[hets[i]].allele,
other=prs[hets[j]].record,
other_allele=prs[hets[j]].allele):
# check phase groups in case alleles in cis
biallelics[aff].append(
tuple([hets[i], hets[j]]))
if not biallelics:
continue
# see if all affecteds in the same family share the same biallelics
for fid, affs in self._fam_to_aff.items():
b_affs = set(x for x in affs if x in biallelics)
if len(b_affs) == 0 or b_affs != affs:
continue
affs = list(affs)
absent_in_aff = False
for i in range(len(affs)):
for bi in biallelics[affs[i]]:
for j in range(i+1, len(affs)):
if bi not in biallelics[affs[j]]:
absent_in_aff = True
break
if not absent_in_aff:
segs, de_novo = self._check_parents(feat, bi, affs)
if not segs:
continue
if len(bi) == 1:
model = 'homozygous'
else:
model = 'compound_het'
for bi_pr in (prs[x] for x in bi):
feat_segregating.append((bi_pr, affs, [fid],
model, [feat],
de_novo[bi_pr.alt_id],
self.prefix))
fam_count = len(set([fam for tup in feat_segregating for fam in
tup[2]]))
if fam_count >= self.min_families:
for tp in feat_segregating:
if tp[0] in segregating:
segregating[tp[0]].add_samples(*tp[1:6])
else:
segregating[tp[0]] = SegregatingVariant(*tp)
var_to_segregants = OrderedDict()
for sb in segregating.values():
sb.annotate_record(self.report_file, self.annot_fields)
if sb.segregant.var_id in var_to_segregants:
var_to_segregants[sb.segregant.var_id].append(sb.segregant)
else:
var_to_segregants[sb.segregant.var_id] = [sb.segregant]
# clear the cache except for the last entry which will be a new gene
# self._potential_recessives = self._last_added
self._potential_recessives = OrderedDict(
(k, v) for k, v in self._potential_recessives.items() if k in
self._current_features)
return var_to_segregants
def _check_parents(self, feat, alleles, samples):
'''
Check transmission of alleles (i.e. one from each parent)
if parents available. Should have already checked that
alleles are not present in this combination in any
unaffected individual.
Returns a tuple of booleans - first value is True if
parental genotypes do not contradict recessive inheritance
while the second value is a dict of alleles to lists of
        samples in which the allele appears to have arisen
de novo.
'''
dns = defaultdict(list)
counts = []
for al in alleles:
counts.append(self._potential_recessives[feat][al].allele_counts)
if len(counts) == 1: # homozygous
counts.append(counts[0])
for samp in samples:
parents = self.ped.individuals[samp].parents
par = list(x for x in parents if x in self.samples)
if len(par) == 0:
continue
if self.strict:
for p in par:
if None in (counts[i][p] for i in range(len(counts))):
# require both parental genotypes if self.strict
return (False, dns)
if len(par) == 2: # can check for de novos
for i in range(len(counts)):
if counts[i][par[0]] == 0 and counts[i][par[1]] == 0:
# apparent de novo
self.family_filter.logger.debug(
"Apparent de novo allele " +
"{} for sample {} (parents = {} + {}) ".format(
alleles[-i], samp, par[0], par[1]) +
"for recessive combination {}|{}".format(
alleles[0], alleles[-1]))
dns[alleles[-i]].append(samp)
if self.exclude_denovo:
return (False, dns)
elif len(par) == 1:
# if only one parent and both alleles are absent it is more
# likely that the two alleles are in cis from other parent
if counts[0][par[0]] == 0 and counts[1][par[0]] == 0:
                    return (False, dns)
# NOTE: we could do a check here to ensure that any non-affected
# parent does not carry both alleles, but this *SHOULD* have
# already been done earlier in process_potential_recessives
# function for ALL unaffecteds anyway
return (True, dns)
class DominantFilter(InheritanceFilter):
'''
Identify variants that fit a dominant pattern in
given families.
'''
def __init__(self, family_filter, gt_args, min_families=1,
snpeff_mode=False, report_file=None):
'''
Initialize with parent IDs, children IDs and VcfReader
object.
Args:
family_filter:
FamilyFilter object
gt_args:
A dict of arguments to use for filtering
genotypes. These should all correspond to
arguments to provide to SampleFilter objects.
min_families:
Require at least this many families to have a
qualifying variant in a feature before
outputting. Default=1.
snpeff_mode:
Use SnpEff annotations instead of VEP annotations
from input VCF.
'''
self.prefix = "VASE_dominant"
self.header_fields = [
("VASE_dominant_samples",
'Sample IDs for alleles that segregate according to a ' +
'dominant inheritance pattern in an affected sample as' +
' parsed by {}' .format(type(self).__name__)),
('VASE_dominant_unaffected_carrier',
'Sample IDs for unaffected carriers of ' +
'VASE_dominant alleles'),
('VASE_dominant_families',
'Family IDs for VASE_dominant alleles'),
("VASE_dominant_features",
'Features (e.g. transcripts) that contain qualifying ' +
'dominant variants parsed by {}' .format(
type(self).__name__))]
self.annot_fields = ('samples', 'unaffected_carrier', 'families',
'features')
self.report_file = report_file
super().__init__(family_filter, gt_args, min_families=min_families,
snpeff_mode=snpeff_mode, report_file=report_file,)
self.families = tuple(x for x in
self.family_filter.inheritance_patterns
if 'dominant' in
self.family_filter.inheritance_patterns[x])
self.affected = tuple(x for x in family_filter.vcf_affected if
self.ped.individuals[x].fid in self.families)
self.filters = dict()
self._potential_dominants = dict()
self._last_added = OrderedDict()
self._current_features = set()
for fam in self.families:
f_aff = tuple(x for x in self.ped.families[fam].get_affected()
if (x in self.affected or
x in self.family_filter.obligate_carriers[fam]))
f_unaff = tuple(x for x in self.ped.families[fam].get_unaffected()
if (x in self.unaffected and x not in
self.family_filter.obligate_carriers[fam]))
if fam in self.family_filter.obligate_carriers:
self.obligate_carriers = tuple(
x for x in f_aff if x in
self.family_filter.obligate_carriers[fam])
else:
self.obligate_carriers = ()
dom_filter = SampleFilter(family_filter.vcf, cases=f_aff,
controls=f_unaff, confirm_missing=True,
**gt_args)
self.filters[fam] = dom_filter
self.family_filter.logger.info("Analysing family {} ".format(fam) +
"under a dominant model")
def process_record(self, record, ignore_alleles=[], ignore_csq=[]):
'''
        Returns True if an allele segregates in a manner consistent
        with dominant inheritance.
        Args:
            record: VaseRecord
            ignore_alleles:
                List of booleans indicating for each ALT in
                order whether it should be ignored in relation
                to possible dominant variation (e.g. if MAF is
                too high, no likely pathogenic consequence
                etc.). This will normally have been generated
                by VaseRunner via VcfFilter and/or VepFilter
                classes.
            ignore_csq:
                List of booleans indicating for each consequence
                annotation whether it should be ignored.
        '''
        dom_alleles = [[] for _ in range(len(record.record.alts))]
        fam_alleles = [[] for _ in range(len(record.record.alts))]
ignore_csq = self.check_g2p(record, ignore_csq, 'dominant')
if ignore_csq and all(ignore_csq):
return False
if self.min_families > 1:
self._check_sorted(record.record)
for i in range(len(record.record.alts)):
if ignore_alleles[i]:
continue
allele = i + 1
for fam, dfilter in self.filters.items():
                # test whether this allele fits a dominant pattern in this family
is_dom = not dfilter.filter(record, allele)
if is_dom:
if self.confirm_heterozygous(record.record, dfilter.cases):
dom_alleles[i].extend(dfilter.cases)
fam_alleles[i].append(fam)
self.family_filter.logger.debug(
"Apparent dominant allele {}:{}-{}/{} ".format(
record.record.chrom, record.record.pos,
record.record.ref,
record.record.alleles[allele]) +
"present in {} ".format(dfilter.cases) +
"and absent in {}".format(dfilter.controls))
segs = []
for i in range(len(dom_alleles)):
if not dom_alleles[i]:
continue
allele = i + 1
csqs = []
record_csqs = getattr(record, self.csq_attribute)
try:
for j in range(len(record_csqs)):
if ignore_csq and ignore_csq[j]:
continue
if record_csqs[j]['alt_index'] == allele:
# store record and csq details
csqs.append(record_csqs[j])
except KeyError:
if self.min_families > 1:
raise RuntimeError("Could not identify CSQ or ANN fields" +
" in VCF header. Please ensure your " +
"input is annotated with Ensembl's " +
"VEP to perform dominant filtering.")
if self.min_families <= 1 or csqs:
a_counts = self._get_allele_counts(allele, record)
pd = PotentialSegregant(record=record, allele=allele,
csqs=csqs, allele_counts=a_counts,
families=fam_alleles[i],
feature_label=self.feature_label)
segs.append(pd)
if self.min_families > 1:
for feat, od in self._last_added.items():
if feat in self._potential_dominants:
self._potential_dominants[feat].update(od)
else:
self._potential_dominants[feat] = od
self._last_added = OrderedDict()
for seg in segs:
for feat in seg.features:
self._last_added[feat] = OrderedDict([(seg.alt_id, seg)])
else:
for seg in segs:
affs = (x for x in self.affected
if x not in self.obligate_carriers and
self.ped.fid_from_iid(x) in seg.families)
sv = SegregatingVariant(seg, affs, seg.families, 'samples',
seg.features, [], self.prefix)
obcs = tuple(x for x in self.obligate_carriers if
self.ped.fid_from_iid(x) in seg.families)
if obcs:
obfs = set(self.ped.fid_from_iid(x) for x in obcs)
sv.add_samples(obcs, obfs, 'unaffected_carrier',
seg.features, [])
sv.annotate_record(self.report_file, self.annot_fields)
return len(segs) > 0
def process_dominants(self, final=False):
'''
Check whether stored PotentialSegregant alleles make up
dominant variation in the same transcript for the minimum
number of families. Adds labels to INFO fields of VCF
records and returns an OrderedDict of 'var_ids' to
lists of PotentialSegregant objects that appear to
constitute dominant variation.
Clears the cache of stored PotentialSegregant alleles.
'''
sds = OrderedDict()
feat_processed = []
if not self._potential_dominants:
            # if cache is empty, we never encountered the next set of features
self._potential_dominants = self._last_added
self._last_added = OrderedDict()
elif final:
for feat in self._last_added:
if feat in self._potential_dominants:
self._potential_dominants[feat].update(
self._last_added[feat])
else:
self._potential_dominants[feat] = self._last_added[feat]
self._last_added = OrderedDict()
for feat, pds in self._potential_dominants.items():
if feat in self._current_features: # still processing this feature
continue
feat_fams = set()
feat_processed.append(feat)
for pid, p in pds.items():
feat_fams.update(p.families)
if len(feat_fams) >= self.min_families:
for p in pds.values():
samps = (x for x in self.affected
if self.ped.fid_from_iid(x) in p.families)
if p.alt_id in sds:
sds[p.alt_id].add_samples(samps, p.families,
'samples', [feat], [])
else:
sv = SegregatingVariant(p, samps, p.families,
'samples', [feat], [],
self.prefix)
sds[p.alt_id] = sv
var_to_segregants = OrderedDict()
for sv in sds.values():
sv.annotate_record(self.report_file, self.annot_fields)
if sv.segregant.var_id in var_to_segregants:
var_to_segregants[sv.segregant.var_id].append(sv.segregant)
else:
var_to_segregants[sv.segregant.var_id] = [sv.segregant]
# clear the cache of processed features
for feat in feat_processed:
del self._potential_dominants[feat]
return var_to_segregants
class DeNovoFilter(InheritanceFilter):
'''
    Identify and output variants occurring in a child and absent from
the parents.
'''
def __init__(self, family_filter, gt_args, min_families=1,
confirm_het=False, snpeff_mode=False, report_file=None):
'''
        Initialize with a FamilyFilter object and arguments for
        genotype filtering.
Args:
family_filter:
FamilyFilter object
gt_args:
A dict of arguments to use for filtering
genotypes. These should all correspond to
arguments to provide to SampleFilter objects.
min_families:
Require at least this many families to have a
qualifying variant in a feature before
outputting. Default=1.
confirm_het:
If True, apparent de novos are required to be
called as heterozygous. Default=False.
snpeff_mode:
Use SnpEff annotations instead of VEP annotations
from input VCF.
'''
self.prefix = "VASE_de_novo"
self.header_fields = [("VASE_de_novo_samples",
'Samples that carry alleles occurring de novo parsed by ' +
                               '{}'.format(type(self).__name__)),
('VASE_de_novo_families',
'Family IDs for VASE_de_novo alleles'),
("VASE_de_novo_features",
'Features (e.g. transcripts) that contain qualifying ' +
                               'de novo variants parsed by {}'.format(
                                   type(self).__name__))]
self.annot_fields = ('samples', 'families', 'features')
self.report_file = report_file
super().__init__(family_filter, gt_args, min_families=min_families,
snpeff_mode=snpeff_mode, report_file=report_file)
self.families = tuple(x for x in
self.family_filter.inheritance_patterns if
'de_novo' in
self.family_filter.inheritance_patterns[x])
self.affected = tuple(x for x in family_filter.vcf_affected if
self.ped.individuals[x].fid in self.families)
self._potential_denovos = dict()
self._last_added = OrderedDict()
self._current_features = set()
self.confirm_het = confirm_het
self.filters = defaultdict(list)
self.prefix = "VASE_de_novo"
for fam in self.families:
f_aff = tuple(x for x in self.ped.families[fam].get_affected()
if x in self.affected)
par_child_combos = defaultdict(list)
for aff in f_aff:
pars = tuple(x for x in
self.ped.families[fam].individuals[aff].parents
if x in self.samples)
if len(pars) == 2:
par_child_combos[pars].append(aff)
for parents, children in par_child_combos.items():
par_filter = SampleFilter(family_filter.vcf, cases=children,
controls=parents,
confirm_missing=True, **gt_args)
self.filters[fam].append(par_filter)
self.family_filter.logger.info(
"Analysing family {} parents ({}) and children ({})"
.format(fam, str.join(", ", parents),
str.join(", ", children)) +
" combinations under a de novo dominant model")
def process_record(self, record, ignore_alleles=[], ignore_csq=[]):
'''
        Returns True if an allele is an apparent de novo variant.
        Args:
            record: VaseRecord
            ignore_alleles:
                List of booleans indicating for each ALT in
                order whether it should be ignored in relation
                to possible de novo variation (e.g. if MAF is
                too high, no likely pathogenic consequence
                etc.). This will normally have been generated
                by VaseRunner via VcfFilter and/or VepFilter
                classes.
            ignore_csq:
                List of booleans indicating for each consequence
                annotation whether it should be ignored.
        '''
if self.min_families > 1:
self._check_sorted(record.record)
ignore_csq = self.check_g2p(record, ignore_csq, 'de novo')
if ignore_csq and all(ignore_csq):
return False
        denovo_alleles = [[] for _ in range(len(record.record.alts))]
        fam_alleles = [[] for _ in range(len(record.record.alts))]
        for i in range(len(record.record.alts)):
if ignore_alleles[i]:
continue
allele = i + 1
for fam, filters in self.filters.items():
# looking for (potentially shared) de novos in a single family
dns = []
for dfilter in filters:
is_denovo = not dfilter.filter(record, allele)
if is_denovo:
if (not self.confirm_het or self.confirm_heterozygous(
record.record, dfilter.cases)):
dns.append(dfilter.cases)
self.family_filter.logger.debug(
"Apparent de novo allele {}:{}-{}/{} ".format(
record.record.chrom, record.record.pos,
record.record.ref,
record.record.alleles[allele]) +
"present in {} ".format(dfilter.cases) +
"and absent in {}".format(dfilter.controls))
                if len(dns) == len(filters):
                    # every parent-child combination in this family carries
                    # the apparent de novo mutation
                    for x in dns:
                        denovo_alleles[i].extend(x)
                    fam_alleles[i].append(fam)
segs = []
for i in range(len(denovo_alleles)):
if not denovo_alleles[i]:
continue
allele = i + 1
csqs = []
try:
record_csqs = getattr(record, self.csq_attribute)
for j in range(len(record_csqs)):
if ignore_csq and ignore_csq[j]:
continue
if record_csqs[j]['alt_index'] == allele:
# store record and csq details
csqs.append(record_csqs[j])
except KeyError:
if self.min_families > 1:
raise RuntimeError("Could not identify CSQ or ANN fields" +
" in VCF header. Please ensure your " +
"input is annotated with Ensembl's " +
"VEP to perform de novo filtering.")
if self.min_families <= 1 or csqs:
a_counts = self._get_allele_counts(allele, record)
pd = PotentialSegregant(record=record, allele=allele,
csqs=csqs, allele_counts=a_counts,
families=fam_alleles[i],
feature_label=self.feature_label)
segs.append(pd)
if self.min_families > 1:
for feat, od in self._last_added.items():
if feat in self._potential_denovos:
self._potential_denovos[feat].update(od)
else:
self._potential_denovos[feat] = od
self._last_added = OrderedDict()
for seg in segs:
for feat in seg.features:
self._last_added[feat] = OrderedDict([(seg.alt_id, seg)])
else:
for seg in segs:
affs = (x for x in self.affected if self.ped.fid_from_iid(x)
in seg.families)
sv = SegregatingVariant(seg, affs, seg.families, 'samples',
seg.features, [], self.prefix)
sv.annotate_record(self.report_file, self.annot_fields)
return len(segs) > 0
def process_de_novos(self, final=False):
'''
Check whether stored PotentialSegregant alleles make up
de novo dominant variation in the same transcript for the
minimum number of families. Adds labels to INFO fields of
VCF records and returns an OrderedDict of 'var_ids' to
lists of PotentialSegregant objects that appear to
constitute de novo dominant variation.
Clears the cache of stored PotentialSegregant alleles.
'''
sds = OrderedDict()
feat_processed = []
if not self._potential_denovos:
            # if cache is empty, we never encountered the next set of features
self._potential_denovos = self._last_added
self._last_added = OrderedDict()
elif final:
for feat in self._last_added:
if feat in self._potential_denovos:
self._potential_denovos[feat].update(
self._last_added[feat])
else:
self._potential_denovos[feat] = self._last_added[feat]
self._last_added = OrderedDict()
for feat, pds in self._potential_denovos.items():
if feat in self._current_features: # still processing this feature
continue
feat_fams = set()
feat_processed.append(feat)
for pid, p in pds.items():
feat_fams.update(p.families)
if len(feat_fams) >= self.min_families:
for p in pds.values():
samps = (x for x in self.affected
if self.ped.fid_from_iid(x) in p.families)
if p.alt_id in sds:
sds[p.alt_id].add_samples(samps, p.families,
'samples', [feat], [])
else:
sv = SegregatingVariant(p, samps, p.families,
'samples', [feat], [],
self.prefix)
sds[p.alt_id] = sv
var_to_segregants = OrderedDict()
for sv in sds.values():
sv.annotate_record(self.report_file, self.annot_fields)
if sv.segregant.var_id in var_to_segregants:
var_to_segregants[sv.segregant.var_id].append(sv.segregant)
else:
var_to_segregants[sv.segregant.var_id] = [sv.segregant]
# clear the cache of processed features
for feat in feat_processed:
del self._potential_denovos[feat]
return var_to_segregants
class ControlFilter(SampleFilter):
''' Filter variants if they are present in a control sample. '''
def __init__(self, vcf, family_filter, gt_args, n_controls=0):
'''
Args:
vcf: Input VcfReader object.
family_filter:
FamilyFilter object containing information on
which samples are controls in the input VCF.
gt_args:
A dict of arguments to use for filtering
genotypes. These should all correspond to
arguments to provide to SampleFilter objects.
n_controls:
Minimum number of controls required to carry an
ALT allele for it to be filtered. Alleles will
only be filtered if carried by this number of
controls or more. Default=0.
'''
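        # cap the threshold at the number of available control samples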
if n_controls and n_controls > len(family_filter.vcf_unaffected):
n_controls = len(family_filter.vcf_unaffected)
super().__init__(vcf, controls=family_filter.vcf_unaffected,
n_controls=n_controls, confirm_missing=False,
**gt_args)
class SegregatingVariant(object):
'''
Stores details of alleles that segregate in a manner consistent
    with a given inheritance pattern.
'''
__slots__ = ['recessive', 'samples', 'families', 'model', 'features',
'segregant', 'prefix', 'de_novos']
def __init__(self, segregant, samples, families, model, features,
de_novos=(), prefix='VASE_segregant'):
'''
        Initialize with a PotentialSegregant object, an iterable of
        sample IDs carrying the PotentialSegregant, a string
        indicating the model of inheritance (e.g. 'compound_het'),
        the names of the associated features (e.g. transcript IDs),
        a prefix for INFO fields and a list of individuals for whom
        the allele appears to have arisen de novo.
'''
self.segregant = segregant
self.samples = list(samples)
self.families = set(families)
self.model = [model] * len(self.samples)
self.features = set(features)
self.prefix = prefix
self.de_novos = set(de_novos)
def __eq__(self, other):
return self.segregant == other.segregant
def __hash__(self):
return hash(self.segregant)
def add_samples(self, samples, families, model, features, de_novos):
''' Add samples with corresponding model of inheritance '''
self.samples.extend(samples)
self.families.update(families)
self.model.extend([model] * (len(self.samples) - len(self.model)))
self.features.update(features)
self.de_novos.update(de_novos)
def annotate_record(self, report_file=None, annot_order=[]):
''' Add INFO field annotations for VcfRecords '''
annots = defaultdict(set)
for i in range(len(self.model)):
k = self.prefix
if self.model[i]:
k += "_" + self.model[i]
annots[k].add(self.samples[i])
for k in annots:
annots[k] = str.join("|", sorted(annots[k]))
annots[self.prefix + '_families'] = str.join("|",
sorted(self.families))
annots[self.prefix + '_features'] = str.join("|",
sorted(self.features))
if self.de_novos:
annots[self.prefix + '_de_novo'] = str.join("|",
sorted(self.de_novos))
converted = self._convert_annotations(annots)
for k, v in converted.items():
self.segregant.record.info[k] = v
if report_file:
report_file.write(self._annot_to_string(annots, annot_order)
+ "\n")
def _annot_to_string(self, annots, annot_order):
s = ''
csq_to_join = []
for k in (x for x in self.segregant.csqs[0] if x != 'Allele'):
csq_to_join.append(str.join("|", (str(self.segregant.csqs[i][k])
if self.segregant.csqs[i][k]
else '.' for i in range(
len(self.segregant.csqs)))))
s = str.join("\t", csq_to_join)
if annot_order:
annot_order = [self.prefix + "_" + x for x in annot_order]
s += "\t" + str.join("\t", (annots[k] if isinstance(annots[k], str)
else '.' for k in annot_order))
else:
s += "\t" + str.join("\t", (annots[k] if isinstance(annots[k], str)
else '.' for k in sorted(annots)))
r = self.segregant.record
allele = r.alleles[self.segregant.allele]
s += "\t" + str.join("\t", (str(x) for x in (r.chrom, r.pos, r.id,
r.ref, r.alt, allele,
r.qual, r.filter_string)))
return s
def _convert_annotations(self, annots):
''' Convert to per-allele (Number=A) format for INFO field '''
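        # e.g. for a record with two ALT alleles and self.segregant.allele == 2,
        # {'VASE_dominant_samples': 'A|B'} becomes ['.', 'A|B'] so the value
        # lines up with the second ALT in a Number=A INFO field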
converted_annots = dict()
for k, v in annots.items():
if k in self.segregant.record.info:
allele_fields = list(self.segregant.record.info[k])
else:
allele_fields = ['.'] * len(self.segregant.record.alts)
i = self.segregant.allele - 1
allele_fields[i] = v
converted_annots[k] = allele_fields
return converted_annots
class PotentialSegregant(object):
'''
Class for storing variant details for records that might make up
biallelic variants in affected samples.
'''
__slots__ = ['allele', 'allele_counts', 'features', 'families', 'alt_id',
'var_id', 'record', 'csqs']
def __init__(self, record, allele, csqs, allele_counts, families,
feature_label='Feature'):
self.allele = allele
self.allele_counts = allele_counts
self.families = families
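        # var_id identifies the variant site across all ALT alleles, while
        # alt_id pins down the specific ALT allele assessed here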
self.var_id = "{}:{}-{}/{}".format(record.chrom, record.pos,
record.ref, record.alt)
self.alt_id = "{}:{}-{}/{}".format(record.chrom, record.pos,
record.ref, record.alleles[allele])
self.features = set(x[feature_label] for x in csqs if
x[feature_label] != '')
if not self.features:
            # if the variant is intergenic and there is no Feature ID, use var ID
# this way we can capture variants at same site if looking for n>1
# in several families, but won't classify all intergenic variants
# as the same "Feature"
self.features.add(self.var_id.replace(',', '_'))
self.csqs = csqs
self.record = record
def __eq__(self, other):
return self.alt_id == other.alt_id
def __hash__(self):
return hash(self.alt_id)
| gantzgraf/vape | vase/family_filter.py | Python | gpl-3.0 | 69,896 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks, digital
import pmt
import numpy as np
def make_length_tag(offset, length):
return gr.python_to_tag({'offset': offset,
'key': pmt.intern('packet_len'),
'value': pmt.from_long(length),
'srcid': pmt.intern('qa_burst_shaper')})
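# e.g. make_length_tag(0, 20) builds a 'packet_len' tag marking a 20-sample
# burst starting at absolute offset 0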
def make_tag(offset, key, value):
return gr.python_to_tag({'offset': offset,
'key': pmt.intern(key),
'value': value,
'srcid': pmt.intern('qa_burst_shaper')})
def compare_tags(a, b):
return a.offset == b.offset and pmt.equal(a.key, b.key) and \
pmt.equal(a.value, b.value)
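# note: tags compare equal when offset, key and value all match; srcid is
# not compared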
class qa_burst_shaper (gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_ff(self):
'''
test_ff: test with float values, even length window, zero padding,
and no phasing
'''
prepad = 10
postpad = 10
length = 20
data = np.ones(length)
window = np.concatenate((-2.0 * np.ones(5), -4.0 * np.ones(5)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad), window[0:5],
np.ones(length - len(window)), window[5:10],
np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_cc(self):
'''
test_cc: test with complex values, even length window, zero padding,
and no phasing
'''
prepad = 10
postpad = 10
length = 20
data = np.ones(length, dtype=complex)
window = np.concatenate((-2.0 * np.ones(5, dtype=complex),
-4.0 * np.ones(5, dtype=complex)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad, dtype=complex), window[0:5],
np.ones(length - len(window), dtype=complex),
window[5:10], np.zeros(postpad,
dtype=complex)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_c(data, tags=tags)
shaper = digital.burst_shaper_cc(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_c()
self.tb.connect(source, shaper, sink)
self.tb.run()
# checks
self.assertComplexTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_ff_with_phasing(self):
'''
test_ff_with_phasing: test with float values, even length window, zero
padding, and phasing
'''
prepad = 10
postpad = 10
length = 20
data = np.ones(length)
window = np.concatenate((-2.0 * np.ones(5), -4.0 * np.ones(5)))
tags = (make_length_tag(0, length),)
phasing = np.zeros(5)
for i in range(5):
phasing[i] = ((-1.0)**i)
expected = np.concatenate((np.zeros(prepad), phasing * window[0:5],
np.ones(length), phasing * window[5:10],
np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad + len(window))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad,
insert_phasing=True)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_cc_with_phasing(self):
'''
test_cc_with_phasing: test with complex values, even length window, zero
padding, and phasing
'''
prepad = 10
postpad = 10
length = 20
data = np.ones(length, dtype=complex)
window = np.concatenate((-2.0 * np.ones(5, dtype=complex),
-4.0 * np.ones(5, dtype=complex)))
tags = (make_length_tag(0, length),)
phasing = np.zeros(5, dtype=complex)
for i in range(5):
phasing[i] = complex((-1.0)**i)
expected = np.concatenate((np.zeros(prepad, dtype=complex),
phasing * window[0:5],
np.ones(length, dtype=complex),
phasing * window[5:10],
np.zeros(postpad, dtype=complex)))
etag = make_length_tag(0, length + prepad + postpad + len(window))
# flowgraph
source = blocks.vector_source_c(data, tags=tags)
shaper = digital.burst_shaper_cc(window, pre_padding=prepad,
post_padding=postpad,
insert_phasing=True)
sink = blocks.vector_sink_c()
self.tb.connect(source, shaper, sink)
self.tb.run()
# checks
self.assertComplexTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_odd_window(self):
'''
test_odd_window: test with odd length window; center sample should be
applied at end of up flank and beginning of down flank
'''
prepad = 10
postpad = 10
length = 20
data = np.ones(length)
window = np.concatenate((-2.0 * np.ones(5), -3.0 * np.ones(1),
-4.0 * np.ones(5)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad), window[0:6],
np.ones(length - len(window) - 1),
window[5:11], np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_short_burst(self):
'''
test_short_burst: test with burst length shorter than window length;
clips the window up and down flanks to FLOOR(length/2) samples
'''
prepad = 10
postpad = 10
length = 9
data = np.ones(length)
window = np.arange(length + 2, dtype=float)
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad), window[0:4],
np.ones(1), window[5:9],
np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_consecutive_bursts(self):
'''
test_consecutive_bursts: test with consecutive bursts of different
lengths
'''
prepad = 10
postpad = 10
length1 = 15
length2 = 25
data = np.concatenate((np.ones(length1), -1.0 * np.ones(length2)))
window = np.concatenate((-2.0 * np.ones(5), -4.0 * np.ones(5)))
tags = (make_length_tag(0, length1), make_length_tag(length1, length2))
expected = np.concatenate((np.zeros(prepad), window[0:5],
np.ones(length1 - len(window)), window[5:10],
np.zeros(postpad + prepad), -1.0 * window[0:5],
-1.0 * np.ones(length2 - len(window)),
-1.0 * window[5:10], np.zeros(postpad)))
etags = (make_length_tag(0, length1 + prepad + postpad),
make_length_tag(length1 + prepad + postpad,
length2 + prepad + postpad))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
for i in range(len(etags)):
self.assertTrue(compare_tags(sink.tags()[i], etags[i]))
def test_tag_gap(self):
'''
test_tag_gap: test with gap between tags; should drop samples that are
between proper tagged streams
'''
prepad = 10
postpad = 10
length = 20
gap_len = 5
data = np.arange(2 * length + gap_len, dtype=float)
window = np.concatenate((-2.0 * np.ones(5), -4.0 * np.ones(5)))
ewindow = window * \
np.array([1, -1, 1, -1, 1, 1, -1, 1, -1, 1], dtype=float)
tags = (make_length_tag(0, length),
make_length_tag(length + gap_len, length))
expected = np.concatenate((np.zeros(prepad),
ewindow[0:5],
np.arange(0,
length,
dtype=float),
ewindow[5:10],
np.zeros(postpad),
np.zeros(prepad),
ewindow[0:5],
np.arange(length + gap_len,
2 * length + gap_len,
dtype=float),
ewindow[5:10],
np.zeros(postpad)))
burst_len = length + len(window) + prepad + postpad
etags = (make_length_tag(0, burst_len),
make_length_tag(burst_len, burst_len))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad,
insert_phasing=True)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
for i in range(len(etags)):
self.assertTrue(compare_tags(sink.tags()[i], etags[i]))
def test_tag_propagation(self):
'''
test_tag_propagation: test that non length tags are handled correctly
'''
prepad = 10
postpad = 10
length1 = 15
length2 = 25
gap_len = 5
lentag1_offset = 0
lentag2_offset = length1 + gap_len
tag1_offset = 0 # accompanies first length tag
tag2_offset = length1 + gap_len # accompanies second length tag
tag3_offset = 2 # in ramp-up state
tag4_offset = length1 + 2 # in gap; tag will be dropped
tag5_offset = length1 + gap_len + 7 # in copy state
data = np.concatenate((np.ones(length1), np.zeros(gap_len),
-1.0 * np.ones(length2)))
window = np.concatenate((-2.0 * np.ones(5), -4.0 * np.ones(5)))
tags = (make_length_tag(lentag1_offset, length1),
make_length_tag(lentag2_offset, length2),
make_tag(tag1_offset, 'head', pmt.intern('tag1')),
make_tag(tag2_offset, 'head', pmt.intern('tag2')),
make_tag(tag3_offset, 'body', pmt.intern('tag3')),
make_tag(tag4_offset, 'body', pmt.intern('tag4')),
make_tag(tag5_offset, 'body', pmt.intern('tag5')))
expected = np.concatenate((np.zeros(prepad), window[0:5],
np.ones(length1 - len(window)), window[5:10],
np.zeros(postpad + prepad), -1.0 * window[0:5],
-1.0 * np.ones(length2 - len(window)),
-1.0 * window[5:10], np.zeros(postpad)))
elentag1_offset = 0
elentag2_offset = length1 + prepad + postpad
etag1_offset = 0
etag2_offset = elentag2_offset
etag3_offset = prepad + tag3_offset
etag5_offset = 2 * prepad + postpad + tag5_offset - gap_len
etags = (make_length_tag(elentag1_offset, length1 + prepad + postpad),
make_length_tag(elentag2_offset, length2 + prepad + postpad),
make_tag(etag1_offset, 'head', pmt.intern('tag1')),
make_tag(etag2_offset, 'head', pmt.intern('tag2')),
make_tag(etag3_offset, 'body', pmt.intern('tag3')),
make_tag(etag5_offset, 'body', pmt.intern('tag5')))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
for x, y in zip(sorted(sink.tags()), sorted(etags)):
self.assertTrue(compare_tags(x, y))
if __name__ == '__main__':
gr_unittest.run(qa_burst_shaper)
| sdh11/gnuradio | gr-digital/python/digital/qa_burst_shaper.py | Python | gpl-3.0 | 15,116 |
# <pep8-80 compliant>
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Nutti <[email protected]>"
__status__ = "production"
__version__ = "4.1"
__date__ = "13 Nov 2016"
import bpy
from . import muv_props
PHI = 3.1415926535  # note: this is the value of pi, despite the name
def debug_print(*s):
"""
Print message to console in debugging mode
"""
if muv_props.DEBUG:
print(s)
def check_version(major, minor, unused):
    """
    Compare the running Blender version against (major, minor).
    Returns 0 if equal, 1 if Blender is newer, -1 if it is older.
    """
    if bpy.app.version[0] == major and bpy.app.version[1] == minor:
        return 0
    if bpy.app.version[0] > major:
        return 1
    if bpy.app.version[0] < major:
        return -1
    # same major version: decide on the minor version
    if bpy.app.version[1] > minor:
        return 1
    return -1
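# e.g. on Blender 2.78: check_version(2, 78, 0) == 0,
# check_version(2, 70, 0) == 1 and check_version(2, 80, 0) == -1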
def redraw_all_areas():
"""
Redraw all areas
"""
for area in bpy.context.screen.areas:
area.tag_redraw()
def get_space(area_type, region_type, space_type):
"""
    Return the first (area, region, space) matching the given types
"""
for area in bpy.context.screen.areas:
if area.type == area_type:
break
for region in area.regions:
if region.type == region_type:
break
for space in area.spaces:
if space.type == space_type:
break
return (area, region, space)
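# e.g. in this UV add-on: area, region, space = get_space(
#     'IMAGE_EDITOR', 'WINDOW', 'IMAGE_EDITOR')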
| Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons_contrib/uv_magic_uv/muv_common.py | Python | gpl-3.0 | 1,988 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from testtools.matchers import Equals
from testtools import TestCase
from snapcraft.plugins.v2.meson import MesonPlugin
class MesonPluginTest(TestCase):
def test_schema(self):
schema = MesonPlugin.get_schema()
self.assertThat(
schema,
Equals(
{
"$schema": "http://json-schema.org/draft-04/schema#",
"additionalProperties": False,
"properties": {
"meson-parameters": {
"default": [],
"items": {"type": "string"},
"type": "array",
"uniqueItems": True,
},
"meson-version": {"default": "", "type": "string"},
},
"required": ["source"],
"type": "object",
}
),
)
def test_get_build_packages(self):
plugin = MesonPlugin(part_name="my-part", options=lambda: None)
self.assertThat(
plugin.get_build_packages(),
Equals(
{
"ninja-build",
"gcc",
"python3-pip",
"python3-setuptools",
"python3-wheel",
}
),
)
def test_get_build_environment(self):
plugin = MesonPlugin(part_name="my-part", options=lambda: None)
self.assertThat(plugin.get_build_environment(), Equals(dict()))
def test_get_build_commands(self):
class Options:
meson_parameters = list()
meson_version = ""
plugin = MesonPlugin(part_name="my-part", options=Options())
self.assertThat(
plugin.get_build_commands(),
Equals(
[
"/usr/bin/python3 -m pip install -U meson",
"[ ! -d .snapbuild ] && meson .snapbuild",
"(cd .snapbuild && ninja)",
'(cd .snapbuild && DESTDIR="${SNAPCRAFT_PART_INSTALL}" ninja install)',
]
),
)
def test_get_build_commands_with_options(self):
class Options:
meson_parameters = ["--buildtype=release"]
meson_version = "2.2"
plugin = MesonPlugin(part_name="my-part", options=Options())
self.assertThat(
plugin.get_build_commands(),
Equals(
[
"/usr/bin/python3 -m pip install -U meson==2.2",
"[ ! -d .snapbuild ] && meson --buildtype=release .snapbuild",
"(cd .snapbuild && ninja)",
'(cd .snapbuild && DESTDIR="${SNAPCRAFT_PART_INSTALL}" ninja install)',
]
),
)
| ubuntu-core/snapcraft | tests/unit/plugins/v2/test_meson.py | Python | gpl-3.0 | 3,544 |
all( {% include "LinkedInObject.IsInstance.py" with variable="element" type=type.name|add:"."|add:type.name only %} for element in {{ variable }} )
| alvarovmz/PyLinkedIn | codegen/templates/LinkedInObject.IsInstance.list.complex.py | Python | gpl-3.0 | 148 |
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
        n = len(height)
        # left_max[i] holds the tallest bar at or before index i
        left_max = [0] * n
        highest = 0
        for i in range(n):
            highest = max(highest, height[i])
            left_max[i] = highest
        # sweep right to left: water above each bar is bounded by the lower
        # of the tallest bars to its left and right
        right_max = 0
        res = 0
        for i in reversed(range(n)):
            right_max = max(right_max, height[i])
            depth = min(right_max, left_max[i]) - height[i]
            if depth > 0:
                res += depth
        return res
return res | dichen001/Go4Jobs | JoeXu/42. Trapping rain water.py | Python | gpl-3.0 | 616 |
# coding=utf-8
import unittest
"""826. Most Profit Assigning Work
https://leetcode.com/problems/most-profit-assigning-work/description/
We have jobs: `difficulty[i]` is the difficulty of the `i`th job, and
`profit[i]` is the profit of the `i`th job.
Now we have some workers. `worker[i]` is the ability of the `i`th worker,
which means that this worker can only complete a job with difficulty at most
`worker[i]`.
Every worker can be assigned at most one job, but one job can be completed
multiple times.
For example, if 3 people attempt the same job that pays $1, then the total
profit will be $3. If a worker cannot complete any job, his profit is $0.
What is the most profit we can make?
**Example 1:**
**Input:** difficulty = [2,4,6,8,10], profit = [10,20,30,40,50], worker = [4,5,6,7]
**Output:** 100
    **Explanation:** Workers are assigned jobs of difficulty [4,4,6,6] and they get profit of [20,20,30,30] separately.
**Notes:**
* `1 <= difficulty.length = profit.length <= 10000`
* `1 <= worker.length <= 10000`
* `difficulty[i], profit[i], worker[i]` are in range `[1, 10^5]`
Similar Questions:
"""
class Solution(object):
    def maxProfitAssignment(self, difficulty, profit, worker):
        """
        :type difficulty: List[int]
        :type profit: List[int]
        :type worker: List[int]
        :rtype: int
        """
        # sort jobs by difficulty and workers by ability, then sweep once:
        # each worker takes the best profit among the jobs within reach
        jobs = sorted(zip(difficulty, profit))
        total = best = i = 0
        for ability in sorted(worker):
            while i < len(jobs) and jobs[i][0] <= ability:
                best = max(best, jobs[i][1])
                i += 1
            total += best
        return total
    def test(self):
        # example from the problem statement
        assert self.maxProfitAssignment(
            [2, 4, 6, 8, 10], [10, 20, 30, 40, 50], [4, 5, 6, 7]) == 100
if __name__ == "__main__":
unittest.main()
| openqt/algorithms | leetcode/python/lc826-most-profit-assigning-work.py | Python | gpl-3.0 | 1,478 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
## Copyright © 2007-2012, Matthias Urlichs <[email protected]>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
from homevent.reactor import ShutdownHandler
from homevent.module import load_module
from homevent.statement import main_words
from test import run
input = """\
block:
if exists path "..":
log DEBUG Yes
else:
log DEBUG No1
if exists path "...":
log DEBUG No2
else:
log DEBUG Yes
if exists directory "..":
log DEBUG Yes
else:
log DEBUG No3
if exists directory "README":
log DEBUG No4
else:
log DEBUG Yes
if exists file "README":
log DEBUG Yes
else:
log DEBUG No5
if exists file "..":
log DEBUG No6
else:
log DEBUG Yes
shutdown
"""
main_words.register_statement(ShutdownHandler)
load_module("logging")
load_module("ifelse")
load_module("path")
load_module("block")
run("path",input)
| smurfix/HomEvenT | test/mod_path.py | Python | gpl-3.0 | 1,386 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def fix_notes_field(apps, schema_editor):
Question = apps.get_model("checklist", "Question")
for question in Question.objects.all():
question.notes = ""
question.save()
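# note: RunPython below is given no reverse function, so this migration is
# irreversible as written; passing migrations.RunPython.noop as a second
# argument would allow reversing it as a no-op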
class Migration(migrations.Migration):
dependencies = [
('checklist', '0011_auto_20150928_1437'),
]
operations = [
migrations.RunPython(fix_notes_field),
]
| KevinSeghetti/survey | survey/checklist/migrations/0012_auto_20150928_1443.py | Python | gpl-3.0 | 503 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/krl1to5/Work/FULL/Sequence-ToolKit/2016/resources/ui/genrep/dialogs/setup.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_setup(object):
def setupUi(self, setup):
setup.setObjectName("setup")
setup.resize(510, 607)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(setup.sizePolicy().hasHeightForWidth())
setup.setSizePolicy(sizePolicy)
self.verticalLayout_8 = QtWidgets.QVBoxLayout(setup)
self.verticalLayout_8.setContentsMargins(-1, -1, -1, 9)
self.verticalLayout_8.setSpacing(15)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.tab_widget = QtWidgets.QTabWidget(setup)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tab_widget.sizePolicy().hasHeightForWidth())
self.tab_widget.setSizePolicy(sizePolicy)
self.tab_widget.setObjectName("tab_widget")
self.curve_to_show = QtWidgets.QWidget()
self.curve_to_show.setObjectName("curve_to_show")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.curve_to_show)
self.verticalLayout_3.setContentsMargins(12, 12, 12, 12)
self.verticalLayout_3.setSpacing(12)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.curve_1 = QtWidgets.QCheckBox(self.curve_to_show)
self.curve_1.setChecked(True)
self.curve_1.setObjectName("curve_1")
self.verticalLayout_3.addWidget(self.curve_1)
self.curve_2 = QtWidgets.QCheckBox(self.curve_to_show)
self.curve_2.setObjectName("curve_2")
self.verticalLayout_3.addWidget(self.curve_2)
self.curve_3 = QtWidgets.QCheckBox(self.curve_to_show)
self.curve_3.setObjectName("curve_3")
self.verticalLayout_3.addWidget(self.curve_3)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem)
self.tab_widget.addTab(self.curve_to_show, "")
self.for_tl = QtWidgets.QWidget()
self.for_tl.setObjectName("for_tl")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.for_tl)
self.verticalLayout_4.setContentsMargins(12, 12, 12, 12)
self.verticalLayout_4.setSpacing(12)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.show_label = QtWidgets.QLabel(self.for_tl)
self.show_label.setObjectName("show_label")
self.verticalLayout_4.addWidget(self.show_label)
self.curve_vs_time = QtWidgets.QRadioButton(self.for_tl)
self.curve_vs_time.setChecked(True)
self.curve_vs_time.setObjectName("curve_vs_time")
self.verticalLayout_4.addWidget(self.curve_vs_time)
self.curve_vs_temperature = QtWidgets.QRadioButton(self.for_tl)
self.curve_vs_temperature.setObjectName("curve_vs_temperature")
self.verticalLayout_4.addWidget(self.curve_vs_temperature)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem1)
self.tab_widget.addTab(self.for_tl, "")
self.format_horizontal_axis = QtWidgets.QWidget()
self.format_horizontal_axis.setObjectName("format_horizontal_axis")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.format_horizontal_axis)
self.verticalLayout_2.setContentsMargins(12, 12, 12, 12)
self.verticalLayout_2.setSpacing(12)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.layout = QtWidgets.QHBoxLayout()
self.layout.setSpacing(10)
self.layout.setObjectName("layout")
self.units_label = QtWidgets.QLabel(self.format_horizontal_axis)
self.units_label.setObjectName("units_label")
self.layout.addWidget(self.units_label)
self.units = QtWidgets.QComboBox(self.format_horizontal_axis)
self.units.setMinimumSize(QtCore.QSize(100, 28))
self.units.setObjectName("units")
self.units.addItem("")
self.units.addItem("")
self.layout.addWidget(self.units)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout.addItem(spacerItem2)
self.verticalLayout_2.addLayout(self.layout)
self.line = QtWidgets.QFrame(self.format_horizontal_axis)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_2.addWidget(self.line)
self.scale_area = QtWidgets.QGroupBox(self.format_horizontal_axis)
self.scale_area.setObjectName("scale_area")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.scale_area)
self.horizontalLayout_9.setContentsMargins(6, 6, 6, 6)
self.horizontalLayout_9.setSpacing(10)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.lineal = QtWidgets.QRadioButton(self.scale_area)
self.lineal.setChecked(True)
self.lineal.setAutoRepeat(False)
self.lineal.setObjectName("lineal")
self.horizontalLayout_9.addWidget(self.lineal)
self.log10 = QtWidgets.QRadioButton(self.scale_area)
self.log10.setObjectName("log10")
self.horizontalLayout_9.addWidget(self.log10)
self.ln = QtWidgets.QRadioButton(self.scale_area)
self.ln.setObjectName("ln")
self.horizontalLayout_9.addWidget(self.ln)
self.verticalLayout_2.addWidget(self.scale_area)
self.axis_values_area = QtWidgets.QGroupBox(self.format_horizontal_axis)
self.axis_values_area.setObjectName("axis_values_area")
self.verticalLayout = QtWidgets.QVBoxLayout(self.axis_values_area)
self.verticalLayout.setContentsMargins(6, 6, 6, 6)
self.verticalLayout.setSpacing(10)
self.verticalLayout.setObjectName("verticalLayout")
self.layout_2 = QtWidgets.QHBoxLayout()
self.layout_2.setSpacing(10)
self.layout_2.setObjectName("layout_2")
self.minimum_label = QtWidgets.QLabel(self.axis_values_area)
self.minimum_label.setObjectName("minimum_label")
self.layout_2.addWidget(self.minimum_label)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_2.addItem(spacerItem3)
self.group = QtWidgets.QGroupBox(self.axis_values_area)
self.group.setTitle("")
self.group.setFlat(True)
self.group.setObjectName("group")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.group)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(10)
self.horizontalLayout.setObjectName("horizontalLayout")
self.automatic_minimum = QtWidgets.QRadioButton(self.group)
self.automatic_minimum.setChecked(True)
self.automatic_minimum.setObjectName("automatic_minimum")
self.horizontalLayout.addWidget(self.automatic_minimum)
self.fixed_minimum = QtWidgets.QRadioButton(self.group)
self.fixed_minimum.setObjectName("fixed_minimum")
self.horizontalLayout.addWidget(self.fixed_minimum)
self.fixed_minimum_value = QtWidgets.QDoubleSpinBox(self.group)
self.fixed_minimum_value.setMinimumSize(QtCore.QSize(0, 28))
self.fixed_minimum_value.setMaximum(9999999.0)
self.fixed_minimum_value.setObjectName("fixed_minimum_value")
self.horizontalLayout.addWidget(self.fixed_minimum_value)
self.layout_2.addWidget(self.group)
self.verticalLayout.addLayout(self.layout_2)
self.layout_3 = QtWidgets.QHBoxLayout()
self.layout_3.setSpacing(10)
self.layout_3.setObjectName("layout_3")
self.maximum_label = QtWidgets.QLabel(self.axis_values_area)
self.maximum_label.setObjectName("maximum_label")
self.layout_3.addWidget(self.maximum_label)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_3.addItem(spacerItem4)
self.group_1 = QtWidgets.QGroupBox(self.axis_values_area)
self.group_1.setTitle("")
self.group_1.setFlat(True)
self.group_1.setObjectName("group_1")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.group_1)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setSpacing(10)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.automatic_maximum = QtWidgets.QRadioButton(self.group_1)
self.automatic_maximum.setChecked(True)
self.automatic_maximum.setObjectName("automatic_maximum")
self.horizontalLayout_2.addWidget(self.automatic_maximum)
self.fixed_maximum = QtWidgets.QRadioButton(self.group_1)
self.fixed_maximum.setObjectName("fixed_maximum")
self.horizontalLayout_2.addWidget(self.fixed_maximum)
self.fixed_maximum_value = QtWidgets.QDoubleSpinBox(self.group_1)
self.fixed_maximum_value.setMinimumSize(QtCore.QSize(0, 28))
self.fixed_maximum_value.setMaximum(9999999.0)
self.fixed_maximum_value.setObjectName("fixed_maximum_value")
self.horizontalLayout_2.addWidget(self.fixed_maximum_value)
self.layout_3.addWidget(self.group_1)
self.verticalLayout.addLayout(self.layout_3)
self.layout_4 = QtWidgets.QHBoxLayout()
self.layout_4.setSpacing(10)
self.layout_4.setObjectName("layout_4")
self.greater_label = QtWidgets.QLabel(self.axis_values_area)
self.greater_label.setObjectName("greater_label")
self.layout_4.addWidget(self.greater_label)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_4.addItem(spacerItem5)
self.group_2 = QtWidgets.QGroupBox(self.axis_values_area)
self.group_2.setTitle("")
self.group_2.setFlat(True)
self.group_2.setObjectName("group_2")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.group_2)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setSpacing(10)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.automatic_greater = QtWidgets.QRadioButton(self.group_2)
self.automatic_greater.setChecked(False)
self.automatic_greater.setObjectName("automatic_greater")
self.horizontalLayout_3.addWidget(self.automatic_greater)
self.fixed_greater = QtWidgets.QRadioButton(self.group_2)
self.fixed_greater.setChecked(True)
self.fixed_greater.setObjectName("fixed_greater")
self.horizontalLayout_3.addWidget(self.fixed_greater)
self.fixed_greater_value = QtWidgets.QDoubleSpinBox(self.group_2)
self.fixed_greater_value.setMinimumSize(QtCore.QSize(0, 28))
self.fixed_greater_value.setMaximum(9999999.0)
self.fixed_greater_value.setProperty("value", 20.0)
self.fixed_greater_value.setObjectName("fixed_greater_value")
self.horizontalLayout_3.addWidget(self.fixed_greater_value)
self.layout_4.addWidget(self.group_2)
self.verticalLayout.addLayout(self.layout_4)
self.layout_5 = QtWidgets.QHBoxLayout()
self.layout_5.setSpacing(10)
self.layout_5.setObjectName("layout_5")
self.smallest_label = QtWidgets.QLabel(self.axis_values_area)
self.smallest_label.setObjectName("smallest_label")
self.layout_5.addWidget(self.smallest_label)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_5.addItem(spacerItem6)
self.group_3 = QtWidgets.QGroupBox(self.axis_values_area)
self.group_3.setTitle("")
self.group_3.setFlat(True)
self.group_3.setObjectName("group_3")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.group_3)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setSpacing(10)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.automatic_smallest = QtWidgets.QRadioButton(self.group_3)
self.automatic_smallest.setChecked(False)
self.automatic_smallest.setObjectName("automatic_smallest")
self.horizontalLayout_4.addWidget(self.automatic_smallest)
self.fixed_smallest = QtWidgets.QRadioButton(self.group_3)
self.fixed_smallest.setChecked(True)
self.fixed_smallest.setObjectName("fixed_smallest")
self.horizontalLayout_4.addWidget(self.fixed_smallest)
self.fixed_smallest_value = QtWidgets.QDoubleSpinBox(self.group_3)
self.fixed_smallest_value.setMinimumSize(QtCore.QSize(0, 28))
self.fixed_smallest_value.setMaximum(9999999.0)
self.fixed_smallest_value.setProperty("value", 5.0)
self.fixed_smallest_value.setObjectName("fixed_smallest_value")
self.horizontalLayout_4.addWidget(self.fixed_smallest_value)
self.layout_5.addWidget(self.group_3)
self.verticalLayout.addLayout(self.layout_5)
self.verticalLayout_2.addWidget(self.axis_values_area)
spacerItem7 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem7)
self.tab_widget.addTab(self.format_horizontal_axis, "")
self.format_vertical_axis = QtWidgets.QWidget()
self.format_vertical_axis.setObjectName("format_vertical_axis")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.format_vertical_axis)
self.verticalLayout_6.setContentsMargins(12, 12, 12, 12)
self.verticalLayout_6.setSpacing(12)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.scale_area_2 = QtWidgets.QGroupBox(self.format_vertical_axis)
self.scale_area_2.setObjectName("scale_area_2")
self.horizontalLayout_19 = QtWidgets.QHBoxLayout(self.scale_area_2)
self.horizontalLayout_19.setContentsMargins(6, 6, 6, 6)
self.horizontalLayout_19.setSpacing(10)
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
self.lineal_2 = QtWidgets.QRadioButton(self.scale_area_2)
self.lineal_2.setChecked(True)
self.lineal_2.setAutoRepeat(False)
self.lineal_2.setObjectName("lineal_2")
self.horizontalLayout_19.addWidget(self.lineal_2)
self.log10_2 = QtWidgets.QRadioButton(self.scale_area_2)
self.log10_2.setObjectName("log10_2")
self.horizontalLayout_19.addWidget(self.log10_2)
self.ln_2 = QtWidgets.QRadioButton(self.scale_area_2)
self.ln_2.setObjectName("ln_2")
self.horizontalLayout_19.addWidget(self.ln_2)
self.verticalLayout_6.addWidget(self.scale_area_2)
self.axis_values_area_2 = QtWidgets.QGroupBox(self.format_vertical_axis)
self.axis_values_area_2.setObjectName("axis_values_area_2")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.axis_values_area_2)
self.verticalLayout_5.setContentsMargins(6, 6, 6, 6)
self.verticalLayout_5.setSpacing(10)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.layout_6 = QtWidgets.QHBoxLayout()
self.layout_6.setSpacing(10)
self.layout_6.setObjectName("layout_6")
self.minimum_label_2 = QtWidgets.QLabel(self.axis_values_area_2)
self.minimum_label_2.setObjectName("minimum_label_2")
self.layout_6.addWidget(self.minimum_label_2)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_6.addItem(spacerItem8)
self.group_4 = QtWidgets.QGroupBox(self.axis_values_area_2)
self.group_4.setTitle("")
self.group_4.setFlat(True)
self.group_4.setObjectName("group_4")
self.horizontalLayout_12 = QtWidgets.QHBoxLayout(self.group_4)
self.horizontalLayout_12.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_12.setSpacing(10)
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.automatic_minimum_2 = QtWidgets.QRadioButton(self.group_4)
self.automatic_minimum_2.setChecked(True)
self.automatic_minimum_2.setObjectName("automatic_minimum_2")
self.horizontalLayout_12.addWidget(self.automatic_minimum_2)
self.fixed_minimum_2 = QtWidgets.QRadioButton(self.group_4)
self.fixed_minimum_2.setObjectName("fixed_minimum_2")
self.horizontalLayout_12.addWidget(self.fixed_minimum_2)
self.fixed_minimum_value_2 = QtWidgets.QDoubleSpinBox(self.group_4)
self.fixed_minimum_value_2.setMinimumSize(QtCore.QSize(0, 28))
self.fixed_minimum_value_2.setMaximum(9999999.0)
self.fixed_minimum_value_2.setObjectName("fixed_minimum_value_2")
self.horizontalLayout_12.addWidget(self.fixed_minimum_value_2)
self.layout_6.addWidget(self.group_4)
self.verticalLayout_5.addLayout(self.layout_6)
self.layout_7 = QtWidgets.QHBoxLayout()
self.layout_7.setSpacing(10)
self.layout_7.setObjectName("layout_7")
self.maximum_label_2 = QtWidgets.QLabel(self.axis_values_area_2)
self.maximum_label_2.setObjectName("maximum_label_2")
self.layout_7.addWidget(self.maximum_label_2)
spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_7.addItem(spacerItem9)
self.group_5 = QtWidgets.QGroupBox(self.axis_values_area_2)
self.group_5.setTitle("")
self.group_5.setFlat(True)
self.group_5.setObjectName("group_5")
self.horizontalLayout_14 = QtWidgets.QHBoxLayout(self.group_5)
self.horizontalLayout_14.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_14.setSpacing(10)
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.automatic_maximum_2 = QtWidgets.QRadioButton(self.group_5)
self.automatic_maximum_2.setChecked(True)
self.automatic_maximum_2.setObjectName("automatic_maximum_2")
self.horizontalLayout_14.addWidget(self.automatic_maximum_2)
self.fixed_maximum_2 = QtWidgets.QRadioButton(self.group_5)
self.fixed_maximum_2.setObjectName("fixed_maximum_2")
self.horizontalLayout_14.addWidget(self.fixed_maximum_2)
self.fixed_maximum_value_2 = QtWidgets.QDoubleSpinBox(self.group_5)
self.fixed_maximum_value_2.setMinimumSize(QtCore.QSize(0, 28))
self.fixed_maximum_value_2.setMaximum(9999999.0)
self.fixed_maximum_value_2.setObjectName("fixed_maximum_value_2")
self.horizontalLayout_14.addWidget(self.fixed_maximum_value_2)
self.layout_7.addWidget(self.group_5)
self.verticalLayout_5.addLayout(self.layout_7)
self.layout_8 = QtWidgets.QHBoxLayout()
self.layout_8.setSpacing(10)
self.layout_8.setObjectName("layout_8")
self.greater_label_2 = QtWidgets.QLabel(self.axis_values_area_2)
self.greater_label_2.setObjectName("greater_label_2")
self.layout_8.addWidget(self.greater_label_2)
spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_8.addItem(spacerItem10)
self.group_6 = QtWidgets.QGroupBox(self.axis_values_area_2)
self.group_6.setTitle("")
self.group_6.setFlat(True)
self.group_6.setObjectName("group_6")
self.horizontalLayout_16 = QtWidgets.QHBoxLayout(self.group_6)
self.horizontalLayout_16.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_16.setSpacing(10)
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
self.automatic_greater_2 = QtWidgets.QRadioButton(self.group_6)
self.automatic_greater_2.setChecked(False)
self.automatic_greater_2.setObjectName("automatic_greater_2")
self.horizontalLayout_16.addWidget(self.automatic_greater_2)
self.fixed_greater_2 = QtWidgets.QRadioButton(self.group_6)
self.fixed_greater_2.setChecked(True)
self.fixed_greater_2.setObjectName("fixed_greater_2")
self.horizontalLayout_16.addWidget(self.fixed_greater_2)
self.fixed_greater_value_2 = QtWidgets.QDoubleSpinBox(self.group_6)
self.fixed_greater_value_2.setMinimumSize(QtCore.QSize(0, 28))
self.fixed_greater_value_2.setMaximum(9999999.0)
self.fixed_greater_value_2.setProperty("value", 5000.0)
self.fixed_greater_value_2.setObjectName("fixed_greater_value_2")
self.horizontalLayout_16.addWidget(self.fixed_greater_value_2)
self.layout_8.addWidget(self.group_6)
self.verticalLayout_5.addLayout(self.layout_8)
self.layout_9 = QtWidgets.QHBoxLayout()
self.layout_9.setSpacing(10)
self.layout_9.setObjectName("layout_9")
self.smallest_label_2 = QtWidgets.QLabel(self.axis_values_area_2)
self.smallest_label_2.setObjectName("smallest_label_2")
self.layout_9.addWidget(self.smallest_label_2)
spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_9.addItem(spacerItem11)
self.group_7 = QtWidgets.QGroupBox(self.axis_values_area_2)
self.group_7.setTitle("")
self.group_7.setFlat(True)
self.group_7.setObjectName("group_7")
self.horizontalLayout_18 = QtWidgets.QHBoxLayout(self.group_7)
self.horizontalLayout_18.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_18.setSpacing(10)
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
self.automatic_smallest_2 = QtWidgets.QRadioButton(self.group_7)
self.automatic_smallest_2.setChecked(False)
self.automatic_smallest_2.setObjectName("automatic_smallest_2")
self.horizontalLayout_18.addWidget(self.automatic_smallest_2)
self.fixed_smallest_2 = QtWidgets.QRadioButton(self.group_7)
self.fixed_smallest_2.setChecked(True)
self.fixed_smallest_2.setObjectName("fixed_smallest_2")
self.horizontalLayout_18.addWidget(self.fixed_smallest_2)
self.fixed_smallest_value_2 = QtWidgets.QDoubleSpinBox(self.group_7)
self.fixed_smallest_value_2.setMinimumSize(QtCore.QSize(0, 28))
self.fixed_smallest_value_2.setMaximum(9999999.0)
self.fixed_smallest_value_2.setProperty("value", 500.0)
self.fixed_smallest_value_2.setObjectName("fixed_smallest_value_2")
self.horizontalLayout_18.addWidget(self.fixed_smallest_value_2)
self.layout_9.addWidget(self.group_7)
self.verticalLayout_5.addLayout(self.layout_9)
spacerItem12 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_5.addItem(spacerItem12)
self.verticalLayout_6.addWidget(self.axis_values_area_2)
self.tab_widget.addTab(self.format_vertical_axis, "")
self.cursors = QtWidgets.QWidget()
self.cursors.setObjectName("cursors")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.cursors)
self.verticalLayout_7.setContentsMargins(12, 12, 12, 12)
self.verticalLayout_7.setSpacing(12)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.layout_10 = QtWidgets.QHBoxLayout()
self.layout_10.setObjectName("layout_10")
self.show_cursors_label = QtWidgets.QLabel(self.cursors)
self.show_cursors_label.setObjectName("show_cursors_label")
self.layout_10.addWidget(self.show_cursors_label)
spacerItem13 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_10.addItem(spacerItem13)
self.default_position_label = QtWidgets.QLabel(self.cursors)
self.default_position_label.setObjectName("default_position_label")
self.layout_10.addWidget(self.default_position_label)
self.verticalLayout_7.addLayout(self.layout_10)
self.layout_11 = QtWidgets.QHBoxLayout()
self.layout_11.setObjectName("layout_11")
self.signal = QtWidgets.QCheckBox(self.cursors)
self.signal.setChecked(True)
self.signal.setObjectName("signal")
self.layout_11.addWidget(self.signal)
spacerItem14 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_11.addItem(spacerItem14)
self.layout_12 = QtWidgets.QHBoxLayout()
self.layout_12.setObjectName("layout_12")
self.st_signal_low_label = QtWidgets.QLabel(self.cursors)
self.st_signal_low_label.setObjectName("st_signal_low_label")
self.layout_12.addWidget(self.st_signal_low_label)
self.signal_low = QtWidgets.QDoubleSpinBox(self.cursors)
self.signal_low.setMinimumSize(QtCore.QSize(110, 28))
self.signal_low.setMaximumSize(QtCore.QSize(110, 16777215))
self.signal_low.setMinimum(1.0)
self.signal_low.setMaximum(9999999.0)
self.signal_low.setObjectName("signal_low")
self.layout_12.addWidget(self.signal_low)
self.layout_11.addLayout(self.layout_12)
self.layout_13 = QtWidgets.QHBoxLayout()
self.layout_13.setObjectName("layout_13")
self.st_signal_high_label = QtWidgets.QLabel(self.cursors)
self.st_signal_high_label.setObjectName("st_signal_high_label")
self.layout_13.addWidget(self.st_signal_high_label)
self.signal_high = QtWidgets.QDoubleSpinBox(self.cursors)
self.signal_high.setMinimumSize(QtCore.QSize(110, 28))
self.signal_high.setMaximumSize(QtCore.QSize(110, 16777215))
self.signal_high.setMaximum(9999999.0)
self.signal_high.setProperty("value", 10.0)
self.signal_high.setObjectName("signal_high")
self.layout_13.addWidget(self.signal_high)
self.layout_11.addLayout(self.layout_13)
self.verticalLayout_7.addLayout(self.layout_11)
self.layout_14 = QtWidgets.QHBoxLayout()
self.layout_14.setObjectName("layout_14")
self.background = QtWidgets.QCheckBox(self.cursors)
self.background.setChecked(True)
self.background.setObjectName("background")
self.layout_14.addWidget(self.background)
spacerItem15 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_14.addItem(spacerItem15)
self.layout_15 = QtWidgets.QHBoxLayout()
self.layout_15.setObjectName("layout_15")
self.st_background_low_label = QtWidgets.QLabel(self.cursors)
self.st_background_low_label.setObjectName("st_background_low_label")
self.layout_15.addWidget(self.st_background_low_label)
self.background_low = QtWidgets.QDoubleSpinBox(self.cursors)
self.background_low.setMinimumSize(QtCore.QSize(110, 28))
self.background_low.setMaximumSize(QtCore.QSize(110, 16777215))
self.background_low.setMinimum(-9999999.0)
self.background_low.setMaximum(0.0)
self.background_low.setProperty("value", -10.0)
self.background_low.setObjectName("background_low")
self.layout_15.addWidget(self.background_low)
self.layout_14.addLayout(self.layout_15)
self.layout_16 = QtWidgets.QHBoxLayout()
self.layout_16.setObjectName("layout_16")
self.st_background_high_label = QtWidgets.QLabel(self.cursors)
self.st_background_high_label.setObjectName("st_background_high_label")
self.layout_16.addWidget(self.st_background_high_label)
self.background_high = QtWidgets.QDoubleSpinBox(self.cursors)
self.background_high.setMinimumSize(QtCore.QSize(110, 28))
self.background_high.setMaximumSize(QtCore.QSize(110, 16777215))
self.background_high.setMinimum(-9999999.0)
self.background_high.setMaximum(0.0)
self.background_high.setObjectName("background_high")
self.layout_16.addWidget(self.background_high)
self.layout_14.addLayout(self.layout_16)
self.verticalLayout_7.addLayout(self.layout_14)
spacerItem16 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_7.addItem(spacerItem16)
self.tab_widget.addTab(self.cursors, "")
self.verticalLayout_8.addWidget(self.tab_widget)
self.line_2 = QtWidgets.QFrame(setup)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout_8.addWidget(self.line_2)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setSpacing(10)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
spacerItem17 = QtWidgets.QSpacerItem(0, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem17)
self.push_button_accept = QtWidgets.QPushButton(setup)
self.push_button_accept.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_accept.setObjectName("push_button_accept")
self.horizontalLayout_5.addWidget(self.push_button_accept)
self.push_button_cancel = QtWidgets.QPushButton(setup)
self.push_button_cancel.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_cancel.setObjectName("push_button_cancel")
self.horizontalLayout_5.addWidget(self.push_button_cancel)
self.verticalLayout_8.addLayout(self.horizontalLayout_5)
spacerItem18 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_8.addItem(spacerItem18)
self.retranslateUi(setup)
self.tab_widget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(setup)
def retranslateUi(self, setup):
_translate = QtCore.QCoreApplication.translate
setup.setWindowTitle(_translate("setup", "SetUp"))
self.curve_1.setText(_translate("setup", "Curve 1"))
self.curve_2.setText(_translate("setup", "Curve 2"))
self.curve_3.setText(_translate("setup", "Curve 3"))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.curve_to_show), _translate("setup", "Curve to Show"))
self.show_label.setText(_translate("setup", "Show"))
self.curve_vs_time.setText(_translate("setup", "Curve vs time"))
self.curve_vs_temperature.setText(_translate("setup", "Curve vs Temperature"))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.for_tl), _translate("setup", "For TL"))
self.units_label.setText(_translate("setup", "Units"))
self.units.setItemText(0, _translate("setup", "channels"))
self.units.setItemText(1, _translate("setup", "s"))
self.scale_area.setTitle(_translate("setup", "Scale"))
        self.lineal.setText(_translate("setup", "Linear"))
self.log10.setText(_translate("setup", "Log10"))
self.ln.setText(_translate("setup", "Ln"))
self.axis_values_area.setTitle(_translate("setup", "Axis values"))
self.minimum_label.setText(_translate("setup", "Minimum"))
self.automatic_minimum.setText(_translate("setup", "Automatic"))
self.fixed_minimum.setText(_translate("setup", "Fixed"))
self.maximum_label.setText(_translate("setup", "Maximum"))
self.automatic_maximum.setText(_translate("setup", "Automatic"))
self.fixed_maximum.setText(_translate("setup", "Fixed"))
self.greater_label.setText(_translate("setup", "Greater Unit"))
self.automatic_greater.setText(_translate("setup", "Automatic"))
self.fixed_greater.setText(_translate("setup", "Fixed"))
self.smallest_label.setText(_translate("setup", "Smallest Unit"))
self.automatic_smallest.setText(_translate("setup", "Automatic"))
self.fixed_smallest.setText(_translate("setup", "Fixed"))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.format_horizontal_axis), _translate("setup", "Format Horizontal Axis"))
self.scale_area_2.setTitle(_translate("setup", "Scale"))
        self.lineal_2.setText(_translate("setup", "Linear"))
self.log10_2.setText(_translate("setup", "Log10"))
self.ln_2.setText(_translate("setup", "Ln"))
self.axis_values_area_2.setTitle(_translate("setup", "Axis values"))
self.minimum_label_2.setText(_translate("setup", "Minimum"))
self.automatic_minimum_2.setText(_translate("setup", "Automatic"))
self.fixed_minimum_2.setText(_translate("setup", "Fixed"))
self.maximum_label_2.setText(_translate("setup", "Maximum"))
self.automatic_maximum_2.setText(_translate("setup", "Automatic"))
self.fixed_maximum_2.setText(_translate("setup", "Fixed"))
self.greater_label_2.setText(_translate("setup", "Greater Unit"))
self.automatic_greater_2.setText(_translate("setup", "Automatic"))
self.fixed_greater_2.setText(_translate("setup", "Fixed"))
self.smallest_label_2.setText(_translate("setup", "Smallest Unit"))
self.automatic_smallest_2.setText(_translate("setup", "Automatic"))
self.fixed_smallest_2.setText(_translate("setup", "Fixed"))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.format_vertical_axis), _translate("setup", "Format Vertical Axis"))
self.show_cursors_label.setText(_translate("setup", "Show Cursors"))
self.default_position_label.setText(_translate("setup", "Default Position"))
self.signal.setText(_translate("setup", "Signal"))
self.st_signal_low_label.setText(_translate("setup", "Low"))
self.st_signal_high_label.setText(_translate("setup", "High"))
self.background.setText(_translate("setup", "Background"))
self.st_background_low_label.setText(_translate("setup", "Low"))
self.st_background_high_label.setText(_translate("setup", "High"))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.cursors), _translate("setup", "Cursors"))
self.push_button_accept.setText(_translate("setup", "Accept"))
self.push_button_cancel.setText(_translate("setup", "Cancel"))
| carlos-ferras/Sequence-ToolKit | view/genrep/dialogs/ui_setup.py | Python | gpl-3.0 | 34,982 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-08 19:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('event', '0015_auto_20170408_1815'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='team',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='equipe'),
),
]
| sandrofolk/girox | girox/event/migrations/0016_subscription_team.py | Python | gpl-3.0 | 498 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2010 by drubin <drubin at smartcube.co.za>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Stores a version-controlled history of your WeeChat configuration files
#Versions
# 0.1 drubin - First release.
# - Basic functionality to save version history of your config files (only git, bzr)
# 0.2 ShockkPony - Fixed massive weechat startup time caused by initial config loading
SCRIPT_NAME = "confversion"
SCRIPT_AUTHOR = "drubin <drubin at smartcube.co.za>"
SCRIPT_VERSION = "0.2"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Stores version controlled history of your configuration files"
import_ok = True
import subprocess
try:
import weechat
except ImportError:
print "This script must be run under WeeChat."
print "Get WeeChat now at: http://www.weechat.org/"
import_ok = False
# script options
settings = {
    #Currently supports git and bzr and possibly others that support a simple "init", "add *.conf", "commit -m <message>" workflow
    "versioning_method" : "git",
    "commit_each_change" : "true",
    "commit_message" : "Committing changes",
    #Allows you to not auto commit stuff that relates to these configs
    #Comma (,) separated list of config options
#The toggle_nicklist script can make this property annoying.
"auto_commit_ignore" : "weechat.bar.nicklist.hidden",
}
def shell_in_home(cmd):
try:
output = file("/dev/null","w")
subprocess.Popen(ver_method()+" "+cmd, cwd = weechat_home(),
stdout= output, stderr=output, shell=True)
except Exception as e:
print e
def weechat_home():
return weechat.info_get ("weechat_dir", "")
def ver_method():
return weechat.config_get_plugin("versioning_method")
def init_repo():
#Set up version control (doesn't matter if previously setup for bzr, git)
shell_in_home("init")
#Save first import OR on start up if needed.
commit_cb()
confversion_commit_finish_hook = 0
def commit_cb(data=None, remaining=None):
global confversion_commit_finish_hook
# only hook timer if not already hooked
if confversion_commit_finish_hook == 0:
confversion_commit_finish_hook = weechat.hook_timer(500, 0, 1, "commit_cb_finish", "")
return weechat.WEECHAT_RC_OK
def commit_cb_finish(data=None, remaining=None):
global confversion_commit_finish_hook
# save before doing commit
weechat.command("","/save")
# add all config changes to git
shell_in_home("add ./*.conf")
# do the commit
shell_in_home("commit -m \"%s\"" % weechat.config_get_plugin("commit_message"))
# set hook back to 0
confversion_commit_finish_hook = 0
return weechat.WEECHAT_RC_OK
def conf_update_cb(data, option, value):
#Commit data if not part of ignore list.
if weechat.config_get_plugin("commit_each_change") == "true" and not option in weechat.config_get_plugin("auto_commit_ignore").split(","):
        #Use a pause, else /save will be called before the config is actually saved to disk.
        #This is kind of a hack; a better approach would be appreciated.
weechat.hook_timer(500, 0, 1, "commit_cb", "")
return weechat.WEECHAT_RC_OK
def confversion_cmd(data, buffer, args):
commit_cb()
return weechat.WEECHAT_RC_OK
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "", ""):
for option, default_value in settings.iteritems():
if weechat.config_get_plugin(option) == "":
weechat.config_set_plugin(option, default_value)
weechat.hook_command("confversion", "Saves configurations to version control", "",
"",
"", "confversion_cmd", "")
init_repo()
hook = weechat.hook_config("*", "conf_update_cb", "")
| qguv/config | weechat/plugins/python/confversion.py | Python | gpl-3.0 | 4,417 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
float_or_none,
parse_iso8601,
str_or_none,
try_get,
unescapeHTML,
url_or_none,
ExtractorError,
)
class RteBaseIE(InfoExtractor):
def _real_extract(self, url):
item_id = self._match_id(url)
info_dict = {}
formats = []
ENDPOINTS = (
'https://feeds.rasset.ie/rteavgen/player/playlist?type=iptv&format=json&showId=',
'http://www.rte.ie/rteavgen/getplaylist/?type=web&format=json&id=',
)
for num, ep_url in enumerate(ENDPOINTS, start=1):
try:
data = self._download_json(ep_url + item_id, item_id)
except ExtractorError as ee:
if num < len(ENDPOINTS) or formats:
continue
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404:
error_info = self._parse_json(ee.cause.read().decode(), item_id, fatal=False)
if error_info:
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, error_info['message']),
expected=True)
raise
# NB the string values in the JSON are stored using XML escaping(!)
show = try_get(data, lambda x: x['shows'][0], dict)
if not show:
continue
if not info_dict:
title = unescapeHTML(show['title'])
description = unescapeHTML(show.get('description'))
thumbnail = show.get('thumbnail')
duration = float_or_none(show.get('duration'), 1000)
timestamp = parse_iso8601(show.get('published'))
info_dict = {
'id': item_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
}
mg = try_get(show, lambda x: x['media:group'][0], dict)
if not mg:
continue
if mg.get('url'):
m = re.match(r'(?P<url>rtmpe?://[^/]+)/(?P<app>.+)/(?P<playpath>mp4:.*)', mg['url'])
if m:
m = m.groupdict()
formats.append({
'url': m['url'] + '/' + m['app'],
'app': m['app'],
'play_path': m['playpath'],
'player_url': url,
'ext': 'flv',
'format_id': 'rtmp',
})
if mg.get('hls_server') and mg.get('hls_url'):
formats.extend(self._extract_m3u8_formats(
mg['hls_server'] + mg['hls_url'], item_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
if mg.get('hds_server') and mg.get('hds_url'):
formats.extend(self._extract_f4m_formats(
mg['hds_server'] + mg['hds_url'], item_id,
f4m_id='hds', fatal=False))
mg_rte_server = str_or_none(mg.get('rte:server'))
mg_url = str_or_none(mg.get('url'))
if mg_rte_server and mg_url:
hds_url = url_or_none(mg_rte_server + mg_url)
if hds_url:
formats.extend(self._extract_f4m_formats(
hds_url, item_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
class RteIE(RteBaseIE):
IE_NAME = 'rte'
IE_DESC = 'Raidió Teilifís Éireann TV'
_VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/',
'md5': '4a76eb3396d98f697e6e8110563d2604',
'info_dict': {
'id': '10478715',
'ext': 'mp4',
'title': 'iWitness',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'The spirit of Ireland, one voice and one minute at a time.',
'duration': 60.046,
'upload_date': '20151012',
'timestamp': 1444694160,
},
}
class RteRadioIE(RteBaseIE):
IE_NAME = 'rte:radio'
IE_DESC = 'Raidió Teilifís Éireann radio'
# Radioplayer URLs have two distinct specifier formats,
# the old format #!rii=<channel_id>:<id>:<playable_item_id>:<date>:
# the new format #!rii=b<channel_id>_<id>_<playable_item_id>_<date>_
# where the IDs are int/empty, the date is DD-MM-YYYY, and the specifier may be truncated.
# An <id> uniquely defines an individual recording, and is the only part we require.
_VALID_URL = r'https?://(?:www\.)?rte\.ie/radio/utils/radioplayer/rteradioweb\.html#!rii=(?:b?[0-9]*)(?:%3A|:|%5F|_)(?P<id>[0-9]+)'
_TESTS = [{
# Old-style player URL; HLS and RTMPE formats
'url': 'http://www.rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=16:10507902:2414:27-12-2015:',
'md5': 'c79ccb2c195998440065456b69760411',
'info_dict': {
'id': '10507902',
'ext': 'mp4',
'title': 'Gloria',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'md5:9ce124a7fb41559ec68f06387cabddf0',
'timestamp': 1451203200,
'upload_date': '20151227',
'duration': 7230.0,
},
}, {
# New-style player URL; RTMPE formats only
'url': 'http://rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=b16_3250678_8861_06-04-2012_',
'info_dict': {
'id': '3250678',
'ext': 'flv',
'title': 'The Lyric Concert with Paul Herriott',
'thumbnail': r're:^https?://.*\.jpg$',
'description': '',
'timestamp': 1333742400,
'upload_date': '20120406',
'duration': 7199.016,
},
'params': {
# rtmp download
'skip_download': True,
},
}]
| valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/rte.py | Python | gpl-3.0 | 5,026 |
# -*- coding: utf-8 -*-
"""Simulation launch services.
Simulation launch services provide the following functionality:
- generating simulation id based on local date and time;
- generating simulation directory name;
- loading names of simulation directories from a text file;
- creating directory structure for simulation;
- normalizing the format of executable;
- launching simulation.
"""
import os
import shlex
import subprocess
import time
from simtools.argparse import all_options as options
from simtools.base import is_iterable, is_string
TMP_DIR_PREFIX = "_"
def generate_sim_id():
"""Generate simulation id based on local date and time."""
t = time.localtime()
sim_id = "{0:04}{1:02}{2:02}_{3:02}{4:02}{5:02}".format(
t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
return sim_id
def generate_sim_dirname(tmp=False, sim_id=None):
"""Generate simulation directory name."""
if not sim_id:
sim_id = generate_sim_id()
return sim_id if not tmp else TMP_DIR_PREFIX + sim_id
def make_dirs(sim_dirname, sim_master_dirname=None, data_dirname=None):
"""Create directory structure for simulation."""
if sim_master_dirname is not None:
sim_path = os.path.join(sim_master_dirname, sim_dirname)
else:
sim_path = sim_dirname
os.makedirs(sim_path) # raises an error if simulation directory already
# exists
if data_dirname is not None:
os.makedirs(os.path.join(sim_path, data_dirname))
return sim_path
def run_sim(model_filename, params_filename=None, sim_id=None,
data_dirname=None, executable=None, model_args=None):
"""Launch simulation."""
cmd = []
if executable:
if is_string(executable):
cmd.append(executable)
else:
if not is_iterable(executable):
raise TypeError(
"'executable' is neither a string nor iterable.")
cmd += executable
cmd.append(model_filename)
if params_filename:
cmd += [options['params_filename']['arg'][1], params_filename]
if sim_id:
cmd += [options['sim_id']['arg'][1], sim_id]
if data_dirname:
cmd += [options['data_dirname']['arg'][1], data_dirname]
cmd.append(options['save_data']['arg'][1])
if model_args:
cmd += model_args
return subprocess.call(cmd)
def norm_executable(executable):
"""Normalize the format of executable."""
# Split executable name and arguments
executable = shlex.split(executable)
# If necessary, determine the absolute path to the executable
if not os.path.isabs(executable[0]) and os.path.isfile(executable[0]):
executable[0] = os.path.abspath(executable[0])
return executable
def load_sim_dirnames(filename):
"""Load names of simulation directories from a file."""
COMMENT_START_TOKEN = "#"
sim_dirnames = []
with open(filename) as sim_dirnames_file:
for line in sim_dirnames_file:
# Strip leading and trailing whitespace from the line
stripped_line = line.strip()
# If the stripped line is empty or contains only a comment, skip it
if (not stripped_line
or stripped_line.startswith(COMMENT_START_TOKEN)):
continue
# Assume that the stripped line contains a directory path and
# normalize it according to the platform
sim_dirname = os.path.normpath(stripped_line.replace("\\", os.sep))
sim_dirnames.append(sim_dirname)
return sim_dirnames
| macknowak/simtools | simtools/simrun.py | Python | gpl-3.0 | 3,606 |
from io import StringIO
import re
import httpretty
from django.core.management import call_command
from oppia.test import OppiaTestCase
from settings import constants
from settings.models import SettingProperties
from tests.utils import get_file_contents
class CartoDBUpdateTest(OppiaTestCase):
fixtures = ['tests/test_user.json',
'tests/test_oppia.json',
'tests/test_quiz.json',
'tests/test_permissions.json',
'default_badges.json',
'tests/test_course_permissions.json',
'tests/test_viz.json']
cartodb_valid_response = './oppia/fixtures/tests/cartodb/200_valid.json'
cartodb_uri_regex = re.compile(
"https://[A-Za-z0-9-]+.cartodb.com/api/v2/sql??(?:&?[^=&]*=[^=&]*)*")
@httpretty.activate
def test_cartodb_output(self):
cartodb_response = get_file_contents(self.cartodb_valid_response)
httpretty.register_uri(httpretty.GET,
self.cartodb_uri_regex,
body=cartodb_response)
SettingProperties.set_string(constants.OPPIA_CARTODB_ACCOUNT,
"account")
SettingProperties.set_string(constants.OPPIA_CARTODB_KEY,
"FAKE_APIKEY")
SettingProperties.set_string(constants.OPPIA_HOSTNAME, "localhost")
out = StringIO()
call_command('cartodb_update', stdout=out)
@httpretty.activate
def test_cartodb_no_key_account(self):
cartodb_response = get_file_contents(self.cartodb_valid_response)
httpretty.register_uri(httpretty.GET,
self.cartodb_uri_regex,
body=cartodb_response)
SettingProperties.set_string(constants.OPPIA_CARTODB_ACCOUNT, None)
SettingProperties.set_string(constants.OPPIA_CARTODB_KEY, None)
SettingProperties.set_string(constants.OPPIA_HOSTNAME, None)
out = StringIO()
call_command('cartodb_update', stdout=out)
| DigitalCampus/django-oppia | tests/viz/management/commands/test_cartodb_update.py | Python | gpl-3.0 | 2,062 |
#!/usr/bin/env python
# encoding: utf-8
import sqlite3
from sys import version_info
if version_info >= (3, 0, 0):
def listkey(dicts):
return list(dicts.keys())[0]
else:
def listkey(dicts):
return dicts.keys()[0]
class sqlitei:
    '''Thin convenience wrapper around common sqlite3 statements.'''
def __init__(self, path):
self.db = sqlite3.connect(path)
# self.db.text_factory = str
self.cs = self.db.cursor()
def commit(self):
self.db.commit()
def select(self, table, column, dump=None):
'''Select
table str, column list, dump dict.'''
columns = ','.join(column)
sql = 'select ' + columns + ' from ' + table
dumps = []
if dump:
dumpname = listkey(dump)
sql += ' where ' + dumpname + '=?'
dumps.append(dump[dumpname])
return self.cs.execute(sql, dumps)
def update(self, table, column, dump):
'''Update
table str, column dict, dump dict.'''
columns = []
columnx = ''
for c in column:
columnx += c + '=?,'
columns.append(column[c])
dumpname = listkey(dump)
sql = 'update ' + table + ' set '+ columnx[:-1] + ' where ' + dumpname + '=?'
columns.append(dump[dumpname])
return self.cs.execute(sql, columns)
def insert(self, table, column, dump):
'''Insert
table str, column list, dump list'''
dumps = ('?,'*len(dump))[:-1]
columns = ','.join(column)
sql = 'insert into ' + table + ' (' + columns + ') values (' +dumps + ')'
return self.cs.execute(sql, dump)
def delete(self, table, dump):
'''Delete
table str, dump dict'''
dumpname = listkey(dump)
sql = 'delete from ' + table + ' where ' + dumpname + '=?'
return self.cs.execute(sql, [dump[dumpname]])
| 54Pany/gum | data/libdbs.py | Python | gpl-3.0 | 1,870 |
# -*- coding: utf-8 -*-
import re
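# Note: the Chinese literals below are regex test data and are kept as-is;
# '用户输入的字符串' means "a string entered by the user" and '用户' means "user".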
test = '用户输入的字符串'
if re.match(r'用户', test):
print('ok')
else:
print('failed')
print('a b c'.split(' '))
print(re.split(r'\s*', 'a b c'))
print(re.split(r'[\s\,\;]+', 'a,b;; c d'))
m = re.match(r'^(\d{3})-(\d{3,8})$', '010-12345')
print(m.group(1))
m = re.match(r'^(\S+)@(\S+.com)$', '[email protected]')
print(m.group(2))
print(m.groups())
# <Tom Paris> tom@voyager .org
re_mail = re.compile(r'<(\S+)\s+(\S+)>\s+(\S+)@(\S+.org)')
print(re_mail.match('<Tom Paris> [email protected]').groups())
s = 'abcbacba'
# non-greedy match (renamed to avoid shadowing the re module and the str builtin)
pattern = re.compile(r'a.*?a', re.S)
print(pattern.match(s).group())
| cysuncn/python | study/test/TestRe.py | Python | gpl-3.0 | 655 |
def __load():
import imp, os, sys
ext = 'pygame/display.so'
for path in sys.path:
if not path.endswith('lib-dynload'):
continue
ext_path = os.path.join(path, ext)
if os.path.exists(ext_path):
mod = imp.load_dynamic(__name__, ext_path)
break
else:
raise ImportError(repr(ext) + " not found")
__load()
del __load
| mokuki082/EggDrop | code/build/bdist.macosx-10.6-intel/python3.4-standalone/app/temp/pygame/display.py | Python | gpl-3.0 | 396 |
#!/usr/bin/env python
gplheader = """Harmon Instruments CORDIC generator
Copyright (C) 2014 Harmon Instruments, LLC
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/
"""
import sys, os
import numpy as np
from config import *
def gen_angle_rom(name, bits, angles):
max_angle = np.sum(angles)
bits = min(bits, int(np.ceil(np.log2(max_angle))) + 1)
print "\treg signed [{}:0] {}[0:63];".format(bits-1, name)
print "initial begin"
for i in range(64):
j = np.arange(6)
m = (((i & (1<<j)) == 0) * 2) - 1 # -1 if i[j] is set or else 1
val = np.sum(m * angles[j])
val = np.clip(val, -1.0 * 2**(bits-1), (2**bits-1)-1)
print "\t{}[{}] = {};".format(name, i, int(np.round(val)))
print "end"
def gen_angle_roms(stages, bits):
angles = np.arctan(2.0 ** (-1.0 * np.arange(36)))
angles = np.concatenate([[np.pi*0.5], angles])
angles = angles * (2**(nbits_aout-1))/np.pi
for i in range(stages, len(angles)):
angles[i] = 0
nroms = int(np.ceil(stages/6.0))
for i in range(nroms):
gen_angle_rom('arom_{}'.format(i), bits, angles[6*i:6*i+6])
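# Illustrative floating-point reference for the vectoring-mode CORDIC that
# gen_translate() emits as Verilog (an assumed documentation helper; the
# generator itself never calls it).
def cordic_reference(x, y, iterations=16):
    # Prerotate by +/-90 degrees into the right half plane, mirroring the
    # generated Verilog: ccw if y < 0, cw otherwise.
    if y < 0:
        x, y, angle = -y, x, -0.5*np.pi
    else:
        x, y, angle = y, -x, 0.5*np.pi
    for i in range(iterations):
        step = np.arctan(2.0**-i)
        if y < 0:
            # rotate counterclockwise by atan(2^-i)
            x, y, angle = x - y*2.0**-i, y + x*2.0**-i, angle - step
        else:
            # rotate clockwise by atan(2^-i)
            x, y, angle = x + y*2.0**-i, y - x*2.0**-i, angle + step
    # x converges to gain * sqrt(x0^2 + y0^2) and angle to atan2(y0, x0),
    # where gain is the usual CORDIC scale factor.
    gain = np.prod(np.sqrt(1.0 + 2.0 ** (-2.0*np.arange(iterations))))
    return x, angle, gain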
def gen_translate():
nroms = int(np.ceil(stages/6.0))
gain = np.prod(np.sqrt(1.0 + 2.0 ** (-2.0*np.arange(stages))))
# header
print "/* generated by " + gplheader
print "gain =", gain
print "*/"
print "module {} (".format(name)
print "\tinput clock,"
print "\tinput signed [{}:0] in_re, in_im,".format(nbits_din-1)
print "\toutput signed [{}:0] out_angle,".format(nbits_aout-1)
print "\toutput [{}:0] out_mag);".format(nbits_din-1)
gen_angle_roms(stages, nbits_aout)
# declarations
for i in range(stages):
print "\treg [{0}:0] angle_{0} = 0;".format(i)
for i in range(stages):
print "\treg [{0}:0] re_{1} = 0;".format(nbits_din-1, i)
im_msb = nbits_din * np.ones(stages, dtype=int)
im_msb[1:stages] -= np.arange(stages-1, dtype=int)
for i in range(stages-1):
print "\treg signed [{0}:0] im_{1} = 0;".format(im_msb[i], i)
# assigns
print "\tassign out_mag = re_{};".format(stages-1)
print "\twire [31:0] langle = angle_{};".format(stages-1)
print "\tassign out_angle =",
for i in range(nroms):
print "arom_{}[langle[{}:{}]]".format(i, 6*i+5, 6*i),
if (i+1) != nroms:
print "+",
else:
print ";"
print "always @ (posedge clock) begin"
    # prerotate - if im < 0, rotate 90 degrees ccw, else rotate 90 degrees cw
print "\tangle_0 <= (in_im < 0);"
print "\tre_0 <= in_im < 0 ? 2'sd0 - in_im : in_im;"
print "\tim_0 <= in_im < 0 ? in_re : 2'sd0 - in_re;"
# rotate stages
for n in range(1, stages):
sub = "im_{} < 0".format(n-1)
print "\tangle_{0} <= {{{1}, angle_{2}}};".format(n, sub, n-1)
if n < im_msb[n]:
im_shifted = '(im_{0} >>> {0})'.format(n-1)
abs_im = "(im_{0} < 0 ? 2'sd0 - {1} : {1})".format(n-1, im_shifted)
print "\tre_{0} <= $signed(re_{1}) + {2};".format(n, n-1, abs_im)
else:
print "\tre_{} <= re_{};".format(n, n-1)
if n != stages - 1:
re_shifted = '(re_{0} >> {0})'.format(n-1)
print "\tim_{0} <= im_{1} >= 0 ? im_{1} - {2} : im_{1} + {2};"\
.format(n, n-1, re_shifted)
print "end"
print """initial
begin
$dumpfile("dump.vcd");
$dumpvars(0);
end"""
print "endmodule"
if __name__=="__main__":
gen_translate()
| HarmonInstruments/verilog | cordic/cordic.py | Python | gpl-3.0 | 4,069 |
import os
import sys
import numpy as np
import math
def findBinIndexFor(aFloatValue, binsList):
#print "findBinIndexFor: %s" % aFloatValue
returnIndex = -1
for i in range(len(binsList)):
thisBin = binsList[i]
if (aFloatValue >= thisBin[0]) and (aFloatValue < thisBin[1]):
returnIndex = i
break
return returnIndex
def compute_joint_prob(joint_list, vals1, vals2, bins1=None, bins2=None, asFreq=False):
returnDict = {}
for rec in joint_list:
val1 = rec[0]
val2 = rec[1]
#Find name by which first val should appear
dictName1 = val1
if bins1 is not None:
dictName1 = findBinIndexFor(val1, bins1)
#Find name by which second val should appear
dictName2 = val2
if bins2 is not None:
dictName2 = findBinIndexFor(val2, bins2)
#If first name is not present in dict,
#then initialize it
if dictName1 not in returnDict:
returnDict[dictName1] = {}
for val in vals2:
#Determine name under which
#y-values should appear (i.e. as bin names
#or as given names)
asDictName = val
if bins2 is not None:
asDictName = findBinIndexFor(val, bins2)
returnDict[dictName1][asDictName] = 0
returnDict[dictName1][dictName2]+=1
if not asFreq:
#Normalize values
for key in returnDict:
for secondKey in returnDict[key]:
returnDict[key][secondKey] = float(returnDict[key][secondKey]) / len(joint_list)
return returnDict
def getXForFixedY(joint_prob_dist, yVal):
returnList = []
for key in joint_prob_dist:
returnList.append( joint_prob_dist[key][yVal])
return returnList
def compute_h(floatsList):
returnFloat = None
acc = 0
for f in floatsList:
if f != 0:
acc = acc - f * math.log(f, 2)
returnFloat = acc
return returnFloat
# Computes the conditional entropy H(X|Y) of the joint distribution:
# H(X|Y) = -sum_{x,y} P(x,y) * log2(P(x,y) / P(y))
def conditional_entropy(joint_prob_dist, xVals, yVals):
returnFloat = None
h_acc = 0
marginal_y_dist = getYMarginalDist(joint_prob_dist)
for x in xVals:
for y in yVals:
joint_xy = 0
marginal_y = 0
if not x in joint_prob_dist or y not in joint_prob_dist[x]:
joint_xy = 0
else:
joint_xy = joint_prob_dist[x][y]
if not y in marginal_y_dist:
marginal_y = 0
else:
marginal_y = marginal_y_dist[y]
if joint_xy!=0 and marginal_y!=0:
h_acc-=joint_xy*math.log(joint_xy/marginal_y, 2)
# for yVal in yVals:
# new_xDist = getXForFixedY(joint_prob_dist, yVal)
# h_yVal = compute_h(new_xDist)
# p_yVal = reduce(lambda x, y: x+y, new_xDist)
# h_acc+=p_yVal * h_yVal
returnFloat = h_acc
return returnFloat
def getYMarginalDist(joint_prob_dist):
returnDict = {}
for xKey in joint_prob_dist:
for yKey in joint_prob_dist[xKey]:
if not yKey in returnDict:
returnDict[yKey] = 0
returnDict[yKey]+=joint_prob_dist[xKey][yKey]
return returnDict
def getXMarginalDist(joint_prob_dist):
returnDict = {}
for key in joint_prob_dist:
yVals = joint_prob_dist[key]
marginalVal = reduce(lambda x,y: x+y, [yVals[e] for e in yVals])
returnDict[key] = marginalVal
return returnDict
def entropy_loss(joint_prob_dist, xVals, yVals):
returnFloat = None
priorsDict = getXMarginalDist(joint_prob_dist)
priors = priorsDict.values()
h_prior = compute_h(priors)
h_conditional = conditional_entropy(joint_prob_dist, xVals, yVals)
returnFloat = h_prior - h_conditional
return returnFloat
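# Minimal usage sketch with assumed toy data (not part of the original module):
# the information gain of knowing Y about X for a small joint sample.
if __name__ == "__main__":
    sample = [("sunny", "yes"), ("sunny", "no"), ("rainy", "no"), ("rainy", "no")]
    xVals = ["sunny", "rainy"]
    yVals = ["yes", "no"]
    joint = compute_joint_prob(sample, xVals, yVals)
    print "P(X,Y):", joint
    print "information gain H(X) - H(X|Y):", entropy_loss(joint, xVals, yVals)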
| omoju/Fundamentals | Data/twitterDataAnalysis/info_gain.py | Python | gpl-3.0 | 3,353 |
# This file is part of HamsiManager.
#
# Copyright (c) 2010 - 2015 Murat Demir <[email protected]>
#
# Hamsi Manager is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Hamsi Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HamsiManager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import Amarok
from Amarok import Commands
from Core.MyObjects import *
import Taggers
import FileUtils as fu
from Core import Universals as uni
from Core import Dialogs
from Core import Records
from Core import ReportBug
def getDirectoriesAndValues(_filter=""):
db = Amarok.checkAndGetDB()
if db is not None:
return Commands.getDirectoriesAndValues(_filter)
return None
def getAllMusicFileValuesWithNames(_filter="", _artistId=None):
db = Amarok.checkAndGetDB()
if db is not None:
return Commands.getAllMusicFileValuesWithNames(_filter, _artistId)
return None
def getAllArtistsValues(_filter=""):
db = Amarok.checkAndGetDB()
if db is not None:
return Commands.getAllArtistsValues(_filter)
return None
def changePaths(_values, _type="auto"):
uni.startThreadAction()
allItemNumber = len(_values)
for valueNo, value in enumerate(_values):
isContinueThreadAction = uni.isContinueThreadAction()
if isContinueThreadAction:
try:
if _type == "file" or (_type == "auto" and fu.isFile(value["newPath"])):
Commands.changeFilePath(value["oldPath"], value["newPath"])
else:
Commands.changeDirectoryPath(value["oldPath"], value["newPath"])
except:
ReportBug.ReportBug()
else:
allItemNumber = valueNo + 1
Dialogs.showState(translate("Amarok/Operations", "Changing Paths In Amarok Database"),
valueNo + 1, allItemNumber, True)
if isContinueThreadAction is False:
break
uni.finishThreadAction()
def changeTags(_values):
uni.startThreadAction()
allItemNumber = len(_values)
for valueNo, value in enumerate(_values):
isContinueThreadAction = uni.isContinueThreadAction()
if isContinueThreadAction:
try:
Commands.changeTag(value)
except:
ReportBug.ReportBug()
else:
allItemNumber = valueNo + 1
Dialogs.showState(translate("Amarok/Operations", "Changing Tags In Amarok Database"),
valueNo + 1, allItemNumber, True)
if isContinueThreadAction is False:
break
uni.finishThreadAction()
def changeArtistValues(_values):
uni.startThreadAction()
allItemNumber = len(_values)
Dialogs.showState(translate("Amarok/Operations", "Writing Music Tags"), 0, allItemNumber, True)
for x, value in enumerate(_values):
isContinueThreadAction = uni.isContinueThreadAction()
if isContinueThreadAction:
try:
musicFilePathAndArtist = Commands.changeArtistValue(value)
if musicFilePathAndArtist is not None:
artistName = musicFilePathAndArtist[0]
for musicFilePath in musicFilePathAndArtist[1]:
if fu.isWritableFileOrDir(musicFilePath, False, True):
Records.add(str(translate("Amarok/Operations", "File will be updated")), str(musicFilePath))
currentArtistName = ""
tagger = Taggers.getTagger()
if tagger is not None:
try:
tagger.loadFileForWrite(musicFilePath)
currentArtistName = tagger.getArtist()
except:
tagger.loadFileForWrite(musicFilePath)
tagger.setArtist(artistName)
tagger.update()
Records.add(str(translate("Amarok/Operations", "Artist")), str(currentArtistName),
artistName)
for musicFilePath in musicFilePathAndArtist[2]:
if fu.isWritableFileOrDir(musicFilePath, False, True):
Records.add(str(translate("Amarok/Operations", "File will be updated")), str(musicFilePath))
currentArtistName = ""
tagger = Taggers.getTagger()
if tagger is not None:
try:
tagger.loadFileForWrite(musicFilePath)
currentArtistName = tagger.getAlbumArtist()
except:
tagger.loadFileForWrite(musicFilePath)
tagger.setAlbumArtist(artistName)
tagger.update()
Records.add(str(translate("Amarok/Operations", "albumArtist")), str(currentArtistName),
artistName)
except:
ReportBug.ReportBug()
else:
allItemNumber = x + 1
Dialogs.showState(translate("Amarok/Operations", "Writing Music Tags"), x + 1, allItemNumber, True)
if isContinueThreadAction is False:
break
uni.finishThreadAction()
| supermurat/hamsi-manager | Amarok/Operations.py | Python | gpl-3.0 | 5,993 |
"""
Example usage:
python3 testf0seqnrcalculation.py 548659 \
../testdata/sample_data_548659/L548659.parset \
../testdata/sample_data_548659/file-sizes.txt \
../testdata/sample_data_548659/f0seqnr-sizes.txt
"""
import os
import sys
sys.path.append("../scripts")
import create_html
def test_main(in_sas_id, in_parset_path, in_file_sizes_path, in_f0seqnr_sizes_path):
result = True
parset = create_html.parset_summary(in_sas_id, in_parset_path)
file_sizes_dict = create_html.parse_file_sizes(in_file_sizes_path)
analysed_file_sizes_dict = create_html.file_size_analysis(parset, file_sizes_dict)
highest_file_size_mb = analysed_file_sizes_dict['max_ms_size_mb'].max()
f0seqnr_sizes_dict = create_html.parse_file_sizes(in_f0seqnr_sizes_path, os.path.dirname)
f0seqnr_completeness_dict = create_html.f0seqnr_size_analysis(parset, f0seqnr_sizes_dict)
f0seqnr_completeness_statistics = create_html.calculate_statistics(f0seqnr_completeness_dict.values(), (100.0, None))
print("Relevant parset details:")
print("clock_mhz: ", parset['clock_mhz'])
print("start-time: ", parset['start_time'])
print("stop-time: ", parset['stop_time'])
print("block_size: ", parset['block_size'])
print("nr_integrations_per_block: ", parset['nr_integrations_per_block'])
print("nr_blocks_per_integration: ", parset['nr_blocks_per_integration'])
print("nr_integration_periods: ", parset['nr_integration_periods'])
print("Correlator locations: ", "\n".join(parset['correlator_locations']))
print("Beamformer locations: ", "\n".join(parset['beamformer_locations']))
print("\nContent of [file_sizes_dict]:")
for data_product_folder, (_, _, file_size_in_mb) in file_sizes_dict.items():
print(data_product_folder, " (", file_size_in_mb, "MB)")
print("\nContent of [analysed_file_sizes_dict]:")
print("max_ms_size_mb: ", analysed_file_sizes_dict['max_ms_size_mb'])
print("max_raw_size_mb: ", analysed_file_sizes_dict['max_raw_size_mb'])
print("missing_data_sets: ", analysed_file_sizes_dict['missing_data_sets'])
print("odd_sized_data_sets: ", analysed_file_sizes_dict['odd_sized_data_sets'])
print("percentage_complete: ", analysed_file_sizes_dict['percentage_complete'])
print("\nContent of [f0seqnr_sizes_dict]:")
for data_product_folder, (_, _, nr_integration_periods_in_file) in f0seqnr_sizes_dict.items():
print(data_product_folder, " (", nr_integration_periods_in_file, ")")
print("\nContent of [f0seqnr_completeness_dict]:")
for data_product_folder, completeness_value in f0seqnr_completeness_dict.items():
print(data_product_folder, " (", completeness_value, ")")
print("\nTotal average completeness: ", f0seqnr_completeness_statistics, " over ", len(f0seqnr_completeness_dict.values()), " number of items")
print("\nIncomplete datasets according to original method (odd_sized_data_sets (=) abs(float(data_size_mb)/float(max_ms_size_mb) -1.0) > 0.01):")
for (name, size) in sorted(analysed_file_sizes_dict['odd_sized_data_sets']):
print("Dataset: ", name, " Size: ", size, "MB")
print("\nIncomplete datasets according to f0seqnr method (completeness_value < 100):")
for data_product_folder, completeness_value in f0seqnr_completeness_dict.items():
if completeness_value < 99.95:
print("Dataset: ", data_product_folder, " Completeness: %0.1f%%" % completeness_value)
print("\nIncomplete datasets based on relative (Max size = %rMB) file size:" % highest_file_size_mb)
for data_product_folder, (_, _, file_size_in_mb) in file_sizes_dict.items():
if file_size_in_mb < highest_file_size_mb:
print("Dataset: ", data_product_folder, " Size: ", file_size_in_mb, "MB ", "(%0.f%%)" % (100*file_size_in_mb/highest_file_size_mb))
print('\n'.join(['%s: %dMB (%0.f%%)' % (name, size, f0seqnr_completeness_dict.get(name, -1)) for (name, size) in sorted(analysed_file_sizes_dict['odd_sized_data_sets'])]))
open("./index.html", 'w').write('''
<html>
<head>
<meta http-equiv="refresh" content="60">
<title>LOFAR Inspection plots</title>
</head>
<body>
<h1>LOFAR inspection plots</h1>
<table>
<tr><th>SAS ID</th> <th>Campaign</th> <th>Target</th> <th>DynSpec</th> <th title="Percentage of odd sized data products per project\n\nWhere 'odd sized' is defined as:\nData products with less than %0.2f%% completeness">Compl</th> <th title="Average completeness percentage of odd sized data products (based on f0seqnr sizes)\n\nWhere 'odd sized' is defined as:\nData products with less than %0.2f%% completeness">Compl*</th> <th>AntennaSet</th> <th>Band</th> <th>Start</th> <th>End</th> <th>Clock</th> <th>Subb</th> <th>Parset</th></tr>
%s
</table>
</body>
</html>
''' % (100*(1-create_html.DATA_INCOMPLETE_THRESHOLD),
100*(1-create_html.DATA_INCOMPLETE_THRESHOLD),
create_html.observation_table_row(parset, analysed_file_sizes_dict, f0seqnr_completeness_dict, "./")))
return result
def parse_arguments(argv):
sas_id = int(argv[1])
parset_path = argv[2]
file_sizes_path = argv[3]
f0seqnr_sizes_file_path = argv[4]
return sas_id, parset_path, file_sizes_path, f0seqnr_sizes_file_path
if __name__ == '__main__':
print("Called as: %s\n" % (" ".join(sys.argv)))
if len(sys.argv) == 5:
sas_id, parset_path, file_sizes_path, f0seqnr_sizes_file_path = parse_arguments(sys.argv)
if test_main(sas_id, parset_path, file_sizes_path, f0seqnr_sizes_file_path):
print("Test successful")
else:
print("Test unsuccessful")
else:
print ("Usage:\ntestf0seqnrcalculation [SAS ID] [parset file_path] [file_sizes_path] [f0seqnr_sizes_file_path]")
| brentjens/pyautoplot | test/testf0seqnrcalculation.py | Python | gpl-3.0 | 6,014 |
import aiohttp
import discord
import random
from config import GoogleAPIKey
from config import GoogleCSECX
async def google(cmd, message, args):
if not args:
await message.channel.send(cmd.help())
return
else:
search = ' '.join(args)
url = 'https://www.googleapis.com/customsearch/v1?q=' + search + '&cx=' + GoogleCSECX + '&key=' + GoogleAPIKey
async with aiohttp.ClientSession() as session:
async with session.get(url) as data:
results = await data.json()
google_colors = [0x4285f4, 0x34a853, 0xfbbc05, 0xea4335, 0x00a1f1, 0x7cbb00, 0xffbb00, 0xf65314]
embed_color = random.choice(google_colors)
try:
title = results['items'][0]['title']
url = results['items'][0]['link']
embed = discord.Embed(color=embed_color)
embed.set_author(name='Google', icon_url='https://avatars2.githubusercontent.com/u/1342004?v=3&s=400',
url='https://www.google.com/search?q=' + search)
embed.add_field(name=title, value='[**Link Here**](' + url + ')')
await message.channel.send(None, embed=embed)
except Exception as e:
cmd.log.error(e)
embed = discord.Embed(color=0xDB0000, title='❗ Daily Limit Reached.')
embed.set_footer(text='Google limits this API feature, and we hit that limit.')
await message.channel.send(None, embed=embed)
| valeth/apex-sigma | sigma/plugins/searches/google/google.py | Python | gpl-3.0 | 1,478 |
#!/usr/bin/python
from setuptools import setup, find_packages
setup(
name='plumber',
version='0.0.1-alpha',
description='simple, mundane script to build and publish containers to marathon/mesos',
author='Giuseppe Lavagetto',
author_email='[email protected]',
url='https://github.com/lavagetto/plumber',
install_requires=['argparse', 'Flask', 'jinja2'],
setup_requires=[],
zip_safe=True,
packages=find_packages(),
entry_points={
'console_scripts': [
'plumber-run = plumber.main:run',
],
},
)
| lavagetto/plumber | setup.py | Python | gpl-3.0 | 589 |
# Sebastian Raschka 2016
#
# `siteinterlock` is a Python package for selecting near-native protein-ligand
# docking poses based upon the hypothesis that interfacial rigidification
# of both the protein and ligand prove to be important characteristics of
# the native binding mode and are sensitive to the spatial coupling of
# interactions and bond-rotational degrees of freedom in the interface.
#
# Copyright (C) 2016 Michigan State University
# License: GPLv3
#
# SiteInterlock was developed in the
# Protein Structural Analysis & Design Laboratory
# (http://www.kuhnlab.bmb.msu.edu)
# Contact email: [email protected]
#
# Package author: Sebastian Raschka <http://sebastianraschka.com>
#
from .hether import hether
__all__ = ["hether"]
| psa-lab/siteinterlock | siteinterlock/proflex_utils/__init__.py | Python | gpl-3.0 | 741 |
#!/usr/bin/env python
# Calculate the 1000th element of the Fibonacci series. Fast.
# (Another popular tech interview question.)
import numpy;
# Definition of Fibonacci numbers:
# F(1) = 1
# F(2) = 1
# For n = 3, 4, 5, ...: F(n) = F(n-2) + F(n-1).
# Method one: recursion.
# Very inefficient: F(n) is called once, it calls F(n-1) once,
# F(n-2) is called twice (once by F(n) and once by F(n-1)),
# F(n-3) is called thrice (once by F(n-1) and twice by F(n-2)),
# F(n-k) is called F(k+1) times, that is an insane number of calls.
fibonaccirecursion = lambda n: 1 if n <=2 else fibonaccirecursion(n-2) + fibonaccirecursion(n-1);
# Method two: dual recursion. Returns the list [F(n-1),F(n)].
# Calling it with n triggers one call for each of 2, 3, ..., n-1: that is only O(n) calls.
def fibonaccidualrecursion(n):
if n >= 3:
a, b = fibonaccidualrecursion(n-1);
# F(n-2) = a, F(n-1) = b, F(n) = a+b.
return b, a+b;
elif n == 2:
return 1, 1;
elif n == 1:
# F(0) = 0.
return 0, 1;
else:
raise NotImplementedError;
# Method three: loop. Nothing fancy.
# Should be much like dual recursion without function call overhead.
def fibonacciloop(n):
a = 1;
b = 1;
for i in xrange(n-2):
c = a+b;
a = b;
b = c;
return b;
# Method four: even loop. Do two iterations at once to avoid moving around values.
# Slightly faster than simple loop.
def fibonaccievenloop(n):
a = 1;
b = 1;
for i in xrange(int(0.5*(n-2))):
a = a+b;
b = a+b;
if n % 2 == 1:
if n == 1:
return 1;
else:
return a+b;
else:
return b;
# Method five: direct formula.
# This is not faster if we need to calculate all Fibonacci numbers up to F(n),
# but much-much faster if we only need F(n), especially if n is large.
# This is how we solve second order homogeneous linear recursions in general:
# The characteristic polynomial of the recursion is x^2 = 1 + x.
# It has two distinct roots, x_12 = (1 pm sqrt(5)) / 2.
# Therefore a general series is alpha*x_1^n + beta*x_2^n.
# Two initial values, two coefficients, two degrees of freedom.
# (We would use alpha*x^n + beta*n*x^n if x was a multiple root.)
# Turns out |x_2| < 1, so we can omit this term and round.
# Note that floats hold integers exactly only up to 2**53, so beyond roughly
# n = 70 the rounded result can drift from the exact value, and near n = 1475
# the intermediate power overflows float64 entirely.
sqrt5 = numpy.sqrt(5);
fibonaccipower = lambda n: int(numpy.power(0.5*(1.0+sqrt5), n)/sqrt5 + 0.5);
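# Method six: fast doubling (an illustrative addition, not benchmarked below).
# Exact integer arithmetic in O(log n) steps, so unlike the direct formula it
# stays exact for n = 1000 and beyond. Uses the identities
# F(2k) = F(k)*(2*F(k+1) - F(k)) and F(2k+1) = F(k)^2 + F(k+1)^2.
def fibonaccidoubling(n):
    def doubling(k):
        # Returns the pair (F(k), F(k+1)).
        if k == 0:
            return 0, 1;
        a, b = doubling(k >> 1);
        c = a*(2*b - a);
        d = a*a + b*b;
        if k & 1:
            return d, c+d;
        return c, d;
    return doubling(n)[0];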
print;
print "Testing.";
print [fibonaccirecursion(n) for n in xrange(1,15)];
print [fibonaccidualrecursion(n)[1] for n in xrange(1,15)];
print [fibonacciloop(n) for n in xrange(1,15)];
print [fibonaccievenloop(n) for n in xrange(1,15)];
print [fibonaccipower(n) for n in xrange(1,15)];
if __name__ == "__main__":
import timeit;
number = 20;
n = 25;
print;
print "Timing n={0:d}.".format(n);
for i in ["fibonaccirecursion", "fibonaccidualrecursion", "fibonacciloop", "fibonaccievenloop", "fibonaccipower"]:
print "{0:s}: {1:f} us".format(i, 1e6*timeit.timeit("{0:s}({1:d})".format(i,n), setup="from __main__ import {0:s}".format(i), number=number));
number = 20;
n = 500;
print;
print "Timing n={0:d} (simple recursion would fill up stack).".format(n);
for i in ["fibonaccidualrecursion", "fibonacciloop", "fibonaccievenloop", "fibonaccipower"]:
print "{0:s}: {1:f} us".format(i, 1e6*timeit.timeit("{0:s}({1:d})".format(i,n), setup="from __main__ import {0:s}".format(i), number=number));
number = 20;
n = 1000;
print;
print "Timing n={0:d} (even dual recursion would fill up stack).".format(n);
for i in ["fibonacciloop", "fibonaccievenloop", "fibonaccipower"]:
print "{0:s}: {1:f} us".format(i, 1e6*timeit.timeit("{0:s}({1:d})".format(i,n), setup="from __main__ import {0:s}".format(i), number=number));
| bencebeky/etudes | fibonacci.py | Python | gpl-3.0 | 3,792 |
"""Routines for bubble format validation"""
import os
import itertools as it
from collections import Counter
from bubbletools.bbltree import BubbleTree
from bubbletools import utils
def validate(bbllines:iter, *, profiling=False):
"""Yield lines of warnings and errors about input bbl lines.
profiling -- yield also info lines about input bbl file.
If bbllines is a valid file name, it will be read.
Else, it should be an iterable of bubble file lines.
"""
if isinstance(bbllines, str):
if os.path.exists(bbllines): # filename containing bubble
bbllines = utils.file_lines(bbllines)
elif '\n' not in bbllines or '\t' not in bbllines:
# probably a bad file name: let's rise the proper error
bbllines = utils.file_lines(bbllines)
else: # bubble itself
bbllines = bbllines.split('\n')
bubble = tuple(bbllines)
data = tuple(utils.line_data(line) for line in bubble)
types = tuple(utils.line_type(line) for line in bubble)
# launch profiling
if profiling:
ltype_counts = Counter(types)
for ltype, count in ltype_counts.items():
yield 'INFO {} lines of type {}'.format(count, ltype)
yield 'INFO {} lines of payload'.format(
ltype_counts['EDGE'] + ltype_counts['IN'] +
ltype_counts['NODE'] + ltype_counts['SET'])
# launch validation
for errline in (l for l, t in zip(bubble, types) if t == 'ERROR'):
yield 'ERROR line is not bubble: "{}"'.format(errline)
tree = BubbleTree.from_bubble_data(data)
cc, subroots = tree.connected_components()
# print('cc:', cc)
# print('subroots:', subroots)
if profiling:
yield 'INFO {} top (power)nodes'.format(len(tree.roots))
yield 'INFO {} connected components'.format(len(cc))
yield 'INFO {} nodes are defined, {} are used'.format(
ltype_counts['NODE'], len(tuple(tree.nodes())))
yield 'INFO {} powernodes are defined, {} are used'.format(
ltype_counts['SET'], len(tuple(tree.powernodes())))
yield from inclusions_validation(tree)
yield from mergeability_validation(tree)
def inclusions_validation(tree:BubbleTree) -> iter:
"""Yield message about inclusions inconsistancies"""
# search for powernode overlapping
for one, two in it.combinations(tree.inclusions, 2):
assert len(one) == len(one.strip())
assert len(two) == len(two.strip())
one_inc = set(included(one, tree.inclusions))
two_inc = set(included(two, tree.inclusions))
common_inc = one_inc & two_inc
        if len(common_inc) == len(one_inc):
if not two in one_inc:
yield ("ERROR inconsistency in inclusions: {} is both"
" included and not included in {}.".format(two, one))
        if len(common_inc) == len(two_inc):
if not one in two_inc:
yield ("ERROR inconsistency in inclusions: {} is both"
" included and not included in {}.".format(one, two))
if len(common_inc) > 0: # one and two are not disjoint
if len(common_inc) == len(one_inc) or len(common_inc) == len(two_inc):
# one is included in the other
pass
else: # problem: some nodes are shared, but not all
yield ("ERROR overlapping powernodes:"
" {} nodes are shared by {} and {},"
" which are not in inclusion."
" Shared nodes are {}".format(
len(common_inc), one, two, common_inc))
for pwn in tree.powernodes():
# search for empty powernodes
if len(tree.inclusions[pwn]) == 0:
yield ("WARNING empty powernode: {} is defined,"
" but contains nothing".format(pwn))
# search for singleton powernodes
if len(tree.inclusions[pwn]) == 1:
yield ("WARNING singleton powernode: {} is defined,"
" but contains only {}".format(pwn, tree.inclusions[pwn]))
# search for cycles
nodes_in_cycles = utils.have_cycle(tree.inclusions)
if nodes_in_cycles:
yield ("ERROR inclusion cycle: the following {}"
" nodes are involved: {}".format(
len(nodes_in_cycles), set(nodes_in_cycles)))
def included(powernode:str, inclusions:dict, nodes_only=False) -> iter:
"""Yield (power)nodes below given powernode (contained by it,
or contained by a powernode contained by it, etc).
>>> sorted(included('p1', {'p1': ('p2', 1), 'p2': (3,), 1: (), 3: ()}), key=str)
[1, 3, 'p2']
>>> sorted(included('p1', {'p1': ('p2', 1), 'p2': (3,), 1: (), 3: ()}, nodes_only=True), key=str)
[1, 3]
"""
if nodes_only:
condition = lambda e: e != powernode and inclusions[e] == ()
else:
condition = lambda e: e != powernode
yield from (elem for elem in utils.walk(powernode, (inclusions,))
if condition(elem))
def mergeability_validation(tree:BubbleTree) -> iter:
"""Yield message about mergables powernodes"""
def gen_warnings(one, two, inc_message:str) -> [str]:
"Yield the warning for given (power)nodes if necessary"
nodetype = ''
if tree.inclusions[one] and tree.inclusions[two]:
nodetype = 'power'
elif tree.inclusions[one] or tree.inclusions[two]:
nodetype = '(power)'
if one > two: one, two = two, one
shared = set(tree.edges.get(one, ())) & set(tree.edges.get(two, ()))
if shared:
yield (f"WARNING mergeable {nodetype}nodes: {one} and {two}"
f" are {inc_message}, and share"
f" {len(shared)} neigbor{'s' if len(shared) > 1 else ''}")
for one, two in it.combinations(tree.roots, 2):
yield from gen_warnings(one, two, inc_message='both roots')
for parent, childs in tree.inclusions.items():
for one, two in it.combinations(childs, 2):
yield from gen_warnings(one, two, inc_message=f'in the same level (under {parent})')
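if __name__ == '__main__':
    # Minimal usage sketch; the inline bubble content is an assumed example
    # of the tab-separated bubble syntax, not data shipped with the package.
    bubble = 'NODE\ta\nNODE\tb\nSET\tp1\nIN\ta\tp1\nIN\tb\tp1\nEDGE\ta\tb'
    for message in validate(bubble, profiling=True):
        print(message)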
| Aluriak/bubble-tools | bubbletools/validator.py | Python | gpl-3.0 | 6,140 |
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
RST = None
# Raspberry Pi pin configuration:
#RST = 24
# Note the following are only used with SPI:
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
# Beaglebone Black pin configuration:
# RST = 'P9_12'
# Note the following are only used with SPI:
# DC = 'P9_15'
# SPI_PORT = 1
# SPI_DEVICE = 0
# 128x32 display with hardware I2C:
#disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
# 128x64 display with hardware I2C:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
# Note you can change the I2C address by passing an i2c_address parameter like:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)
# Alternatively you can specify an explicit I2C bus number, for example
# with the 128x32 display you would use:
disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2)
# 128x32 display with hardware SPI:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# 128x64 display with hardware SPI:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# Alternatively you can specify a software SPI implementation by providing
# digital GPIO pin numbers for all the required display pins. For example
# on a Raspberry Pi with the 128x32 display you might use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)
# Initialize library.
disp.begin()
# Clear display.
disp.clear()
disp.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
# Load default font.
#font = ImageFont.load_default()
thinFont = ImageFont.truetype('GeosansLight.ttf', 10)
draw.text((0, 0), 'CH', font=thinFont, fill=255)
thinFont2 = ImageFont.truetype('GeosansLight.ttf', 14)
#draw.text((40, 0), 'SENDER', font=thinFont2, fill=255)
#draw.text((40, 10), 'RECEIVER', font=thinFont2, fill=255)
draw.text((40, 16), 'REPEATER', font=thinFont2, fill=255)
draw.text((40,-3), '1758', font=thinFont2, fill=255)
# Alternatively load a TTF font. Make sure the .ttf font file is in the same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
boldFont = ImageFont.truetype('theboldfont.ttf', 44)
x = 14
draw.text((x, 0), '3', font=boldFont, fill=255)
#draw.text((x, top+20), 'World!', font=font, fill=255)
x=104
top = 0
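# Battery indicator: body outline, terminal nub on the right, and a fill bar
# drawn below in proportion to `percent`.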
draw.rectangle((x, top, x+20, top+10), outline=255, fill=0)
draw.rectangle((x+20, top+3, x+23, top+7), outline=255, fill=0)
percent = 90
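# Convert the percentage into a fill width in pixels (note that this reuses
# the `width` variable that previously held the display width).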
width=int((percent-5) / 5)
draw.rectangle((x+1, top+1, x+width, top+9), outline=255, fill=255)
x=80
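# Signal-style icon: two concentric arcs with a centre dot (radiating waves)
# followed by four bars of increasing height (signal strength).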
draw.arc([(x, top), (x+16, top+16)], 210, 330, fill=255)
draw.arc([(x+3, top+3), (x+13, top+13)], 210, 335, fill=255)
draw.ellipse((x+7,top+7, x+9, top+9), outline=255, fill=255)
draw.line((x+14,top+8,x+14,top+9), fill=255)
draw.line((x+16,top+6,x+16,top+9), fill=255)
draw.line((x+18,top+4,x+18,top+9), fill=255)
draw.line((x+20,top+2,x+20,top+9), fill=255)
# Draw an ellipse.
#draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
#x += shape_width+padding
# Draw a rectangle.
#draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
#x += shape_width+padding
# Draw a triangle.
#draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
#x += shape_width+padding
# Draw an X.
#draw.line((x, bottom, x+shape_width, top), fill=255)
#draw.line((x, top, x+shape_width, bottom), fill=255)
#x += shape_width+padding
# Display image.
disp.image(image)
disp.display()
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
draw.text((0, 0), 'CH', font=thinFont, fill=255)
thinFont2 = ImageFont.truetype('GeosansLight.ttf', 14)
#draw.text((40, 0), 'SENDER', font=thinFont2, fill=255)
#draw.text((40, 10), 'RECEIVER', font=thinFont2, fill=255)
draw.text((40, 16), 'REPEATER', font=thinFont2, fill=255)
draw.text((40,-3), '1758', font=thinFont2, fill=255)
# Alternatively load a TTF font. Make sure the .ttf font file is in the same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
boldFont = ImageFont.truetype('theboldfont.ttf', 44)
x = 14
draw.text((x, 0), '3', font=boldFont, fill=255)
#draw.text((x, top+20), 'World!', font=font, fill=255)
x=104
top = 0
draw.rectangle((x, top, x+20, top+10), outline=255, fill=0)
draw.rectangle((x+20, top+3, x+23, top+7), outline=255, fill=0)
percent = 90
width=int((percent-5) / 5)
draw.rectangle((x+1, top+1, x+width, top+9), outline=255, fill=255)
x=80
draw.arc([(x, top), (x+16, top+16)], 210, 330, fill=255)
draw.arc([(x+3, top+3), (x+13, top+13)], 210, 335, fill=255)
draw.ellipse((x+7,top+7, x+9, top+9), outline=255, fill=255)
draw.line((x+14,top+8,x+14,top+9), fill=255)
draw.line((x+16,top+6,x+16,top+9), fill=255)
draw.line((x+18,top+4,x+18,top+9), fill=255)
draw.line((x+20,top+2,x+20,top+9), fill=255)
# Display image.
disp.image(image)
disp.display()
"""
| henla464/WiRoc-Python-2 | testOLED.py | Python | gpl-3.0 | 6,908 |
import sys
import random
from linked_list_prototype import ListNode
from reverse_linked_list_iterative import reverse_linked_list
# @include
def zipping_linked_list(L):
if not L or not L.next:
return L
# Finds the second half of L.
slow = fast = L
while fast and fast.next:
slow, fast = slow.next, fast.next.next
first_half_head = L
second_half_head = slow.next
slow.next = None # Splits the list into two lists.
second_half_head = reverse_linked_list(second_half_head)
    # Interleave the first half and the reverse of the second half.
first_half_iter, second_half_iter = first_half_head, second_half_head
while second_half_iter:
second_half_iter.next, first_half_iter.next, second_half_iter = (
first_half_iter.next, second_half_iter, second_half_iter.next)
first_half_iter = first_half_iter.next.next
return first_half_head
# @exclude
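# Illustrative example: zipping 0 -> 1 -> 2 -> 3 yields 0 -> 3 -> 1 -> 2
# (first node, last node, second node, second-to-last node, ...).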
def main():
head = None
if len(sys.argv) > 2:
for i in sys.argv[1:]:
curr = ListNode(int(i), head)
head = curr
else:
n = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 1000)
for i in reversed(range(n + 1)):
curr = ListNode(i, head)
head = curr
curr = zipping_linked_list(head)
idx = 0
while curr:
if len(sys.argv) <= 2:
if idx & 1:
assert pre + curr.data == n
idx += 1
print(curr.data)
pre = curr.data
curr = curr.next
if __name__ == '__main__':
main()
| meisamhe/GPLshared | Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/zipping_list.py | Python | gpl-3.0 | 1,575 |
# -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing a dialog to enter the connection parameters.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from PyQt5.QtSql import QSqlDatabase
from E5Gui.E5Completers import E5FileCompleter
from E5Gui import E5FileDialog
from .Ui_SqlConnectionDialog import Ui_SqlConnectionDialog
import Utilities
import UI.PixmapCache
class SqlConnectionDialog(QDialog, Ui_SqlConnectionDialog):
"""
Class implementing a dialog to enter the connection parameters.
"""
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget (QWidget)
"""
super(SqlConnectionDialog, self).__init__(parent)
self.setupUi(self)
self.databaseFileButton.setIcon(UI.PixmapCache.getIcon("open.png"))
self.databaseFileCompleter = E5FileCompleter()
self.okButton = self.buttonBox.button(QDialogButtonBox.Ok)
drivers = QSqlDatabase.drivers()
# remove compatibility names
if "QMYSQL3" in drivers:
drivers.remove("QMYSQL3")
if "QOCI8" in drivers:
drivers.remove("QOCI8")
if "QODBC3" in drivers:
drivers.remove("QODBC3")
if "QPSQL7" in drivers:
drivers.remove("QPSQL7")
if "QTDS7" in drivers:
drivers.remove("QTDS7")
self.driverCombo.addItems(drivers)
self.__updateDialog()
msh = self.minimumSizeHint()
self.resize(max(self.width(), msh.width()), msh.height())
def __updateDialog(self):
"""
Private slot to update the dialog depending on its contents.
"""
driver = self.driverCombo.currentText()
if driver.startswith("QSQLITE"):
self.databaseEdit.setCompleter(self.databaseFileCompleter)
self.databaseFileButton.setEnabled(True)
else:
self.databaseEdit.setCompleter(None)
self.databaseFileButton.setEnabled(False)
if self.databaseEdit.text() == "" or driver == "":
self.okButton.setEnabled(False)
else:
self.okButton.setEnabled(True)
@pyqtSlot(str)
def on_driverCombo_activated(self, txt):
"""
Private slot handling the selection of a database driver.
@param txt text of the driver combo (string)
"""
self.__updateDialog()
@pyqtSlot(str)
def on_databaseEdit_textChanged(self, txt):
"""
Private slot handling the change of the database name.
@param txt text of the edit (string)
"""
self.__updateDialog()
@pyqtSlot()
def on_databaseFileButton_clicked(self):
"""
Private slot to open a database file via a file selection dialog.
"""
startdir = self.databaseEdit.text()
dbFile = E5FileDialog.getOpenFileName(
self,
self.tr("Select Database File"),
startdir,
self.tr("All Files (*)"))
if dbFile:
self.databaseEdit.setText(Utilities.toNativeSeparators(dbFile))
def getData(self):
"""
Public method to retrieve the connection data.
@return tuple giving the driver name (string), the database name
(string), the user name (string), the password (string), the
host name (string) and the port (integer)
"""
return (
self.driverCombo.currentText(),
self.databaseEdit.text(),
self.usernameEdit.text(),
self.passwordEdit.text(),
self.hostnameEdit.text(),
self.portSpinBox.value(),
)
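# Illustrative usage sketch (assumes a running QApplication):
#
#     dlg = SqlConnectionDialog()
#     if dlg.exec_() == QDialog.Accepted:
#         driver, database, user, password, host, port = dlg.getData()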
| davy39/eric | SqlBrowser/SqlConnectionDialog.py | Python | gpl-3.0 | 3,950 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <[email protected]>,
# Benjamin Bean <[email protected]>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# Benjamin Bean <[email protected]>
# Leif Theden <[email protected]>
#
#
# core.states.combat Combat Start module
#
#
from __future__ import division
import logging
from collections import namedtuple, defaultdict
from functools import partial
from itertools import chain
from operator import attrgetter
import pygame
from core import tools, state
from core.components.locale import translator
from core.components.pyganim import PygAnimation
from core.components.sprite import Sprite
from core.components.technique import Technique
from core.components.ui.draw import GraphicBox
from core.components.ui.text import TextArea
from .combat_animations import CombatAnimations
trans = translator.translate
# Create a logger for optional handling of debug messages.
logger = logging.getLogger(__name__)
logger.debug("%s successfully imported" % __name__)
EnqueuedAction = namedtuple("EnqueuedAction", "user technique target")
faint = Technique("status_faint")
def check_status(monster, status_name):
return any(t for t in monster.status if t.slug == status_name)
def fainted(monster):
return check_status(monster, "status_faint")
def get_awake_monsters(player):
""" Iterate all non-fainted monsters in party
:param player:
:return:
"""
for monster in player.monsters:
if not fainted(monster):
yield monster
def fainted_party(party):
return all(map(fainted, party))
def defeated(player):
return fainted_party(player.monsters)
class WaitForInputState(state.State):
""" Just wait for input blocking everything
"""
def process_event(self, event):
if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
self.game.pop_state(self)
class CombatState(CombatAnimations):
""" The state-menu responsible for all combat related tasks and functions.
.. image:: images/combat/monster_drawing01.png
General description of this class:
* implements a simple state machine
* various phases are executed using a queue of actions
* "decision queue" is used to queue player interactions/menus
* this class holds mostly logic, though some graphical functions exist
* most graphical functions are contained in "CombatAnimations" class
Currently, status icons are implemented as follows:
each round, all status icons are destroyed
status icons are created for each status on each monster
obvs, not ideal, maybe someday make it better? (see transition_phase)
"""
background_filename = "gfx/ui/combat/battle_bg03.png"
draw_borders = False
escape_key_exits = False
def startup(self, **kwargs):
        self.max_positions = 1  # TODO: make dependent on match type
self.phase = None
self.monsters_in_play = defaultdict(list)
self._damage_map = defaultdict(set) # track damage so experience can be awarded later
self._technique_cache = dict() # cache for technique animations
self._decision_queue = list() # queue for monsters that need decisions
self._position_queue = list() # queue for asking players to add a monster into play (subject to change)
self._action_queue = list() # queue for techniques, items, and status effects
self._status_icons = list() # list of sprites that are status icons
self._monster_sprite_map = dict() # monster => sprite
self._hp_bars = dict() # monster => hp bar
self._layout = dict() # player => home areas on screen
self._animation_in_progress = False # if true, delay phase change
self._winner = None # when set, combat ends
self._round = 0
super(CombatState, self).startup(**kwargs)
self.players = list(self.players)
self.show_combat_dialog()
self.transition_phase("begin")
self.task(partial(setattr, self, "phase", "ready"), 3)
def update(self, time_delta):
""" Update the combat state. State machine is checked.
General operation:
* determine what phase to execute
* if new phase, then run transition into new one
* update the new phase, or the current one
"""
super(CombatState, self).update(time_delta)
if not self._animation_in_progress:
new_phase = self.determine_phase(self.phase)
if new_phase:
self.phase = new_phase
self.transition_phase(new_phase)
self.update_phase()
def draw(self, surface):
super(CombatState, self).draw(surface)
self.draw_hp_bars()
def draw_hp_bars(self):
""" Go through the HP bars and redraw them
:returns: None
"""
for monster, hud in self.hud.items():
rect = pygame.Rect(0, 0, tools.scale(70), tools.scale(8))
rect.right = hud.image.get_width() - tools.scale(8)
rect.top += tools.scale(12)
self._hp_bars[monster].draw(hud.image, rect)
def determine_phase(self, phase):
""" Determine the next phase and set it
Part of state machine
Only test and set new phase.
* Do not execute phase actions
* Try not to modify any values
* Return a phase name and phase will change
* Return None and phase will not change
:returns: None or String
"""
if phase == "ready":
return "housekeeping phase"
elif phase == "housekeeping phase":
# this will wait for players to fill battleground positions
for player in self.active_players:
positions_available = self.max_positions - len(self.monsters_in_play[player])
if positions_available:
return
return "decision phase"
elif phase == "decision phase":
# assume each monster executes one action
# if number of actions == monsters, then all monsters are ready
if len(self._action_queue) == len(self.active_monsters):
return "pre action phase"
# TODO: change check so that it doesn't change state
# (state is changed because check_match_status will modify _winner)
# if a player runs, it will be known here
self.determine_winner()
if self._winner:
return "ran away"
elif phase == "pre action phase":
return "action phase"
        elif phase == "action phase":
if not self._action_queue:
return "post action phase"
elif phase == "post action phase":
if not self._action_queue:
return "resolve match"
elif phase == "ran away":
return "end combat"
elif phase == "has winner":
return "end combat"
elif phase == "resolve match":
if self._winner:
return "has winner"
else:
return "housekeeping phase"
def transition_phase(self, phase):
""" Change from one phase from another.
Part of state machine
* Will be run just -once- when phase changes.
* Do not change phase.
* Execute code only to change into new phase.
* The phase's update will be executed -after- this
:param phase:
:return:
"""
if phase == "housekeeping phase":
self._round += 1
# fill all battlefield positions, but on round 1, don't ask
self.fill_battlefield_positions(ask=self._round > 1)
if phase == "decision phase":
self.reset_status_icons()
if not self._decision_queue:
for player in self.human_players:
# the decision queue tracks human players who need to choose an
# action
self._decision_queue.extend(self.monsters_in_play[player])
for trainer in self.ai_players:
for monster in self.monsters_in_play[trainer]:
opponents = self.monsters_in_play[self.players[0]]
action, target = monster.ai.make_decision(monster, opponents)
self.enqueue_action(monster, action, target)
elif phase == "action phase":
self._action_queue.sort(key=attrgetter("user.speed"))
# TODO: Running happens somewhere else, it should be moved here i think.
# TODO: Sort other items not just healing, Swap/Run?
            # Create a new list for items, possibly running/swap
            # sort items by the speed of the monster they are applied to
            # remove items from action_queue and insert them into their new location
precedent = []
for action in self._action_queue:
if action.technique.effect == 'heal':
precedent.append(action)
            # sort items by fastest target
precedent.sort(key=attrgetter("target.speed"))
for action in precedent:
self._action_queue.remove(action)
self._action_queue.insert(0,action)
elif phase == "post action phase":
# apply status effects to the monsters
for monster in self.active_monsters:
for technique in monster.status:
self.enqueue_action(None, technique, monster)
elif phase == "resolve match":
self.determine_winner()
elif phase == "ran away":
# after 3 seconds, push a state that blocks until enter is pressed
# after the state is popped, the combat state will clean up and close
# if you run in PvP, you need "defeated message"
self.task(partial(self.game.push_state, "WaitForInputState"), 1)
self.suppress_phase_change(1)
elif phase == "has winner":
if self._winner:
# TODO: proper match check, etc
if self._winner.name == "Maple":
self.alert(trans('combat_defeat'))
else:
self.alert(trans('combat_victory'))
# after 3 seconds, push a state that blocks until enter is pressed
# after the state is popped, the combat state will clean up and close
self.task(partial(self.game.push_state, "WaitForInputState"), 1)
self.suppress_phase_change(1)
elif phase == "end combat":
self.end_combat()
def update_phase(self):
""" Execute/update phase actions
Part of state machine
* Do not change phase.
* Will be run each iteration phase is active.
* Do not test conditions to change phase.
:return: None
"""
if self.phase == "decision phase":
# show monster action menu for human players
if self._decision_queue:
monster = self._decision_queue.pop()
self.show_monster_action_menu(monster)
elif self.phase == "action phase":
self.handle_action_queue()
elif self.phase == "post action phase":
self.handle_action_queue()
def handle_action_queue(self):
""" Take one action from the queue and do it
:return: None
"""
if self._action_queue:
action = self._action_queue.pop()
self.perform_action(*action)
self.check_party_hp()
self.task(self.animate_party_status, 3)
def ask_player_for_monster(self, player):
""" Open dialog to allow player to choose a TXMN to enter into play
:param player:
:return:
"""
def add(menuitem):
monster = menuitem.game_object
if monster.current_hp == 0:
                tools.open_dialog(self.game, [trans("combat_fainted", parameters={"name": monster.name})])
            elif monster in self.active_monsters:
                tools.open_dialog(self.game, [trans("combat_isactive", parameters={"name": monster.name})])
else:
self.add_monster_into_play(player, monster)
self.game.pop_state()
state = self.game.push_state("MonsterMenuState")
# must use a partial because alert relies on a text box that may not exist
        # until after the state has been started up
state.task(partial(state.alert, trans("combat_replacement")), 0)
state.on_menu_selection = add
def fill_battlefield_positions(self, ask=False):
""" Check the battlefield for unfilled positions and send out monsters
:param ask: bool. if True, then open dialog for human players
:return:
"""
# TODO: let work for trainer battles
humans = list(self.human_players)
# TODO: integrate some values for different match types
released = False
for player in self.active_players:
positions_available = self.max_positions - len(self.monsters_in_play[player])
if positions_available:
available = get_awake_monsters(player)
for i in range(positions_available):
released = True
if player in humans and ask:
self.ask_player_for_monster(player)
else:
self.add_monster_into_play(player, next(available))
if released:
self.suppress_phase_change()
def add_monster_into_play(self, player, monster):
"""
:param player:
:param monster:
:return:
"""
# TODO: refactor some into the combat animations
feet = list(self._layout[player]['home'][0].center)
feet[1] += tools.scale(11)
self.animate_monster_release_bottom(feet, monster)
self.build_hud(self._layout[player]['hud'][0], monster)
self.monsters_in_play[player].append(monster)
# TODO: not hardcode
if player is self.players[0]:
self.alert(trans('combat_call_tuxemon', {"name": monster.name.upper()}))
else:
self.alert(trans('combat_wild_appeared', {"name": monster.name.upper()}))
def reset_status_icons(self):
""" Update/reset status icons for monsters
TODO: caching, etc
"""
# remove all status icons
for s in self._status_icons:
self.sprites.remove(s)
# add status icons
for monster in self.active_monsters:
for status in monster.status:
if status.icon:
# get the rect of the monster
rect = self._monster_sprite_map[monster].rect
# load the sprite and add it to the display
self.load_sprite(status.icon, layer=200, center=rect.topleft)
def show_combat_dialog(self):
""" Create and show the area where battle messages are displayed
"""
# make the border and area at the bottom of the screen for messages
x, y, w, h = self.game.screen.get_rect()
rect = pygame.Rect(0, 0, w, h // 4)
rect.bottomright = w, h
border = tools.load_and_scale(self.borders_filename)
self.dialog_box = GraphicBox(border, None, self.background_color)
self.dialog_box.rect = rect
self.sprites.add(self.dialog_box, layer=100)
# make a text area to show messages
self.text_area = TextArea(self.font, self.font_color)
self.text_area.rect = self.dialog_box.calc_inner_rect(self.dialog_box.rect)
self.sprites.add(self.text_area, layer=100)
def show_monster_action_menu(self, monster):
""" Show the main window for choosing player actions
:param monster: Monster to choose an action for
:type monster: core.components.monster.Monster
:returns: None
"""
message = trans('combat_monster_choice', {"name": monster.name})
self.alert(message)
x, y, w, h = self.game.screen.get_rect()
rect = pygame.Rect(0, 0, w // 2.5, h // 4)
rect.bottomright = w, h
state = self.game.push_state("MainCombatMenuState", columns=2)
state.monster = monster
state.rect = rect
def skip_phase_change(self):
""" Skip phase change animations
Useful if player wants to skip a battle animation
"""
for ani in self.animations:
ani.finish()
def enqueue_action(self, user, technique, target=None):
""" Add some technique or status to the action queue
:param user:
:param technique:
:param target:
:returns: None
"""
self._action_queue.append(EnqueuedAction(user, technique, target))
def remove_monster_actions_from_queue(self, monster):
""" Remove all queued actions for a particular monster
This is used mainly for removing actions after monster is fainted
:type monster: core.components.monster.Monster
:returns: None
"""
to_remove = set()
for action in self._action_queue:
if action.user is monster or action.target is monster:
to_remove.add(action)
        for action in to_remove:
            self._action_queue.remove(action)
def suppress_phase_change(self, delay=3):
""" Prevent the combat phase from changing for a limited time
Use this function to prevent the phase from changing. When
animating elements of the phase, call this to prevent player
input as well as phase changes.
:param delay:
:return:
"""
if self._animation_in_progress:
logger.debug("double suppress: bug?")
else:
self._animation_in_progress = True
self.task(partial(setattr, self, "_animation_in_progress", False), delay)
def perform_action(self, user, technique, target=None):
""" Do something with the thing: animated
:param user:
:param technique: Not a dict: a Technique or Item
:param target:
:returns:
"""
technique.advance_round()
# This is the time, in seconds, that the animation takes to finish.
action_time = 3.0
result = technique.use(user, target)
try:
tools.load_sound(technique.sfx).play()
except AttributeError:
pass
# action is performed, so now use sprites to animate it
# this value will be None if the target is off screen
target_sprite = self._monster_sprite_map.get(target, None)
# slightly delay the monster shake, so technique animation
# is synchronized with the damage shake motion
hit_delay = 0
if user:
message = trans('combat_used_x', {"user": user.name, "name": technique.name})
# TODO: a real check or some params to test if should tackle, etc
if result["should_tackle"]:
hit_delay += .5
user_sprite = self._monster_sprite_map[user]
self.animate_sprite_tackle(user_sprite)
if target_sprite:
self.task(partial(self.animate_sprite_take_damage, target_sprite), hit_delay + .2)
self.task(partial(self.blink, target_sprite), hit_delay + .6)
# Track damage
self._damage_map[target].add(user)
else: # assume this was an item used
if result["name"] == "capture":
message += "\n" + trans('attempting_capture')
self.task(partial(self.animate_capture_monster, result["success"], result["num_shakes"], target))
action_time = result["num_shakes"] + 1.8
if result["success"]: # end combat right here
self.task(self.end_combat, action_time + 0.5) # Display 'Gotcha!' first.
self.task(partial(self.alert, trans('gotcha')), action_time)
self.alert(message)
self._animation_in_progress = True
return
if result["success"]:
message += "\n" + trans('item_success')
else:
message += "\n" + trans('item_failure')
self.alert(message)
self.suppress_phase_change(action_time)
else:
if result["success"]:
self.suppress_phase_change()
self.alert(trans('combat_status_damage', {"name": target.name, "status": technique.name}))
if result["success"] and target_sprite and hasattr(technique, "images"):
tech_sprite = self.get_technique_animation(technique)
tech_sprite.rect.center = target_sprite.rect.center
self.task(tech_sprite.image.play, hit_delay)
self.task(partial(self.sprites.add, tech_sprite, layer=50), hit_delay)
self.task(tech_sprite.kill, 3)
def faint_monster(self, monster):
""" Instantly make the monster faint (will be removed later)
:type monster: core.components.monster.Monster
:returns: None
"""
monster.current_hp = 0
monster.status = [faint]
"""
Experience is earned when the target monster is fainted.
Any monsters who contributed any amount of damage will be awarded
Experience is distributed evenly to all participants
"""
if monster in self._damage_map:
# Award Experience
awarded_exp = monster.total_experience / monster.level / len(self._damage_map[monster])
for winners in self._damage_map[monster]:
winners.give_experience(awarded_exp)
# Remove monster from damage map
del self._damage_map[monster]
def animate_party_status(self):
""" Animate monsters that need to be fainted
* Animation to remove monster is handled here
TODO: check for faint status, not HP
:returns: None
"""
for player in self.monsters_in_play.keys():
for monster in self.monsters_in_play[player]:
if fainted(monster):
self.alert(trans('combat_fainted', {"name": monster.name}))
self.animate_monster_faint(monster)
self.suppress_phase_change(3)
def check_party_hp(self):
""" Apply status effects, then check HP, and party status
* Monsters will be removed from play here
:returns: None
"""
for player in self.monsters_in_play.keys():
for monster in self.monsters_in_play[player]:
self.animate_hp(monster)
if monster.current_hp <= 0 and not fainted(monster):
self.remove_monster_actions_from_queue(monster)
self.faint_monster(monster)
def get_technique_animation(self, technique):
""" Return a sprite usable as a technique animation
TODO: move to some generic animation loading thingy
:type technique: core.components.technique.Technique
:rtype: core.components.sprite.Sprite
"""
try:
return self._technique_cache[technique]
except KeyError:
sprite = self.load_technique_animation(technique)
self._technique_cache[technique] = sprite
return sprite
@staticmethod
def load_technique_animation(technique):
"""
TODO: move to some generic animation loading thingy
:param technique:
:rtype: core.components.sprite.Sprite
"""
frame_time = .09
images = list()
for fn in technique.images:
image = tools.load_and_scale(fn)
images.append((image, frame_time))
tech = PygAnimation(images, False)
sprite = Sprite()
sprite.image = tech
sprite.rect = tech.get_rect()
return sprite
@property
def active_players(self):
""" Generator of any non-defeated players/trainers
:rtype: collections.Iterable[core.components.player.Player]
"""
for player in self.players:
if not defeated(player):
yield player
@property
def human_players(self):
for player in self.players:
if player.isplayer:
yield player
@property
def ai_players(self):
for player in set(self.active_players) - set(self.human_players):
yield player
@property
def active_monsters(self):
""" List of any non-defeated monsters on battlefield
:rtype: list
"""
return list(chain.from_iterable(self.monsters_in_play.values()))
def remove_player(self, player):
# TODO: non SP things
self.players.remove(player)
self.suppress_phase_change()
self.alert(trans('combat_player_run'))
def determine_winner(self):
""" Determine if match should continue or not
:return:
"""
if self._winner:
return
players = list(self.active_players)
if len(players) == 1:
self._winner = players[0]
def end_combat(self):
""" End the combat
"""
# TODO: End combat differently depending on winning or losing
# clear action queue
self._action_queue = list()
event_engine = self.game.event_engine
fadeout_action = namedtuple("action", ["type", "parameters"])
fadeout_action.type = "fadeout_music"
fadeout_action.parameters = [1000]
event_engine.actions["fadeout_music"]["method"](self.game, fadeout_action)
# remove any menus that may be on top of the combat state
while self.game.current_state is not self:
self.game.pop_state()
self.game.push_state("FadeOutTransition", caller=self)
| nikitakurylev/TuxemonX | tuxemon/core/states/combat/combat.py | Python | gpl-3.0 | 27,084 |
from jroc.tasks.tokenizers.TokenizerTask import SentenceTokenizerTask, WordTokenizerTask
| domenicosolazzo/jroc | tests/tasks/tokenizers/__init__.py | Python | gpl-3.0 | 89 |
from django.forms import *
from django.forms.formsets import BaseFormSet
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from tradeschool.models import *
class DefaultBranchForm(Form):
def __init__(self, user, redirect_to, *args, **kwargs):
super(DefaultBranchForm, self).__init__(*args, **kwargs)
if user.is_superuser:
branches = Branch.objects.all()
else:
branches = Branch.objects.filter(pk__in=user.branches_organized.all)
choices = [(o.id, unicode(o.title)) for o in branches]
self.fields['default_branch'] = forms.ChoiceField(choices=choices)
if user.default_branch:
self.initial['default_branch'] = user.default_branch.pk
self.initial['organizer_id'] = user.pk
self.initial['redirect_to'] = redirect_to
default_branch = forms.ChoiceField()
organizer_id = forms.IntegerField(widget=forms.HiddenInput)
redirect_to = forms.CharField(widget=forms.HiddenInput)
class TimeModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
from django.utils import timezone
current_tz = timezone.get_current_timezone()
date = obj.start_time.astimezone(current_tz).strftime('%A, %b %d')
time = obj.start_time.astimezone(current_tz).strftime(
'%I:%M%p').lstrip('0').lower()
if obj.venue is not None:
return "%s %s at %s" % (date, time, obj.venue)
return "%s %s" % (date, time)
class TimeSelectionForm(Form):
"""
A simple dropdown menu for teachers to select an available time
when submitting a class. Uses the Time model
"""
time = TimeModelChoiceField(
queryset=Time.objects.all(),
error_messages={'required': _('Please select a time'), }
)
class BranchForm(ModelForm):
def __init__(self, *args, **kwargs):
super(BranchForm, self).__init__(*args, **kwargs)
self.fields['city'].error_messages['required'] = _(
"Please enter a city")
self.fields['country'].error_messages['required'] = _(
"Please enter a country")
self.initial['site'] = Site.objects.get_current()
class Meta:
model = Branch
fields = (
'city',
'state',
'country',
)
class TeacherForm(ModelForm):
def __init__(self, *args, **kwargs):
"Sets custom meta data to the form's fields"
super(ModelForm, self).__init__(*args, **kwargs)
self.fields['fullname'].error_messages['required'] = _(
"Please enter your name")
self.fields['email'].error_messages['required'] = _(
"Please enter your email")
self.fields['bio'].error_messages['required'] = _(
"Please tell us about yourself")
self.fields['phone'].error_messages['required'] = _(
"Please enter phone number")
class Meta:
model = Person
fields = ('fullname', 'email', 'phone', 'bio', 'website')
# since bio is set to blank=True in the Person model
# to accommodate students, we're setting it here manually.
bio = forms.CharField(
required=True,
label=_("A few sentences about you"),
help_text=_("For prospective students to see on the website"),
widget=forms.Textarea
)
class OrganizerForm(TeacherForm):
"""
"""
def __init__(self, *args, **kwargs):
"Sets custom meta data to the form's fields"
super(TeacherForm, self).__init__(*args, **kwargs)
self.fields['fullname'].error_messages['required'] = _(
"Please enter your name")
self.fields['email'].error_messages['required'] = _(
"Please enter your email")
self.fields['names_of_co_organizers'].error_messages['required'] = _(
"Please enter the names of at least one or two more organizers")
self.fields['bio'].error_messages['required'] = _(
"Please tell us about why you would like to open a Trade School in your area")
class Meta:
model = Person
fields = (
'fullname',
'names_of_co_organizers',
'email',
'bio',
)
# since names_of_co_organizers is set to blank=True in the Person model
# to accommodate students and teachers, we're setting it here manually.
names_of_co_organizers = forms.CharField(
required=True,
label=_("Names of Co-Organizers"),
)
bio = forms.CharField(
required=True,
label=_("A few sentences about why your group wants to open a Trade School"),
widget=forms.Textarea
)
class CourseForm(ModelForm):
def __init__(self, *args, **kwargs):
"Sets custom meta data to the form's fields"
super(ModelForm, self).__init__(*args, **kwargs)
self.fields['title'].error_messages['required'] = _(
"Please enter a class title")
self.fields['description'].error_messages['required'] = _(
"Please enter a class description")
self.fields['max_students'].error_messages['required'] = _(
"Please enter the maximum number of students in your class")
class Meta:
model = Course
fields = ('title', 'description', 'max_students')
class BarterItemForm(ModelForm):
def __init__(self, *args, **kwargs):
"Sets custom meta data to the form's fields"
super(ModelForm, self).__init__(*args, **kwargs)
self.fields['title'].widget.attrs['class'] = 'barter_item'
self.fields['title'].error_messages['required'] = _(
"Barter item cannot be blank")
class Meta:
model = BarterItem
fields = ('title',)
class BaseBarterItemFormSet(BaseFormSet):
def __init__(self, branch, *args, **kwargs):
""
self.branch = branch
super(BaseBarterItemFormSet, self).__init__(*args, **kwargs)
def clean(self):
"Checks that at least 5 barter items form are filled"
count = 0
required = self.branch.min_barteritems
if any(self.errors):
return
for form in self.forms:
if form.is_bound:
if form['title'].data:
count += 1
if count < required:
raise forms.ValidationError(
_("Please add at least %i barter items" % required)
)
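# Illustrative usage sketch (hypothetical view code): the branch is passed as
# the first argument so that `branch.min_barteritems` drives validation.
#
#     BarterItemFormSet = formset_factory(BarterItemForm,
#                                         formset=BaseBarterItemFormSet)
#     formset = BarterItemFormSet(branch, data=request.POST)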
class RegistrationForm(ModelForm):
def __init__(self, course, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
self.fields['items'].queryset = BarterItem.objects.filter(
course=course)
self.fields['items'].error_messages['required'] = _(
"Please select at least one item")
self.fields['items'].empty_label = None
class Meta:
model = Registration
fields = ('items', )
widgets = {'items': CheckboxSelectMultiple(), }
class StudentForm(ModelForm):
def __init__(self, *args, **kwargs):
super(StudentForm, self).__init__(*args, **kwargs)
self.fields['fullname'].error_messages['required'] = _(
"Please enter your name")
self.fields['email'].error_messages['required'] = _(
"Please enter your email")
self.fields['phone'].error_messages['required'] = _(
"Please enter your phone number")
class Meta:
model = Person
fields = ('fullname', 'email', 'phone')
class FeedbackForm(ModelForm):
def __init__(self, *args, **kwargs):
super(FeedbackForm, self).__init__(*args, **kwargs)
self.fields['content'].error_messages['required'] = _(
"Please enter your feedback")
class Meta:
model = Feedback
fields = ('content',)
| orzubalsky/tradeschool | ts/apps/tradeschool/forms.py | Python | gpl-3.0 | 7,844 |
# -*- coding: utf-8 -*-
# MLC (Machine Learning Control): A genetic algorithm library to solve chaotic problems
# Copyright (C) 2015-2017, Thomas Duriez ([email protected])
# Copyright (C) 2015, Adrian Durán ([email protected])
# Copyright (C) 2015-2017, Ezequiel Torres Feyuk ([email protected])
# Copyright (C) 2016-2017, Marco Germano Zbrun ([email protected])
# Copyright (C) 2016-2017, Raúl Lopez Skuba ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from BaseCreation import BaseCreation
from MLC.db.mlc_repository import MLCRepository
class IndividualSelection(BaseCreation):
"""
Fill a Population with fixed Individuals.
selected_individuals: dictionary containing {Individual: positions inside
the first population}
fill_creator: creator used to fill empty positions.
Empty positions inside the Population will be completed using the neighbor individual,
"""
def __init__(self, selected_individuals, fill_creator):
BaseCreation.__init__(self)
self.__fill_creator = fill_creator
self.__selected_individuals = selected_individuals
self.__individuals = []
def create(self, gen_size):
self.__fill_creator.create(gen_size)
self.__individuals = self.__fill_creator.individuals()
# Add Individuals
for individual, positions in self.__selected_individuals.items():
for position in positions:
if position < gen_size:
individual_id, _ = MLCRepository.get_instance().add_individual(individual)
self.__individuals[position] = (position, individual_id)
def individuals(self):
return self.__individuals
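# Illustrative usage sketch (hypothetical values): pin one Individual to the
# first two slots and let the fill creator produce the rest.
#
#     creation = IndividualSelection({some_individual: [0, 1]}, fill_creator)
#     creation.create(gen_size=10)
#     slots = creation.individuals()  # list of (position, individual_id) pairs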
| MachineLearningControl/OpenMLC-Python | MLC/Population/Creation/IndividualSelection.py | Python | gpl-3.0 | 2,329 |
"""
WSGI config for school_registry project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "school_registry.settings")
application = get_wsgi_application()
| agustin380/school-registry | src/school_registry/wsgi.py | Python | gpl-3.0 | 407 |
import card
from card import Card
from player import Player
from hand import Hand
from prompt import Prompt, IntegerPrompt, SetPrompt
import pprint
class Setup:
def run(self, game):
self.game = game
self.cards_accounted_for = 0
self.setup_conviction()
self.initialize_cards()
self.setup_me()
self.setup_opponents()
self.setup_my_cards()
def setup_conviction(self):
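        # The conviction hand models the hidden solution: one card of each
        # type is set aside, accounted for but never held by any player.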
self.game.conviction = Hand(card.COUNT_TYPES, game=self.game)
self.game.hands.add(self.game.conviction)
self.cards_accounted_for += card.COUNT_TYPES
def initialize_cards(self):
self.game.cards.add(Card(card.TYPE_ROOM, 'Lounge'))
self.game.cards.add(Card(card.TYPE_ROOM, 'Dining Room'))
self.game.cards.add(Card(card.TYPE_ROOM, 'Kitchen'))
self.game.cards.add(Card(card.TYPE_ROOM, 'Ballroom'))
self.game.cards.add(Card(card.TYPE_ROOM, 'Conservatory'))
self.game.cards.add(Card(card.TYPE_ROOM, 'Billiard Room'))
self.game.cards.add(Card(card.TYPE_ROOM, 'Library'))
self.game.cards.add(Card(card.TYPE_ROOM, 'Study'))
self.game.cards.add(Card(card.TYPE_ROOM, 'Hall'))
self.game.cards.add(Card(card.TYPE_PERSON, 'Miss Scarlett'))
        self.game.cards.add(Card(card.TYPE_PERSON, 'Colonel Mustard'))
self.game.cards.add(Card(card.TYPE_PERSON, 'Misses White'))
self.game.cards.add(Card(card.TYPE_PERSON, 'Mister Green'))
self.game.cards.add(Card(card.TYPE_PERSON, 'Misses Peacock'))
        self.game.cards.add(Card(card.TYPE_PERSON, 'Professor Plum'))
self.game.cards.add(Card(card.TYPE_WEAPON, 'Lead Pipe'))
self.game.cards.add(Card(card.TYPE_WEAPON, 'Wrench'))
self.game.cards.add(Card(card.TYPE_WEAPON, 'Knife'))
self.game.cards.add(Card(card.TYPE_WEAPON, 'Revolver'))
self.game.cards.add(Card(card.TYPE_WEAPON, 'Candlestick'))
self.game.cards.add(Card(card.TYPE_WEAPON, 'Rope'))
def setup_me(self):
name = self.game.prompt(Prompt('Your name:'))
card_count = self.game.prompt(IntegerPrompt('Count your cards:', len(self.game.cards) - self.cards_accounted_for))
player = Player(name, Hand(card_count, game=self.game))
self.game.hands.add(player.hand)
self.game.me = player
self.game.players.add(player)
self.cards_accounted_for += card_count
def setup_opponents(self):
while self.cards_accounted_for < len(self.game.cards):
cards_left = len(self.game.cards) - self.cards_accounted_for
name = self.game.prompt(Prompt('Opponent name:'))
card_count = self.game.prompt(IntegerPrompt(
'Cards held by {} ({} left):'.format(
name,
cards_left
),
cards_left
))
player = Player(name, Hand(card_count, game=self.game))
self.game.hands.add(player.hand)
self.game.players.add(player)
self.cards_accounted_for += card_count
def setup_my_cards(self):
while len(self.game.me.hand.has_set) < self.game.me.hand.count:
self.game.me.hand.has(self.game.prompt(SetPrompt('Your card:', self.game.cards, exclude=self.game.me.hand.has_set)))
| jonahbron/clue | setup.py | Python | gpl-3.0 | 3,316 |
# coding=utf-8
"""
Utility Serializers
"""
from rest_framework.serializers import HyperlinkedModelSerializer
class HybridModelSerializer(HyperlinkedModelSerializer):
"""
ModelSerializer which provides both a `url` and `id` field
"""
def get_pk_field(self, model_field):
return self.get_field(model_field)
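# Illustrative usage sketch (hypothetical Ticket model): list both fields in
# Meta so the serializer emits `url` and `id` side by side.
#
#     class TicketSerializer(HybridModelSerializer):
#         class Meta:
#             model = Ticket
#             fields = ('id', 'url', 'subject')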
| bigbangdev/cityhelpdeskdjango | cityhelpdesk/utility/serializers.py | Python | gpl-3.0 | 333 |
# coding=utf-8
"""Request handler for authentication."""
from __future__ import unicode_literals
import logging
import random
import string
import time
from builtins import range
import jwt
from medusa import app, helpers, notifiers
from medusa.logger.adapters.style import BraceAdapter
from medusa.server.api.v2.base import BaseRequestHandler
from six import text_type
from tornado.escape import json_decode
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class AuthHandler(BaseRequestHandler):
"""Auth request handler."""
#: resource name
name = 'authenticate'
#: allowed HTTP methods
allowed_methods = ('POST', )
def _check_authentication(self):
"""Override authentication check for the authentication endpoint."""
return None
def post(self, *args, **kwargs):
"""Request JWT."""
username = app.WEB_USERNAME
password = app.WEB_PASSWORD
        # If the user hasn't set a username and/or password, just let them log in
if not username.strip() or not password.strip():
return self._login()
if not self.request.body:
return self._failed_login(error='No Credentials Provided')
if self.request.headers['content-type'] != 'application/json':
return self._failed_login(error='Incorrect content-type')
request_body = json_decode(self.request.body)
submitted_username = request_body.get('username')
submitted_password = request_body.get('password')
submitted_exp = request_body.get('exp', 86400)
if username != submitted_username or password != submitted_password:
return self._failed_login(error='Invalid credentials')
return self._login(submitted_exp)
def _login(self, exp=86400):
self.set_header('Content-Type', 'application/json')
if app.NOTIFY_ON_LOGIN and not helpers.is_ip_private(self.request.remote_ip):
notifiers.notify_login(self.request.remote_ip)
log.info('{user} logged into the API v2', {'user': app.WEB_USERNAME})
time_now = int(time.time())
return self._ok(data={
'token': jwt.encode({
'iss': 'Medusa ' + text_type(app.APP_VERSION),
'iat': time_now,
# @TODO: The jti should be saved so we can revoke tokens
'jti': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)),
'exp': time_now + int(exp),
'username': app.WEB_USERNAME,
'apiKey': app.API_KEY
}, app.ENCRYPTION_SECRET, algorithm='HS256').decode('utf-8')
})
def _failed_login(self, error=None):
log.warning('{user} attempted a failed login to the API v2 from IP: {ip}', {
'user': app.WEB_USERNAME,
'ip': self.request.remote_ip
})
return self._unauthorized(error=error)
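# Illustrative request body for this resource (field names match the ones read
# in post() above; `exp` is the token lifetime in seconds, defaulting to 86400):
#
#     POST /api/v2/authenticate
#     {"username": "admin", "password": "secret", "exp": 3600}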
| pymedusa/SickRage | medusa/server/api/v2/auth.py | Python | gpl-3.0 | 2,962 |
from collections import OrderedDict
from django import forms
from django.conf import settings
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from oioioi.base.utils.input_with_generate import TextInputWithGenerate
from oioioi.base.utils.inputs import narrow_input_field
from oioioi.contests.models import ProblemStatementConfig, RankingVisibilityConfig
from oioioi.problems.models import OriginInfoValue, Problem, ProblemSite
class ProblemUploadForm(forms.Form):
contest_id = forms.CharField(widget=forms.HiddenInput, required=False)
def __init__(self, contest, existing_problem, *args, **kwargs):
user = kwargs.pop('user', None)
super(ProblemUploadForm, self).__init__(*args, **kwargs)
self.round_id = None
self.visibility = None
if contest and not existing_problem:
choices = [(r.id, r.name) for r in contest.round_set.all()]
if len(choices) >= 2:
fields = list(self.fields.items())
fields[0:0] = [
('round_id', forms.ChoiceField(choices=choices, label=_("Round")))
]
self.fields = OrderedDict(fields)
elif len(choices) == 1:
self.round_id = choices[0][0]
if 'oioioi.problemsharing' in settings.INSTALLED_APPS and not existing_problem:
if user and user.has_perm('teachers.teacher'):
choices = [
(Problem.VISIBILITY_FRIENDS, 'Friends'),
(Problem.VISIBILITY_PRIVATE, 'Private'),
(Problem.VISIBILITY_PUBLIC, 'Public'),
]
default_visibility = Problem.VISIBILITY_FRIENDS
if contest:
last_problem = (
Problem.objects.filter(contest=contest, author=user)
.order_by('-id')
.first()
)
if (
last_problem
and last_problem.visibility == Problem.VISIBILITY_PRIVATE
):
default_visibility = Problem.VISIBILITY_PRIVATE
self.initial.update({'visibility': default_visibility})
self.fields.update(
{
'visibility': forms.ChoiceField(
choices=choices,
label=_("Visibility"),
required=True,
initial=default_visibility,
)
}
)
def clean(self):
cleaned_data = super(ProblemUploadForm, self).clean()
if self.round_id:
cleaned_data['round_id'] = self.round_id
if self.visibility:
cleaned_data['visibility'] = self.visibility
return cleaned_data
class PackageUploadForm(ProblemUploadForm):
package_file = forms.FileField(label=_("Package file"))
class ProblemStatementConfigForm(forms.ModelForm):
class Meta(object):
fields = '__all__'
model = ProblemStatementConfig
widgets = {'visible': forms.RadioSelect()}
class RankingVisibilityConfigForm(forms.ModelForm):
class Meta(object):
fields = '__all__'
model = RankingVisibilityConfig
widgets = {'visible': forms.RadioSelect()}
class ProblemSiteForm(forms.ModelForm):
class Meta(object):
fields = ['url_key']
model = ProblemSite
widgets = {'url_key': TextInputWithGenerate()}
class ProblemsetSourceForm(forms.Form):
url_key = forms.CharField(label=_("Enter problem's secret key"), required=True)
def __init__(self, url_key, *args, **kwargs):
super(ProblemsetSourceForm, self).__init__(*args, **kwargs)
if url_key:
self.initial = {'url_key': url_key}
class ProblemStatementReplaceForm(forms.Form):
file_name = forms.ChoiceField(label=_("Statement filename"))
file_replacement = forms.FileField(label=_("Replacement file"), required=True)
def __init__(self, file_names, *args, **kwargs):
super(ProblemStatementReplaceForm, self).__init__(*args, **kwargs)
upload_file_field = self.fields['file_replacement']
file_name_field = self.fields['file_name']
file_name_field.choices = [('', '')] + [(name, name) for name in file_names]
self._set_field_show_always('file_name')
narrow_input_field(file_name_field)
narrow_input_field(upload_file_field)
self.initial.update({'file_name': ''})
def _set_field_show_always(self, field_name):
self.fields[field_name].widget.attrs['data-submit'] = 'always'
class PackageFileReuploadForm(forms.Form):
file_name = forms.ChoiceField(label=_("File name"))
file_replacement = forms.FileField(label=_("Replacement file"), required=False)
def __init__(self, file_names, *args, **kwargs):
super(PackageFileReuploadForm, self).__init__(*args, **kwargs)
upload_file_field = self.fields['file_replacement']
file_name_field = self.fields['file_name']
file_name_field.choices = [('', '')] + [(name, name) for name in file_names]
self._set_field_show_always('file_name')
narrow_input_field(file_name_field)
narrow_input_field(upload_file_field)
self.initial.update({'file_name': ''})
def _set_field_show_always(self, field_name):
self.fields[field_name].widget.attrs['data-submit'] = 'always'
def _localized_formset_get_initial(localized_objects):
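    # One blank initial entry per configured language that has no localized
    # object yet, so the formset renders a form for each missing language.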
return [
{'language': lang[0]}
for lang in settings.LANGUAGES
if not localized_objects.filter(language=lang[0]).exists()
]
class ProblemNameInlineFormSet(forms.models.BaseInlineFormSet):
def __init__(self, *args, **kwargs):
kwargs['initial'] = _localized_formset_get_initial(kwargs['instance'].names)
super(ProblemNameInlineFormSet, self).__init__(*args, **kwargs)
self.max_num = len(settings.LANGUAGES)
class LocalizationFormset(forms.models.BaseInlineFormSet):
def __init__(self, *args, **kwargs):
kwargs['initial'] = _localized_formset_get_initial(
kwargs['instance'].localizations
)
super(LocalizationFormset, self).__init__(*args, **kwargs)
self.min_num = self.max_num = len(settings.LANGUAGES)
for form in self.forms:
form.empty_permitted = False
class OriginInfoValueForm(forms.ModelForm):
@transaction.atomic
def save(self, commit=True):
instance = super(OriginInfoValueForm, self).save(commit=False)
# Ensure parent_tag exists on problems
category = self.cleaned_data['category']
parent_tag = category.parent_tag
instance.parent_tag = parent_tag
problems = self.cleaned_data.get('problems').prefetch_related('origintag_set')
for problem in problems:
if parent_tag not in problem.origintag_set.all():
parent_tag.problems.add(problem)
if commit:
instance.save()
return instance
class Meta(object):
model = OriginInfoValue
fields = ('category', 'value', 'order', 'problems')
exclude = ('parent_tag',)
def _label_from_instance(obj):
return obj.full_name
class OriginTagThroughForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(OriginTagThroughForm, self).__init__(*args, **kwargs)
self.fields['origintag'].label_from_instance = _label_from_instance
class Meta(object):
labels = {'origintag': _("Origin Tag")}
help_texts = {
'origintag': _(
"Origin tags inform about the problem's general origin "
"- e.g. a specific competition, olympiad, or programming camp."
)
}
class OriginInfoValueThroughForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(OriginInfoValueThroughForm, self).__init__(*args, **kwargs)
self.fields['origininfovalue'].label_from_instance = _label_from_instance
class Meta(object):
labels = {'origininfovalue': _("Origin Information")}
help_texts = {
'origininfovalue': _(
"Origin information values inform about the problem's specific origin"
"- a year, round, day, etc."
)
}
class DifficultyTagThroughForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(DifficultyTagThroughForm, self).__init__(*args, **kwargs)
self.fields['tag'].label_from_instance = _label_from_instance
class Meta(object):
labels = {'tag': _("Difficulty Tag")}
help_texts = {
'tag': _(
"Most problems fall into the 'Easy' and 'Medium' category. "
"However, there are problems that are meant for learning "
"the basics of programming (these are 'Very easy') and those "
"that are 'Hard' and exceptionally hard - the latter fall "
"into the 'Very hard' category."
)
}
class AlgorithmTagThroughForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AlgorithmTagThroughForm, self).__init__(*args, **kwargs)
self.fields['tag'].label_from_instance = _label_from_instance
class Meta(object):
labels = {'tag': _("Algorithm Tag")}
help_texts = {
'tag': _(
"Algorithm tags inform about the algorithms, theorems "
"and data structures needed to solve a problem. "
"Algorithm tags can also inform about the type of a "
"problem, e.g. if a problem is a quiz."
)
}
| sio2project/oioioi | oioioi/problems/forms.py | Python | gpl-3.0 | 9,816 |
# This file is part of LilyPond, the GNU music typesetter.
#
# Copyright (C) 1998--2012 Han-Wen Nienhuys <[email protected]>
# Jan Nieuwenhuizen <[email protected]>
#
# LilyPond is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LilyPond is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LilyPond. If not, see <http://www.gnu.org/licenses/>.
import __main__
import glob
import os
import re
import shutil
import sys
import optparse
import time
################################################################
# Users of python modules should include this snippet
# and customize variables below.
# Python 2.5 only accepts strings with proper Python internal encoding
# (i.e. ASCII or Unicode) when writing to stdout/stderr, so we must
# use ugettext iso gettext, and encode the string when writing to
# stdout/stderr
localedir = '@localedir@'
try:
import gettext
t = gettext.translation ('lilypond', localedir)
_ = t.ugettext
except:
def _ (s):
return s
underscore = _
# Urg, Python 2.4 does not define stderr/stdout encoding
# Maybe guess encoding from LANG/LC_ALL/LC_CTYPE?
reload (sys)
sys.setdefaultencoding ('utf-8')
import codecs
sys.stdout = codecs.getwriter ('utf8') (sys.stdout)
sys.stderr = codecs.getwriter ('utf8') (sys.stderr)
def encoded_write(f, s):
f.write (s.encode (f.encoding or 'utf-8', 'replace'))
# ugh, Python 2.5 optparse requires Unicode strings in some argument
# functions, and refuse them in some other places
def display_encode (s):
return s.encode (sys.stderr.encoding or 'utf-8', 'replace')
# Lilylib globals.
program_version = '@TOPLEVEL_VERSION@'
program_name = os.path.basename (sys.argv[0])
# Check if program_version contains @ characters. This will be the case if
# the .py file is called directly while building the lilypond documentation.
# If so, try to check for the env var LILYPOND_VERSION, which is set by our
# makefiles and use its value.
at_re = re.compile (r'@')
if at_re.match (program_version):
if os.environ.has_key('LILYPOND_VERSION'):
program_version = os.environ['LILYPOND_VERSION']
else:
program_version = "unknown"
# Logging framework: We have the following output functions:
# error
# warning
# progress
# debug
loglevels = {"NONE":0, "ERROR":1, "WARN":2, "BASIC":3, "PROGRESS":4, "INFO":5, "DEBUG":6}
loglevel = loglevels["PROGRESS"]
def set_loglevel (l):
global loglevel
newlevel = loglevels.get (l, -1)
if newlevel > 0:
debug_output (_ ("Setting loglevel to %s") % l)
loglevel = newlevel
else:
error (_ ("Unknown or invalid loglevel '%s'") % l)
def handle_loglevel_option (option, opt_str, value, parser, *args):
    if value:
        set_loglevel (value)
    elif args:
        set_loglevel (args[0])
def is_loglevel (l):
    global loglevel
    return loglevel >= loglevels[l]
def is_verbose ():
return is_loglevel ("DEBUG")
def stderr_write (s):
encoded_write (sys.stderr, s)
def print_logmessage (level, s, fullmessage = True, newline = True):
if (is_loglevel (level)):
if fullmessage:
stderr_write (program_name + ": " + s + '\n')
elif newline:
stderr_write (s + '\n')
else:
stderr_write (s)
def error (s):
    print_logmessage ("ERROR", _ ("error: %s") % s)
def warning (s):
    print_logmessage ("WARN", _ ("warning: %s") % s)
def basic_progress (s):
    print_logmessage ("BASIC", s)
def progress (s, fullmessage = False, newline = True):
    print_logmessage ("PROGRESS", s, fullmessage, newline)
def debug_output (s, fullmessage = False, newline = True):
    print_logmessage ("DEBUG", s, fullmessage, newline)
def require_python_version ():
if sys.hexversion < 0x02040000:
error ("Python 2.4 or newer is required to run this program.\n\
Please upgrade Python from http://python.org/download/, and if you use MacOS X,\n\
please read 'Setup for MacOS X' in Application Usage.")
os.system ("open http://python.org/download/")
sys.exit (2)
# A modified version of the commands.mkarg(x) that always uses
# double quotes (since Windows can't handle the single quotes)
# and escapes the characters \, $, ", and ` for unix shells.
def mkarg(x):
if os.name == 'nt':
return ' "%s"' % x
s = ' "'
for c in x:
if c in '\\$"`':
s = s + '\\'
s = s + c
s = s + '"'
return s
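# A doctest-style sketch of the quoting above (input invented for
# illustration): on POSIX the characters \, $, " and ` are backslash-escaped
# and the result is wrapped in double quotes with a leading space.
#
#   >>> mkarg ('say "hi" to $USER')
#   ' "say \\"hi\\" to \\$USER"'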
def command_name (cmd):
    # Strip all stuff after the command,
    # deal with "((latex ) >& 1 ) .." too
cmd = re.match ('([\(\)]*)([^\\\ ]*)', cmd).group (2)
return os.path.basename (cmd)
def subprocess_system (cmd,
ignore_error=False,
progress_p=True,
be_verbose=False,
redirect_output=False,
log_file=None):
import subprocess
show_progress= progress_p
name = command_name (cmd)
error_log_file = ''
if redirect_output:
progress (_ ("Processing %s.ly") % log_file)
else:
if be_verbose:
show_progress = 1
progress (_ ("Invoking `%s\'") % cmd)
else:
progress ( _("Running %s...") % name)
stdout_setting = None
stderr_setting = None
if not show_progress:
stdout_setting = subprocess.PIPE
if redirect_output:
stderr_filename = log_file + '.log'
stderr_setting = open(stderr_filename, 'w')
proc = subprocess.Popen (cmd,
shell=True,
universal_newlines=True,
stdout=stdout_setting,
stderr=stderr_setting)
log = ''
if redirect_output:
        while proc.poll() is None:
time.sleep(0.01)
retval = proc.returncode
stderr_setting.close()
else:
if show_progress:
retval = proc.wait()
else:
log = proc.communicate ()
retval = proc.returncode
if retval:
print >>sys.stderr, 'command failed:', cmd
if retval < 0:
print >>sys.stderr, "Child was terminated by signal", -retval
elif retval > 0:
print >>sys.stderr, "Child returned", retval
if ignore_error:
print >>sys.stderr, "Error ignored by lilylib"
else:
if not show_progress:
print log[0]
print log[1]
sys.exit (1)
return abs (retval)
def ossystem_system (cmd,
ignore_error=False,
progress_p=True,
be_verbose=False,
redirect_output=False,
log_file=None):
name = command_name (cmd)
if be_verbose:
show_progress = 1
progress (_ ("Invoking `%s\'") % cmd)
else:
progress ( _("Running %s...") % name)
retval = os.system (cmd)
if retval:
print >>sys.stderr, 'command failed:', cmd
if retval < 0:
print >>sys.stderr, "Child was terminated by signal", -retval
elif retval > 0:
print >>sys.stderr, "Child returned", retval
if ignore_error:
print >>sys.stderr, "Error ignored"
else:
sys.exit (1)
return abs (retval)
system = subprocess_system
if sys.platform == 'mingw32':
## subprocess x-compile doesn't work.
system = ossystem_system
def strip_extension (f, ext):
(p, e) = os.path.splitext (f)
if e == ext:
e = ''
return p + e
def search_exe_path (name):
p = os.environ['PATH']
exe_paths = p.split (':')
for e in exe_paths:
full = os.path.join (e, name)
if os.path.exists (full):
return full
return None
def print_environment ():
for (k,v) in os.environ.items ():
sys.stderr.write ("%s=\"%s\"\n" % (k, v))
class NonDentedHeadingFormatter (optparse.IndentedHelpFormatter):
def format_heading(self, heading):
if heading:
return heading[0].upper() + heading[1:] + ':\n'
return ''
def format_option_strings(self, option):
sep = ' '
if option._short_opts and option._long_opts:
sep = ','
metavar = ''
if option.takes_value():
            metavar = '=%s' % (option.metavar or option.dest.upper())
return "%3s%s %s%s" % (" ".join (option._short_opts),
sep,
" ".join (option._long_opts),
metavar)
    # Only use one level of indentation (even for groups and nested groups),
    # since we don't indent the headings, either
def indent(self):
self.current_indent = self.indent_increment
self.level += 1
def dedent(self):
self.level -= 1
if self.level <= 0:
            self.current_indent = 0
            self.level = 0
def format_usage(self, usage):
return _("Usage: %s") % usage + '\n'
def format_description(self, description):
return description
def get_option_parser (*args, **kwargs):
p = optparse.OptionParser (*args, **kwargs)
p.formatter = NonDentedHeadingFormatter ()
p.formatter.set_parser (p)
return p
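# A short, hypothetical usage sketch of the factory above (option name and
# help text invented for illustration):
#
#   p = get_option_parser (usage=_ ("%s [OPTIONS] FILE") % program_name)
#   p.add_option ('--loglevel', metavar=_ ("LEVEL"), action='callback',
#                 callback=handle_loglevel_option, type='string',
#                 help=_ ("set loglevel to NONE, ERROR, WARN, PROGRESS or DEBUG"))
#   (options, args) = p.parse_args ()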
| sehe/Lilypond | python/lilylib.py | Python | gpl-3.0 | 9,740 |
from __future__ import division
import socket, MySQLdb
from PIL import Image
from random import randint, randrange
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read('../analogy_config.ini')
db = MySQLdb.connect(
host = parser.get('database','host'),
user = parser.get('database','user'),
passwd = parser.get('database','passwd'),
db = parser.get('database','db'),
port = 3306
)
cursor = db.cursor()
# Set global printing variables
SBMAX = 1.5
SBMIN = 0.0
MIN_PRINT_BEFORE_BALANCE = 10
BALANCE = 0.333
MIN_AVAIL = 8
MIN_POOL = 15
class Publication_Maker():
    # Set per-instance printing variables
FREE = 0
PUB = 0
FREEPRINTED = 0
PUBPRINTED = 0
TOTALPRINTED = 0
POOL = 0
tier = None
search_pattern = False
pref_premium = False
seeded = False
locked = False
VERBOSE = True
def __init__(self,*args):
#output variables have to be defined first
self.is_premium = None
self.selected_sentences = []
self.selected_image = ""
# (search_pattern, tier) = (True, 'premium')
        # search_pattern defines whether the search is based on existing material
if len(args) == 0:
print "Publication_Maker: please insert the arguments: \
searchpattern: True/False **Mandatory\npreference: 'free'\'premium' **Optional"
return None
elif len(args) == 1:
args = [ args[0], False ]
else:
args = [args[0], args[1]]
if args[0] == True :
self.analyze_content()
else:
self.pref_premium = False
self.handle_formation()
if args[1] == False:
self.VERBOSE = False
else:
self.VERBOSE = True
print "Attempting to print a publication."
# Take a look at the amount of prints already produced and their distribution
def update_print_count(self):
_s = "SELECT COUNT(*) FROM prints;"
self.TOTALPRINTED = self.return_sql_array(_s)[0]
_s = "SELECT COUNT(*) FROM prints WHERE premium = TRUE;"
self.PUBPRINTED = self.return_sql_array(_s)[0]
self.FREEPRINTED = self.TOTALPRINTED - self.PUBPRINTED
if self.VERBOSE:
print('total prints: {}'.format(self.TOTALPRINTED))
print('published prints: {}'.format(self.PUBPRINTED))
# Analyze
def analyze_content(self):
self.update_print_count()
#read the scores of all the entries in the source material
_s = "SELECT score FROM source_material"
result = self.return_sql_array(_s)
# divide the scores into categories
for score in result:
if score < SBMIN: self.FREE+=1
elif score > SBMAX: self.PUB+=1
else : self.POOL+=1
# see if there's enough data to continue
if self.POOL < MIN_POOL:
print 'Need a bigger datapool, wait until someone interacts with the iPad setup or call the analogy.py script.'
return None
else:
# can we work with balance yet?
if self.TOTALPRINTED > MIN_PRINT_BEFORE_BALANCE:
CURRENT_BALANCE = self.PUBPRINTED / self.FREEPRINTED
if self.VERBOSE:
print 'Free items available:{}\nPublishable items available:{}\nItems in pool:{}\nBalance:{}\n'.format(self.FREE,self.PUB, self.POOL, CURRENT_BALANCE )
# in case of lot of free content, print premium
if CURRENT_BALANCE < BALANCE:
_s = "SELECT COUNT(sentence_data) FROM source_material WHERE score > {}".format(SBMAX);
self.pref_premium = True
#otherwise print free content
else :
_s = "SELECT COUNT(sentence_data) FROM source_material WHERE score < {}".format(SBMIN);
self.pref_premium = False
quantity = self.return_sql_array(_s)[0]
# if there's enough data available to print a new piece of paper, handle forming it
if quantity > MIN_AVAIL:
self.handle_formation()
# otherwise lock the script, this will later on prevent printing
else:
self.locked = True
return False
#if we can't work with balance, but there's enough data, go ahead and print something random
else:
print 'Need more prints to work with balance, will attempt to print random type of publication.'
self.pref_premium = randrange(100) < 50
if self.pref_premium == True:
_s = "SELECT COUNT(sentence_data) FROM source_material WHERE score > {}".format(SBMAX);
else:
_s = "SELECT COUNT(sentence_data) FROM source_material WHERE score < {}".format(SBMIN);
quantity = self.return_sql_array(_s)[0]
if quantity > MIN_AVAIL:
                self.handle_formation()
else:
self.locked = True
return False
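    # A worked example of the balance rule above (numbers invented for
    # illustration): with 30 prints recorded, 6 premium and 24 free,
    # CURRENT_BALANCE = 6 / 24 = 0.25 < BALANCE (0.333), so the next
    # publication is premium; once the ratio exceeds roughly 1:3 the
    # script switches back to free content.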
# Update when to print again
def update_interval(self):
return abs(BALANCE / ( self.PUBPRINTED - self.FREEPRINTED ));
# Read from the database
def return_sql_array(self, _s):
try:
cursor.execute(_s)
data=cursor.fetchall()
if len(data) == 0:
return False
else:
_a = []
for row in data:
_a.append(row[0])
return _a
except (MySQLdb.Error, MySQLdb.Warning, TypeError) as e:
return e
# Write to the database
def write_sql(self, _s):
cursor.execute(_s)
return None
# generates a seed to try and find image/text matches with
def find_seed(self):
# WEIGHTING BUG -> SEEDING WITH SAME NUMBER SO WEIGHT HAS NO EFFECT
#JUST RANDOMLY LOOP FOR HITS, SEEMS TO WORK BETTER.
_s = """SELECT selected_data FROM selected_text WHERE score > 0 AND selected_length < 15
ORDER by RAND() ASC LIMIT 5"""
cursor.execute(_s)
data=cursor.fetchall()
str_array = []
# load the chosen words into a list
for row in data:
_p = row[0].split( )
str_array.extend(_p)
# try to find a matching image with the words in the list
        for list_item in str_array:
            _s = """SELECT image_id FROM images WHERE image_key= "{}" AND key_score >= 1""".format(list_item)
            result = self.return_sql_array(_s)
            if result:
                return [list_item, result]
        # no candidate word produced a hit; callers treat a False second
        # element as a miss and retry with a fresh seed
        return [None, False]
# request text graded for premium publications
def create_premium_text(self,seed):
_s = """SELECT `selected_data` , MATCH `selected_data` AGAINST ('{}') AS relevance FROM selected_text ORDER BY relevance DESC LIMIT 10""".format(seed);
result = self.return_sql_array(_s)
if self.VERBOSE:
print 'premium text items found: {} for seed: {}'.format(len(result), seed)
return result
# try to link images
def link_image(self,seed):
_s = """SELECT image_url FROM images WHERE image_key = "{}"
ORDER by key_score DESC LIMIT 1"""
_s = _s.format(seed)
result = self.return_sql_array(_s)
return result
def commit(self):
db.commit()
#db.close()
#outputs the formed data to the controller script
def output(self):
if self.locked is True:
self.commit()
if self.pref_premium: t = 'premium'
else: t= 'free'
return "Can't print a {} publication yet, waiting for more entries to print.".format(t)
else:
if self.is_premium: _v = 1
else: _v = 0
_s = "INSERT INTO prints (premium) values ({})".format(_v);
self.write_sql(_s)
if self.VERBOSE:
print self.selected_sentences
print self.selected_image
print self.is_premium
return [self.selected_sentences, self.selected_image, self.is_premium]
# handles the forming of the publication
def handle_formation(self):
# if it's a premium publication
if self.pref_premium:
i = 0
#look for image/text relation if premium
while not self.seeded:
i+= 1
seed = self.find_seed()
if seed[1] != False : break
# if didn't work in 250 tries break from the loop
elif i > 250:
print "can't find a seed for the image linking sequence quick enough, try to run again."
return None
        print('seed: {}'.format(seed))
#generate text and image needed for the publication
self.selected_sentences = self.create_premium_text(seed[0])
self.selected_image = self.link_image(seed[0])
self.is_premium = True
# if it's a free publication
else:
_s = "SELECT sentence_data FROM source_material WHERE score < {} ORDER BY RAND() LIMIT 5".format(SBMIN)
# just generate the text
            self.selected_sentences = self.return_sql_array(_s)
| LucaClaessens/Analogy | Printer/Publication_Maker.py | Python | gpl-3.0 | 7,901 |
# Copyright (C) 2014 Nicolas Lamirault <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from hyperiontests import hyperion
from hyperiontests import settings
class TestGrafana(hyperion.HyperionTestCase):
def setUp(self):
super(TestGrafana, self).setUp()
self._host = "http://%s:%s" % (settings.HYPERION_HOST,
settings.HYPERION_WEB)
def test_can_retrieve_default_dashboard(self):
response = self.http_get("grafana/#/dashboard/file/default.json")
self.assertEqual(200, response.status_code)
| nlamirault/hyperion-lite | hyperiontests/test_grafana.py | Python | gpl-3.0 | 1,189 |
# PyJVM (pyjvm.org) Java Virtual Machine implemented in pure Python
# Copyright (C) 2014 Andrew Romanenco ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''Java bytecode implementation'''
import logging
from pyjvm.bytecode import bytecode
from pyjvm.frame import Frame
from pyjvm.jassert import jassert_ref
from pyjvm.natives import exec_native
from pyjvm.thread import SkipThreadCycle
from pyjvm.utils import args_count
from pyjvm.vmo import vm_obj_call
logger = logging.getLogger(__name__)
@bytecode(code=0xb6)
def invokevirtual(frame):
index = (ord(frame.code[frame.pc]) << 8) + ord(frame.code[frame.pc + 1])
frame.pc += 2
cp_item = frame.this_class.constant_pool[index]
assert cp_item[0] == 10 # CONSTANT_Methodref
klass_info = frame.this_class.constant_pool[cp_item[1]]
assert klass_info[0] == 7 # CONSTANT_Class_info
name_and_type = frame.this_class.constant_pool[cp_item[2]]
assert name_and_type[0] == 12 # name_and_type_index
klass_name = frame.this_class.constant_pool[klass_info[1]][1]
method_name = frame.this_class.constant_pool[name_and_type[1]][1]
method_signature = frame.this_class.constant_pool[name_and_type[2]][1]
logger.debug("%s %s %s", klass_name, method_name, method_signature)
klass = frame.vm.get_class(klass_name)
method = klass.find_method(method_name, method_signature)
nargs = args_count(method_signature) + 1
args = [None] * nargs
while nargs > 0:
value = frame.stack.pop()
if type(value) is tuple and value[0] in ('long', 'double'):
nargs -= 1
args[nargs - 1] = value
nargs -= 1
logger.debug(frame.id)
logger.debug(args)
logger.debug(method_signature)
jassert_ref(args[0])
if args[0] is None:
frame.vm.raise_exception(frame, "java/lang/NullPointerException")
return
if args[0][0] == "vm_ref": # vm owned object call
vm_obj_call(frame, args, method_name, method_signature)
return
    # ignore signature polymorphic methods
instance = frame.vm.heap[args[0][1]]
klass = instance.java_class
method = None
while method is None and klass is not None:
if method_name in klass.methods:
if method_signature in klass.methods[method_name]:
method = klass.methods[method_name][method_signature]
break
klass = klass.super_class
assert method is not None
assert klass is not None
if method[0] & 0x0100 > 0: # is native?
exec_native(frame, args, klass, method_name, method_signature)
return
obj_mon = None
if method[0] & 0x0020 > 0: # is sync
obj_mon = frame.vm.heap[args[0][1]]
if "@monitor" in obj_mon.fields:
if obj_mon.fields["@monitor"] == frame.thread:
obj_mon.fields["@monitor_count"] += 1
else:
index = 0
while index < len(args):
a = args[index]
if type(a) is tuple and a[0] in ('long', 'double'):
index += 1
else:
frame.stack.append(a)
index += 1
raise SkipThreadCycle()
else:
obj_mon.fields["@monitor"] = frame.thread
obj_mon.fields["@monitor_count"] = 1
m_args = [''] * method[1]
m_args[0:len(args)] = args[0:len(args)]
sub = Frame(frame.thread, klass, method, m_args,
"InvVirt: %s %s in %s" % (method_name, method_signature,
instance.java_class.this_name))
if obj_mon is not None:
sub.monitor = obj_mon
frame.thread.frame_stack.append(sub)
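# A worked sketch of the operand-slot handling above (descriptor invented,
# and assuming args_count() counts JVM operand slots): for a virtual call
# to a method with descriptor (JI)V there are 3 argument slots (two for
# the long J, one for the int I) plus one for the objectref, so nargs
# starts at 4; popping the long consumes two slots in a single iteration
# of the while loop above.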
| andrewromanenco/pyjvm | pyjvm/ops/ops_invokevirtual.py | Python | gpl-3.0 | 4,330 |
"""
Bootstrapping script that create a basic Pimlico setup, either for an existing config file, or for a new project.
Distribute this with your Pimlico project code. You don't need to distribute Pimlico itself
with your project, since it can be downloaded later. Just distribute a directory tree containing your config files,
your own code and this Python script, which will fetch everything else it needs.
Another use is to get a whole new project up and running. Use the `newproject.py` script for that purpose, which
calls this script.
"""
from __future__ import print_function
import os
import sys
from io import open
# Provide simple Py2-3 compatibility without requiring other libraries
PY3 = sys.version_info[0] == 3
if PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
import tarfile
import json
RAW_URL = "https://raw.githubusercontent.com/markgw/pimlico/"
DOWNLOAD_URL = "https://github.com/markgw/pimlico/archive/"
GIT_URL = "https://github.com/markgw/pimlico.git"
GITHUB_API = "https://api.github.com"
def lookup_pimlico_versions():
# Use Github API to find all tagged releases
tag_api_url = "%s/repos/markgw/pimlico/tags" % GITHUB_API
try:
tag_response = urlopen(tag_api_url).read().decode("utf-8")
except Exception as e:
print("Could not fetch Pimlico release tags from {}: {}".format(tag_api_url, e))
sys.exit(1)
tag_data = json.loads(tag_response)
return [tag["name"] for tag in reversed(tag_data)]
def lookup_bleeding_edge(branch_url):
release_url = "{}admin/release.txt".format(branch_url)
try:
release_data = urlopen(release_url).read().decode("utf-8")
except Exception as e:
print("Could not fetch Pimlico release from {}: {}".format(release_url, e))
sys.exit(1)
return release_data.splitlines()[-1].lstrip("v")
def find_config_value(config_path, key, start_in_pipeline=False):
with open(config_path, "r", encoding="utf-8") as f:
in_pipeline = start_in_pipeline
for line in f:
line = line.strip("\n ")
if in_pipeline and line:
# Look for the required key in the pipeline section
line_key, __, line_value = line.partition("=")
if line_key.strip() == key:
return line_value.strip()
elif line.startswith("["):
# Section heading
# Start looking for keys if we're in the pipeline section
in_pipeline = line.strip("[]") == "pipeline"
elif line.upper().startswith("%% INCLUDE"):
# Found include directive: follow into the included file
filename = line[10:].strip()
# Get filename relative to current config file
filename = os.path.join(os.path.dirname(config_path), filename)
found_value = find_config_value(filename, key, start_in_pipeline=in_pipeline)
if found_value is not None:
return found_value
# Didn't find the key anywhere
return
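# A minimal sketch of the config layout find_config_value() understands
# (names and values invented for illustration):
#
#   [pipeline]
#   name=my_pipeline
#   release=0.9.1
#   %% INCLUDE common.conf
#
# The "release" key may equally live in an included file: the search
# follows %% INCLUDE directives, resolved relative to the current file.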
def extract(tar_path):
extract_path = os.path.dirname(tar_path)
with tarfile.open(tar_path, "r:gz") as tar:
for item in tar:
tar.extract(item, extract_path)
def tar_dirname(tar_path):
with tarfile.open(tar_path, "r:gz") as tar:
# Expect first member to be a directory
member = tar.next()
if not member.isdir():
raise ValueError("downloaded tar file was expected to contain a directory, but didn't")
return member.name
def symlink(source, link_name):
"""
Symlink creator that works on Windows.
"""
os_symlink = getattr(os, "symlink", None)
if callable(os_symlink):
os_symlink(source, link_name)
else:
import ctypes
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
flags = 1 if os.path.isdir(source) else 0
if csl(link_name, source, flags) == 0:
raise ctypes.WinError()
def bootstrap(config_file, git=False):
current_dir = os.path.abspath(os.path.dirname(__file__))
branch_name = git if type(git) is str else "master"
branch_url = "{}{}/".format(RAW_URL, branch_name)
if os.path.exists(os.path.join(current_dir, "pimlico")):
print("Pimlico source directory already exists: delete it if you want to fetch again")
sys.exit(1)
# Check the config file to find the version of Pimlico we need
version = find_config_value(config_file, "release")
if version is None:
print("Could not find Pimlico release in config file %s" % config_file)
sys.exit(1)
major_version = int(version.partition(".")[0])
print("Config file requires Pimlico version {}".format(version))
available_releases = lookup_pimlico_versions()
bleeding_edge = lookup_bleeding_edge(branch_url)
tags = available_releases
# If the bleeding edge version is compatible (same major version) just use that
if int(bleeding_edge.lstrip("v").partition(".")[0]) == major_version:
print("Bleeding edge ({}) is compatible".format(bleeding_edge))
fetch_release = "master"
else:
if git:
print("Error: tried to clone the Git repo instead of fetching a release, but config file is not " \
"compatible with latest Pimlico version")
sys.exit(1)
# Find the latest release that has the same major version
compatible_tags = [t for t in tags if int(t.lstrip("v").partition(".")[0]) == major_version]
fetch_release = compatible_tags[-1]
print("Fetching latest release of major version {}, which is {}".format(major_version, fetch_release))
if git:
# Clone the latest version of the code from the Git repository
# Allow the git kwarg to name a branch to clone
if type(git) is str:
args = "--branch {} ".format(git)
else:
args = ""
print("Cloning git repository ({})".format("{} branch".format(git) if type(git) is str else "master"))
import subprocess
subprocess.check_call("git clone {}{}".format(args, GIT_URL), shell=True)
else:
archive_url = "%s%s.tar.gz" % (DOWNLOAD_URL, fetch_release)
print("Downloading Pimlico source code from {}".format(archive_url))
tar_download_path = os.path.join(current_dir, "archive.tar.gz")
with open(tar_download_path, "wb") as archive_file:
archive_file.write(urlopen(archive_url).read())
print("Extracting source code")
extracted_dirname = tar_dirname(tar_download_path)
extract(tar_download_path)
# Extracted source code: remove the archive
os.remove(tar_download_path)
os.rename(os.path.join(current_dir, extracted_dirname), os.path.join(current_dir, "pimlico"))
print("Pimlico source (%s) is now available in directory pimlico/" % fetch_release)
# Create symlink to pimlico.sh, so it's easier to run
print("Creating symlink pimlico.sh for running Pimlico")
symlink(os.path.join("pimlico", "bin", "pimlico.sh"), "pimlico.sh")
if __name__ == "__main__":
args = sys.argv[1:]
if "--git" in args:
args.remove("--git")
git = True
else:
git = False
if len(args) == 0:
print("Usage:")
print(" python bootstrap.py [--git] <config_file>")
print()
print("Specify a Pimlico config file to set up Pimlico for")
print("If you want to start a new project, with an empty config file, use the newproject.py script")
print()
print("If you specify --git, Pimlico will be cloned as a Git repository, rather ")
print("than downloaded from a release. This only works on Linux and requires that Git is ")
print("installed. Most of the time, you don't want to do this: it's only for Pimlico development")
sys.exit(1)
else:
config_file = os.path.abspath(args[0])
bootstrap(config_file, git=git)
| markgw/pimlico | admin/bootstrap.py | Python | gpl-3.0 | 8,150 |
#!/usr/bin/env python
import numpy as np
import sys
try:
t1 = int(sys.argv[1])
except:
print "usage:", sys.argv[0], "n (number of years)"
sys.exit(1)
t0 = 1750
u0 = 2
t = np.linspace(t0, t0 + t1, t1 + 1)
u = np.zeros(t1 + 1)
a = 0.0218
u[0] = u0
for i in range(len(u) - 1):
u[i+1] = (1 + a)*u[i]
print "Expected population in year %d is" %(t0 + t1), u[-1]
| qilicun/python | python3/src/ode1/pop_exp_growth.py | Python | gpl-3.0 | 376 |
#!/usr/bin/env python
"""
script to build the latest binaries for each vehicle type, ready to upload
Peter Barker, August 2017
based on build_binaries.sh by Andrew Tridgell, March 2013
"""
from __future__ import print_function
import datetime
import optparse
import os
import re
import shutil
import time
import subprocess
import sys
import zlib
# local imports
import generate_manifest
class build_binaries(object):
def __init__(self, tags):
self.tags = tags
self.dirty = False
def progress(self, string):
'''pretty-print progress'''
print("BB: %s" % string)
def run_git(self, args):
'''run git with args git_args; returns git's output'''
cmd_list = ["git"]
cmd_list.extend(args)
return self.run_program("BB-GIT", cmd_list)
def board_branch_bit(self, board):
'''return a fragment which might modify the branch name.
this was previously used to have a master-AVR branch etc
if the board type was apm1 or apm2'''
return None
def board_options(self, board):
'''return board-specific options'''
if board == "bebop":
return ["--static"]
return []
def run_waf(self, args):
if os.path.exists("waf"):
waf = "./waf"
else:
waf = os.path.join(".", "modules", "waf", "waf-light")
cmd_list = [waf]
cmd_list.extend(args)
self.run_program("BB-WAF", cmd_list)
def run_program(self, prefix, cmd_list):
self.progress("Running (%s)" % " ".join(cmd_list))
p = subprocess.Popen(cmd_list, bufsize=1, stdin=None,
stdout=subprocess.PIPE, close_fds=True,
stderr=subprocess.STDOUT)
output = ""
while True:
x = p.stdout.readline()
if len(x) == 0:
returncode = os.waitpid(p.pid, 0)
if returncode:
break
# select not available on Windows... probably...
time.sleep(0.1)
continue
output += x
x = x.rstrip()
print("%s: %s" % (prefix, x))
(_, status) = returncode
if status != 0:
self.progress("Process failed (%s)" %
str(returncode))
raise subprocess.CalledProcessError(
returncode, cmd_list)
return output
def run_make(self, args):
cmd_list = ["make"]
cmd_list.extend(args)
self.run_program("BB-MAKE", cmd_list)
def run_git_update_submodules(self):
'''if submodules are present initialise and update them'''
if os.path.exists(os.path.join(self.basedir, ".gitmodules")):
self.run_git(["submodule",
"update",
"--init",
"--recursive",
"-f"])
def checkout(self, vehicle, ctag, cboard=None, cframe=None):
'''attempt to check out a git tree. Various permutations are
    attempted based on ctag - for example, if the board is avr and ctag
is bob we will attempt to checkout bob-AVR'''
if self.dirty:
self.progress("Skipping checkout for dirty build")
return True
self.progress("Trying checkout %s %s %s %s" %
(vehicle, ctag, cboard, cframe))
self.run_git(['stash'])
if ctag == "latest":
vtag = "master"
else:
vtag = "%s-%s" % (vehicle, ctag)
branches = []
if cframe is not None:
# try frame specific tag
branches.append("%s-%s" % (vtag, cframe))
if cboard is not None:
bbb = self.board_branch_bit(cboard)
if bbb is not None:
# try board type specific branch extension
branches.append("".join([vtag, bbb]))
branches.append(vtag)
for branch in branches:
try:
self.progress("Trying branch %s" % branch)
self.run_git(["checkout", "-f", branch])
self.run_git_update_submodules()
self.run_git(["log", "-1"])
return True
except subprocess.CalledProcessError as e:
self.progress("Checkout branch %s failed" % branch)
pass
self.progress("Failed to find tag for %s %s %s %s" %
(vehicle, ctag, cboard, cframe))
return False
def skip_board_waf(self, board):
'''check if we should skip this build because we don't support the
board in this release
'''
try:
if self.string_in_filepath(board,
os.path.join(self.basedir,
'Tools',
'ardupilotwaf',
'boards.py')):
return False
except IOError as e:
if e.errno != 2:
raise
# see if there's a hwdef.dat for this board:
if os.path.exists(os.path.join(self.basedir,
'libraries',
'AP_HAL_ChibiOS',
'hwdef',
board)):
self.progress("ChibiOS build: %s" % (board,))
return False
self.progress("Skipping unsupported board %s" % (board,))
return True
def skip_frame(self, board, frame):
'''returns true if this board/frame combination should not be built'''
if frame == "heli":
if board in ["bebop", "aerofc-v1", "skyviper-v2450"]:
self.progress("Skipping heli build for %s" % board)
return True
return False
def first_line_of_filepath(self, filepath):
'''returns the first (text) line from filepath'''
with open(filepath) as fh:
line = fh.readline()
return line
def skip_build(self, buildtag, builddir):
'''check if we should skip this build because we have already built
this version
'''
if os.getenv("FORCE_BUILD", False):
return False
if not os.path.exists(os.path.join(self.basedir, '.gitmodules')):
self.progress("Skipping build without submodules")
return True
bname = os.path.basename(builddir)
ldir = os.path.join(os.path.dirname(os.path.dirname(
os.path.dirname(builddir))), buildtag, bname) # FIXME: WTF
oldversion_filepath = os.path.join(ldir, "git-version.txt")
if not os.path.exists(oldversion_filepath):
self.progress("%s doesn't exist - building" % oldversion_filepath)
return False
oldversion = self.first_line_of_filepath(oldversion_filepath)
newversion = self.run_git(["log", "-1"])
newversion = newversion.splitlines()[0]
oldversion = oldversion.rstrip()
newversion = newversion.rstrip()
self.progress("oldversion=%s newversion=%s" %
(oldversion, newversion,))
if oldversion == newversion:
self.progress("Skipping build - version match (%s)" %
(newversion,))
return True
self.progress("%s needs rebuild" % (ldir,))
return False
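    # An illustrative pass through the check above (paths and hashes
    # invented): if .../Copter/latest/fmuv3/git-version.txt starts with
    # "commit 0123abc..." and `git log -1` reports the same commit, the
    # build is skipped; any difference triggers a rebuild.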
def write_string_to_filepath(self, string, filepath):
'''writes the entirety of string to filepath'''
with open(filepath, "w") as x:
x.write(string)
def addfwversion_gitversion(self, destdir, src):
# create git-version.txt:
gitlog = self.run_git(["log", "-1"])
gitversion_filepath = os.path.join(destdir, "git-version.txt")
gitversion_content = gitlog
versionfile = os.path.join(src, "version.h")
if os.path.exists(versionfile):
content = self.read_string_from_filepath(versionfile)
match = re.search('define.THISFIRMWARE "([^"]+)"', content)
if match is None:
self.progress("Failed to retrieve THISFIRMWARE from version.h")
self.progress("Content: (%s)" % content)
self.progress("Writing version info to %s" %
(gitversion_filepath,))
gitversion_content += "\nAPMVERSION: %s\n" % (match.group(1))
else:
self.progress("%s does not exist" % versionfile)
self.write_string_to_filepath(gitversion_content, gitversion_filepath)
def addfwversion_firmwareversiontxt(self, destdir, src):
# create firmware-version.txt
versionfile = os.path.join(src, "version.h")
if not os.path.exists(versionfile):
self.progress("%s does not exist" % (versionfile,))
return
ss = ".*define +FIRMWARE_VERSION[ ]+(?P<major>\d+)[ ]*,[ ]*" \
"(?P<minor>\d+)[ ]*,[ ]*(?P<point>\d+)[ ]*,[ ]*" \
"(?P<type>[A-Z_]+)[ ]*"
content = self.read_string_from_filepath(versionfile)
match = re.search(ss, content)
if match is None:
self.progress("Failed to retrieve FIRMWARE_VERSION from version.h")
self.progress("Content: (%s)" % content)
return
ver = "%d.%d.%d-%s\n" % (int(match.group("major")),
int(match.group("minor")),
int(match.group("point")),
match.group("type"))
firmware_version_filepath = "firmware-version.txt"
self.progress("Writing version (%s) to %s" %
(ver, firmware_version_filepath,))
self.write_string_to_filepath(
ver, os.path.join(destdir, firmware_version_filepath))
def addfwversion(self, destdir, src):
'''write version information into destdir'''
self.addfwversion_gitversion(destdir, src)
self.addfwversion_firmwareversiontxt(destdir, src)
def read_string_from_filepath(self, filepath):
'''returns content of filepath as a string'''
with open(filepath, 'rb') as fh:
content = fh.read()
return content
def string_in_filepath(self, string, filepath):
'''returns true if string exists in the contents of filepath'''
return string in self.read_string_from_filepath(filepath)
def mkpath(self, path):
'''make directory path and all elements leading to it'''
'''distutils.dir_util.mkpath was playing up'''
try:
os.makedirs(path)
except OSError as e:
if e.errno != 17: # EEXIST
raise e
def copyit(self, afile, adir, tag, src):
'''copies afile into various places, adding metadata'''
bname = os.path.basename(adir)
tdir = os.path.join(os.path.dirname(os.path.dirname(
os.path.dirname(adir))), tag, bname)
if tag == "latest":
# we keep a permanent archive of all "latest" builds,
# their path including a build timestamp:
self.mkpath(adir)
self.progress("Copying %s to %s" % (afile, adir,))
shutil.copy(afile, adir)
self.addfwversion(adir, src)
# the most recent build of every tag is kept around:
self.progress("Copying %s to %s" % (afile, tdir))
self.mkpath(tdir)
self.addfwversion(tdir, src)
shutil.copy(afile, tdir)
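    # An illustrative layout of what copyit() produces for a "latest" build
    # (dates and board invented for the example):
    #
    #   binaries/Copter/2017-08/2017-08-16-10:42/fmuv3/arducopter.px4
    #   binaries/Copter/latest/fmuv3/arducopter.px4
    #
    # i.e. a timestamped permanent archive plus a per-tag directory that is
    # overwritten with each new build of that tag.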
def touch_filepath(self, filepath):
'''creates a file at filepath, or updates the timestamp on filepath'''
if os.path.exists(filepath):
os.utime(filepath, None)
else:
with open(filepath, "a"):
pass
def build_vehicle(self, tag, vehicle, boards, vehicle_binaries_subdir,
binaryname, px4_binaryname, frames=[None]):
'''build vehicle binaries'''
self.progress("Building %s %s binaries (cwd=%s)" %
(vehicle, tag, os.getcwd()))
# if not self.checkout(vehicle, tag):
# self.progress("Failed to check out (%s)" % tag)
# return
# # begin pointless checkout
# if not self.checkout(vehicle, "latest"):
# self.progress("Failed to check out (%s)" % "latest")
# return
# # end pointless checkout
for board in boards:
self.progress("Building board: %s" % board)
for frame in frames:
if frame is not None:
self.progress("Considering frame %s for board %s" %
(frame, board))
if frame is None:
framesuffix = ""
else:
framesuffix = "-%s" % frame
if not self.checkout(vehicle, tag, board, frame):
msg = ("Failed checkout of %s %s %s %s" %
(vehicle, board, tag, frame,))
self.progress(msg)
self.error_strings.append(msg)
continue
if self.skip_board_waf(board):
continue
self.progress("Building %s %s %s binaries %s" %
(vehicle, tag, board, frame))
ddir = os.path.join(self.binaries,
vehicle_binaries_subdir,
self.hdate_ym,
self.hdate_ymdhm,
"".join([board, framesuffix]))
if self.skip_build(tag, ddir):
continue
if self.skip_frame(board, frame):
continue
                self.remove_tmpdir()
self.progress("Configuring for %s in %s" %
(board, self.buildroot))
try:
waf_opts = ["configure",
"--board", board,
"--out", self.buildroot,
"clean"]
waf_opts.extend(self.board_options(board))
self.run_waf(waf_opts)
except subprocess.CalledProcessError as e:
self.progress("waf configure failed")
continue
try:
target = os.path.join("bin",
"".join([binaryname, framesuffix]))
self.run_waf(["build", "--targets", target])
except subprocess.CalledProcessError as e:
msg = ("Failed build of %s %s%s %s" %
(vehicle, board, framesuffix, tag))
self.progress(msg)
self.error_strings.append(msg)
continue
bare_path = os.path.join(self.buildroot,
board,
"bin",
"".join([binaryname, framesuffix]))
files_to_copy = []
if os.path.exists(bare_path):
files_to_copy.append(bare_path)
for extension in [".px4", ".apj", ".abin"]:
filepath = "".join([bare_path, extension])
if os.path.exists(filepath):
files_to_copy.append(filepath)
for path in files_to_copy:
try:
self.copyit(path, ddir, tag, vehicle)
except Exception as e:
self.progress("Failed to copy %s to %s: %s" % (path, ddir, str(e)))
# why is touching this important? -pb20170816
self.touch_filepath(os.path.join(self.binaries,
vehicle_binaries_subdir, tag))
# PX4-building
board = "px4"
for frame in frames:
self.progress("Building frame %s for board %s" % (frame, board))
if frame is None:
framesuffix = ""
else:
framesuffix = "-%s" % frame
if not self.checkout(vehicle, tag, "PX4", frame):
msg = ("Failed checkout of %s %s %s %s" %
(vehicle, "PX4", tag, frame))
self.progress(msg)
self.error_strings.append(msg)
self.checkout(vehicle, "latest")
continue
try:
deadwood = "../Build.%s" % vehicle
if os.path.exists(deadwood):
shutil.rmtree(os.path.join(deadwood))
except Exception as e:
self.progress("FIXME: narrow exception (%s)" % repr(e))
self.progress("Building %s %s PX4%s binaries" %
(vehicle, tag, framesuffix))
ddir = os.path.join(self.binaries,
vehicle_binaries_subdir,
self.hdate_ym,
self.hdate_ymdhm,
"".join(["PX4", framesuffix]))
if self.skip_build(tag, ddir):
continue
for v in ["v1", "v2", "v3", "v4", "v4pro"]:
px4_v = "%s-%s" % (board, v)
if self.skip_board_waf(px4_v):
continue
self.progress("Configuring for %s in %s" %
(px4_v, self.buildroot))
try:
self.run_waf(["configure", "--board", px4_v,
"--out", self.buildroot, "clean"])
except subprocess.CalledProcessError as e:
self.progress("waf configure failed")
continue
try:
self.run_waf([
"build",
"--targets",
os.path.join("bin",
"".join([binaryname, framesuffix]))])
except subprocess.CalledProcessError as e:
msg = ("Failed build of %s %s%s %s for %s" %
(vehicle, board, framesuffix, tag, v))
self.progress(msg)
self.error_strings.append(msg)
continue
oldfile = os.path.join(self.buildroot, px4_v, "bin",
"%s%s.px4" % (binaryname, framesuffix))
newfile = "%s-%s.px4" % (px4_binaryname, v)
self.progress("Copying (%s) to (%s)" % (oldfile, newfile,))
try:
shutil.copyfile(oldfile, newfile)
except Exception as e:
self.progress("FIXME: narrow exception (%s)" % repr(e))
msg = ("Failed build copy of %s PX4%s %s for %s" %
(vehicle, framesuffix, tag, v))
self.progress(msg)
self.error_strings.append(msg)
continue
# FIXME: why the two stage copy?!
self.copyit(newfile, ddir, tag, vehicle)
self.checkout(vehicle, "latest")
def common_boards(self):
'''returns list of boards common to all vehicles'''
# note that while we do not use these for AntennaTracker!
return ["fmuv2",
"fmuv3",
"fmuv4",
"mindpx-v2",
"erlebrain2",
"navio",
"navio2",
"pxf",
"pxfmini"]
def build_arducopter(self, tag):
'''build Copter binaries'''
boards = []
boards.extend(["skyviper-v2450", "aerofc-v1", "bebop"])
boards.extend(self.common_boards()[:])
self.build_vehicle(tag,
"ArduCopter",
boards,
"Copter",
"arducopter",
"ArduCopter",
frames=[None, "heli"])
def build_arduplane(self, tag):
'''build Plane binaries'''
boards = self.common_boards()[:]
boards.append("disco")
self.build_vehicle(tag,
"ArduPlane",
boards,
"Plane",
"arduplane",
"ArduPlane")
def build_antennatracker(self, tag):
'''build Tracker binaries'''
boards = ['navio', 'navio2']
self.build_vehicle(tag,
"AntennaTracker",
boards,
"AntennaTracker",
"antennatracker",
"AntennaTracker",)
def build_rover(self, tag):
'''build Rover binaries'''
boards = self.common_boards()
self.build_vehicle(tag,
"APMrover2",
boards,
"Rover",
"ardurover",
"APMrover2")
def build_ardusub(self, tag):
'''build Sub binaries'''
self.build_vehicle(tag,
"ArduSub",
self.common_boards(),
"Sub",
"ardusub",
"ArduSub")
def generate_manifest(self):
        '''generate manifest files for GCS to download'''
self.progress("Generating manifest")
base_url = 'http://firmware.ardupilot.org'
generator = generate_manifest.ManifestGenerator(self.binaries,
base_url)
content = generator.json()
new_json_filepath = os.path.join(self.binaries, "manifest.json.new")
self.write_string_to_filepath(content, new_json_filepath)
# provide a pre-compressed manifest. For reference, a 7M manifest
# "gzip -9"s to 300k in 1 second, "xz -e"s to 80k in 26 seconds
compressed = zlib.compress(content, 9)
new_json_filepath_gz = os.path.join(self.binaries,
"manifest.json.gz.new")
self.write_string_to_filepath(compressed, new_json_filepath_gz)
json_filepath = os.path.join(self.binaries, "manifest.json")
json_filepath_gz = os.path.join(self.binaries, "manifest.json.gz")
shutil.move(new_json_filepath, json_filepath)
shutil.move(new_json_filepath_gz, json_filepath_gz)
self.progress("Manifest generation successful")
def validate(self):
'''run pre-run validation checks'''
if "dirty" in self.tags:
if len(self.tags) > 1:
raise ValueError("dirty must be only tag if present (%s)" %
(str(self.tags)))
self.dirty = True
def pollute_env_from_file(self, filepath):
with open(filepath) as f:
for line in f:
try:
(name, value) = str.split(line, "=")
except ValueError as e:
self.progress("%s: split failed: %s" % (filepath, str(e)))
continue
value = value.rstrip()
self.progress("%s: %s=%s" % (filepath, name, value))
os.environ[name] = value
def remove_tmpdir(self):
if os.path.exists(self.tmpdir):
self.progress("Removing (%s)" % (self.tmpdir,))
shutil.rmtree(self.tmpdir)
def run(self):
self.validate()
prefix_bin_dirpath = os.path.join(os.environ.get('HOME'),
"prefix", "bin")
origin_env_path = os.environ.get("PATH")
os.environ["PATH"] = ':'.join([prefix_bin_dirpath, origin_env_path,
"/bin", "/usr/bin"])
self.tmpdir = os.path.join(os.getcwd(), 'build.tmp.binaries')
os.environ["TMPDIR"] = self.tmpdir
print(self.tmpdir)
        self.remove_tmpdir()
self.progress("Building in %s" % self.tmpdir)
now = datetime.datetime.now()
self.progress(now)
if not self.dirty:
self.run_git(["checkout", "-f", "master"])
githash = self.run_git(["rev-parse", "HEAD"])
githash = githash.rstrip()
self.progress("git hash: %s" % str(githash))
self.hdate_ym = now.strftime("%Y-%m")
        self.hdate_ymdhm = now.strftime("%Y-%m-%d-%H:%M")
self.mkpath(os.path.join("binaries", self.hdate_ym,
self.hdate_ymdhm))
self.binaries = os.path.join(os.getcwd(), "..", "buildlogs",
"binaries")
self.basedir = os.getcwd()
self.error_strings = []
if os.path.exists("config.mk"):
# FIXME: narrow exception
self.pollute_env_from_file("config.mk")
if not self.dirty:
self.run_git_update_submodules()
self.buildroot = os.path.join(os.environ.get("TMPDIR"),
"binaries.build")
if os.path.exists(self.buildroot):
shutil.rmtree(self.buildroot)
for tag in self.tags:
self.build_arducopter(tag)
self.build_arduplane(tag)
self.build_rover(tag)
self.build_antennatracker(tag)
self.build_ardusub(tag)
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
self.generate_manifest()
for error_string in self.error_strings:
self.progress("%s" % error_string)
sys.exit(len(self.error_strings))
if __name__ == '__main__':
parser = optparse.OptionParser("build_binaries.py")
parser.add_option("", "--tags", action="append", type="string",
default=[], help="tags to build")
cmd_opts, cmd_args = parser.parse_args()
tags = cmd_opts.tags
if len(tags) == 0:
# FIXME: wedge this defaulting into parser somehow
tags = ["stable", "beta", "latest"]
bb = build_binaries(tags)
bb.run()
| sharescience/ardupilot | Tools/scripts/build_binaries.py | Python | gpl-3.0 | 26,405 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import config_speller_8
import config_robot_8
class Config(object):
def __init__(self):
self.number_of_decisions = 8
speller = config_speller_8.Config()
robot = config_robot_8.Config()
self.state = []
self.actions = []
self.letters = []
#MENU
menu_state = 0
self.letters.append([u"Speller",u"Robot"
,"Switch", "SSVEP", #u"High SSVEP",u"Low SSVEP"
u"", u"", u"", u""])
self.actions.append([
"",
"start_robot_feedback()",
"transform_scenario('switch')", #restart_scenario('"+self._high_ssvep_scenario()+"')",
"transform_scenario('ssvep')", #restart_scenario('"+self._low_ssvep_scenario()+"')",
"", "", "", ""])
self.state.append([0]*self.number_of_decisions)
self._setup_menu()
zero_state = 1
#SPELLER
speller_state = zero_state
for i, s in enumerate(speller.state):
self.state.append([x+speller_state for x in s])
self.actions.append(speller.actions[i])
self.letters.append(speller.letters[i])
self.state[zero_state][-1] = 0 #GOTO MENU
self.actions[zero_state][-1] = "clear()"
zero_state += len(speller.state)
#ROBOT
robot_state = zero_state
for i, s in enumerate(robot.state):
self.state.append([x+robot_state for x in s])
self.actions.append(robot.actions[i])
self.letters.append(robot.letters[i])
self.state[zero_state][-1] = 0 #GOTO MENU
self.actions[zero_state][-1] = "stop_robot_feedback()"
zero_state += len(robot.state)
self.state[menu_state][0] = speller_state
self.state[menu_state][1] = robot_state
self.number_of_states = zero_state
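        # A worked sketch of the offset arithmetic above (sizes assumed for
        # illustration): the menu occupies state 0; a speller machine with S
        # states is shifted into 1..S, the robot machine with R states into
        # S+1..S+R, so number_of_states ends up as 1 + S + R.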
self.states_configs = ['state', 'letters', 'actions', 'letters_solver', 'actions_solver']
self.other_configs = []
self.letters_solver = self.number_of_states * [self.number_of_decisions * [""]]
self.actions_solver = self.number_of_states * [self.number_of_decisions * [""]]
def _setup_menu(self):
pass
| BrainTech/openbci | obci/logic/configs/config_multiple_8.py | Python | gpl-3.0 | 2,287 |
import numpy as np
import time
import matplotlib.pyplot as plt
from AndorSpectrometer import Spectrometer
spec = Spectrometer(start_cooler=False,init_shutter=True)
#time.sleep(30)
spec.SetCentreWavelength(650)
spec.SetSlitWidth(100)
# spec.SetImageofSlit()
# slit = spec.TakeImageofSlit()
#
#
spec.SetSingleTrack()
spec.SetExposureTime(5.0)
d = spec.TakeSingleTrack()
spec.SetExposureTime(1)
d2 = spec.TakeSingleTrack()
#
# spec.SetFullImage()
# img = spec.TakeFullImage()
#
#
# print(d.shape)
plt.plot(spec.GetWavelength(),d)
plt.show()
plt.plot(spec.GetWavelength(),d2)
plt.show()
# plt.imshow(img)
# plt.show()
#
# plt.imshow(slit)
# plt.show()
| sdickreuter/python-andor | test.py | Python | gpl-3.0 | 660 |