code | repo_name | path | language | license | size
---|---|---|---|---|---|
"""
Post-processes a subset of columns to calculate intermediate (running) sums of
edit counts and other variables. Expects date-sorted input.
Usage:
calculate_intermediate_sums (-h|--help)
calculate_intermediate_sums <input> <output>
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<input> Path to file to process.
<output> Where revisions results
will be written
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import sys
import logging
import operator
from collections import defaultdict
import mysqltsv
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
input_file = mysqltsv.Reader(open(args['<input>'], "r"), headers=True,
types=[int, float, int, int, int, int])
output_file = mysqltsv.Writer(open(args['<output>'], "w"), headers=[
'yyyymm', 'aligned_entities', 'difference_in_alignment_with_previous',
'bot_edits', 'semi_automated_edits', 'non_bot_edits', 'anon_edits',
'current_bot_edits_count', 'current_semi_automated_edits_count',
'current_non_bot_edits_count', 'current_anon_edits_count'])
verbose = args['--verbose']
run(input_file, output_file, verbose)
def run(input_file, output_file, verbose):
current_bot_edits_count = 0
semi_automated_edits_count = 0
non_bot_edits_count = 0
anon_edits_count = 0
all_edits_count = 0
previous_alignment = 0
for i, line in enumerate(input_file):
current_bot_edits_count += line['bot_edits']
semi_automated_edits_count += line['semi_automated_edits']
non_bot_edits_count += line['non_bot_edits']
anon_edits_count += line['anon_edits']
output_file.write([line['yyyymm'],
line['aligned_entities'],
line['aligned_entities'] - previous_alignment,
line['bot_edits'],
line['semi_automated_edits'],
line['non_bot_edits'],
line['anon_edits'],
current_bot_edits_count,
semi_automated_edits_count,
non_bot_edits_count,
anon_edits_count])
previous_alignment = line['aligned_entities']
main()
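# Hedged usage sketch (assumes a date-sorted TSV with the six input columns the
# Reader above expects; the file names here are illustrative):
#
#   python calculate_intermediate_sums.py monthly_alignment.tsv monthly_sums.tsv --verbose
#
# Each output row repeats the month's raw counts and appends the running totals
# accumulated so far (the current_*_count columns) plus the change in
# aligned_entities relative to the previous row.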
| hall1467/wikidata_usage_tracking | python_analysis_scripts/longitudinal_misalignment/calculate_intermediate_sums.py | Python | mit | 2,652 |
from collections import defaultdict
from django import template
from django.utils.safestring import mark_safe
from censusreporter.apps.census.utils import parse_table_id, generic_table_description, table_link
register = template.Library()
@register.filter
def format_subtables_for_results(table_ids):
parts = []
deferred_racials = defaultdict(list)
deferred_pr = []
for table in table_ids:
parsed = parse_table_id(table)
if parsed['racial']:
key = parsed['table_type']
if parsed['puerto_rico']:
key += 'PR'
deferred_racials[key].append(parsed)
elif parsed['puerto_rico']:
deferred_pr.append(table)
else:
parts.append(table_link(table, generic_table_description(table)))
for table in deferred_pr:
parts.append(table_link(table, generic_table_description(table)))
racial_label_tests = [
('B', 'Detailed (by race)'),
('C', 'Simplified (by race)'),
('BPR', 'Detailed (by race) for Puerto Rico'),
('CPR', 'Simplified (by race) for Puerto Rico'),
]
for test, label in racial_label_tests:
try:
iteration_parts = []
for table_dict in deferred_racials[test]:
iteration_parts.append(table_link(table_dict['table_id'], table_dict['race']))
group_table_id = table_dict['table_id']
if iteration_parts:
contents = ' / '.join(iteration_parts)
iter_wrapper = """
<a class="toggler" data-id="{}">{}</a>
<span data-id="{}" class='racial-iteration'>{}</span>
""".format(group_table_id, label, group_table_id, contents)
parts.append(iter_wrapper)
        except Exception as e:
            parts.append(str(e))  # e.message is Python 2 only; str(e) works everywhere
return mark_safe(', '.join(parts))
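# Hedged usage sketch (table IDs are illustrative Census-style IDs: a plain
# table, racial iterations with B/C prefixes, and PR variants for Puerto Rico):
#
#   {% load results %}
#   {{ table_ids|format_subtables_for_results }}
#
# Plain and Puerto Rico tables render as individual links; racial iterations are
# grouped behind a single "Detailed/Simplified (by race)" toggler per table type.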
| censusreporter/censusreporter | censusreporter/apps/census/templatetags/results.py | Python | mit | 1,848 |
# -*- coding: utf8 -*-
import os
import os.path
import flask
import flask_assets
import flask_sqlalchemy
from .cross_domain_app import CrossDomainApp
from zeeguu.util.configuration import load_configuration_or_abort
import sys
if sys.version_info[0] < 3:
    raise Exception("Must be using Python 3")
# *** Starting the App *** #
app = CrossDomainApp(__name__)
load_configuration_or_abort(app, 'ZEEGUU_WEB_CONFIG',
['HOST', 'PORT', 'DEBUG', 'SECRET_KEY', 'MAX_SESSION',
'SMTP_SERVER', 'SMTP_USERNAME', 'SMTP_PASSWORD',
'INVITATION_CODES'])
# The zeeguu.model module relies on an app being injected from outside
# ----------------------------------------------------------------------
import zeeguu
zeeguu.app = app
import zeeguu.model
assert zeeguu.model
# -----------------
from .account import account
app.register_blueprint(account)
from .exercises import exercises
app.register_blueprint(exercises)
from zeeguu_exercises import ex_blueprint
app.register_blueprint(ex_blueprint, url_prefix="/practice")
from umr import umrblue
app.register_blueprint(umrblue, url_prefix="/read")
env = flask_assets.Environment(app)
env.cache = app.instance_path
env.directory = os.path.join(app.instance_path, "gen")
env.url = "/gen"
env.append_path(os.path.join(
os.path.dirname(os.path.abspath(__file__)), "static"
), "/static")
# create the instance folder and return the path
def instance_path(app):
path = os.path.join(app.instance_path, "gen")
try:
os.makedirs(path)
except Exception as e:
print(("exception" + str(e)))
if not os.path.isdir(path):
raise
return path
instance = flask.Blueprint("instance", __name__, static_folder=instance_path(app))
app.register_blueprint(instance)
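# Hedged run sketch (assumes ZEEGUU_WEB_CONFIG points at a config file defining
# HOST, PORT, DEBUG, SECRET_KEY, etc., and that this module is importable as
# zeeguu_web.app, matching its path in the repository):
#
#   export ZEEGUU_WEB_CONFIG=~/.config/zeeguu/web.cfg
#   python -c "from zeeguu_web.app import app; app.run(app.config['HOST'], app.config['PORT'], app.config['DEBUG'])"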
| MrAlexDeluxe/Zeeguu-Web | zeeguu_web/app.py | Python | mit | 1,825 |
import re
import os
import struct
import sys
import numbers
from collections import namedtuple, defaultdict
def int_or_float(s):
# return number, trying to maintain int format
if s.isdigit():
return int(s, 10)
else:
return float(s)
DBCSignal = namedtuple(
"DBCSignal", ["name", "start_bit", "size", "is_little_endian", "is_signed",
"factor", "offset", "tmin", "tmax", "units"])
class dbc():
def __init__(self, fn):
self.name, _ = os.path.splitext(os.path.basename(fn))
with open(fn, encoding="ascii") as f:
self.txt = f.readlines()
self._warned_addresses = set()
# regexps from https://github.com/ebroecker/canmatrix/blob/master/canmatrix/importdbc.py
bo_regexp = re.compile(r"^BO\_ (\w+) (\w+) *: (\w+) (\w+)")
sg_regexp = re.compile(r"^SG\_ (\w+) : (\d+)\|(\d+)@(\d+)([\+|\-]) \(([0-9.+\-eE]+),([0-9.+\-eE]+)\) \[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] \"(.*)\" (.*)")
sgm_regexp = re.compile(r"^SG\_ (\w+) (\w+) *: (\d+)\|(\d+)@(\d+)([\+|\-]) \(([0-9.+\-eE]+),([0-9.+\-eE]+)\) \[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] \"(.*)\" (.*)")
val_regexp = re.compile(r"VAL\_ (\w+) (\w+) (\s*[-+]?[0-9]+\s+\".+?\"[^;]*)")
# A dictionary which maps message ids to tuples ((name, size), signals).
# name is the ASCII name of the message.
# size is the size of the message in bytes.
# signals is a list signals contained in the message.
# signals is a list of DBCSignal in order of increasing start_bit.
self.msgs = {}
# A dictionary which maps message ids to a list of tuples (signal name, definition value pairs)
self.def_vals = defaultdict(list)
# lookup to bit reverse each byte
self.bits_index = [(i & ~0b111) + ((-i - 1) & 0b111) for i in range(64)]
for l in self.txt:
l = l.strip()
if l.startswith("BO_ "):
# new group
dat = bo_regexp.match(l)
if dat is None:
print("bad BO {0}".format(l))
name = dat.group(2)
size = int(dat.group(3))
ids = int(dat.group(1), 0) # could be hex
if ids in self.msgs:
sys.exit("Duplicate address detected %d %s" % (ids, self.name))
self.msgs[ids] = ((name, size), [])
if l.startswith("SG_ "):
# new signal
dat = sg_regexp.match(l)
go = 0
if dat is None:
dat = sgm_regexp.match(l)
go = 1
if dat is None:
print("bad SG {0}".format(l))
sgname = dat.group(1)
start_bit = int(dat.group(go + 2))
signal_size = int(dat.group(go + 3))
is_little_endian = int(dat.group(go + 4)) == 1
is_signed = dat.group(go + 5) == '-'
factor = int_or_float(dat.group(go + 6))
offset = int_or_float(dat.group(go + 7))
tmin = int_or_float(dat.group(go + 8))
tmax = int_or_float(dat.group(go + 9))
units = dat.group(go + 10)
self.msgs[ids][1].append(
DBCSignal(sgname, start_bit, signal_size, is_little_endian,
is_signed, factor, offset, tmin, tmax, units))
if l.startswith("VAL_ "):
# new signal value/definition
dat = val_regexp.match(l)
if dat is None:
print("bad VAL {0}".format(l))
ids = int(dat.group(1), 0) # could be hex
sgname = dat.group(2)
defvals = dat.group(3)
defvals = defvals.replace("?", r"\?") # escape sequence in C++
defvals = defvals.split('"')[:-1]
# convert strings to UPPER_CASE_WITH_UNDERSCORES
defvals[1::2] = [d.strip().upper().replace(" ", "_") for d in defvals[1::2]]
defvals = '"' + "".join(str(i) for i in defvals) + '"'
self.def_vals[ids].append((sgname, defvals))
for msg in self.msgs.values():
msg[1].sort(key=lambda x: x.start_bit)
self.msg_name_to_address = {}
for address, m in self.msgs.items():
name = m[0][0]
self.msg_name_to_address[name] = address
def lookup_msg_id(self, msg_id):
if not isinstance(msg_id, numbers.Number):
msg_id = self.msg_name_to_address[msg_id]
return msg_id
def reverse_bytes(self, x):
return ((x & 0xff00000000000000) >> 56) | \
((x & 0x00ff000000000000) >> 40) | \
((x & 0x0000ff0000000000) >> 24) | \
((x & 0x000000ff00000000) >> 8) | \
((x & 0x00000000ff000000) << 8) | \
((x & 0x0000000000ff0000) << 24) | \
((x & 0x000000000000ff00) << 40) | \
((x & 0x00000000000000ff) << 56)
def encode(self, msg_id, dd):
"""Encode a CAN message using the dbc.
Inputs:
msg_id: The message ID.
dd: A dictionary mapping signal name to signal data.
"""
msg_id = self.lookup_msg_id(msg_id)
msg_def = self.msgs[msg_id]
size = msg_def[0][1]
result = 0
for s in msg_def[1]:
ival = dd.get(s.name)
if ival is not None:
        ival = (ival - s.offset) / s.factor  # invert decode's physical = raw * factor + offset
ival = int(round(ival))
if s.is_signed and ival < 0:
ival = (1 << s.size) + ival
if s.is_little_endian:
shift = s.start_bit
else:
b1 = (s.start_bit // 8) * 8 + (-s.start_bit - 1) % 8
shift = 64 - (b1 + s.size)
mask = ((1 << s.size) - 1) << shift
dat = (ival & ((1 << s.size) - 1)) << shift
if s.is_little_endian:
mask = self.reverse_bytes(mask)
dat = self.reverse_bytes(dat)
result &= ~mask
result |= dat
result = struct.pack('>Q', result)
return result[:size]
def decode(self, x, arr=None, debug=False):
"""Decode a CAN message using the dbc.
Inputs:
x: A collection with elements (address, time, data), where address is
the CAN address, time is the bus time, and data is the CAN data as a
hex string.
arr: Optional list of signals which should be decoded and returned.
debug: True to print debugging statements.
Returns:
A tuple (name, data), where name is the name of the CAN message and data
is the decoded result. If arr is None, data is a dict of properties.
Otherwise data is a list of the same length as arr.
Returns (None, None) if the message could not be decoded.
"""
if arr is None:
out = {}
else:
out = [None] * len(arr)
msg = self.msgs.get(x[0])
if msg is None:
if x[0] not in self._warned_addresses:
# print("WARNING: Unknown message address {}".format(x[0]))
self._warned_addresses.add(x[0])
return None, None
name = msg[0][0]
if debug:
print(name)
st = x[2].ljust(8, b'\x00')
le, be = None, None
for s in msg[1]:
if arr is not None and s[0] not in arr:
continue
start_bit = s[1]
signal_size = s[2]
little_endian = s[3]
signed = s[4]
factor = s[5]
offset = s[6]
if little_endian:
if le is None:
le = struct.unpack("<Q", st)[0]
tmp = le
shift_amount = start_bit
else:
if be is None:
be = struct.unpack(">Q", st)[0]
tmp = be
b1 = (start_bit // 8) * 8 + (-start_bit - 1) % 8
shift_amount = 64 - (b1 + signal_size)
if shift_amount < 0:
continue
tmp = (tmp >> shift_amount) & ((1 << signal_size) - 1)
if signed and (tmp >> (signal_size - 1)):
tmp -= (1 << signal_size)
tmp = tmp * factor + offset
# if debug:
# print("%40s %2d %2d %7.2f %s" % (s[0], s[1], s[2], tmp, s[-1]))
if arr is None:
out[s[0]] = tmp
else:
out[arr.index(s[0])] = tmp
return name, out
def get_signals(self, msg):
msg = self.lookup_msg_id(msg)
return [sgs.name for sgs in self.msgs[msg][1]]
if __name__ == "__main__":
from opendbc import DBC_PATH
dbc_test = dbc(os.path.join(DBC_PATH, 'toyota_prius_2017_pt_generated.dbc'))
msg = ('STEER_ANGLE_SENSOR', {'STEER_ANGLE': -6.0, 'STEER_RATE': 4, 'STEER_FRACTION': -0.2})
encoded = dbc_test.encode(*msg)
decoded = dbc_test.decode((0x25, 0, encoded))
assert decoded == msg
dbc_test = dbc(os.path.join(DBC_PATH, 'hyundai_santa_fe_2019_ccan.dbc'))
  decoded = dbc_test.decode((0x2b0, 0, b"\xfa\xfe\x00\x07\x12"))
assert abs(decoded[1]['SAS_Angle'] - (-26.2)) < 0.001
msg = ('SAS11', {'SAS_Stat': 7.0, 'MsgCount': 0.0, 'SAS_Angle': -26.200000000000003, 'SAS_Speed': 0.0, 'CheckSum': 0.0})
encoded = dbc_test.encode(*msg)
decoded = dbc_test.decode((0x2b0, 0, encoded))
assert decoded == msg
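  # Hedged sketch of the signal-whitelist decode path, reusing the Hyundai
  # message encoded just above (signal names come from the SAS11 dict):
  #
  #   wanted = ['SAS_Angle', 'SAS_Speed']
  #   name, values = dbc_test.decode((0x2b0, 0, encoded), arr=wanted)
  #   # `values` is a list aligned with `wanted`; with arr=None decode returns a dict
  #   print(dbc_test.get_signals('SAS11'))  # every signal name defined for SAS11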
| vntarasov/openpilot | opendbc/can/dbc.py | Python | mit | 8,588 |
"""Provides helper classes for testing option handling in pip
"""
import os
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.commands import commands_dict
class FakeCommand(Command):
name = 'fake'
summary = name
def main(self, args):
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.add_option_group(index_opts)
return self.parse_args(args)
class AddFakeCommandMixin(object):
def setup(self):
self.environ_before = os.environ.copy()
commands_dict[FakeCommand.name] = FakeCommand
def teardown(self):
os.environ = self.environ_before
commands_dict.pop(FakeCommand.name)
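# Hedged usage sketch (a minimal illustration; pip's real option tests drive
# FakeCommand through pip's command machinery, and the URL below is made up):
#
#   options, args = FakeCommand().main(['--index-url', 'https://example.org/simple'])
#   assert options.index_url == 'https://example.org/simple'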
| techtonik/pip | tests/lib/options_helpers.py | Python | mit | 792 |
# -*- coding: utf-8 -*-
# Chapter 3: Neural networks
import numpy as np
class NeuralTrain:
def step_function(self, x):
return np.array(x > 0, dtype=np.int)
def sigmoid_function(self, x):
return 1 / (1 + np.exp(-x))
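    # Hedged usage sketch covering the three activations in this class
    # (inputs are arbitrary; relu_function is defined just below):
    #
    #   nn = NeuralTrain()
    #   x = np.array([-1.0, 0.5, 2.0])
    #   nn.step_function(x)     # -> array([0, 1, 1])
    #   nn.sigmoid_function(x)  # -> array([0.26894142, 0.62245933, 0.88079708])
    #   nn.relu_function(x)     # -> array([0. , 0.5, 2. ])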
def relu_function(self, x):
        return np.maximum(0, x)
| Arahabica/NNTrain | train/neural/NeuralTrain.py | Python | mit | 318 |
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TanitJobsCategory(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return "%s" % self.name
class KeeJobsCategory(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return "%s" % self.name
| firasbenmakhlouf/JobLookup | metadata/models.py | Python | mit | 467 |
# -*- coding: utf-8 -*-
""" OneLogin_Saml2_Utils class
Copyright (c) 2014, OneLogin, Inc.
All rights reserved.
Auxiliary class of OneLogin's Python Toolkit.
"""
import base64
from datetime import datetime
import calendar
from hashlib import sha1, sha256, sha384, sha512
from isodate import parse_duration as duration_parser
import re
from textwrap import wrap
from uuid import uuid4
import zlib
import xmlsec
from onelogin.saml2 import compat
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.errors import OneLogin_Saml2_Error
from onelogin.saml2.xml_utils import OneLogin_Saml2_XML
try:
from urllib.parse import quote_plus # py3
except ImportError:
from urllib import quote_plus # py2
class OneLogin_Saml2_Utils(object):
"""
Auxiliary class that contains several utility methods to parse time,
urls, add sign, encrypt, decrypt, sign validation, handle xml ...
"""
@staticmethod
def escape_url(url, lowercase_urlencoding=False):
"""
escape the non-safe symbols in url
The encoding used by ADFS 3.0 is not compatible with
python's quote_plus (ADFS produces lower case hex numbers and quote_plus produces
upper case hex numbers)
:param url: the url to escape
:type url: str
:param lowercase_urlencoding: lowercase or no
:type lowercase_urlencoding: boolean
:return: the escaped url
:rtype str
"""
encoded = quote_plus(url)
return re.sub(r"%[A-F0-9]{2}", lambda m: m.group(0).lower(), encoded) if lowercase_urlencoding else encoded
@staticmethod
def b64encode(data):
"""base64 encode"""
return compat.to_string(base64.b64encode(compat.to_bytes(data)))
@staticmethod
def b64decode(data):
"""base64 decode"""
return base64.b64decode(data)
@staticmethod
def decode_base64_and_inflate(value, ignore_zip=False):
"""
base64 decodes and then inflates according to RFC1951
:param value: a deflated and encoded string
:type value: string
:param ignore_zip: ignore zip errors
:returns: the string after decoding and inflating
:rtype: string
"""
encoded = OneLogin_Saml2_Utils.b64decode(value)
try:
return zlib.decompress(encoded, -15)
except zlib.error:
if not ignore_zip:
raise
return encoded
@staticmethod
def deflate_and_base64_encode(value):
"""
Deflates and then base64 encodes a string
:param value: The string to deflate and encode
:type value: string
:returns: The deflated and encoded string
:rtype: string
"""
return OneLogin_Saml2_Utils.b64encode(zlib.compress(compat.to_bytes(value))[2:-4])
@staticmethod
def format_cert(cert, heads=True):
"""
Returns a x509 cert (adding header & footer if required).
:param cert: A x509 unformatted cert
:type: string
:param heads: True if we want to include head and footer
:type: boolean
:returns: Formatted cert
:rtype: string
"""
x509_cert = cert.replace('\x0D', '')
x509_cert = x509_cert.replace('\r', '')
x509_cert = x509_cert.replace('\n', '')
if len(x509_cert) > 0:
x509_cert = x509_cert.replace('-----BEGIN CERTIFICATE-----', '')
x509_cert = x509_cert.replace('-----END CERTIFICATE-----', '')
x509_cert = x509_cert.replace(' ', '')
if heads:
x509_cert = "-----BEGIN CERTIFICATE-----\n" + "\n".join(wrap(x509_cert, 64)) + "\n-----END CERTIFICATE-----\n"
return x509_cert
@staticmethod
def format_private_key(key, heads=True):
"""
Returns a private key (adding header & footer if required).
        :param key: A private key
:type: string
:param heads: True if we want to include head and footer
:type: boolean
        :returns: Formatted private key
:rtype: string
"""
private_key = key.replace('\x0D', '')
private_key = private_key.replace('\r', '')
private_key = private_key.replace('\n', '')
if len(private_key) > 0:
if private_key.find('-----BEGIN PRIVATE KEY-----') != -1:
private_key = private_key.replace('-----BEGIN PRIVATE KEY-----', '')
private_key = private_key.replace('-----END PRIVATE KEY-----', '')
private_key = private_key.replace(' ', '')
if heads:
private_key = "-----BEGIN PRIVATE KEY-----\n" + "\n".join(wrap(private_key, 64)) + "\n-----END PRIVATE KEY-----\n"
else:
private_key = private_key.replace('-----BEGIN RSA PRIVATE KEY-----', '')
private_key = private_key.replace('-----END RSA PRIVATE KEY-----', '')
private_key = private_key.replace(' ', '')
if heads:
private_key = "-----BEGIN RSA PRIVATE KEY-----\n" + "\n".join(wrap(private_key, 64)) + "\n-----END RSA PRIVATE KEY-----\n"
return private_key
@staticmethod
def redirect(url, parameters={}, request_data={}):
"""
Executes a redirection to the provided url (or return the target url).
:param url: The target url
:type: string
:param parameters: Extra parameters to be passed as part of the url
:type: dict
:param request_data: The request as a dict
:type: dict
:returns: Url
:rtype: string
"""
assert isinstance(url, compat.str_type)
assert isinstance(parameters, dict)
if url.startswith('/'):
url = '%s%s' % (OneLogin_Saml2_Utils.get_self_url_host(request_data), url)
# Verify that the URL is to a http or https site.
if re.search('^https?://', url) is None:
raise OneLogin_Saml2_Error(
'Redirect to invalid URL: ' + url,
OneLogin_Saml2_Error.REDIRECT_INVALID_URL
)
# Add encoded parameters
if url.find('?') < 0:
param_prefix = '?'
else:
param_prefix = '&'
for name, value in parameters.items():
if value is None:
param = OneLogin_Saml2_Utils.escape_url(name)
elif isinstance(value, list):
param = ''
for val in value:
param += OneLogin_Saml2_Utils.escape_url(name) + '[]=' + OneLogin_Saml2_Utils.escape_url(val) + '&'
if len(param) > 0:
param = param[0:-1]
else:
param = OneLogin_Saml2_Utils.escape_url(name) + '=' + OneLogin_Saml2_Utils.escape_url(value)
if param:
url += param_prefix + param
param_prefix = '&'
return url
@staticmethod
def get_self_url_host(request_data):
"""
Returns the protocol + the current host + the port (if different than
common ports).
:param request_data: The request as a dict
:type: dict
:return: Url
:rtype: string
"""
current_host = OneLogin_Saml2_Utils.get_self_host(request_data)
port = ''
if OneLogin_Saml2_Utils.is_https(request_data):
protocol = 'https'
else:
protocol = 'http'
if 'server_port' in request_data and request_data['server_port'] is not None:
port_number = str(request_data['server_port'])
port = ':' + port_number
if protocol == 'http' and port_number == '80':
port = ''
elif protocol == 'https' and port_number == '443':
port = ''
return '%s://%s%s' % (protocol, current_host, port)
@staticmethod
def get_self_host(request_data):
"""
Returns the current host.
:param request_data: The request as a dict
:type: dict
:return: The current host
:rtype: string
"""
if 'http_host' in request_data:
current_host = request_data['http_host']
elif 'server_name' in request_data:
current_host = request_data['server_name']
else:
raise Exception('No hostname defined')
if ':' in current_host:
current_host_data = current_host.split(':')
possible_port = current_host_data[-1]
try:
int(possible_port)
current_host = current_host_data[0]
except ValueError:
current_host = ':'.join(current_host_data)
return current_host
@staticmethod
def is_https(request_data):
"""
Checks if https or http.
:param request_data: The request as a dict
:type: dict
:return: False if https is not active
:rtype: boolean
"""
is_https = 'https' in request_data and request_data['https'] != 'off'
is_https = is_https or ('server_port' in request_data and str(request_data['server_port']) == '443')
return is_https
@staticmethod
def get_self_url_no_query(request_data):
"""
Returns the URL of the current host + current view.
:param request_data: The request as a dict
:type: dict
:return: The url of current host + current view
:rtype: string
"""
self_url_host = OneLogin_Saml2_Utils.get_self_url_host(request_data)
script_name = request_data['script_name']
if script_name:
if script_name[0] != '/':
script_name = '/' + script_name
else:
script_name = ''
self_url_no_query = self_url_host + script_name
if 'path_info' in request_data:
self_url_no_query += request_data['path_info']
return self_url_no_query
@staticmethod
def get_self_routed_url_no_query(request_data):
"""
Returns the routed URL of the current host + current view.
:param request_data: The request as a dict
:type: dict
:return: The url of current host + current view
:rtype: string
"""
self_url_host = OneLogin_Saml2_Utils.get_self_url_host(request_data)
route = ''
if 'request_uri' in request_data and request_data['request_uri']:
route = request_data['request_uri']
if 'query_string' in request_data and request_data['query_string']:
route = route.replace(request_data['query_string'], '')
return self_url_host + route
@staticmethod
def get_self_url(request_data):
"""
Returns the URL of the current host + current view + query.
:param request_data: The request as a dict
:type: dict
:return: The url of current host + current view + query
:rtype: string
"""
self_url_host = OneLogin_Saml2_Utils.get_self_url_host(request_data)
request_uri = ''
if 'request_uri' in request_data:
request_uri = request_data['request_uri']
if not request_uri.startswith('/'):
match = re.search('^https?://[^/]*(/.*)', request_uri)
if match is not None:
request_uri = match.groups()[0]
return self_url_host + request_uri
@staticmethod
def generate_unique_id():
"""
        Generates a unique string (used for example as ID for assertions).
:return: A unique string
:rtype: string
"""
return 'ONELOGIN_%s' % sha1(compat.to_bytes(uuid4().hex)).hexdigest()
@staticmethod
def parse_time_to_SAML(time):
"""
Converts a UNIX timestamp to SAML2 timestamp on the form
yyyy-mm-ddThh:mm:ss(\.s+)?Z.
:param time: The time we should convert (DateTime).
:type: string
:return: SAML2 timestamp.
:rtype: string
"""
data = datetime.utcfromtimestamp(float(time))
return data.strftime('%Y-%m-%dT%H:%M:%SZ')
@staticmethod
def parse_SAML_to_time(timestr):
"""
Converts a SAML2 timestamp on the form yyyy-mm-ddThh:mm:ss(\.s+)?Z
to a UNIX timestamp. The sub-second part is ignored.
:param timestr: The time we should convert (SAML Timestamp).
:type: string
:return: Converted to a unix timestamp.
:rtype: int
"""
try:
data = datetime.strptime(timestr, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
data = datetime.strptime(timestr, '%Y-%m-%dT%H:%M:%S.%fZ')
return calendar.timegm(data.utctimetuple())
@staticmethod
def now():
"""
:return: unix timestamp of actual time.
:rtype: int
"""
return calendar.timegm(datetime.utcnow().utctimetuple())
@staticmethod
def parse_duration(duration, timestamp=None):
"""
        Interprets an ISO8601 duration value relative to a given timestamp.
:param duration: The duration, as a string.
:type: string
:param timestamp: The unix timestamp we should apply the duration to.
Optional, default to the current time.
:type: string
:return: The new timestamp, after the duration is applied.
:rtype: int
"""
assert isinstance(duration, compat.str_type)
assert timestamp is None or isinstance(timestamp, int)
timedelta = duration_parser(duration)
if timestamp is None:
data = datetime.utcnow() + timedelta
else:
data = datetime.utcfromtimestamp(timestamp) + timedelta
return calendar.timegm(data.utctimetuple())
@staticmethod
def get_expire_time(cache_duration=None, valid_until=None):
"""
Compares 2 dates and returns the earliest.
:param cache_duration: The duration, as a string.
:type: string
:param valid_until: The valid until date, as a string or as a timestamp
:type: string
:return: The expiration time.
:rtype: int
"""
expire_time = None
if cache_duration is not None:
expire_time = OneLogin_Saml2_Utils.parse_duration(cache_duration)
if valid_until is not None:
if isinstance(valid_until, int):
valid_until_time = valid_until
else:
valid_until_time = OneLogin_Saml2_Utils.parse_SAML_to_time(valid_until)
if expire_time is None or expire_time > valid_until_time:
expire_time = valid_until_time
if expire_time is not None:
return '%d' % expire_time
return None
@staticmethod
def delete_local_session(callback=None):
"""
Deletes the local session.
"""
if callback is not None:
callback()
@staticmethod
def calculate_x509_fingerprint(x509_cert, alg='sha1'):
"""
Calculates the fingerprint of a x509cert.
:param x509_cert: x509 cert
:type: string
:param alg: The algorithm to build the fingerprint
:type: string
:returns: fingerprint
:rtype: string
"""
assert isinstance(x509_cert, compat.str_type)
lines = x509_cert.split('\n')
data = ''
for line in lines:
# Remove '\r' from end of line if present.
line = line.rstrip()
if line == '-----BEGIN CERTIFICATE-----':
# Delete junk from before the certificate.
data = ''
elif line == '-----END CERTIFICATE-----':
# Ignore data after the certificate.
break
elif line == '-----BEGIN PUBLIC KEY-----' or line == '-----BEGIN RSA PRIVATE KEY-----':
# This isn't an X509 certificate.
return None
else:
# Append the current line to the certificate data.
data += line
decoded_data = base64.b64decode(compat.to_bytes(data))
if alg == 'sha512':
fingerprint = sha512(decoded_data)
elif alg == 'sha384':
fingerprint = sha384(decoded_data)
elif alg == 'sha256':
fingerprint = sha256(decoded_data)
else:
fingerprint = sha1(decoded_data)
return fingerprint.hexdigest().lower()
@staticmethod
def format_finger_print(fingerprint):
"""
Formats a fingerprint.
:param fingerprint: fingerprint
:type: string
:returns: Formatted fingerprint
:rtype: string
"""
formatted_fingerprint = fingerprint.replace(':', '')
return formatted_fingerprint.lower()
@staticmethod
def generate_name_id(value, sp_nq, sp_format, cert=None, debug=False, nq=None):
"""
Generates a nameID.
        :param value: The value for the NameID element
:type: string
:param sp_nq: SP Name Qualifier
:type: string
:param sp_format: SP Format
:type: string
:param cert: IdP Public Cert to encrypt the nameID
:type: string
:param debug: Activate the xmlsec debug
:type: bool
:returns: DOMElement | XMLSec nameID
:rtype: string
:param nq: IDP Name Qualifier
:type: string
"""
root = OneLogin_Saml2_XML.make_root('{%s}container' % OneLogin_Saml2_Constants.NS_SAML, nsmap={'saml': OneLogin_Saml2_Constants.NS_SAML})
name_id = OneLogin_Saml2_XML.make_child(root, '{%s}NameID' % OneLogin_Saml2_Constants.NS_SAML, nsmap={'saml2': OneLogin_Saml2_Constants.NS_SAML})
if sp_nq is not None:
name_id.set('SPNameQualifier', sp_nq)
name_id.set('Format', sp_format)
if nq is not None:
name_id.set('NameQualifier', nq)
name_id.text = value
if cert is not None:
xmlsec.enable_debug_trace(debug)
# Load the public cert
manager = xmlsec.KeysManager()
manager.add_key(xmlsec.Key.from_memory(cert, xmlsec.KeyFormat.CERT_PEM, None))
# Prepare for encryption
enc_data = xmlsec.template.encrypted_data_create(
root, xmlsec.Transform.AES128, type=xmlsec.EncryptionType.ELEMENT, ns="xenc")
xmlsec.template.encrypted_data_ensure_cipher_value(enc_data)
key_info = xmlsec.template.encrypted_data_ensure_key_info(enc_data, ns="dsig")
enc_key = xmlsec.template.add_encrypted_key(key_info, xmlsec.Transform.RSA_OAEP)
xmlsec.template.encrypted_data_ensure_cipher_value(enc_key)
# Encrypt!
enc_ctx = xmlsec.EncryptionContext(manager)
enc_ctx.key = xmlsec.Key.generate(xmlsec.KeyData.AES, 128, xmlsec.KeyDataType.SESSION)
enc_data = enc_ctx.encrypt_xml(enc_data, name_id)
return '<saml:EncryptedID>' + compat.to_string(OneLogin_Saml2_XML.to_string(enc_data)) + '</saml:EncryptedID>'
else:
return OneLogin_Saml2_XML.extract_tag_text(root, "saml:NameID")
@staticmethod
def get_status(dom):
"""
Gets Status from a Response.
:param dom: The Response as XML
:type: Document
:returns: The Status, an array with the code and a message.
:rtype: dict
"""
status = {}
status_entry = OneLogin_Saml2_XML.query(dom, '/samlp:Response/samlp:Status')
if len(status_entry) == 0:
raise Exception('Missing Status on response')
code_entry = OneLogin_Saml2_XML.query(dom, '/samlp:Response/samlp:Status/samlp:StatusCode', status_entry[0])
if len(code_entry) == 0:
raise Exception('Missing Status Code on response')
code = code_entry[0].values()[0]
status['code'] = code
message_entry = OneLogin_Saml2_XML.query(dom, '/samlp:Response/samlp:Status/samlp:StatusMessage', status_entry[0])
if len(message_entry) == 0:
subcode_entry = OneLogin_Saml2_XML.query(dom, '/samlp:Response/samlp:Status/samlp:StatusCode/samlp:StatusCode', status_entry[0])
if len(subcode_entry) > 0:
status['msg'] = subcode_entry[0].values()[0]
else:
status['msg'] = ''
else:
status['msg'] = message_entry[0].text
return status
@staticmethod
def decrypt_element(encrypted_data, key, debug=False):
"""
Decrypts an encrypted element.
:param encrypted_data: The encrypted data.
:type: lxml.etree.Element | DOMElement | basestring
:param key: The key.
:type: string
:param debug: Activate the xmlsec debug
:type: bool
:returns: The decrypted element.
:rtype: lxml.etree.Element
"""
encrypted_data = OneLogin_Saml2_XML.to_etree(encrypted_data)
xmlsec.enable_debug_trace(debug)
manager = xmlsec.KeysManager()
manager.add_key(xmlsec.Key.from_memory(key, xmlsec.KeyFormat.PEM, None))
enc_ctx = xmlsec.EncryptionContext(manager)
return enc_ctx.decrypt(encrypted_data)
@staticmethod
def add_sign(xml, key, cert, debug=False, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1):
"""
Adds signature key and senders certificate to an element (Message or
Assertion).
:param xml: The element we should sign
:type: string | Document
:param key: The private key
:type: string
        :param cert: The public cert
:type: string
:param debug: Activate the xmlsec debug
:type: bool
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
"""
if xml is None or xml == '':
raise Exception('Empty string supplied as input')
elem = OneLogin_Saml2_XML.to_etree(xml)
xmlsec.enable_debug_trace(debug)
xmlsec.tree.add_ids(elem, ["ID"])
# Sign the metadata with our private key.
sign_algorithm_transform_map = {
OneLogin_Saml2_Constants.DSA_SHA1: xmlsec.Transform.DSA_SHA1,
OneLogin_Saml2_Constants.RSA_SHA1: xmlsec.Transform.RSA_SHA1,
OneLogin_Saml2_Constants.RSA_SHA256: xmlsec.Transform.RSA_SHA256,
OneLogin_Saml2_Constants.RSA_SHA384: xmlsec.Transform.RSA_SHA384,
OneLogin_Saml2_Constants.RSA_SHA512: xmlsec.Transform.RSA_SHA512
}
sign_algorithm_transform = sign_algorithm_transform_map.get(sign_algorithm, xmlsec.Transform.RSA_SHA1)
signature = xmlsec.template.create(elem, xmlsec.Transform.EXCL_C14N, sign_algorithm_transform, ns='ds')
issuer = OneLogin_Saml2_XML.query(elem, '//saml:Issuer')
if len(issuer) > 0:
issuer = issuer[0]
issuer.addnext(signature)
else:
elem[0].insert(0, signature)
elem_id = elem.get('ID', None)
if elem_id:
elem_id = '#' + elem_id
ref = xmlsec.template.add_reference(signature, xmlsec.Transform.SHA1, uri=elem_id)
xmlsec.template.add_transform(ref, xmlsec.Transform.ENVELOPED)
xmlsec.template.add_transform(ref, xmlsec.Transform.EXCL_C14N)
key_info = xmlsec.template.ensure_key_info(signature)
xmlsec.template.add_x509_data(key_info)
dsig_ctx = xmlsec.SignatureContext()
sign_key = xmlsec.Key.from_memory(key, xmlsec.KeyFormat.PEM, None)
sign_key.load_cert_from_memory(cert, xmlsec.KeyFormat.PEM)
dsig_ctx.key = sign_key
dsig_ctx.sign(signature)
return OneLogin_Saml2_XML.to_string(elem)
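    # Hedged sketch of add_sign (key_pem/cert_pem are PEM strings loaded by the
    # caller; metadata_xml is any XML document string, e.g. SP metadata):
    #
    #   signed_xml = OneLogin_Saml2_Utils.add_sign(
    #       metadata_xml, key_pem, cert_pem,
    #       sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA256)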
@staticmethod
def validate_sign(xml, cert=None, fingerprint=None, fingerprintalg='sha1', validatecert=False, debug=False):
"""
Validates a signature (Message or Assertion).
:param xml: The element we should validate
:type: string | Document
:param cert: The public cert
:type: string
:param fingerprint: The fingerprint of the public cert
:type: string
:param fingerprintalg: The algorithm used to build the fingerprint
:type: string
:param validatecert: If true, will verify the signature and if the cert is valid.
:type: bool
:param debug: Activate the xmlsec debug
:type: bool
"""
try:
if xml is None or xml == '':
raise Exception('Empty string supplied as input')
elem = OneLogin_Saml2_XML.to_etree(xml)
xmlsec.enable_debug_trace(debug)
xmlsec.tree.add_ids(elem, ["ID"])
signature_nodes = OneLogin_Saml2_XML.query(elem, '/samlp:Response/ds:Signature')
if not len(signature_nodes) > 0:
signature_nodes += OneLogin_Saml2_XML.query(elem, '/samlp:Response/ds:Signature')
signature_nodes += OneLogin_Saml2_XML.query(elem, '/samlp:Response/saml:Assertion/ds:Signature')
if len(signature_nodes) == 1:
signature_node = signature_nodes[0]
return OneLogin_Saml2_Utils.validate_node_sign(signature_node, elem, cert, fingerprint, fingerprintalg, validatecert, debug)
else:
return False
except xmlsec.Error as e:
if debug:
print(e)
return False
@staticmethod
def validate_metadata_sign(xml, cert=None, fingerprint=None, fingerprintalg='sha1', validatecert=False, debug=False):
"""
Validates a signature of a EntityDescriptor.
:param xml: The element we should validate
:type: string | Document
:param cert: The public cert
:type: string
:param fingerprint: The fingerprint of the public cert
:type: string
:param fingerprintalg: The algorithm used to build the fingerprint
:type: string
:param validatecert: If true, will verify the signature and if the cert is valid.
:type: bool
:param debug: Activate the xmlsec debug
:type: bool
"""
try:
if xml is None or xml == '':
raise Exception('Empty string supplied as input')
elem = OneLogin_Saml2_XML.to_etree(xml)
xmlsec.enable_debug_trace(debug)
xmlsec.tree.add_ids(elem, ["ID"])
signature_nodes = OneLogin_Saml2_XML.query(elem, '/md:EntitiesDescriptor/ds:Signature')
if len(signature_nodes) == 0:
signature_nodes += OneLogin_Saml2_XML.query(elem, '/md:EntityDescriptor/ds:Signature')
if len(signature_nodes) == 0:
signature_nodes += OneLogin_Saml2_XML.query(elem, '/md:EntityDescriptor/md:SPSSODescriptor/ds:Signature')
signature_nodes += OneLogin_Saml2_XML.query(elem, '/md:EntityDescriptor/md:IDPSSODescriptor/ds:Signature')
if len(signature_nodes) > 0:
for signature_node in signature_nodes:
if not OneLogin_Saml2_Utils.validate_node_sign(signature_node, elem, cert, fingerprint, fingerprintalg, validatecert, debug):
return False
return True
else:
return False
except Exception:
return False
@staticmethod
def validate_node_sign(signature_node, elem, cert=None, fingerprint=None, fingerprintalg='sha1', validatecert=False, debug=False):
"""
Validates a signature node.
:param signature_node: The signature node
:type: Node
:param xml: The element we should validate
:type: Document
:param cert: The public cert
:type: string
:param fingerprint: The fingerprint of the public cert
:type: string
:param fingerprintalg: The algorithm used to build the fingerprint
:type: string
:param validatecert: If true, will verify the signature and if the cert is valid.
:type: bool
:param debug: Activate the xmlsec debug
:type: bool
"""
try:
if (cert is None or cert == '') and fingerprint:
x509_certificate_nodes = OneLogin_Saml2_XML.query(signature_node, '//ds:Signature/ds:KeyInfo/ds:X509Data/ds:X509Certificate')
if len(x509_certificate_nodes) > 0:
x509_certificate_node = x509_certificate_nodes[0]
x509_cert_value = x509_certificate_node.text
x509_fingerprint_value = OneLogin_Saml2_Utils.calculate_x509_fingerprint(x509_cert_value, fingerprintalg)
if fingerprint == x509_fingerprint_value:
cert = OneLogin_Saml2_Utils.format_cert(x509_cert_value)
if cert is None or cert == '':
return False
# Check if Reference URI is empty
reference_elem = OneLogin_Saml2_XML.query(signature_node, '//ds:Reference')
if len(reference_elem) > 0:
if reference_elem[0].get('URI') == '':
reference_elem[0].set('URI', '#%s' % signature_node.getparent().get('ID'))
if validatecert:
manager = xmlsec.KeysManager()
manager.load_cert_from_memory(cert, xmlsec.KeyFormat.CERT_PEM, xmlsec.KeyDataType.TRUSTED)
dsig_ctx = xmlsec.SignatureContext(manager)
else:
dsig_ctx = xmlsec.SignatureContext()
dsig_ctx.key = xmlsec.Key.from_memory(cert, xmlsec.KeyFormat.CERT_PEM, None)
dsig_ctx.set_enabled_key_data([xmlsec.KeyData.X509])
dsig_ctx.verify(signature_node)
return True
except xmlsec.Error as e:
if debug:
print(e)
@staticmethod
def sign_binary(msg, key, algorithm=xmlsec.Transform.RSA_SHA1, debug=False):
"""
Sign binary message
        :param msg: The message to sign
:type: bytes
:param key: The private key
:type: string
:param debug: Activate the xmlsec debug
:type: bool
:return signed message
:rtype str
"""
if isinstance(msg, str):
msg = msg.encode('utf8')
xmlsec.enable_debug_trace(debug)
dsig_ctx = xmlsec.SignatureContext()
dsig_ctx.key = xmlsec.Key.from_memory(key, xmlsec.KeyFormat.PEM, None)
return dsig_ctx.sign_binary(compat.to_bytes(msg), algorithm)
@staticmethod
def validate_binary_sign(signed_query, signature, cert=None, algorithm=OneLogin_Saml2_Constants.RSA_SHA1, debug=False):
"""
Validates signed binary data (Used to validate GET Signature).
        :param signed_query: The signed query string we should validate
:type: string
:param signature: The signature that will be validate
:type: string
:param cert: The public cert
:type: string
:param algorithm: Signature algorithm
:type: string
:param debug: Activate the xmlsec debug
:type: bool
"""
try:
xmlsec.enable_debug_trace(debug)
dsig_ctx = xmlsec.SignatureContext()
dsig_ctx.key = xmlsec.Key.from_memory(cert, xmlsec.KeyFormat.CERT_PEM, None)
sign_algorithm_transform_map = {
OneLogin_Saml2_Constants.DSA_SHA1: xmlsec.Transform.DSA_SHA1,
OneLogin_Saml2_Constants.RSA_SHA1: xmlsec.Transform.RSA_SHA1,
OneLogin_Saml2_Constants.RSA_SHA256: xmlsec.Transform.RSA_SHA256,
OneLogin_Saml2_Constants.RSA_SHA384: xmlsec.Transform.RSA_SHA384,
OneLogin_Saml2_Constants.RSA_SHA512: xmlsec.Transform.RSA_SHA512
}
sign_algorithm_transform = sign_algorithm_transform_map.get(algorithm, xmlsec.Transform.RSA_SHA1)
dsig_ctx.verify_binary(compat.to_bytes(signed_query),
sign_algorithm_transform,
compat.to_bytes(signature))
return True
except xmlsec.Error as e:
if debug:
print(e)
return False
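# Hedged sketch of an HTTP-Redirect style flow built from the helpers above
# (the IdP URL and request_data values are illustrative; authn_request_xml and
# relay_state would be produced elsewhere):
#
#   saml_request = OneLogin_Saml2_Utils.deflate_and_base64_encode(authn_request_xml)
#   url = OneLogin_Saml2_Utils.redirect(
#       'https://idp.example.com/sso',
#       parameters={'SAMLRequest': saml_request, 'RelayState': relay_state},
#       request_data={'http_host': 'sp.example.com', 'script_name': '/sso/start'},
#   )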
| jkgneu12/python3-saml | src/onelogin/saml2/utils.py | Python | mit | 32,408 |
#!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
from Tkinter import *
import Pmw
import os
import numpy
class Ekin_map_annotate:
def map_datatab2structure(self):
"""If the PEATDB record has a structure, then we allow the user to map each datatab
to a specific part of the protein.
One can map a datatab to an atom, a residue, a chain, or define a structural group and map to it"""
if not self.parent:
import tkMessageBox
tkMessageBox.showinfo("No PEAT",
"This option is only available when Ekin is started from PEAT",
parent=self.ekin_win)
return
#
# Do we have a record name
#
if not self.protein:
import tkMessageBox
tkMessageBox.showinfo("No PEAT record",
"This option is only available when Ekin has been started by clicking a PEAT record",
parent=self.ekin_win)
return
#
# Is there a structure?
#
error=None
if not self.parent.data.has_key('DBinstance'):
error=1
else:
DB=self.parent.data['DBinstance'].DB
if not DB[self.protein].has_key('Structure'):
error=1
else:
print 'Trying to get PDB'
self.pdblines,X=self.parent.get_structure(self.protein,'Structure')
if not self.pdblines:
error=1
if error:
import tkMessageBox
tkMessageBox.showinfo("No Structure in PEAT",
"This option is only available when the PEAT record has a structure",
parent=self.ekin_win)
return
#
# Open the mapping window
#
mapper_win=Toplevel()
mapper_win.title('Map datatab to structure. %s - %s' %(self.protein,self.field))
self.set_geometry(self.ekin_win,mapper_win)
#
# Mapping Manager
#
row=0
Label(mapper_win,text='Mapping Manager',bg='lightblue').grid(row=row,column=0,columnspan=3,sticky='news')
row=row+1
Label(mapper_win,textvariable=self.currentdataset.get()).grid(row=row,column=0,columnspan=3,sticky='news')
#
# Headers
#
#row=row+1
#Label(mapper_win,text='Structural group type').grid(row=row,column=0,sticky='news')
#Label(mapper_win,text='Structural element').grid(row=row,column=1,sticky='news')
#Label(mapper_win,text='Datatab property').grid(row=row,column=2,sticky='news')
#
# Structural groupings for this protein
#
#if not DB[self.protein].has_key('structgroups'):
# DB[self.protein]['structgroups']={}
#structgroups=DB[self.protein]['structgroups'].keys()
#
# Load the residue definitions
#
import Protool.mutate
self.M_instance=Protool.mutate.Mutate(onlydefs=1)
self.AAdefs=self.M_instance.aadefs
#
# Struct group types
#
row=row+1
listbox_height=5
self.group_type_box = Pmw.ScrolledListBox(mapper_win,
items=['Residues','Atoms','Titratable groups'],
labelpos='nw',
label_text='Group type',
listbox_height = listbox_height,
usehullsize = 1,
hull_width = 200,
hull_height = 100,
selectioncommand=self.update_elements)
self.group_type_box.grid(row=row,column=0,columnspan=1,sticky='news')
self.group_type_box.configure(listbox_bg='white')
self.group_type_box.configure(listbox_selectmode='single')
self.group_type_box.configure(listbox_exportselection=0)
#
#
# Dropdown list of elements of each structgroup type
#
self.group_elements_box = Pmw.ScrolledListBox(mapper_win,
items=[],
labelpos='nw',
label_text='Group Elements',
listbox_height = listbox_height,
usehullsize = 1,
hull_width = 200,
hull_height = 100)
self.group_elements_box.grid(row=row,column=1,columnspan=1,sticky='news')
self.group_elements_box.configure(listbox_bg='white')
self.group_elements_box.configure(listbox_selectmode='extended')
self.group_elements_box.configure(listbox_exportselection=0)
# Parameters that we can map to structgroups
import Fitter
self.FIT=Fitter.FITTER('1 pKa 2 Chemical shifts',self)
self.dataprops=['Data source']+self.FIT.parameter_names
self.data_prop_box = Pmw.ScrolledListBox(mapper_win,
items=self.dataprops,
labelpos='nw',
label_text='Data properties',
listbox_height = listbox_height,
usehullsize = 1,
hull_width = 200,
hull_height = 100)
self.data_prop_box.grid(row=row,column=2,columnspan=1,sticky='news')
self.data_prop_box.configure(listbox_bg='white')
self.data_prop_box.configure(listbox_selectmode='extended')
self.data_prop_box.configure(listbox_exportselection=0)
#
# List of existing mappings
#
row=row+1
datatab=self.currentdataset.get()
print 'Loading this datatab in mapper',datatab
mappings=self.get_structmappings(datatab)
self.mapping_box = Pmw.ScrolledListBox(mapper_win,
items=mappings,
labelpos='nw',
label_text='Existing mappings',
listbox_height = 6,
usehullsize = 1,
hull_width = 200,
hull_height = 200)
self.mapping_box.grid(row=row,column=0,columnspan=3,sticky='news')
self.mapping_box.configure(listbox_selectmode='single')
self.mapping_box.configure(listbox_bg='white')
#
# Buttons
#
row=row+1
Button(mapper_win,text='Create mapping',bg='lightgreen',borderwidth=2, relief=GROOVE, command=self.create_mapping).grid(row=row,column=0,sticky='news',padx=2,pady=2)
Button(mapper_win,text='Delete mapping',bg='yellow',borderwidth=2, relief=GROOVE, command=self.delete_mapping).grid(row=row,column=1,sticky='news',padx=2,pady=2)
Button(mapper_win,text='Export',bg='#CFECEC',borderwidth=2, relief=GROOVE, command=self.export_dialog).grid(row=row,column=2,sticky='news',padx=2,pady=2)
row=row+1
Button(mapper_win,text='Close',borderwidth=2, relief=GROOVE,command=self.close_mapper_window).grid(row=row,column=1,columnspan=2,sticky='news',padx=2,pady=2)
#
# Structural group manager
#
#row=row+1
#Label(mapper_win,text='Structural Group Manager',bg='lightblue').grid(row=row,column=0,columnspan=3,sticky='news')
#import os, sys
#PEAT_dir=os.path.split(__file__)[0]
#sys.path.append(PEAT_dir)
#import protein_selector
#row=row+1
#SEL=protein_selector.select_residue(mapper_win,self.pdblines)
#SEL.box.grid(row=row,column=0)
##
#row=row+1
#Label(mapper_win,text='Atoms').grid(row=row,column=1)
#row=row+1
#Button(mapper_win,text='Create new structural grouping',command=self.create_new_structgroup).grid(row=row,column=0)
#Button(mapper_win,text='Add to structural grouping',command=self.add_to_structgroup).grid(row=row,column=1)
#Button(mapper_win,text='Close',command=mapper_win.destroy).grid(row=row,column=2,sticky='news')
mapper_win.rowconfigure(2,weight=1)
self.mapper_win=mapper_win
self.mapper_win.transient(master=self.ekin_win)
return
#
# ----
#
def close_mapper_window(self):
"""Close the mapping window and delete references to it"""
self.mapper_win.destroy()
if hasattr(self,"mapper_win"):
delattr(self,"mapper_win")
return
#
# ----
#
def update_elements(self):
"""Insert a new dropdown list for the element"""
#
# Get the group type
#
elements=None
group_type=self.group_type_box.getcurselection()[0]
import Protool
if group_type=='Residues':
P=Protool.structureIO()
P.parsepdb(self.pdblines)
residues=P.residues.keys()
residues.sort()
elements=[]
for res in residues:
elements.append('%s %s' %(res,P.resname(res)))
elif group_type=='Atoms':
P=Protool.structureIO()
P.parsepdb(self.pdblines)
atoms=P.atoms.keys()
for res in P.residues.keys():
resname=P.resname(res)
if self.AAdefs.has_key(resname):
defatoms=self.AAdefs[resname]['atoms']
#print defatoms
for defatom,coord,dummy in defatoms:
atom_name='%s:%s' %(res,defatom)
if not P.atoms.has_key(atom_name):
atoms.append(atom_name)
#print 'Adding',atom_name
atoms.sort()
elements=[]
for at in atoms:
elements.append(at)
elif group_type=='Titratable groups':
P=Protool.structureIO()
P.parsepdb(self.pdblines)
P.get_titratable_groups()
titgrps=P.titratable_groups.keys()
titgrps.sort()
elements=[]
for res in titgrps:
for titgrp in P.titratable_groups[res]:
name='%s %s' %(res,titgrp['name'])
elements.append(name)
else:
            print 'Unknown group type',group_type
#
# Make the new dropdown list
#
if elements:
self.group_elements_box.setlist(elements)
return
#
# -----
#
def create_mapping(self):
"""Create the mapping"""
g_type=self.group_type_box.getcurselection()
if len(g_type)==0:
return
g_type=g_type[0]
g_elements=self.group_elements_box.getcurselection()
props=self.data_prop_box.getcurselection()
#
if not getattr(self,'structmappings',None):
self.structmappings={}
datatab=self.currentdataset.get()
if not self.structmappings.has_key(datatab):
self.structmappings[datatab]={}
#
# Get the dict of current mappings
#
curmappings=self.structmappings[datatab]
map_keys=curmappings.keys()
map_keys.sort()
#
# Get the number of the last mapping
#
last_num=0
if len(map_keys)>0:
last_num=map_keys[-1]
#
# Add the new mapping
#
if props and g_elements and g_type:
self.structmappings[datatab][last_num+1]={'Group type':g_type,'Group elements':g_elements,'Data property':props}
#
# Display the updated list of mappings
#
mappings=self.get_structmappings(datatab)
self.mapping_box.setlist(mappings)
return
#
# ----
#
def get_structmappings(self,datatab):
"""Get a printable list of structural mappings for this datatab"""
if not getattr(self,'structmappings',None):
return []
if self.structmappings.has_key(datatab):
map_keys=self.structmappings[datatab].keys()
map_keys.sort()
mappings=[]
for map_key in map_keys:
thismap=self.structmappings[datatab][map_key]
mappings.append('%2d: %s mapped to type "%s" elements %s' %(map_key,thismap['Data property'],thismap['Group type'],thismap['Group elements']))
else:
mappings=[]
return mappings
#
# -----
#
def delete_mapping(self):
"""Delete a structmapping"""
delete=self.mapping_box.getcurselection()
if len(delete)==0:
print 'length is zero'
return
delete=str(delete[0])
number=int(delete.split(':')[0])
print 'NUMBER',number
datatab=self.currentdataset.get()
print self.structmappings.keys()
if self.structmappings.has_key(datatab):
if self.structmappings[datatab].has_key(number):
del self.structmappings[datatab][number]
mappings=self.get_structmappings(datatab)
self.mapping_box.setlist(mappings)
return
#
# -----
#
def update_mapping_window(self):
"""Update the mapping window when we change datatabs"""
#
# Update list of current mappings
#
datatab=self.currentdataset.get()
mappings=self.get_structmappings(datatab)
self.mapping_box.setlist(mappings)
#
# Update List of parameters
#
dataprops=['Data source']+self.FIT.parameter_names
self.data_prop_box.setlist(dataprops)
return
def get_assigned(self):
"""Get all unique assigned elements from the mapping dict"""
if not getattr(self,'structmappings',None):
return []
assigned=[]
for key in self.structmappings.keys():
for val in self.structmappings[key].keys():
elements=self.structmappings[key][val]['Group elements']
for e in elements:
if not e in assigned:
assigned.append(e)
return assigned
#
# -----
#
def export_dialog(self):
if hasattr(self, 'export_win'):
if self.export_win != None :
self.export_win.deiconify()
return
self.export_win=Toplevel()
self.export_win.title('Export mappings')
self.set_geometry(self.ekin_win,self.export_win)
#self.setgeometry(self.ekin_win,self.export_win)
self.grouptype = StringVar() #group type
grptypes=['Residues','Atoms','Titratable groups','Any']
self.grouptype.set(grptypes[0])
self.assignedto = StringVar() #titratable group assigned
#self.expdataprops=['Data source']+self.FIT.parameter_names
self.expdataprops=['Data source','pK','span','offset']
self.dataprop = StringVar() #required property
self.dataprop.set(self.expdataprops[0])
elements=self.get_assigned()
elements.append('All')
elements.sort()
self.assignedto.set(elements[0])
row=0
help=Label(self.export_win,text='Use the list of currently assigned mappings to select\n'
+'an assigned residue/element from.\n'
+'A file will be created for the chosen group element',
bg='#CFECEC' )
help.grid(row=row,column=0,columnspan=2,sticky='news',padx=2,pady=2)
row=1
#drop down labels for grp element, data property and assignedto
Label(self.export_win,text='Assigned:').grid(row=row,column=0,sticky='news',padx=2,pady=2)
w = OptionMenu(self.export_win, self.assignedto, *elements)
w.grid(row=row,column=1,sticky='news',padx=2,pady=2)
'''row=row+1
Label(self.export_win,text='group type:').grid(row=row,column=0,sticky='news',padx=2,pady=2)
w = OptionMenu(self.export_win, self.grouptype, *grptypes)
w.grid(row=row,column=1,sticky='news',padx=2,pady=2)'''
row=row+1
Label(self.export_win,text='data property:').grid(row=row,column=0,sticky='news',padx=2,pady=2)
print self.dataprops
w = OptionMenu(self.export_win, self.dataprop, *self.expdataprops)
w.grid(row=row,column=1,sticky='news',padx=2,pady=2)
row=row+1
Button(self.export_win,text='Cancel',bg='#CFECEC',borderwidth=2, relief=GROOVE, width=10,
command=self.close_exp_dialog).grid(row=row,column=0,sticky='news',padx=2,pady=2)
Button(self.export_win,text='Go',bg='#CFECEC',borderwidth=2, relief=GROOVE, width=10,
command=self.export_as_csv).grid(row=row,column=1,sticky='news',padx=2,pady=2)
return
def close_exp_dialog(self):
if hasattr(self,'export_win'):
self.export_win.destroy()
self.export_win=None
return
def choose_savedir(self):
"""Get a directory to save to"""
import tkFileDialog, os
if self.defaultsavedir == None:
self.defaultsavedir = os.getcwd()
dirname=tkFileDialog.askdirectory(parent=self.export_win,
initialdir=self.defaultsavedir)
if not dirname:
print 'Returning'
            return None
return dirname
#
# -----
#
def export_as_csv(self):
"""export struct mapping for specific filters as csv"""
#prompt user for save dir
savedir = self.choose_savedir()
if savedir==None:
return
if self.currplatform == 'Windows':
print 'using windows'
import List_Utils
#sub function for tidiness
def getexplist(assignedto):
reslist={}
reskeys=[]
for key in self.structmappings.keys():
for n in self.structmappings[key].keys():
#check if any dataprop list element contains the key eg 'pK' in pK1, pK2 etc..
datapropkey = List_Utils.elements_contain(self.structmappings[key][n]['Data property'], self.dataprop.get())
if datapropkey != None:
#try to extract the value from the ekin dataset
val = self.get_dataprop_value(key, datapropkey)
print 'found ',val,' for ', datapropkey
#print 'val: ', val
#iterate over group elements list
elements=self.structmappings[key][n]['Group elements']
for e in elements:
if assignedto in e:
reslist[key] = ([key,val])
reskeys.append(key)
if len(reslist.keys())==0:
return
#write the list to a csv file, first add heading
import string
#remove whitespace
name=string.join(assignedto.split(), '')
name=name.replace(':', '')
if self.currplatform == 'Windows':
filename = savedir+'/'+name+'.csv'
else:
filename = os.path.join(savedir, name+'.csv')
print filename
writer = open(filename, "wb")
writer.write(assignedto+'\n')
import csv
csvwriter = csv.writer(open(filename, "a"))
keyssorted = self.sort_by_Num(reskeys)
#print reslist
#print keyssorted
p=[];names=[]
#use key sorted mapping to list residues by number
for item in keyssorted:
k=item[1]
csvwriter.writerow(reslist[k])
p.append(reslist[k][1])
names.append(k)
writer.close()
#do a plot and save to same dir as file
try:
import pylab
except:
return
f=pylab.figure(figsize=(10,4))
pylab.rc("font", family='serif')
a=f.add_subplot(111)
ind=numpy.arange(len(names))
a.bar(ind, p , linewidth=0.5)
a.set_xticks(ind)
a.set_ylabel(self.dataprop.get())
a.set_title(name+' assignments')
a.set_xticklabels(names, rotation='vertical', size=5)
f.savefig(savedir+'/'+name+'.png',dpi=300)
return
if self.assignedto.get() == 'All':
for a in self.get_assigned():
getexplist(a)
else:
getexplist(self.assignedto.get())
self.close_exp_dialog()
return
#
# -----
#
def get_dataprop_value(self, key, dataprop):
"""Annoying but necessary helper func to get value of assigned property
from the ekin fit data"""
tabnum = self.currentdataset.get()
if self.fitter_data.has_key(key):
fitdata = self.fitter_data[key]
else:
return None
model = fitdata['model']
#extracts index number from fit model field name
i = self.FIT.get_param_index(dataprop, model)
print tabnum, key
print fitdata, dataprop, i
if i!=None:
val = fitdata[i]
return val
#
# -----
#
def create_new_structgroup(self):
return
#
# ------
#
def add_to_structgroup(self):
return
def sort_by_Num(self, p):
"""Sort text keys by contained numbers - should be put in utils class"""
splitkeys={}
import re
r=re.compile('\D')
for k in p:
splitkeys[k]=int(r.split(k)[1])
items = splitkeys.items()
items = [(v, k) for (k, v) in items]
items.sort()
return items
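# Hedged sketch of the sort_by_Num helper above (assuming mutation-style keys
# such as 'D140N': one letter, a residue number, then more text):
#
#   self.sort_by_Num(['E33A', 'D140N', 'K12R'])
#   # -> [(12, 'K12R'), (33, 'E33A'), (140, 'D140N')]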
| dmnfarrell/peat | PEATDB/Ekin/Ekin_map.py | Python | mit | 23,383 |
from temp_tools import TestClient
from test_template import ApiTestTemplate
class TokensTest(ApiTestTemplate):
def setUp(self):
super(TokensTest, self).setUp()
TestClient.execute("""TRUNCATE auth_token""")
self.test_token_data = {'description': 'Test token 1',
'scope_push': True,
'scope_pull': True}
def test_tokens_root(self):
# test unauthorized
r = TestClient.post('api/v1/projects/%s/tokens' % self.project_id, data=self.test_token_data,
headers=TestClient.get_job_authorization(self.job_id))
self.assertEqual(r['message'], 'Unauthorized')
# test token creation
r = TestClient.post('api/v1/projects/%s/tokens' % self.project_id, data=self.test_token_data,
headers=TestClient.get_user_authorization(self.user_id))
self.assertEqual(r['message'], 'Successfully added token')
self.assertEqual(r['status'], 200)
# test token receiving
r = TestClient.get('api/v1/projects/%s/tokens' % self.project_id,
headers=TestClient.get_user_authorization(self.user_id))
self.assertGreater(len(r), 0)
self.assertEqual(r[0]['description'], self.test_token_data['description'])
self.assertEqual(r[0]['scope_push'], self.test_token_data['scope_push'])
self.assertEqual(r[0]['scope_pull'], self.test_token_data['scope_pull'])
def test_tokens_delete(self):
r = TestClient.execute_one('''
INSERT INTO auth_token (description, scope_push, scope_pull, project_id)
VALUES (%s, %s, %s, %s) RETURNING id
''', [self.test_token_data['description'], self.test_token_data['scope_push'],
self.test_token_data['scope_pull'], self.project_id])
token_id = r['id']
r = TestClient.execute_one("""SELECT count(*) FROM auth_token WHERE id = '%s'""" % token_id)
self.assertGreater(r[0], 0)
r = TestClient.delete('api/v1/projects/%s/tokens/%s' % (self.project_id, token_id),
headers=TestClient.get_user_authorization(self.user_id))
self.assertEqual(r['message'], 'Successfully deleted token')
self.assertEqual(r['status'], 200)
r = TestClient.execute_one("""SELECT count(*) FROM auth_token WHERE id = '%s'""" % token_id)
self.assertEqual(r[0], 0)
| InfraBox/infrabox | infrabox/test/api/tokens_test.py | Python | mit | 2,458 |
import six
#==============================================================================
# https://docs.python.org/2/library/csv.html
#==============================================================================
if six.PY2:
import csv
import codecs
import cStringIO
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
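    # Illustrative usage inside the PY2 branch (file name and row values are assumptions):
    #
    #   with open("out.csv", "wb") as f:
    #       writer = UnicodeWriter(f)
    #       writer.writerow([u"name", u"caf\xe9"])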
| memee/py-tons | pytons/files.py | Python | mit | 2,232 |
from app import db
class Alternative(db.Model):
id = db.Column(db.Integer, primary_key=True)
experiment = db.Column(db.String(500), unique=True)
copy = db.Column(db.String(2500))
def __init__(self, id, experiment, copy):
self.id = id
self.experiment = experiment
self.copy = copy
def __repr__(self):
return "<Alt {0} {1} {2}>".format(self.id, self.experiment, self.copy)
| kwikiel/bounce | models.py | Python | mit | 427 |
from rest_framework import serializers
from .models import User, Activity, Period
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email')
extra_kwargs = {
'url': {'view_name': 'timeperiod:user-detail'},
}
class ActivitySerializer(serializers.HyperlinkedModelSerializer):
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = Activity
fields = ('url', 'user', 'name', 'total', 'running')
extra_kwargs = {
'url': {'view_name': 'timeperiod:activity-detail'},
'user': {'view_name': 'timeperiod:user-detail'},
}
class PeriodSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Period
fields = ('url', 'activity', 'start', 'end', 'valid')
extra_kwargs = {
'url': {'view_name': 'timeperiod:period-detail'},
'activity': {'view_name': 'timeperiod:activity-detail'},
}
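# Illustrative use (viewset name is an assumption): a DRF ViewSet would typically set
#   serializer_class = ActivitySerializer
# so that the 'timeperiod:*' view names referenced above can be resolved into URLs.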
| maurob/timeperiod | serializers.py | Python | mit | 1,063 |
#
# Metrix++, Copyright 2009-2013, Metrix++ Project
# Link: http://metrixplusplus.sourceforge.net
#
# This file is a part of Metrix++ Tool.
#
# Metrix++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Metrix++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metrix++. If not, see <http://www.gnu.org/licenses/>.
#
if __name__ == '__main__':
import metrixpp
metrixpp.start() | vijaysm/mmodel-software-analysis | contrib/metrixplusplus-1.3.168/metrix++.py | Python | mit | 860 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from frappe.utils.minify import JavascriptMinify
"""
Build the `public` folders and setup languages
"""
import os, frappe, json, shutil, re
# from cssmin import cssmin
app_paths = None
def setup():
global app_paths
pymodules = []
for app in frappe.get_all_apps(True):
try:
pymodules.append(frappe.get_module(app))
except ImportError: pass
app_paths = [os.path.dirname(pymodule.__file__) for pymodule in pymodules]
def bundle(no_compress, make_copy=False, verbose=False):
"""concat / minify js files"""
# build js files
setup()
make_asset_dirs(make_copy=make_copy)
build(no_compress, verbose)
def watch(no_compress):
"""watch and rebuild if necessary"""
setup()
import time
compile_less()
build(no_compress=True)
while True:
compile_less()
if files_dirty():
build(no_compress=True)
time.sleep(3)
def make_asset_dirs(make_copy=False):
assets_path = os.path.join(frappe.local.sites_path, "assets")
for dir_path in [
os.path.join(assets_path, 'js'),
os.path.join(assets_path, 'css')]:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# symlink app/public > assets/app
for app_name in frappe.get_all_apps(True):
pymodule = frappe.get_module(app_name)
app_base_path = os.path.abspath(os.path.dirname(pymodule.__file__))
symlinks = []
symlinks.append([os.path.join(app_base_path, 'public'), os.path.join(assets_path, app_name)])
symlinks.append([os.path.join(app_base_path, 'docs'), os.path.join(assets_path, app_name + '_docs')])
for source, target in symlinks:
source = os.path.abspath(source)
if not os.path.exists(target) and os.path.exists(source):
if make_copy:
shutil.copytree(source, target)
else:
os.symlink(source, target)
def build(no_compress=False, verbose=False):
assets_path = os.path.join(frappe.local.sites_path, "assets")
for target, sources in get_build_maps().iteritems():
pack(os.path.join(assets_path, target), sources, no_compress, verbose)
def get_build_maps():
"""get all build.jsons with absolute paths"""
# framework js and css files
build_maps = {}
for app_path in app_paths:
path = os.path.join(app_path, 'public', 'build.json')
if os.path.exists(path):
with open(path) as f:
try:
for target, sources in json.loads(f.read()).iteritems():
# update app path
source_paths = []
for source in sources:
if isinstance(source, list):
s = frappe.get_pymodule_path(source[0], *source[1].split("/"))
else:
s = os.path.join(app_path, source)
source_paths.append(s)
build_maps[target] = source_paths
except ValueError, e:
print path
print 'JSON syntax error {0}'.format(str(e))
return build_maps
timestamps = {}
def pack(target, sources, no_compress, verbose):
from cStringIO import StringIO
outtype, outtxt = target.split(".")[-1], ''
jsm = JavascriptMinify()
for f in sources:
suffix = None
if ':' in f: f, suffix = f.split(':')
if not os.path.exists(f) or os.path.isdir(f):
print "did not find " + f
continue
timestamps[f] = os.path.getmtime(f)
try:
with open(f, 'r') as sourcefile:
data = unicode(sourcefile.read(), 'utf-8', errors='ignore')
extn = f.rsplit(".", 1)[1]
if outtype=="js" and extn=="js" and (not no_compress) and suffix!="concat" and (".min." not in f):
tmpin, tmpout = StringIO(data.encode('utf-8')), StringIO()
jsm.minify(tmpin, tmpout)
minified = tmpout.getvalue()
if minified:
outtxt += unicode(minified or '', 'utf-8').strip('\n') + ';'
if verbose:
print "{0}: {1}k".format(f, int(len(minified) / 1024))
elif outtype=="js" and extn=="html":
# add to frappe.templates
outtxt += html_to_js_template(f, data)
else:
outtxt += ('\n/*\n *\t%s\n */' % f)
outtxt += '\n' + data + '\n'
except Exception:
print "--Error in:" + f + "--"
print frappe.get_traceback()
if not no_compress and outtype == 'css':
pass
#outtxt = cssmin(outtxt)
with open(target, 'w') as f:
f.write(outtxt.encode("utf-8"))
print "Wrote %s - %sk" % (target, str(int(os.path.getsize(target)/1024)))
def html_to_js_template(path, content):
'''returns HTML template content as Javascript code, adding it to `frappe.templates`'''
return """frappe.templates["{key}"] = '{content}';\n""".format(\
key=path.rsplit("/", 1)[-1][:-5], content=scrub_html_template(content))
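# Illustrative example (file name and markup are assumptions):
#   html_to_js_template("templates/item.html", "<div>{{ title }}</div>")
# returns the line:  frappe.templates["item"] = '<div>{{ title }}</div>';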
def scrub_html_template(content):
'''Returns HTML content with removed whitespace and comments'''
# remove whitespace to a single space
content = re.sub("\s+", " ", content)
# strip comments
content = re.sub("(<!--.*?-->)", "", content)
	return content.replace("'", "\\'")
def files_dirty():
for target, sources in get_build_maps().iteritems():
for f in sources:
if ':' in f: f, suffix = f.split(':')
if not os.path.exists(f) or os.path.isdir(f): continue
if os.path.getmtime(f) != timestamps.get(f):
print f + ' dirty'
return True
else:
return False
def compile_less():
from distutils.spawn import find_executable
if not find_executable("lessc"):
return
for path in app_paths:
less_path = os.path.join(path, "public", "less")
if os.path.exists(less_path):
for fname in os.listdir(less_path):
if fname.endswith(".less") and fname != "variables.less":
fpath = os.path.join(less_path, fname)
mtime = os.path.getmtime(fpath)
if fpath in timestamps and mtime == timestamps[fpath]:
continue
timestamps[fpath] = mtime
print "compiling {0}".format(fpath)
css_path = os.path.join(path, "public", "css", fname.rsplit(".", 1)[0] + ".css")
os.system("lessc {0} > {1}".format(fpath, css_path))
| rohitwaghchaure/frappe | frappe/build.py | Python | mit | 5,802 |
import hashlib
import os
import tempfile
import zipfile
from bs4 import BeautifulSoup
from django.test import Client
from django.test import TestCase
from mock import patch
from ..models import LocalFile
from ..utils.paths import get_content_storage_file_path
from kolibri.core.auth.test.helpers import provision_device
from kolibri.utils.tests.helpers import override_option
DUMMY_FILENAME = "hashi123.js"
empty_content = '<html><head><script src="/static/content/hashi123.js"></script></head><body></body></html>'
@patch("kolibri.core.content.views.get_hashi_filename", return_value=DUMMY_FILENAME)
@override_option("Paths", "CONTENT_DIR", tempfile.mkdtemp())
class ZipContentTestCase(TestCase):
"""
Testcase for zipcontent endpoint
"""
index_name = "index.html"
index_str = "<html></html>"
other_name = "other.html"
other_str = "<html><head></head></html>"
script_name = "script.html"
script_str = "<html><head><script>test</script></head></html>"
async_script_name = "async_script.html"
async_script_str = (
'<html><head><script async src="url/url.js"></script></head></html>'
)
empty_html_name = "empty.html"
empty_html_str = ""
doctype_name = "doctype.html"
doctype = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
"""
doctype_str = doctype + "<html><head><script>test</script></head></html>"
html5_doctype_name = "html5_doctype.html"
html5_doctype = "<!DOCTYPE HTML>"
html5_doctype_str = (
html5_doctype + "<html><head><script>test</script></head></html>"
)
test_name_1 = "testfile1.txt"
test_str_1 = "This is a test!"
test_name_2 = "testfile2.txt"
test_str_2 = "And another test..."
embedded_file_name = "test/this/path/test.txt"
embedded_file_str = "Embedded file test"
def setUp(self):
self.client = Client()
provision_device()
self.hash = hashlib.md5("DUMMYDATA".encode()).hexdigest()
self.extension = "zip"
self.filename = "{}.{}".format(self.hash, self.extension)
self.zip_path = get_content_storage_file_path(self.filename)
zip_path_dir = os.path.dirname(self.zip_path)
if not os.path.exists(zip_path_dir):
os.makedirs(zip_path_dir)
with zipfile.ZipFile(self.zip_path, "w") as zf:
zf.writestr(self.index_name, self.index_str)
zf.writestr(self.other_name, self.other_str)
zf.writestr(self.script_name, self.script_str)
zf.writestr(self.async_script_name, self.async_script_str)
zf.writestr(self.empty_html_name, self.empty_html_str)
zf.writestr(self.doctype_name, self.doctype_str)
zf.writestr(self.html5_doctype_name, self.html5_doctype_str)
zf.writestr(self.test_name_1, self.test_str_1)
zf.writestr(self.test_name_2, self.test_str_2)
zf.writestr(self.embedded_file_name, self.embedded_file_str)
self.zip_file_obj = LocalFile(
id=self.hash, extension=self.extension, available=True
)
self.zip_file_base_url = self.zip_file_obj.get_storage_url()
def test_zip_file_url_reversal(self, filename_patch):
file = LocalFile(id=self.hash, extension=self.extension, available=True)
self.assertEqual(
file.get_storage_url(), "/zipcontent/{}/".format(self.filename)
)
def test_non_zip_file_url_reversal(self, filename_patch):
file = LocalFile(id=self.hash, extension="otherextension", available=True)
filename = file.get_filename()
self.assertEqual(
file.get_storage_url(),
"/content/storage/{}/{}/{}".format(filename[0], filename[1], filename),
)
def test_zip_file_internal_file_access(self, filename_patch):
# test reading the data from file #1 inside the zip
response = self.client.get(self.zip_file_base_url + self.test_name_1)
self.assertEqual(next(response.streaming_content).decode(), self.test_str_1)
# test reading the data from file #2 inside the zip
response = self.client.get(self.zip_file_base_url + self.test_name_2)
self.assertEqual(next(response.streaming_content).decode(), self.test_str_2)
def test_nonexistent_zip_file_access(self, filename_patch):
bad_base_url = self.zip_file_base_url.replace(
self.zip_file_base_url[20:25], "aaaaa"
)
response = self.client.get(bad_base_url + self.test_name_1)
self.assertEqual(response.status_code, 404)
def test_zip_file_nonexistent_internal_file_access(self, filename_patch):
response = self.client.get(self.zip_file_base_url + "qqq" + self.test_name_1)
self.assertEqual(response.status_code, 404)
def test_non_allowed_file_internal_file_access(self, filename_patch):
response = self.client.get(
self.zip_file_base_url.replace("zip", "png") + self.test_name_1
)
self.assertEqual(response.status_code, 404)
def test_not_modified_response_when_if_modified_since_header_set(
self, filename_patch
):
caching_client = Client(HTTP_IF_MODIFIED_SINCE="Sat, 10-Sep-2016 19:14:07 GMT")
response = caching_client.get(self.zip_file_base_url + self.test_name_1)
self.assertEqual(response.status_code, 304)
def test_content_security_policy_header(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.test_name_1)
self.assertEqual(
response.get("Content-Security-Policy"),
"default-src 'self' 'unsafe-inline' 'unsafe-eval' data: blob: http://testserver",
)
def test_content_security_policy_header_http_referer(self, filename_patch):
response = self.client.get(
self.zip_file_base_url + self.test_name_1,
HTTP_REFERER="http://testserver:1234/iam/a/real/path/#thatsomeonemightuse",
)
self.assertEqual(
response.get("Content-Security-Policy"),
"default-src 'self' 'unsafe-inline' 'unsafe-eval' data: blob: http://testserver:1234",
)
def test_access_control_allow_origin_header(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.test_name_1)
self.assertEqual(response.get("Access-Control-Allow-Origin"), "*")
response = self.client.options(self.zip_file_base_url + self.test_name_1)
self.assertEqual(response.get("Access-Control-Allow-Origin"), "*")
def test_x_frame_options_header(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.test_name_1)
self.assertEqual(response.get("X-Frame-Options", ""), "")
def test_access_control_allow_headers(self, filename_patch):
headerval = "X-Penguin-Dance-Party"
response = self.client.options(
self.zip_file_base_url + self.test_name_1,
HTTP_ACCESS_CONTROL_REQUEST_HEADERS=headerval,
)
self.assertEqual(response.get("Access-Control-Allow-Headers", ""), headerval)
response = self.client.get(
self.zip_file_base_url + self.test_name_1,
HTTP_ACCESS_CONTROL_REQUEST_HEADERS=headerval,
)
self.assertEqual(response.get("Access-Control-Allow-Headers", ""), headerval)
def test_request_for_html_no_head_return_hashi_modified_html(self, filename_patch):
response = self.client.get(self.zip_file_base_url)
content = '<html><head><script src="/static/content/hashi123.js"></script></head><body></body></html>'
self.assertEqual(response.content.decode("utf-8"), content)
def test_request_for_html_body_no_script_return_hashi_modified_html(
self, filename_patch
):
response = self.client.get(self.zip_file_base_url + self.other_name)
self.assertEqual(response.content.decode("utf-8"), empty_content)
def test_request_for_html_body_script_return_hashi_modified_html(
self, filename_patch
):
response = self.client.get(self.zip_file_base_url + self.script_name)
content = (
'<html><head><template hashi-script="true"><script>test</script></template><script src="/static/content/hashi123.js"></script></head>'
+ "<body></body></html>"
)
self.assertEqual(response.content.decode("utf-8"), content)
def test_request_for_html_body_script_with_extra_slash_return_hashi_modified_html(
self, filename_patch
):
response = self.client.get(self.zip_file_base_url + "/" + self.script_name)
content = (
'<html><head><template hashi-script="true"><script>test</script></template><script src="/static/content/hashi123.js"></script></head>'
+ "<body></body></html>"
)
self.assertEqual(response.content.decode("utf-8"), content)
def test_request_for_embedded_file_return_embedded_file(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.embedded_file_name)
self.assertEqual(
next(response.streaming_content).decode(), self.embedded_file_str
)
def test_request_for_embedded_file_with_double_slashes_return_embedded_file(
self, filename_patch
):
response = self.client.get(
self.zip_file_base_url + self.embedded_file_name.replace("/", "//")
)
self.assertEqual(
next(response.streaming_content).decode(), self.embedded_file_str
)
def test_request_for_html_body_script_skip_get_param_return_unmodified_html(
self, filename_patch
):
response = self.client.get(
self.zip_file_base_url + self.script_name + "?SKIP_HASHI=true"
)
self.assertEqual(next(response.streaming_content).decode(), self.script_str)
def test_request_for_html_doctype_return_with_doctype(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.doctype_name)
content = response.content.decode("utf-8")
self.assertEqual(
content[:92].lower().replace(" ", " "), self.doctype.strip().lower()
)
def test_request_for_html5_doctype_return_with_doctype(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.html5_doctype_name)
content = response.content.decode("utf-8")
self.assertEqual(content[:15].lower(), self.html5_doctype.strip().lower())
def test_request_for_html_body_script_return_correct_length_header(
self, filename_patch
):
response = self.client.get(self.zip_file_base_url + self.script_name)
file_size = len(
'<html><head><template hashi-script="true"><script>test</script></template><script src="/static/content/hashi123.js"></script></head>'
+ "<body></body></html>"
)
self.assertEqual(int(response["Content-Length"]), file_size)
def test_request_for_html_body_async_script_return_hashi_modified_html(
self, filename_patch
):
response = self.client.get(self.zip_file_base_url + self.async_script_name)
soup = BeautifulSoup(response.content, "html.parser")
template = soup.find("template")
self.assertEqual(template.attrs["async"], "true")
def test_request_for_html_empty_html_no_modification(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.empty_html_name)
self.assertEqual(response.content.decode("utf-8"), empty_content)
def test_not_modified_response_when_if_modified_since_header_set_index_file(
self, filename_patch
):
caching_client = Client(HTTP_IF_MODIFIED_SINCE="Sat, 10-Sep-2016 19:14:07 GMT")
response = caching_client.get(self.zip_file_base_url)
self.assertEqual(response.status_code, 304)
def test_not_modified_response_when_if_modified_since_header_set_other_html_file(
self, filename_patch
):
caching_client = Client(HTTP_IF_MODIFIED_SINCE="Sat, 10-Sep-2016 19:14:07 GMT")
response = caching_client.get(self.zip_file_base_url + self.other_name)
self.assertEqual(response.status_code, 304)
| mrpau/kolibri | kolibri/core/content/test/test_zipcontent.py | Python | mit | 12,256 |
from django.http import HttpResponse
from django.shortcuts import render
from survey.models import Choice
from survey.forms import ChoiceForm
import csv
import random
# Create your views here.
def index(request):
examples = ['controlling Exposure', 'changing Temperature', 'modifying Highlights', 'changing Shadows', 'Zooming in/out', 'changing the Constrast']
if request.method == 'POST':
f = ChoiceForm(request.POST)
if f.is_valid():
newChoice = f.save()
if request.session.get('previous_responses', False):
prev_response_array = request.session['previous_responses']
prev_response_array.append({'program':newChoice.program, 'text':newChoice.text})
request.session['previous_responses'] = prev_response_array
else:
request.session['previous_responses'] = [{'program':newChoice.program, 'text':newChoice.text}];
return render(request, 'index.html', {'previous':1, 'previous_responses':request.session['previous_responses'], 'example':random.choice(examples)})
if request.session.get('previous_responses', False):
return render(request, 'index.html', {'previous':1, 'previous_responses':request.session['previous_responses'], 'example':random.choice(examples)})
else:
return render(request, 'index.html', {'previous':None, 'previous_responses':None, 'example':random.choice(examples)})
def responses(request):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="responses.csv"'
writer = csv.writer(response)
writer.writerow(['Date', 'Application', 'Suggested Operation'])
for aChoice in Choice.objects.all():
writer.writerow([aChoice.date, aChoice.program, aChoice.text])
return response
| thegouger/django-quicksurvey | surveyapp/survey/views.py | Python | mit | 1,774 |
"""
Run on cluster
"""
import argparse
import os
import itertools
import networkx as nx
import pandas as pd
from . import compare_cases
def generate_run(graph, iterations, epsilon_control, epsilon_damage,
out_dir, nodes=None, mem=6000, runtime=120, activate=''):
"""
Generate bash scripts for an array run in qsub/bsub cluster environments
``graph`` (string): can be either "regular", "scalefree", or the
path to a GraphML file.
``nodes`` must be given if graph is regular or scalefree.
Other default parameters as specified in the corresponding ``run_``
functions in compare_cases.py are used, and cannot be overriden here.
``activate`` (string): additional commands to execute before calling
sandpile (e.g. activating a virtualenv)
"""
if graph == 'regular' or graph == 'scalefree':
assert nodes is not None
runs = [i for i in itertools.product(epsilon_control, epsilon_damage)]
name = out_dir.replace("/", "_")
df_runs = pd.DataFrame(runs, columns=['epsilon_control', 'epsilon_damage'])
df_runs.to_csv(os.path.join(out_dir, 'iterations.csv'))
strings = ['#!/bin/sh\ncase "$1" in\n']
for index, run in enumerate(runs):
e1, ed = run
if nodes:
nodes_string = '--nodes={}'.format(nodes)
else:
nodes_string = ''
run_string = ('{idx}) {act}\n'
'sandpile {idx} {G} {i} {e1} {ed} {nodes}\n'
';;\n'.format(idx=index + 1,
G=graph, i=iterations,
e1=e1, ed=ed,
nodes=nodes_string,
act=activate))
strings.append(run_string)
strings.append('esac')
bsub_run_str = ('#!/bin/sh\n'
'#BSUB -J {name}[1-{to}]\n'
'#BSUB -R "rusage[mem={mem}]"\n'
'#BSUB -n 1\n'
'#BSUB -W {runtime}\n'
'#BSUB -o logs/run_%I.log\n\n'.format(name=name,
to=index + 1,
mem=mem,
runtime=runtime))
bsub_run_str += './array_run.sh ${LSB_JOBINDEX}\n'
qsub_run_str = ('#!/bin/sh\n'
'#$ -t 1-{to}\n'
'#$ -N {name}\n'
'#$ -j y -o logs/run_$TASK_ID.log\n'
'#$ -l mem_total={mem:.1f}G\n'
'#$ -cwd\n'.format(name=name, to=index + 1,
mem=mem / 1000))
qsub_run_str += './array_run.sh ${SGE_TASK_ID}\n'
with open(os.path.join(out_dir, 'array_run.sh'), 'w') as f:
for l in strings:
f.write(l + '\n')
with open(os.path.join(out_dir, 'run_bsub.sh'), 'w') as f:
f.write(bsub_run_str + '\n')
with open(os.path.join(out_dir, 'run_qsub.sh'), 'w') as f:
f.write(qsub_run_str + '\n')
with open(os.path.join(out_dir, 'prep.sh'), 'w') as f:
f.write('chmod +x *.sh\n')
f.write('mkdir logs\n')
f.write('mkdir results\n')
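# Illustrative call (argument values are assumptions, not taken from this project):
#   generate_run('scalefree', iterations=1000, epsilon_control=[0.0, 0.1],
#                epsilon_damage=[0.1, 0.2], out_dir='runs/example', nodes=100)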
def main():
parser = argparse.ArgumentParser(description='Run model.')
parser.add_argument('run_id', metavar='run_id', type=int)
parser.add_argument('graph', metavar='graph', type=str)
parser.add_argument('iterations', metavar='iterations', type=int)
parser.add_argument('epsilon_control', metavar='epsilon_control', type=float)
parser.add_argument('epsilon_damage', metavar='epsilon_damage', type=float)
parser.add_argument('--nodes', metavar='nodes', type=int)
args = parser.parse_args()
if args.graph == 'regular':
runner = compare_cases.run_regular
elif args.graph == 'scalefree':
runner = compare_cases.run_scalefree
else:
runner = compare_cases.run_on_graph
G = nx.read_graphml(args.graph)
G = G.to_undirected() # Force undirected
if runner == compare_cases.run_on_graph:
result = runner(G=G, iterations=args.iterations,
epsilon_control=args.epsilon_control,
epsilon_damage=args.epsilon_damage)
else:
result = runner(nodes=args.nodes, iterations=args.iterations,
epsilon_control=args.epsilon_control,
epsilon_damage=args.epsilon_damage)
(uncontrolled, controlled, df, costs) = result
df.to_csv('results/cascades_{:0>4d}.csv'.format(args.run_id))
with open('results/costs_{:0>4d}.csv'.format(args.run_id), 'w') as f:
f.write(str(costs[0]) + '\n')
f.write(str(costs[1]) + '\n')
if __name__ == '__main__':
main()
| sjpfenninger/sandpile | sandpile/cluster.py | Python | mit | 4,792 |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = 'http://mpdev.mattew.se'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = False
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
| mattew/mattew.github.io-src | publishconf.py | Python | mit | 531 |
'''
Created on Feb 20, 2013
@author: Maribel Acosta
@author: Fabian Floeck
'''
from wmf import dump
from difflib import Differ
from time import time
from structures.Revision import Revision
from structures.Paragraph import Paragraph
from structures.Sentence import Sentence
from structures.Word import Word
from structures import Text
from structures.Relation import Relation
from sys import argv,exit
import getopt
from copy import deepcopy, copy
# Container of revisions.
revisions = {}
revision_order = []
# Hash tables.
paragraphs_ht = {}
sentences_ht = {}
spam = []
# SPAM detection variables.
CHANGE_PERCENTAGE = -0.40
PREVIOUS_LENGTH = 1000
CURR_LENGTH = 1000
FLAG = "move"
UNMATCHED_PARAGRAPH = 0.0
WORD_DENSITY = 10
WORD_LEN = 100
# Word global id.
WORD_ID = 0
def analyseArticle(file_name):
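    """Iterate over all revisions of the article in the given dump file, skip
    revisions detected as vandalism/spam and compute token-level authorship.
    Returns (revisions, revision_order, relations)."""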
# Container of relationships.
relations = {}
# Revisions to compare.
revision_curr = Revision()
revision_prev = Revision()
text_curr = None
# Access the file.
dumpIterator = dump.Iterator(file_name)
# Iterate over the pages.
for page in dumpIterator.readPages():
i = 0
# Iterate over revisions of the article.
for revision in page.readRevisions():
vandalism = False
#print "processing rev", revision.getId()
# Update the information about the previous revision.
revision_prev = revision_curr
if (revision.getSha1() == None):
revision.setSha1(Text.calculateHash(revision.getText().encode("utf-8")))
if (revision.getSha1() in spam):
vandalism = True
#TODO: SPAM detection: DELETION
if (revision.getComment()!= None and revision.getComment().find(FLAG) > 0):
pass
else:
if (revision_prev.length > PREVIOUS_LENGTH) and (len(revision.getText()) < CURR_LENGTH) and (((len(revision.getText())-revision_prev.length)/float(revision_prev.length)) <= CHANGE_PERCENTAGE):
vandalism = True
revision_curr = revision_prev
#if (vandalism):
#print "---------------------------- FLAG 1"
#print "SPAM", revision.getId()
#print revision.getText()
#print
if (not vandalism):
# Information about the current revision.
revision_curr = Revision()
revision_curr.id = i
revision_curr.wikipedia_id = int(revision.getId())
revision_curr.length = len(revision.getText())
revision_curr.timestamp = revision.getTimestamp()
revision_curr.comment = revision.getComment()
# Relation of the current relation.
relation = Relation()
relation.revision = int(revision.getId())
relation.length = len(revision.getText())
# Some revisions don't have contributor.
if (revision.getContributor() != None):
revision_curr.contributor_id = revision.getContributor().getId()
revision_curr.contributor_name = revision.getContributor().getUsername().encode('utf-8')
relation.author = revision.getContributor().getUsername().encode('utf-8')
else:
revision_curr.contributor_id = 'Not Available ' + revision.getId()
                    revision_curr.contributor_name = 'Not Available ' + revision.getId()
relation.author = 'Not Available ' + revision.getId()
# Content within the revision.
text_curr = revision.getText().encode('utf-8')
text_curr = text_curr.lower()
revision_curr.content = text_curr
# Perform comparison.
vandalism = determineAuthorship(revision_curr, revision_prev, text_curr, relation)
if (not vandalism):
#print "NOT SPAM", revision.getId()
# Add the current revision with all the information.
revisions.update({revision_curr.wikipedia_id : revision_curr})
relations.update({revision_curr.wikipedia_id : relation})
revision_order.append((revision_curr.wikipedia_id, False))
# Update the fake revision id.
i = i+1
# Calculate the number of tokens in the revision.
total = 0
for p in revision_curr.ordered_paragraphs:
for paragraph_curr in revision_curr.paragraphs[p]:
for hash_sentence_curr in paragraph_curr.sentences.keys():
for sentence_curr in paragraph_curr.sentences[hash_sentence_curr]:
total = total + len(sentence_curr.words)
revision_curr.total_tokens = total
relation.total_tokens = total
else:
#print "---------------------------- FLAG 2"
#print "SPAM", revision.getId()
#print revision.getText()
#print
revision_order.append((revision_curr.wikipedia_id, True))
revision_curr = revision_prev
spam.append(revision.getSha1())
return (revisions, revision_order, relations)
def determineAuthorship(revision_curr, revision_prev, text_curr, relation):
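    """Compare the current revision against the previous one at paragraph,
    sentence and word level, updating authorship and relation information.
    Returns True if the revision is classified as vandalism."""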
# Containers for unmatched paragraphs and sentences in both revisions.
unmatched_sentences_curr = []
unmatched_sentences_prev = []
matched_sentences_prev = []
matched_words_prev = []
possible_vandalism = False
vandalism = False
# Analysis of the paragraphs in the current revision.
(unmatched_paragraphs_curr, unmatched_paragraphs_prev, matched_paragraphs_prev) = analyseParagraphsInRevision(revision_curr, revision_prev, text_curr, relation)
# Analysis of the sentences in the unmatched paragraphs of the current revision.
if (len(unmatched_paragraphs_curr)>0):
(unmatched_sentences_curr, unmatched_sentences_prev, matched_sentences_prev, _) = analyseSentencesInParagraphs(unmatched_paragraphs_curr, unmatched_paragraphs_prev, revision_curr, revision_prev, relation)
#TODO: SPAM detection
if (len(unmatched_paragraphs_curr)/float(len(revision_curr.ordered_paragraphs)) > UNMATCHED_PARAGRAPH):
possible_vandalism = True
# Analysis of words in unmatched sentences (diff of both texts).
if (len(unmatched_sentences_curr)>0):
(matched_words_prev, vandalism) = analyseWordsInSentences(unmatched_sentences_curr, unmatched_sentences_prev, revision_curr, possible_vandalism, relation)
if (len(unmatched_paragraphs_curr) == 0):
for paragraph in unmatched_paragraphs_prev:
for sentence_key in paragraph.sentences.keys():
for sentence in paragraph.sentences[sentence_key]:
if not(sentence.matched):
unmatched_sentences_prev.append(sentence)
# Add the information of 'deletion' to words
for unmatched_sentence in unmatched_sentences_prev:
#if (revision_curr.wikipedia_id == 74544182):
#print "unmatched sentence", unmatched_sentence.value, revision_curr.wikipedia_id
for word_prev in unmatched_sentence.words:
if not(word_prev.matched):
for elem in word_prev.deleted:
if (elem != revision_curr.wikipedia_id) and (elem in revisions.keys()):
if (revisions[elem].contributor_name != revision_curr.contributor_name):
if (elem in relation.redeleted.keys()):
relation.redeleted.update({elem : relation.redeleted[elem] + 1})
else:
relation.redeleted.update({elem : 1})
else:
if (elem in relation.self_redeleted.keys()):
relation.self_redeleted.update({elem : relation.self_redeleted[elem] + 1})
else:
relation.self_redeleted.update({elem : 1})
# Revert: deleting something that somebody else reintroduced.
for elem in word_prev.freq:
#if (revision_curr.wikipedia_id == 11):
# print "Revert in 11", word_prev.value, word_prev.deleted, relation.revert
if (elem != revision_curr.wikipedia_id) and (elem in revisions.keys()):
if (revisions[elem].contributor_name != revision_curr.contributor_name):
if (elem in relation.revert.keys()):
relation.revert.update({elem: relation.revert[elem] +1})
else:
relation.revert.update({elem: 1})
else:
if (elem in relation.self_revert.keys()):
relation.self_revert.update({elem: relation.self_revert[elem] +1})
else:
relation.self_revert.update({elem: 1})
#print "relation.revert", word_prev.value, word_prev.deleted, relation.revert, revision_curr.wikipedia_id
word_prev.deleted.append(revision_curr.wikipedia_id)
#if (revision_curr.wikipedia_id == 74544182):
# print word_prev.value, word_prev.revision, revisions[word_prev.revision].contributor_name, revision_curr.contributor_name
if (revisions[word_prev.revision].contributor_name != revision_curr.contributor_name):
if (word_prev.revision in relation.deleted.keys()):
relation.deleted.update({word_prev.revision : relation.deleted[word_prev.revision] + 1 })
else:
relation.deleted.update({word_prev.revision : 1 })
else:
if (word_prev.revision in relation.self_deleted.keys()):
relation.self_deleted.update({word_prev.revision : relation.self_deleted[word_prev.revision] + 1 })
else:
relation.self_deleted.update({word_prev.revision : 1 })
# Reset matched structures from old revisions.
for matched_paragraph in matched_paragraphs_prev:
matched_paragraph.matched = False
for sentence_hash in matched_paragraph.sentences.keys():
for sentence in matched_paragraph.sentences[sentence_hash]:
sentence.matched = False
for word in sentence.words:
word.matched = False
for matched_sentence in matched_sentences_prev:
matched_sentence.matched = False
for word in matched_sentence.words:
word.matched = False
for matched_word in matched_words_prev:
matched_word.matched = False
if (not vandalism):
# Add the new paragraphs to hash table of paragraphs.
for unmatched_paragraph in unmatched_paragraphs_curr:
if (unmatched_paragraph.hash_value in paragraphs_ht.keys()):
paragraphs_ht[unmatched_paragraph.hash_value].append(unmatched_paragraph)
else:
paragraphs_ht.update({unmatched_paragraph.hash_value : [unmatched_paragraph]})
# Add the new sentences to hash table of sentences.
for unmatched_sentence in unmatched_sentences_curr:
if (unmatched_sentence.hash_value in sentences_ht.keys()):
sentences_ht[unmatched_sentence.hash_value].append(unmatched_sentence)
else:
sentences_ht.update({unmatched_sentence.hash_value : [unmatched_sentence]})
return vandalism
def analyseParagraphsInRevision(revision_curr, revision_prev, text_curr, relation):
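    """Match the paragraphs of the current revision against the previous revision
    and against the hash table of paragraphs from older revisions.
    Returns (unmatched_paragraphs_curr, unmatched_paragraphs_prev, matched_paragraphs_prev)."""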
# Containers for unmatched and matched paragraphs.
unmatched_paragraphs_curr = []
unmatched_paragraphs_prev = []
matched_paragraphs_prev = []
# Split the text of the current into paragraphs.
paragraphs = Text.splitIntoParagraphs(text_curr)
# Iterate over the paragraphs of the current version.
for paragraph in paragraphs:
# Build Paragraph structure and calculate hash value.
paragraph = paragraph.strip()
hash_curr = Text.calculateHash(paragraph)
matched_curr = False
# If the paragraph is in the previous revision,
# update the authorship information and mark both paragraphs as matched (also in HT).
if (hash_curr in revision_prev.ordered_paragraphs):
for paragraph_prev in revision_prev.paragraphs[hash_curr]:
if (not paragraph_prev.matched):
matched_curr = True
paragraph_prev.matched = True
matched_paragraphs_prev.append(paragraph_prev)
# TODO: added this (CHECK).
for hash_sentence_prev in paragraph_prev.sentences.keys():
for sentence_prev in paragraph_prev.sentences[hash_sentence_prev]:
sentence_prev.matched = True
for word_prev in sentence_prev.words:
word_prev.matched = True
word_prev.used.append(revision_curr.wikipedia_id)
#if (word_prev.revision in relation.reintroduced.keys()):
# relation.reintroduced.update({word_prev.revision : relation.reintroduced[word_prev.revision] + 1 })
#else:
# relation.reintroduced.update({word_prev.revision : 1 })
# Add paragraph to current revision.
if (hash_curr in revision_curr.paragraphs.keys()):
revision_curr.paragraphs[paragraph_prev.hash_value].append(paragraph_prev)
revision_curr.ordered_paragraphs.append(paragraph_prev.hash_value)
else:
revision_curr.paragraphs.update({paragraph_prev.hash_value : [paragraph_prev]})
revision_curr.ordered_paragraphs.append(paragraph_prev.hash_value)
break
# If the paragraph is not in the previous revision, but it is in an older revision
# update the authorship information and mark both paragraphs as matched.
if ((not matched_curr) and (hash_curr in paragraphs_ht)):
for paragraph_prev in paragraphs_ht[hash_curr]:
if (not paragraph_prev.matched):
matched_curr = True
paragraph_prev.matched = True
matched_paragraphs_prev.append(paragraph_prev)
# TODO: added this (CHECK).
for hash_sentence_prev in paragraph_prev.sentences.keys():
for sentence_prev in paragraph_prev.sentences[hash_sentence_prev]:
sentence_prev.matched = True
for word_prev in sentence_prev.words:
word_prev.matched = True
word_prev.used.append(revision_curr.wikipedia_id)
if (revision_prev.wikipedia_id not in word_prev.used):
word_prev.freq.append(revision_curr.wikipedia_id)
# Revert: reintroducing something that somebody else deleted,
# (and was not used in the previous revision)
if (revision_prev.wikipedia_id not in word_prev.used):
#if (revision_curr.wikipedia_id == 11):
# print "Revert in 11", word_prev.value, word_prev.deleted, relation.revert
for elem in word_prev.deleted:
if (elem in revisions.keys()):
if (revisions[elem].contributor_name != revision_curr.contributor_name):
if (elem in relation.revert.keys()):
relation.revert.update({elem : relation.revert[elem] + 1})
else:
relation.revert.update({elem : 1})
else:
if (elem in relation.self_revert.keys()):
relation.self_revert.update({elem : relation.self_revert[elem] + 1})
else:
relation.self_revert.update({elem : 1})
if (revision_prev.wikipedia_id not in word_prev.used):
if (elem in revisions.keys()):
if (revisions[word_prev.revision].contributor_name != revision_curr.contributor_name):
if (word_prev.revision in relation.reintroduced.keys()):
relation.reintroduced.update({word_prev.revision : relation.reintroduced[word_prev.revision] + 1 })
else:
relation.reintroduced.update({word_prev.revision : 1 })
else:
if (word_prev.revision in relation.self_reintroduced.keys()):
relation.self_reintroduced.update({word_prev.revision : relation.self_reintroduced[word_prev.revision] + 1})
else:
relation.self_reintroduced.update({word_prev.revision : 1})
# Add paragraph to current revision.
if (hash_curr in revision_curr.paragraphs.keys()):
revision_curr.paragraphs[paragraph_prev.hash_value].append(paragraph_prev)
revision_curr.ordered_paragraphs.append(paragraph_prev.hash_value)
else:
revision_curr.paragraphs.update({paragraph_prev.hash_value : [paragraph_prev]})
revision_curr.ordered_paragraphs.append(paragraph_prev.hash_value)
break
# If the paragraph did not match with previous revisions,
# add to container of unmatched paragraphs for further analysis.
if (not matched_curr):
paragraph_curr = Paragraph()
paragraph_curr.hash_value = Text.calculateHash(paragraph)
paragraph_curr.value = paragraph
revision_curr.ordered_paragraphs.append(paragraph_curr.hash_value)
if (paragraph_curr.hash_value in revision_curr.paragraphs.keys()):
revision_curr.paragraphs[paragraph_curr.hash_value].append(paragraph_curr)
else:
revision_curr.paragraphs.update({paragraph_curr.hash_value : [paragraph_curr]})
unmatched_paragraphs_curr.append(paragraph_curr)
# Identify unmatched paragraphs in previous revision for further analysis.
for paragraph_prev_hash in revision_prev.ordered_paragraphs:
for paragraph_prev in revision_prev.paragraphs[paragraph_prev_hash]:
if (not paragraph_prev.matched):
unmatched_paragraphs_prev.append(paragraph_prev)
return (unmatched_paragraphs_curr, unmatched_paragraphs_prev, matched_paragraphs_prev)
def analyseSentencesInParagraphs(unmatched_paragraphs_curr, unmatched_paragraphs_prev, revision_curr, revision_prev, relation):
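    """Match the sentences of the unmatched paragraphs against the previous revision
    and against the hash table of sentences from older revisions.
    Returns (unmatched_sentences_curr, unmatched_sentences_prev, matched_sentences_prev, total_sentences)."""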
# Containers for unmatched and matched sentences.
unmatched_sentences_curr = []
unmatched_sentences_prev = []
matched_sentences_prev = []
total_sentences = 0
# Iterate over the unmatched paragraphs of the current revision.
for paragraph_curr in unmatched_paragraphs_curr:
# Split the current paragraph into sentences.
sentences = Text.splitIntoSentences(paragraph_curr.value)
# Iterate over the sentences of the current paragraph
for sentence in sentences:
# Create the Sentence structure.
sentence = sentence.strip()
sentence = ' '.join(Text.splitIntoWords(sentence))
hash_curr = Text.calculateHash(sentence)
matched_curr = False
total_sentences = total_sentences + 1
# Iterate over the unmatched paragraphs from the previous revision.
for paragraph_prev in unmatched_paragraphs_prev:
if (hash_curr in paragraph_prev.sentences.keys()):
for sentence_prev in paragraph_prev.sentences[hash_curr]:
if (not sentence_prev.matched):
matched_one = False
matched_all = True
for word_prev in sentence_prev.words:
if (word_prev.matched):
matched_one = True
else:
matched_all = False
if not(matched_one):
sentence_prev.matched = True
matched_curr = True
matched_sentences_prev.append(sentence_prev)
# TODO: CHECK this
for word_prev in sentence_prev.words:
word_prev.matched = True
word_prev.used.append(revision_curr.wikipedia_id)
#if (word_prev.revision in relation.reintroduced.keys()):
# relation.reintroduced.update({word_prev.revision : relation.reintroduced[word_prev.revision] + 1 })
#else:
# relation.reintroduced.update({word_prev.revision : 1 })
# Add the sentence information to the paragraph.
if (hash_curr in paragraph_curr.sentences.keys()):
paragraph_curr.sentences[hash_curr].append(sentence_prev)
paragraph_curr.ordered_sentences.append(sentence_prev.hash_value)
else:
paragraph_curr.sentences.update({sentence_prev.hash_value : [sentence_prev]})
paragraph_curr.ordered_sentences.append(sentence_prev.hash_value)
break
elif (matched_all):
sentence_prev.matched = True
matched_sentences_prev.append(sentence_prev)
if (matched_curr):
break
# Iterate over the hash table of sentences from old revisions.
if ((not matched_curr) and (hash_curr in sentences_ht.keys())):
for sentence_prev in sentences_ht[hash_curr]:
if (not sentence_prev.matched):
matched_one = False
matched_all = True
for word_prev in sentence_prev.words:
if (word_prev.matched):
matched_one = True
else:
matched_all = False
if not(matched_one):
sentence_prev.matched = True
matched_curr = True
matched_sentences_prev.append(sentence_prev)
# TODO: CHECK this
for word_prev in sentence_prev.words:
word_prev.matched = True
word_prev.used.append(revision_curr.wikipedia_id)
if (revision_prev.wikipedia_id not in word_prev.used):
word_prev.freq.append(revision_curr.wikipedia_id)
# Revert: reintroducing something that somebody else deleted
if (revision_prev.wikipedia_id not in word_prev.used):
for elem in word_prev.deleted:
#if (revision_curr.wikipedia_id == 11):
# print "Revert in 11", word_prev.value, word_prev.deleted, relation.revert
if (elem in revisions.keys()):
if (revisions[elem].contributor_name != revision_curr.contributor_name):
if (elem in relation.revert.keys()):
relation.revert.update({elem : relation.revert[elem] + 1})
else:
relation.revert.update({elem : 1})
else:
if (elem in relation.self_revert.keys()):
relation.self_revert.update({elem : relation.self_revert[elem] + 1})
else:
relation.self_revert.update({elem : 1})
#print "relation.revert", word_prev.value, word_prev.deleted, relation.revert, revision_curr.wikipedia_id
if (revision_prev.wikipedia_id not in word_prev.used):
if (elem in revisions.keys()):
if (revisions[word_prev.revision].contributor_name != revision_curr.contributor_name):
if (word_prev.revision in relation.reintroduced.keys()):
relation.reintroduced.update({word_prev.revision : relation.reintroduced[word_prev.revision] + 1 })
else:
relation.reintroduced.update({word_prev.revision : 1 })
else:
if (word_prev.revision in relation.self_reintroduced.keys()):
relation.self_reintroduced.update({word_prev.revision : relation.self_reintroduced[word_prev.revision] + 1})
else:
relation.self_reintroduced.update({word_prev.revision : 1})
# Add the sentence information to the paragraph.
if (hash_curr in paragraph_curr.sentences.keys()):
paragraph_curr.sentences[hash_curr].append(sentence_prev)
paragraph_curr.ordered_sentences.append(sentence_prev.hash_value)
else:
paragraph_curr.sentences.update({sentence_prev.hash_value : [sentence_prev]})
paragraph_curr.ordered_sentences.append(sentence_prev.hash_value)
break
elif (matched_all):
sentence_prev.matched = True
matched_sentences_prev.append(sentence_prev)
# If the sentence did not match, then include in the container of unmatched sentences for further analysis.
if (not matched_curr):
sentence_curr = Sentence()
sentence_curr.value = sentence
sentence_curr.hash_value = hash_curr
paragraph_curr.ordered_sentences.append(sentence_curr.hash_value)
if (sentence_curr.hash_value in paragraph_curr.sentences.keys()):
paragraph_curr.sentences[sentence_curr.hash_value].append(sentence_curr)
else:
paragraph_curr.sentences.update({sentence_curr.hash_value : [sentence_curr]})
unmatched_sentences_curr.append(sentence_curr)
# Identify the unmatched sentences in the previous paragraph revision.
for paragraph_prev in unmatched_paragraphs_prev:
for sentence_prev_hash in paragraph_prev.ordered_sentences:
for sentence_prev in paragraph_prev.sentences[sentence_prev_hash]:
if (not sentence_prev.matched):
unmatched_sentences_prev.append(sentence_prev)
sentence_prev.matched = True
matched_sentences_prev.append(sentence_prev)
return (unmatched_sentences_curr, unmatched_sentences_prev, matched_sentences_prev, total_sentences)
def analyseWordsInSentences(unmatched_sentences_curr, unmatched_sentences_prev, revision_curr, possible_vandalism, relation):
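    """Diff the words of the remaining unmatched sentences against the previous
    revision to attribute added, re-used and deleted tokens.
    Returns (matched_words_prev, possible_vandalism)."""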
matched_words_prev = []
unmatched_words_prev = []
global WORD_ID
# Split sentences into words.
text_prev = []
for sentence_prev in unmatched_sentences_prev:
for word_prev in sentence_prev.words:
if (not word_prev.matched):
text_prev.append(word_prev.value)
unmatched_words_prev.append(word_prev)
text_curr = []
for sentence_curr in unmatched_sentences_curr:
splitted = Text.splitIntoWords(sentence_curr.value)
text_curr.extend(splitted)
sentence_curr.splitted.extend(splitted)
# Edit consists of removing sentences, not adding new content.
if (len(text_curr) == 0):
return (matched_words_prev, False)
# SPAM detection.
if (possible_vandalism):
density = Text.computeAvgWordFreq(text_curr, revision_curr.wikipedia_id)
if (density > WORD_DENSITY):
return (matched_words_prev, possible_vandalism)
else:
possible_vandalism = False
if (len(text_prev) == 0):
for sentence_curr in unmatched_sentences_curr:
for word in sentence_curr.splitted:
word_curr = Word()
word_curr.internal_id = WORD_ID
word_curr.author_id = revision_curr.contributor_id
word_curr.author_name = revision_curr.contributor_name
word_curr.revision = revision_curr.wikipedia_id
word_curr.value = word
sentence_curr.words.append(word_curr)
word_curr.used.append(revision_curr.wikipedia_id)
relation.added = relation.added + 1
WORD_ID = WORD_ID + 1
return (matched_words_prev, possible_vandalism)
d = Differ()
diff = list(d.compare(text_prev, text_curr))
for sentence_curr in unmatched_sentences_curr:
for word in sentence_curr.splitted:
curr_matched = False
pos = 0
while (pos < len(diff)):
word_diff = diff[pos]
if (word == word_diff[2:]):
if (word_diff[0] == ' '):
for word_prev in unmatched_words_prev:
if ((not word_prev.matched) and (word_prev.value == word)):
word_prev.used.append(revision_curr.wikipedia_id)
word_prev.matched = True
curr_matched = True
sentence_curr.words.append(word_prev)
matched_words_prev.append(word_prev)
diff[pos] = ''
pos = len(diff)+1
#if (word_prev.revision in relation.reintroduced.keys()):
# relation.reintroduced.update({word_prev.revision : relation.reintroduced[word_prev.revision] + 1 })
#else:
# relation.reintroduced.update({word_prev.revision : 1 })
break
elif (word_diff[0] == '-'):
for word_prev in unmatched_words_prev:
if ((not word_prev.matched) and (word_prev.value == word)):
word_prev.matched = True
matched_words_prev.append(word_prev)
diff[pos] = ''
word_prev.deleted.append(revision_curr.wikipedia_id)
if (revisions[word_prev.revision].contributor_name != revision_curr.contributor_name):
if (word_prev.revision in relation.deleted.keys()):
relation.deleted.update({word_prev.revision : relation.deleted[word_prev.revision] + 1 })
else:
relation.deleted.update({word_prev.revision : 1 })
else:
if (word_prev.revision in relation.self_deleted.keys()):
relation.self_deleted.update({word_prev.revision : relation.self_deleted[word_prev.revision] + 1 })
else:
relation.self_deleted.update({word_prev.revision : 1 })
break
elif (word_diff[0] == '+'):
curr_matched = True
word_curr = Word()
word_curr.internal_id = WORD_ID
word_curr.value = word
word_curr.author_id = revision_curr.contributor_id
word_curr.author_name = revision_curr.contributor_name
word_curr.revision = revision_curr.wikipedia_id
word_curr.used.append(revision_curr.wikipedia_id)
sentence_curr.words.append(word_curr)
relation.added = relation.added + 1
WORD_ID = WORD_ID + 1
diff[pos] = ''
pos = len(diff)+1
pos = pos + 1
if not(curr_matched):
word_curr = Word()
word_curr.internal_id = WORD_ID
word_curr.value = word
word_curr.author_id = revision_curr.contributor_id
word_curr.author_name = revision_curr.contributor_name
word_curr.revision = revision_curr.wikipedia_id
word_curr.used.append(revision_curr.wikipedia_id)
sentence_curr.words.append(word_curr)
relation.added = relation.added + 1
WORD_ID = WORD_ID + 1
return (matched_words_prev, possible_vandalism)
def printAllRevisions(order, revisions):
for (revision, vandalism) in order:
if not(vandalism):
printRevision(revisions[revision])
def printRevision(revision):
print "Printing authorhship for revision: ", revision.wikipedia_id
text = []
authors = []
for hash_paragraph in revision.ordered_paragraphs:
#print hash_paragraph
#text = ''
p_copy = deepcopy(revision.paragraphs[hash_paragraph])
paragraph = p_copy.pop(0)
#print paragraph.value
#print len(paragraph.sentences)
for hash_sentence in paragraph.ordered_sentences:
#print hash_sentence
sentence = paragraph.sentences[hash_sentence].pop(0)
#print sentence.words
for word in sentence.words:
#print word
#text = text + ' ' + unicode(word.value,'utf-8') + "@@" + str(word.revision)
text.append(word.value)
authors.append(word.revision)
print text
print authors
def printRevisionTrackAppearance(revision):
print "Printing authorship for revision: ", revision.wikipedia_id
text = []
authors = []
for hash_paragraph in revision.ordered_paragraphs:
#print hash_paragraph
#text = ''
p_copy = deepcopy(revision.paragraphs[hash_paragraph])
paragraph = p_copy.pop(0)
#print paragraph.value
#print len(paragraph.sentences)
for hash_sentence in paragraph.ordered_sentences:
#print hash_sentence
sentence = paragraph.sentences[hash_sentence].pop(0)
#print sentence.words
for word in sentence.words:
appeared = copy(word.used)
disappeared = copy(word.deleted)
changes = []
changes.append("+(" + str(appeared.pop(0))+")")
while len(disappeared) > 0:
d = disappeared.pop(0)
if (d > revision.wikipedia_id):
break
changes.append("-(" + str(d)+")")
while len(appeared) > 0:
a = appeared.pop(0)
if (a > d):
changes.append("+(" + str(a)+")")
break
#print word.used
#print word.deleted
print unicode(word.value,'utf-8') + "@@" + str(word.revision) + "@@" + str(changes)
text.append(word.value)
authors.append(word.revision)
#print text
#print authors
def printRelationships(relations, order):
print "Printing relationships"
header = ["revision", "author", "deleted(-)", "revert(-)", "reintroduced(+)", "redeleted(+)", "added", "total", "self-deleted", "self-revert", "self-reintroduced", "self-redeleted"]
print "\t".join(header)
for (revision, vandalism) in order:
if (vandalism):
continue
relation = relations[revision]
#print relation.author
print str(relation.revision) + "\t" + (relation.author).decode("utf-8") + "\t" + str(relation.deleted) + "\t" + str(relation.revert) + "\t" + str(relation.reintroduced) + "\t" + str(relation.redeleted) + "\t" + str(relation.added) + "\t" + str(relation.total_tokens) + "\t" + str(relation.self_deleted) + "\t" + str(relation.self_revert) + "\t" + str(relation.self_reintroduced) + "\t" + str(relation.self_redeleted)
#def printJSON(relations, order):
#
# deleted_values = {}
# revert_values = {}
# reintroduced_values = {}
# redeleted_values = {}
#
# for (revision, vandalism) in order:
# if (vandalism):
# continue
# relation = relations[revision]
# print str(relation.revision) + "\t" + str(relation.author) + "\t" + str(relation.deleted) + "\t" + str(relation.revert) + "\t" + str(relation.reintroduced) + "\t" + str(relation.redeleted) + "\t" + str(relation.added) + "\t" + str(relation.total_tokens)
def printRevisionJSON(revision):
tokens = []
for hash_paragraph in revision.ordered_paragraphs:
p_copy = deepcopy(revision.paragraphs[hash_paragraph])
paragraph = p_copy.pop(0)
for hash_sentence in paragraph.ordered_sentences:
sentence = paragraph.sentences[hash_sentence].pop(0)
for word in sentence.words:
i = 0
while i < len(word.deleted):
if word.deleted[i] > revision.wikipedia_id:
break
i = i+1
j = 0
while j < len(word.freq):
if word.freq[j] > revision.wikipedia_id:
break
j = j +1
tokens.append(str({"token": word.value, "author_name": word.author_name.encode("utf-8"), "rev_id": str(word.revision), "disappering": word.deleted[0:i], "incoming": word.freq[0:j]}))
print tokens #cjson.encode({"tokens" : tokens})
def main(my_argv):
inputfile = ''
revision = None
output = ''
if (len(my_argv) <= 3):
try:
opts, _ = getopt.getopt(my_argv,"i:",["ifile="])
except getopt.GetoptError:
            print 'Usage: Wikiwho.py -i <inputfile> -o <output> [-r <revision_id>]'
exit(2)
else:
try:
opts, _ = getopt.getopt(my_argv,"i:o:r:",["ifile=","revision=", "output="])
except getopt.GetoptError:
            print 'Usage: Wikiwho.py -i <inputfile> -o <output> [-r <revision_id>]'
exit(2)
for opt, arg in opts:
if opt in ('-h', "--help"):
print "WikiWho: An algorithm for detecting attribution of authorship in revisioned content"
print
            print 'Usage: Wikiwho.py -i <inputfile> -o <output> [-r <revision_id>]'
            print "-i --ifile File to analyze"
            print "-o --output Type of output: <a> for authorship, <r> for relations"
print "-r --revision Revision to analyse. If not specified, the last revision is printed."
print "-h --help This help."
exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-r", "--revision"):
revision = arg
elif opt in ("-o", "--output"):
output = arg
return (inputfile,revision,output)
if __name__ == '__main__':
(file_name, revision, output) = main(argv[1:])
#print "Calculating authorship for:", file_name
time1 = time()
(revisions, order, relations) = analyseArticle(file_name)
time2 = time()
#pos = file_name.rfind("/")
#print file_name[pos+1: len(file_name)-len(".xml")], time2-time1
if (output == 'r'):
printRelationships(relations, order)
if (output == 'a'):
print "revision", revision
if (revision == 'all'):
printAllRevisions(order, revisions)
else:
printRevisionJSON(revisions[int(revision)])
#print "Execution time:", time2-time1
| wikiwho/whovis | WikiwhoRelationships.py | Python | mit | 45,341 |
"""
The MIT License (MIT)
Copyright (c) <2015> <sarangis>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from src.ir.function import *
from src.ir.constants import Number
from src.ir.base_ir_visitor import IRBaseVisitor
from src.ir.irbuilder import IRBuilder
from src.optimizer.pass_support import *
from src.utils.print_utils import draw_header
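# Function pass that walks every basic block of the given function with the base IR visitor and prints the resulting function.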
class ConstPropagationPass(FunctionPass, IRBaseVisitor):
def __init__(self):
FunctionPass.__init__(self)
IRBaseVisitor.__init__(self)
self.insts_to_remove = []
self.irbuilder = None
@verify(node=Function)
def run_on_function(self, node):
draw_header("Instruction Combining: %s" % node.name)
func = node
for bb in func.basic_blocks:
self.visit_basicblock(bb)
print(node) | ssarangi/spiderjit | src/optimizer/inst_combining.py | Python | mit | 1,791 |
import unittest
import os
from unittest.mock import patch
import migration
from configuration import Builder
import configuration
from tests import testhelper
class MigrationTestCase(unittest.TestCase):
def setUp(self):
self.rootfolder = os.path.dirname(os.path.realpath(__file__))
@patch('migration.Commiter')
@patch('migration.Initializer')
@patch('migration.RTCInitializer')
@patch('migration.os')
@patch('configuration.shutil')
def testDeletionOfLogFolderOnInitalization(self, shutil_mock, os_mock, rtc_initializer_mock, git_initializer_mock,
git_comitter_mock):
config = Builder().setrootfolder(self.rootfolder).build()
anylogpath = config.getlogpath("testDeletionOfLogFolderOnInitalization")
os_mock.path.exists.return_value = False
configuration.config = config
migration.initialize()
expectedlogfolder = self.rootfolder + os.sep + "Logs"
shutil_mock.rmtree.assert_called_once_with(expectedlogfolder)
def testExistRepo_Exists_ShouldReturnTrue(self):
with testhelper.createrepo(folderprefix="test_migration"):
self.assertTrue(migration.existsrepo())
def testExistRepo_DoesntExist_ShouldReturnFalse(self):
configuration.config = Builder().setworkdirectory(self.rootfolder).setgitreponame("test.git").build()
self.assertFalse(migration.existsrepo())
| akchinSTC/rtc2git | tests/test_migration.py | Python | mit | 1,445 |
from flask import Blueprint
error = Blueprint('error', __name__, )
from . import view
| XiMuYouZi/PythonDemo | Web/zhihu/error/__init__.py | Python | mit | 88 |
# Part 1: functions
#gender: female = 2, male = 0
def calculate_score_for_gender(gender):
if gender == "male":
return 0
else: return 2
#age: 0-100 if age < 10 --> 0, 11 < age < 20 --> 5, 21 < age < 35 --> 2, 36 < age < 50 --> 4, 50+ --> 1
def calculate_score_for_age(age):
if (age > 11 and age <= 20) or (age > 36 and age <= 50):
return 5
elif age > 20 and age <= 35:
return 2
elif age < 10:
return 0
else:
return 1
#status: 0 = single, 1 = relationship, 2 = in open relationship, 3 = it's complicated, 4 = I'm a pizza, 5 = depends who's asking
def calculate_score_for_status(status):
if status == "single":
return 0
elif status == "in a relationship":
return 1
elif status == "in an open relationship":
return 2
elif status == "it's complicated":
return 3
elif status == "I'm a pizza":
return 0
else:
return 5
# ignorance: 0 = problem is my challenge, 2 = who gives a fuck, 4 = I'm an angel
def calculate_score_for_ignorance(ignorance):
    if ignorance == "problem is my challenge":
        return 0
    elif ignorance == "who gives a fuck":
        return 2
    else: # "I'm an angel"
        return 4
# money_have: -10000+ = 6, (-10000)-(-5000) = 5, -5000-0 = 4, 0-500 = 3, 500-3000 = 2, 3000-10000 = 1, 10000+ = 0
def calculate_score_for_money_have(money_have):
if money_have <= (-10000.0):
return 8.0
elif money_have > (-10000.0) and money_have <= (-5000.0):
return 5.0
elif money_have > (-5000.0) and money_have <= 0.0:
return 4.0
elif money_have > 0.0 and money_have <= 500.0:
return 3.0
elif money_have > 500.0 and money_have <= 3000.0:
return 2.0
else:
return 0.0
# ---WHY doesn't it recognize positive floating point numbers, or any negative number (integer or float), as a number?
# -->YOU HAVE TO WRAP raw_input IN float() WHEN THE VALUE ISN'T A WHOLE NUMBER, AND DROP .ISDIGIT, BECAUSE .ISDIGIT ONLY WORKS FOR WHOLE NUMBERS!
# money_want: 0 = 0, 0-1000 = 1, 1000-5000 = 3, 5000-10000 = 4, 10000+ = 5
def caluculate_score_for_money_want(money_want):
if money_want == 0:
return 0
elif money_want > 0.0 and money_want <= 1000.0:
return 1
elif money_want > 1000.0 and money_want <= 5000.0:
return 3
elif money_want > 5000.0 and money_want <= 10000.0:
return 4
else:
return 5
#real friends: 0 = 5, 1-3 = 1, 4-6 = 2, 7-9 = 3, 10+ = 4
def calculate_score_for_rl_friends(rl_friends):
if rl_friends == 0:
return 5
elif rl_friends >= 1 and rl_friends <= 3:
return 1
elif rl_friends >= 4 and rl_friends <= 6:
return 2
elif rl_friends >= 7 and rl_friends <= 9:
return 3
else:
return 4
#children: 0 = 1, 1-2 = 2, 3 = 3, 4 = 4, 5+ = 5
def calculate_score_for_children(children):
if children == 0:
return 1
    elif children == 1 or children == 2:
return 2
elif children == 3:
return 3
elif children == 4:
return 4
else:
return 5
# Part 2: sum of the scoring functions
def calculate_score(gender, age, status, ignorance, money_have, money_want, rl_friends, children):
result = calculate_score_for_gender(gender)
result += calculate_score_for_age(age)
result += calculate_score_for_status(status)
result += calculate_score_for_ignorance(ignorance)
result += calculate_score_for_money_have(money_have)
result += caluculate_score_for_money_want(money_want)
result += calculate_score_for_rl_friends(rl_friends)
result += calculate_score_for_children(children)
return result
# Part 3: ------------- output for the user
#gender
print "Are you male or female?"
gender = raw_input(">> ")
#note to self: "while" means it keeps checking in a loop, "if" checks only once
while (gender != "male") and (gender != "female"):
gender = raw_input("Check your gender again: ")
#age
print "How old are you?"
age = raw_input(">> ")
while not age.isdigit():
age = raw_input("Admit it, you're old. Now write your real age: ")
#status
print "What is your marital status?"
status = raw_input(">> ")
while (status != "single") and (status != "in a relationship") and (status != "in an open relationship") and (status != "it's complicated") and (status != "I'm a pizza"):
status = raw_input("Yeah, right... Think again: ")
#ignorance
print "How ignorant are you?"
ignorance = raw_input(">> ")
while (ignorance != "problem is my challenge") and (ignorance != "who gives a fuck") and (ignorance != "I'm an angel"):
ignorance = raw_input("You can't be that ignorant. Try again: ")
#money_have
print "How much money have you got?"
money_have = float(raw_input(">> "))
while not money_have:
money_have = float(raw_input("We aren't tax collectors, so be honest: "))
# YOU HAVE TO WRAP raw_input IN float() WHEN THE VALUE ISN'T A WHOLE NUMBER, AND DROP .ISDIGIT, BECAUSE .ISDIGIT ONLY WORKS FOR WHOLE NUMBERS!
#money_want
print "In addition to the money you've got, how much money do you want to have?"
money_want = float(raw_input(">> "))
while money_want < 0: #----> so that it is a positive number!
money_want = float(raw_input("I didn't ask for apples and peaches. So, how much money do you want? "))
#rl_friends
print "How many real friends have you got?"
rl_friends = raw_input(">> ")
while not rl_friends.isdigit():
rl_friends = raw_input("Spock doesn't count. Think again - how many? ")
#children
print "How many children have you got?"
children = raw_input(">> ")
while not children.isdigit():
children = raw_input("No aliens, just humans, please: ")
# Part 4: total score
print "On a scale from 0 to 40, your life complication is : ", calculate_score(gender, int(age), status, ignorance, money_have, money_want, rl_friends, children)
| CodeCatz/litterbox | ajda/complicajda.py | Python | mit | 5,477 |
"""Angles and anomalies.
"""
from astropy import units as u
from poliastro.core.angles import (
D_to_M as D_to_M_fast,
D_to_nu as D_to_nu_fast,
E_to_M as E_to_M_fast,
E_to_nu as E_to_nu_fast,
F_to_M as F_to_M_fast,
F_to_nu as F_to_nu_fast,
M_to_D as M_to_D_fast,
M_to_E as M_to_E_fast,
M_to_F as M_to_F_fast,
fp_angle as fp_angle_fast,
nu_to_D as nu_to_D_fast,
nu_to_E as nu_to_E_fast,
nu_to_F as nu_to_F_fast,
)
@u.quantity_input(D=u.rad)
def D_to_nu(D):
"""True anomaly from parabolic eccentric anomaly.
Parameters
----------
D : ~astropy.units.Quantity
        Parabolic eccentric anomaly.
Returns
-------
nu : ~astropy.units.Quantity
True anomaly.
Notes
-----
Taken from Farnocchia, Davide, Davide Bracali Cioci, and Andrea Milani.
"Robust resolution of Kepler’s equation in all eccentricity regimes."
Celestial Mechanics and Dynamical Astronomy 116, no. 1 (2013): 21-34.
"""
return (D_to_nu_fast(D.to_value(u.rad)) * u.rad).to(D.unit)
@u.quantity_input(nu=u.rad)
def nu_to_D(nu):
"""Parabolic eccentric anomaly from true anomaly.
Parameters
----------
nu : ~astropy.units.Quantity
True anomaly.
Returns
-------
D : ~astropy.units.Quantity
        Parabolic eccentric anomaly.
Notes
-----
Taken from Farnocchia, Davide, Davide Bracali Cioci, and Andrea Milani.
"Robust resolution of Kepler’s equation in all eccentricity regimes."
Celestial Mechanics and Dynamical Astronomy 116, no. 1 (2013): 21-34.
"""
return (nu_to_D_fast(nu.to_value(u.rad)) * u.rad).to(nu.unit)
@u.quantity_input(nu=u.rad, ecc=u.one)
def nu_to_E(nu, ecc):
"""Eccentric anomaly from true anomaly.
.. versionadded:: 0.4.0
Parameters
----------
nu : ~astropy.units.Quantity
True anomaly.
ecc : ~astropy.units.Quantity
Eccentricity.
Returns
-------
E : ~astropy.units.Quantity
Eccentric anomaly.
"""
return (nu_to_E_fast(nu.to_value(u.rad), ecc.value) * u.rad).to(nu.unit)
@u.quantity_input(nu=u.rad, ecc=u.one)
def nu_to_F(nu, ecc):
"""Hyperbolic eccentric anomaly from true anomaly.
Parameters
----------
nu : ~astropy.units.Quantity
True anomaly.
ecc : ~astropy.units.Quantity
Eccentricity (>1).
Returns
-------
F : ~astropy.units.Quantity
Hyperbolic eccentric anomaly.
Notes
-----
Taken from Curtis, H. (2013). *Orbital mechanics for engineering students*. 167
"""
return (nu_to_F_fast(nu.to_value(u.rad), ecc.value) * u.rad).to(nu.unit)
@u.quantity_input(E=u.rad, ecc=u.one)
def E_to_nu(E, ecc):
"""True anomaly from eccentric anomaly.
.. versionadded:: 0.4.0
Parameters
----------
E : ~astropy.units.Quantity
Eccentric anomaly.
ecc : ~astropy.units.Quantity
Eccentricity.
Returns
-------
nu : ~astropy.units.Quantity
True anomaly.
"""
return (E_to_nu_fast(E.to_value(u.rad), ecc.value) * u.rad).to(E.unit)
@u.quantity_input(F=u.rad, ecc=u.one)
def F_to_nu(F, ecc):
"""True anomaly from hyperbolic eccentric anomaly.
Parameters
----------
F : ~astropy.units.Quantity
Hyperbolic eccentric anomaly.
ecc : ~astropy.units.Quantity
Eccentricity (>1).
Returns
-------
nu : ~astropy.units.Quantity
True anomaly.
"""
return (F_to_nu_fast(F.to_value(u.rad), ecc.value) * u.rad).to(F.unit)
@u.quantity_input(M=u.rad, ecc=u.one)
def M_to_E(M, ecc):
"""Eccentric anomaly from mean anomaly.
.. versionadded:: 0.4.0
Parameters
----------
M : ~astropy.units.Quantity
Mean anomaly.
ecc : ~astropy.units.Quantity
Eccentricity.
Returns
-------
E : ~astropy.units.Quantity
Eccentric anomaly.
"""
return (M_to_E_fast(M.to_value(u.rad), ecc.value) * u.rad).to(M.unit)
@u.quantity_input(M=u.rad, ecc=u.one)
def M_to_F(M, ecc):
"""Hyperbolic eccentric anomaly from mean anomaly.
Parameters
----------
M : ~astropy.units.Quantity
Mean anomaly.
ecc : ~astropy.units.Quantity
Eccentricity (>1).
Returns
-------
F : ~astropy.units.Quantity
Hyperbolic eccentric anomaly.
"""
return (M_to_F_fast(M.to_value(u.rad), ecc.value) * u.rad).to(M.unit)
@u.quantity_input(M=u.rad, ecc=u.one)
def M_to_D(M):
"""Parabolic eccentric anomaly from mean anomaly.
Parameters
----------
M : ~astropy.units.Quantity
Mean anomaly.
Returns
-------
D : ~astropy.units.Quantity
Parabolic eccentric anomaly.
"""
return (M_to_D_fast(M.to_value(u.rad)) * u.rad).to(M.unit)
@u.quantity_input(E=u.rad, ecc=u.one)
def E_to_M(E, ecc):
"""Mean anomaly from eccentric anomaly.
.. versionadded:: 0.4.0
Parameters
----------
E : ~astropy.units.Quantity
Eccentric anomaly.
ecc : ~astropy.units.Quantity
Eccentricity.
Returns
-------
M : ~astropy.units.Quantity
Mean anomaly.
"""
return (E_to_M_fast(E.to_value(u.rad), ecc.value) * u.rad).to(E.unit)
@u.quantity_input(F=u.rad, ecc=u.one)
def F_to_M(F, ecc):
"""Mean anomaly from eccentric anomaly.
Parameters
----------
F : ~astropy.units.Quantity
Hyperbolic eccentric anomaly.
ecc : ~astropy.units.Quantity
Eccentricity (>1).
Returns
-------
M : ~astropy.units.Quantity
Mean anomaly.
"""
return (F_to_M_fast(F.to_value(u.rad), ecc.value) * u.rad).to(F.unit)
@u.quantity_input(D=u.rad, ecc=u.one)
def D_to_M(D):
"""Mean anomaly from eccentric anomaly.
Parameters
----------
D : ~astropy.units.Quantity
Parabolic eccentric anomaly.
Returns
-------
M : ~astropy.units.Quantity
Mean anomaly.
"""
return (D_to_M_fast(D.to_value(u.rad)) * u.rad).to(D.unit)
@u.quantity_input(nu=u.rad, ecc=u.one)
def fp_angle(nu, ecc):
"""Flight path angle.
.. versionadded:: 0.4.0
Parameters
----------
nu : ~astropy.units.Quantity
True anomaly.
ecc : ~astropy.units.Quantity
Eccentricity.
Notes
-----
Algorithm taken from Vallado 2007, pp. 113.
"""
return (fp_angle_fast(nu.to_value(u.rad), ecc.value) * u.rad).to(nu.unit)
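# Illustrative round-trip example (not part of the original module), using the
# conversions defined above together with astropy units:
#
#     from astropy import units as u
#     ecc = 0.1 * u.one
#     M = E_to_M(nu_to_E(45 * u.deg, ecc), ecc)   # true -> eccentric -> mean anomaly
#     nu = E_to_nu(M_to_E(M, ecc), ecc)           # and back to ~45 deg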
| poliastro/poliastro | src/poliastro/twobody/angles.py | Python | mit | 6,419 |
import sys
sys.path.append("/mnt/moehlc/home/idaf_library")
#import mahotas
import vigra
import libidaf.idafIO as io
import numpy as np
from scipy import ndimage
from scipy.stats import nanmean
#import matplotlib.pyplot as plt
import time
import pickle
import os
import multiprocessing as mp
def gaussWeight(dat,sigma,mu):
return 1./np.sqrt(2*np.pi*np.square(sigma))*np.exp(-np.square(dat-mu)/(2*np.square(sigma)))
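# Brute-force 3D Gaussian filter for sparse data: for every voxel of data, writes into outdata the Gaussian-weighted average of all non-zero input voxels within a radius of 3*sigma (NaN if there are none); outdata is flushed slice by slice so it can be a numpy memmap.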
def streaming3Dfilter(data,outdata,sigma):
fsize = int(np.round(sigma*3)) # filter size
amax=np.array(data.shape)-1 #max index
amin=amax-amax #min index
xyz = np.array(data.nonzero())#coordinates
x = xyz[0,:]
y = xyz[1,:]
z = xyz[2,:]
datxyz = np.array(data[x,y,z])
for i in range(amax[0]+1): #x dim
for j in range(amax[1]+1): # y dim
for k in range(amax[2]+1): # z dim
dist = np.sqrt(np.square(i-x) + np.square(j-y) + np.square(k-z))
ind = dist<= fsize
weight = gaussWeight(dist[ind],sigma,0)
datsel = datxyz[ind]
if datsel.size == 0:
outdata[i,j,k] = np.nan
else:
outdata[i,j,k] = np.average(datsel,weights = weight)
print('writing slice ' + str(i) + 'to '+ outdata.filename)
print('progress: ' + str(i/float(amax[0])*100) + ' percent done')
outdata.flush() #write to disk
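# Reads a multi-page TIFF stack slice by slice into a disk-backed numpy memmap created under tmpStackDir.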
def importStack(path,fname,tmpStackDir):
absname = path +fname
zsize = vigra.impex.numberImages(absname)
im =vigra.readImage(absname, index = 0, dtype='FLOAT')
#vol = np.zeros([im.height,im.width,zsize])
try:
os.makedirs(tmpStackDir)
except:
print(tmpStackDir+' already exists')
vol = np.memmap(tmpStackDir + fname[0:-4],dtype='float64',mode = 'w+', shape = (im.height,im.width,zsize))
#raise('hallo')
for i in range(zsize):
print("importing slice " + str(i) + ' of file '+fname)
im=np.squeeze(vigra.readImage(absname, index = i, dtype='FLOAT'))
vol[:,:,i] = im
vol.flush()
return vol
def filterAndSave(fname,path,savepath,filterSize,volpath):
vol = importStack(path,fname,volpath)
try:
os.makedirs(savepath)
except:
print(savepath+' already exists')
res = np.memmap(savepath + 'filtered_Size_'+ str(filterSize) + fname,dtype = 'float64', mode = 'w+', shape = vol.shape)
streaming3Dfilter(vol, res,filterSize)
def filterAndSave_batch(pattern,path,savepath,filterSize,volpath):
fnames = io.getFilelistFromDir(path,pattern) #list of tiff stacks to be filtered
for i in range(len(fnames)):
#for i in range(1):
print('start filter process for '+fnames[i])
mp.Process(target = filterAndSave, args = (fnames[i],path,savepath,filterSize,volpath)).start() #parallel processing
def filterAndSave_batch_serial(pattern,path,savepath,filterSize,volpath):
fnames = io.getFilelistFromDir(path,pattern) #list of tiff stacks to be filtered
for i in range(len(fnames)):
#for i in range(1):
print('start filter process for '+fnames[i])
filterAndSave(fnames[i],path,savepath,filterSize,volpath) #parallel processing
if __name__ == '__main__':
path = '/home/moehlc/raman_bloodvessel_dat/segmented/angio_wt/'
savepath = '/home/moehlc/raman_bloodvessel_dat/filteredVoldDatGauss1/angio_wt/'
volpath = '/home/moehlc/raman_bloodvessel_dat/rawVoldat2/angio_wt/'
filterSize = 20
filterAndSave_batch('flowSkel',path,savepath,filterSize,volpath)
filterAndSave_batch('distanceSkel',path,savepath,filterSize,volpath)
#filterAndSave_batch_serial('flowSkel',path,savepath,filterSize)
#filterAndSave_batch('distanceSkel',path,savepath,filterSize)
| cmohl2013/140327 | parallel_nanmeanGaussWeightedFilterOptimizedSize20.py | Python | mit | 3,450 |
#!/usr/bin/env python
import sys
import os
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| desec-io/desec-stack | api/manage.py | Python | mit | 248 |
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from matplotlib import path
from quadr import quadr
from LapSLPmatrix import LapSLPmatrix
def test_SLPn():
# test data dir
import os
dir_path = os.path.dirname(os.path.realpath(__file__)) + "/TestData/"
circle = sio.loadmat(dir_path+'circle.mat')
x = circle['x']
N, M = x.shape
# set up source
s = {}
for l in range(0,M):
s_temp = {}
s_temp['x'] = x[:,l][:,np.newaxis]
s_temp = quadr(s_temp,N)
s[str(l)] = s_temp
# set up target
nx = 100
gx = np.arange(1,nx+1)/nx
ny = 100
gy = np.arange(1,ny+1)/ny # set up plotting
xx, yy = np.meshgrid(gx,gy)
zz = xx + 1j*yy
t = {}
ii = np.ones((nx*ny, ), dtype=bool)
for l in range(0,M):
s_temp = s[str(l)]
p = path.Path(np.vstack((np.real(s_temp['x']).T,np.imag(s_temp['x']).T)).T)
ii = (~p.contains_points(np.vstack((np.real(zz).flatten('F'),np.imag(zz).flatten('F'))).T))&ii
t['x'] = zz.flatten('F')[ii][np.newaxis].T
# multipole evaluation
u = 0*(1+1j)*zz
idx = ii.reshape(ny,nx,order='F')
for l in range(0,M):
s_temp = s[str(l)]
A = LapSLPmatrix(t,s_temp,0)
tau = np.sin(2*np.pi*np.real(s_temp['x'])) + np.cos(np.pi*np.imag(s_temp['x']))
u_temp = A.dot(tau)
u.T[idx.T] = u.T[idx.T] + u_temp.flatten()
if np.mod(l,25) == 0:
fig = plt.figure()
logerr = plt.imshow(np.real(u),aspect=nx/ny, interpolation='none')
fig.colorbar(logerr)
plt.grid(True)
plt.show()
fig = plt.figure()
logerr = plt.imshow(np.real(u),aspect=nx/ny, interpolation='none')
fig.colorbar(logerr)
plt.grid(True)
plt.show()
if __name__ == '__main__':
test_SLPn()
| bobbielf2/ners590_final_project | python_code/test_SLPn.py | Python | mit | 2,019 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ..client import MODE_CHOICES
from ..client.tasks import ReadChannelValues, WriteChannelValue
from ..client.worker import Worker
from ..utils.command import BaseCommand
from ..values.channel import Channel
from ..values.signals import ValueChangeEvent
from optparse import OptionGroup, OptionConflictError
import logging
import sys
import time
class Command(BaseCommand):
def make_option_groups(self, parser):
groups = super(Command, self).make_option_groups(parser)
# Runtime options
runtime_group = OptionGroup(parser, 'runtime options')
runtime_group.add_option('--sleep', default=1, type='float',
            help='time to sleep (s) between two probing loops')
        groups.append(runtime_group)
# Channel options
channel_group = OptionGroup(parser, 'channel options')
self._append_channel_options(channel_group)
groups.append(channel_group)
# Data output options
data_group = OptionGroup(parser, 'data logging options')
data_group.add_option('--file',dest='datafile',
help="File to store data in")
groups.append(data_group)
return groups
def _append_channel_options(self, group):
# initial frequency
group.add_option('--set-freq', '-F',
help='set initial frequency (MHz)')
group.add_option('--mode', '-m', type='choice',
choices = MODE_CHOICES,
help='set device mode {mes|rds|stereo}')
group.add_option('--mes',
action='store_const', dest='mode', const='mes',
help='set measurement mode')
group.add_option('--rdsm', '--rds-mode',
action='store_const', dest='mode', const='rds',
help='set rds mode')
group.add_option('--stereo',
action='store_const', dest='mode', const='stereo',
help='set stereo mode')
# measurements
for descriptor in Channel.iter_descriptors():
if not descriptor.readable:
continue
longopt = '--%s' % descriptor.key
shortopt = None
if descriptor.short_key is not None:
shortopt = '-%s' % descriptor.short_key
kwargs= {
'dest': descriptor.key,
'action': 'store_true',
'help': 'enable %s measurement' % descriptor,
}
try:
group.add_option(longopt, shortopt, **kwargs)
except OptionConflictError:
group.add_option(longopt, **kwargs)
return group
def configure_logging(self):
super(Command, self).configure_logging()
datalogger_name = '%s.data' % self.get_logger_name()
datalogger = logging.getLogger(datalogger_name)
datalogger.propagate = False
datalogger.setLevel(logging.DEBUG)
data_streamhandler = logging.StreamHandler(sys.stdout)
datalogger.addHandler(data_streamhandler)
if self.options.datafile:
data_filehandler = logging.FileHandler(self.options.datafile)
datalogger.addHandler(data_filehandler)
self.datalogger = datalogger
def stop(self, signal, frame):
self.logger.info(u"stopping on signal %s..." % signal)
if hasattr(self, 'worker'):
self.worker.stop()
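    # Runs the worker, applies the initial mode/frequency commands if given, then periodically enqueues a read of all enabled channel values until the worker stops.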
def execute(self):
ValueChangeEvent.connect(self.on_value_changed)
channel = self.make_channel()
self.worker.run()
mode = self.options.mode
if mode is not None:
mode_variable = channel.get_variable('mode')
mode_variable.set_command(mode)
self.worker.enqueue(WriteChannelValue, variable=mode_variable)
freq = self.options.set_freq
if freq is not None:
freq_variable = channel.get_variable('frequency')
freq_variable.set_command(freq)
self.worker.enqueue(WriteChannelValue, variable=freq_variable)
while self.worker.is_alive():
task = self.worker.enqueue(ReadChannelValues, channel=channel)
task.wait(blocking=False, timeout=2)
time.sleep(self.options.sleep)
def make_channel(self):
channel = Channel()
for variable in channel.get_variables():
enabled = getattr(self.options, variable.descriptor.key)
variable.enabled = enabled
return channel
def on_value_changed(self, sender, event):
message = self.format_event(event)
self.log_data(message)
def format_event(self, event):
descriptor = event.sender.descriptor
return '%s: %s' % (descriptor.key, descriptor.format_value(event.new_value))
def log_data(self, message):
self.datalogger.info(message)
def main():
sys.exit(Command().run())
if __name__ == '__main__':
main()
| gaftech/fmanalyser | fmanalyser/commands/fmlogger.py | Python | mit | 4,946 |
from unittest import TestCase
from plivo import plivoxml
from tests import PlivoXmlTestCase
class RecordElementTest(TestCase, PlivoXmlTestCase):
def test_set_methods(self):
expected_response = '<Response><Record action="https://foo.example.com" callbackMethod="GET" ' \
'callbackUrl="https://foo.example.com" fileFormat="wav" finishOnKey="#" ' \
'maxLength="10" method="GET" playBeep="false" recordSession="false" ' \
'redirect="false" startOnDialAnswer="false" timeout="100" transcriptionMethod="GET" ' \
'transcriptionType="hybrid" transcriptionUrl="https://foo.example.com"/>' \
'</Response>'
action = 'https://foo.example.com'
method = 'GET'
fileFormat = 'wav'
redirect = False
timeout = 100
maxLength = 10
recordSession = False
startOnDialAnswer = False
playBeep = False
finishOnKey = '#'
transcriptionType = 'hybrid'
transcriptionUrl = 'https://foo.example.com'
transcriptionMethod = 'GET'
callbackUrl = 'https://foo.example.com'
callbackMethod = 'GET'
element = plivoxml.ResponseElement()
response = element.add(
plivoxml.RecordElement().set_action(action).set_method(method)
.set_file_format(fileFormat).set_redirect(redirect).set_timeout(
timeout).set_max_length(maxLength).set_play_beep(playBeep)
.set_finish_on_key(finishOnKey).set_record_session(recordSession).
set_start_on_dial_answer(startOnDialAnswer).set_transcription_type(
transcriptionType).set_transcription_url(transcriptionUrl)
.set_transcription_method(transcriptionMethod).set_callback_url(
callbackUrl).set_callback_method(callbackMethod)).to_string(False)
self.assertXmlEqual(response, expected_response)
| plivo/plivo-python | tests/xml/test_recordElement.py | Python | mit | 1,989 |
#!/usr/local/munkireport/munkireport-python2
# encoding: utf-8
from . import display
from . import prefs
from . import constants
from . import FoundationPlist
from munkilib.purl import Purl
from munkilib.phpserialize import *
import subprocess
import pwd
import sys
import hashlib
import platform
from urllib import urlencode
import re
import time
import os
# PyLint cannot properly find names inside Cocoa libraries, so issues bogus
# No name 'Foo' in module 'Bar' warnings. Disable them.
# pylint: disable=E0611
from Foundation import NSArray, NSDate, NSMetadataQuery, NSPredicate
from Foundation import CFPreferencesAppSynchronize
from Foundation import CFPreferencesCopyAppValue
from Foundation import CFPreferencesCopyKeyList
from Foundation import CFPreferencesSetValue
from Foundation import kCFPreferencesAnyUser
from Foundation import kCFPreferencesCurrentUser
from Foundation import kCFPreferencesCurrentHost
from Foundation import NSHTTPURLResponse
from SystemConfiguration import SCDynamicStoreCopyConsoleUser
# pylint: enable=E0611
# our preferences "bundle_id"
BUNDLE_ID = "MunkiReport"
class CurlError(Exception):
def __init__(self, status, message):
display_error(message)
finish_run()
def set_verbosity(level):
"""Set verbosity level."""
display.verbose = int(level)
def display_error(msg, *args):
"""Call display error msg handler."""
display.display_error("%s" % msg, *args)
def display_warning(msg, *args):
"""Call display warning msg handler."""
display.display_warning("%s" % msg, *args)
def display_detail(msg, *args):
"""Call display detail msg handler."""
display.display_detail("%s" % msg, *args)
def finish_run():
remove_run_file()
display_detail("## Finished run")
exit(0)
def remove_run_file():
touchfile = '/Users/Shared/.com.github.munkireport.run'
if os.path.exists(touchfile):
os.remove(touchfile)
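# POSTs the url-encoded values to the given URL through munki's Purl wrapper and returns the response body; raises CurlError on connection failures or non-2xx HTTP status codes. Honours Munki's AdditionalHttpHeaders preference when UseMunkiAdditionalHttpHeaders is set.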
def curl(url, values):
options = dict()
options["url"] = url
options["method"] = "POST"
options["content_type"] = "application/x-www-form-urlencoded"
options["body"] = urlencode(values)
options["logging_function"] = display_detail
options["connection_timeout"] = 60
if pref("UseMunkiAdditionalHttpHeaders"):
custom_headers = prefs.pref(constants.ADDITIONAL_HTTP_HEADERS_KEY)
if custom_headers:
options["additional_headers"] = dict()
for header in custom_headers:
m = re.search(r"^(?P<header_name>.*?): (?P<header_value>.*?)$", header)
if m:
options["additional_headers"][m.group("header_name")] = m.group(
"header_value"
)
else:
raise CurlError(
-1,
"UseMunkiAdditionalHttpHeaders defined, "
"but not found in Munki preferences",
)
# Build Purl with initial settings
connection = Purl.alloc().initWithOptions_(options)
connection.start()
try:
while True:
# if we did `while not connection.isDone()` we'd miss printing
# messages if we exit the loop first
if connection.isDone():
break
except (KeyboardInterrupt, SystemExit):
# safely kill the connection then re-raise
connection.cancel()
raise
except Exception, err: # too general, I know
# Let us out! ... Safely! Unexpectedly quit dialogs are annoying...
connection.cancel()
# Re-raise the error as a GurlError
raise CurlError(-1, str(err))
if connection.error != None:
# Gurl returned an error
display.display_detail(
"Download error %s: %s",
connection.error.code(),
connection.error.localizedDescription(),
)
if connection.SSLerror:
display_detail("SSL error detail: %s", str(connection.SSLerror))
display_detail("Headers: %s", connection.headers)
raise CurlError(
connection.error.code(), connection.error.localizedDescription()
)
if connection.response != None and connection.status != 200:
display.display_detail("Status: %s", connection.status)
display.display_detail("Headers: %s", connection.headers)
if connection.redirection != []:
display.display_detail("Redirection: %s", connection.redirection)
connection.headers["http_result_code"] = str(connection.status)
description = NSHTTPURLResponse.localizedStringForStatusCode_(connection.status)
connection.headers["http_result_description"] = description
if str(connection.status).startswith("2"):
return connection.get_response_data()
else:
# there was an HTTP error of some sort.
raise CurlError(
connection.status,
"%s failed, HTTP returncode %s (%s)"
% (
url,
connection.status,
connection.headers.get("http_result_description", "Failed"),
),
)
def get_hardware_info():
"""Uses system profiler to get hardware info for this machine."""
cmd = ["/usr/sbin/system_profiler", "SPHardwareDataType", "-xml"]
proc = subprocess.Popen(
cmd,
shell=False,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, dummy_error) = proc.communicate()
try:
plist = FoundationPlist.readPlistFromString(output)
# system_profiler xml is an array
sp_dict = plist[0]
items = sp_dict["_items"]
sp_hardware_dict = items[0]
return sp_hardware_dict
except BaseException:
return {}
def get_long_username(username):
try:
long_name = pwd.getpwnam(username)[4]
except:
long_name = ""
return long_name.decode("utf-8")
def get_uid(username):
try:
uid = pwd.getpwnam(username)[2]
except:
uid = ""
return uid
def get_computername():
cmd = ["/usr/sbin/scutil", "--get", "ComputerName"]
proc = subprocess.Popen(
cmd,
shell=False,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, unused_error) = proc.communicate()
output = output.strip()
return output.decode("utf-8")
def get_cpuinfo():
cmd = ["/usr/sbin/sysctl", "-n", "machdep.cpu.brand_string"]
proc = subprocess.Popen(
cmd,
shell=False,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, unused_error) = proc.communicate()
output = output.strip()
return output.decode("utf-8")
def get_buildversion():
cmd = ["/usr/bin/sw_vers", "-buildVersion"]
proc = subprocess.Popen(
cmd,
shell=False,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, unused_error) = proc.communicate()
output = output.strip()
return output.decode("utf-8")
def get_uptime():
cmd = ["/usr/sbin/sysctl", "-n", "kern.boottime"]
proc = subprocess.Popen(
cmd,
shell=False,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, unused_error) = proc.communicate()
sec = int(re.sub(".*sec = (\d+),.*", "\\1", output))
up = int(time.time() - sec)
return up if up > 0 else -1
def set_pref(pref_name, pref_value):
"""Sets a preference, See prefs.py for details."""
CFPreferencesSetValue(
pref_name,
pref_value,
BUNDLE_ID,
kCFPreferencesAnyUser,
kCFPreferencesCurrentHost,
)
CFPreferencesAppSynchronize(BUNDLE_ID)
print "set pref"
try:
CFPreferencesSetValue(
pref_name,
pref_value,
BUNDLE_ID,
kCFPreferencesAnyUser,
kCFPreferencesCurrentHost,
)
CFPreferencesAppSynchronize(BUNDLE_ID)
except Exception:
pass
def pref(pref_name):
"""Return a preference.
See prefs.py for details
"""
pref_value = CFPreferencesCopyAppValue(pref_name, BUNDLE_ID)
return pref_value
def process(serial, items):
"""Process receives a list of items, checks if they need updating and
updates them if necessary."""
# Sanitize serial
serial = "".join([c for c in serial if c.isalnum()])
# Get prefs
baseurl = pref("BaseUrl") or prefs.pref("SoftwareRepoURL") + "/report/"
hashurl = baseurl + "index.php?/report/hash_check"
checkurl = baseurl + "index.php?/report/check_in"
# Get passphrase
passphrase = pref("Passphrase")
# Get hashes for all scripts
for key, i in items.items():
if i.get("path"):
i["hash"] = getmd5hash(i.get("path"))
# Check dict
check = {}
for key, i in items.items():
if i.get("hash"):
check[key] = {"hash": i.get("hash")}
# Send hashes to server
values = {"serial": serial, "items": serialize(check), "passphrase": passphrase}
server_data = curl(hashurl, values)
# = response.read()
# Decode response
try:
result = unserialize(server_data)
except Exception, e:
display_error("Could not unserialize server data: %s" % str(e))
display_error("Request: %s" % str(values))
display_error("Response: %s" % str(server_data))
return -1
if result.get("error") != "":
display_error("Server error: %s" % result["error"])
return -1
if result.get("info") != "":
display_detail("Server info: %s" % result["info"])
# Retrieve hashes that need updating
total_size = 0
for i in items.keys():
if i in result:
if items[i].get("path"):
try:
f = open(items[i]["path"], "r")
items[i]["data"] = f.read()
except:
display_warning("Can't open %s" % items[i]["path"])
del items[i]
continue
size = len(items[i]["data"])
display_detail("Need to update %s (%s)" % (i, sizeof_fmt(size)))
total_size = total_size + size
else: # delete items that don't have to be uploaded
del items[i]
# Send new files with hashes
if len(items):
display_detail("Sending items (%s)" % sizeof_fmt(total_size))
response = curl(
checkurl,
{"serial": serial, "items": serialize(items), "passphrase": passphrase},
)
display_detail(response)
else:
display_detail("No changes")
def runExternalScriptWithTimeout(
script, allow_insecure=False, script_args=(), timeout=30
):
"""Run a script (e.g. preflight/postflight) and return its exit status.
Args:
script: string path to the script to execute.
allow_insecure: bool skip the permissions check of executable.
args: args to pass to the script.
Returns:
Tuple. (integer exit status from script, str stdout, str stderr).
Raises:
ScriptNotFoundError: the script was not found at the given path.
RunExternalScriptError: there was an error running the script.
"""
from munkilib import utils
if not os.path.exists(script):
raise ScriptNotFoundError("script does not exist: %s" % script)
if not allow_insecure:
try:
utils.verifyFileOnlyWritableByMunkiAndRoot(script)
except utils.VerifyFilePermissionsError, e:
msg = (
"Skipping execution due to failed file permissions "
"verification: %s\n%s" % (script, str(e))
)
raise utils.RunExternalScriptError(msg)
if os.access(script, os.X_OK):
cmd = [script]
if script_args:
cmd.extend(script_args)
proc = subprocess.Popen(
cmd,
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
while timeout > 0:
if proc.poll() is not None:
(stdout, stderr) = proc.communicate()
return (
proc.returncode,
stdout.decode("UTF-8", "replace"),
stderr.decode("UTF-8", "replace"),
)
time.sleep(0.1)
timeout -= 0.1
else:
try:
proc.kill()
except OSError, e:
if e.errno != 3:
raise
raise utils.RunExternalScriptError("%s timed out" % script)
return (0, None, None)
else:
raise utils.RunExternalScriptError("%s not executable" % script)
def rundir(scriptdir, runtype, abort=False, submitscript=""):
"""Run scripts in directory scriptdir runtype is passed to the script if
abort is True, a non-zero exit status will abort munki submitscript is put
at the end of the scriptlist."""
if os.path.exists(scriptdir):
from munkilib import utils
# Get timeout for scripts
scriptTimeOut = 30
if pref("scriptTimeOut"):
scriptTimeOut = int(pref("scriptTimeOut"))
display_detail("# Set custom script timeout to %s seconds" % scriptTimeOut)
# Directory containing the scripts
parentdir = os.path.basename(scriptdir)
display_detail("# Executing scripts in %s" % parentdir)
# Get all files in scriptdir
files = os.listdir(scriptdir)
# Sort files
files.sort()
# Find submit script and stick it on the end of the list
if submitscript:
try:
sub = files.pop(files.index(submitscript))
files.append(sub)
except Exception, e:
display_error("%s not found in %s" % (submitscript, parentdir))
for script in files:
# Skip files that start with a period
if script.startswith("."):
continue
# Concatenate dir and filename
scriptpath = os.path.join(scriptdir, script)
# Skip directories
if os.path.isdir(scriptpath):
continue
try:
# Attempt to execute script
display_detail("Running %s" % script)
result, stdout, stderr = runExternalScriptWithTimeout(
scriptpath,
allow_insecure=False,
script_args=[runtype],
timeout=scriptTimeOut,
)
if stdout:
display_detail(stdout)
if stderr:
display_detail("%s Error: %s" % (script, stderr))
if result:
if abort:
display_detail("Aborted by %s" % script)
exit(1)
else:
display_warning("%s return code: %d" % (script, result))
except utils.ScriptNotFoundError:
pass # Script has disappeared - pass.
except Exception, e:
display_warning("%s: %s" % (script, str(e)))
def sizeof_fmt(num):
for unit in ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]:
if abs(num) < 1000.0:
return "%.0f%s" % (num, unit)
num /= 1000.0
return "%.1f%s" % (num, "YB")
def gethash(filename, hash_function):
"""Calculates the hashvalue of the given file with the given hash_function.
Args:
filename: The file name to calculate the hash value of.
hash_function: The hash function object to use, which was instantiated
before calling this function, e.g. hashlib.md5().
Returns:
The hashvalue of the given file as hex string.
"""
if not os.path.isfile(filename):
return "NOT A FILE"
fileref = open(filename, "rb")
while 1:
chunk = fileref.read(2 ** 16)
if not chunk:
break
hash_function.update(chunk)
fileref.close()
return hash_function.hexdigest()
def getmd5hash(filename):
"""Returns hex of MD5 checksum of a file."""
hash_function = hashlib.md5()
return gethash(filename, hash_function)
def getOsVersion(only_major_minor=True, as_tuple=False):
"""Returns an OS version.
Args:
only_major_minor: Boolean. If True, only include major/minor versions.
as_tuple: Boolean. If True, return a tuple of ints, otherwise a string.
"""
os.environ["SYSTEM_VERSION_COMPAT"] = '0'
cmd = ["/usr/bin/sw_vers -productVersion"]
proc = subprocess.Popen(
cmd,
shell=True,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, unused_error) = proc.communicate()
output = output.strip()
os_version_tuple = output.split(".")
if only_major_minor:
os_version_tuple = os_version_tuple[0:2]
if as_tuple:
return tuple(map(int, os_version_tuple))
else:
return ".".join(os_version_tuple)
def getconsoleuser():
"""Return console user."""
cfuser = SCDynamicStoreCopyConsoleUser(None, None, None)
return cfuser[0]
# End of reportcommon
| munkireport/munkireport-php | public/assets/client_installer/payload/usr/local/munkireport/munkilib/reportcommon.py | Python | mit | 17,507 |
#!/usr/bin/python
from pcitweak.bitstring import BitString
for n in range(0x10):
b = BitString(uint=n, length=4)
print " % 3d 0x%02x %s" % (n, n, b.bin)
| luken/pcitweak | examples/printbin.py | Python | mit | 166 |
import luhn
def test_checksum_len1():
assert luhn.checksum('7') == 7
def test_checksum_len2():
assert luhn.checksum('13') == 5
def test_checksum_len3():
assert luhn.checksum('383') == 3
def test_checksum_len4():
assert luhn.checksum('2827') == 3
def test_checksum_len13():
assert luhn.checksum('4346537657597') == 9
def test_checksum_len14():
assert luhn.checksum('27184931073326') == 1
def test_valid():
assert luhn.verify('356938035643809')
def test_invalid():
assert not luhn.verify('4222222222222222')
def test_generate():
assert luhn.generate('7992739871') == 3
def test_append():
    assert luhn.append('53461861341123') == '534618613411234'
| mmcloughlin/luhn | test.py | Python | mit | 693 |
import unittest2
from zounds.util import simple_in_memory_settings
from .preprocess import MeanStdNormalization, PreprocessingPipeline
import featureflow as ff
import numpy as np
class MeanStdTests(unittest2.TestCase):
def _forward_backward(self, shape):
@simple_in_memory_settings
class Model(ff.BaseModel):
meanstd = ff.PickleFeature(
MeanStdNormalization,
store=False)
pipeline = ff.PickleFeature(
PreprocessingPipeline,
needs=(meanstd,),
store=True)
training = np.random.random_sample((100,) + shape)
_id = Model.process(meanstd=training)
model = Model(_id)
data_shape = (10,) + shape
data = np.random.random_sample(data_shape)
result = model.pipeline.transform(data)
self.assertEqual(data_shape, result.data.shape)
inverted = result.inverse_transform()
self.assertEqual(inverted.shape, data.shape)
np.testing.assert_allclose(inverted, data)
def test_can_process_1d(self):
self._forward_backward((9,))
def test_can_process_2d(self):
self._forward_backward((3, 4))
def test_can_process_3d(self):
self._forward_backward((5, 4, 7))
| JohnVinyard/zounds | zounds/learn/test_meanstd.py | Python | mit | 1,301 |
"""
Django settings for expense_tracker project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%wm#(m4jd8f9iipb)d6@nr#_fr@n8vnsur96#xxs$!0m627ewe'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'tracker.apps.TrackerConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'expense_tracker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'expense_tracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| yusuf-musleh/Expense-Tracker | expense_tracker/expense_tracker/settings.py | Python | mit | 3,157 |
import os
def create_pos_n_neg():
for file_type in ['neg']:
for img in os.listdir(file_type):
if file_type == 'pos':
line = file_type+'/'+img+' 1 0 0 50 50\n'
with open('info.dat','a') as f:
f.write(line)
elif file_type == 'neg':
line = file_type+'/'+img+'\n'
with open('bg.txt','a') as f:
f.write(line)
| Tianyi94/EC601Project_Somatic-Parkour-Game-based-on-OpenCV | Old Code/ControlPart/Create_pos&neg.py | Python | mit | 444 |
import socket
import nlp
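# Minimal threaded TCP server: accepts connections, reads a question (up to 8192 bytes) from each client and replies with the result of NLPProcessor.processQuestion.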
class NLPServer(object):
def __init__(self, ip, port):
self.sock = socket.socket()
self.sock.bind((ip, port))
self.processor = nlp.NLPProcessor()
print "Established Server"
def listen(self):
import thread
self.sock.listen(5)
print "Started listening at port."
while True:
c = self.sock.accept()
cli_sock, cli_addr = c
try:
print 'Got connection from', cli_addr
thread.start_new_thread(self.manageRequest, (cli_sock,))
except Exception, Argument:
print Argument
self.sock.close()
quit()
def manageRequest(self, cli_sock):
data = cli_sock.recv(8192)
result = self.processor.processQuestion(data)
cli_sock.send(str(result))
cli_sock.close()
# server = NLPServer('127.0.0.1', 3369)
import sys
server = NLPServer(str(sys.argv[1]), int(sys.argv[2]))
server.listen()
| jeffw16/elephant | nlp/nlpserver.py | Python | mit | 849 |
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Tests the pooled server
:license: Apache License 2.0
"""
# JSON-RPC library
from jsonrpclib import ServerProxy
from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
from jsonrpclib.threadpool import ThreadPool
# Standard library
import random
import threading
import unittest
# ------------------------------------------------------------------------------
def add(a, b):
return a+b
class PooledServerTests(unittest.TestCase):
"""
These tests verify that the pooled server works correctly
"""
def test_default_pool(self, pool=None):
"""
Tests the default pool
"""
# Setup server
server = PooledJSONRPCServer(("localhost", 0), thread_pool=pool)
server.register_function(add)
# Serve in a thread
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
# Find its port
port = server.socket.getsockname()[1]
# Make the client
client = ServerProxy("http://localhost:{0}".format(port))
# Check calls
for _ in range(10):
a, b = random.random(), random.random()
result = client.add(a, b)
self.assertEqual(result, a+b)
# Close server
server.server_close()
thread.join()
def test_custom_pool(self):
"""
Tests the ability to have a custom pool
"""
# Setup the pool
pool = ThreadPool(2)
pool.start()
self.test_default_pool(pool)
| CloudI/CloudI | src/service_api/python/jsonrpclib/tests/test_server.py | Python | mit | 1,597 |
import re, random, string
from Service import Service
class StringHelper(Service):
articles = ["a", "an", "the", "of", "is"]
def randomLetterString(self, numCharacters = 8):
return "".join(random.choice(string.ascii_letters) for i in range(numCharacters))
def tagsToTuple(self, tags):
return tuple(self.titleCase(tag) for tag in tags.split(",") if tag.strip())
def titleCase(self, s):
wordList = s.split(" ")
result = [wordList[0].capitalize()]
for word in wordList[1:]:
result.append(word in self.articles and word or word.capitalize())
return " ".join(result)
def validEmail(self, email):
return re.match(r"[^@]+@[^@]+\.[^@]+", email)
| adampresley/trackathon | model/StringHelper.py | Python | mit | 668 |
import unittest
from katas.kyu_6.help_the_bookseller import stock_list
class StockListTestCase(unittest.TestCase):
def setUp(self):
self.a = ['ABAR 200', 'CDXE 500', 'BKWR 250', 'BTSQ 890', 'DRTY 600']
self.b = ['A', 'B']
def test_equals(self):
self.assertEqual(stock_list(self.a, self.b), '(A : 200) - (B : 1140)')
def test_equals_2(self):
self.assertEqual(stock_list(self.a, []), '')
def test_equals_3(self):
self.assertEqual(stock_list([], self.b), '')
| the-zebulan/CodeWars | tests/kyu_6_tests/test_help_the_bookseller.py | Python | mit | 518 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Radim Rehurek <[email protected]>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
#
import unittest
import smart_open.utils
class ClampTest(unittest.TestCase):
def test_low(self):
self.assertEqual(smart_open.utils.clamp(5, 0, 10), 5)
def test_high(self):
self.assertEqual(smart_open.utils.clamp(11, 0, 10), 10)
def test_out_of_range(self):
self.assertEqual(smart_open.utils.clamp(-1, 0, 10), 0)
| piskvorky/smart_open | smart_open/tests/test_utils.py | Python | mit | 528 |
# -*- coding: utf-8 -*-
import os, argparse
from flask import Flask
parser = argparse.ArgumentParser() # configure the arguments accepted from the command line
parser.add_argument("--port", default='7000', type=int, help='Port to listen'),
parser.add_argument("--hash-algo", default='sha1', type=str, help='Hashing algorithm to use'),
parser.add_argument("--content-dir", default='UPLOADS', type=str, help='Enable folder to upload'),
parser.add_argument("--secret", default='d41d8cd98f00b204e9800998ecf8427e', type=str, help='secret key'),
args = parser.parse_args()
port = args.port # read the parameters received from the command line
hash_algo = args.hash_algo
content_dir = args.content_dir
secret = args.secret
BASE_DIR = os.path.abspath('.')
if not os.path.exists(os.path.join(BASE_DIR, content_dir)): # if the 'UPLOADS' folder that will hold all uploads does not exist, create it
os.mkdir(content_dir)
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = os.path.join(BASE_DIR, content_dir)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # maximum size of uploaded files (16 MB)
| radioteria/file-server | config.py | Python | mit | 1,264 |
# -*- coding: utf-8 -*-
"""
Python module for generating fake total emission in a magnitude band along a sightline.
This uses the Arepo/Illustris output GFM_Photometrics to get photometric band data,
which may or may not be accurate.
"""
from __future__ import print_function
import math
import os.path as path
import shutil
import h5py
import numpy as np
from . import spectra as ss
def maginJy(mag, band):
"""Convert a magnitude to flux in Jansky, according to wikipedia's table"""
bandfluxes = {'U':1810, 'B':4260, 'V':3640,'K':670,'g':3730,'r':4490,'i':4760 ,'z':4810}
return 10**(mag/(-2.5))*bandfluxes[band]
def apparentflux(DL):
"""Convert flux from absolute magnitudes (flux at 10 pc distance) to apparent flux in Jy.
DL is luminosity distance in Mpc"""
return (10/(DL*1e6))**2
def distance(arcsec, redshift, hubble, OmegaM):
"""Find the size of something in comoving kpc/h from the size on the sky in arcseconds.
"""
#First arcsec to radians
#2 pi radians -> degrees -> arcminute -> arcsecond
rad = 2*math.pi/360./60./60. * arcsec
#Then to physical kpc
atime = 1./(1+redshift)
(_, DA, _) = calculator(hubble*100, OmegaM, redshift)
size = DA * rad * 1000
#Comoving kpc/h
size = size /( atime/ hubble)
return size
class EmissionSpectra(ss.Spectra):
"""Class to compute the emission from stars in B band around the DLA spectrum"""
stellar = {}
def _read_stellar_data(self,fn, band, hhmult=10.):
"""Read the particle data for a single interpolation"""
bands = {'U':0, 'B':1, 'V':2,'K':3,'g':4,'r':5,'i':6,'z':7}
nband = bands[band]
pos = self.snapshot_set.get_data(4,"Position", segment = fn).astype(np.float32)
#Set each stellar radius to the pixel size
hh = hhmult*np.ones(np.shape(pos)[0], dtype=np.float32)
#Find particles we care about
ind = self.particles_near_lines(pos, hh,self.axis,self.cofm)
#print np.size(ind)
#Do nothing if there aren't any, and return a suitably shaped zero array
if np.size(ind) == 0:
raise ValueError("No stars")
pos = pos[ind,:]
hh = hh[ind]
#Find the magnitude of stars in this band
emflux = maginJy(self.snapshot_set.get_data(4,"GFM_StellarPhotometrics", segment = fn).astype(np.float32)[ind][:,nband],band)
fluxx = np.array([ np.sum(emflux[self.particles_near_lines(pos, hh,np.array([ax,]),np.array([cofm,]))]) for (ax, cofm) in zip(self.axis, self.cofm)])
#print np.sum(emflux)
return fluxx
#return (pos, emflux, hh)
def get_emflux(self, band, pixelsz=1):
"""
Get the density weighted flux in each pixel for a given species.
band: rest-frame optical band observed in
pixelsz: Angular size of the pixels in arcseconds
"""
#Mapping from bandname to number
dist = distance(pixelsz, 1./self.atime-1, self.hubble, self.OmegaM)
try:
self._really_load_array((band,dist), self.stellar, "stellar")
emflux = self.stellar[(band,dist)]
except KeyError:
emflux = np.zeros(self.NumLos,dtype=np.float32)
for fn in self.snapshot_set.get_n_segments():
try:
emflux += self._read_stellar_data(fn, band,dist)
except ValueError:
pass
self.stellar[(band,dist)] = emflux
(_,_,DL) = calculator(self.hubble*100, self.OmegaM, 1./self.atime-1)
emflux *= apparentflux(DL)
return emflux
def save_file(self):
"""
Saves spectra to a file, because they are slow to generate.
File is by default to be $snap_dir/snapdir_$snapnum/spectra.hdf5.
"""
#We should make sure we have loaded all lazy-loaded things first.
self._load_all_multihash(self.stellar, "stellar")
self._load_all_multihash(self.tau_obs, "tau_obs")
self._load_all_multihash(self.tau, "tau")
self._load_all_multihash(self.colden, "colden")
try:
self._load_all_multihash(self.colden, "velocity")
except IOError:
pass
try:
if path.exists(self.savefile):
shutil.move(self.savefile,self.savefile+".backup")
f=h5py.File(self.savefile,'w')
except IOError:
try:
f=h5py.File(self.savefile,'w')
except IOError:
raise IOError("Could not open ",self.savefile," for writing")
grp_grid = f.create_group("stellar")
self._save_multihash(self.stellar, grp_grid)
self._save_file(f)
def calculator(H0, Omega_M, zz):
"""Compute luminosity distance for a given cosmology. Assumes flatness.
Freely adapted from James Schombert's python version of Ned Wright's cosmology calculator.
Inputs:
H0 - Hubble constant in km/s/Mpc
Omega_M - Omega_matter
zz - redshift to compute distances to
Returns:
(Comoving distance, angular distancem luminosity distance) (all in physical Mpc)"""
light = 299792.458 # speed of light in km/sec
h = H0/100.
WR = 4.165E-5/(h*h) # includes 3 massless neutrino species, T0 = 2.72528
#Assume flat
WV = 1- Omega_M-WR
#scale factor to compute distance to
az = 1.0/(1.+zz)
n=1000 # number of points in integrals
# do integral over a=1/(1+z) from az to 1 in n steps, midpoint rule
a = np.logspace(np.log10(az), 0, n)
a2H = a*a*np.sqrt(Omega_M/a**3+WR/(a**4)+WV)
#Comoving distance
DCMR = np.trapz(1./a2H, a)
#In Mpc
DC_Mpc = (light/H0) * DCMR
# angular size distance In Mpc
DA_Mpc = (light/H0)*az*DCMR
#Luminosity distance in Mpc
DL_Mpc = DA_Mpc/(az*az)
#print 'The comoving radial distance is %1.1f' % DC_Mpc + ' Mpc'
#print 'The angular size distance D_A is ' + '%1.1f' % DA_Mpc + ' Mpc'
#print 'The luminosity distance D_L is ' + '%1.1f' % DL_Mpc + ' Mpc'
return (DC_Mpc, DA_Mpc, DL_Mpc)
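# Illustrative example (not part of the original module): for H0 = 70 km/s/Mpc,
# Omega_M = 0.3 and z = 2 in a flat universe,
#     (DC, DA, DL) = calculator(70.0, 0.3, 2.0)
# returns the comoving, angular-diameter and luminosity distances in Mpc,
# with DL = (1 + z)**2 * DA.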
| sbird/fake_spectra | fake_spectra/emission.py | Python | mit | 6,065 |
#!/usr/bin/env python3
from django.shortcuts import render
# Create your views here.
from CnbetaApis.datas.Models import *
from CnbetaApis.datas.get_letv_json import get_letv_json
from CnbetaApis.datas.get_youku_json import get_youku_json
from django.views.decorators.csrf import csrf_exempt
from django.http import *
from datetime import timezone, timedelta
import json
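# Builds a list of {id, title, url} dicts for the given related-article ids.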
def getrelate(ids, session):
relateds = session.query(Article).filter(Article.id.in_(ids))
relateds_arr = []
for related in relateds:
relateds_arr.append({
'id': related.id,
'title': related.title,
'url': related.url,
})
return relateds_arr
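# Article feed endpoint: returns at most 'limit' (default 20) of the newest articles, optionally only those with id below 'lastid', serialised as JSON.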
def get_home_data(request):
    if request.method != 'GET':
        return HttpResponseNotAllowed(['GET'])
lastID = request.GET.get('lastid')
limit = request.GET.get('limit') or 20
session = DBSession()
datas = None
if lastID:
datas = session.query(Article).order_by(desc(Article.id)).filter(and_(Article.introduction != None, Article.id < lastID)).limit(limit).all()
else:
datas = session.query(Article).order_by(desc(Article.id)).limit(limit).all()
values = []
for data in datas:
values.append({
'id': data.id,
'title': data.title,
'url': data.url,
'source': data.source,
'imgUrl': data.imgUrl,
'introduction': data.introduction,
'createTime': data.createTime.replace(tzinfo=timezone(timedelta(hours=8))).astimezone(timezone.utc).timestamp(),
'related': getrelate(data.related.split(','), session),
'readCount': data.readCount,
'opinionCount': data.opinionCount,
})
session.close()
return JsonResponse({"result": values})
def get_article_content(request):
    if request.method != 'GET':
        return HttpResponseNotAllowed(['GET'])
article_id = request.GET.get('id')
session = DBSession()
datas = session.query(Article).filter(Article.id == article_id).all()
if not len(datas):
        raise Http404('Article does not exist')
data = datas[0]
result = {'result': {
'id': data.id,
'title': data.title,
'url': data.url,
'imgUrl': data.imgUrl,
'source': data.source,
'introduction': data.introduction,
'createTime': data.createTime.replace(tzinfo=timezone(timedelta(hours=8))).astimezone(timezone.utc).timestamp(),
'related': getrelate(data.related.split(','), session),
'readCount': data.readCount,
'opinionCount': data.opinionCount,
'content': json.loads(data.content),
}}
session.close()
return JsonResponse(result)
@csrf_exempt
def get_video_realUrl(req):
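    # Resolve the directly playable video URL for a supported third-party source (Youku or LeTV).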
    if req.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
source_url = req.POST.get('url')
source_type = req.POST.get('type')
if source_type == "youku":
source_url = get_youku_json(source_url)
elif source_type == "letv":
source_url = get_letv_json(source_url)
else:
        raise Http404('Unsupported video source type')
return JsonResponse({"result": source_url})
| kagenZhao/cnBeta | CnbetaApi/CnbetaApis/views.py | Python | mit | 3,183 |
# -*- coding: utf-8 -*-
from django.utils import six
from sortedone2many.fields import SortedOneToManyField
def inject_extra_field_to_model(from_model, field_name, field):
if not isinstance(from_model, six.string_types):
field.contribute_to_class(from_model, field_name)
return
raise Exception('from_model must be a Model Class')
# app_label, model_name = from_model.split('.')
# from django.apps import apps
# try:
# from_model_cls = apps.get_registered_model(app_label, model_name)
# field.contribute_to_class(from_model_cls, field_name)
# except:
# from django.db.models.signals import class_prepared
# def add_field(sender, **kwargs):
# if sender.__name__ == model_name and sender._meta.app_label == app_label:
# field.contribute_to_class(sender, field_name)
# # TODO: `add_field` is never called. `class_prepared` already fired or never fire??
# class_prepared.connect(add_field)
def add_sorted_one2many_relation(model_one,
model_many,
field_name_on_model_one=None,
related_name_on_model_many=None):
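    # Attach a SortedOneToManyField to model_one (named "<model_many>_set" by default) with a
    # reverse accessor on model_many (named after model_one by default).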
field_name = field_name_on_model_one or model_many._meta.model_name + '_set'
related_name = related_name_on_model_many or model_one._meta.model_name
field = SortedOneToManyField(model_many, related_name=related_name)
field.contribute_to_class(model_one, field_name)
| ShenggaoZhu/django-sortedone2many | sortedone2many/utils.py | Python | mit | 1,509 |
from subprocess import *
import gzip
import string
import os
import time
import ApplePythonReporter
class ApplePythonReport:
vendorId = YOUR_VENDOR_ID
userId = 'YOUR_ITUNES_CONNECT_ACCOUNT_MAIL'
password = 'ITUNES_CONNECT_PASSWORD'
account = 'ACCOUNT_ID'
mode = 'Robot.XML'
dateType = 'Daily'
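    # Column indices into the tab-delimited rows of Apple's Subscription / SubscriptionEvent reports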
eventIndex = 1
activeSubscriberIndex = 16
quantityIndex = 25
subscribers = 0
cancellations = 0
activeSubscribers = 0
maxAttempts = 5
def __init__(self, reportDate):
self.DownloadSubscriptionEventReport(reportDate)
self.DownloadSubscriptionReport(reportDate)
self.FetchSubscriptionEventData(reportDate)
self.FetchSubscriptionData(reportDate)
self.CleanUp(reportDate)
def DownloadSubscriptionEventReport(self, date):
print 'Downloading Apple Financial Report for Subscriptions (' + date + ')..'
credentials = (self.userId, self.password, self.account, self.mode)
command = 'Sales.getReport, {0},SubscriptionEvent,Summary,{1},{2}'.format(self.vendorId, self.dateType, date)
try:
ApplePythonReporter.output_result(ApplePythonReporter.post_request(ApplePythonReporter.ENDPOINT_SALES,
credentials, command))
except Exception:
pass
#return iter(p.stdout.readline, b'')
def DownloadSubscriptionReport(self, date):
print 'Downloading Apple Financial Report for Active Users (' + date + ')..'
credentials = (self.userId, self.password, self.account, self.mode)
command = 'Sales.getReport, {0},Subscription,Summary,{1},{2}'.format(self.vendorId, self.dateType, date)
try:
ApplePythonReporter.output_result(ApplePythonReporter.post_request(ApplePythonReporter.ENDPOINT_SALES,
credentials, command))
except:
pass
#return iter(p.stdout.readline, b'')
    #Parse the downloaded report and extract the needed values (cancellations and new subscribers)
def FetchSubscriptionEventData(self, date):
fileName = 'Subscription_Event_'+self.vendorId+'_' + date + '.txt'
attempts = 0
while not os.path.isfile(fileName):
if(attempts >= self.maxAttempts):
break
attempts += 1
time.sleep(1)
if os.path.isfile(fileName):
print 'Fetching SubscriptionEvents..'
with open(fileName, 'rb') as inF:
text = inF.read().splitlines()
for row in text[1:]:
line = string.split(row, '\t')
# print line[self.eventIndex].__str__()
if line[0].__str__().endswith(date[-2:]):
if line[self.eventIndex] == 'Cancel':
self.cancellations += int(line[self.quantityIndex])
if line[self.eventIndex] == 'Subscribe':
self.subscribers += int(line[self.quantityIndex])
else:
print 'SubscriptionEvent: There were no sales for the date specified'
    # Parse the downloaded report and extract the needed values (active users)
def FetchSubscriptionData(self, date):
fileName = 'Subscription_'+self.vendorId+'_' + date + '.txt'
attempts = 0
while not os.path.isfile(fileName):
if (attempts >= self.maxAttempts):
break
attempts += 1
time.sleep(1)
if os.path.isfile(fileName):
print 'Fetching Subscriptions..'
with open(fileName, 'rb') as inF:
text = inF.read().splitlines()
for row in text[1:]:
line = string.split(row, '\t')
# print line[0].__str__()
self.activeSubscribers += int(line[self.activeSubscriberIndex])
else:
print 'Subscription: There were no sales for the date specified'
def CleanUp(self, date):
if os.path.isfile('Subscription_'+self.vendorId.__str__() +'_' + date + '.txt'):
os.remove('Subscription_'+self.vendorId.__str__()+'_' + date + '.txt')
else:
            print 'Subscription_'+self.vendorId.__str__()+'_' + date + '.txt does not exist: Maybe there were no Sales at the specified date'
if os.path.isfile('Subscription_Event_'+self.vendorId.__str__()+'_' + date + '.txt'):
os.remove('Subscription_Event_'+self.vendorId.__str__()+'_' + date + '.txt')
else:
            print 'Subscription_Event_'+self.vendorId.__str__()+'_' + date + '.txt does not exist: Maybe there were no Sales at the specified date' | Acimaz/Google_Apple_Financial_Reporter | AppleReporter.py | Python | mit | 4,732 |
from django_evolution.mutations import AddField, RenameField
from django.db import models
MUTATIONS = [
RenameField('FileDiff', 'diff', 'diff64', db_column='diff_base64'),
RenameField('FileDiff', 'parent_diff', 'parent_diff64',
db_column='parent_diff_base64'),
AddField('FileDiff', 'diff_hash', models.ForeignKey, null=True,
related_model='diffviewer.FileDiffData'),
AddField('FileDiff', 'parent_diff_hash', models.ForeignKey, null=True,
related_model='diffviewer.FileDiffData'),
]
| reviewboard/reviewboard | reviewboard/diffviewer/evolutions/add_diff_hash.py | Python | mit | 542 |
import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='poloniex',
version='0.1',
packages=[
'poloniex',
'poloniex.wamp',
'poloniex.api'
],
include_package_data=True,
description='Python Poloniex API',
long_description=README,
url='https://github.com/absortium/poloniex.git',
author='Andrey Samokhvalov',
license='MIT',
author_email='[email protected]',
install_requires=[
'asyncio',
'aiohttp',
'autobahn',
'pp-ez',
'requests'
],
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
],
)
| absortium/poloniex-api | setup.py | Python | mit | 909 |
from __future__ import absolute_import
from __future__ import print_function
import sys
import glob
import time
import numpy as np
import pandas as pd
import os.path
import datetime
import re
from keras.preprocessing import sequence
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential, Graph, Model
from keras.models import model_from_json
from keras.layers import Input, merge, Flatten, Dense, Activation, Convolution1D, ZeroPadding1D
#from keras.layers.core import Dense, Dropout, Activation, TimeDistributedDense, Flatten, Reshape, Permute, Merge, Lambda
#from keras.layers.convolutional import Convolution1D, MaxPooling1D, Convolution2D, MaxPooling2D, UpSampling1D, UpSampling2D, ZeroPadding1D
from keras.layers.advanced_activations import ParametricSoftplus, SReLU
from keras.callbacks import ModelCheckpoint, Callback
import matplotlib.pyplot as plt
path = "./training_data_large/" # to make sure signal files are written in same directory as data files
def draw_model(model):
from IPython.display import SVG
from keras.utils.visualize_util import model_to_dot
from keras.utils.visualize_util import plot
#graph = to_graph(model, show_shape=True)
#graph.write_png("UFCNN_1.png")
SVG(model_to_dot(model).create(prog='dot', format='svg'))
plot(model, to_file='UFCNN_1.png')
def print_nodes_shapes(model):
for k, v in model.inputs.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
for k, v in model.nodes.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
for k, v in model.outputs.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
def print_layers_shapes(model):
for l in model.layers:
print("{} : {} : {}".format(type(l), l.input_shape, l.output_shape))
def save_neuralnet (model, model_name):
json_string = model.to_json()
open(path + model_name + '_architecture.json', 'w').write(json_string)
model.save_weights(path + model_name + '_weights.h5', overwrite=True)
yaml_string = model.to_yaml()
with open(path + model_name + '_data.yml', 'w') as outfile:
outfile.write( yaml_string)
def load_neuralnet(model_name):
"""
reading the model from disk - including all the trained weights and the complete model design (hyperparams, planes,..)
"""
arch_name = path + model_name + '_architecture.json'
weight_name = path + model_name + '_weights.h5'
if not os.path.isfile(arch_name) or not os.path.isfile(weight_name):
print("model_name given and file %s and/or %s not existing. Aborting." % (arch_name, weight_name))
sys.exit()
print("Loaded model: ",model_name)
model = model_from_json(open(arch_name).read())
model.load_weights(weight_name)
return model
def ufcnn_model_concat(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
activation="softplus",
init="lecun_uniform"):
#model = Graph()
#model.add_input(name='input', input_shape=(None, features))
main_input = Input(name='input', shape=(None, features))
#########################################################
#model.add_node(ZeroPadding1D(2), name='input_padding', input='input') # to avoid lookahead bias
input_padding = (ZeroPadding1D(2))(main_input) # to avoid lookahead bias
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features)), name='conv1', input='input_padding')
#model.add_node(Activation(activation), name='relu1', input='conv1')
conv1 = Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features))(input_padding)
relu1 = (Activation(activation))(conv1)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv2', input='relu1')
#model.add_node(Activation(activation), name='relu2', input='conv2')
conv2 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu1)
relu2 = (Activation(activation))(conv2)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv3', input='relu2')
#model.add_node(Activation(activation), name='relu3', input='conv3')
conv3 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu2)
relu3 = (Activation(activation))(conv3)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv4', input='relu3')
#model.add_node(Activation(activation), name='relu4', input='conv4')
conv4 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu3)
relu4 = (Activation(activation))(conv4)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv5', input='relu4')
#model.add_node(Activation(activation), name='relu5', input='conv5')
conv5 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu4)
relu5 = (Activation(activation))(conv5)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv6',
# inputs=['relu3', 'relu5'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu6', input='conv6')
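    # NOTE: the Graph-API version above concatenated along the last (feature) axis
    # (concat_axis=-1); the functional rewrite below uses concat_axis=1, i.e. the time axis.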
conv6 = merge([relu3, relu5], mode='concat', concat_axis=1)
relu6 = (Activation(activation))(conv6)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv7',
# inputs=['relu2', 'relu6'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu7', input='conv7')
conv7 = merge([relu2, relu6], mode='concat', concat_axis=1)
relu7 = (Activation(activation))(conv7)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv8',
# inputs=['relu1', 'relu7'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu8', input='conv8')
conv8 = merge([relu1, relu7], mode='concat', concat_axis=1)
relu8 = (Activation(activation))(conv8)
#########################################################
if regression:
#########################################################
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_output(name='output', input='conv9')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
output = conv9
#main_output = conv9.output
else:
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_node(Activation('softmax'), name='activation', input='conv9')
#model.add_output(name='output', input='activation')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
activation = (Activation('softmax'))(conv9)
#main_output = activation.output
output = activation
#model.compile(optimizer=optimizer, loss={'output': loss})
model = Model(input=main_input, output=output)
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model_deconv(sequence_length=5000,
features=4,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = False,
class_mode=None,
activation="softplus",
init="lecun_uniform"):
#model = Graph()
#model.add_input(name='input', input_shape=(None, features))
main_input = Input(name='input', shape=(None, features))
#########################################################
#model.add_node(ZeroPadding1D(2), name='input_padding', input='input') # to avoid lookahead bias
input_padding = (ZeroPadding1D(2))(main_input) # to avoid lookahead bias
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features)), name='conv1', input='input_padding')
#model.add_node(Activation(activation), name='relu1', input='conv1')
conv1 = Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features))(input_padding)
relu1 = (Activation(activation))(conv1)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv2', input='relu1')
#model.add_node(Activation(activation), name='relu2', input='conv2')
conv2 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu1)
relu2 = (Activation(activation))(conv2)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv3', input='relu2')
#model.add_node(Activation(activation), name='relu3', input='conv3')
conv3 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu2)
relu3 = (Activation(activation))(conv3)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv4', input='relu3')
#model.add_node(Activation(activation), name='relu4', input='conv4')
conv4 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu3)
relu4 = (Activation(activation))(conv4)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv5', input='relu4')
#model.add_node(Activation(activation), name='relu5', input='conv5')
conv5 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu4)
relu5 = (Activation(activation))(conv5)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv6',
# inputs=['relu3', 'relu5'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu6', input='conv6')
conv6 = merge([relu3, relu5], mode='concat', concat_axis=1)
relu6 = (Activation(activation))(conv6)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv7',
# inputs=['relu2', 'relu6'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu7', input='conv7')
conv7 = merge([relu2, relu6], mode='concat', concat_axis=1)
relu7 = (Activation(activation))(conv7)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv8',
# inputs=['relu1', 'relu7'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu8', input='conv8')
conv8 = merge([relu1, relu7], mode='concat', concat_axis=1)
relu8 = (Activation(activation))(conv8)
#########################################################
if regression:
#########################################################
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_output(name='output', input='conv9')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
output = conv9
#main_output = conv9.output
else:
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_node(Activation('softmax'), name='activation', input='conv9')
#model.add_output(name='output', input='activation')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
activation = (Activation('softmax'))(conv9)
#main_output = activation.output
output = activation
#model.compile(optimizer=optimizer, loss={'output': loss})
model = Model(input=main_input, output=output)
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model_seq(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
init="lecun_uniform"):
model = Sequential()
model.add(ZeroPadding1D(2, input_shape=(None, features)))
#########################################################
model.add(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init))
model.add(Activation('relu'))
model.add(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init))
model.add(Activation('sigmoid'))
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
init="lecun_uniform",
mode='concat'):
if mode == 'concat':
return ufcnn_model_concat(sequence_length,
features,
nb_filter,
filter_length,
output_dim,
optimizer,
loss,
regression,
class_mode,
init)
else:
raise NotImplemented
def gen_cosine_amp(amp=100, period=25, x0=0, xn=50000, step=1, k=0.0001):
"""Generates an absolute cosine time series with the amplitude
exponentially decreasing
Arguments:
amp: amplitude of the cosine function
period: period of the cosine function
x0: initial x of the time series
xn: final x of the time series
step: step of the time series discretization
k: exponential rate
Ernst 20160301 from https://github.com/fchollet/keras/blob/master/examples/stateful_lstm.py
as a first test for the ufcnn
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
print("Cos. Shape",cos.shape)
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(idx / (2 * np.pi * period))
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
def train_and_predict_regression(model, sequence_length=5000, batch_size=128, epochs=5):
lahead = 1
cos = gen_cosine_amp(xn = sequence_length * 100)
expected_output = np.zeros((len(cos), 1, 1))
for i in range(len(cos) - lahead):
expected_output[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Training')
for i in range(epochs):
print('Epoch', i, '/', epochs)
model.fit({'input': cos, 'output': expected_output},
verbose=1,
nb_epoch=1,
shuffle=False,
batch_size=batch_size)
print('Predicting')
predicted_output = model.predict({'input': cos,}, batch_size=batch_size)
return {'model': model, 'predicted_output': predicted_output, 'expected_output': expected_output}
def treat_X_tradcom(mean):
""" treat some columns of the dataframe together when normalizing the dataframe:
col. 1, 2, 4 ... Mkt Price, Bid price, Ask Price
col 3 and 5 ... Ask & Bid price
"""
result = mean.copy()
#print("Result before max",result)
mkt = mean[1]
bid_px = mean[2]
ask_px = mean[4]
px_max=max(mkt,bid_px,ask_px)
result[1] = px_max
result[2] = px_max
result[4] = px_max
bid = mean[3]
ask = mean[5]
ba_max=max(bid,ask)
result[3] = ba_max
result[5] = ba_max
print("Result after max",result)
return result
def standardize_inputs(source, colgroups=None, mean=None, std=None):
"""
Standardize input features.
Groups of features could be listed in order to be standardized together.
source: Pandas.DataFrame or filename of csv file with features
colgroups: list of lists of groups of features to be standardized together (e.g. bid/ask price, bid/ask size)
returns Xdf ...Pandas.DataFrame, mean ...Pandas.DataFrame, std ...Pandas.DataFrame
"""
import itertools
import types
#if isinstance(source, types.StringTypes):
if isinstance(source, str):
Xdf = pd.read_csv(source, sep=" ", index_col = 0, header = None)
elif isinstance(source, pd.DataFrame):
Xdf = source
else:
raise TypeError
df = pd.DataFrame()
me = pd.DataFrame()
st = pd.DataFrame()
for colgroup in colgroups:
_df,_me,_st = standardize_columns(Xdf[colgroup])
# if mean & std are given, do not multiply with colgroup mean
if mean is not None and std is not None:
_df = Xdf[colgroup]
df = pd.concat([df, _df], axis=1)
me = pd.concat([me, _me])
st = pd.concat([st, _st])
print("In Group me")
print(me)
# _temp_list = list(itertools.chain.from_iterable(colgroups))
separate_features = [col for col in Xdf.columns if col not in list(itertools.chain.from_iterable(colgroups))]
if mean is None and std is None:
_me = Xdf[separate_features].mean()
_df = Xdf[separate_features].sub(_me)
_st = Xdf[separate_features].std()
_df = _df[separate_features].div(_st)
else:
_df = Xdf[separate_features]
df = pd.concat([df, _df], axis=1)
me = pd.concat([me, _me])
st = pd.concat([st, _st])
me = pd.Series(me[0])
st = pd.Series(st[0])
if mean is not None and std is not None:
df = df.sub(mean)
df = df.div(std)
return df, me, st
def standardize_columns(colgroup):
"""
Standardize group of columns together
colgroup: Pandas.DataFrame
returns: Pandas.DataFrames: Colum Group standardized, Mean of the colgroup, stddeviation of the colgroup
"""
_me = np.mean(colgroup.values.flatten())
centered = colgroup.sub(_me)
me = pd.DataFrame(np.full(len(colgroup.columns),_me), index=colgroup.columns)
_st = np.std(colgroup.values.flatten())
standardized = centered.div(_st)
st = pd.DataFrame(np.full(len(colgroup.columns),_st), index=colgroup.columns)
return standardized, me, st
def get_tradcom_normalization(filename, mean=None, std=None):
""" read in all X Data Frames and find mean and std of all columns...
"""
Xdf = pd.read_csv(filename, sep=" ", index_col = 0, header = None)
meanLoc = treat_X_tradcom(Xdf.mean())
print("Mean Loc")
print (meanLoc)
sys.stdout.flush()
if mean is None:
mean = meanLoc
mean = mean.to_frame().transpose()
meanDf=pd.concat([mean, meanLoc.to_frame().transpose()])
mean = meanDf.max()
print("Mean")
print (mean)
sys.stdout.flush()
stdLoc = treat_X_tradcom(Xdf.std())
print("Std Loc")
print (stdLoc)
sys.stdout.flush()
if std is None:
std = stdLoc
std = std.to_frame().transpose()
stdDf=pd.concat([std, stdLoc.to_frame().transpose()])
std = stdDf.max()
print("Std")
print (std)
sys.stdout.flush()
return(mean, std)
def prepare_tradcom_classification(training=True,
ret_type='df',
sequence_length=5000,
features_list=[1,2,3,4],
output_dim=3,
file_list=None,
mean=None,
std=None,
training_count=None):
"""
prepare the datasets for the trading competition. training determines which datasets will be read
returns: X and y: Pandas.DataFrames or np-Arrays storing the X - and y values for the fitting.
TODO: refactor - move file operations to separate functions, move stacking to function,
remove commented blocks and undesired print statements
"""
load_file = {'df': pd.read_pickle,
'stack': np.load,
'flat': np.load}
save_file = {'df': lambda filename, obj: obj.to_pickle(filename),
'stack': lambda filename, obj: np.save(filename, obj),
'flat': lambda filename, obj: np.save(filename, obj)}
print("Features_list",features_list)
Xdf = pd.DataFrame()
ydf = pd.DataFrame()
outfile = "training_data_large/save_"+str(len(file_list))
if training:
outfile += "_train"
else:
if training_count is None:
print("Training count needs to be given for testing")
raise ValueError
if mean is None or std is None:
print("Mean & std to be given for testing")
raise ValueError
outfile += "_"+str(training_count)+"_test"
filetype = '.pickle' if ret_type == 'df' else '.npy'
outfile_X = outfile+"_X" + filetype
outfile_y = outfile+"_y" + filetype
outfile_m = outfile+"_m" + filetype
outfile_s = outfile+"_s" + filetype
if os.path.isfile(outfile_X) and os.path.isfile(outfile_y):
X = load_file[ret_type](outfile_X)
y = load_file[ret_type](outfile_y)
#X = np.load(outfile_X)
#y = np.load(outfile_y)
if training:
mean = pd.Series(np.load(outfile_m))
std = pd.Series(np.load(outfile_s))
print("Found files ", outfile_X , " and ", outfile_y)
return (X,y,mean,std)
for filename in file_list:
signalfile = filename.replace('prod_data','signal')
signalfile = signalfile.replace('txt','csv')
print("Working on Input files: ",filename, ", ",signalfile)
if not os.path.isfile(signalfile):
print("File ",signalfile," is not existing. Aborting.")
sys.exit()
# get the date...
        r = re.compile(r'^\D*(\d*)\D*', re.UNICODE)
date = re.search(r, filename).group(1)
print("Date is ",date)
date_ux = time.mktime(datetime.datetime.strptime(date,"%Y%m%d").timetuple())
# load dataframes and reindex
Xdf_loc = pd.read_csv(filename, sep=" ", header = None,)
# print(Xdf_loc.iloc[:3])
Xdf_loc['Milliseconds'] = Xdf_loc[0]
Xdf_loc['Date'] = pd.to_datetime(date_ux*1000*1000*1000)
# Xdf_loc[0] = pd.to_datetime(date_ux*1000*1000*1000 + Xdf_loc[0]*1000*1000)
# Xdf_loc = Xdf_loc.set_index([0])
Xdf_loc = Xdf_loc.set_index(['Date', 'Milliseconds'], append=False, drop=True)
# print(Xdf_loc.iloc[:3])
Xdf = pd.concat([Xdf, Xdf_loc])
print(Xdf.index[0])
print(Xdf.index[-1])
ydf_loc = pd.read_csv(signalfile, names = ['Milliseconds','signal',], )
# print(ydf_loc.iloc[:3])
#ydf_loc['Milliseconds'] = ydf_loc[0]
ydf_loc['Date'] = pd.to_datetime(date_ux*1000*1000*1000)
#ydf_loc[0] = pd.to_datetime(date_ux*1000*1000*1000 + ydf_loc[0]*1000*1000)
#ydf_loc = ydf_loc.set_index([0])
ydf_loc = ydf_loc.set_index(['Date', 'Milliseconds'], append=False, drop=True)
# print(Xdf_loc.iloc[:3])
ydf = pd.concat([ydf, ydf_loc])
#select by features_list
Xdf = Xdf[features_list]
# print("XDF After")
# print(Xdf)
Xdf, mean, std = standardize_inputs(Xdf, colgroups=[[2, 4], [3, 5]], mean=mean, std=std)
# Xdf, mean, std = standardize_inputs(Xdf, colgroups=[[0, 1], ], mean=mean, std=std)
# if nothing from above, the use the calculated data
print("X-Dataframe after standardization")
print(Xdf)
print("Input check")
print("Mean (should be 0)")
print (Xdf.mean())
print("Variance (should be 1)")
print (Xdf.std())
Xdf_array = Xdf.values
X_xdim, X_ydim = Xdf_array.shape
if ret_type == 'stack':
#start_time = time.time()
X = np.zeros((Xdf.shape[0]-sequence_length+1, sequence_length, len(features_list)))
for i in range(0, Xdf.shape[0]-sequence_length+1):
slice = Xdf.values[i:i+sequence_length]
X[i] = slice
#print("Time for Array Fill ", time.time()-start_time)
print(X.shape)
elif ret_type == 'flat':
X = Xdf_array.reshape((1, Xdf_array.shape[0], Xdf_array.shape[1]))
elif ret_type == 'df':
X = Xdf
else:
raise ValueError
#print(X[-1])
#print(_X[-1])
# print(Xdf.iloc[-5:])
ydf['sell'] = ydf.apply(lambda row: (1 if row['signal'] < -0.9 else 0 ), axis=1)
ydf['buy'] = ydf.apply(lambda row: (1 if row['signal'] > 0.9 else 0 ), axis=1)
ydf['hold'] = ydf.apply(lambda row: (1 if row['buy'] < 0.9 and row['sell'] < 0.9 else 0 ), axis=1)
del ydf['signal']
print("Buy signals:", ydf[ydf['buy'] !=0 ].shape[0])
print("Sell signals:", ydf[ydf['sell'] !=0 ].shape[0])
print("% of activity signals", float((ydf[ydf['buy'] !=0 ].shape[0] + ydf[ydf['sell'] !=0 ].shape[0])/ydf.shape[0]))
if ret_type == 'stack':
y = np.zeros((ydf.shape[0]-sequence_length+1, sequence_length, output_dim))
for i in range(0, ydf.shape[0]-sequence_length+1):
slice = ydf.values[i:i+sequence_length]
y[i] = slice
print(y.shape)
elif ret_type == 'flat':
y = ydf.values
y = y.reshape((1, y.shape[0], y.shape[1]))
elif ret_type == 'df':
y = ydf
else:
raise ValueError
save_file[ret_type](outfile_X, X)
save_file[ret_type](outfile_y, y)
# np.save(outfile_X, X)
# np.save(outfile_y, y)
save_file[ret_type](outfile_m, mean)
save_file[ret_type](outfile_s, std)
#np.save(outfile_m, m)
#np.save(outfile_s, s)
return (X,y,mean,std)
def generator(X, y):
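    # Keras fit_generator helper: yields one trading day at a time as a single sample
    # of shape (1, timesteps, features) for both input and output.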
print("Call to generator")
print(X.index.equals(y.index))
c = 1
#dates = X.index.get_level_values(0).unique()
while True:
for date_idx in X.index.get_level_values(0).unique():
#print(date_idx)
#print(X.loc[date_idx].shape)
#print(y.loc[date_idx].shape)
X_array = X.loc[date_idx].values
y_array = y.loc[date_idx].values
X_samples = X_array.reshape((1, X_array.shape[0], X_array.shape[1]))
y_samples = y_array.reshape((1, y_array.shape[0], y_array.shape[1]))
yield {'input': X_samples, 'output': y_samples}
def train_and_predict_classification(model, sequence_length=5000, features=32, output_dim=3, batch_size=128, epochs=5, name = "model", training_count=3, testing_count=3):
final_loss = 0
file_list = sorted(glob.glob('./training_data_large/prod_data_*v.txt'))
if len(file_list) == 0:
print ("Files ./training_data_large/product_data_*txt and signal_*.csv are needed. Please copy them in the ./training_data_large/ . Aborting.")
sys.exit()
line = []
mean = None
std = None
for j in range(training_count):
filename = file_list[j]
print('Normalizing: ',filename)
# (mean, std) = get_tradcom_normalization(filename = filename, mean = mean, std = std)
    # here I removed some indentation
for j in range(training_count):
filename = file_list[j]
print('Training: ',filename)
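        # NOTE: legacy call - prepare_tradcom_classification now takes features_list/file_list
        # rather than features/filename, so this path would need updating to run.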
X,y = prepare_tradcom_classification(training = True, sequence_length = sequence_length, features = features, output_dim = output_dim, filename = filename, mean = mean, std = std)
# running over all epochs to get the optimizer working well...
history = model.fit({'input': X, 'output': y},
verbose=1,
nb_epoch=epochs,
shuffle=False,
batch_size=batch_size)
print(history.history)
sys.stdout.flush()
final_loss = history.history['loss']
line.extend(final_loss)
save_neuralnet (model, "ufcnn_"+str(j))
plt.figure()
plt.plot(line)
plt.savefig("Convergence.png")
#plt.show()
total_class_count = 0
total_correct_class_count = 0
for k in range(testing_count):
filename = file_list[training_count + k]
print("Predicting: ",filename)
X,y,mean,std = prepare_tradcom_classification(training=False, sequence_length=sequence_length, features=features, output_dim=output_dim, filename=filename, mean=mean, std=std )
predicted_output = model.predict({'input': X,}, batch_size=batch_size, verbose = 2)
#print(predicted_output)
yp = predicted_output['output']
xdim, ydim = yp.shape
## MSE for testing
total_error = 0
correct_class= 0
for i in range (xdim):
delta = 0.
for j in range(ydim):
delta += (y[i][j] - yp[i][j]) * (y[i][j] - yp[i][j])
#print ("Row %d, MSError: %8.5f " % (i, delta/ydim))
total_error += delta
if np.argmax(y[i]) == np.argmax(yp[i]):
correct_class += 1
print ("FIN Correct Class Assignment: %6d /%7d" % (correct_class, xdim))
print ("FIN Final Loss: ", final_loss)
total_class_count += xdim
total_correct_class_count += correct_class
print ("FINFIN Correct Class Assignment: %6d /%7d" % (total_correct_class_count, total_class_count))
return {'model': model, 'predicted_output': predicted_output['output'], 'expected_output': y}
def check_prediction(Xdf, y, yp, mean, std):
""" Check the predicted classes and print results
"""
## MSE for testing
total_error = 0
correct_class= 0
y_pred_class = np.zeros((y.shape[2],))
y_corr_pred_class = np.zeros((y.shape[2],))
y_class = np.zeros((y.shape[2],))
y_labels = np.zeros((y.shape[1], y.shape[2]))
a=['Buy','Sell','Hold']
for i in range (y.shape[1]):
delta = 0.
for j in range(y.shape[2]):
delta += (y[0][i][j] - yp[0][i][j]) * (y[0][i][j] - yp[0][i][j])
total_error += delta
#if np.any(y[0][i] != 0): # some debug output, comment if not needed!
# print("Actual: ", y[0][i])
# print("Predicted: ", yp[0][i])
if np.argmax(y[0][i]) == np.argmax(yp[0][i]):
correct_class += 1
y_corr_pred_class[np.argmax(yp[0][i])] += 1.
y_pred_class[np.argmax(yp[0][i])] += 1.
y_class[np.argmax(y[0][i])] += 1.
y_labels[i][np.argmax(yp[0][i])] = 1
print()
print("Total MSE Error: ", total_error / y.shape[1])
print("Correct Class Assignment: %6d /%7d" % (correct_class, y.shape[1]))
for i in range(y.shape[2]):
print("%4s: Correctly Predicted / Predicted / Total: %6d/%6d/%7d" %(a[i], y_corr_pred_class[i], y_pred_class[i], y_class[i]))
Xdf = Xdf * std
Xdf = Xdf + mean
yp_p = yp.reshape((yp.shape[1],yp.shape[2]))
#print(yp_p)
ydf2 = pd.DataFrame(yp_p, columns=['buy','sell','hold'])
Xdf2 = Xdf.reset_index(drop=True)
Xdf2 = pd.concat([Xdf2,ydf2], axis = 1)
Xdf2['signal'] = 0.
print(Xdf2)
xy_df = pd.concat([Xdf, pd.DataFrame(y_labels, columns=['buy','sell','hold'], index=Xdf.index)], axis=1)
xy_df = xy_df.rename(columns={2: "bidpx_", 3: "bidsz_", 4: "askpx_", 5: "asksz_"})
# store everything in signal
# -1 for short, 1 for long...
Xdf2['signal'] = Xdf2.apply(lambda row: (1 if row['buy'] > row['hold'] and row['buy'] > row['sell'] else 0 ), axis=1)
Xdf2['signal'] = Xdf2.apply(lambda row: (-1 if row['sell'] > row['hold'] and row['sell'] > row['buy'] else row['signal'] ), axis=1)
invested_tics = 0
pnl = 0.
position = 0.
last_row = None
nr_trades = 0
trade_pnl = 0.
for (index, row) in Xdf2.iterrows():
(pnl_, position, is_trade) = calculate_pnl(position, last_row, row, fee_per_roundtrip=0.0)
pnl += pnl_
last_row = row
if position < -0.1 or position > 0.1:
invested_tics +=1
if is_trade:
nr_trades += 1
trade_pnl = 0.
trade_pnl += pnl_
sig_pnl, sig_trades = get_pnl(xy_df)
print("Signals PnL: {}, # of trades: {}".format(sig_pnl, sig_trades))
print ("Nr of trades: %5d /%7d" % (nr_trades, y.shape[1]))
print ("PnL: %8.2f InvestedTics: %5d /%7d" % (pnl, invested_tics, y.shape[1]))
### END
def get_pnl(df, max_position=1, comission=0):
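    # Replay the buy/sell signals in order, keeping the position within +/-max_position,
    # and close any position still open at the end of the day.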
deals = []
pnl = 0
position = 0
df_with_signals = df[(df['sell'] != 0) | (df['buy'] != 0)]
for idx, row in df_with_signals.iterrows():
if row['buy'] == 1 and position < max_position:
print(row)
current_trade = -row['buy'] * row["askpx_"]
position += 1
pnl = pnl + current_trade - comission
deals.append(current_trade)
print("Running PnL: {}, position: {}".format(pnl, position))
elif row['sell'] == 1 and position > -max_position:
print(row)
current_trade = row['sell'] * row["bidpx_"]
position -= 1
pnl = pnl + current_trade - comission
deals.append(current_trade)
print("Running PnL: {}, position: {}".format(pnl, position))
if position == 1:
day_closing_trade = df.iloc[-1]["bidpx_"]
pnl = pnl + day_closing_trade - comission
deals.append(day_closing_trade)
print("Close last hanging deal on the end of the day, PnL: {}, position: {}".format(pnl, position))
elif position == -1:
day_closing_trade = -df.iloc[-1]["askpx_"]
pnl = pnl + day_closing_trade - comission
deals.append(day_closing_trade)
print("Close last hanging deal on the end of the day, PnL: {}, position: {}".format(pnl, position))
print("Check PnL: {} vs {}".format(pnl, np.sum(deals)))
return pnl, len(deals)
def calculate_pnl(position, row, next_row, fee_per_roundtrip=0.):
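    # Mark the running position to market between two consecutive rows and open/flip it when
    # the signal changes; returns (pnl, new position, whether a trade occurred).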
if row is None:
return (0.,0., False)
old_position = position
pnl = 0.
if position < -0.1:
pnl = position * (next_row[4] - row[4]) # ASK
if position > 0.1:
pnl = position * (next_row[2] - row[2]) # BID
signal = row['signal']
# if we are short and need to go long...
if position < -0.1 and signal > 0.1:
position = 0.
# if we are long and need to go short...
if position > 0.1 and signal < -0.1:
position = 0.
trade = False
if position == 0. and abs(signal) > 0.1:
position = signal
if position < -0.1:
pnl = position * (next_row[4] - row[2]) # ASK
if position > 0.1:
pnl = position * (next_row[2] - row[4]) # BID
pnl -= fee_per_roundtrip
trade = True
#print ("SIGNAL:",signal, ", old_position: ", old_position, " position:", position, ", pnl: ",pnl, "Bid: ",row[2],next_row[2],", ASK ",row[4], next_row[4] )
return (pnl, position, trade)
## End calculate_pnl
def get_tracking_data (sequence_length=5000, count=2000, D=10, delta=0.3, omega_w=0.005, omega_ny=0.005):
""" get tracking data for a target moving in a square with 2D side length
delta ... radius of the round target
omega_ ... random noise strength
"""
A = np.array([[1,1,0,0],[0,1,0,0],[0,0,1,1],[0,0,0,1]])
X = np.zeros((count,sequence_length,1))
y = np.zeros((count,sequence_length,2))
for i in range(count):
z_t = np.random.normal(1,.5,4)
g_t = np.random.normal(1,.5,4)
x_t = z_t[0]
xp_t = z_t[1]
y_t = z_t[2]
yp_t = z_t[3]
for j in range(sequence_length):
# reflect at the border of the square with length 2D
if -D + delta < x_t and x_t < D - delta:
xp_new_t = xp_t
elif -D + delta <= x_t:
xp_new_t = -abs(xp_t)
else:
xp_new_t = abs(xp_t)
if -D + delta < y_t and y_t < D - delta:
yp_new_t = yp_t
elif -D + delta <= y_t:
yp_new_t = -abs(yp_t)
else:
yp_new_t = abs(yp_t)
g_t[0] = x_t
g_t[1] = xp_new_t
g_t[2] = y_t
g_t[3] = yp_new_t
w_t = np.random.normal(0.,0.5*omega_w,4)
w_t[1] = 0.
w_t[3] = 0.
ny_t = np.random.normal(0.,0.5*omega_ny,1)
z_t = np.dot(A, g_t) + w_t
x_t = z_t[0]
xp_t = z_t[1]
y_t = z_t[2]
yp_t = z_t[3]
theta = np.arctan(y_t/x_t) + ny_t[0]
# params for the nn
# learn to predict x&y by bearing (theta)
X[i][j][0] = theta
y[i][j][0] = x_t
y[i][j][1] = y_t
#print ("X_T: ", x_t, ", Y_T: ",y_t)
return (X,y)
def get_simulation(write_spans = True):
"""
Make trading competition-like input and output data from the cosine function
"""
from signals import find_all_signals, make_spans, set_positions, pnl
from datetime import date
df = pd.DataFrame(data={"askpx_": np.round(gen_cosine_amp(k=0, period=10, amp=20)[:, 0, 0]+201),
"bidpx_": np.round(gen_cosine_amp(k=0, period=10, amp=20)[:, 0, 0]+200)})
df = find_all_signals(df)
df = make_spans(df, 'Buy')
df = make_spans(df, 'Sell')
print("Simulation PnL", pnl(df))
Xdf = df[["askpx_", "bidpx_"]]
df['buy'] = df['Buy'] if not write_spans else df['Buys']
df['sell'] = df['Sell'] if not write_spans else df['Sells']
ydf = df[["buy", "sell"]]
Xdf['Milliseconds'] = Xdf.index
Xdf['Date'] = pd.to_datetime(date.today())
Xdf = Xdf.set_index(['Date', 'Milliseconds'], append=False, drop=True)
#print(Xdf.index[0:100])
ydf['Milliseconds'] = ydf.index
ydf['Date'] = pd.to_datetime(date.today())
ydf = ydf.set_index(['Date', 'Milliseconds'], append=False, drop=True)
#print(ydf.index[0:100])
Xdf, mean, std = standardize_inputs(Xdf, colgroups=[["askpx_", "bidpx_"], ])
ydf['hold'] = ydf.apply(lambda row: (1 if row['buy'] == 0 and row['sell'] == 0 else 0 ), axis=1)
print("Buy signals:", ydf[ydf['buy'] !=0 ].shape[0])
print("Sell signals:", ydf[ydf['sell'] !=0 ].shape[0])
print("% of activity signals", float(ydf[ydf['buy'] !=0 ].shape[0] + ydf[ydf['sell'] !=0 ].shape[0])/float(ydf.shape[0]))
print(Xdf.shape, Xdf.columns)
print(ydf.shape, ydf.columns)
return (Xdf,ydf,mean,std)
#########################################################
## Test the net with damped cosine / remove later...
#########################################################
if len(sys.argv) < 2 :
print ("Usage: UFCNN1.py action with action from [cos_small, cos, tradcom, tradcom_simple, tracking] [model_name]")
print(" ... with model_name = name of the saved file (without addition like _architecture...) to load the net from file")
sys.exit()
action = sys.argv[1]
if len(sys.argv) == 3:
model_name = sys.argv[2]
else:
model_name = None
sequence_length = 64        # same as in Roni Mittelman's paper - this is 2 times 32 - a line in Roni's input contains 33 numbers, but 1 is time and is omitted
features = 1 # guess changed Ernst 20160301
nb_filter = 150 # same as in Roni Mittelman's paper
filter_length = 5 # same as in Roni Mittelman's paper
output_dim = 1 # guess changed Ernst 20160301
if action == 'cos_small':
print("Running model: ", action)
UFCNN_1 = ufcnn_model(sequence_length=sequence_length)
print_nodes_shapes(UFCNN_1)
case_1 = train_and_predict_regression(UFCNN_1, sequence_length=sequence_length)
    print('Plotting Results')
plt.figure(figsize=(18,3))
plt.plot(case_1['expected_output'].reshape(-1)[-10000:]) #, predicted_output['output'].reshape(-1))
plt.plot(case_1['predicted_output']['output'].reshape(-1)[-10000:])
#plt.savefig('sinus.png')
plt.show()
if action == 'cos':
print("Running model: ", action)
UFCNN_2 = ufcnn_model()
print_nodes_shapes(UFCNN_2)
case_2 = train_and_predict_regression(UFCNN_2)
    print('Plotting Results')
plt.figure(figsize=(18,3))
plt.plot(case_2['expected_output'].reshape(-1)[-10000:]) #, predicted_output['output'].reshape(-1))
plt.plot(case_2['predicted_output']['output'].reshape(-1)[-10000:])
#plt.savefig('sinus.png')
plt.show()
if action == 'tradcom':
print("Running model: ", action)
sequence_length = 500
features = 4
output_dim = 3
# Roni used rmsprop
sgd = SGD(lr=0.005, decay=1e-6, momentum=0.9, nesterov=True)
UFCNN_TC = ufcnn_model(regression = False, output_dim=output_dim, features=features,
loss="categorical_crossentropy", sequence_length=sequence_length, optimizer=sgd )
#print_nodes_shapes(UFCNN_TC)
case_tc = train_and_predict_classification(UFCNN_TC, features=features, output_dim=output_dim, sequence_length=sequence_length, epochs=50, training_count=10, testing_count = 6 )
if action == 'tracking':
print("Running model: ", action)
sequence_length = 5000
count=20
output_dim = 2
# Roni used rmsprop
sgd = SGD(lr=0.00005, decay=1e-6, momentum=0.9, nesterov=True)
rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)
model = ufcnn_model_concat(regression = True, output_dim=output_dim, features=features,
loss="mse", sequence_length=sequence_length, optimizer=rms )
#print_nodes_shapes(UFCNN_TC)
(X,y) = get_tracking_data (sequence_length=sequence_length, count=count)
X = np.subtract(X,X.mean())
y = np.subtract(y,y.mean())
#plt.figure()
#plt.plot(x1, y1)
#plt.savefig("TrackingTracking.png")
history = model.fit({'input': X, 'output': y},
verbose=1,
nb_epoch=300)
print(history.history)
if action == 'tradcom_simple':
simulation = False # Use True for simulated cosine data, False - for data from files
training_count = 20 # FIXED: Does not work with other numbers - the treatment of X and y in prepare_tradcom_classification needs to be changed
validation_count = 2
testing_count = 8
sequence_length = 5000
#features_list = list(range(0,2)) # list(range(2,6)) #list(range(1,33))
if not simulation:
features_list = list(range(2,6)) ## to run with Bid/Ask price/vol only
features_list = list(range(1,33)) ## FULL
file_list = sorted(glob.glob('./training_data_large/prod_data_*v.txt'))[:training_count]
print ("Training file list ", file_list)
(X, y, mean, std) = prepare_tradcom_classification(training=True,
ret_type='df',
sequence_length=sequence_length,
features_list=features_list,
output_dim=3,
file_list=file_list)
file_list = sorted(glob.glob('./training_data_large/prod_data_*v.txt'))[training_count:training_count+validation_count]
print ("Validation file list ", file_list)
(X_val, y_val, mean_, std_) = prepare_tradcom_classification(training=True,
ret_type='df',
sequence_length=sequence_length,
features_list=features_list,
output_dim=3,
file_list=file_list,
mean=mean,
std=std,
training_count=training_count)
else:
features_list = list(range(0,2))
print("Using simulated data for training...")
(X, y, mean, std) = get_simulation()
#
print("X shape: ", X.shape)
# print(X)
print("Y shape: ", y.shape)
#
# print("Mean")
# print(mean)
# print("Std")
# print(std)
#for _d in generator(X, y):
# print(_d)
sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
#rmsprop = RMSprop (lr=0.00001, rho=0.9, epsilon=1e-06) # for sequence length 500
rmsprop = RMSprop (lr=0.000005, rho=0.9, epsilon=1e-06) # for sequence length 5000
# load the model from disk if model name is given...
if model_name is not None:
model = load_neuralnet(model_name)
else:
model = ufcnn_model_concat(regression = False, output_dim=3, features=len(features_list),
loss="categorical_crossentropy", sequence_length=sequence_length, optimizer=rmsprop )
print_nodes_shapes(model)
#draw_model(model)
#history = model.fit({'input': X, 'output': y},
# verbose=2,
# nb_epoch=5,
# shuffle=False,
# batch_size=1)
start_time = time.time()
epoch = 400
history = model.fit_generator(generator(X, y),
nb_worker=1,
samples_per_epoch=training_count,
verbose=1,
nb_epoch=epoch,
show_accuracy=True,
validation_data=generator(X_val, y_val),
nb_val_samples=validation_count)
print(history.history)
print("--- Fitting: Elapsed: %d seconds per iteration %5.3f" % ( (time.time() - start_time),(time.time() - start_time)/epoch))
save_neuralnet (model, "ufcnn_sim") if simulation else save_neuralnet (model, "ufcnn_concat")
if not simulation:
# and get the files for testing
file_list = sorted(glob.glob('./training_data_large/prod_data_*v.txt'))[training_count:training_count+testing_count]
(X_pred, y_pred, mean_, std_) = prepare_tradcom_classification(training=False,
ret_type='df',
sequence_length=sequence_length,
features_list=features_list,
output_dim=3,
file_list=file_list,
mean=mean,
std=std,
training_count=training_count)
else:
print("Using simulated data for training...")
(X_pred, y_pred, mean_, std_) = get_simulation()
print(X_pred.iloc[0:200])
print(y_pred.iloc[0:200])
i=1
for date_idx in X_pred.index.get_level_values(0).unique():
X_array = X_pred.loc[date_idx].values
y_array = y_pred.loc[date_idx].values
X_samples = X_array.reshape((1, X_array.shape[0], X_array.shape[1]))
y_samples = y_array.reshape((1, y_array.shape[0], y_array.shape[1]))
print(y_samples[0, 0:200, :])
inp = {'input': X_samples, 'output': y_samples}
print("Predicting: day ",i ,": ", date_idx)
predicted_output = model.predict({'input': X_samples,}, batch_size=1, verbose = 2)
check_prediction(X_pred.loc[date_idx], y_samples, predicted_output['output'], mean, std)
i += 1
| lukovkin/ufcnn-keras | models/UFCNN1_REPO_V16_TESTMODE.py | Python | mit | 51,403 |
from sklearn.linear_model import Lasso
def get_lasso_prediction(train_data, train_truth, test_data, test_truth, alpha=1.0, iter_id=0):
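    # Fit an L1-regularised linear model on the training set and return flattened predictions;
    # test_truth and iter_id are unused here but kept for interface compatibility.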
clf = Lasso(alpha=alpha)
clf.fit(train_data, train_truth)
predicted = clf.predict(test_data)
return predicted.ravel()
| rileymcdowell/genomic-neuralnet | genomic_neuralnet/methods/lasso_regression.py | Python | mit | 270 |
import sqlite3
from typing import List
from pyspatial.cs import CoordinatesSystem, Axis, CoordinatesSystemType, AxisOrientation, AxisType
from pyspatial.uom import UnitOfMeasure
from . import db, EPSGException, make_id, parse_id
from .uom import get_unit
def get_coordinates_system(uid: str) -> CoordinatesSystem:
"""
Get the coordinates system with the given unique id.
:param uid: the unique id of the coordinates system
:return: the EPSG coordinates system with the given id.
"""
authority, code = parse_id(uid)
if authority != "EPSG":
raise EPSGException(f"Unsupported authority. Expected 'EPSG'. Got '{authority}'.")
with sqlite3.connect(db, detect_types=sqlite3.PARSE_COLNAMES) as conn:
c = conn.execute('SELECT coord_sys_name, coord_sys_type AS "coord_sys_type [CoordinatesSystemType]" '
'FROM main.epsg_coordinatesystem '
'WHERE coord_sys_code = ?;', (code,))
row = c.fetchone()
if row:
name, cs_type = row
axes = _get_cs_axes(code, conn)
return EPSGCoordinatesSystem(code, name, cs_type, axes)
else:
raise EPSGException(f"No coordinates system with uid '{make_id(code)}'")
def _get_cs_axes(cs_code: int, conn: sqlite3.Connection) -> List[Axis]:
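    # Join the coordinate-axis table with the axis-name table, order by axis order, and build
    # an EPSGAxis per row with its unit resolved through get_unit().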
c = conn.execute('SELECT a.coord_axis_order, '
'a.coord_axis_code, '
'n.coord_axis_name AS "axis_type [AxisType]", '
'a.coord_axis_abbreviation, '
'a.coord_axis_orientation AS "axis_orientation [AxisOrientation]", '
'a.uom_code '
'FROM epsg_coordinateaxis AS a JOIN epsg_coordinateaxisname AS n '
'ON a.coord_axis_name_code = n.coord_axis_name_code '
'WHERE a.coord_sys_code = ? '
'ORDER BY a.coord_axis_order;', (cs_code,))
return [EPSGAxis(row[1], row[2], row[3], row[4], get_unit(make_id(row[5]))) for row in c.fetchall()]
class EPSGAxis(Axis):
def __init__(self, code: int, axis_type: AxisType, abbreviation: str,
orientation: AxisOrientation, unit: UnitOfMeasure):
self.__code = code
self.__axis_type = axis_type
self.__abbreviation = abbreviation
self.__orientation = orientation
self.__unit = unit
@property
def uid(self) -> str:
return make_id(self.__code)
@property
def axis_type(self) -> AxisType:
return self.__axis_type
@property
def abbreviation(self) -> str:
return self.__abbreviation
@property
def orientation(self) -> AxisOrientation:
return self.__orientation
@property
def unit(self) -> UnitOfMeasure:
return self.__unit
class EPSGCoordinatesSystem(CoordinatesSystem):
def __init__(self, code: int, name: str, cs_type: CoordinatesSystemType, axes: List[Axis]):
self.__code = code
self.__name = name
self.__cs_type = cs_type
self.__axes = list(axes)
@property
def uid(self) -> str:
return make_id(self.__code)
@property
def name(self) -> str:
return self.__name
@property
def cs_type(self) -> CoordinatesSystemType:
return self.__cs_type
@property
def dimension(self) -> int:
return len(self.__axes)
def get_axis(self, dim: int) -> Axis:
return self.__axes[dim]
| applequist/pyspatial | pyspatial/epsg/cs.py | Python | mit | 3,473 |
from datetime import datetime
import webapp2
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
from google.appengine.ext import ndb
from models import Email, Subscriber
from google.appengine.api import mail
class LogSenderHandler(InboundMailHandler):
def receive(self, message):
# Rebuild body
content = ""
for content_type, body in message.bodies('text/html'):
content += body.decode()
# Save email
email = Email(parent=Email.get_root(),
sender=message.sender,
subject=message.subject,
content=content,
# Correct format would be "%a, %d %b %Y %H:%M:%S %z", but "%z" has issues...
date=datetime.strptime(message.date[:-6], "%a, %d %b %Y %H:%M:%S"))
email.put()
# See if any subscriber wants this email
subscribers = Subscriber.query()
for subscriber in subscribers:
for word in subscriber.words:
if word in email.content:
subscriber.forwarded.append(email)
subscriber.put()
mail.send_mail(sender="[email protected]",
to=subscriber.email,
subject=email.subject,
body=content)
break
app = webapp2.WSGIApplication([LogSenderHandler.mapping()], debug=True)
| LaercioAsano/gae-filtered-forwarding | handle_email.py | Python | mit | 1,480 |
from model import User
from geo.geomodel import geotypes
def get(handler, response):
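    # Return every user whose stored location falls inside the lat/lng bounding box from the query string.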
lat1 = handler.request.get('lat1')
lon1 = handler.request.get('lng1')
lat2 = handler.request.get('lat2')
lon2 = handler.request.get('lng2')
response.users = User.bounding_box_fetch(
User.all(),
geotypes.Box(float(lat1),float(lon2),float(lat2),float(lon1)),
)
| globalspin/haemapod | haemapod/handlers/people/bounding_box.py | Python | mit | 365 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dummy conftest.py for graph_stix.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
from __future__ import print_function, absolute_import, division
import pytest
| arangaraju/graph-stix | tests/conftest.py | Python | mit | 316 |
from io import BytesIO
from itertools import groupby
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from flask import make_response, render_template, abort
from webapp import app
from webapp.evaluation import *
from webapp.ioutils import *
from webapp import config
@app.route('/')
def index():
experiments = get_experiments_list()
# group by date, newest first
experiments = sorted(experiments, key=lambda r: r.timestamp.date(), reverse=True)
experiments = [(date, list(items)) for date, items in groupby(experiments, lambda r: r.timestamp.date())]
# for each date sort its results, best first
experiments = [(date, sorted(items, key=lambda r: r.score, reverse=True))
for date, items in experiments]
return render_template('overview.html', experiments=experiments, score_name=config.score_name)
@app.route('/<timestamp>')
def details(timestamp):
# will fail with 404 if exp not known
get_labels_predictions(timestamp)
return render_template('details.html', timestamp=timestamp)
@app.route("/<timestamp>/norm_confusions")
def normalized_confusion_matrix(timestamp):
test_labels, test_predictions = get_labels_predictions(timestamp)
matrix_fig = plot_normalized_confusion_matrix(test_labels, test_predictions)
return serve_matplotlib_fig(matrix_fig)
@app.route("/<timestamp>/importances")
def feature_importances(timestamp):
features, importances = get_feature_importances(timestamp)
importance_fig = plot_feature_importances(features, importances)
return serve_matplotlib_fig(importance_fig)
@app.route("/<timestamp>/precision-recall")
def precision_recall(timestamp):
test_labels, test_predictions = get_labels_predictions(timestamp)
prec_recall_fig = plot_precision_recall_n(test_labels, test_predictions)
return serve_matplotlib_fig(prec_recall_fig)
@app.route("/<timestamp>/precision-cutoff")
def precision_cutoff(timestamp):
test_labels, test_predictions = get_labels_predictions(timestamp)
prec_cutoff_fig = plot_precision_cutoff(test_labels, test_predictions)
return serve_matplotlib_fig(prec_cutoff_fig)
@app.route("/<timestamp>/ROC")
def ROC(timestamp):
test_labels, test_predictions = get_labels_predictions(timestamp)
roc_fig = plot_ROC(test_labels, test_predictions)
return serve_matplotlib_fig(roc_fig)
@app.route("/growth")
def growth():
experiments = get_experiments_list()
# group by date, newest first
experiments = sorted(experiments, key=lambda r: r.timestamp.date(), reverse=True)
experiments = [(date, list(items)) for date, items in groupby(experiments, lambda r: r.timestamp.date())]
# only keep best result for each day
experiments = [(date, sorted(items, key=lambda r: r.score, reverse=True)[0])
for date, items in experiments]
experiments = [(date, best.score) for date, best in experiments]
growth_fig = plot_growth(experiments)
return serve_matplotlib_fig(growth_fig)
def serve_matplotlib_fig(fig):
    canvas = FigureCanvas(fig)
png_output = BytesIO()
canvas.print_png(png_output)
response = make_response(png_output.getvalue())
response.headers['Content-Type'] = 'image/png'
return response
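# Illustrative sketch (commented out so the original app is unchanged): any new view
# can reuse serve_matplotlib_fig to return a PNG response; the route and figure below
# are assumptions for demonstration only.
# @app.route("/demo-histogram")
# def demo_histogram():
#     fig, ax = plt.subplots()
#     ax.hist([1, 1, 2, 3, 5, 8, 13], bins=5)
#     return serve_matplotlib_fig(fig)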
| dssg/cincinnati2015-public | evaluation/webapp/views.py | Python | mit | 3,341 |
import logging
from django.core.management.base import BaseCommand
from notifications.engine import send_all
class Command(BaseCommand):
help = "Emit queued notices."
def handle(self, *args, **options):
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
logging.info("-" * 72)
send_all(*args)
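# Illustrative usage (assumption: the standard Django management-command invocation,
# typically scheduled via cron or similar):
#   python manage.py emit_notices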
| haystack/eyebrowse-server | notifications/management/commands/emit_notices.py | Python | mit | 342 |
from __future__ import annotations
import abc
import shutil
import functools
from pathlib import Path
import urllib.parse
from typing import (
Callable, Any, TypeVar, cast, Tuple, Dict, Optional,
Union, Hashable,
)
import logging
from edgar_code.types import PathLike, Serializer, UserDict
from edgar_code.util.picklable_threading import RLock
logger = logging.getLogger(__name__)
CacheKey = TypeVar('CacheKey')
CacheReturn = TypeVar('CacheReturn')
CacheFunc = TypeVar('CacheFunc', bound=Callable[..., Any])
class Cache:
@classmethod
def decor(
cls,
obj_store: Callable[[str], ObjectStore[CacheKey, CacheReturn]],
hit_msg: bool = False, miss_msg: bool = False, suffix: str = '',
) -> Callable[[CacheFunc], CacheFunc]:
'''Decorator that creates a cached function
        >>> @Cache.decor(MemoryStore.create())
        ... def foo():
        ...     pass
'''
def decor_(function: CacheFunc) -> CacheFunc:
return cast(
CacheFunc,
functools.wraps(function)(
cls(obj_store, function, hit_msg, miss_msg, suffix)
)
)
return decor_
disabled: bool
#pylint: disable=too-many-arguments
def __init__(
self,
obj_store: Callable[[str], ObjectStore[CacheKey, CacheReturn]],
function: CacheFunc,
hit_msg: bool = False, miss_msg: bool = False, suffix: str = ''
) -> None:
'''Cache a function.
Note this uses `function.__qualname__` to determine the file
name. If this is not unique within your program, define
suffix.
        Note this uses `function.version` when defined, so cached objects
        from different versions of the same function will not collide.
'''
self.function = function
self.name = '-'.join(filter(bool, [
self.function.__qualname__,
suffix,
getattr(self.function, 'version', ''),
]))
self.obj_store = obj_store(self.name)
self.hit_msg = hit_msg
self.miss_msg = miss_msg
self.sem = RLock()
self.__qualname__ = f'Cache({self.name})'
self.disabled = False
def __call__(self, *pos_args: Any, **kwargs: Any) -> Any:
if self.disabled:
return self.function(*pos_args, **kwargs)
else:
with self.sem:
args_key = self.obj_store.args2key(pos_args, kwargs)
if args_key in self.obj_store:
if self.hit_msg:
logger.info('hit %s with %s, %s',
self.name, pos_args, kwargs)
res = self.obj_store[args_key]
else:
if self.miss_msg:
logger.info('miss %s with %s, %s',
self.name, pos_args, kwargs)
res = self.function(*pos_args, **kwargs)
self.obj_store[args_key] = res
return res
def clear(self) -> None:
'''Removes all cached items'''
self.obj_store.clear()
def __str__(self) -> str:
store_type = type(self.obj_store).__name__
return f'Cache of {self.name} with {store_type}'
ObjectStoreKey = TypeVar('ObjectStoreKey')
ObjectStoreValue = TypeVar('ObjectStoreValue')
class ObjectStore(UserDict[ObjectStoreKey, ObjectStoreValue], abc.ABC):
@classmethod
def create(
cls, *args: Any, **kwargs: Any
) -> Callable[[str], ObjectStore[ObjectStoreKey, ObjectStoreValue]]:
'''Curried init. Name will be applied later.'''
@functools.wraps(cls)
def create_(name: str) -> ObjectStore[ObjectStoreKey, ObjectStoreValue]:
return cls(*args, name=name, **kwargs) # type: ignore
return create_
def __init__(self, name: str) -> None:
super().__init__()
self.name = name
@abc.abstractmethod
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> ObjectStoreKey:
# pylint: disable=unused-argument,no-self-use
...
class MemoryStore(ObjectStore[Hashable, Any]):
def __init__(self, name: str):
# pylint: disable=non-parent-init-called
ObjectStore.__init__(self, name)
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Hashable:
# pylint: disable=no-self-use
return to_hashable((args, kwargs))
class FileStore(MemoryStore):
'''An obj_store that persists at ./${CACHE_PATH}/${FUNCTION_NAME}_cache.pickle'''
def __init__(
self, cache_path: PathLike, name: str, serializer: Optional[Serializer] = None,
):
# pylint: disable=non-parent-init-called,super-init-not-called
ObjectStore.__init__(self, name)
if serializer is None:
import pickle
self.serializer = cast(Serializer, pickle)
else:
self.serializer = serializer
self.cache_path = pathify(cache_path) / (self.name + '_cache.pickle')
self.loaded = False
self.data = {}
def load_if_not_loaded(self) -> None:
if not self.loaded:
self.loaded = True
if self.cache_path.exists():
with self.cache_path.open('rb') as fil:
self.data = self.serializer.load(fil)
else:
self.cache_path.parent.mkdir(parents=True, exist_ok=True)
self.data = {}
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Hashable:
# pylint: disable=no-self-use
return to_hashable((args, kwargs))
def commit(self) -> None:
self.load_if_not_loaded()
if self.data:
with self.cache_path.open('wb') as fil:
self.serializer.dump(self.data, fil)
else:
if self.cache_path.exists():
print('deleting ', self.cache_path)
self.cache_path.unlink()
def __setitem__(self, key: Hashable, obj: Any) -> None:
self.load_if_not_loaded()
super().__setitem__(key, obj)
self.commit()
def __delitem__(self, key: Hashable) -> None:
self.load_if_not_loaded()
super().__delitem__(key)
self.commit()
def clear(self) -> None:
self.load_if_not_loaded()
super().clear()
self.commit()
class DirectoryStore(ObjectStore[PathLike, Any]):
'''Stores objects at ./${CACHE_PATH}/${FUNCTION_NAME}/${urlencode(args)}.pickle'''
def __init__(
self, object_path: PathLike, name: str,
serializer: Optional[Serializer] = None
) -> None:
# pylint: disable=non-parent-init-called
ObjectStore.__init__(self, name)
if serializer is None:
import pickle
self.serializer = cast(Serializer, pickle)
else:
self.serializer = serializer
self.cache_path = pathify(object_path) / self.name
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> PathLike:
if kwargs:
args = args + (kwargs,)
fname = urllib.parse.quote(f'{safe_str(args)}.pickle', safe='')
return self.cache_path / fname
def __setitem__(self, path: PathLike, obj: Any) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
with path.open('wb') as fil:
self.serializer.dump(obj, fil)
def __delitem__(self, path: PathLike) -> None:
path.unlink()
def __getitem__(self, path: PathLike) -> Any:
with path.open('rb') as fil:
return self.serializer.load(fil)
def __contains__(self, path: Any) -> bool:
if hasattr(path, 'exists'):
return bool(path.exists())
else:
return False
def clear(self) -> None:
print('deleting')
if hasattr(self.cache_path, 'rmtree'):
cast(Any, self.cache_path).rmtree()
else:
shutil.rmtree(str(self.cache_path))
def to_hashable(obj: Any) -> Hashable:
'''Converts args and kwargs into a hashable type (overridable)'''
try:
hash(obj)
except TypeError:
if hasattr(obj, 'items'):
# turn dictionaries into frozenset((key, val))
# sorting is necessary to make equal dictionaries map to equal things
# sorted(..., key=hash)
return tuple(sorted(
[(keyf, to_hashable(val)) for keyf, val in obj.items()],
key=hash
))
elif hasattr(obj, '__iter__'):
# turn iterables into tuples
return tuple(to_hashable(val) for val in obj)
else:
raise TypeError(f"I don't know how to hash {obj} ({type(obj)})")
else:
return cast(Hashable, obj)
def safe_str(obj: Any) -> str:
'''
    Safe names are compact, unique, urlsafe, and equal when the objects are equal.
str does not work because x == y does not imply str(x) == str(y).
>>> a = dict(d=1, e=1)
>>> b = dict(e=1, d=1)
>>> a == b
True
>>> str(a) == str(b)
False
>>> safe_str(a) == safe_str(b)
True
'''
if isinstance(obj, int):
ret = str(obj)
elif isinstance(obj, float):
ret = str(round(obj, 3))
elif isinstance(obj, str):
ret = repr(obj)
elif isinstance(obj, list):
ret = '[' + ','.join(map(safe_str, obj)) + ']'
elif isinstance(obj, tuple):
ret = '(' + ','.join(map(safe_str, obj)) + ')'
elif isinstance(obj, dict):
ret = '{' + ','.join(sorted(
safe_str(key) + ':' + safe_str(val)
for key, val in obj.items()
)) + '}'
else:
raise TypeError()
return urllib.parse.quote(ret, safe='')
def pathify(obj: Union[str, PathLike]) -> PathLike:
if isinstance(obj, str):
return Path(obj)
else:
return obj
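# Illustrative usage sketch (an addition, not part of the original module); it uses
# only the in-memory backend, so running this file directly has no side effects, and
# it assumes the module's UserDict wrapper behaves like a standard dict. The
# decorated function below is made up for demonstration.
if __name__ == "__main__":
    @Cache.decor(MemoryStore.create())
    def slow_square(x: int) -> int:
        print("computing", x)
        return x * x
    print(slow_square(4))  # first call: miss, prints "computing 4", returns 16
    print(slow_square(4))  # second call: hit, served straight from the MemoryStore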
| charmoniumQ/EDGAR-research | edgar_code/cache.py | Python | mit | 9,951 |
# -*- coding: utf-8 -*-
"""
A function f is defined by the rule that f(n) = n if n < 3 and f(n) = f(n - 1) + 2f(n - 2) + 3f(n - 3) if n >= 3. Write a
procedure that computes f by means of a recursive process. Write a procedure that computes f by means of an iterative
process.
"""
from operator import lt, sub, add, mul
def f_recursive(n):
if lt(n, 3):
return n
r1 = f_recursive(sub(n, 1))
r2 = f_recursive(sub(n, 2))
r3 = f_recursive(sub(n, 3))
return add(
add(r1, mul(2, r2)),
mul(3, r3)
)
def f_iterative(n):
def f_iter(a, b, c, count):
if count == 0:
return c
return f_iter(
add(
add(a, mul(2, b)),
mul(3, c)
),
a,
b,
sub(count, 1),
)
return f_iter(2, 1, 0, n)
def run_the_magic():
N = 5
from timeit import Timer
for n in range(N + 1):
print('n = %(n)s' % locals())
print('(f-recursive %(n)s)' % locals(), f_recursive(n), sep='\n')
print('(f-iterative %(n)s)' % locals(), f_iterative(n), sep='\n')
timer_rec = Timer(stmt="f_recursive(%(n)s)" % locals(), setup="from Chapter1.exercise1_11 import f_recursive")
timer_iter = Timer(stmt="f_iterative(%(n)s)" % locals(), setup="from Chapter1.exercise1_11 import f_iterative")
print(
            'Total execution time over 1,000,000 runs:',
'\t-(f-recursive %(n)s): {}'.format(timer_rec.timeit()) % locals(),
'\t-(f-iterative %(n)s): {}'.format(timer_iter.timeit()) % locals(),
sep='\n',
)
print('-' * 20)
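def check_equivalence(n_max=10):
    """Illustrative sanity check (an addition, not part of the original exercise):
    the recursive and iterative formulations should agree for small n."""
    for n in range(n_max + 1):
        assert f_recursive(n) == f_iterative(n), n
    return True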
if __name__ == '__main__':
run_the_magic()
| aoyono/sicpy | Chapter1/exercises/exercise1_11.py | Python | mit | 1,681 |
from __future__ import absolute_import, division, print_function
import os
from idaskins import UI_DIR
from PyQt5 import uic
from PyQt5.Qt import qApp
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor, QFont, QKeySequence
from PyQt5.QtWidgets import QShortcut, QWidget
Ui_ObjectInspector, ObjectInspectorBase = uic.loadUiType(
os.path.join(UI_DIR, 'ObjectInspector.ui')
)
class ObjectInspector(ObjectInspectorBase):
"""
Rudimentary Qt object inspector.
Allows for easier finding of object names and classes
for usage in QSS stylesheets.
"""
def __init__(self, *args, **kwargs):
super(ObjectInspector, self).__init__(*args, **kwargs)
self._selected_widget = None
self._ui = Ui_ObjectInspector()
self._ui.setupUi(self)
# Make everything monospace.
font = QFont('Monospace')
font.setStyleHint(QFont.TypeWriter)
self._ui.teInspectionResults.setFont(font)
# Register signals.
self._update_key = QShortcut(QKeySequence(Qt.Key_F7), self)
self._ui.btnSelectParent.released.connect(self.select_parent)
self._update_key.activated.connect(self.update_inspection)
def update_inspection(self):
widget = qApp.widgetAt(QCursor.pos())
self.update_selected_widget(widget)
def select_parent(self):
if self._selected_widget:
parent = self._selected_widget.parent()
if parent and parent.inherits('QWidget'):
self.update_selected_widget(parent)
def update_selected_widget(self, widget):
if self._selected_widget:
self._selected_widget.destroyed.disconnect(
self.on_selected_widget_destroyed
)
self._selected_widget = widget
if widget:
self._ui.btnSelectParent.setEnabled(widget.parent() is not None)
self._ui.teInspectionResults.setText((
"Type: {}\n"
"Name: {}\n"
"Number of children: {}\n"
"QSS: {}"
).format(
widget.metaObject().className(),
widget.objectName() or '<none>',
len(widget.children()),
widget.styleSheet() or '<none>',
))
self._selected_widget.destroyed.connect(
self.on_selected_widget_destroyed
)
else:
self._ui.teInspectionResults.setText('<no object under cursor>')
def on_selected_widget_destroyed(self, obj):
self._selected_widget = None
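# Illustrative usage sketch (commented out; assumption: outside IDASkins the widget
# can be hosted by any QApplication):
# from PyQt5.QtWidgets import QApplication
# qt_app = QApplication([])
# inspector = ObjectInspector()
# inspector.show()   # hover any widget and press F7 to inspect it
# qt_app.exec_()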
| zyantific/IDASkins | plugins/idaskins/objectinspector.py | Python | mit | 2,576 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# uzmq documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 7 00:32:37 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
MOCK_MODULES = ['zmq']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
skip_coverage = os.environ.get('SKIP_COVERAGE', None) == 'True'
if on_rtd:
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
CURDIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(CURDIR, '..', '..'))
sys.path.append(os.path.join(CURDIR, '..'))
sys.path.append(os.path.join(CURDIR, '.'))
import uzmq
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'uzmq'
copyright = '2012, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "%s.%s" % (uzmq.version_info[0], uzmq.version_info[1])
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'uzmqdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'uzmq.tex', 'uzmq Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'uzmq', 'uzmq Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'uzmq', 'uzmq Documentation',
'Author', 'uzmq', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'uzmq'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2012, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| benoitc/uzmq | docs/conf.py | Python | mit | 9,957 |
from app.core.helper import create_app
from app.core.db import db
from app.core.json import json_respon
from app.user.views import user_views
from app.user.models import *
from app.user.loginmanager import login_manager
from app.hotel.views import hotel_views
from app.hotel.models import *
from app.reservation.views import reservation_views
from app.reservation.models import *
config = 'app.config'
app = create_app(config)
db.init_app(app)
login_manager.init_app(app)
# register blueprint
app.register_blueprint(user_views)
app.register_blueprint(hotel_views)
app.register_blueprint(reservation_views)
@app.errorhandler(401)
def say_401(error):
return json_respon(code=401, msg="You must login to access this url")
@app.errorhandler(404)
def say_404(error):
return json_respon(code=404, msg=error.description)
@app.errorhandler(405)
def say_405(error):
return json_respon(code=405, msg=error.description)
@app.errorhandler(500)
def say_500(error):
return json_respon(code=500, msg=error.description)
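# Illustrative run sketch (an addition; assumption: a separate run script would
# normally do this, and the host/port values are arbitrary):
if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000, debug=True)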
| dwisulfahnur/hotel-reservation | app/__init__.py | Python | mit | 1,026 |
"""Support to interface with the Plex API."""
from __future__ import annotations
from functools import wraps
import json
import logging
import plexapi.exceptions
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.helpers.network import is_internal_request
from .const import (
COMMON_PLAYERS,
CONF_SERVER_IDENTIFIER,
DISPATCHERS,
DOMAIN as PLEX_DOMAIN,
NAME_FORMAT,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
PLEX_URI_SCHEME,
SERVERS,
TRANSIENT_DEVICE_MODELS,
)
from .media_browser import browse_media
_LOGGER = logging.getLogger(__name__)
def needs_session(func):
"""Ensure session is available for certain attributes."""
@wraps(func)
def get_session_attribute(self, *args):
if self.session is None:
return None
return func(self, *args)
return get_session_attribute
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Plex media_player from a config entry."""
server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
registry = await async_get_registry(hass)
@callback
def async_new_media_players(new_entities):
_async_add_entities(hass, registry, async_add_entities, server_id, new_entities)
unsub = async_dispatcher_connect(
hass, PLEX_NEW_MP_SIGNAL.format(server_id), async_new_media_players
)
hass.data[PLEX_DOMAIN][DISPATCHERS][server_id].append(unsub)
_LOGGER.debug("New entity listener created")
@callback
def _async_add_entities(hass, registry, async_add_entities, server_id, new_entities):
"""Set up Plex media_player entities."""
_LOGGER.debug("New entities: %s", new_entities)
entities = []
plexserver = hass.data[PLEX_DOMAIN][SERVERS][server_id]
for entity_params in new_entities:
plex_mp = PlexMediaPlayer(plexserver, **entity_params)
entities.append(plex_mp)
# Migration to per-server unique_ids
old_entity_id = registry.async_get_entity_id(
MP_DOMAIN, PLEX_DOMAIN, plex_mp.machine_identifier
)
if old_entity_id is not None:
new_unique_id = f"{server_id}:{plex_mp.machine_identifier}"
_LOGGER.debug(
"Migrating unique_id from [%s] to [%s]",
plex_mp.machine_identifier,
new_unique_id,
)
registry.async_update_entity(old_entity_id, new_unique_id=new_unique_id)
async_add_entities(entities, True)
class PlexMediaPlayer(MediaPlayerEntity):
"""Representation of a Plex device."""
def __init__(self, plex_server, device, player_source, session=None):
"""Initialize the Plex device."""
self.plex_server = plex_server
self.device = device
self.player_source = player_source
self.device_make = None
self.device_platform = None
self.device_product = None
self.device_title = None
self.device_version = None
self.machine_identifier = device.machineIdentifier
self.session_device = None
self._device_protocol_capabilities = None
self._previous_volume_level = 1 # Used in fake muting
self._volume_level = 1 # since we can't retrieve remotely
self._volume_muted = False # since we can't retrieve remotely
self._attr_available = False
self._attr_should_poll = False
self._attr_state = STATE_IDLE
self._attr_unique_id = (
f"{self.plex_server.machine_identifier}:{self.machine_identifier}"
)
# Initializes other attributes
self.session = session
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
_LOGGER.debug("Added %s [%s]", self.entity_id, self.unique_id)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(self.unique_id),
self.async_refresh_media_player,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL.format(self.unique_id),
self.async_update_from_websocket,
)
)
@callback
def async_refresh_media_player(self, device, session, source):
"""Set instance objects and trigger an entity state update."""
_LOGGER.debug("Refreshing %s [%s / %s]", self.entity_id, device, session)
self.device = device
self.session = session
if source:
self.player_source = source
self.async_schedule_update_ha_state(True)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier),
)
@callback
def async_update_from_websocket(self, state):
"""Update the entity based on new websocket data."""
self.update_state(state)
self.async_write_ha_state()
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier),
)
def update(self):
"""Refresh key device data."""
if not self.session:
self.force_idle()
if not self.device:
self._attr_available = False
return
self._attr_available = True
try:
device_url = self.device.url("/")
except plexapi.exceptions.BadRequest:
device_url = "127.0.0.1"
if "127.0.0.1" in device_url:
self.device.proxyThroughServer()
self._device_protocol_capabilities = self.device.protocolCapabilities
for device in filter(None, [self.device, self.session_device]):
self.device_make = self.device_make or device.device
self.device_platform = self.device_platform or device.platform
self.device_product = self.device_product or device.product
self.device_title = self.device_title or device.title
self.device_version = self.device_version or device.version
name_parts = [self.device_product, self.device_title or self.device_platform]
if (self.device_product in COMMON_PLAYERS) and self.device_make:
# Add more context in name for likely duplicates
name_parts.append(self.device_make)
if self.username and self.username != self.plex_server.owner:
# Prepend username for shared/managed clients
name_parts.insert(0, self.username)
self._attr_name = NAME_FORMAT.format(" - ".join(name_parts))
def force_idle(self):
"""Force client to idle."""
self._attr_state = STATE_IDLE
if self.player_source == "session":
self.device = None
self.session_device = None
self._attr_available = False
@property
def session(self):
"""Return the active session for this player."""
return self._session
@session.setter
def session(self, session):
self._session = session
if session:
self.session_device = self.session.player
self.update_state(self.session.state)
else:
self._attr_state = STATE_IDLE
@property
@needs_session
def username(self):
"""Return the username of the client owner."""
return self.session.username
def update_state(self, state):
"""Set the state of the device, handle session termination."""
if state == "playing":
self._attr_state = STATE_PLAYING
elif state == "paused":
self._attr_state = STATE_PAUSED
elif state == "stopped":
self.session = None
self.force_idle()
else:
self._attr_state = STATE_IDLE
@property
def _is_player_active(self):
"""Report if the client is playing media."""
return self.state in (STATE_PLAYING, STATE_PAUSED)
@property
def _active_media_plexapi_type(self):
"""Get the active media type required by PlexAPI commands."""
if self.media_content_type is MEDIA_TYPE_MUSIC:
return "music"
return "video"
@property
@needs_session
def session_key(self):
"""Return current session key."""
return self.session.sessionKey
@property
@needs_session
def media_library_title(self):
"""Return the library name of playing media."""
return self.session.media_library_title
@property
@needs_session
def media_content_id(self):
"""Return the content ID of current playing media."""
return self.session.media_content_id
@property
@needs_session
def media_content_type(self):
"""Return the content type of current playing media."""
return self.session.media_content_type
@property
@needs_session
def media_content_rating(self):
"""Return the content rating of current playing media."""
return self.session.media_content_rating
@property
@needs_session
def media_artist(self):
"""Return the artist of current playing media, music track only."""
return self.session.media_artist
@property
@needs_session
def media_album_name(self):
"""Return the album name of current playing media, music track only."""
return self.session.media_album_name
@property
@needs_session
def media_album_artist(self):
"""Return the album artist of current playing media, music only."""
return self.session.media_album_artist
@property
@needs_session
def media_track(self):
"""Return the track number of current playing media, music only."""
return self.session.media_track
@property
@needs_session
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self.session.media_duration
@property
@needs_session
def media_position(self):
"""Return the duration of current playing media in seconds."""
return self.session.media_position
@property
@needs_session
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self.session.media_position_updated_at
@property
@needs_session
def media_image_url(self):
"""Return the image URL of current playing media."""
return self.session.media_image_url
@property
@needs_session
def media_summary(self):
"""Return the summary of current playing media."""
return self.session.media_summary
@property
@needs_session
def media_title(self):
"""Return the title of current playing media."""
return self.session.media_title
@property
@needs_session
def media_season(self):
"""Return the season of current playing media (TV Show only)."""
return self.session.media_season
@property
@needs_session
def media_series_title(self):
"""Return the title of the series of current playing media."""
return self.session.media_series_title
@property
@needs_session
def media_episode(self):
"""Return the episode of current playing media (TV Show only)."""
return self.session.media_episode
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self.device and "playback" in self._device_protocol_capabilities:
return (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_SEEK
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_VOLUME_MUTE
| SUPPORT_BROWSE_MEDIA
)
return SUPPORT_BROWSE_MEDIA | SUPPORT_PLAY_MEDIA
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.setVolume(int(volume * 100), self._active_media_plexapi_type)
self._volume_level = volume # store since we can't retrieve
@property
def volume_level(self):
"""Return the volume level of the client (0..1)."""
if (
self._is_player_active
and self.device
and "playback" in self._device_protocol_capabilities
):
return self._volume_level
return None
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
if self._is_player_active and self.device:
return self._volume_muted
return None
def mute_volume(self, mute):
"""Mute the volume.
Since we can't actually mute, we'll:
- On mute, store volume and set volume to 0
- On unmute, set volume to previously stored volume
"""
if not (self.device and "playback" in self._device_protocol_capabilities):
return
self._volume_muted = mute
if mute:
self._previous_volume_level = self._volume_level
self.set_volume_level(0)
else:
self.set_volume_level(self._previous_volume_level)
def media_play(self):
"""Send play command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.play(self._active_media_plexapi_type)
def media_pause(self):
"""Send pause command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.pause(self._active_media_plexapi_type)
def media_stop(self):
"""Send stop command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.stop(self._active_media_plexapi_type)
def media_seek(self, position):
"""Send the seek command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.seekTo(position * 1000, self._active_media_plexapi_type)
def media_next_track(self):
"""Send next track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipNext(self._active_media_plexapi_type)
def media_previous_track(self):
"""Send previous track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipPrevious(self._active_media_plexapi_type)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
if not (self.device and "playback" in self._device_protocol_capabilities):
raise HomeAssistantError(
f"Client is not currently accepting playback controls: {self.name}"
)
if not self.plex_server.has_token:
_LOGGER.warning(
"Plex integration configured without a token, playback may fail"
)
if media_id.startswith(PLEX_URI_SCHEME):
media_id = media_id[len(PLEX_URI_SCHEME) :]
if media_type == "station":
playqueue = self.plex_server.create_station_playqueue(media_id)
try:
self.device.playMedia(playqueue)
except requests.exceptions.ConnectTimeout as exc:
raise HomeAssistantError(
f"Request failed when playing on {self.name}"
) from exc
return
src = json.loads(media_id)
if isinstance(src, int):
src = {"plex_key": src}
offset = 0
if playqueue_id := src.pop("playqueue_id", None):
try:
playqueue = self.plex_server.get_playqueue(playqueue_id)
except plexapi.exceptions.NotFound as err:
raise HomeAssistantError(
f"PlayQueue '{playqueue_id}' could not be found"
) from err
else:
shuffle = src.pop("shuffle", 0)
offset = src.pop("offset", 0) * 1000
resume = src.pop("resume", False)
media = self.plex_server.lookup_media(media_type, **src)
if media is None:
raise HomeAssistantError(f"Media could not be found: {media_id}")
if resume and not offset:
offset = media.viewOffset
_LOGGER.debug("Attempting to play %s on %s", media, self.name)
playqueue = self.plex_server.create_playqueue(media, shuffle=shuffle)
try:
self.device.playMedia(playqueue, offset=offset)
except requests.exceptions.ConnectTimeout as exc:
raise HomeAssistantError(
f"Request failed when playing on {self.name}"
) from exc
@property
def extra_state_attributes(self):
"""Return the scene state attributes."""
attributes = {}
for attr in (
"media_content_rating",
"media_library_title",
"player_source",
"media_summary",
"username",
):
if value := getattr(self, attr, None):
attributes[attr] = value
return attributes
@property
def device_info(self) -> DeviceInfo:
"""Return a device description for device registry."""
if self.machine_identifier is None:
return None
if self.device_product in TRANSIENT_DEVICE_MODELS:
return DeviceInfo(
identifiers={(PLEX_DOMAIN, "plex.tv-clients")},
name="Plex Client Service",
manufacturer="Plex",
model="Plex Clients",
entry_type=DeviceEntryType.SERVICE,
)
return DeviceInfo(
identifiers={(PLEX_DOMAIN, self.machine_identifier)},
manufacturer=self.device_platform or "Plex",
model=self.device_product or self.device_make,
name=self.name,
sw_version=self.device_version,
via_device=(PLEX_DOMAIN, self.plex_server.machine_identifier),
)
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
is_internal = is_internal_request(self.hass)
return await self.hass.async_add_executor_job(
browse_media,
self.plex_server,
is_internal,
media_content_type,
media_content_id,
)
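# Illustrative note (an addition, kept as comments): the needs_session decorator
# defined above turns "no active session" into a None attribute instead of an
# AttributeError. In isolation the pattern behaves like this sketch:
#
#     class FakePlayer:
#         session = None
#         @property
#         @needs_session
#         def media_title(self):
#             return self.session.title
#
#     FakePlayer().media_title  # -> None while nothing is playing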
| rohitranjan1991/home-assistant | homeassistant/components/plex/media_player.py | Python | mit | 19,911 |
'''
Tests of output_plots.py module
'''
import pytest
import os
import numpy as np
import matplotlib.image as mpimg
from ogusa import utils, output_plots
# Load in test results and parameters
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
base_ss = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'SS_vars_baseline.pkl'))
base_tpi = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'TPI_vars_baseline.pkl'))
base_params = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'model_params_baseline.pkl'))
base_taxfunctions = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'TxFuncEst_baseline.pkl'))
reform_ss = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'SS_vars_reform.pkl'))
reform_tpi = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'TPI_vars_reform.pkl'))
reform_params = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'model_params_reform.pkl'))
reform_taxfunctions = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'TxFuncEst_reform.pkl'))
test_data = [(base_tpi, base_params, reform_tpi, reform_params,
'pct_diff', None, None),
(base_tpi, base_params, reform_tpi, reform_params, 'diff',
None, None),
(base_tpi, base_params, reform_tpi, reform_params, 'cbo',
None, None),
(base_tpi, base_params, reform_tpi, reform_params,
'levels', None, None),
(base_tpi, base_params, None, None, 'levels', None, None),
(base_tpi, base_params, None, None, 'levels', [2040, 2060],
None),
(base_tpi, base_params, None, None, 'levels', None,
'Test plot title')
]
@pytest.mark.parametrize(
'base_tpi,base_params,reform_tpi,reform_parms,plot_type,' +
'vertical_line_years,plot_title',
test_data, ids=['Pct Diff', 'Diff', 'CBO', 'Levels w reform',
'Levels w/o reform', 'Vertical line included',
'Plot title included'])
def test_plot_aggregates(base_tpi, base_params, reform_tpi,
reform_parms, plot_type, vertical_line_years,
plot_title):
fig = output_plots.plot_aggregates(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params, var_list=['Y', 'r'],
plot_type=plot_type, num_years_to_plot=20,
vertical_line_years=vertical_line_years, plot_title=plot_title)
assert fig
test_data = [(base_tpi, base_params, None, None, None, None),
(base_tpi, base_params, reform_tpi, reform_params, None,
None),
(base_tpi, base_params, reform_tpi, reform_params,
[2040, 2060], None),
(base_tpi, base_params, None, None, None,
'Test plot title')
]
def test_plot_aggregates_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.plot_aggregates(
base_tpi, base_params, plot_type='levels', path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
test_data = [(base_tpi, base_params, None, None, None, None, 'levels'),
(base_tpi, base_params, reform_tpi, reform_params, None,
None, 'levels'),
(base_tpi, base_params, reform_tpi, reform_params, None,
None, 'diffs'),
(base_tpi, base_params, reform_tpi, reform_params,
[2040, 2060], None, 'levels'),
(base_tpi, base_params, None, None, None,
'Test plot title', 'levels')
]
@pytest.mark.parametrize(
'base_tpi,base_params,reform_tpi,reform_params,' +
'vertical_line_years,plot_title,plot_type',
test_data, ids=['No reform', 'With reform', 'Differences',
'Vertical line included', 'Plot title included'])
def test_plot_gdp_ratio(base_tpi, base_params, reform_tpi,
reform_params, vertical_line_years, plot_title,
plot_type):
fig = output_plots.plot_gdp_ratio(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params, plot_type=plot_type,
vertical_line_years=vertical_line_years, plot_title=plot_title)
assert fig
def test_plot_gdp_ratio_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.plot_aggregates(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
def test_ability_bar():
fig = output_plots.ability_bar(
base_tpi, base_params, reform_tpi, reform_params,
plot_title=' Test Plot Title')
assert fig
def test_ability_bar_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.ability_bar(
base_tpi, base_params, reform_tpi, reform_params, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
def test_ability_bar_ss():
fig = output_plots.ability_bar_ss(
base_ss, base_params, reform_ss, reform_params,
plot_title=' Test Plot Title')
assert fig
@pytest.mark.parametrize(
'by_j,plot_data', [(True, False), (False, False), (False, True)],
ids=['By j', 'Not by j', 'Plot data'])
def test_ss_profiles(by_j, plot_data):
fig = output_plots.ss_profiles(
base_ss, base_params, reform_ss, reform_params, by_j=by_j,
plot_data=plot_data, plot_title=' Test Plot Title')
assert fig
def test_ss_profiles_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.ss_profiles(
base_ss, base_params, reform_ss, reform_params, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
@pytest.mark.parametrize(
'by_j', [True, False], ids=['By j', 'Not by j'])
def test_tpi_profiles(by_j):
fig = output_plots.tpi_profiles(
base_tpi, base_params, reform_tpi, reform_params, by_j=by_j,
plot_title=' Test Plot Title')
assert fig
test_data = [(base_params, base_ss, None, None, 'levels', None),
(base_params, base_ss, reform_params, reform_ss, 'levels',
None),
(base_params, base_ss, reform_params, reform_ss, 'diff',
None),
(base_params, base_ss, reform_params, reform_ss,
'pct_diff', None),
(base_params, base_ss, reform_params, reform_ss,
'pct_diff', 'Test Plot Title')
]
def test_tpi_profiles_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.tpi_profiles(
base_tpi, base_params, reform_tpi, reform_params, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
@pytest.mark.parametrize(
'base_params,base_ss,reform_params,reform_ss,plot_type,plot_title',
test_data, ids=['Levels', 'Levels w/ reform', 'Differences',
'Pct Diffs', 'Plot title included'])
def test_ss_3Dplot(base_params, base_ss, reform_params, reform_ss,
plot_type, plot_title):
fig = output_plots.ss_3Dplot(
base_params, base_ss, reform_params=reform_params,
reform_ss=reform_ss, plot_type=plot_type, plot_title=plot_title)
assert fig
def test_ss_3Dplot_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.ss_3Dplot(
base_params, base_ss, reform_params=reform_params,
reform_ss=reform_ss, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
@pytest.mark.parametrize(
'base_tpi,base_params,reform_tpi, reform_params,ineq_measure,' +
'pctiles,plot_type',
[(base_tpi, base_params, None, None, 'gini', None, 'levels'),
(base_tpi, base_params, reform_tpi, reform_params, 'gini', None,
'levels'),
(base_tpi, base_params, reform_tpi, reform_params, 'var_of_logs',
None, 'diff'),
(base_tpi, base_params, reform_tpi, reform_params, 'pct_ratio',
(0.9, 0.1), 'levels'),
(base_tpi, base_params, reform_tpi, reform_params, 'top_share',
(0.01), 'pct_diff')],
ids=['Just baseline', 'Baseline + Reform',
'Base + Refore, var logs, diff',
'Base + Refore, pct ratios',
'Base + Refore, top share, pct diff'])
def test_inequality_plot(base_tpi, base_params, reform_tpi,
reform_params, ineq_measure, pctiles,
plot_type):
fig = output_plots.inequality_plot(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params, ineq_measure=ineq_measure,
pctiles=pctiles, plot_type=plot_type)
assert fig
def test_inequality_plot_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.inequality_plot(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
def test_plot_all(tmpdir):
base_output_path = os.path.join(CUR_PATH, 'test_io_data', 'OUTPUT')
reform_output_path = os.path.join(CUR_PATH, 'test_io_data', 'OUTPUT')
output_plots.plot_all(base_output_path, reform_output_path, tmpdir)
img1 = mpimg.imread(os.path.join(tmpdir, 'MacroAgg_PctChange.png'))
img2 = mpimg.imread(os.path.join(
tmpdir, 'SSLifecycleProfile_Cons_Reform.png'))
img3 = mpimg.imread(os.path.join(
tmpdir, 'SSLifecycleProfile_Save_Reform.png'))
assert isinstance(img1, np.ndarray)
assert isinstance(img2, np.ndarray)
assert isinstance(img3, np.ndarray)
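# Illustrative invocation (assumption: run from the repository root with the pickled
# test data in place):
#   pytest ogusa/tests/test_output_plots.py -k "aggregates or gdp_ratio"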
| OpenSourcePolicyCenter/dynamic | ogusa/tests/test_output_plots.py | Python | mit | 9,699 |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import threading
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
from .errors import ClientException
from .opus import Encoder as OpusEncoder
log = logging.getLogger(__name__)
__all__ = (
'AudioSource',
'PCMAudio',
'FFmpegPCMAudio',
'PCMVolumeTransformer',
)
class AudioSource:
"""Represents an audio stream.
    The audio stream can be Opus encoded or not; however, if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self):
"""Reads 20ms worth of audio.
Subclasses must implement this.
        If the audio is complete, then return an empty
        :term:`py:bytes-like object` to signal the end of the stream.
If :meth:`is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self):
"""Checks if the audio source is already encoded in Opus.
Defaults to ``False``.
"""
return False
def cleanup(self):
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self):
self.cleanup()
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: file-like object
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream):
self.stream = stream
def read(self):
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
class FFmpegPCMAudio(AudioSource):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, BinaryIO]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is True then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If true, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[BinaryIO]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(self, source, *, executable='ffmpeg', pipe=False, stderr=None, before_options=None, options=None):
stdin = None if not pipe else source
args = [executable]
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
args.extend(('-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
self._process = None
try:
self._process = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr)
self._stdout = self._process.stdout
except FileNotFoundError:
raise ClientException(executable + ' was not found.') from None
except subprocess.SubprocessError as exc:
raise ClientException('Popen failed: {0.__class__.__name__}: {0}'.format(exc)) from exc
def read(self):
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
def cleanup(self):
proc = self._process
if proc is None:
return
log.info('Preparing to terminate ffmpeg process %s.', proc.pid)
proc.kill()
if proc.poll() is None:
log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
proc.communicate()
log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
else:
log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)
self._process = None
class PCMVolumeTransformer(AudioSource):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: float
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original, volume=1.0):
if not isinstance(original, AudioSource):
raise TypeError('expected AudioSource not {0.__class__.__name__}.'.format(original))
if original.is_opus():
raise ClientException('AudioSource must not be Opus encoded.')
self.original = original
self.volume = volume
@property
def volume(self):
"""Retrieves or sets the volume as a floating point percentage (e.g. 1.0 for 100%)."""
return self._volume
@volume.setter
def volume(self, value):
self._volume = max(value, 0.0)
def cleanup(self):
self.original.cleanup()
def read(self):
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
class AudioPlayer(threading.Thread):
DELAY = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source, client, *, after=None):
threading.Thread.__init__(self)
self.daemon = True
self.source = source
self.client = client
self.after = after
self._end = threading.Event()
self._resumed = threading.Event()
self._resumed.set() # we are not paused
self._current_error = None
self._connected = client._connected
self._lock = threading.Lock()
if after is not None and not callable(after):
raise TypeError('Expected a callable for the "after" parameter.')
def _do_run(self):
self.loops = 0
self._start = time.time()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.time()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.time()))
time.sleep(delay)
def run(self):
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self):
if self.after is not None:
try:
self.after(self._current_error)
except Exception:
log.exception('Calling the after function failed.')
def stop(self):
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking=True):
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking=True):
self.loops = 0
self._start = time.time()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self):
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self):
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source):
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking):
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
log.info("Speaking call in player failed: %s", e)
| gnmiller/craig-bot | craig-bot/lib/python3.6/site-packages/discord/player.py | Python | mit | 10,909 |
from __future__ import print_function
import datetime
import os
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.engine.training import Model
from keras.initializers import RandomUniform
from keras.layers import Convolution1D, MaxPooling1D
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Input
from keras.layers import SpatialDropout1D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.embeddings import Embedding
from keras.metrics import binary_accuracy
from keras.optimizers import Adam
from keras.regularizers import l1, l2
import modelParameters
basename = "CNN"
suffix = datetime.datetime.now().strftime("%m%d_%I%M")
filename = "_".join([basename, suffix])
filename_config = "_".join([basename, "config", suffix])
batch_size = 35
nb_epoch = 2
region = 'same'
leaky_relu = LeakyReLU(alpha=0.13)
embedding_dims = 50
embedding_init = RandomUniform()
embedding_reg = l1(0.0)
convL2 = 0.001
num_filters1 = 250 # 1000
filter_length1 = 5
pool_len1 = 2
conv_init1 = "glorot_uniform"
conv_activation1 = 'relu'
conv_reg1 = l2(convL2)
num_filters2 = 200 # 800
filter_length2 = 3
pool_len2 = 2
conv_init2 = "glorot_uniform"
conv_activation2 = 'relu'
conv_reg2 = l2(convL2)
num_filters3 = 100 # 300
filter_length3 = 3
pool_len3 = 2
conv_init3 = "glorot_uniform"
conv_activation3 = 'relu'
conv_reg3 = l2(convL2)
num_filters4 = 500 # 300
filter_length4 = 4
pool_len4 = 2
conv_init4 = "glorot_uniform"
conv_activation4 = 'relu'
conv_reg4 = l2(convL2)
denseL2 = 0.001
dense_dims0 = 250 # 600
dense_activation0 = 'relu'
dense_init0 = "glorot_normal"
dense_reg0 = l2(denseL2)
dense_dims1 = 100 # 500
dense_activation1 = 'relu'
dense_init1 = "glorot_normal"
dense_reg1 = l2(denseL2)
dense_dims2 = 100 # 400
dense_activation2 = 'relu'
dense_init2 = "glorot_normal"
dense_reg2 = l2(denseL2)
dense_dims3 = 100
dense_activation3 = 'relu'
dense_init3 = "glorot_normal"
dense_reg3 = l2(denseL2)
dense_dims_final = 1
dense_activation_final = 'sigmoid'
dense_init_final = "glorot_normal"
# dense_l2_reg_final='N/A'
def build_CNN_model(inputType, do_training=False, model_inputs=None, loss_func='binary_crossentropy',
optimize_proc='adam', is_IntermediateModel=False, load_weight_path=None, **kwargs):
"""
:param inputType:
:param do_training:
:param model_inputs:
:param loss_func:
:param optimize_proc:
:param is_IntermediateModel:
:param load_weight_path:
:param kwargs:
:return:
"""
# assert not do_training and model_inputs, "if do_training then must pass in model_inputs dictionary"
EMBEDDING_TYPE = 'embeddingMatrix'
ONEHOT_TYPE = '1hotVector'
defined_input_types = {EMBEDDING_TYPE, ONEHOT_TYPE}
assert inputType in defined_input_types, "unknown input type {0}".format(inputType)
    if inputType == ONEHOT_TYPE:
review_input = Input(shape=(modelParameters.MaxLen_w,), dtype='float32',
name="ONEHOT_INPUT")
layer = Embedding(modelParameters.VocabSize_w + modelParameters.INDEX_FROM, embedding_dims,
embeddings_initializer=embedding_init, embeddings_regularizer=embedding_reg,
input_length=modelParameters.MaxLen_w, name='1hot_embeddingLayer')(review_input)
layer = SpatialDropout1D(0.50)(layer)
    elif inputType == EMBEDDING_TYPE:
review_input = Input(shape=(modelParameters.MaxLen_w, embedding_dims), dtype="float32", name="EMBEDDING_INPUT")
layer = review_input
else:
raise ValueError("Bad inputType arg to build_CNN_model")
layer = Convolution1D(filters=num_filters1,
kernel_size=filter_length1,
padding=region,
strides=1,
activation=conv_activation1,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=conv_reg1,
dilation_rate=1,
name='ConvLayer1')(layer)
layer = SpatialDropout1D(0.50)(layer)
layer = MaxPooling1D(pool_size=pool_len1)(layer)
# layer = Convolution1D(filters=num_filters2,
# kernel_size=filter_length2,
# padding=region,
# strides=1,
# activation=conv_activation2,
# kernel_initializer=conv_init2,
# kernel_regularizer=conv_reg2,
# dilation_rate=1,
# name='ConvLayer2')(layer)
#
# layer = SpatialDropout1D(0.50)(layer)
#
# layer = MaxPooling1D(pool_size=pool_len2)(layer)
# layer = Convolution1D(filters=num_filters3,
# kernel_size=filter_length3,
# padding=region,
# activation=conv_activation3,
# kernel_initializer=conv_init3,
# kernel_regularizer=conv_reg3,
# dilation_rate=1,
# name='ConvLayer3')(layer)
#
# layer = SpatialDropout1D(0.50)(layer)
#
# layer = MaxPooling1D(pool_size=pool_len3)(layer)
# #layer = GlobalMaxPool1D()(layer)
#
# layer = Convolution1D(filters=num_filters4,
# kernel_size=filter_length4,
# padding=region,
# activation=conv_activation4,
# kernel_initializer=conv_init4,
# kernel_regularizer=conv_reg4,
# dilation_rate=1,
# name='ConvLayer4')(layer)
#
# #layer = leaky_relu(layer)
#
# layer = SpatialDropout1D(0.50)(layer)
#
# layer = MaxPooling1D(pool_size=pool_len4)(layer)
# #layer = GlobalMaxPool1D()(layer)
#
# # layer = BatchNormalization()(layer)
layer = Flatten()(layer)
layer = Dense(dense_dims0, activation=dense_activation0, kernel_regularizer=dense_reg0,
kernel_initializer='glorot_normal', bias_initializer='zeros',
name='dense0')(layer)
layer = Dropout(0.50)(layer)
layer = Dense(dense_dims1, activation=dense_activation1, kernel_regularizer=dense_reg1,
kernel_initializer='glorot_normal', bias_initializer='zeros',
name='dense1')(layer)
layer = Dropout(0.50)(layer)
# layer = Dense(dense_dims2, activation=dense_activation2, kernel_regularizer=dense_reg2,
# kernel_initializer=dense_init2,
# name='dense2')(layer)
#
#
# layer = Dropout(0.50)(layer)
#
# layer = Dense(dense_dims3, activation=dense_activation3, kernel_regularizer=dense_reg3,
# kernel_initializer=dense_init3,
# name='dense3_outA')(layer)
# #layer = leaky_relu(layer)
#
if is_IntermediateModel:
return Model(inputs=[review_input], outputs=[layer], name="CNN_model")
#
# layer = Dropout(0.5)(layer)
layer = Dense(dense_dims_final, activation=dense_activation_final, kernel_initializer=dense_init_final,
kernel_regularizer=dense_reg0,
name='output_Full')(layer)
CNN_model = Model(inputs=[review_input], outputs=[layer], name="CNN_model")
CNN_model.compile(optimizer=Adam(lr=0.001, decay=0.0), loss=loss_func, metrics=[binary_accuracy])
if load_weight_path is not None:
CNN_model.load_weights(load_weight_path)
hist = ""
if do_training:
weightPath = os.path.join(modelParameters.WEIGHT_PATH, filename)
configPath = os.path.join(modelParameters.WEIGHT_PATH, filename_config)
        with open(configPath + ".json", 'w') as f:
f.write(CNN_model.to_json())
checkpoint = ModelCheckpoint(weightPath + '_W.{epoch:02d}-{val_loss:.4f}.hdf5',
verbose=1, save_best_only=True, save_weights_only=False, monitor='val_loss')
earlyStop = EarlyStopping(patience=3, verbose=1, monitor='val_loss')
LRadjuster = ReduceLROnPlateau(monitor='val_loss', factor=0.30, patience=0, verbose=1, cooldown=1,
min_lr=0.00001, epsilon=1e-2)
call_backs = [checkpoint, earlyStop, LRadjuster]
CNN_model.summary()
hist = CNN_model.fit(*model_inputs['training'],
batch_size=batch_size,
epochs=nb_epoch, verbose=1,
validation_data=model_inputs['dev'],
callbacks=call_backs)
return {"model": CNN_model, "hist": hist}
def save_CNNmodel_specs(model, hist, **kwargs):
with open(os.path.join(modelParameters.SPECS_PATH, filename) + '.config', 'w') as f:
f.write(str(model.get_config()))
with open(os.path.join(modelParameters.SPECS_PATH, filename + '.json'), 'w') as f:
f.write(model.to_json())
with open(os.path.join(modelParameters.SPECS_PATH, filename) + '.hist', 'w') as f:
f.write(str(hist.history))
with open(os.path.join(modelParameters.SPECS_PATH, filename) + '.specs', 'w') as f:
specs = """model: {}\nborder_mode: {}\nbatch_size: {}\nembedding_dims: {}\n
num_filters1: {}\nfilter_length1: {}\npool_len1: {}\n
num_filters2: {}\nfilter_length2: {}\npool_len2: {}\n
num_filters3: {}\nfilter_length3: {}\npool_len3: {}\n
num_filters4: {}\nfilter_length4: {}\npool_len4: {}\n
dense_dims1: {}\ndense_dims2: {}\ndense_dims3: {}\n
{moreArgs}\n""".format(basename,
region,
batch_size,
embedding_dims,
num_filters1,
filter_length1,
pool_len1,
num_filters2,
filter_length2,
pool_len2,
num_filters3,
filter_length3,
pool_len3,
"NA", # num_filters4,
"NA", # filter_length4,
"NA", # pool_len4,
dense_dims1,
dense_dims2,
"NA", # dense_dims3,
moreArgs=kwargs
)
f.write(specs)
def cnn_test():
from model_input_builders import build_CNN_input
modelinputs = build_CNN_input(truncate='pre', padding='post', DEBUG=False)
rv = build_CNN_model('1hotVector', True, modelinputs)
return modelinputs, rv
if __name__ == "__main__":
# modelinputs = build_CNN_input(truncate='pre', padding='post', DEBUG=False)
# build_CNN_model('1hotVector', True,modelinputs)
print("IN MAIN")
| jcavalieri8619/siamese_sentiment | CNN_model.py | Python | mit | 11,371 |
"Utility functions for the tests."
import json
def get_settings(**defaults):
"Update the default settings by the contents of the 'settings.json' file."
result = defaults.copy()
with open("settings.json", "rb") as infile:
data = json.load(infile)
for key in result:
try:
result[key] = data[key]
except KeyError:
pass
if result.get(key) is None:
raise KeyError(f"Missing {key} value in settings.")
# Remove any trailing slash in the base URL.
result["BASE_URL"] = result["BASE_URL"].rstrip("/")
return result
| pekrau/Publications | tests/utils.py | Python | mit | 606 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myinventory.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| tianz/MyInventory | manage.py | Python | mit | 254 |
import numpy as np
import os
import random
import threading
import time
import traceback
from util import cmudict, textinput
from util.infolog import log
import chainer
_batches_per_group = 32
_p_cmudict = 0.5
_pad = 0
# https://github.com/chainer/chainer/blob/1ad6355f8bfe4ccfcf0efcfdb5bd048787069806/examples/imagenet/train_imagenet.py
class PreprocessedDataset(chainer.dataset.DatasetMixin):
def __init__(self, metadata_filename, hparams):
self._hparams = hparams
# Load metadata:
self._datadir = os.path.dirname(metadata_filename)
# with open(metadata_filename) as f:
with open(metadata_filename, encoding="utf-8_sig") as f:
self._metadata = [line.strip().split('|') for line in f]
hours = sum((int(x[2]) for x in self._metadata)) * \
hparams.frame_shift_ms / (3600 * 1000)
log('Loaded metadata for %d examples (%.2f hours)' %
(len(self._metadata), hours))
# Load CMUDict: If enabled, this will randomly substitute some words in the training data with
# their ARPABet equivalents, which will allow you to also pass ARPABet to the model for
# synthesis (useful for proper nouns, etc.)
if hparams.use_cmudict:
cmudict_path = os.path.join(self._datadir, 'cmudict-0.7b')
if not os.path.isfile(cmudict_path):
raise Exception('If use_cmudict=True, you must download ' +
'http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b to %s' % cmudict_path)
self._cmudict = cmudict.CMUDict(cmudict_path, keep_ambiguous=False)
log('Loaded CMUDict with %d unambiguous entries' %
len(self._cmudict))
else:
self._cmudict = None
def _get_next_example(self, offset):
'''Loads a single example (input, mel_target, linear_target, cost) from disk'''
meta = self._metadata[offset]
text = meta[3]
if self._cmudict and random.random() < _p_cmudict:
text = ' '.join([self._maybe_get_arpabet(word)
for word in text.split(' ')])
input_data = np.asarray(textinput.to_sequence(text), dtype=np.int32)
linear_target = np.load(os.path.join(self._datadir, meta[0]))
mel_target = np.load(os.path.join(self._datadir, meta[1]))
return (input_data, mel_target, linear_target, len(linear_target))
# curriculum learning?
def _maybe_get_arpabet(self, word):
pron = self._cmudict.lookup(word)
return '{%s}' % pron[0] if pron is not None and random.random() < 0.5 else word
def _prepare_batch(batch, outputs_per_step):
random.shuffle(batch)
inputs = _prepare_inputs([x[0] for x in batch])
input_lengths = np.asarray([len(x[0]) for x in batch], dtype=np.int32)
mel_targets = _prepare_targets([x[1] for x in batch], outputs_per_step)
linear_targets = _prepare_targets(
[x[2] for x in batch], outputs_per_step)
return (inputs, input_lengths, mel_targets, linear_targets)
def _prepare_inputs(inputs):
max_len = max((len(x) for x in inputs))
return np.stack([_pad_input(x, max_len) for x in inputs])
def _prepare_targets(targets, alignment):
max_len = max((len(t) for t in targets)) + 1
return np.stack([_pad_target(t, _round_up(max_len, alignment)) for t in targets])
def _pad_input(x, length):
return np.pad(x, (0, length - x.shape[0]), mode='constant', constant_values=_pad)
def _pad_target(t, length):
return np.pad(t, [(0, length - t.shape[0]), (0, 0)], mode='constant', constant_values=_pad)
def _round_up(x, multiple):
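    # e.g. _round_up(7, 4) == 8 and _round_up(8, 4) == 8; used by
    # _prepare_targets above to pad target lengths to a multiple of
    # outputs_per_step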
remainder = x % multiple
return x if remainder == 0 else x + multiple - remainder
    # implementation of DatasetMixin?
def __len__(self):
return len(self._metadata)
    # implementation of DatasetMixin
def get_example(self, i):
input, mel, lin, _ = self._get_next_example(i)
        return input, (mel, lin)
 | tosaka2/tacotron | datasets/preprocessed_dataset.py | Python | mit | 4,099
from typing import Dict
from urllib.parse import quote
def request_path(env: Dict):
return quote('/' + env.get('PATH_INFO', '').lstrip('/'))
| bugsnag/bugsnag-python | bugsnag/wsgi/__init__.py | Python | mit | 147 |
from django.contrib import admin
from .models import Organization
admin.site.register(Organization)
| vladimiroff/humble-media | humblemedia/organizations/admin.py | Python | mit | 103 |
#!/usr/bin/env python
import sys
import argparse
import regrws
import regrws.method.org
try:
from apikey import APIKEY
except ImportError:
APIKEY = None
epilog = 'API key can be omitted if APIKEY is defined in apikey.py'
parser = argparse.ArgumentParser(epilog=epilog)
parser.add_argument('-k', '--key', help='ARIN API key',
required=False if APIKEY else True, dest='api_key')
parser.add_argument('-s', '--source-address', help='Source IP address')
parser.add_argument('org_handle', metavar='ORG_HANDLE')
args = parser.parse_args()
if args.api_key:
APIKEY = args.api_key
session = regrws.restful.Session(APIKEY, args.source_address)
method = regrws.method.org.Delete(session, args.org_handle)
try:
payload_out = method.call()
except regrws.restful.RegRwsError as exception:
print exception.args
| RhubarbSin/arin-reg-rws | org_delete.py | Python | mit | 839 |
#!/usr/bin/env python2
# Copyright (c) 2015 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test mulitple rpc user config option rpcauth
#
from test_framework.test_framework import AureusTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (AureusTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
#Append rpcauth to aureus.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "aureus.conf"), 'a') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urlparse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + base64.b64encode(authpair)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| hideoussquid/aureus-12-bitcore | qa/rpc-tests/multi_rpc.py | Python | mit | 4,609 |
#!/usr/bin/env python
#
# Translation of videogamena.me javascript to python
#
# http://videogamena.me/vgng.js
# http://videogamena.me/video_game_names.txt
#
# (C) 2014 Dustin Knie <[email protected]>
import argparse
import os
import random
from math import floor, trunc
_word_list_file = 'video_game_names.txt'
_word_list = []
def _build_list(word_list=_word_list_file):
try:
f = open(word_list, 'r')
words = []
for line in f:
line = line.strip('\n')
if line == "----":
_word_list.append(words)
words = []
else:
words.append(line)
_word_list.append(words)
except IOError as e:
print("Error opening {}: {}".format(word_list, e))
exit(1)
def _get_word(word_list, words=[], bad_match_list=[], allow_similar_matches=False):
bad_word = True
while bad_word:
word = word_list[trunc(floor(random.random() * len(word_list)))]
if '^' in word:
if not allow_similar_matches:
bad_match_list += word.split('^')[1].split('|')
word = word.split('^')[0]
if word in words or word in bad_match_list:
continue
bad_word = False
words.append(word)
return (words, bad_match_list)
def generate_game_name(allow_similar_matches=False):
words = []
bad_match_list = []
for word_list in _word_list:
(words, bad_match_list) = _get_word(word_list,
words=words,
bad_match_list=bad_match_list,
allow_similar_matches=allow_similar_matches)
return ' '.join(words)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('count', type=int, nargs='?', help='Number of names to create')
parser.add_argument('-l', '--list', action='store', help='Word list to use for generating names.')
args = parser.parse_args()
_build_list(word_list=args.list if args.list else _word_list_file)
for i in range(args.count if args.count else 1):
print(generate_game_name())
| nullpuppy/vgng | vgng.py | Python | mit | 2,102 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from glob import glob
from io import open
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
options = {
'build_exe': {
'packages': ['lxml', 'bs4', 'requests', 'html5lib', 'argparse', 'shanbay'],
'icon': 'shanbay.ico',
'include_files': [
'README.md',
'LICENSE',
'CHANGELOG.md',
'settings.ini.example',
] + glob('templates/*.example') + glob('templates/*/*.example'),
'include_msvcr': True,
}
}
try:
from cx_Freeze import setup, Executable
kwargs = dict(
options=options,
executables=[Executable('assistant.py')],
)
except ImportError:
kwargs = {}
current_dir = os.path.dirname(os.path.realpath(__file__))
requirements = [
'argparse',
'shanbay==0.3.6',
]
packages = [
'shanbay_assistant',
]
def read_f(name):
with open(os.path.join(current_dir, name), encoding='utf8') as f:
return f.read()
def long_description():
return read_f('README.md') + '\n\n' + read_f('CHANGELOG.md')
def meta_info(meta, filename='shanbay_assistant/__init__.py', default=''):
meta = re.escape(meta)
m = re.search(r"""%s\s+=\s+(?P<quote>['"])(?P<meta>.+?)(?P=quote)""" % meta,
read_f(filename))
return m.group('meta') if m else default
setup(
name=meta_info('__title__'),
version=meta_info('__version__'),
description='shanbay.com team assistant',
long_description=long_description(),
url='https://github.com/mozillazg/python-shanbay-team-assistant',
download_url='',
author=meta_info('__author__'),
author_email=meta_info('__email__'),
license=meta_info('__license__'),
packages=packages,
package_data={'': ['LICENSE', 'settings.ini.example',
'templates/*.example',
'templates/*/*.example',
]
},
package_dir={'shanbay_assistant': 'shanbay_assistant'},
include_package_data=True,
install_requires=requirements,
zip_safe=False,
entry_points={
'console_scripts': [
'shanbay_assistant = shanbay_assistant.assistant:main',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
'Environment :: Console',
'Topic :: Utilities',
'Topic :: Terminals',
],
keywords='shanbay, 扇贝网',
**kwargs
)
| mozillazg/python-shanbay-team-assistant | setup.py | Python | mit | 3,146 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-20 05:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('photos', '0003_rover'),
]
operations = [
migrations.AddField(
model_name='photo',
name='rover',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='photos.Rover'),
preserve_default=False,
),
]
| WillWeatherford/mars-rover | photos/migrations/0004_photo_rover.py | Python | mit | 589 |
#!/usr/bin/python
import os
import sys
import argparse
import requests
import subprocess
import shutil
class bcolors:
HEADER = '\033[90m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
class Console:
def __init__(self):
self.verbose = False
def log(self, string):
if self.verbose:
print string
console = Console()
def filter_no_console(line):
if 'console' in line:
return False
return True
def consolidate(input_filename, output_filename, filter_functions=None):
# read all filenames from input file
filenames = [line.rstrip('\n') for line in open(input_filename)]
# concat all lines in each file to the output file
with open(output_filename, 'w') as outfile:
for filename in filenames:
if filename.startswith('#') or len(filename) <= 0:
continue
console.log(bcolors.HEADER + filename + bcolors.ENDC)
with open(filename) as infile:
# write a header
outfile.write("/*\n* " + filename + "\n*/\n")
# write contents
for index, line in enumerate(infile):
# apply filter functions
if isinstance(filter_functions, list) and len(filter_functions) > 0:
add_line = True
for filter_function in filter_functions:
if not filter_function(line):
add_line = False
break
if add_line:
outfile.write(line)
else:
console.log('- line ' + str(index) + ': ' + bcolors.FAIL + line.lstrip().rstrip() + bcolors.ENDC)
# no filters
else:
outfile.write(line)
# newline
outfile.write("\n")
def compression_level_to_string(optimization_level):
if optimization_level >= 3:
compliation_level = 'ADVANCED_OPTIMIZATIONS'
elif optimization_level >= 2:
compliation_level = 'SIMPLE_OPTIMIZATIONS'
else:
compliation_level = 'WHITESPACE_ONLY'
return compliation_level
def get_minified_filename(filename):
return os.path.splitext(filename)[0] + '.min.js'
def compress_local(filename, optimization_level, compiler_path):
# java -jar compiler.jar --js hello.js --js_output_file hello-compiled.js
console.log('compiling with ' + compiler_path)
subprocess.call(['java',
'-jar', compiler_path,
'--js', filename,
'--js_output_file', filename,
'--compilation_level', compression_level_to_string(optimization_level)
])
console.log(bcolors.OKGREEN + filename + ' created' + bcolors.ENDC)
def compress_remote(filename, optimization_level):
SERVICE_URL = 'http://closure-compiler.appspot.com/compile'
console.log('compiling with google closure API: ' + SERVICE_URL)
with open(filename, 'r') as file:
javascript = file.read()
data = {
'js_code': javascript,
'output_format': 'text',
'output_info': 'compiled_code',
'compilation_level': compression_level_to_string(optimization_level)
}
headers = {
'Content-type': 'application/x-www-form-urlencoded'
}
r = requests.post(SERVICE_URL, data=data, headers=headers)
result = r.text
with open(filename, 'w') as outfile:
outfile.write(result)
console.log(bcolors.OKGREEN + filename + ' created' + bcolors.ENDC)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-v, --verbose', dest='verbose', default=False, action='store_true', help='detailed program output')
parser.add_argument('-i, --input', required=True, dest='input_file', type=str, help='input file (required), containing one filename per line to compile')
parser.add_argument('-o, --output', dest='output_file', default='output.js', type=str, help='output file')
parser.add_argument('-c, --compression', default=0, dest='compress', type=int, help='compression level of output file\n0: no compression\n1: strip whitespace\n2: simple\n3: advanced')
parser.add_argument('--compiler', default=None, dest='compiler_path', type=str, help='path to closure compiler jar file. If not specified, online closure API will be used instead')
parser.add_argument('--filter-console', default=False, dest='no_console', help='strips console calls', action='store_true')
args = parser.parse_args()
console.verbose = args.verbose
filters=[]
if args.no_console:
filters.append(filter_no_console)
output_filename = args.output_file
consolidate(input_filename=args.input_file, output_filename=output_filename, filter_functions=filters)
min_output_filename = get_minified_filename(output_filename)
    if(args.compress > 0):
        # the compressors read and overwrite their input in place, so seed the
        # .min.js file with the consolidated output first
        shutil.copyfile(output_filename, min_output_filename)
if(args.compiler_path is not None):
compress_local(min_output_filename, optimization_level=args.compress, compiler_path=args.compiler_path)
else:
compress_remote(min_output_filename, optimization_level=args.compress)
else:
# no compression was done, but we still want *.min.js
shutil.copyfile(output_filename, min_output_filename)
if __name__ == "__main__":
main(sys.argv[1:]) | ajdale/jstool | jstool.py | Python | mit | 5,509 |
"""
Decorators
"""
from __future__ import unicode_literals
from functools import wraps
from django.http import HttpResponseBadRequest
from django.utils.decorators import available_attrs
from django_ajax.shortcuts import render_to_json
def ajax(function=None, mandatory=True, **ajax_kwargs):
"""
    Decorator that guesses the view's response type and translates it to a
    serialized JSON response. Usage::
@ajax
def my_view(request):
do_something()
# will send {'status': 200, 'statusText': 'OK', 'content': null}
@ajax
def my_view(request):
return {'key': 'value'}
# will send {'status': 200, 'statusText': 'OK',
'content': {'key': 'value'}}
@ajax
def my_view(request):
return HttpResponse('<h1>Hi!</h1>')
# will send {'status': 200, 'statusText': 'OK',
'content': '<h1>Hi!</h1>'}
@ajax
def my_view(request):
return redirect('home')
# will send {'status': 302, 'statusText': 'FOUND', 'content': '/'}
# combination with others decorators:
@ajax
@login_required
@require_POST
def my_view(request):
pass
# if request user is not authenticated then the @login_required
        # decorator redirects to the login page.
# will send {'status': 302, 'statusText': 'FOUND',
'content': '/login'}
        # if the request method is 'GET' then the @require_POST decorator returns
        # an HttpResponseNotAllowed response.
# will send {'status': 405, 'statusText': 'METHOD NOT ALLOWED',
'content': null}
"""
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner(request, *args, **kwargs):
if mandatory and not request.is_ajax():
return HttpResponseBadRequest()
if request.is_ajax():
# return json response
try:
return render_to_json(func(request, *args, **kwargs), **ajax_kwargs)
except Exception as exception:
return render_to_json(exception)
else:
# return standard response
return func(request, *args, **kwargs)
return inner
if function:
return decorator(function)
return decorator
| furious-luke/django-ajax | django_ajax/decorators.py | Python | mit | 2,485 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pla
import numpy as np
def read_data(filename):
with open(filename, 'r') as infile:
X = []
y = []
for line in infile:
fields = line.strip().split()
X.append([float(x) for x in fields[:-1]])
y.append(0 if fields[-1] == '-1' else 1)
return np.array(X), np.array(y)
if __name__ == '__main__':
# q15
#X, y = read_data('data/quiz1-15-train.dat')
#pla.pla_train(X, y, verbose=True, random=False)
# q16-17
#X, y = read_data('data/quiz1-15-train.dat')
#n = 2000
#update_total = 0
#for i in range(n):
# print('round {0}/{1}'.format(i, n))
# # set a different seed for each round
# rs = np.random.RandomState(i + 1)
# # get a visit order
# visit_seq = list(rs.permutation(X.shape[0]))
# update_total += pla.pla_train(X, y, rate=0.5, random=visit_seq)[-1]
#print(update_total*1.0 / n)
# q18-20
# buggy, I don't know where I got wrong
X_train, y_train = read_data('data/quiz1-18-train.dat')
X_test, y_test = read_data('data/quiz1-18-test.dat')
coeff = [-1, 1]
n = 2000
err_total = 0
for i in range(n):
print('round {0}/{1} '.format(i, n), end='')
w = pla.pla_pocket_train(X_train, y_train, random=True, rs=i+1,
update_all=50)
err = 0
for x, y in zip(X_test, y_test):
x = np.hstack([np.array(1.0), x])
res = np.sign(w.dot(x))
if res != coeff[y]:
err += 1
print('error={0:f}'.format(err*1.0 / X_test.shape[0]))
err_total += err*1.0 / X_test.shape[0]
print(err_total / n)
| pjhades/coursera | ml1/1.py | Python | mit | 1,752 |
"""
PynamoDB Connection classes
~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from typing import Any, Dict, Mapping, Optional, Sequence
from pynamodb.connection.base import Connection, MetaTable, OperationSettings
from pynamodb.constants import DEFAULT_BILLING_MODE, KEY
from pynamodb.expressions.condition import Condition
from pynamodb.expressions.update import Action
class TableConnection:
"""
A higher level abstraction over botocore
"""
def __init__(
self,
table_name: str,
region: Optional[str] = None,
host: Optional[str] = None,
connect_timeout_seconds: Optional[float] = None,
read_timeout_seconds: Optional[float] = None,
max_retry_attempts: Optional[int] = None,
base_backoff_ms: Optional[int] = None,
max_pool_connections: Optional[int] = None,
extra_headers: Optional[Mapping[str, str]] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
) -> None:
self.table_name = table_name
self.connection = Connection(region=region,
host=host,
connect_timeout_seconds=connect_timeout_seconds,
read_timeout_seconds=read_timeout_seconds,
max_retry_attempts=max_retry_attempts,
base_backoff_ms=base_backoff_ms,
max_pool_connections=max_pool_connections,
extra_headers=extra_headers)
if aws_access_key_id and aws_secret_access_key:
self.connection.session.set_credentials(aws_access_key_id,
aws_secret_access_key,
aws_session_token)
def get_meta_table(self, refresh: bool = False) -> MetaTable:
"""
Returns a MetaTable
"""
return self.connection.get_meta_table(self.table_name, refresh=refresh)
def get_operation_kwargs(
self,
hash_key: str,
range_key: Optional[str] = None,
key: str = KEY,
attributes: Optional[Any] = None,
attributes_to_get: Optional[Any] = None,
actions: Optional[Sequence[Action]] = None,
condition: Optional[Condition] = None,
consistent_read: Optional[bool] = None,
return_values: Optional[str] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None,
return_values_on_condition_failure: Optional[str] = None,
) -> Dict:
return self.connection.get_operation_kwargs(
self.table_name,
hash_key,
range_key=range_key,
key=key,
attributes=attributes,
attributes_to_get=attributes_to_get,
actions=actions,
condition=condition,
consistent_read=consistent_read,
return_values=return_values,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics,
return_values_on_condition_failure=return_values_on_condition_failure
)
def delete_item(
self,
hash_key: str,
range_key: Optional[str] = None,
condition: Optional[Condition] = None,
return_values: Optional[str] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the DeleteItem operation and returns the result
"""
return self.connection.delete_item(
self.table_name,
hash_key,
range_key=range_key,
condition=condition,
return_values=return_values,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics,
settings=settings,
)
def update_item(
self,
hash_key: str,
range_key: Optional[str] = None,
actions: Optional[Sequence[Action]] = None,
condition: Optional[Condition] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None,
return_values: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the UpdateItem operation
"""
return self.connection.update_item(
self.table_name,
hash_key,
range_key=range_key,
actions=actions,
condition=condition,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics,
return_values=return_values,
settings=settings,
)
def put_item(
self,
hash_key: str,
range_key: Optional[str] = None,
attributes: Optional[Any] = None,
condition: Optional[Condition] = None,
return_values: Optional[str] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the PutItem operation and returns the result
"""
return self.connection.put_item(
self.table_name,
hash_key,
range_key=range_key,
attributes=attributes,
condition=condition,
return_values=return_values,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics,
settings=settings,
)
def batch_write_item(
self,
put_items: Optional[Any] = None,
delete_items: Optional[Any] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the batch_write_item operation
"""
return self.connection.batch_write_item(
self.table_name,
put_items=put_items,
delete_items=delete_items,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics,
settings=settings,
)
def batch_get_item(
self,
keys: Sequence[str],
consistent_read: Optional[bool] = None,
return_consumed_capacity: Optional[str] = None,
attributes_to_get: Optional[Any] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the batch get item operation
"""
return self.connection.batch_get_item(
self.table_name,
keys,
consistent_read=consistent_read,
return_consumed_capacity=return_consumed_capacity,
attributes_to_get=attributes_to_get,
settings=settings,
)
def get_item(
self,
hash_key: str,
range_key: Optional[str] = None,
consistent_read: bool = False,
attributes_to_get: Optional[Any] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the GetItem operation and returns the result
"""
return self.connection.get_item(
self.table_name,
hash_key,
range_key=range_key,
consistent_read=consistent_read,
attributes_to_get=attributes_to_get,
settings=settings,
)
def scan(
self,
filter_condition: Optional[Any] = None,
attributes_to_get: Optional[Any] = None,
limit: Optional[int] = None,
return_consumed_capacity: Optional[str] = None,
segment: Optional[int] = None,
total_segments: Optional[int] = None,
exclusive_start_key: Optional[str] = None,
consistent_read: Optional[bool] = None,
index_name: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
select: Optional[str] = None,
) -> Dict:
"""
Performs the scan operation
"""
return self.connection.scan(
self.table_name,
filter_condition=filter_condition,
select=select,
attributes_to_get=attributes_to_get,
limit=limit,
return_consumed_capacity=return_consumed_capacity,
segment=segment,
total_segments=total_segments,
exclusive_start_key=exclusive_start_key,
consistent_read=consistent_read,
index_name=index_name,
settings=settings,
)
def query(
self,
hash_key: str,
range_key_condition: Optional[Condition] = None,
filter_condition: Optional[Any] = None,
attributes_to_get: Optional[Any] = None,
consistent_read: bool = False,
exclusive_start_key: Optional[Any] = None,
index_name: Optional[str] = None,
limit: Optional[int] = None,
return_consumed_capacity: Optional[str] = None,
scan_index_forward: Optional[bool] = None,
select: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the Query operation and returns the result
"""
return self.connection.query(
self.table_name,
hash_key,
range_key_condition=range_key_condition,
filter_condition=filter_condition,
attributes_to_get=attributes_to_get,
consistent_read=consistent_read,
exclusive_start_key=exclusive_start_key,
index_name=index_name,
limit=limit,
return_consumed_capacity=return_consumed_capacity,
scan_index_forward=scan_index_forward,
select=select,
settings=settings,
)
def describe_table(self) -> Dict:
"""
Performs the DescribeTable operation and returns the result
"""
return self.connection.describe_table(self.table_name)
def delete_table(self) -> Dict:
"""
Performs the DeleteTable operation and returns the result
"""
return self.connection.delete_table(self.table_name)
def update_time_to_live(self, ttl_attr_name: str) -> Dict:
"""
Performs the UpdateTimeToLive operation and returns the result
"""
return self.connection.update_time_to_live(self.table_name, ttl_attr_name)
def update_table(
self,
read_capacity_units: Optional[int] = None,
write_capacity_units: Optional[int] = None,
global_secondary_index_updates: Optional[Any] = None,
) -> Dict:
"""
Performs the UpdateTable operation and returns the result
"""
return self.connection.update_table(
self.table_name,
read_capacity_units=read_capacity_units,
write_capacity_units=write_capacity_units,
global_secondary_index_updates=global_secondary_index_updates)
def create_table(
self,
attribute_definitions: Optional[Any] = None,
key_schema: Optional[Any] = None,
read_capacity_units: Optional[int] = None,
write_capacity_units: Optional[int] = None,
global_secondary_indexes: Optional[Any] = None,
local_secondary_indexes: Optional[Any] = None,
stream_specification: Optional[Dict] = None,
billing_mode: str = DEFAULT_BILLING_MODE,
tags: Optional[Dict[str, str]] = None,
) -> Dict:
"""
Performs the CreateTable operation and returns the result
"""
return self.connection.create_table(
self.table_name,
attribute_definitions=attribute_definitions,
key_schema=key_schema,
read_capacity_units=read_capacity_units,
write_capacity_units=write_capacity_units,
global_secondary_indexes=global_secondary_indexes,
local_secondary_indexes=local_secondary_indexes,
stream_specification=stream_specification,
billing_mode=billing_mode,
tags=tags
)
| jlafon/PynamoDB | pynamodb/connection/table.py | Python | mit | 12,792 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/SWIG/SWIGOUTDIR.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that use of the $SWIGOUTDIR variable causes SCons to recognize
that Java files are created in the specified output directory.
"""
import TestSCons
test = TestSCons.TestSCons()
swig = test.where_is('swig')
if not swig:
test.skip_test('Can not find installed "swig", skipping test.\n')
where_java_include=test.java_where_includes()
if not where_java_include:
test.skip_test('Can not find installed Java include files, skipping test.\n')
test.write(['SConstruct'], """\
env = Environment(tools = ['default', 'swig'],
CPPPATH=%(where_java_include)s,
)
Java_foo_interface = env.SharedLibrary(
'Java_foo_interface',
'Java_foo_interface.i',
SWIGOUTDIR = 'java/build dir',
SWIGFLAGS = '-c++ -java -Wall',
SWIGCXXFILESUFFIX = "_wrap.cpp")
""" % locals())
test.write('Java_foo_interface.i', """\
%module foopack
""")
# SCons should realize that it needs to create the "java/build dir"
# subdirectory to hold the generated .java files.
test.run(arguments = '.')
test.must_exist('java/build dir/foopackJNI.java')
test.must_exist('java/build dir/foopack.java')
# SCons should remove the built .java files.
test.run(arguments = '-c')
test.must_not_exist('java/build dir/foopackJNI.java')
test.must_not_exist('java/build dir/foopack.java')
# SCons should realize it needs to rebuild the removed .java files.
test.not_up_to_date(arguments = '.')
test.must_exist('java/build dir/foopackJNI.java')
test.must_exist('java/build dir/foopack.java')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| EmanueleCannizzaro/scons | test/SWIG/SWIGOUTDIR.py | Python | mit | 2,897 |
import re
from unittest import TestCase
def mark_quoted_strings(sql):
"""Mark all quoted strings in the SOQL by '@' and get them as params,
with respect to all escaped backslashes and quotes.
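    Example (mirrors the unit tests below):
        mark_quoted_strings("a'bc'd")  ->  ("a@d", ['bc'])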
"""
pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'")
bs_pattern = re.compile(r"\\([\\'])")
    out_pattern = re.compile(r"^[-!()*+,.:<=>\w\s]*$")
start = 0
out = []
params = []
for match in pm_pattern.finditer(sql):
out.append(sql[start:match.start()])
assert out_pattern.match(sql[start:match.start()])
params.append(bs_pattern.sub('\\1', sql[match.start() + 1:match.end() -1]))
start = match.end()
out.append(sql[start:])
assert out_pattern.match(sql[start:])
return '@'.join(out), params
def subst_quoted_strings(sql, params):
"""Reverse operation to mark_quoted_strings - substitutes '@' by params.
"""
parts = sql.split('@')
assert len(parts) == len(params) + 1
out = []
for i, param in enumerate(params):
out.append(parts[i])
out.append("'%s'" % param.replace('\\', '\\\\').replace("\'", "\\\'"))
out.append(parts[-1])
return ''.join(out)
def find_closing_parenthesis(sql, startpos):
"""Find the pair of opening and closing parentheses.
Starts search at the position startpos.
Returns tuple of positions (opening, closing) if search succeeds, otherwise None.
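    Example (from the unit tests below):
        find_closing_parenthesis('() (() (())) ()', 2)  ->  (3, 12)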
"""
pattern = re.compile(r'[()]')
level = 0
opening = 0
for match in pattern.finditer(sql, startpos):
par = match.group()
if par == '(':
if level == 0:
opening = match.start()
level += 1
if par == ')':
assert level > 0
level -= 1
if level == 0:
closing = match.end()
return opening, closing
def transform_except_subselect(sql, func):
"""Call a func on every part of SOQL query except nested (SELECT ...)"""
start = 0
out = []
while sql.find('(SELECT', start) > -1:
pos = sql.find('(SELECT', start)
out.append(func(sql[start:pos]))
start, pos = find_closing_parenthesis(sql, pos)
out.append(sql[start:pos])
start = pos
out.append(func(sql[start:len(sql)]))
return ''.join(out)
class TestSubSelectSearch(TestCase):
def test_parenthesis(self):
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 0), (0, 2))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 2), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 3), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 6), (7, 11))
self.assertEqual(find_closing_parenthesis('() (() (())) ()',13), (13,15))
self.assertRaises(AssertionError, find_closing_parenthesis, '() (() (())) ()',1)
def test_subselect(self):
sql = "SELECT a, (SELECT x FROM y) FROM b WHERE (c IN (SELECT p FROM q WHERE r = %s) AND c = %s)"
        func = lambda sql: '*transformed*'
        expected = "*transformed*(SELECT x FROM y)*transformed*(SELECT p FROM q WHERE r = %s)*transformed*"
self.assertEqual(transform_except_subselect(sql, func), expected)
def test_nested_subselect(self):
sql = "SELECT a, (SELECT x, (SELECT p FROM q) FROM y) FROM b"
        func = lambda x: '*transformed*'
        expected = "*transformed*(SELECT x, (SELECT p FROM q) FROM y)*transformed*"
self.assertEqual(transform_except_subselect(sql, func), expected)
class ReplaceQuotedStringsTest(TestCase):
def test_subst_quoted_strings(self):
def inner(sql, expected):
result = mark_quoted_strings(sql)
self.assertEqual(result, expected)
self.assertEqual(subst_quoted_strings(*result), sql)
inner("where x=''", ("where x=@", ['']))
inner("a'bc'd", ("a@d", ['bc']))
inner(r"a'bc\\'d", ("a@d", ['bc\\']))
inner(r"a'\'\\'b''''", ("a@b@@", ['\'\\', '', '']))
self.assertRaises(AssertionError, mark_quoted_strings, r"a'bc'\\d")
self.assertRaises(AssertionError, mark_quoted_strings, "a'bc''d")
| chromakey/django-salesforce | salesforce/backend/subselect.py | Python | mit | 4,158 |
#!/usr/local/bin/python
# -*-coding:Utf-8 -*
import os
import math
def GA_settings():
"""Provides the view for the user setting of the GA experiments and returns the settings set"""
options = {}
os.system("clear")
print('===== OPTIONS =====\n')
preset = int(raw_input(
"PRESET\n"
"Use preset ?\n"
"\n\n-> 1: Source based preset\n"
"\n-> 2: I WANT TO SET BY MYSELF\n"
))
os.system("clear")
if preset == 1:
options["iterations"] = int(10000)
options["stopFitness"] = float(0.95)
options["mode"] = 'real'
options['crossMode'] = 'randomMultiPoint'
options["maximalPopulation"] = int(50)
options["mutationMode"] = 'oneNucleotid'
options["mutationProbability"] = float(0.05)
options["verbose"] = False
options["initialPopulation"] = int(100)
options['selectionMode'] = 'tournament'
elif preset == 2:
print('BASICS')
x = int(raw_input('Stop Iterations Number:\n'))
options['iterations'] = int(x)
options['stopFitness'] = float(raw_input(
'Stop Fitness:\n'
))
os.system('clear')
print('SELECTION')
options['selectionMode'] = int(raw_input(
'\nSelection Method:\n'
'--> 1: Roulette method\n'
'--> 2: Tournament method\n'
'--> 3: Roulette without replacement method\n'
))
if options['selectionMode'] == 1:
options['selectionMode'] = 'roulette'
elif options['selectionMode'] == 2:
options['selectionMode'] = 'tournament'
elif options['selectionMode'] == 3:
options['selectionMode'] = 'rouletteWR'
os.system('clear')
print('CROSSOVER & MUTATIONS')
options['mode'] = int(raw_input(
'Mode:\n'
'-> 1: Binary mode\n'
'-> 2: Real mode\n'
))
if options['mode'] == 1:
options['mode'] = 'binary'
elif options['mode'] == 2:
options['mode'] = 'real'
options['crossMode'] = int(raw_input(
'Crossover Mode:\n'
'--> 1: random one point\n'
'--> 2: random multipoint\n'
))
if options['crossMode'] == 1:
options['crossMode'] = 'randomOnePoint'
elif options['crossMode'] == 2:
options['crossMode'] = 'randomMultiPoint'
options['mutationMode'] = int(raw_input(
'Mutation Mode:\n'
'-> 0: Swap mode\n'
            '-> 1: Each nucleotide has a chance to be mutated, one by one\n'
'-> 2: 1 mutation maximum by child\n'
))
if options['mutationMode'] == 0:
options['mutationMode'] = 'swap'
elif options['mutationMode'] == 1:
options['mutationMode'] = 'everyNucleotid'
elif options['mutationMode'] == 2:
options['mutationMode'] = 'oneNucleotid'
options['mutationProbability'] = float(raw_input(
'Mutation Probability Mode:\n'
'-> 0 < n < 1: Fixed Probability\n'
'-> 2: Random Probability, basically between 1/BitArraySize and 1/PopulationSize\n'
))
os.system('clear')
print("POPULATION")
options["maximalPopulation"] = int(raw_input(
"Maximal Population:\n"
"-> n > 0: elitist insertion, just keep n best individuals\n"
"-> Other: every individual is kept (can slow down the algorythm for several iterations)\n"
"-> WARNING: If you set maximal population = 1 WITH roulette without replacement"
", your computer will explode\n"
))
options["initialPopulation"] = int(raw_input("Initialise with how much individuals ?\n"))
os.system("clear")
print("\nVERBOSE")
options["verbose"] = int(raw_input(
"Verbose Mode\n"
"-> 1: Enabled\n"
"-> 0: Disabled\n"
))
if options['verbose'] == 0:
options['verbose'] = False
elif options['verbose'] == 1:
options['verbose'] = True
os.system("clear")
return options
def ES_settings():
"""Provides the view for the user setting of the ES experiments and returns the settings set"""
os.system("clear")
print('===== OPTIONS =====\n')
options = {}
preset = int(raw_input(
"PRESET\n"
"Use preset ?\n"
"\n\n-> 1: Source based preset\n"
"\n-> 2: I WANT TO SET BY MYSELF\n"
))
os.system("clear")
if preset == 1:
options["iterations"] = int(1000)
options["stopFitness"] = float(0.95)
options["base"] = int(10)
options['verbose'] = False
options['selectionMode'] = int(1)
options['mutationMode'] = '2LRNS'
options['recombinationMode'] = 'weighted'
options['sigmaBoost'] = True
elif preset == 2:
print('\nBASICS')
x = int(raw_input('Stop Iterations Number:\n'))
options["iterations"] = int(x)
options['stopFitness'] = float(raw_input('\nStop Fitness:\n'))
print("\nGENERATIONS")
options["base"] = int(raw_input(
'n setting:\n'
'lambda (number of child from the father) = 8 * n\n'
'mu (number of best child selected to make new father) = lambda / 4\n'
't (global step size) = 1 / (n)^(1/2)\n'
'ti (component step size) = 1 / (n)^(1/4)\n'
))
print('RECOMBINATION')
options['recombinationMode'] = int(raw_input(
'Recombination mode:\n'
'1- Intermediate\n'
'2- Select Best\n'
'3- Weighted\n'
))
if options['recombinationMode'] == 1:
options['recombinationMode'] = 'intermediate'
elif options['recombinationMode'] == 2:
options['recombinationMode'] = 'best'
elif options['recombinationMode'] == 3:
options['recombinationMode'] = 'weighted'
print('MUTATION')
options['mutationMode'] = int(raw_input(
'Mutation mode:\n'
'1- 2 Learning Rates, N Sigmas\n'
'2- 1 Learning Rate, 1 Sigma\n'
))
if options['mutationMode'] == 1:
options['mutationMode'] = '2LRNS'
elif options['mutationMode'] == 2:
options['mutationMode'] = '1LR1S'
print('SIGMA BOOST')
options['sigmaBoost'] = int(raw_input(
'Allow sigma boost YOLO special feature ?\n'
'1- sigma nitro enabled\n'
'2- sigma nitro disabled\n'
))
if options['sigmaBoost'] == 1:
options['sigmaBoost'] = True
elif options['sigmaBoost'] == 2:
options['sigmaBoost'] = False
print("\nVERBOSE")
options["verbose"] = int(raw_input(
"Verbose Mode\n"
"-> 1: Enabled\n"
"-> 0: Disabled\n"
))
os.system("clear")
options['maximalPopulation'] = 2 * options['base']
options['childNumber'] = 8 * options['base']
options['globalLearningRate'] = 1.0 / pow(options['base'], 0.5)
options['localLearningRate'] = 1.0 / pow(options['base'], 0.25)
return options
| goujonpa/jeankevin | views/settingsView.py | Python | mit | 7,278 |
import pygame
from pygame.locals import *
from Constants import Constants
from FadeTransition import FadeTransition
from Menu import Menu
from GW_Label import GW_Label
from GuiWidgetManager import GuiWidgetManager
from xml.sax import make_parser
from Localization import Localization
import os
class ExitMenu(Menu):
"Exit Menu"
def __init__(self):
"Set up the Exit menu"
Menu.__init__(self,"MoleFusion Exit Menu","sprites/back1.jpg")
self.parser = make_parser()
self.curHandler = Localization()
self.parser.setContentHandler(self.curHandler)
self.parser.parse(open("languages/ExitMenu_" + Constants.LANGUAGE + ".xml"))
self.title = GW_Label(self.curHandler.getText("title"),(0.5,0.1),(27,22,24))
self.game_by = GW_Label(self.curHandler.getText("game"),(0.5,0.3),(240,255,220))
self.music_by = GW_Label(self.curHandler.getText("music"),(0.5,0.5),(240,255,220))
self.web = GW_Label(self.curHandler.getText("web"),(0.5,0.7),(255,255,255))
self.widget_man = GuiWidgetManager([self.title,self.game_by,self.music_by,self.web])
self.time_speed=pygame.time.Clock()
self.exit=False
self.on_enter()
def on_enter(self):
pygame.event.clear()
self.screen.blit(self.background, (0, 0))
pygame.display.flip()
self.exit=False
self.widget_man.set_draw(True)
def on_exit(self):
pygame.event.clear()
f = FadeTransition(2000,Constants.FADECOLOR,"to")
del f
self.exit=True
self.widget_man.set_draw(False)
def run(self):
while 1 and self.exit==False:
self.time_speed.tick(Constants.FPS)
for event in pygame.event.get():
if event.type == QUIT:
self.on_exit()
elif event.type == KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.on_exit()
else:
                    pygame.event.post(event) # Re-inject the event into the queue for possible later processing
self.widget_man.run()
| reality3d/molefusion | modules/ExitMenu.py | Python | mit | 1,885 |
"""
Classes for char-to-int mapping and int-to-int mapping.
:Author: James Taylor ([email protected])
The char-to-int mapping can be used to translate a list of strings
over some alphabet to a single int array (example for encoding a multiple
sequence alignment).
The int-to-int mapping is particularly useful for creating partitions,
and provides methods to merge/split symbols in the output mapping.
The two forms of mapping can be combined, for example to encode a
multiple sequence alignment in a reduced alphabet defined by a partition
of alignment columns. Many of the helper methods provided are for
solving such alignment oriented problems.
This code was originally written for the `ESPERR`_ project which includes
software for searcing for alignment encodings that work well for specific
classification problems using various Markov chain classifiers over the
reduced encodings.
Most of the core implementation is in the pyrex/C extension
"_seqmapping.pyx" for performance reasons (specifically to avoid the
excessive bounds checking that would make a sequence/array lookup heavy
problem like this slow in pure python).
.. _ESPERR: http://www.bx.psu.edu/projects/esperr/
"""
from ._seqmapping import (
CharToIntArrayMapping,
IntToIntMapping,
)
# Char->Int mapping for DNA characters with missing data
DNA = CharToIntArrayMapping()
DNA.set_mapping("a", 0)
DNA.set_mapping("A", 0)
DNA.set_mapping("c", 1)
DNA.set_mapping("C", 1)
DNA.set_mapping("g", 2)
DNA.set_mapping("G", 2)
DNA.set_mapping("t", 3)
DNA.set_mapping("T", 3)
DNA.set_mapping("-", 4)
DNA.set_mapping("*", 5)
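# Illustrative note (not part of the original module):
# alignment_mapping_from_file() below passes each alignment column (a list of
# characters) through DNA.translate_list(), which collapses the column into a
# single integer index in the range [0, DNA.get_out_size() ** alignment_depth).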
# Creating mappings
def alignment_mapping_from_file(f, char_mapping=DNA):
"""
Create a mapping from a file of alignment columns.
"""
columns, symbols = [], []
for line in f:
column, symbol = line.split()
columns.append(column)
symbols.append(int(symbol))
align_count = len(columns[0])
mapping = IntToIntMapping(char_mapping.get_out_size() ** align_count)
for column, symbol in zip(columns, symbols):
index = char_mapping.translate_list(list(column))[0]
mapping.set_mapping(index, symbol)
return align_count, mapping
def second_mapping_from_file(f, first_mapping, char_mapping=DNA):
columns, symbols = [], []
for line in f:
column, symbol = line.split()
columns.append(column)
symbols.append(int(symbol))
mapping = IntToIntMapping(first_mapping.get_out_size())
for column, symbol in zip(columns, symbols):
index = char_mapping.translate_list(list(column))[0]
if first_mapping[index] >= 0:
mapping.set_mapping(first_mapping[index], symbol)
return mapping
def identity_mapping(size):
mapping = IntToIntMapping(size)
for i in range(size):
mapping.set_mapping(i, i)
return mapping
| bxlab/bx-python | lib/bx/seqmapping.py | Python | mit | 2,856 |
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
An Eulerer
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Specials.Simulaters.Populater"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import numpy as np
#</ImportSpecificModules>
#<DefineClass>
@DecorationClass()
class EulererClass(BaseClass):
#Definition
RepresentingKeyStrsList=[
'EuleringPreFloatsArray',
'EuleringJacMethodStr',
'EuleringStepTimeFloat',
'EuleredPostFloatsArray',
]
def default_init(self,
_EuleringPreFloatsArray=None,
_EuleringJacMethodStr="euler_null",
_EuleringStepTimeFloat=0.1,
_EuleredPostFloatsArray=None,
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
def euler_null(self):
#return
return np.zeros(
len(self.EuleringPreFloatsArray)
)
def do_euler(
self,
**_KwargVariablesDict
):
#debug
'''
self.debug(('self.',self,[
'EuleringJacMethodStr'
]))
'''
#Do euler
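		#(explicit Euler update: Post = Pre + jac() * StepTime, where jac is
		# the method named by EuleringJacMethodStr, e.g. euler_null above)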
self.EuleredPostFloatsArray=self.EuleringPreFloatsArray+getattr(
self,self.EuleringJacMethodStr)()*self.EuleringStepTimeFloat
#</DefineClass>
| Ledoux/ShareYourSystem | Pythonlogy/draft/Eulerer/__init__.py | Python | mit | 1,369 |
import os
import signal
import subprocess
import beanstalkc
import time
import pexpect
try:
import unittest2 as unittest
except ImportError:
import unittest
from beanstalkctl.util import BeanstalkdMixin
class BaseSpec(unittest.TestCase, BeanstalkdMixin):
beanstalkd_instance = None
beanstalkd_host = '127.0.0.1'
beanstalkd_port = 11411
def _beanstalkd_path(self):
beanstalkd = os.getenv('BEANSTALKD')
if beanstalkd:
return os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..', beanstalkd))
# assume beanstalkd is
# installed globally
return 'beanstalkd'
beanstalkd_path = property(_beanstalkd_path)
def _start_beanstalkd(self):
print "Using beanstalkd: {0}".format(self.beanstalkd_path)
print "Starting up the beanstalkd instance...",
self.beanstalkd_instance = subprocess.Popen(
[self.beanstalkd_path, '-p', str(self.beanstalkd_port)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
)
print 'running as {0}...'.format(self.beanstalkd_instance),
print "done."
def base_setup(self):
self._start_beanstalkd()
beanstalkctl = ' '.join([
os.path.join(
os.path.dirname(self.call('pwd')),
'bin',
'beanstalkctl'),
'--host={0}'.format(self.beanstalkd_host),
'--port={0}'.format(self.beanstalkd_port), ])
self.logfh = open(
'{0}.log'.format(self.__class__.__name__), 'w', 0)
self.beanstalkctl = pexpect.spawn(beanstalkctl, logfile=self.logfh)
self.beanstalkctl.setecho(False)
self.beanstalkctl.expect('beanstalkctl> ')
def base_teardown(self):
self.logfh.close()
if not self.beanstalkd_instance:
return
print "Shutting down the beanstalkd instance...",
self.beanstalkd_instance.terminate()
print "done."
def interact(self, cmd, expect='beanstalkctl> '):
self.beanstalkctl.sendline(cmd)
self.beanstalkctl.expect_exact(expect)
return self.get_response()
def get_response(self):
result = self.beanstalkctl.before
if result.endswith('\x1b[K'):
return result[:-6]
return result
def call(self, command, **env):
"""Run a command on the terminal.
Args:
command (str): the command to execute
Keyword Args:
**env (dict): any keyword arguments are collected into a
dictionary and passed as environment variables directly
to the subprocess call.
Returns:
            str. The command's stdout data. An ``Exception`` is raised if
            the command wrote anything to stderr.
"""
p = subprocess.Popen(
command,
shell=False,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result, error = p.communicate()
if error:
raise Exception(error)
return result
def clean(self, text):
for chunk in ('\r', r'\\x1b[K'):
text = text.replace(chunk, '')
return text.strip()
def skipped(func):
from nose.plugins.skip import SkipTest
def wrapper(*args, **kwargs):
raise SkipTest("Test %s is skipped" % func.__name__)
wrapper.__name__ = func.__name__
return wrapper
| OldhamMade/beanstalkctl | specs/base_spec.py | Python | mit | 3,511 |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
HostDiskPartitionInfoPartitionFormat = Enum(
'gpt',
'mbr',
'unknown',
)
| xuru/pyvisdk | pyvisdk/enums/host_disk_partition_info_partition_format.py | Python | mit | 247 |
# -*- coding: utf-8 -*-
"""Status effect data."""
from components.status_effect import StatusEffect
from status_effect_functions import damage_of_time
# todo: generate new object not copy
STATUS_EFFECT_CATALOG = {
'POISONED':
{
'name': 'poisoned',
'tile_path': 'status_effect/poisoned.png',
'color': 'green',
'tick_function': damage_of_time,
'duration': 6,
'function_kwargs': {'init_dmg': 2} # specify as dictionary
},
'OFF_BALANCED':
{
'name': 'off-balanced',
'tile_path': 'status_effect/off_balanced.png',
'color': 'gray',
'duration': 4,
'stats': {'phys_pow': -1,
'defense': -2}
},
'VIGILANT':
{
'name': 'vigilant',
'tile_path': 'status_effect/vigilant.png',
'color': 'blue',
'duration': 6,
'stats': {'defense': 5}
},
}
def generate_status_effect(statfx_id):
"""Return a status effect generated from catalog."""
statfx_data = STATUS_EFFECT_CATALOG.get(statfx_id)
# char and color are set as placeholder, ASCII graphics features will be removed in future.
return StatusEffect(statfx_data.get('name'),
statfx_data.get('tile_path'),
statfx_data.get('color'),
function_kwargs=statfx_data.get('function_kwargs'),
tick_function=statfx_data.get('tick_function'),
duration=statfx_data.get('duration'),
stats=statfx_data.get('stats'))
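# Illustrative note (not part of the original module): for example,
# generate_status_effect('POISONED') builds a StatusEffect that uses
# damage_of_time as its tick function (init_dmg=2) with a duration of 6.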
| kuraha4/roguelike-tutorial-python | src/data/status_effect.py | Python | mit | 1,577 |
# In the 20x20 grid below, four numbers along a diagonal line have been marked in red.
# 08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
# 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
# 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
# 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
# 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
# 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
# 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
# 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
# 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
# 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
# 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
# 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
# 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
# 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
# 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
# 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
# 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
# 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
# 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
# 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
# The product of these numbers is 26 x 63 x 78 x 14 = 1788696.
# What is the greatest product of four adjacent numbers in the same direction (up, down,
# left, right, or diagonally) in the 20x20 grid?
text = '08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 \
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 \
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 \
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 \
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 \
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 \
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 \
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 \
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 \
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 \
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 \
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 \
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 \
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 \
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 \
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 \
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 \
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 \
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 \
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48'
nums = [int(x) for x in text.split(' ')]
ans = 0
size = 20
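# First pass: products of four along each row (horizontal) and each column (vertical)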
for i in range(0, size):
for j in range(3, size):
tmp = nums[j - 3 + i * size] * nums[j - 2 + i * size] \
* nums[j - 1 + i * size] * nums[j + i * size]
ans = max(ans, tmp)
tmp = nums[i + (j - 3) * size] * nums[i + (j - 2) * size] \
* nums[i + (j - 1) * size] * nums[i + j * size]
ans = max(ans, tmp)
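# Second pass: products of four along the down-right and down-left diagonals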
for i in range(3, size):
for j in range(3, size):
tmp = nums[j - 3 + (i - 3) * size] * nums[j - 2 + (i - 2) * size] \
* nums[j - 1 + (i - 1) * size] * nums[j + i * size]
ans = max(ans, tmp)
tmp = nums[j + (i - 3) * size] * nums[j - 1 + (i - 2) * size] \
* nums[j - 2 + (i - 1) * size] * nums[j - 3 + i * size]
ans = max(ans, tmp)
print ans | cloudzfy/euler | src/11.py | Python | mit | 3,512 |
#!/usr/bin/env python
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='MKS',
version='0.1.0',
description="A unit system based on meter, kilo, and second",
author='Roderic Day',
author_email='[email protected]',
url='www.permanentsignal.com',
license='MIT',
)
| RodericDay/MKS | setup.py | Python | mit | 375 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("capture", ["capture.pyx"])]
)
| hirolovesbeer/sekiwake | src/setup.py | Python | mit | 223 |
# Generated by Django 2.0 on 2018-02-08 11:45
from django.db import migrations
def forwards(apps, schema_editor):
"""
Change all DancePiece objects into Work objects, and their associated
data into WorkRole and WorkSelection models, then delete the DancePiece.
"""
DancePiece = apps.get_model("spectator_events", "DancePiece")
Work = apps.get_model("spectator_events", "Work")
WorkRole = apps.get_model("spectator_events", "WorkRole")
WorkSelection = apps.get_model("spectator_events", "WorkSelection")
for dp in DancePiece.objects.all():
work = Work.objects.create(
kind="dancepiece", title=dp.title, title_sort=dp.title_sort
)
for role in dp.roles.all():
WorkRole.objects.create(
creator=role.creator,
work=work,
role_name=role.role_name,
role_order=role.role_order,
)
for selection in dp.events.all():
WorkSelection.objects.create(
event=selection.event, work=work, order=selection.order
)
dp.delete()
class Migration(migrations.Migration):
dependencies = [
("spectator_events", "0027_classicalworks_to_works"),
]
operations = [
migrations.RunPython(forwards),
]
| philgyford/django-spectator | spectator/events/migrations/0028_dancepieces_to_works.py | Python | mit | 1,326 |
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.testing as npt
import pandas as pd
from deepcpg.data import annotations as annos
def test_join_overlapping():
f = annos.join_overlapping
s, e = f([], [])
assert len(s) == 0
assert len(e) == 0
s = [1, 3, 6]
e = [2, 4, 10]
expect = (s, e)
result = f(s, e)
assert result == expect
x = np.array([[1, 2],
[3, 4], [4, 5],
[6, 8], [8, 8], [8, 9],
[10, 15], [10, 11], [11, 14], [14, 16]]
)
expect = [[1, 2], [3, 5], [6, 9], [10, 16]]
result = np.array(f(x[:, 0], x[:, 1])).T
npt.assert_array_equal(result, expect)
def test_in_which():
f = annos.in_which
ys = [2, 4, 12, 17]
ye = [2, 8, 15, 18]
x = []
expect = []
result = f(x, ys, ye)
npt.assert_array_equal(result, expect)
x = [-1, 3, 9, 19]
expect = [-1, -1, -1, -1]
result = f(x, ys, ye)
npt.assert_array_equal(result, expect)
x = [-1, 2, 2, 3, 4, 8, 15, 16]
expect = [-1, 0, 0, -1, 1, 1, 2, -1]
result = f(x, ys, ye)
npt.assert_array_equal(result, expect)
def test_is_in():
ys = [2, 4, 12, 17]
ye = [2, 8, 15, 18]
x = [-1, 2, 2, 3, 4, 8, 15, 16]
expect = [False, True, True, False, True, True, True, False]
result = annos.is_in(x, ys, ye)
npt.assert_array_equal(result, expect)
def test_distance():
start = [3, 10, 17]
end = [6, 15, 18]
pos = [1, 2, 5, 8, 10, 15, 16, 19]
expect = [2, 1, 0, 2, 0, 0, 1, 1]
start = np.asarray(start)
end = np.asarray(end)
pos = np.asarray(pos)
actual = annos.distance(pos, start, end)
npt.assert_array_equal(actual, expect)
pos = [1, 6, 7, 9]
expect = [2, 0, 1, 1]
start = np.asarray(start)
end = np.asarray(end)
pos = np.asarray(pos)
actual = annos.distance(pos, start, end)
npt.assert_array_equal(actual, expect)
def test_extend_frame():
d = pd.DataFrame({
'chromo': '1',
'start': [2, 3, 3, 1, 1],
'end': [3, 3, 8, 2, 1]
})
d = d.loc[:, ['chromo', 'start', 'end']]
expect = pd.DataFrame({
'chromo': '1',
'start': [1, 2, 3, 1, 1],
'end': [4, 5, 8, 4, 4]
})
expect = expect.loc[:, ['chromo', 'start', 'end']]
actual = annos.extend_len_frame(d, 4)
npt.assert_array_equal(actual.values, expect.values)
def test_group_overlapping():
npt.assert_array_equal(annos.group_overlapping([], []), [])
npt.assert_array_equal(annos.group_overlapping([1], [2]), [0])
s = [1, 5, 7, 11, 13, 16, 22]
e = [3, 8, 9, 15, 17, 20, 24]
g = [0, 1, 1, 2, 2, 2, 3]
a = annos.group_overlapping(s, e)
npt.assert_array_equal(a, g)
| cangermueller/deepcpg | tests/deepcpg/data/test_annos.py | Python | mit | 2,793 |
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root@localhost:3306/microblog'
SQLALCHEMY_TRACK_MODIFICATIONS = False
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
DEBUG = False
| yejianye/microblog | asura/conf/dev.py | Python | mit | 170 |
#empty file
| benno16/extraFiles | FlaskApp/__init__.py | Python | mit | 49 |
# Copyright (C) 2014 Rémi Bèges
# For conditions of distribution and use, see copyright notice in the LICENSE file
from distantio.DistantIO import DistantIO
from distantio.DistantIOProtocol import distantio_protocol
from distantio.SerialPort import SerialPort
from distantio.crc import crc16
| Overdrivr/DistantIO | distantio/__init__.py | Python | mit | 295 |
import datetime
import logging
from unittest.mock import patch
from django.test import TestCase
from django.test.utils import override_settings
from konfera import models
from payments import utils
from payments.models import ProcessedTransaction
def make_payment(new_data):
data = {
'date': datetime.date(2015, 10, 5),
'variable_symbol': '1234',
'transaction_id': '1234',
'amount': 0.0,
'currency': 'EUR',
'comment': '',
'executor': '',
}
data.update(new_data)
return data
logging.disable(logging.WARNING)
class TestGetLastPayements(TestCase):
@patch('django.utils.timezone.now', return_value=datetime.datetime(2016, 9, 29))
@patch('fiobank.FioBank.period', return_value=[])
@override_settings(FIO_BANK_TOKEN='fio_token')
def test__get_last_payments(self, FioBankMockPeriod, timezone_mock):
data = utils._get_last_payments()
self.assertEqual(data, [])
FioBankMockPeriod.assert_called_with('2016-09-26', '2016-09-29')
timezone_mock.assert_called_once_with()
class TestGetNotProcessedPayments(TestCase):
def test_no_processed_payment_is_available(self):
payments = [
make_payment({'transaction_id': '1'}),
make_payment({'transaction_id': '2'}),
]
self.assertEqual(
list(utils._get_not_processed_payments(payments)),
payments
)
def test_processed_payments_filtered(self):
payments = [
make_payment({'transaction_id': '1'}),
make_payment({'transaction_id': '2'}),
make_payment({'transaction_id': '3'}),
]
ProcessedTransaction.objects.create(transaction_id='2', amount=0)
self.assertEqual(
list(utils._get_not_processed_payments(payments)),
[
make_payment({'transaction_id': '1'}),
make_payment({'transaction_id': '3'}),
]
)
class TestGetPaymentsForOrder(TestCase):
def setUp(self):
self.order = models.Order.objects.create(price=200, discount=0)
def test_no_payments(self):
payments = []
self.assertEqual(
list(utils._get_payments_for_order(self.order, payments)),
[]
)
def test_payments_for_different_orders(self):
payments = [
make_payment({'variable_symbol': str(self.order.pk + 7)}),
make_payment({'variable_symbol': str(self.order.pk + 13)}),
]
self.assertEqual(
list(utils._get_payments_for_order(self.order, payments)),
[]
)
def test_payment_found_for_order(self):
payments = [
make_payment({'variable_symbol': self.order.variable_symbol}),
make_payment({'variable_symbol': str(self.order.pk + 13)}),
]
self.assertEqual(
list(utils._get_payments_for_order(self.order, payments)),
[make_payment({'variable_symbol': self.order.variable_symbol})]
)
def test_multiple_payments_found_for_order(self):
payments = [
make_payment({'variable_symbol': self.order.variable_symbol}),
make_payment({'variable_symbol': str(self.order.pk + 13)}),
make_payment({'variable_symbol': self.order.variable_symbol}),
]
self.assertEqual(
list(utils._get_payments_for_order(self.order, payments)),
[
make_payment({'variable_symbol': self.order.variable_symbol}),
make_payment({'variable_symbol': self.order.variable_symbol}),
]
)
class TestProcessPayment(TestCase):
def test_attendee_paid_less(self):
order = models.Order.objects.create(price=100, discount=10)
payment = make_payment({'amount': 80, 'transaction_id': '7'})
utils._process_payment(order, payment)
self.assertEqual(order.amount_paid, 80)
self.assertEqual(order.status, models.order.PARTLY_PAID)
def test_attendee_paid_enough(self):
order = models.Order.objects.create(price=100, discount=10, amount_paid=5, status=models.order.PARTLY_PAID)
payment = make_payment({'amount': 85, 'transaction_id': '7'})
utils._process_payment(order, payment)
self.assertEqual(order.amount_paid, 90)
self.assertEqual(order.status, models.order.PAID)
def test_payment_marked_as_processed(self):
order = models.Order.objects.create(price=100, discount=10)
payment = make_payment({'amount': 80, 'transaction_id': '7'})
self.assertEqual(ProcessedTransaction.objects.count(), 0)
utils._process_payment(order, payment)
self.assertEqual(ProcessedTransaction.objects.count(), 1)
self.assertEqual(ProcessedTransaction.objects.all()[0].transaction_id, '7')
class TestCheckPaymentsStatus(TestCase):
def setUp(self):
self.order1 = models.Order.objects.create(price=200, discount=0)
self.order2 = models.Order.objects.create(price=200, discount=7)
@patch('payments.utils._get_last_payments', return_value=[])
def test_no_payments_available(self, mock_api_call):
""" FioBank doesn't have any payments - no order status should be changed """
utils.check_payments_status()
order1 = models.Order.objects.get(pk=self.order1.pk)
order2 = models.Order.objects.get(pk=self.order2.pk)
self.assertEqual(mock_api_call.call_count, 1)
self.assertEqual(order1.status, models.order.AWAITING)
self.assertEqual(order2.status, models.order.AWAITING)
@patch('payments.utils._get_last_payments')
def test_one_order_is_paid(self, mock_api_call):
""" FioBank doesn't have a payment for order1 - order's status was changed """
mock_api_call.return_value = [
make_payment({'variable_symbol': self.order1.variable_symbol, 'amount': 200, 'transaction_id': '7'}),
]
utils.check_payments_status()
order1 = models.Order.objects.get(pk=self.order1.pk)
order2 = models.Order.objects.get(pk=self.order2.pk)
self.assertEqual(mock_api_call.call_count, 1)
self.assertEqual(order1.status, models.order.PAID)
self.assertEqual(order2.status, models.order.AWAITING)
@patch('payments.utils._get_last_payments')
def test_all_orders_are_paid(self, mock_api_call):
mock_api_call.return_value = [
make_payment({'variable_symbol': self.order1.variable_symbol, 'amount': 200, 'transaction_id': '7'}),
make_payment({'variable_symbol': self.order2.variable_symbol, 'amount': 200, 'transaction_id': '8'}),
]
utils.check_payments_status()
order1 = models.Order.objects.get(pk=self.order1.pk)
order2 = models.Order.objects.get(pk=self.order2.pk)
self.assertEqual(mock_api_call.call_count, 1)
self.assertEqual(order1.status, models.order.PAID)
self.assertEqual(order2.status, models.order.PAID)
@patch('payments.utils._get_last_payments')
def test_order_is_paid_in_multiple_payments(self, mock_api_call):
mock_api_call.return_value = [
make_payment({'variable_symbol': self.order1.variable_symbol, 'amount': 150, 'transaction_id': '7'}),
make_payment({'variable_symbol': self.order1.variable_symbol, 'amount': 50, 'transaction_id': '79'}),
make_payment({'variable_symbol': self.order2.variable_symbol, 'amount': 30, 'transaction_id': '80'}),
]
utils.check_payments_status()
order1 = models.Order.objects.get(pk=self.order1.pk)
order2 = models.Order.objects.get(pk=self.order2.pk)
self.assertEqual(order1.status, models.order.PAID)
self.assertEqual(order2.status, models.order.PARTLY_PAID)
| kapucko/django-konfera | payments/tests.py | Python | mit | 7,832 |
"""
Django settings for news_project project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from django.core.mail import send_mail
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dtou6-c)r2@t$p2tudrq2gjy92wsfdkst2yng^5y-akom$$f13'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ACCOUNT_ACTIVATION_DAYS = 7
LOGIN_REDIRECT_URL = 'home'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'news_project',
'articles',
'profiles',
'taggit'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'news_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'news_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'newssitedb',
'USER': os.environ.get('DB_USER', ''),
'PASSWORD': os.environ.get("DB_PASSWORD", ''),
'HOST': '127.0.0.1',
'PORT': '5432',
'TEST': {
'NAME': 'IMAGER_TEST_DB'
}
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Email Settings
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = os.environ.get('EM_PASS', '')
SERVER_EMAIL = '[email protected]'
DEFAULT_FROM_EMAIL = "News Project"
| julienawilson/news-project | news_project/news_project/settings.py | Python | mit | 3,887 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBaseESGD import *
class agilentE4431B(agilentBaseESGD):
"Agilent E4431B ESG-D IVI RF signal generator driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'ESG-D4000B')
super(agilentE4431B, self).__init__(*args, **kwargs)
self._frequency_low = 250e3
self._frequency_high = 2e9
| Diti24/python-ivi | ivi/agilent/agilentE4431B.py | Python | mit | 1,495 |
import random, sys
if len(sys.argv) != 2:
print "Usage: python generate.py <how many instructions you want>"
sys.exit()
choices = ("(", ")")
output = ""
for x in range(int(sys.argv[1])):
output += random.choice(choices)
f = open("randout", "w")
f.write(output)
f.close()
print "Created an instruction set that is " + sys.argv[1] + " characters long"
| b4ux1t3/adventofcode2015 | day1/generate.py | Python | mit | 366 |